xref: /dpdk/drivers/net/qede/qede_ethdev.c (revision fb88acb59ac7be5cf20d1570e0c66e3e91167c53)
/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "qede_ethdev.h"
#include <rte_alarm.h>
#include <rte_version.h>

/* Globals */
static const struct qed_eth_ops *qed_ops;
static int64_t timer_period = 1;

/* VXLAN tunnel classification mapping */
const struct _qede_vxlan_tunn_types {
	uint16_t rte_filter_type;
	enum ecore_filter_ucast_type qede_type;
	enum ecore_tunn_clss qede_tunn_clss;
	const char *string;
} qede_tunn_types[] = {
	{
		ETH_TUNNEL_FILTER_OMAC,
		ECORE_FILTER_MAC,
		ECORE_TUNN_CLSS_MAC_VLAN,
		"outer-mac"
	},
	{
		ETH_TUNNEL_FILTER_TENID,
		ECORE_FILTER_VNI,
		ECORE_TUNN_CLSS_MAC_VNI,
		"vni"
	},
	{
		ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_INNER_MAC,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-mac"
	},
	{
		ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_INNER_VLAN,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-vlan"
	},
	{
		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
		ECORE_FILTER_MAC_VNI_PAIR,
		ECORE_TUNN_CLSS_MAC_VNI,
		"outer-mac and vni"
	},
	{
		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-mac and inner-mac"
	},
	{
		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-mac and inner-vlan"
	},
	{
		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_INNER_MAC_VNI_PAIR,
		ECORE_TUNN_CLSS_INNER_MAC_VNI,
		"vni and inner-mac"
	},
	{
		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"vni and inner-vlan"
	},
	{
		ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_INNER_PAIR,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-mac and inner-vlan"
	},
	{
		ETH_TUNNEL_FILTER_OIP,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-IP"
	},
	{
		ETH_TUNNEL_FILTER_IIP,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"inner-IP"
	},
	{
		RTE_TUNNEL_FILTER_IMAC_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_IVLAN"
	},
	{
		RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_IVLAN_TENID"
	},
	{
		RTE_TUNNEL_FILTER_IMAC_TENID,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_TENID"
	},
	{
		RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"OMAC_TENID_IMAC"
	},
};

struct rte_qede_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint64_t offset;
};

static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
	{"rx_unicast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
	{"rx_multicast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
	{"rx_broadcast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
	{"rx_unicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
	{"rx_multicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
	{"rx_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},

	{"tx_unicast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
	{"tx_multicast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
	{"tx_broadcast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
	{"tx_unicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
	{"tx_multicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
	{"tx_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},

	{"rx_64_byte_packets",
		offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
	{"rx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_65_to_127_byte_packets)},
	{"rx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_128_to_255_byte_packets)},
	{"rx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_256_to_511_byte_packets)},
	{"rx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_512_to_1023_byte_packets)},
	{"rx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_1024_to_1518_byte_packets)},
	{"tx_64_byte_packets",
		offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
	{"tx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_65_to_127_byte_packets)},
	{"tx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_128_to_255_byte_packets)},
	{"tx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_256_to_511_byte_packets)},
	{"tx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_512_to_1023_byte_packets)},
	{"tx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_1024_to_1518_byte_packets)},

	{"rx_mac_ctrl_frames", /* struct member keeps the legacy "crtl" name */
		offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
	{"tx_mac_control_frames",
		offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
	{"rx_pause_frames",
		offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
	{"tx_pause_frames",
		offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
	{"rx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
	{"tx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},

	{"rx_crc_errors",
		offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
	{"rx_align_errors",
		offsetof(struct ecore_eth_stats_common, rx_align_errors)},
	{"rx_carrier_errors",
		offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
	{"rx_oversize_packet_errors",
		offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
	{"rx_jabber_errors",
		offsetof(struct ecore_eth_stats_common, rx_jabbers)},
	{"rx_undersize_packet_errors",
		offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
	{"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
	{"rx_host_buffer_not_available",
		offsetof(struct ecore_eth_stats_common, no_buff_discards)},
	/* Number of packets discarded because they are bigger than MTU */
	{"rx_packet_too_big_discards",
		offsetof(struct ecore_eth_stats_common,
			 packet_too_big_discard)},
	{"rx_ttl_zero_discards",
		offsetof(struct ecore_eth_stats_common, ttl0_discard)},
	{"rx_multi_function_tag_filter_discards",
		offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
	{"rx_mac_filter_discards",
		offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
	{"rx_hw_buffer_truncates",
		offsetof(struct ecore_eth_stats_common, brb_truncates)},
	{"rx_hw_buffer_discards",
		offsetof(struct ecore_eth_stats_common, brb_discards)},
	{"tx_error_drop_packets",
		offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},

	{"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
	{"rx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
	{"rx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
	{"rx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
	{"rx_mac_frames_ok",
		offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
	{"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
	{"tx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
	{"tx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
	{"tx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},

	{"lro_coalesced_packets",
		offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
	{"lro_coalesced_events",
		offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
	{"lro_aborts_num",
		offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
	{"lro_not_coalesced_packets",
		offsetof(struct ecore_eth_stats_common,
			 tpa_not_coalesced_pkts)},
	{"lro_coalesced_bytes",
		offsetof(struct ecore_eth_stats_common,
			 tpa_coalesced_bytes)},
};

static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
	{"rx_1519_to_1522_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_1519_to_1522_byte_packets)},
	{"rx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_1519_to_2047_byte_packets)},
	{"rx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_2048_to_4095_byte_packets)},
	{"rx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_4096_to_9216_byte_packets)},
	{"rx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_9217_to_16383_byte_packets)},

	{"tx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_1519_to_2047_byte_packets)},
	{"tx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_2048_to_4095_byte_packets)},
	{"tx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_4096_to_9216_byte_packets)},
	{"tx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_9217_to_16383_byte_packets)},

	{"tx_lpi_entry_count",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
	{"tx_total_collisions",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
};

static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
	{"rx_1519_to_max_byte_packets",
		offsetof(struct ecore_eth_stats, ah) +
		offsetof(struct ecore_eth_stats_ah,
			 rx_1519_to_max_byte_packets)},
	{"tx_1519_to_max_byte_packets",
		offsetof(struct ecore_eth_stats, ah) +
		offsetof(struct ecore_eth_stats_ah,
			 tx_1519_to_max_byte_packets)},
};

static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
	{"rx_q_segments",
		offsetof(struct qede_rx_queue, rx_segs)},
	{"rx_q_hw_errors",
		offsetof(struct qede_rx_queue, rx_hw_errors)},
	{"rx_q_allocation_errors",
		offsetof(struct qede_rx_queue, rx_alloc_errors)}
};

static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
}

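/* Slowpath interrupt handler - runs the slowpath DPC on the leading
 * hwfn and re-arms the interrupt line afterwards.
 */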
static void
qede_interrupt_handler(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	if (rte_intr_enable(eth_dev->intr_handle))
		DP_ERR(edev, "rte_intr_enable failed\n");
}

static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
	rte_memcpy(&qdev->dev_info, info, sizeof(*info));
	qdev->ops = qed_ops;
}

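/* Debug-only helper (RTE_LIBRTE_QEDE_DEBUG_INFO) that logs chip,
 * driver, firmware and management firmware versions at probe time.
 */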
#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
static void qede_print_adapter_info(struct qede_dev *qdev)
{
	struct ecore_dev *edev = &qdev->edev;
	struct qed_dev_info *info = &qdev->dev_info.common;
	static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
	static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];

	DP_INFO(edev, "*********************************\n");
	DP_INFO(edev, " DPDK version:%s\n", rte_version());
	DP_INFO(edev, " Chip details : %s %c%d\n",
		  ECORE_IS_BB(edev) ? "BB" : "AH",
		  'A' + edev->chip_rev,
		  (int)edev->chip_metal);
	snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
		 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
	snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
		 ver_str, QEDE_PMD_VERSION);
	DP_INFO(edev, " Driver version : %s\n", drv_ver);
	DP_INFO(edev, " Firmware version : %s\n", ver_str);

	snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
		 "%d.%d.%d.%d",
		(info->mfw_rev >> 24) & 0xff,
		(info->mfw_rev >> 16) & 0xff,
		(info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
	DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
	DP_INFO(edev, " Firmware file : %s\n", fw_file);
	DP_INFO(edev, "*********************************\n");
}
#endif

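/* Starts vport 0 on every hwfn with the given MTU. Note that for a VF
 * this is also the only way the MTU can be set (see the comment in
 * qede_dev_configure()).
 */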
static int
qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_start_params params;
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	memset(&params, 0, sizeof(params));
	params.vport_id = 0;
	params.mtu = mtu;
	/* @DPDK - Disable FW placement */
	params.zero_placement_offset = 1;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.concrete_fid = p_hwfn->hw_info.concrete_fid;
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_start(p_hwfn, &params);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Start V-PORT failed %d\n", rc);
			return rc;
		}
	}
	ecore_reset_vport_stats(edev);
	DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);

	return 0;
}

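/* Stops vport 0 on every hwfn; counterpart of qede_start_vport(). */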
static int
qede_stop_vport(struct ecore_dev *edev)
{
	struct ecore_hwfn *p_hwfn;
	uint8_t vport_id;
	int rc;
	int i;

	vport_id = 0;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
					 vport_id);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
			return rc;
		}
	}

	return 0;
}

/* Activate or deactivate vport via vport-update */
int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc = -1;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_vport_active_rx_flg = 1;
	params.update_vport_active_tx_flg = 1;
	params.vport_active_rx_flg = flg;
	params.vport_active_tx_flg = flg;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
				ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update vport\n");
			break;
		}
	}
	DP_INFO(edev, "vport %s\n", flg ? "activated" : "deactivated");
	return rc;
}

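/* Fills in the SGE TPA (LRO) parameters consumed by the vport-update
 * ramrod issued from qede_enable_tpa() below.
 */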
static void
qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
			   uint16_t mtu, bool enable)
{
	/* Enable LRO in split mode */
	sge_tpa_params->tpa_ipv4_en_flg = enable;
	sge_tpa_params->tpa_ipv6_en_flg = enable;
	sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
	sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
	/* set if tpa enable changes */
	sge_tpa_params->update_tpa_en_flg = 1;
	/* set if tpa parameters should be handled */
	sge_tpa_params->update_tpa_param_flg = enable;

	sge_tpa_params->max_buffers_per_cqe = 20;
	/* Enable TPA in split mode. In this mode each TPA segment
	 * starts on the new BD, so there is one BD per segment.
	 */
	sge_tpa_params->tpa_pkt_split_flg = 1;
	sge_tpa_params->tpa_hdr_data_split_flg = 0;
	sge_tpa_params->tpa_gro_consistent_flg = 0;
	sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
	sge_tpa_params->tpa_max_size = 0x7FFF;
	sge_tpa_params->tpa_min_size_to_start = mtu / 2;
	sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
}

/* Enable/disable LRO via vport-update */
int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_sge_tpa_params tpa_params;
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
	qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
	params.vport_id = 0;
	params.sge_tpa_params = &tpa_params;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
				ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update LRO\n");
			return -1;
		}
	}

	DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");

	return 0;
}

/* Update MTU via vport-update without doing port restart.
 * The vport must be deactivated before calling this API.
 */
int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.mtu = mtu;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
				ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update MTU\n");
			return -1;
		}
	}
	DP_INFO(edev, "MTU updated to %u\n", mtu);

	return 0;
}

static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
{
	memset(ucast, 0, sizeof(struct ecore_filter_ucast));
	ucast->is_rx_filter = true;
	ucast->is_tx_filter = true;
	/* ucast->assert_on_error = true; - For debug */
}

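/* Translates the requested rx-mode (regular/promisc/allmulti) into
 * ecore accept flags and issues the filter command. For a VF in
 * promiscuous mode, unmatched unicast is accepted on the Tx side too.
 */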
static int
qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
			     enum qed_filter_rx_mode_type type)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_filter_accept_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.update_rx_mode_config = 1;
	flags.update_tx_mode_config = 1;
	flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
		ECORE_ACCEPT_MCAST_MATCHED |
		ECORE_ACCEPT_BCAST;

	flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
		ECORE_ACCEPT_MCAST_MATCHED |
		ECORE_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
		if (IS_VF(edev)) {
			flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
			DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
		}
	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
		flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
	} else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
				QED_FILTER_RX_MODE_TYPE_PROMISC)) {
		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
			ECORE_ACCEPT_MCAST_UNMATCHED;
	}

	return ecore_filter_accept_cmd(edev, 0, flags, false, false,
			ECORE_SPQ_MODE_CB, NULL);
}

static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn,
				    uint8_t clss, bool mode, bool mask)
{
	memset(p_tunn, 0, sizeof(struct ecore_tunnel_info));
	p_tunn->vxlan.b_update_mode = mode;
	p_tunn->vxlan.b_mode_enabled = mask;
	p_tunn->b_update_rx_cls = true;
	p_tunn->b_update_tx_cls = true;
	p_tunn->vxlan.tun_cls = clss;
}

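/* Adds or removes a unicast (MAC, VLAN, VNI) entry in the driver's
 * shadow list; duplicates are rejected so the same filter is never
 * programmed twice.
 */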
static int
qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
		  bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_ucast_entry *tmp = NULL;
	struct qede_ucast_entry *u;
	struct ether_addr *mac_addr;

	mac_addr = (struct ether_addr *)ucast->mac;
	if (add) {
		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
			if ((memcmp(mac_addr, &tmp->mac,
				    ETHER_ADDR_LEN) == 0) &&
			     ucast->vni == tmp->vni &&
			     ucast->vlan == tmp->vlan) {
				DP_ERR(edev, "Unicast MAC is already added"
				       " with vlan = %u, vni = %u\n",
				       ucast->vlan, ucast->vni);
				return -EEXIST;
			}
		}
		u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
			       RTE_CACHE_LINE_SIZE);
		if (!u) {
			DP_ERR(edev, "Did not allocate memory for ucast\n");
			return -ENOMEM;
		}
		ether_addr_copy(mac_addr, &u->mac);
		u->vlan = ucast->vlan;
		u->vni = ucast->vni;
		SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
		qdev->num_uc_addr++;
	} else {
		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
			if ((memcmp(mac_addr, &tmp->mac,
				    ETHER_ADDR_LEN) == 0) &&
			    ucast->vlan == tmp->vlan &&
			    ucast->vni == tmp->vni)
				break;
		}
		if (tmp == NULL) {
			DP_INFO(edev, "Unicast MAC is not found\n");
			return -EINVAL;
		}
		SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
		qdev->num_uc_addr--;
	}

	return 0;
}

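/* Same shadow-list bookkeeping as qede_ucast_filter(), but for
 * multicast MACs (no VLAN/VNI matching).
 */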
static int
qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
		  bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ether_addr *mac_addr;
	struct qede_mcast_entry *tmp = NULL;
	struct qede_mcast_entry *m;

	mac_addr = (struct ether_addr *)mcast->mac;
	if (add) {
		SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
			if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) {
				DP_ERR(edev,
					"Multicast MAC is already added\n");
				return -EEXIST;
			}
		}
		m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
			RTE_CACHE_LINE_SIZE);
		if (!m) {
			DP_ERR(edev,
				"Did not allocate memory for mcast\n");
			return -ENOMEM;
		}
		ether_addr_copy(mac_addr, &m->mac);
		SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
		qdev->num_mc_addr++;
	} else {
		SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
			if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0)
				break;
		}
		if (tmp == NULL) {
			DP_INFO(edev, "Multicast mac is not found\n");
			return -EINVAL;
		}
		SLIST_REMOVE(&qdev->mc_list_head, tmp,
			     qede_mcast_entry, list);
		qdev->num_mc_addr--;
	}

	return 0;
}

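/* Common MAC add/remove entry point: dispatches to the multicast or
 * unicast path, enforces the filter-table limits and programs the
 * firmware through the ecore filter commands.
 */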
static enum _ecore_status_t
qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
		 bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc;
	struct ecore_filter_mcast mcast;
	struct qede_mcast_entry *tmp;
	uint16_t j = 0;

	/* Multicast */
	if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) {
		if (add) {
			if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) {
				DP_ERR(edev,
				       "Mcast filter table limit exceeded, "
				       "Please enable mcast promisc mode\n");
				return -ECORE_INVAL;
			}
		}
		rc = qede_mcast_filter(eth_dev, ucast, add);
		if (rc == 0) {
			DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr);
			memset(&mcast, 0, sizeof(mcast));
			mcast.num_mc_addrs = qdev->num_mc_addr;
			mcast.opcode = ECORE_FILTER_ADD;
			SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
				ether_addr_copy(&tmp->mac,
					(struct ether_addr *)&mcast.mac[j]);
				j++;
			}
			rc = ecore_filter_mcast_cmd(edev, &mcast,
						    ECORE_SPQ_MODE_CB, NULL);
		}
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to add multicast filter"
			       " rc = %d, op = %d\n", rc, add);
		}
	} else { /* Unicast */
		if (add) {
			if (qdev->num_uc_addr >=
			    qdev->dev_info.num_mac_filters) {
				DP_ERR(edev,
				       "Ucast filter table limit exceeded,"
				       " Please enable promisc mode\n");
				return -ECORE_INVAL;
			}
		}
		rc = qede_ucast_filter(eth_dev, ucast, add);
		if (rc == 0)
			rc = ecore_filter_ucast_cmd(edev, ucast,
						    ECORE_SPQ_MODE_CB, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
			       rc, add);
		}
	}

	return rc;
}

static int
qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
		  __rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct ecore_filter_ucast ucast;
	int re;

	qede_set_ucast_cmn_params(&ucast);
	ucast.type = ECORE_FILTER_MAC;
	ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
	re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
	return re;
}

static void
qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_filter_ucast ucast;

	PMD_INIT_FUNC_TRACE(edev);

	if (index >= qdev->dev_info.num_mac_filters) {
		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
		       index, qdev->dev_info.num_mac_filters);
		return;
	}

	qede_set_ucast_cmn_params(&ucast);
	ucast.opcode = ECORE_FILTER_REMOVE;
	ucast.type = ECORE_FILTER_MAC;

	/* Use the index maintained by rte */
	ether_addr_copy(&eth_dev->data->mac_addrs[index],
			(struct ether_addr *)&ucast.mac);

	ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
}

static void
qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
					       mac_addr->addr_bytes)) {
		DP_ERR(edev, "Setting MAC address is not allowed\n");
		ether_addr_copy(&qdev->primary_mac,
				&eth_dev->data->mac_addrs[0]);
		return;
	}

	qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}

static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_accept_any_vlan_flg = 1;
	params.accept_any_vlan = flg;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
				ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to configure accept-any-vlan\n");
			return;
		}
	}

	DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
}

static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_inner_vlan_removal_flg = 1;
	params.inner_vlan_removal_flg = flg;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
				ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update vport\n");
			return -1;
		}
	}

	DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
	return 0;
}

static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
				uint16_t vlan_id, int on)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_dev_eth_info *dev_info = &qdev->dev_info;
	struct qede_vlan_entry *tmp = NULL;
	struct qede_vlan_entry *vlan;
	struct ecore_filter_ucast ucast;
	int rc;

	if (on) {
		if (qdev->configured_vlans == dev_info->num_vlan_filters) {
			DP_ERR(edev, "Reached max VLAN filter limit,"
				      " enabling accept_any_vlan\n");
			qede_config_accept_any_vlan(qdev, true);
			return 0;
		}

		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
			if (tmp->vid == vlan_id) {
				DP_ERR(edev, "VLAN %u already configured\n",
				       vlan_id);
				return -EEXIST;
			}
		}

		vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
				  RTE_CACHE_LINE_SIZE);

		if (!vlan) {
			DP_ERR(edev, "Did not allocate memory for VLAN\n");
			return -ENOMEM;
		}

		qede_set_ucast_cmn_params(&ucast);
		ucast.opcode = ECORE_FILTER_ADD;
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = vlan_id;
		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
					    NULL);
		if (rc != 0) {
			DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
			       rc);
			rte_free(vlan);
		} else {
			vlan->vid = vlan_id;
			SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
			qdev->configured_vlans++;
			DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
				vlan_id, qdev->configured_vlans);
		}
	} else {
		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
			if (tmp->vid == vlan_id)
				break;
		}

		if (!tmp) {
			if (qdev->configured_vlans == 0) {
				DP_INFO(edev,
					"No VLAN filters configured yet\n");
				return 0;
			}

			DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
			return -EINVAL;
		}

		SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);

		qede_set_ucast_cmn_params(&ucast);
		ucast.opcode = ECORE_FILTER_REMOVE;
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = vlan_id;
		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
					    NULL);
		if (rc != 0) {
			DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
			       vlan_id, rc);
		} else {
			qdev->configured_vlans--;
			DP_INFO(edev, "VLAN %u removed configured_vlans %u\n",
				vlan_id, qdev->configured_vlans);
		}
	}

	return rc;
}

static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rxmode->hw_vlan_strip)
			(void)qede_vlan_stripping(eth_dev, 1);
		else
			(void)qede_vlan_stripping(eth_dev, 0);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN filtering kicks in when a VLAN is added */
		if (rxmode->hw_vlan_filter) {
			qede_vlan_filter_set(eth_dev, 0, 1);
		} else {
			if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
				DP_ERR(edev,
				  " Please remove existing VLAN filters"
				  " before disabling VLAN filtering\n");
				/* Signal app that VLAN filtering is still
				 * enabled
				 */
				rxmode->hw_vlan_filter = true;
			} else {
				qede_vlan_filter_set(eth_dev, 0, 0);
			}
		}
	}

	if (mask & ETH_VLAN_EXTEND_MASK)
		DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
			" and classification is based on outer tag only\n");

	DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
		mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
}

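/* Fills the default RSS hash key with libc rand() values. This is
 * fine as a per-run default but is not a cryptographically secure
 * source of randomness.
 */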
static void qede_prandom_bytes(uint32_t *buff)
{
	uint8_t i;

	srand((unsigned int)time(NULL));
	for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
		buff[i] = rand();
}

int qede_config_rss(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
#endif
	uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
	struct rte_eth_rss_reta_entry64 reta_conf[2];
	struct rte_eth_rss_conf rss_conf;
	uint32_t i, id, pos, q;

	rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	if (!rss_conf.rss_key) {
		DP_INFO(edev, "Applying driver default key\n");
		rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
		qede_prandom_bytes(&def_rss_key[0]);
		rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
	}

	/* Configure RSS hash */
	if (qede_rss_hash_update(eth_dev, &rss_conf))
		return -EINVAL;

	/* Configure default RETA */
	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;

	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		id = i / RTE_RETA_GROUP_SIZE;
		pos = i % RTE_RETA_GROUP_SIZE;
		q = i % QEDE_RSS_COUNT(qdev);
		reta_conf[id].reta[pos] = q;
	}
	if (qede_rss_reta_update(eth_dev, &reta_conf[0],
				 ECORE_RSS_IND_TABLE_SIZE))
		return -EINVAL;

	return 0;
}

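/* Kicks off fastpath processing on every hwfn once the queues and
 * vport are up.
 */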
static void qede_fastpath_start(struct ecore_dev *edev)
{
	struct ecore_hwfn *p_hwfn;
	int i;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		ecore_hw_start_fastpath(p_hwfn);
	}
}

static int qede_dev_start(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* Update MTU only if it has changed */
	if (qdev->mtu != qdev->new_mtu) {
		if (qede_update_mtu(eth_dev, qdev->new_mtu))
			goto err;
		qdev->mtu = qdev->new_mtu;
		/* If MTU has changed then update TPA too */
		if (qdev->enable_lro)
			if (qede_enable_tpa(eth_dev, true))
				goto err;
	}

	/* Start queues */
	if (qede_start_queues(eth_dev))
		goto err;

	/* Newer SR-IOV PF drivers expect RX/TX queues to be started before
	 * enabling RSS, so RSS configuration is deferred up to this point.
	 * We retain the same behavior in the PF case as well, so no PF/VF
	 * specific check is done here.
	 */
	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		if (qede_config_rss(eth_dev))
			goto err;

	/* Enable vport */
	if (qede_activate_vport(eth_dev, true))
		goto err;

	/* Bring-up the link */
	qede_dev_set_link_state(eth_dev, true);

	/* Start/resume traffic */
	qede_fastpath_start(edev);

	DP_INFO(edev, "Device started\n");

	return 0;
err:
	DP_ERR(edev, "Device start fails\n");
	return -1; /* common error code is < 0 */
}

static void qede_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* Disable vport */
	if (qede_activate_vport(eth_dev, false))
		return;

	if (qdev->enable_lro)
		qede_enable_tpa(eth_dev, false);

	/* TODO: Do we need to disable LRO or RSS? */
	/* Stop queues */
	qede_stop_queues(eth_dev);

	/* Disable traffic */
	ecore_hw_stop_fastpath(edev); /* TBD - loop */

	/* Bring the link down */
	qede_dev_set_link_state(eth_dev, false);

	DP_INFO(edev, "Device is stopped\n");
}

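/* ethdev configure callback: validates the queue layout (including
 * the 100G/CMT constraints), reallocates fastpath resources when the
 * queue counts change, applies the MTU and sets up LRO and VLAN
 * offloads.
 */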
static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;

	PMD_INIT_FUNC_TRACE(edev);

	/* Check requirements for 100G mode */
	if (ECORE_IS_CMT(edev)) {
		if (eth_dev->data->nb_rx_queues < 2 ||
		    eth_dev->data->nb_tx_queues < 2) {
			DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
			return -EINVAL;
		}

		if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
		    (eth_dev->data->nb_tx_queues % 2 != 0)) {
			DP_ERR(edev,
			       "100G mode needs even no. of RX/TX queues\n");
			return -EINVAL;
		}
	}

	/* Sanity checks and throw warnings */
	if (rxmode->enable_scatter)
		eth_dev->data->scattered_rx = 1;

	if (!rxmode->hw_strip_crc)
		DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");

	if (!rxmode->hw_ip_checksum)
		DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
			      "in hw\n");
	if (rxmode->header_split)
		DP_INFO(edev, "Header split enable is not supported\n");
	if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode ==
				ETH_MQ_RX_RSS)) {
		DP_ERR(edev, "Unsupported multi-queue mode\n");
		return -ENOTSUP;
	}
	/* Flow director mode check */
	if (qede_check_fdir_support(eth_dev))
		return -ENOTSUP;

	/* Deallocate resources held previously. This is needed only if the
	 * queue count has changed from the previous configuration. If it is
	 * going to change, RX/TX queue setup will be called again and the
	 * fastpath pointers will be reinitialized there.
	 */
	if (qdev->num_tx_queues != eth_dev->data->nb_tx_queues ||
	    qdev->num_rx_queues != eth_dev->data->nb_rx_queues) {
		qede_dealloc_fp_resc(eth_dev);
		/* Proceed with updated queue count */
		qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
		qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
		if (qede_alloc_fp_resc(qdev))
			return -ENOMEM;
	}

	/* A VF's MTU has to be set via vport-start, whereas a PF's MTU
	 * can be updated via vport-update.
	 */
	if (IS_VF(edev)) {
		if (qede_start_vport(qdev, rxmode->max_rx_pkt_len))
			return -1;
	} else {
		if (qede_update_mtu(eth_dev, rxmode->max_rx_pkt_len))
			return -1;
	}

	qdev->mtu = rxmode->max_rx_pkt_len;
	qdev->new_mtu = qdev->mtu;

	/* Configure TPA parameters */
	if (rxmode->enable_lro) {
		if (qede_enable_tpa(eth_dev, true))
			return -EINVAL;
		/* Enable scatter mode for LRO */
		if (!rxmode->enable_scatter)
			eth_dev->data->scattered_rx = 1;
	}
	qdev->enable_lro = rxmode->enable_lro;

	/* Enable VLAN offloads by default */
	qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
			ETH_VLAN_FILTER_MASK |
			ETH_VLAN_EXTEND_MASK);

	DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
		QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));

	return 0;
}

/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
	.nb_max = 0x8000, /* 32K */
	.nb_min = 128,
	.nb_align = 128 /* lowest common multiple */
};

static const struct rte_eth_desc_lim qede_tx_desc_lim = {
	.nb_max = 0x8000, /* 32K */
	.nb_min = 256,
	.nb_align = 256,
	.nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
	.nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
};

static void
qede_dev_info_get(struct rte_eth_dev *eth_dev,
		  struct rte_eth_dev_info *dev_info)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_link_output link;
	uint32_t speed_cap = 0;

	PMD_INIT_FUNC_TRACE(edev);

	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
	dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
	dev_info->rx_desc_lim = qede_rx_desc_lim;
	dev_info->tx_desc_lim = qede_tx_desc_lim;

	if (IS_PF(edev))
		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
			QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
	else
		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
			QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
	dev_info->max_tx_queues = dev_info->max_rx_queues;

	dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
	dev_info->max_vfs = 0;
	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
	dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.txq_flags = QEDE_TXQ_FLAGS,
	};

	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP	|
				     DEV_RX_OFFLOAD_IPV4_CKSUM	|
				     DEV_RX_OFFLOAD_UDP_CKSUM	|
				     DEV_RX_OFFLOAD_TCP_CKSUM	|
				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_TCP_LRO);

	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT	|
				     DEV_TX_OFFLOAD_IPV4_CKSUM	|
				     DEV_TX_OFFLOAD_UDP_CKSUM	|
				     DEV_TX_OFFLOAD_TCP_CKSUM	|
				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_TCP_TSO |
				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO);

	memset(&link, 0, sizeof(struct qed_link_output));
	qdev->ops->common->get_link(edev, &link);
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		speed_cap |= ETH_LINK_SPEED_1G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		speed_cap |= ETH_LINK_SPEED_10G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		speed_cap |= ETH_LINK_SPEED_25G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		speed_cap |= ETH_LINK_SPEED_40G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		speed_cap |= ETH_LINK_SPEED_50G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		speed_cap |= ETH_LINK_SPEED_100G;
	dev_info->speed_capa = speed_cap;
}

/* return 0 means link status changed, -1 means not changed */
static int
qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t link_duplex;
	struct qed_link_output link;
	struct rte_eth_link *curr = &eth_dev->data->dev_link;

	memset(&link, 0, sizeof(struct qed_link_output));
	qdev->ops->common->get_link(edev, &link);

	/* Link Speed */
	curr->link_speed = link.speed;

	/* Link Mode */
	switch (link.duplex) {
	case QEDE_DUPLEX_HALF:
		link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	case QEDE_DUPLEX_FULL:
		link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case QEDE_DUPLEX_UNKNOWN:
	default:
		link_duplex = -1;
	}
	curr->link_duplex = link_duplex;

	/* Link Status */
	curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;

	/* AN */
	curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
			     ETH_LINK_AUTONEG : ETH_LINK_FIXED;

	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
		curr->link_speed, curr->link_duplex,
		curr->link_autoneg, curr->link_status);

	/* return 0 means link status changed, -1 means not changed */
	return ((curr->link_status == link.link_up) ? -1 : 0);
}

static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);
#endif

	enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	qed_configure_filter_rx_mode(eth_dev, type);
}

static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);
#endif

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
	else
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);
}

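/* Alarm callback that periodically runs the slowpath DPC on both
 * hwfns and re-arms itself; used on CMT (100G) devices and canceled
 * in qede_dev_close().
 */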
static void qede_poll_sp_sb_cb(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	int rc;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	qede_interrupt_action(&edev->hwfns[1]);

	rc = rte_eal_alarm_set(timer_period * US_PER_S,
			       qede_poll_sp_sb_cb,
			       (void *)eth_dev);
	if (rc != 0) {
		DP_ERR(edev, "Unable to start periodic"
			     " timer rc %d\n", rc);
		assert(false && "Unable to start periodic timer");
	}
}

static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* dev_stop() shall clean up fp resources in hw, but without releasing
	 * DMA memories and sw structures, so that dev_start() can be called
	 * by the app without reconfiguration. In dev_close(), however, all
	 * resources are released and the device has to be brought up afresh.
	 */
	if (eth_dev->data->dev_started)
		qede_dev_stop(eth_dev);

	qede_stop_vport(edev);
	qede_fdir_dealloc_resc(eth_dev);
	qede_dealloc_fp_resc(eth_dev);

	eth_dev->data->nb_rx_queues = 0;
	eth_dev->data->nb_tx_queues = 0;

	qdev->ops->common->slowpath_stop(edev);
	qdev->ops->common->remove(edev);
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     qede_interrupt_handler, (void *)eth_dev);
	if (ECORE_IS_CMT(edev))
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
}

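/* ethdev stats callback: aggregates the ecore vport statistics into
 * rte_eth_stats and fills per-queue counters for as many queues as
 * RTE_ETHDEV_QUEUE_STAT_CNTRS allows.
 */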
static int
qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;
	unsigned int i = 0, j = 0, qid;
	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
	struct qede_tx_queue *txq;

	ecore_get_vport_stats(edev, &stats);

	/* RX Stats */
	eth_stats->ipackets = stats.common.rx_ucast_pkts +
	    stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;

	eth_stats->ibytes = stats.common.rx_ucast_bytes +
	    stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;

	eth_stats->ierrors = stats.common.rx_crc_errors +
	    stats.common.rx_align_errors +
	    stats.common.rx_carrier_errors +
	    stats.common.rx_oversize_packets +
	    stats.common.rx_jabbers + stats.common.rx_undersize_packets;

	eth_stats->rx_nombuf = stats.common.no_buff_discards;

	eth_stats->imissed = stats.common.mftag_filter_discards +
	    stats.common.mac_filter_discards +
	    stats.common.no_buff_discards +
	    stats.common.brb_truncates + stats.common.brb_discards;

	/* TX stats */
	eth_stats->opackets = stats.common.tx_ucast_pkts +
	    stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;

	eth_stats->obytes = stats.common.tx_ucast_bytes +
	    stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;

	eth_stats->oerrors = stats.common.tx_err_drop_pkts;

	/* Queue stats */
	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
	    (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
		DP_VERBOSE(edev, ECORE_MSG_DEBUG,
		       "Not all the queue stats will be displayed. Set"
		       " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
		       " appropriately and retry.\n");

	for_each_rss(qid) {
		eth_stats->q_ipackets[i] =
			*(uint64_t *)(
				((char *)(qdev->fp_array[qid].rxq)) +
				offsetof(struct qede_rx_queue,
				rcv_pkts));
		eth_stats->q_errors[i] =
			*(uint64_t *)(
				((char *)(qdev->fp_array[qid].rxq)) +
				offsetof(struct qede_rx_queue,
				rx_hw_errors)) +
			*(uint64_t *)(
				((char *)(qdev->fp_array[qid].rxq)) +
				offsetof(struct qede_rx_queue,
				rx_alloc_errors));
		i++;
		if (i == rxq_stat_cntrs)
			break;
	}

	for_each_tss(qid) {
		txq = qdev->fp_array[qid].txq;
		eth_stats->q_opackets[j] =
			*((uint64_t *)(uintptr_t)
				(((uint64_t)(uintptr_t)(txq)) +
				 offsetof(struct qede_tx_queue,
					  xmit_pkts)));
		j++;
		if (j == txq_stat_cntrs)
			break;
	}

	return 0;
}

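/* Number of extended stats: the common strings plus the BB- or
 * AH-specific set, plus the per-rxq strings for each reported queue.
 */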
static unsigned
qede_get_xstats_count(struct qede_dev *qdev)
{
	if (ECORE_IS_BB(&qdev->edev))
		return RTE_DIM(qede_xstats_strings) +
		       RTE_DIM(qede_bb_xstats_strings) +
		       (RTE_DIM(qede_rxq_xstats_strings) *
			RTE_MIN(QEDE_RSS_COUNT(qdev),
				RTE_ETHDEV_QUEUE_STAT_CNTRS));
	else
		return RTE_DIM(qede_xstats_strings) +
		       RTE_DIM(qede_ah_xstats_strings) +
		       (RTE_DIM(qede_rxq_xstats_strings) *
			RTE_MIN(QEDE_RSS_COUNT(qdev),
				RTE_ETHDEV_QUEUE_STAT_CNTRS));
}

static int
qede_get_xstats_names(struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      __rte_unused unsigned int limit)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	const unsigned int stat_cnt = qede_get_xstats_count(qdev);
	unsigned int i, qid, stat_idx = 0;
	unsigned int rxq_stat_cntrs;

	if (xstats_names != NULL) {
		for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
			snprintf(xstats_names[stat_idx].name,
				sizeof(xstats_names[stat_idx].name),
				"%s",
				qede_xstats_strings[i].name);
			stat_idx++;
		}

		if (ECORE_IS_BB(edev)) {
			for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
				snprintf(xstats_names[stat_idx].name,
					sizeof(xstats_names[stat_idx].name),
					"%s",
					qede_bb_xstats_strings[i].name);
				stat_idx++;
			}
		} else {
			for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
				snprintf(xstats_names[stat_idx].name,
					sizeof(xstats_names[stat_idx].name),
					"%s",
					qede_ah_xstats_strings[i].name);
				stat_idx++;
			}
		}

		rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
					 RTE_ETHDEV_QUEUE_STAT_CNTRS);
		for (qid = 0; qid < rxq_stat_cntrs; qid++) {
			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
				snprintf(xstats_names[stat_idx].name,
					sizeof(xstats_names[stat_idx].name),
					"%.4s%d%s",
					qede_rxq_xstats_strings[i].name, qid,
					qede_rxq_xstats_strings[i].name + 4);
				stat_idx++;
			}
		}
	}

	return stat_cnt;
}

static int
qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		unsigned int n)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;
	const unsigned int num = qede_get_xstats_count(qdev);
	unsigned int i, qid, stat_idx = 0;
	unsigned int rxq_stat_cntrs;

	if (n < num)
		return num;

	ecore_get_vport_stats(edev, &stats);

	for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
		xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
					     qede_xstats_strings[i].offset);
		xstats[stat_idx].id = stat_idx;
		stat_idx++;
	}

	if (ECORE_IS_BB(edev)) {
		for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
			xstats[stat_idx].value =
					*(uint64_t *)(((char *)&stats) +
					qede_bb_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	} else {
		for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
			xstats[stat_idx].value =
					*(uint64_t *)(((char *)&stats) +
					qede_ah_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	}

	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (qid = 0; qid < rxq_stat_cntrs; qid++) {
		for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
			xstats[stat_idx].value = *(uint64_t *)(
				((char *)(qdev->fp_array[qid].rxq)) +
				qede_rxq_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	}
1676 
1677 	return stat_idx;
1678 }
1679 
1680 static void
1681 qede_reset_xstats(struct rte_eth_dev *dev)
1682 {
1683 	struct qede_dev *qdev = dev->data->dev_private;
1684 	struct ecore_dev *edev = &qdev->edev;
1685 
1686 	ecore_reset_vport_stats(edev);
1687 }
1688 
1689 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
1690 {
1691 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1692 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1693 	struct qed_link_params link_params;
1694 	int rc;
1695 
1696 	DP_INFO(edev, "setting link state %d\n", link_up);
1697 	memset(&link_params, 0, sizeof(link_params));
1698 	link_params.link_up = link_up;
1699 	rc = qdev->ops->common->set_link(edev, &link_params);
1700 	if (rc != ECORE_SUCCESS)
1701 		DP_ERR(edev, "Unable to set link state %d\n", link_up);
1702 
1703 	return rc;
1704 }
1705 
1706 static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
1707 {
1708 	return qede_dev_set_link_state(eth_dev, true);
1709 }
1710 
1711 static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
1712 {
1713 	return qede_dev_set_link_state(eth_dev, false);
1714 }
1715 
1716 static void qede_reset_stats(struct rte_eth_dev *eth_dev)
1717 {
1718 	struct qede_dev *qdev = eth_dev->data->dev_private;
1719 	struct ecore_dev *edev = &qdev->edev;
1720 
1721 	ecore_reset_vport_stats(edev);
1722 }
1723 
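/* All-multicast is layered on the single rx-mode filter type: if
 * promiscuous mode is already active it is kept in the request, so
 * toggling all-multicast does not silently drop promiscuous mode.
 */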
1724 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
1725 {
1726 	enum qed_filter_rx_mode_type type =
1727 	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1728 
1729 	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1730 		type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
1731 
1732 	qed_configure_filter_rx_mode(eth_dev, type);
1733 }
1734 
1735 static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
1736 {
1737 	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1738 		qed_configure_filter_rx_mode(eth_dev,
1739 				QED_FILTER_RX_MODE_TYPE_PROMISC);
1740 	else
1741 		qed_configure_filter_rx_mode(eth_dev,
1742 				QED_FILTER_RX_MODE_TYPE_REGULAR);
1743 }
1744 
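/* Map the ethdev flow-control request onto qed pause-config bits:
 * RTE_FC_FULL enables both RX and TX pause, RTE_FC_TX_PAUSE and
 * RTE_FC_RX_PAUSE each one direction; autoneg is honoured only when the
 * current link reports QEDE_SUPPORTED_AUTONEG.
 */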
1745 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
1746 			      struct rte_eth_fc_conf *fc_conf)
1747 {
1748 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1749 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1750 	struct qed_link_output current_link;
1751 	struct qed_link_params params;
1752 
1753 	memset(&current_link, 0, sizeof(current_link));
1754 	qdev->ops->common->get_link(edev, &current_link);
1755 
1756 	memset(&params, 0, sizeof(params));
1757 	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
1758 	if (fc_conf->autoneg) {
1759 		if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
1760 			DP_ERR(edev, "Autoneg not supported\n");
1761 			return -EINVAL;
1762 		}
1763 		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
1764 	}
1765 
1766 	/* Pause is assumed to be supported (SUPPORTED_Pause) */
1767 	if (fc_conf->mode == RTE_FC_FULL)
1768 		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
1769 					QED_LINK_PAUSE_RX_ENABLE);
1770 	if (fc_conf->mode == RTE_FC_TX_PAUSE)
1771 		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
1772 	if (fc_conf->mode == RTE_FC_RX_PAUSE)
1773 		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
1774 
1775 	params.link_up = true;
1776 	(void)qdev->ops->common->set_link(edev, &params);
1777 
1778 	return 0;
1779 }
1780 
1781 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
1782 			      struct rte_eth_fc_conf *fc_conf)
1783 {
1784 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1785 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1786 	struct qed_link_output current_link;
1787 
1788 	memset(&current_link, 0, sizeof(current_link));
1789 	qdev->ops->common->get_link(edev, &current_link);
1790 
1791 	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
1792 		fc_conf->autoneg = true;
1793 
1794 	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
1795 					 QED_LINK_PAUSE_TX_ENABLE))
1796 		fc_conf->mode = RTE_FC_FULL;
1797 	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
1798 		fc_conf->mode = RTE_FC_RX_PAUSE;
1799 	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
1800 		fc_conf->mode = RTE_FC_TX_PAUSE;
1801 	else
1802 		fc_conf->mode = RTE_FC_NONE;
1803 
1804 	return 0;
1805 }
1806 
1807 static const uint32_t *
1808 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
1809 {
1810 	static const uint32_t ptypes[] = {
1811 		RTE_PTYPE_L2_ETHER,
1812 		RTE_PTYPE_L2_ETHER_VLAN,
1813 		RTE_PTYPE_L3_IPV4,
1814 		RTE_PTYPE_L3_IPV6,
1815 		RTE_PTYPE_L4_TCP,
1816 		RTE_PTYPE_L4_UDP,
1817 		RTE_PTYPE_TUNNEL_VXLAN,
1818 		RTE_PTYPE_L4_FRAG,
1819 		/* Inner */
1820 		RTE_PTYPE_INNER_L2_ETHER,
1821 		RTE_PTYPE_INNER_L2_ETHER_VLAN,
1822 		RTE_PTYPE_INNER_L3_IPV4,
1823 		RTE_PTYPE_INNER_L3_IPV6,
1824 		RTE_PTYPE_INNER_L4_TCP,
1825 		RTE_PTYPE_INNER_L4_UDP,
1826 		RTE_PTYPE_INNER_L4_FRAG,
1827 		RTE_PTYPE_UNKNOWN
1828 	};
1829 
1830 	if (eth_dev->rx_pkt_burst == qede_recv_pkts)
1831 		return ptypes;
1832 
1833 	return NULL;
1834 }
1835 
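/* Translate ETH_RSS_* hash-function flags into ecore RSS capability bits;
 * the IPv6-EX variants collapse onto the plain IPv6 capabilities.
 */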
1836 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
1837 {
1838 	*rss_caps = 0;
1839 	*rss_caps |= (hf & ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
1840 	*rss_caps |= (hf & ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
1841 	*rss_caps |= (hf & ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
1842 	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
1843 	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
1844 	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
1845 	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? ECORE_RSS_IPV4_UDP : 0;
1846 	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? ECORE_RSS_IPV6_UDP : 0;
1847 }
1848 
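/* Program the RSS hash configuration for the vport. rss_hf == 0 disables
 * RSS; a supplied key must fit into ECORE_RSS_KEY_SIZE 32-bit words
 * (40 bytes, assuming ECORE_RSS_KEY_SIZE == 10). A minimal caller-side
 * sketch (hypothetical application code, not part of the PMD):
 *
 *	uint8_t key[40] = { 0x6d, 0x5a, ... };
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = key,
 *		.rss_key_len = sizeof(key),
 *		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
 *	};
 *	rte_eth_dev_rss_hash_update(port_id, &conf);
 */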
1849 int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
1850 			 struct rte_eth_rss_conf *rss_conf)
1851 {
1852 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1853 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1854 	struct ecore_sp_vport_update_params vport_update_params;
1855 	struct ecore_rss_params rss_params;
1856 	struct ecore_hwfn *p_hwfn;
1857 	uint32_t *key = (uint32_t *)rss_conf->rss_key;
1858 	uint64_t hf = rss_conf->rss_hf;
1859 	uint8_t len = rss_conf->rss_key_len;
1860 	uint8_t idx;
1861 	uint8_t i;
1862 	int rc;
1863 
1864 	memset(&vport_update_params, 0, sizeof(vport_update_params));
1865 	memset(&rss_params, 0, sizeof(rss_params));
1866 
1867 	DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
1868 		(unsigned long)hf, len, key);
1869 
1870 	if (hf != 0) {
1871 		/* Enabling RSS */
1872 		DP_INFO(edev, "Enabling rss\n");
1873 
1874 		/* RSS caps */
1875 		qede_init_rss_caps(&rss_params.rss_caps, hf);
1876 		rss_params.update_rss_capabilities = 1;
1877 
1878 		/* RSS hash key */
1879 		if (key) {
1880 			if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
1881 				DP_ERR(edev, "RSS key length exceeds limit\n");
1882 				return -EINVAL;
1883 			}
1884 			DP_INFO(edev, "Applying user supplied hash key\n");
1885 			rss_params.update_rss_key = 1;
1886 			memcpy(&rss_params.rss_key, key, len);
1887 		}
1888 		rss_params.rss_enable = 1;
1889 	}
1890 
1891 	rss_params.update_rss_config = 1;
1892 	/* The table size must be set along with the capability update */
1893 	rss_params.rss_table_size_log = 7;
1894 	vport_update_params.vport_id = 0;
1895 	/* pass the L2 handles instead of qids */
1896 	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
1897 		idx = qdev->rss_ind_table[i];
1898 		rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
1899 	}
1900 	vport_update_params.rss_params = &rss_params;
1901 
1902 	for_each_hwfn(edev, i) {
1903 		p_hwfn = &edev->hwfns[i];
1904 		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
1905 		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
1906 					   ECORE_SPQ_MODE_EBLOCK, NULL);
1907 		if (rc) {
1908 			DP_ERR(edev, "vport-update for RSS failed\n");
1909 			return rc;
1910 		}
1911 	}
1912 	qdev->rss_enable = rss_params.rss_enable;
1913 
1914 	/* Update local structure for hash query */
1915 	qdev->rss_conf.rss_hf = hf;
1916 	qdev->rss_conf.rss_key_len = len;
1917 	if (qdev->rss_enable) {
1918 		if (qdev->rss_conf.rss_key == NULL) {
1919 			qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
1920 			if (qdev->rss_conf.rss_key == NULL) {
1921 				DP_ERR(edev, "No memory to store RSS key\n");
1922 				return -ENOMEM;
1923 			}
1924 		}
1925 		if (key && len) {
1926 			DP_INFO(edev, "Storing RSS key\n");
1927 			memcpy(qdev->rss_conf.rss_key, key, len);
1928 		}
1929 	} else if (len == 0) {
1930 		if (qdev->rss_conf.rss_key) {
1931 			free(qdev->rss_conf.rss_key);
1932 			qdev->rss_conf.rss_key = NULL;
1933 			DP_INFO(edev, "Free RSS key\n");
1934 		}
1935 	}
1936 
1937 	return 0;
1938 }
1939 
1940 static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
1941 			   struct rte_eth_rss_conf *rss_conf)
1942 {
1943 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1944 
1945 	rss_conf->rss_hf = qdev->rss_conf.rss_hf;
1946 	rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;
1947 
1948 	if (rss_conf->rss_key && qdev->rss_conf.rss_key)
1949 		memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
1950 		       rss_conf->rss_key_len);
1951 	return 0;
1952 }
1953 
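/* CMT devices (two hw-functions behind one port, e.g. 100G BB parts) need
 * the application's single indirection table split into one table per
 * engine; see the round-robin redistribution below.
 */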
1954 static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
1955 				    struct ecore_rss_params *rss)
1956 {
1957 	int i, fn;
1958 	bool rss_mode = true; /* enable */
1959 	struct ecore_queue_cid *cid;
1960 	struct ecore_rss_params *t_rss;
1961 
1962 	/* In the regular scenario we'd simply take the input handlers as-is.
1963 	 * In CMT we have to split the handlers according to the engine they
1964 	 * were configured on, and then determine whether RSS is really
1965 	 * required, since two queues on CMT don't need RSS.
1966 	 */
1968 
1969 	/* CMT should be round-robin */
1970 	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
1971 		cid = rss->rss_ind_table[i];
1972 
1973 		if (cid->p_owner == ECORE_LEADING_HWFN(edev))
1974 			t_rss = &rss[0];
1975 		else
1976 			t_rss = &rss[1];
1977 
1978 		t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
1979 	}
1980 
1981 	t_rss = &rss[1];
1982 	t_rss->update_rss_ind_table = 1;
1983 	t_rss->rss_table_size_log = 7;
1984 	t_rss->update_rss_config = 1;
1985 
1986 	/* Make sure RSS is actually required */
1987 	for_each_hwfn(edev, fn) {
1988 		for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
1989 		     i++) {
1990 			if (rss[fn].rss_ind_table[i] !=
1991 			    rss[fn].rss_ind_table[0])
1992 				break;
1993 		}
1994 
1995 		if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
1996 			DP_INFO(edev,
1997 				"CMT - 1 queue per-hwfn; Disabling RSS\n");
1998 			rss_mode = 0;
1999 			goto out;
2000 		}
2001 	}
2002 
2003 out:
2004 	t_rss->rss_enable = rss_mode;
2005 
2006 	return rss_mode;
2007 }
2008 
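/* Update the RSS redirection table (up to ETH_RSS_RETA_SIZE_128 entries,
 * delivered in 64-entry groups; only entries whose mask bit is set are
 * applied). Caller-side sketch (hypothetical application code) spreading
 * all entries over the first two Rx queues:
 *
 *	struct rte_eth_rss_reta_entry64 reta[2];
 *	int j;
 *
 *	memset(reta, 0, sizeof(reta));
 *	for (j = 0; j < 128; j++) {
 *		reta[j / RTE_RETA_GROUP_SIZE].mask |= 1ULL << (j % 64);
 *		reta[j / RTE_RETA_GROUP_SIZE].reta[j % 64] = j % 2;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta, 128);
 */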
2009 int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
2010 			 struct rte_eth_rss_reta_entry64 *reta_conf,
2011 			 uint16_t reta_size)
2012 {
2013 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2014 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2015 	struct ecore_sp_vport_update_params vport_update_params;
2016 	struct ecore_rss_params *params;
2017 	struct ecore_hwfn *p_hwfn;
2018 	uint16_t i, idx, shift;
2019 	uint8_t entry;
2020 	int rc = 0;
2021 
2022 	if (reta_size > ETH_RSS_RETA_SIZE_128) {
2023 		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
2024 		       reta_size);
2025 		return -EINVAL;
2026 	}
2027 
2028 	memset(&vport_update_params, 0, sizeof(vport_update_params));
2029 	params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
2030 			     RTE_CACHE_LINE_SIZE);
2031 	if (params == NULL) {
2032 		DP_ERR(edev, "failed to allocate memory\n");
2033 		return -ENOMEM;
2034 	}
2035 
2036 	for (i = 0; i < reta_size; i++) {
2037 		idx = i / RTE_RETA_GROUP_SIZE;
2038 		shift = i % RTE_RETA_GROUP_SIZE;
2039 		if (reta_conf[idx].mask & (1ULL << shift)) {
2040 			entry = reta_conf[idx].reta[shift];
2041 			/* Pass rxq handles to ecore */
2042 			params->rss_ind_table[i] =
2043 					qdev->fp_array[entry].rxq->handle;
2044 			/* Update the local copy for RETA query command */
2045 			qdev->rss_ind_table[i] = entry;
2046 		}
2047 	}
2048 
2049 	params->update_rss_ind_table = 1;
2050 	params->rss_table_size_log = 7;
2051 	params->update_rss_config = 1;
2052 
2053 	/* Fix up RETA for CMT mode device */
2054 	if (ECORE_IS_CMT(edev))
2055 		qdev->rss_enable = qede_update_rss_parm_cmt(edev,
2056 							    params);
2057 	vport_update_params.vport_id = 0;
2058 	/* Use the current value of rss_enable */
2059 	params->rss_enable = qdev->rss_enable;
2060 	vport_update_params.rss_params = params;
2061 
2062 	for_each_hwfn(edev, i) {
2063 		p_hwfn = &edev->hwfns[i];
2064 		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2065 		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
2066 					   ECORE_SPQ_MODE_EBLOCK, NULL);
2067 		if (rc) {
2068 			DP_ERR(edev, "vport-update for RSS failed\n");
2069 			goto out;
2070 		}
2071 	}
2072 
2073 out:
2074 	rte_free(params);
2075 	return rc;
2076 }
2077 
2078 static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
2079 			       struct rte_eth_rss_reta_entry64 *reta_conf,
2080 			       uint16_t reta_size)
2081 {
2082 	struct qede_dev *qdev = eth_dev->data->dev_private;
2083 	struct ecore_dev *edev = &qdev->edev;
2084 	uint16_t i, idx, shift;
2085 	uint8_t entry;
2086 
2087 	if (reta_size > ETH_RSS_RETA_SIZE_128) {
2088 		DP_ERR(edev, "reta_size %d is not supported\n",
2089 		       reta_size);
2090 		return -EINVAL;
2091 	}
2092 
2093 	for (i = 0; i < reta_size; i++) {
2094 		idx = i / RTE_RETA_GROUP_SIZE;
2095 		shift = i % RTE_RETA_GROUP_SIZE;
2096 		if (reta_conf[idx].mask & (1ULL << shift)) {
2097 			entry = qdev->rss_ind_table[i];
2098 			reta_conf[idx].reta[shift] = entry;
2099 		}
2100 	}
2101 
2102 	return 0;
2103 }
2104 
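/* Changing the MTU requires a full restart of the port: Rx/Tx bursts are
 * parked on dummy handlers (they cannot be NULL because rte_eth_rx_burst()
 * does not check), the device is stopped, each Rx queue's buffer size is
 * recomputed for the new frame size, and the device is started again.
 */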
2107 static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
2108 {
2109 	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
2110 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2111 	struct rte_eth_dev_info dev_info = {0};
2112 	struct qede_fastpath *fp;
2113 	uint32_t frame_size;
2114 	uint16_t rx_buf_size;
2115 	uint16_t bufsz;
2116 	int i;
2117 
2118 	PMD_INIT_FUNC_TRACE(edev);
2119 	qede_dev_info_get(dev, &dev_info);
2120 	frame_size = mtu + QEDE_ETH_OVERHEAD;
2121 	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
2122 		DP_ERR(edev, "MTU %u out of range\n", mtu);
2123 		return -EINVAL;
2124 	}
2125 	if (!dev->data->scattered_rx &&
2126 	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
2127 		DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
2128 			dev->data->min_rx_buf_size);
2129 		return -EINVAL;
2130 	}
2131 	/* Temporarily replace I/O functions with dummy ones. It cannot
2132 	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
2133 	 */
2134 	dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
2135 	dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
2136 	qede_dev_stop(dev);
2137 	rte_delay_ms(1000);
2138 	qdev->mtu = mtu;
2139 	/* Fix up RX buf size for all queues of the port */
2140 	for_each_rss(i) {
2141 		fp = &qdev->fp_array[i];
2142 		bufsz = (uint16_t)rte_pktmbuf_data_room_size(
2143 			fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
2144 		if (dev->data->scattered_rx)
2145 			rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
2146 		else
2147 			rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
2148 		rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
2149 		fp->rxq->rx_buf_size = rx_buf_size;
2150 		DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
2151 	}
2152 	qede_dev_start(dev);
2153 	if (frame_size > ETHER_MAX_LEN)
2154 		dev->data->dev_conf.rxmode.jumbo_frame = 1;
2155 	else
2156 		dev->data->dev_conf.rxmode.jumbo_frame = 0;
2157 	/* update max frame size */
2158 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
2159 	/* Reassign back */
2160 	dev->rx_pkt_burst = qede_recv_pkts;
2161 	dev->tx_pkt_burst = qede_xmit_pkts;
2162 
2163 	return 0;
2164 }
2165 
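/* Add or remove the VXLAN UDP destination port in hardware; on delete the
 * default port (QEDE_VXLAN_DEF_PORT) is restored rather than cleared.
 * Caller-side sketch (hypothetical application code):
 *
 *	struct rte_eth_udp_tunnel t = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *	rte_eth_dev_udp_tunnel_port_add(port_id, &t);
 */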
2166 static int
2167 qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
2168 		       struct rte_eth_udp_tunnel *tunnel_udp,
2169 		       bool add)
2170 {
2171 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2172 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2173 	struct ecore_tunnel_info tunn; /* @DPDK */
2174 	struct ecore_hwfn *p_hwfn;
2175 	int rc, i;
2176 
2177 	PMD_INIT_FUNC_TRACE(edev);
2178 
2179 	memset(&tunn, 0, sizeof(tunn));
2180 	if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
2181 		tunn.vxlan_port.b_update_port = true;
2182 		tunn.vxlan_port.port = (add) ? tunnel_udp->udp_port :
2183 						  QEDE_VXLAN_DEF_PORT;
2184 		for_each_hwfn(edev, i) {
2185 			p_hwfn = &edev->hwfns[i];
2186 			struct ecore_ptt *p_ptt = IS_PF(edev) ?
2187 			       ecore_ptt_acquire(p_hwfn) : NULL;
2188 			rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
2189 						ECORE_SPQ_MODE_CB, NULL);
2190 			if (IS_PF(edev))
2191 				ecore_ptt_release(p_hwfn, p_ptt);
2192 			if (rc != ECORE_SUCCESS) {
2193 				DP_ERR(edev, "Unable to config UDP port %u\n",
2194 				       tunn.vxlan_port.port);
2195 				return rc;
2196 			}
2197 		}
2198 	}
2199 
2200 	return 0;
2201 }
2202 
2203 static int
2204 qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
2205 		      struct rte_eth_udp_tunnel *tunnel_udp)
2206 {
2207 	return qede_conf_udp_dst_port(eth_dev, tunnel_udp, false);
2208 }
2209 
2210 static int
2211 qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
2212 		      struct rte_eth_udp_tunnel *tunnel_udp)
2213 {
2214 	return qede_conf_udp_dst_port(eth_dev, tunnel_udp, true);
2215 }
2216 
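/* Look up the requested filter-flag combination in the qede_tunn_types
 * table; combinations the hardware cannot classify map to
 * MAX_ECORE_TUNN_CLSS, which callers treat as unsupported.
 */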
2217 static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
2218 				       uint32_t *clss, char *str)
2219 {
2220 	uint16_t j;
2221 	*clss = MAX_ECORE_TUNN_CLSS;
2222 
2223 	for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
2224 		if (filter == qede_tunn_types[j].rte_filter_type) {
2225 			*type = qede_tunn_types[j].qede_type;
2226 			*clss = qede_tunn_types[j].qede_tunn_clss;
2227 			strcpy(str, qede_tunn_types[j].string);
2228 			return;
2229 		}
2230 	}
2231 }
2232 
2233 static int
2234 qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
2235 			      const struct rte_eth_tunnel_filter_conf *conf,
2236 			      uint32_t type)
2237 {
2238 	/* Init common ucast params first */
2239 	qede_set_ucast_cmn_params(ucast);
2240 
2241 	/* Copy out the required fields based on classification type */
2242 	ucast->type = type;
2243 
2244 	switch (type) {
2245 	case ECORE_FILTER_VNI:
2246 		ucast->vni = conf->tenant_id;
2247 	break;
2248 	case ECORE_FILTER_INNER_VLAN:
2249 		ucast->vlan = conf->inner_vlan;
2250 	break;
2251 	case ECORE_FILTER_MAC:
2252 		memcpy(ucast->mac, conf->outer_mac.addr_bytes,
2253 		       ETHER_ADDR_LEN);
2254 	break;
2255 	case ECORE_FILTER_INNER_MAC:
2256 		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
2257 		       ETHER_ADDR_LEN);
2258 	break;
2259 	case ECORE_FILTER_MAC_VNI_PAIR:
2260 		memcpy(ucast->mac, conf->outer_mac.addr_bytes,
2261 			ETHER_ADDR_LEN);
2262 		ucast->vni = conf->tenant_id;
2263 	break;
2264 	case ECORE_FILTER_INNER_MAC_VNI_PAIR:
2265 		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
2266 			ETHER_ADDR_LEN);
2267 		ucast->vni = conf->tenant_id;
2268 	break;
2269 	case ECORE_FILTER_INNER_PAIR:
2270 		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
2271 			ETHER_ADDR_LEN);
2272 		ucast->vlan = conf->inner_vlan;
2273 	break;
2274 	default:
2275 		return -EINVAL;
2276 	}
2277 
2278 	return ECORE_SUCCESS;
2279 }
2280 
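/* Add or delete one VXLAN tunnel filter, enabling VXLAN classification on
 * the first filter and disabling it when the count drops back to zero.
 * Reached via the filter_ctrl op; caller-side sketch (hypothetical
 * application code) matching on VNI plus inner MAC:
 *
 *	struct rte_eth_tunnel_filter_conf c = {
 *		.tunnel_type = RTE_TUNNEL_TYPE_VXLAN,
 *		.filter_type = ETH_TUNNEL_FILTER_TENID |
 *			       ETH_TUNNEL_FILTER_IMAC,
 *		.tenant_id = 100,
 *	};
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_TUNNEL,
 *				RTE_ETH_FILTER_ADD, &c);
 */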
2281 static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
2282 				  enum rte_filter_op filter_op,
2283 				  const struct rte_eth_tunnel_filter_conf *conf)
2284 {
2285 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2286 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2287 	struct ecore_tunnel_info tunn;
2288 	struct ecore_hwfn *p_hwfn;
2289 	enum ecore_filter_ucast_type type;
2290 	enum ecore_tunn_clss clss;
2291 	struct ecore_filter_ucast ucast;
2292 	char str[80];
2293 	uint16_t filter_type;
2294 	int rc, i;
2295 
2296 	PMD_INIT_FUNC_TRACE(edev);
2297 
2298 	filter_type = conf->filter_type | qdev->vxlan_filter_type;
2299 	/* First determine if the given filter classification is supported */
2300 	qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
2301 	if (clss == MAX_ECORE_TUNN_CLSS) {
2302 		DP_ERR(edev, "Wrong filter type\n");
2303 		return -EINVAL;
2304 	}
2305 	/* Init tunnel ucast params */
2306 	rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
2307 	if (rc != ECORE_SUCCESS) {
2308 		DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
2309 				conf->filter_type);
2310 		return rc;
2311 	}
2312 	DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
2313 		str, filter_op, ucast.type);
2314 	switch (filter_op) {
2315 	case RTE_ETH_FILTER_ADD:
2316 		ucast.opcode = ECORE_FILTER_ADD;
2317 
2318 		/* Skip MAC/VLAN if filter is based on VNI */
2319 		if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
2320 			rc = qede_mac_int_ops(eth_dev, &ucast, 1);
2321 			if (rc == 0) {
2322 				/* Enable accept anyvlan */
2323 				qede_config_accept_any_vlan(qdev, true);
2324 			}
2325 		} else {
2326 			rc = qede_ucast_filter(eth_dev, &ucast, 1);
2327 			if (rc == 0)
2328 				rc = ecore_filter_ucast_cmd(edev, &ucast,
2329 						    ECORE_SPQ_MODE_CB, NULL);
2330 		}
2331 
2332 		if (rc != ECORE_SUCCESS)
2333 			return rc;
2334 
2335 		qdev->vxlan_filter_type = filter_type;
2336 
2337 		DP_INFO(edev, "Enabling VXLAN tunneling\n");
2338 		qede_set_cmn_tunn_param(&tunn, clss, true, true);
2339 		for_each_hwfn(edev, i) {
2340 			p_hwfn = &edev->hwfns[i];
2341 			struct ecore_ptt *p_ptt = IS_PF(edev) ?
2342 			       ecore_ptt_acquire(p_hwfn) : NULL;
2343 			rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
2344 				&tunn, ECORE_SPQ_MODE_CB, NULL);
2345 			if (IS_PF(edev))
2346 				ecore_ptt_release(p_hwfn, p_ptt);
2347 			if (rc != ECORE_SUCCESS)
2348 				DP_ERR(edev, "Failed to update tunn_clss %u\n",
2349 				       tunn.vxlan.tun_cls);
2351 		}
2352 		qdev->num_tunn_filters++; /* Filter added successfully */
2353 	break;
2354 	case RTE_ETH_FILTER_DELETE:
2355 		ucast.opcode = ECORE_FILTER_REMOVE;
2356 
2357 		if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
2358 			rc = qede_mac_int_ops(eth_dev, &ucast, 0);
2359 		} else {
2360 			rc = qede_ucast_filter(eth_dev, &ucast, 0);
2361 			if (rc == 0)
2362 				rc = ecore_filter_ucast_cmd(edev, &ucast,
2363 						    ECORE_SPQ_MODE_CB, NULL);
2364 		}
2365 		if (rc != ECORE_SUCCESS)
2366 			return rc;
2367 
2368 		qdev->vxlan_filter_type = filter_type;
2369 		qdev->num_tunn_filters--;
2370 
2371 		/* Disable VXLAN if VXLAN filters become 0 */
2372 		if (qdev->num_tunn_filters == 0) {
2373 			DP_INFO(edev, "Disabling VXLAN tunneling\n");
2374 
2375 			/* Disable tunnel classification in hardware */
2376 			qede_set_cmn_tunn_param(&tunn, clss, false, true);
2377 			for_each_hwfn(edev, i) {
2378 				p_hwfn = &edev->hwfns[i];
2379 				struct ecore_ptt *p_ptt = IS_PF(edev) ?
2380 				       ecore_ptt_acquire(p_hwfn) : NULL;
2381 				rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
2382 					&tunn, ECORE_SPQ_MODE_CB, NULL);
2383 				if (IS_PF(edev))
2384 					ecore_ptt_release(p_hwfn, p_ptt);
2385 				if (rc != ECORE_SUCCESS) {
2386 					DP_ERR(edev,
2387 						"Failed to update tunn_clss %u\n",
2388 						tunn.vxlan.tun_cls);
2389 					break;
2390 				}
2392 			}
2393 		}
2394 	break;
2395 	default:
2396 		DP_ERR(edev, "Unsupported operation %d\n", filter_op);
2397 		return -EINVAL;
2398 	}
2399 	DP_INFO(edev, "Current VXLAN filters %d\n", qdev->num_tunn_filters);
2400 
2401 	return 0;
2402 }
2403 
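/* Top-level .filter_ctrl op: tunnel filters go to the VXLAN handler,
 * flow-director and ntuple requests to their dedicated modules; all other
 * filter types are rejected.
 */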
2404 int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
2405 			 enum rte_filter_type filter_type,
2406 			 enum rte_filter_op filter_op,
2407 			 void *arg)
2408 {
2409 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2410 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2411 	struct rte_eth_tunnel_filter_conf *filter_conf =
2412 			(struct rte_eth_tunnel_filter_conf *)arg;
2413 
2414 	switch (filter_type) {
2415 	case RTE_ETH_FILTER_TUNNEL:
2416 		switch (filter_conf->tunnel_type) {
2417 		case RTE_TUNNEL_TYPE_VXLAN:
2418 			DP_INFO(edev,
2419 				"Packet steering to the specified Rx queue"
2420 				" is not supported with VXLAN tunneling\n");
2421 			return qede_vxlan_tunn_config(eth_dev, filter_op,
2422 						      filter_conf);
2423 		/* Place holders for future tunneling support */
2424 		case RTE_TUNNEL_TYPE_GENEVE:
2425 		case RTE_TUNNEL_TYPE_TEREDO:
2426 		case RTE_TUNNEL_TYPE_NVGRE:
2427 		case RTE_TUNNEL_TYPE_IP_IN_GRE:
2428 		case RTE_L2_TUNNEL_TYPE_E_TAG:
2429 			DP_ERR(edev, "Unsupported tunnel type %d\n",
2430 				filter_conf->tunnel_type);
2431 			return -EINVAL;
2432 		case RTE_TUNNEL_TYPE_NONE:
2433 		default:
2434 			return 0;
2435 		}
2436 		break;
2437 	case RTE_ETH_FILTER_FDIR:
2438 		return qede_fdir_filter_conf(eth_dev, filter_op, arg);
2439 	case RTE_ETH_FILTER_NTUPLE:
2440 		return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
2441 	case RTE_ETH_FILTER_MACVLAN:
2442 	case RTE_ETH_FILTER_ETHERTYPE:
2443 	case RTE_ETH_FILTER_FLEXIBLE:
2444 	case RTE_ETH_FILTER_SYN:
2445 	case RTE_ETH_FILTER_HASH:
2446 	case RTE_ETH_FILTER_L2_TUNNEL:
2447 	case RTE_ETH_FILTER_MAX:
2448 	default:
2449 		DP_ERR(edev, "Unsupported filter type %d\n",
2450 			filter_type);
2451 		return -EINVAL;
2452 	}
2453 
2454 	return 0;
2455 }
2456 
2457 static const struct eth_dev_ops qede_eth_dev_ops = {
2458 	.dev_configure = qede_dev_configure,
2459 	.dev_infos_get = qede_dev_info_get,
2460 	.rx_queue_setup = qede_rx_queue_setup,
2461 	.rx_queue_release = qede_rx_queue_release,
2462 	.tx_queue_setup = qede_tx_queue_setup,
2463 	.tx_queue_release = qede_tx_queue_release,
2464 	.dev_start = qede_dev_start,
2465 	.dev_set_link_up = qede_dev_set_link_up,
2466 	.dev_set_link_down = qede_dev_set_link_down,
2467 	.link_update = qede_link_update,
2468 	.promiscuous_enable = qede_promiscuous_enable,
2469 	.promiscuous_disable = qede_promiscuous_disable,
2470 	.allmulticast_enable = qede_allmulticast_enable,
2471 	.allmulticast_disable = qede_allmulticast_disable,
2472 	.dev_stop = qede_dev_stop,
2473 	.dev_close = qede_dev_close,
2474 	.stats_get = qede_get_stats,
2475 	.stats_reset = qede_reset_stats,
2476 	.xstats_get = qede_get_xstats,
2477 	.xstats_reset = qede_reset_xstats,
2478 	.xstats_get_names = qede_get_xstats_names,
2479 	.mac_addr_add = qede_mac_addr_add,
2480 	.mac_addr_remove = qede_mac_addr_remove,
2481 	.mac_addr_set = qede_mac_addr_set,
2482 	.vlan_offload_set = qede_vlan_offload_set,
2483 	.vlan_filter_set = qede_vlan_filter_set,
2484 	.flow_ctrl_set = qede_flow_ctrl_set,
2485 	.flow_ctrl_get = qede_flow_ctrl_get,
2486 	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
2487 	.rss_hash_update = qede_rss_hash_update,
2488 	.rss_hash_conf_get = qede_rss_hash_conf_get,
2489 	.reta_update = qede_rss_reta_update,
2490 	.reta_query = qede_rss_reta_query,
2491 	.mtu_set = qede_set_mtu,
2492 	.filter_ctrl = qede_dev_filter_ctrl,
2493 	.udp_tunnel_port_add = qede_udp_dst_port_add,
2494 	.udp_tunnel_port_del = qede_udp_dst_port_del,
2495 };
2496 
2497 static const struct eth_dev_ops qede_eth_vf_dev_ops = {
2498 	.dev_configure = qede_dev_configure,
2499 	.dev_infos_get = qede_dev_info_get,
2500 	.rx_queue_setup = qede_rx_queue_setup,
2501 	.rx_queue_release = qede_rx_queue_release,
2502 	.tx_queue_setup = qede_tx_queue_setup,
2503 	.tx_queue_release = qede_tx_queue_release,
2504 	.dev_start = qede_dev_start,
2505 	.dev_set_link_up = qede_dev_set_link_up,
2506 	.dev_set_link_down = qede_dev_set_link_down,
2507 	.link_update = qede_link_update,
2508 	.promiscuous_enable = qede_promiscuous_enable,
2509 	.promiscuous_disable = qede_promiscuous_disable,
2510 	.allmulticast_enable = qede_allmulticast_enable,
2511 	.allmulticast_disable = qede_allmulticast_disable,
2512 	.dev_stop = qede_dev_stop,
2513 	.dev_close = qede_dev_close,
2514 	.stats_get = qede_get_stats,
2515 	.stats_reset = qede_reset_stats,
2516 	.xstats_get = qede_get_xstats,
2517 	.xstats_reset = qede_reset_xstats,
2518 	.xstats_get_names = qede_get_xstats_names,
2519 	.vlan_offload_set = qede_vlan_offload_set,
2520 	.vlan_filter_set = qede_vlan_filter_set,
2521 	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
2522 	.rss_hash_update = qede_rss_hash_update,
2523 	.rss_hash_conf_get = qede_rss_hash_conf_get,
2524 	.reta_update = qede_rss_reta_update,
2525 	.reta_query = qede_rss_reta_query,
2526 	.mtu_set = qede_set_mtu,
2527 };
2528 
2529 static void qede_update_pf_params(struct ecore_dev *edev)
2530 {
2531 	struct ecore_pf_params pf_params;
2532 
2533 	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
2534 	pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
2535 	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
2536 	qed_ops->common->update_pf_params(edev, &pf_params);
2537 }
2538 
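/* Device bring-up shared by the PF and VF init paths: probe the ecore
 * device, register and enable the PCI interrupt, start the slowpath
 * context (with a polling alarm standing in for the missing second MSI-X
 * vector on CMT), allocate the MAC table and, for PFs, start the vport at
 * the default MTU. Only the primary process performs this.
 */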
2539 static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
2540 {
2541 	struct rte_pci_device *pci_dev;
2542 	struct rte_pci_addr pci_addr;
2543 	struct qede_dev *adapter;
2544 	struct ecore_dev *edev;
2545 	struct qed_dev_eth_info dev_info;
2546 	struct qed_slowpath_params params;
2547 	static bool do_once = true;
2548 	uint8_t bulletin_change;
2549 	uint8_t vf_mac[ETHER_ADDR_LEN];
2550 	uint8_t is_mac_forced;
2551 	bool is_mac_exist;
2552 	/* Fix up ecore debug level */
2553 	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
2554 	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
2555 	int rc;
2556 
2557 	/* Extract key data structures */
2558 	adapter = eth_dev->data->dev_private;
2559 	edev = &adapter->edev;
2560 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2561 	pci_addr = pci_dev->addr;
2562 
2563 	PMD_INIT_FUNC_TRACE(edev);
2564 
2565 	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
2566 		 pci_addr.bus, pci_addr.devid, pci_addr.function,
2567 		 eth_dev->data->port_id);
2568 
2569 	eth_dev->rx_pkt_burst = qede_recv_pkts;
2570 	eth_dev->tx_pkt_burst = qede_xmit_pkts;
2571 	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
2572 
2573 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2574 		DP_ERR(edev, "Skipping device init from secondary process\n");
2575 		return 0;
2576 	}
2577 
2578 	rte_eth_copy_pci_info(eth_dev, pci_dev);
2579 
2580 	/* @DPDK */
2581 	edev->vendor_id = pci_dev->id.vendor_id;
2582 	edev->device_id = pci_dev->id.device_id;
2583 
2584 	qed_ops = qed_get_eth_ops();
2585 	if (!qed_ops) {
2586 		DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
2587 		return -EINVAL;
2588 	}
2589 
2590 	DP_INFO(edev, "Starting qede probe\n");
2591 	rc = qed_ops->common->probe(edev, pci_dev, dp_module,
2592 				    dp_level, is_vf);
2593 	if (rc != 0) {
2594 		DP_ERR(edev, "qede probe failed rc %d\n", rc);
2595 		return -ENODEV;
2596 	}
2597 	qede_update_pf_params(edev);
2598 	rte_intr_callback_register(&pci_dev->intr_handle,
2599 				   qede_interrupt_handler, (void *)eth_dev);
2600 	if (rte_intr_enable(&pci_dev->intr_handle)) {
2601 		DP_ERR(edev, "rte_intr_enable() failed\n");
2602 		return -ENODEV;
2603 	}
2604 
2605 	/* Start the Slowpath-process */
2606 	memset(&params, 0, sizeof(struct qed_slowpath_params));
2607 	params.int_mode = ECORE_INT_MODE_MSIX;
2608 	params.drv_major = QEDE_PMD_VERSION_MAJOR;
2609 	params.drv_minor = QEDE_PMD_VERSION_MINOR;
2610 	params.drv_rev = QEDE_PMD_VERSION_REVISION;
2611 	params.drv_eng = QEDE_PMD_VERSION_PATCH;
2612 	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
2613 		QEDE_PMD_DRV_VER_STR_SIZE);
2614 
2615 	/* For CMT mode device do periodic polling for slowpath events.
2616 	 * This is required since uio device uses only one MSI-x
2617 	 * interrupt vector but we need one for each engine.
2618 	 */
2619 	if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
2620 		rc = rte_eal_alarm_set(timer_period * US_PER_S,
2621 				       qede_poll_sp_sb_cb,
2622 				       (void *)eth_dev);
2623 		if (rc != 0) {
2624 			DP_ERR(edev, "Unable to start periodic"
2625 				     " timer rc %d\n", rc);
2626 			return -EINVAL;
2627 		}
2628 	}
2629 
2630 	rc = qed_ops->common->slowpath_start(edev, &params);
2631 	if (rc) {
2632 		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
2633 		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
2634 				     (void *)eth_dev);
2635 		return -ENODEV;
2636 	}
2637 
2638 	rc = qed_ops->fill_dev_info(edev, &dev_info);
2639 	if (rc) {
2640 		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
2641 		qed_ops->common->slowpath_stop(edev);
2642 		qed_ops->common->remove(edev);
2643 		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
2644 				     (void *)eth_dev);
2645 		return -ENODEV;
2646 	}
2647 
2648 	qede_alloc_etherdev(adapter, &dev_info);
2649 
2650 	adapter->ops->common->set_name(edev, edev->name);
2651 
2652 	if (!is_vf)
2653 		adapter->dev_info.num_mac_filters =
2654 			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
2655 					    ECORE_MAC);
2656 	else
2657 		ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
2658 				(uint32_t *)&adapter->dev_info.num_mac_filters);
2659 
2660 	/* Allocate memory for storing MAC addr */
2661 	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
2662 					(ETHER_ADDR_LEN *
2663 					adapter->dev_info.num_mac_filters),
2664 					RTE_CACHE_LINE_SIZE);
2665 
2666 	if (eth_dev->data->mac_addrs == NULL) {
2667 		DP_ERR(edev, "Failed to allocate MAC address\n");
2668 		qed_ops->common->slowpath_stop(edev);
2669 		qed_ops->common->remove(edev);
2670 		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
2671 				     (void *)eth_dev);
2672 		return -ENOMEM;
2673 	}
2674 
2675 	if (!is_vf) {
2676 		ether_addr_copy((struct ether_addr *)edev->hwfns[0].
2677 				hw_info.hw_mac_addr,
2678 				&eth_dev->data->mac_addrs[0]);
2679 		ether_addr_copy(&eth_dev->data->mac_addrs[0],
2680 				&adapter->primary_mac);
2681 	} else {
2682 		ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
2683 				       &bulletin_change);
2684 		if (bulletin_change) {
2685 			is_mac_exist =
2686 			    ecore_vf_bulletin_get_forced_mac(
2687 						ECORE_LEADING_HWFN(edev),
2688 						vf_mac,
2689 						&is_mac_forced);
2690 			if (is_mac_exist && is_mac_forced) {
2691 				DP_INFO(edev, "VF macaddr received from PF\n");
2692 				ether_addr_copy((struct ether_addr *)&vf_mac,
2693 						&eth_dev->data->mac_addrs[0]);
2694 				ether_addr_copy(&eth_dev->data->mac_addrs[0],
2695 						&adapter->primary_mac);
2696 			} else {
2697 				DP_ERR(edev, "No VF macaddr assigned\n");
2698 			}
2699 		}
2700 	}
2701 
2702 	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
2703 
2704 	if (do_once) {
2705 #ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
2706 		qede_print_adapter_info(adapter);
2707 #endif
2708 		do_once = false;
2709 	}
2710 
2711 	adapter->num_tx_queues = 0;
2712 	adapter->num_rx_queues = 0;
2713 	SLIST_INIT(&adapter->fdir_info.fdir_list_head);
2714 	SLIST_INIT(&adapter->vlan_list_head);
2715 	SLIST_INIT(&adapter->uc_list_head);
2716 	adapter->mtu = ETHER_MTU;
2717 	adapter->new_mtu = ETHER_MTU;
2718 	if (!is_vf)
2719 		if (qede_start_vport(adapter, adapter->mtu))
2720 			return -1;
2721 
2722 	DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
2723 		adapter->primary_mac.addr_bytes[0],
2724 		adapter->primary_mac.addr_bytes[1],
2725 		adapter->primary_mac.addr_bytes[2],
2726 		adapter->primary_mac.addr_bytes[3],
2727 		adapter->primary_mac.addr_bytes[4],
2728 		adapter->primary_mac.addr_bytes[5]);
2729 
2730 	DP_INFO(edev, "Device initialized\n");
2731 
2732 	return 0;
2733 }
2734 
2735 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
2736 {
2737 	return qede_common_dev_init(eth_dev, 1);
2738 }
2739 
2740 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
2741 {
2742 	return qede_common_dev_init(eth_dev, 0);
2743 }
2744 
2745 static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
2746 {
2747 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
2748 	struct qede_dev *qdev = eth_dev->data->dev_private;
2749 	struct ecore_dev *edev = &qdev->edev;
2750 
2751 	PMD_INIT_FUNC_TRACE(edev);
2752 #endif
2753 
2754 	/* only uninitialize in the primary process */
2755 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2756 		return 0;
2757 
2758 	/* safe to close dev here */
2759 	qede_dev_close(eth_dev);
2760 
2761 	eth_dev->dev_ops = NULL;
2762 	eth_dev->rx_pkt_burst = NULL;
2763 	eth_dev->tx_pkt_burst = NULL;
2764 
2765 	if (eth_dev->data->mac_addrs)
2766 		rte_free(eth_dev->data->mac_addrs);
2767 
2768 	eth_dev->data->mac_addrs = NULL;
2769 
2770 	return 0;
2771 }
2772 
2773 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
2774 {
2775 	return qede_dev_common_uninit(eth_dev);
2776 }
2777 
2778 static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
2779 {
2780 	return qede_dev_common_uninit(eth_dev);
2781 }
2782 
2783 static const struct rte_pci_id pci_id_qedevf_map[] = {
2784 #define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
2785 	{
2786 		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
2787 	},
2788 	{
2789 		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
2790 	},
2791 	{
2792 		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
2793 	},
2794 	{.vendor_id = 0,}
2795 };
2796 
2797 static const struct rte_pci_id pci_id_qede_map[] = {
2798 #define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
2799 	{
2800 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
2801 	},
2802 	{
2803 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
2804 	},
2805 	{
2806 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
2807 	},
2808 	{
2809 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
2810 	},
2811 	{
2812 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
2813 	},
2814 	{
2815 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
2816 	},
2817 	{
2818 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
2819 	},
2820 	{
2821 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
2822 	},
2823 	{
2824 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
2825 	},
2826 	{
2827 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
2828 	},
2829 	{.vendor_id = 0,}
2830 };
2831 
2832 static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2833 	struct rte_pci_device *pci_dev)
2834 {
2835 	return rte_eth_dev_pci_generic_probe(pci_dev,
2836 		sizeof(struct qede_dev), qedevf_eth_dev_init);
2837 }
2838 
2839 static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
2840 {
2841 	return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
2842 }
2843 
2844 static struct rte_pci_driver rte_qedevf_pmd = {
2845 	.id_table = pci_id_qedevf_map,
2846 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2847 	.probe = qedevf_eth_dev_pci_probe,
2848 	.remove = qedevf_eth_dev_pci_remove,
2849 };
2850 
2851 static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2852 	struct rte_pci_device *pci_dev)
2853 {
2854 	return rte_eth_dev_pci_generic_probe(pci_dev,
2855 		sizeof(struct qede_dev), qede_eth_dev_init);
2856 }
2857 
2858 static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
2859 {
2860 	return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
2861 }
2862 
2863 static struct rte_pci_driver rte_qede_pmd = {
2864 	.id_table = pci_id_qede_map,
2865 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2866 	.probe = qede_eth_dev_pci_probe,
2867 	.remove = qede_eth_dev_pci_remove,
2868 };
2869 
2870 RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
2871 RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
2872 RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
2873 RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
2874 RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
2875 RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
2876