xref: /dpdk/drivers/net/qede/qede_ethdev.c (revision 2f45703c17acb943aaded9f79676fd56a72542b2)
1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8 
9 #include "qede_ethdev.h"
10 #include <rte_alarm.h>
11 
12 /* Globals */
13 static const struct qed_eth_ops *qed_ops;
14 static const char *drivername = "qede pmd";
15 static int64_t timer_period = 1;
16 
17 struct rte_qede_xstats_name_off {
18 	char name[RTE_ETH_XSTATS_NAME_SIZE];
19 	uint64_t offset;
20 };
21 
22 static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
23 	{"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)},
24 	{"rx_multicast_bytes",
25 		offsetof(struct ecore_eth_stats, rx_mcast_bytes)},
26 	{"rx_broadcast_bytes",
27 		offsetof(struct ecore_eth_stats, rx_bcast_bytes)},
28 	{"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)},
29 	{"rx_multicast_packets",
30 		offsetof(struct ecore_eth_stats, rx_mcast_pkts)},
31 	{"rx_broadcast_packets",
32 		offsetof(struct ecore_eth_stats, rx_bcast_pkts)},
33 
34 	{"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)},
35 	{"tx_multicast_bytes",
36 		offsetof(struct ecore_eth_stats, tx_mcast_bytes)},
37 	{"tx_broadcast_bytes",
38 		offsetof(struct ecore_eth_stats, tx_bcast_bytes)},
39 	{"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)},
40 	{"tx_multicast_packets",
41 		offsetof(struct ecore_eth_stats, tx_mcast_pkts)},
42 	{"tx_broadcast_packets",
43 		offsetof(struct ecore_eth_stats, tx_bcast_pkts)},
44 
45 	{"rx_64_byte_packets",
46 		offsetof(struct ecore_eth_stats, rx_64_byte_packets)},
47 	{"rx_65_to_127_byte_packets",
48 		offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)},
49 	{"rx_128_to_255_byte_packets",
50 		offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)},
51 	{"rx_256_to_511_byte_packets",
52 		offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)},
53 	{"rx_512_to_1023_byte_packets",
54 		offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)},
55 	{"rx_1024_to_1518_byte_packets",
56 		offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)},
57 	{"rx_1519_to_1522_byte_packets",
58 		offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)},
59 	{"rx_1519_to_2047_byte_packets",
60 		offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)},
61 	{"rx_2048_to_4095_byte_packets",
62 		offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)},
63 	{"rx_4096_to_9216_byte_packets",
64 		offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)},
65 	{"rx_9217_to_16383_byte_packets",
66 		offsetof(struct ecore_eth_stats,
67 			 rx_9217_to_16383_byte_packets)},
68 	{"tx_64_byte_packets",
69 		offsetof(struct ecore_eth_stats, tx_64_byte_packets)},
70 	{"tx_65_to_127_byte_packets",
71 		offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)},
72 	{"tx_128_to_255_byte_packets",
73 		offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)},
74 	{"tx_256_to_511_byte_packets",
75 		offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)},
76 	{"tx_512_to_1023_byte_packets",
77 		offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)},
78 	{"tx_1024_to_1518_byte_packets",
79 		offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)},
80 	{"tx_1519_to_2047_byte_packets",
81 		offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)},
82 	{"tx_2048_to_4095_byte_packets",
83 		offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)},
84 	{"tx_4096_to_9216_byte_packets",
85 		offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)},
86 	{"tx_9217_to_16383_byte_packets",
87 		offsetof(struct ecore_eth_stats,
88 			 tx_9217_to_16383_byte_packets)},
89 
90 	{"rx_mac_control_frames",
91 		offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)},
92 	{"tx_mac_control_frames",
93 		offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)},
94 	{"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)},
95 	{"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)},
96 	{"rx_priority_flow_control_frames",
97 		offsetof(struct ecore_eth_stats, rx_pfc_frames)},
98 	{"tx_priority_flow_control_frames",
99 		offsetof(struct ecore_eth_stats, tx_pfc_frames)},
100 
101 	{"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)},
102 	{"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)},
103 	{"rx_carrier_errors",
104 		offsetof(struct ecore_eth_stats, rx_carrier_errors)},
105 	{"rx_oversize_packet_errors",
106 		offsetof(struct ecore_eth_stats, rx_oversize_packets)},
107 	{"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)},
108 	{"rx_undersize_packet_errors",
109 		offsetof(struct ecore_eth_stats, rx_undersize_packets)},
110 	{"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)},
111 	{"rx_host_buffer_not_available",
112 		offsetof(struct ecore_eth_stats, no_buff_discards)},
113 	/* Number of packets discarded because they are bigger than MTU */
114 	{"rx_packet_too_big_discards",
115 		offsetof(struct ecore_eth_stats, packet_too_big_discard)},
116 	{"rx_ttl_zero_discards",
117 		offsetof(struct ecore_eth_stats, ttl0_discard)},
118 	{"rx_multi_function_tag_filter_discards",
119 		offsetof(struct ecore_eth_stats, mftag_filter_discards)},
120 	{"rx_mac_filter_discards",
121 		offsetof(struct ecore_eth_stats, mac_filter_discards)},
122 	{"rx_hw_buffer_truncates",
123 		offsetof(struct ecore_eth_stats, brb_truncates)},
124 	{"rx_hw_buffer_discards",
125 		offsetof(struct ecore_eth_stats, brb_discards)},
126 	{"tx_lpi_entry_count",
127 		offsetof(struct ecore_eth_stats, tx_lpi_entry_count)},
128 	{"tx_total_collisions",
129 		offsetof(struct ecore_eth_stats, tx_total_collisions)},
130 	{"tx_error_drop_packets",
131 		offsetof(struct ecore_eth_stats, tx_err_drop_pkts)},
132 
133 	{"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)},
134 	{"rx_mac_unicast_packets",
135 		offsetof(struct ecore_eth_stats, rx_mac_uc_packets)},
136 	{"rx_mac_multicast_packets",
137 		offsetof(struct ecore_eth_stats, rx_mac_mc_packets)},
138 	{"rx_mac_broadcast_packets",
139 		offsetof(struct ecore_eth_stats, rx_mac_bc_packets)},
140 	{"rx_mac_frames_ok",
141 		offsetof(struct ecore_eth_stats, rx_mac_frames_ok)},
142 	{"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)},
143 	{"tx_mac_unicast_packets",
144 		offsetof(struct ecore_eth_stats, tx_mac_uc_packets)},
145 	{"tx_mac_multicast_packets",
146 		offsetof(struct ecore_eth_stats, tx_mac_mc_packets)},
147 	{"tx_mac_broadcast_packets",
148 		offsetof(struct ecore_eth_stats, tx_mac_bc_packets)},
149 
150 	{"lro_coalesced_packets",
151 		offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)},
152 	{"lro_coalesced_events",
153 		offsetof(struct ecore_eth_stats, tpa_coalesced_events)},
154 	{"lro_aborts_num",
155 		offsetof(struct ecore_eth_stats, tpa_aborts_num)},
156 	{"lro_not_coalesced_packets",
157 		offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)},
158 	{"lro_coalesced_bytes",
159 		offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
160 };
161 
162 static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
163 {
164 	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
165 }
166 
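/* PCI interrupt callback: service the slowpath status block of the
 * leading hwfn and re-arm the interrupt line.
 */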
167 static void
168 qede_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param)
169 {
170 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
171 	struct qede_dev *qdev = eth_dev->data->dev_private;
172 	struct ecore_dev *edev = &qdev->edev;
173 
174 	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
175 	if (rte_intr_enable(&eth_dev->pci_dev->intr_handle))
176 		DP_ERR(edev, "rte_intr_enable failed\n");
177 }
178 
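/* Cache the L2 device info reported by ecore and hook up the eth ops. */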
179 static void
180 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
181 {
182 	rte_memcpy(&qdev->dev_info, info, sizeof(*info));
183 	qdev->num_tc = qdev->dev_info.num_tc;
184 	qdev->ops = qed_ops;
185 }
186 
187 static void qede_print_adapter_info(struct qede_dev *qdev)
188 {
189 	struct ecore_dev *edev = &qdev->edev;
190 	struct qed_dev_info *info = &qdev->dev_info.common;
191 	static char ver_str[QED_DRV_VER_STR_SIZE];
192 
193 	DP_INFO(edev, "*********************************\n");
194 	DP_INFO(edev, " Chip details : %s%d\n",
195 		ECORE_IS_BB(edev) ? "BB" : "AH",
196 		CHIP_REV_IS_A0(edev) ? 0 : 1);
197 
198 	sprintf(ver_str, "%s %s_%d.%d.%d.%d", QEDE_PMD_VER_PREFIX,
199 		edev->ver_str, QEDE_PMD_VERSION_MAJOR, QEDE_PMD_VERSION_MINOR,
200 		QEDE_PMD_VERSION_REVISION, QEDE_PMD_VERSION_PATCH);
201 	strcpy(qdev->drv_ver, ver_str);
202 	DP_INFO(edev, " Driver version : %s\n", ver_str);
203 
204 	sprintf(ver_str, "%d.%d.%d.%d", info->fw_major, info->fw_minor,
205 		info->fw_rev, info->fw_eng);
206 	DP_INFO(edev, " Firmware version : %s\n", ver_str);
207 
208 	sprintf(ver_str, "%d.%d.%d.%d",
209 		(info->mfw_rev >> 24) & 0xff,
210 		(info->mfw_rev >> 16) & 0xff,
211 		(info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
212 	DP_INFO(edev, " Management firmware version : %s\n", ver_str);
213 
214 	DP_INFO(edev, " Firmware file : %s\n", fw_file);
215 
216 	DP_INFO(edev, "*********************************\n");
217 }
218 
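/* Build and submit a unicast MAC filter command (add/delete/replace,
 * selected by 'opcode') through the qed filter_config interface.
 */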
219 static int
220 qede_set_ucast_rx_mac(struct qede_dev *qdev,
221 		      enum qed_filter_xcast_params_type opcode,
222 		      uint8_t mac[ETHER_ADDR_LEN])
223 {
224 	struct ecore_dev *edev = &qdev->edev;
225 	struct qed_filter_params filter_cmd;
226 
227 	memset(&filter_cmd, 0, sizeof(filter_cmd));
228 	filter_cmd.type = QED_FILTER_TYPE_UCAST;
229 	filter_cmd.filter.ucast.type = opcode;
230 	filter_cmd.filter.ucast.mac_valid = 1;
231 	rte_memcpy(&filter_cmd.filter.ucast.mac[0], &mac[0], ETHER_ADDR_LEN);
232 	return qdev->ops->filter_config(edev, &filter_cmd);
233 }
234 
235 static void
236 qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
237 		  uint32_t index, __rte_unused uint32_t pool)
238 {
239 	struct qede_dev *qdev = eth_dev->data->dev_private;
240 	struct ecore_dev *edev = &qdev->edev;
241 	int rc;
242 
243 	PMD_INIT_FUNC_TRACE(edev);
244 
245 	if (index >= qdev->dev_info.num_mac_addrs) {
246 		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
247 		       index, qdev->dev_info.num_mac_addrs);
248 		return;
249 	}
250 
251 	/* Adding macaddr even though promiscuous mode is set */
252 	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
253 		DP_INFO(edev, "Port is in promisc mode, adding the MAC anyway\n");
254 
255 	/* Add MAC filters according to the unicast secondary macs */
256 	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
257 				   mac_addr->addr_bytes);
258 	if (rc)
259 		DP_ERR(edev, "Unable to add macaddr rc=%d\n", rc);
260 }
261 
262 static void
263 qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
264 {
265 	struct qede_dev *qdev = eth_dev->data->dev_private;
266 	struct ecore_dev *edev = &qdev->edev;
267 	struct ether_addr mac_addr;
268 	int rc;
269 
270 	PMD_INIT_FUNC_TRACE(edev);
271 
272 	if (index >= qdev->dev_info.num_mac_addrs) {
273 		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
274 		       index, qdev->dev_info.num_mac_addrs);
275 		return;
276 	}
277 
278 	/* Use the index maintained by rte */
279 	ether_addr_copy(&eth_dev->data->mac_addrs[index], &mac_addr);
280 	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
281 				   mac_addr.addr_bytes);
282 	if (rc)
283 		DP_ERR(edev, "Unable to remove macaddr rc=%d\n", rc);
284 }
285 
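/* Replace the default (primary) MAC address: remove the current primary
 * MAC filter first, then add the new one. On a VF the new address must
 * first pass ecore_vf_check_mac().
 */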
286 static void
287 qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
288 {
289 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
290 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
291 	int rc;
292 
293 	if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
294 					       mac_addr->addr_bytes)) {
295 		DP_ERR(edev, "Setting MAC address is not allowed\n");
296 		ether_addr_copy(&qdev->primary_mac,
297 				&eth_dev->data->mac_addrs[0]);
298 		return;
299 	}
300 
301 	/* First remove the primary mac */
302 	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
303 				   qdev->primary_mac.addr_bytes);
304 
305 	if (rc) {
306 		DP_ERR(edev, "Unable to remove current macaddr."
307 			     " Reverting to previous default mac\n");
308 		ether_addr_copy(&qdev->primary_mac,
309 				&eth_dev->data->mac_addrs[0]);
310 		return;
311 	}
312 
313 	/* Add new MAC */
314 	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
315 				   mac_addr->addr_bytes);
316 
317 	if (rc)
318 		DP_ERR(edev, "Unable to add new default mac\n");
319 	else
320 		ether_addr_copy(mac_addr, &qdev->primary_mac);
321 }
322 
323 
324 
325 
326 static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
327 {
328 	struct ecore_dev *edev = &qdev->edev;
329 	struct qed_update_vport_params params = {
330 		.vport_id = 0,
331 		.accept_any_vlan = action,
332 		.update_accept_any_vlan_flg = 1,
333 	};
334 	int rc;
335 
336 	/* Proceed only if action actually needs to be performed */
337 	if (qdev->accept_any_vlan == action)
338 		return;
339 
340 	rc = qdev->ops->vport_update(edev, &params);
341 	if (rc) {
342 		DP_ERR(edev, "Failed to %s accept-any-vlan\n",
343 		       action ? "enable" : "disable");
344 	} else {
345 		DP_INFO(edev, "%s accept-any-vlan\n",
346 			action ? "enabled" : "disabled");
347 		qdev->accept_any_vlan = action;
348 	}
349 }
350 
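/* Re-apply the Rx filtering configuration: refresh the primary unicast
 * MAC filter, choose promiscuous vs. regular accept flags and update
 * the accept-any-VLAN setting accordingly.
 */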
351 void qede_config_rx_mode(struct rte_eth_dev *eth_dev)
352 {
353 	struct qede_dev *qdev = eth_dev->data->dev_private;
354 	struct ecore_dev *edev = &qdev->edev;
355 	/* TODO: - QED_FILTER_TYPE_UCAST */
356 	enum qed_filter_rx_mode_type accept_flags =
357 			QED_FILTER_RX_MODE_TYPE_REGULAR;
358 	struct qed_filter_params rx_mode;
359 	int rc;
360 
361 	/* Configure the struct for the Rx mode */
362 	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
363 	rx_mode.type = QED_FILTER_TYPE_RX_MODE;
364 
365 	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_REPLACE,
366 				   eth_dev->data->mac_addrs[0].addr_bytes);
367 	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
368 		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
369 	} else {
370 		rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
371 					   eth_dev->data->
372 					   mac_addrs[0].addr_bytes);
373 		if (rc) {
374 			DP_ERR(edev, "Unable to add filter\n");
375 			return;
376 		}
377 	}
378 
379 	/* take care of VLAN mode */
380 	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
381 		qede_config_accept_any_vlan(qdev, true);
382 	} else if (!qdev->non_configured_vlans) {
383 		/* If we don't have non-configured VLANs and promisc
384 		 * is not set, then check if we need to disable
385 		 * accept_any_vlan mode.
386 		 * Because in this case, accept_any_vlan mode is set
387 		 * as part of IFF_PROMISC flag handling.
388 		 */
389 		qede_config_accept_any_vlan(qdev, false);
390 	}
391 	rx_mode.filter.accept_flags = accept_flags;
392 	rc = qdev->ops->filter_config(edev, &rx_mode);
393 	if (rc)
394 		DP_ERR(edev, "Filter config failed rc=%d\n", rc);
395 }
396 
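/* Enable or disable inner VLAN stripping via a vport update. */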
397 static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
398 {
399 	struct qed_update_vport_params vport_update_params;
400 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
401 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
402 	int rc;
403 
404 	memset(&vport_update_params, 0, sizeof(vport_update_params));
405 	vport_update_params.vport_id = 0;
406 	vport_update_params.update_inner_vlan_removal_flg = 1;
407 	vport_update_params.inner_vlan_removal_flg = set_stripping;
408 	rc = qdev->ops->vport_update(edev, &vport_update_params);
409 	if (rc) {
410 		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
411 		return rc;
412 	}
413 
414 	return 0;
415 }
416 
417 static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
418 {
419 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
420 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
421 
422 	if (mask & ETH_VLAN_STRIP_MASK) {
423 		if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
424 			(void)qede_vlan_stripping(eth_dev, 1);
425 		else
426 			(void)qede_vlan_stripping(eth_dev, 0);
427 	}
428 
429 	DP_INFO(edev, "vlan offload mask %d vlan-strip %d\n",
430 		mask, eth_dev->data->dev_conf.rxmode.hw_vlan_strip);
431 }
432 
433 static int qede_set_ucast_rx_vlan(struct qede_dev *qdev,
434 				  enum qed_filter_xcast_params_type opcode,
435 				  uint16_t vid)
436 {
437 	struct qed_filter_params filter_cmd;
438 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
439 
440 	memset(&filter_cmd, 0, sizeof(filter_cmd));
441 	filter_cmd.type = QED_FILTER_TYPE_UCAST;
442 	filter_cmd.filter.ucast.type = opcode;
443 	filter_cmd.filter.ucast.vlan_valid = 1;
444 	filter_cmd.filter.ucast.vlan = vid;
445 
446 	return qdev->ops->filter_config(edev, &filter_cmd);
447 }
448 
449 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
450 				uint16_t vlan_id, int on)
451 {
452 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
453 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
454 	struct qed_dev_eth_info *dev_info = &qdev->dev_info;
455 	int rc;
456 
457 	if (vlan_id != 0 &&
458 	    qdev->configured_vlans == dev_info->num_vlan_filters) {
459 		DP_NOTICE(edev, false, "Reached max VLAN filter limit,"
460 				     " enabling accept_any_vlan\n");
461 		qede_config_accept_any_vlan(qdev, true);
462 		return 0;
463 	}
464 
465 	if (on) {
466 		rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_ADD,
467 					    vlan_id);
468 		if (rc)
469 			DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
470 			       rc);
471 		else
472 			if (vlan_id != 0)
473 				qdev->configured_vlans++;
474 	} else {
475 		rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_DEL,
476 					    vlan_id);
477 		if (rc)
478 			DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
479 			       vlan_id, rc);
480 		else
481 			if (vlan_id != 0)
482 				qdev->configured_vlans--;
483 	}
484 
485 	DP_INFO(edev, "vlan_id %u on %u rc %d configured_vlans %u\n",
486 			vlan_id, on, rc, qdev->configured_vlans);
487 
488 	return rc;
489 }
490 
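/* Validate the requested configuration: Rx and Tx queue counts must
 * match, and 100G (two-hwfn) devices need an even number of queues
 * (minimum two).
 */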
491 static int qede_dev_configure(struct rte_eth_dev *eth_dev)
492 {
493 	struct qede_dev *qdev = eth_dev->data->dev_private;
494 	struct ecore_dev *edev = &qdev->edev;
495 	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
496 
497 	PMD_INIT_FUNC_TRACE(edev);
498 
499 	if (eth_dev->data->nb_rx_queues != eth_dev->data->nb_tx_queues) {
500 		DP_NOTICE(edev, false,
501 			  "Unequal number of rx/tx queues "
502 			  "is not supported RX=%u TX=%u\n",
503 			  eth_dev->data->nb_rx_queues,
504 			  eth_dev->data->nb_tx_queues);
505 		return -EINVAL;
506 	}
507 
508 	/* Check requirements for 100G mode */
509 	if (edev->num_hwfns > 1) {
510 		if (eth_dev->data->nb_rx_queues < 2) {
511 			DP_NOTICE(edev, false,
512 				  "100G mode requires minimum two queues\n");
513 			return -EINVAL;
514 		}
515 
516 		if ((eth_dev->data->nb_rx_queues % 2) != 0) {
517 			DP_NOTICE(edev, false,
518 				  "100G mode requires even number of queues\n");
519 			return -EINVAL;
520 		}
521 	}
522 
523 	qdev->num_rss = eth_dev->data->nb_rx_queues;
524 
525 	/* Initial state */
526 	qdev->state = QEDE_CLOSE;
527 
528 	/* Sanity checks and throw warnings */
529 
530 	if (rxmode->enable_scatter == 1) {
531 		DP_ERR(edev, "Rx scattered packets are not supported\n");
532 		return -EINVAL;
533 	}
534 
535 	if (rxmode->enable_lro == 1) {
536 		DP_ERR(edev, "LRO is not supported\n");
537 		return -EINVAL;
538 	}
539 
540 	if (!rxmode->hw_strip_crc)
541 		DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
542 
543 	if (!rxmode->hw_ip_checksum)
544 		DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
545 			      "in hw\n");
546 
547 
548 	DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
549 		QEDE_RSS_CNT(qdev), qdev->num_tc);
550 
551 	DP_INFO(edev, "my_id %u rel_pf_id %u abs_pf_id %u"
552 		" port %u first_on_engine %d\n",
553 		edev->hwfns[0].my_id,
554 		edev->hwfns[0].rel_pf_id,
555 		edev->hwfns[0].abs_pf_id,
556 		edev->hwfns[0].port_id, edev->hwfns[0].first_on_engine);
557 
558 	return 0;
559 }
560 
561 /* Info about HW descriptor ring limitations */
562 static const struct rte_eth_desc_lim qede_rx_desc_lim = {
563 	.nb_max = NUM_RX_BDS_MAX,
564 	.nb_min = 128,
565 	.nb_align = 128	/* lowest common multiple */
566 };
567 
568 static const struct rte_eth_desc_lim qede_tx_desc_lim = {
569 	.nb_max = NUM_TX_BDS_MAX,
570 	.nb_min = 256,
571 	.nb_align = 256
572 };
573 
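/* Report device capabilities: queue and descriptor limits, MAC address
 * count, RSS table size and supported Rx/Tx offloads.
 */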
574 static void
575 qede_dev_info_get(struct rte_eth_dev *eth_dev,
576 		  struct rte_eth_dev_info *dev_info)
577 {
578 	struct qede_dev *qdev = eth_dev->data->dev_private;
579 	struct ecore_dev *edev = &qdev->edev;
580 
581 	PMD_INIT_FUNC_TRACE(edev);
582 
583 	dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
584 					      QEDE_ETH_OVERHEAD);
585 	dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
586 	dev_info->rx_desc_lim = qede_rx_desc_lim;
587 	dev_info->tx_desc_lim = qede_tx_desc_lim;
588 	dev_info->max_rx_queues = (uint16_t)QEDE_MAX_RSS_CNT(qdev);
589 	dev_info->max_tx_queues = dev_info->max_rx_queues;
590 	dev_info->max_mac_addrs = qdev->dev_info.num_mac_addrs;
591 	if (IS_VF(edev))
592 		dev_info->max_vfs = 0;
593 	else
594 		dev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev);
595 	dev_info->driver_name = qdev->drv_ver;
596 	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
597 	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
598 
599 	dev_info->default_txconf = (struct rte_eth_txconf) {
600 		.txq_flags = QEDE_TXQ_FLAGS,
601 	};
602 
603 	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
604 				     DEV_RX_OFFLOAD_IPV4_CKSUM |
605 				     DEV_RX_OFFLOAD_UDP_CKSUM |
606 				     DEV_RX_OFFLOAD_TCP_CKSUM);
607 	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
608 				     DEV_TX_OFFLOAD_IPV4_CKSUM |
609 				     DEV_TX_OFFLOAD_UDP_CKSUM |
610 				     DEV_TX_OFFLOAD_TCP_CKSUM);
611 
612 	dev_info->speed_capa = ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G;
613 }
614 
615 /* return 0 means link status changed, -1 means not changed */
616 static int
617 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
618 {
619 	struct qede_dev *qdev = eth_dev->data->dev_private;
620 	struct ecore_dev *edev = &qdev->edev;
621 	uint16_t link_duplex;
622 	struct qed_link_output link;
623 	struct rte_eth_link *curr = &eth_dev->data->dev_link;
624 
625 	memset(&link, 0, sizeof(struct qed_link_output));
626 	qdev->ops->common->get_link(edev, &link);
627 
628 	/* Link Speed */
629 	curr->link_speed = link.speed;
630 
631 	/* Link Mode */
632 	switch (link.duplex) {
633 	case QEDE_DUPLEX_HALF:
634 		link_duplex = ETH_LINK_HALF_DUPLEX;
635 		break;
636 	case QEDE_DUPLEX_FULL:
637 		link_duplex = ETH_LINK_FULL_DUPLEX;
638 		break;
639 	case QEDE_DUPLEX_UNKNOWN:
640 	default:
641 		link_duplex = -1;
642 	}
643 	curr->link_duplex = link_duplex;
644 
645 	/* Link Status */
646 	curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
647 
648 	/* AN */
649 	curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
650 			     ETH_LINK_AUTONEG : ETH_LINK_FIXED;
651 
652 	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
653 		curr->link_speed, curr->link_duplex,
654 		curr->link_autoneg, curr->link_status);
655 
656 	/* return 0 means link status changed, -1 means not changed */
657 	return ((curr->link_status == link.link_up) ? -1 : 0);
658 }
659 
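/* Helper to push an Rx accept-flags configuration (regular, promisc or
 * multicast-promisc) to the device.
 */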
660 static void
661 qede_rx_mode_setting(struct rte_eth_dev *eth_dev,
662 		     enum qed_filter_rx_mode_type accept_flags)
663 {
664 	struct qede_dev *qdev = eth_dev->data->dev_private;
665 	struct ecore_dev *edev = &qdev->edev;
666 	struct qed_filter_params rx_mode;
667 
668 	DP_INFO(edev, "%s mode %u\n", __func__, accept_flags);
669 
670 	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
671 	rx_mode.type = QED_FILTER_TYPE_RX_MODE;
672 	rx_mode.filter.accept_flags = accept_flags;
673 	qdev->ops->filter_config(edev, &rx_mode);
674 }
675 
676 static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
677 {
678 	struct qede_dev *qdev = eth_dev->data->dev_private;
679 	struct ecore_dev *edev = &qdev->edev;
680 
681 	PMD_INIT_FUNC_TRACE(edev);
682 
683 	enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
684 
685 	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
686 		type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
687 
688 	qede_rx_mode_setting(eth_dev, type);
689 }
690 
691 static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
692 {
693 	struct qede_dev *qdev = eth_dev->data->dev_private;
694 	struct ecore_dev *edev = &qdev->edev;
695 
696 	PMD_INIT_FUNC_TRACE(edev);
697 
698 	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
699 		qede_rx_mode_setting(eth_dev,
700 				     QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
701 	else
702 		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
703 }
704 
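/* Periodic alarm callback used on two-hwfn (100G) devices: poll the
 * slowpath status block of both engines and re-arm the alarm.
 */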
705 static void qede_poll_sp_sb_cb(void *param)
706 {
707 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
708 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
709 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
710 	int rc;
711 
712 	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
713 	qede_interrupt_action(&edev->hwfns[1]);
714 
715 	rc = rte_eal_alarm_set(timer_period * US_PER_S,
716 			       qede_poll_sp_sb_cb,
717 			       (void *)eth_dev);
718 	if (rc != 0) {
719 		DP_ERR(edev, "Unable to start periodic"
720 			     " timer rc %d\n", rc);
721 		assert(false && "Unable to start periodic timer");
722 	}
723 }
724 
725 static void qede_dev_close(struct rte_eth_dev *eth_dev)
726 {
727 	struct qede_dev *qdev = eth_dev->data->dev_private;
728 	struct ecore_dev *edev = &qdev->edev;
729 
730 	PMD_INIT_FUNC_TRACE(edev);
731 
732 	/* dev_stop() shall cleanup fp resources in hw but without releasing
733 	 * dma memories and sw structures so that dev_start() can be called
734 	 * by the app without reconfiguration. However, dev_close() releases
735 	 * all resources; bringing the device up again needs reconfiguration.
736 	 */
737 	if (qdev->state != QEDE_STOP)
738 		qede_dev_stop(eth_dev);
739 	else
740 		DP_INFO(edev, "Device is already stopped\n");
741 
742 	qede_free_mem_load(qdev);
743 
744 	qede_free_fp_arrays(qdev);
745 
746 	qede_dev_set_link_state(eth_dev, false);
747 
748 	qdev->ops->common->slowpath_stop(edev);
749 
750 	qdev->ops->common->remove(edev);
751 
752 	rte_intr_disable(&eth_dev->pci_dev->intr_handle);
753 
754 	rte_intr_callback_unregister(&eth_dev->pci_dev->intr_handle,
755 				     qede_interrupt_handler, (void *)eth_dev);
756 
757 	if (edev->num_hwfns > 1)
758 		rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
759 
760 	qdev->state = QEDE_CLOSE;
761 }
762 
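/* Fill rte_eth_stats from the vport statistics maintained by ecore. */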
763 static void
764 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
765 {
766 	struct qede_dev *qdev = eth_dev->data->dev_private;
767 	struct ecore_dev *edev = &qdev->edev;
768 	struct ecore_eth_stats stats;
769 
770 	qdev->ops->get_vport_stats(edev, &stats);
771 
772 	/* RX Stats */
773 	eth_stats->ipackets = stats.rx_ucast_pkts +
774 	    stats.rx_mcast_pkts + stats.rx_bcast_pkts;
775 
776 	eth_stats->ibytes = stats.rx_ucast_bytes +
777 	    stats.rx_mcast_bytes + stats.rx_bcast_bytes;
778 
779 	eth_stats->ierrors = stats.rx_crc_errors +
780 	    stats.rx_align_errors +
781 	    stats.rx_carrier_errors +
782 	    stats.rx_oversize_packets +
783 	    stats.rx_jabbers + stats.rx_undersize_packets;
784 
785 	eth_stats->rx_nombuf = stats.no_buff_discards;
786 
787 	eth_stats->imissed = stats.mftag_filter_discards +
788 	    stats.mac_filter_discards +
789 	    stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;
790 
791 	/* TX stats */
792 	eth_stats->opackets = stats.tx_ucast_pkts +
793 	    stats.tx_mcast_pkts + stats.tx_bcast_pkts;
794 
795 	eth_stats->obytes = stats.tx_ucast_bytes +
796 	    stats.tx_mcast_bytes + stats.tx_bcast_bytes;
797 
798 	eth_stats->oerrors = stats.tx_err_drop_pkts;
799 }
800 
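/* Return the number of extended stats and, when a buffer is supplied,
 * copy their names from qede_xstats_strings.
 */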
801 static int
802 qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
803 		      struct rte_eth_xstat_name *xstats_names, unsigned limit)
804 {
805 	unsigned int i, stat_cnt = RTE_DIM(qede_xstats_strings);
806 
807 	if (xstats_names != NULL)
808 		for (i = 0; i < stat_cnt; i++)
809 			snprintf(xstats_names[i].name,
810 				sizeof(xstats_names[i].name),
811 				"%s",
812 				qede_xstats_strings[i].name);
813 
814 	return stat_cnt;
815 }
816 
817 static int
818 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
819 		unsigned int n)
820 {
821 	struct qede_dev *qdev = dev->data->dev_private;
822 	struct ecore_dev *edev = &qdev->edev;
823 	struct ecore_eth_stats stats;
824 	unsigned int num = RTE_DIM(qede_xstats_strings);
825 
826 	if (n < num)
827 		return num;
828 
829 	qdev->ops->get_vport_stats(edev, &stats);
830 
831 	for (num = 0; num < RTE_DIM(qede_xstats_strings); num++)
832 		xstats[num].value = *(u64 *)(((char *)&stats) +
833 					     qede_xstats_strings[num].offset);
834 
835 	return num;
836 }
837 
838 static void
839 qede_reset_xstats(struct rte_eth_dev *dev)
840 {
841 	struct qede_dev *qdev = dev->data->dev_private;
842 	struct ecore_dev *edev = &qdev->edev;
843 
844 	ecore_reset_vport_stats(edev);
845 }
846 
847 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
848 {
849 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
850 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
851 	struct qed_link_params link_params;
852 	int rc;
853 
854 	DP_INFO(edev, "setting link state %d\n", link_up);
855 	memset(&link_params, 0, sizeof(link_params));
856 	link_params.link_up = link_up;
857 	rc = qdev->ops->common->set_link(edev, &link_params);
858 	if (rc != ECORE_SUCCESS)
859 		DP_ERR(edev, "Unable to set link state %d\n", link_up);
860 
861 	return rc;
862 }
863 
864 static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
865 {
866 	return qede_dev_set_link_state(eth_dev, true);
867 }
868 
869 static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
870 {
871 	return qede_dev_set_link_state(eth_dev, false);
872 }
873 
874 static void qede_reset_stats(struct rte_eth_dev *eth_dev)
875 {
876 	struct qede_dev *qdev = eth_dev->data->dev_private;
877 	struct ecore_dev *edev = &qdev->edev;
878 
879 	ecore_reset_vport_stats(edev);
880 }
881 
882 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
883 {
884 	enum qed_filter_rx_mode_type type =
885 	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
886 
887 	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
888 		type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
889 
890 	qede_rx_mode_setting(eth_dev, type);
891 }
892 
893 static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
894 {
895 	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
896 		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_PROMISC);
897 	else
898 		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
899 }
900 
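/* Translate the rte_eth flow-control settings into qed pause/autoneg
 * link parameters and apply them with set_link().
 */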
901 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
902 			      struct rte_eth_fc_conf *fc_conf)
903 {
904 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
905 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
906 	struct qed_link_output current_link;
907 	struct qed_link_params params;
908 
909 	memset(&current_link, 0, sizeof(current_link));
910 	qdev->ops->common->get_link(edev, &current_link);
911 
912 	memset(&params, 0, sizeof(params));
913 	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
914 	if (fc_conf->autoneg) {
915 		if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
916 			DP_ERR(edev, "Autoneg not supported\n");
917 			return -EINVAL;
918 		}
919 		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
920 	}
921 
922 	/* Pause is assumed to be supported (SUPPORTED_Pause) */
923 	if (fc_conf->mode == RTE_FC_FULL)
924 		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
925 					QED_LINK_PAUSE_RX_ENABLE);
926 	if (fc_conf->mode == RTE_FC_TX_PAUSE)
927 		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
928 	if (fc_conf->mode == RTE_FC_RX_PAUSE)
929 		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
930 
931 	params.link_up = true;
932 	(void)qdev->ops->common->set_link(edev, &params);
933 
934 	return 0;
935 }
936 
937 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
938 			      struct rte_eth_fc_conf *fc_conf)
939 {
940 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
941 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
942 	struct qed_link_output current_link;
943 
944 	memset(&current_link, 0, sizeof(current_link));
945 	qdev->ops->common->get_link(edev, &current_link);
946 
947 	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
948 		fc_conf->autoneg = true;
949 
950 	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
951 					 QED_LINK_PAUSE_TX_ENABLE))
952 		fc_conf->mode = RTE_FC_FULL;
953 	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
954 		fc_conf->mode = RTE_FC_RX_PAUSE;
955 	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
956 		fc_conf->mode = RTE_FC_TX_PAUSE;
957 	else
958 		fc_conf->mode = RTE_FC_NONE;
959 
960 	return 0;
961 }
962 
963 static const uint32_t *
964 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
965 {
966 	static const uint32_t ptypes[] = {
967 		RTE_PTYPE_L3_IPV4,
968 		RTE_PTYPE_L3_IPV6,
969 		RTE_PTYPE_UNKNOWN
970 	};
971 
972 	if (eth_dev->rx_pkt_burst == qede_recv_pkts)
973 		return ptypes;
974 
975 	return NULL;
976 }
977 
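/* Translate the rte_eth RSS hash types and key into ecore RSS
 * capabilities and push them to the vport.
 */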
978 int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
979 			 struct rte_eth_rss_conf *rss_conf)
980 {
981 	struct qed_update_vport_params vport_update_params;
982 	struct qede_dev *qdev = eth_dev->data->dev_private;
983 	struct ecore_dev *edev = &qdev->edev;
984 	uint8_t rss_caps;
985 	uint32_t *key = (uint32_t *)rss_conf->rss_key;
986 	uint64_t hf = rss_conf->rss_hf;
987 	int i;
988 
989 	if (hf == 0)
990 		DP_ERR(edev, "hash function 0 will disable RSS\n");
991 
992 	rss_caps = 0;
993 	rss_caps |= (hf & ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
994 	rss_caps |= (hf & ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
995 	rss_caps |= (hf & ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
996 	rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
997 	rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
998 	rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
999 
1000 	/* Fail if none of the requested hash types is supported */
1001 	if (rss_caps == 0 && hf != 0)
1002 		return -EINVAL;
1003 
1004 	memset(&vport_update_params, 0, sizeof(vport_update_params));
1005 
1006 	if (key != NULL)
1007 		memcpy(qdev->rss_params.rss_key, rss_conf->rss_key,
1008 		       rss_conf->rss_key_len);
1009 
1010 	qdev->rss_params.rss_caps = rss_caps;
1011 	memcpy(&vport_update_params.rss_params, &qdev->rss_params,
1012 	       sizeof(vport_update_params.rss_params));
1013 	vport_update_params.update_rss_flg = 1;
1014 	vport_update_params.vport_id = 0;
1015 
1016 	return qdev->ops->vport_update(edev, &vport_update_params);
1017 }
1018 
1019 int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
1020 			   struct rte_eth_rss_conf *rss_conf)
1021 {
1022 	struct qede_dev *qdev = eth_dev->data->dev_private;
1023 	uint64_t hf;
1024 
1025 	if (rss_conf->rss_key_len < sizeof(qdev->rss_params.rss_key))
1026 		return -EINVAL;
1027 
1028 	if (rss_conf->rss_key)
1029 		memcpy(rss_conf->rss_key, qdev->rss_params.rss_key,
1030 		       sizeof(qdev->rss_params.rss_key));
1031 
1032 	hf = 0;
1033 	hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4)     ?
1034 			ETH_RSS_IPV4 : 0;
1035 	hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6)     ?
1036 			ETH_RSS_IPV6 : 0;
1037 	hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6)     ?
1038 			ETH_RSS_IPV6_EX : 0;
1039 	hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4_TCP) ?
1040 			ETH_RSS_NONFRAG_IPV4_TCP : 0;
1041 	hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
1042 			ETH_RSS_NONFRAG_IPV6_TCP : 0;
1043 	hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
1044 			ETH_RSS_IPV6_TCP_EX : 0;
1045 
1046 	rss_conf->rss_hf = hf;
1047 
1048 	return 0;
1049 }
1050 
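/* Program the RSS indirection table (up to 128 entries) from the
 * reta_conf groups and apply it through a vport update.
 */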
1051 int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
1052 			 struct rte_eth_rss_reta_entry64 *reta_conf,
1053 			 uint16_t reta_size)
1054 {
1055 	struct qed_update_vport_params vport_update_params;
1056 	struct qede_dev *qdev = eth_dev->data->dev_private;
1057 	struct ecore_dev *edev = &qdev->edev;
1058 	uint16_t i, idx, shift;
1059 
1060 	if (reta_size > ETH_RSS_RETA_SIZE_128) {
1061 		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
1062 		       reta_size);
1063 		return -EINVAL;
1064 	}
1065 
1066 	memset(&vport_update_params, 0, sizeof(vport_update_params));
1067 	memcpy(&vport_update_params.rss_params, &qdev->rss_params,
1068 	       sizeof(vport_update_params.rss_params));
1069 
1070 	for (i = 0; i < reta_size; i++) {
1071 		idx = i / RTE_RETA_GROUP_SIZE;
1072 		shift = i % RTE_RETA_GROUP_SIZE;
1073 		if (reta_conf[idx].mask & (1ULL << shift)) {
1074 			uint8_t entry = reta_conf[idx].reta[shift];
1075 			qdev->rss_params.rss_ind_table[i] = entry;
1076 		}
1077 	}
1078 
1079 	vport_update_params.update_rss_flg = 1;
1080 	vport_update_params.vport_id = 0;
1081 
1082 	return qdev->ops->vport_update(edev, &vport_update_params);
1083 }
1084 
1085 int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
1086 			struct rte_eth_rss_reta_entry64 *reta_conf,
1087 			uint16_t reta_size)
1088 {
1089 	struct qede_dev *qdev = eth_dev->data->dev_private;
1090 	uint16_t i, idx, shift;
1091 
1092 	if (reta_size > ETH_RSS_RETA_SIZE_128) {
1093 		struct ecore_dev *edev = &qdev->edev;
1094 		DP_ERR(edev, "reta_size %d is not supported\n", reta_size);
1095 		return -EINVAL;
1096 	}
1097 
1098 	for (i = 0; i < reta_size; i++) {
1099 		idx = i / RTE_RETA_GROUP_SIZE;
1100 		shift = i % RTE_RETA_GROUP_SIZE;
1101 		if (reta_conf[idx].mask & (1ULL << shift)) {
1102 			uint8_t entry = qdev->rss_params.rss_ind_table[i];
1103 			reta_conf[idx].reta[shift] = entry;
1104 		}
1105 	}
1106 
1107 	return 0;
1108 }
1109 
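/* Change the MTU: validate the resulting frame size against device and
 * mbuf limits, toggle jumbo mode and restart the port so that the new
 * size takes effect.
 */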
1110 int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
1111 {
1112 	uint32_t frame_size;
1113 	struct qede_dev *qdev = dev->data->dev_private;
1114 	struct rte_eth_dev_info dev_info = {0};
1115 
1116 	qede_dev_info_get(dev, &dev_info);
1117 
1118 	/* VLAN_TAG = 4 */
1119 	frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4;
1120 
1121 	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
1122 		return -EINVAL;
1123 
1124 	if (!dev->data->scattered_rx &&
1125 	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
1126 		return -EINVAL;
1127 
1128 	if (frame_size > ETHER_MAX_LEN)
1129 		dev->data->dev_conf.rxmode.jumbo_frame = 1;
1130 	else
1131 		dev->data->dev_conf.rxmode.jumbo_frame = 0;
1132 
1133 	/* update max frame size */
1134 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1135 	qdev->mtu = mtu;
1136 	qede_dev_stop(dev);
1137 	qede_dev_start(dev);
1138 
1139 	return 0;
1140 }
1141 
1142 static const struct eth_dev_ops qede_eth_dev_ops = {
1143 	.dev_configure = qede_dev_configure,
1144 	.dev_infos_get = qede_dev_info_get,
1145 	.rx_queue_setup = qede_rx_queue_setup,
1146 	.rx_queue_release = qede_rx_queue_release,
1147 	.tx_queue_setup = qede_tx_queue_setup,
1148 	.tx_queue_release = qede_tx_queue_release,
1149 	.dev_start = qede_dev_start,
1150 	.dev_set_link_up = qede_dev_set_link_up,
1151 	.dev_set_link_down = qede_dev_set_link_down,
1152 	.link_update = qede_link_update,
1153 	.promiscuous_enable = qede_promiscuous_enable,
1154 	.promiscuous_disable = qede_promiscuous_disable,
1155 	.allmulticast_enable = qede_allmulticast_enable,
1156 	.allmulticast_disable = qede_allmulticast_disable,
1157 	.dev_stop = qede_dev_stop,
1158 	.dev_close = qede_dev_close,
1159 	.stats_get = qede_get_stats,
1160 	.stats_reset = qede_reset_stats,
1161 	.xstats_get = qede_get_xstats,
1162 	.xstats_reset = qede_reset_xstats,
1163 	.xstats_get_names = qede_get_xstats_names,
1164 	.mac_addr_add = qede_mac_addr_add,
1165 	.mac_addr_remove = qede_mac_addr_remove,
1166 	.mac_addr_set = qede_mac_addr_set,
1167 	.vlan_offload_set = qede_vlan_offload_set,
1168 	.vlan_filter_set = qede_vlan_filter_set,
1169 	.flow_ctrl_set = qede_flow_ctrl_set,
1170 	.flow_ctrl_get = qede_flow_ctrl_get,
1171 	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
1172 	.rss_hash_update = qede_rss_hash_update,
1173 	.rss_hash_conf_get = qede_rss_hash_conf_get,
1174 	.reta_update  = qede_rss_reta_update,
1175 	.reta_query  = qede_rss_reta_query,
1176 	.mtu_set = qede_set_mtu,
1177 };
1178 
1179 static const struct eth_dev_ops qede_eth_vf_dev_ops = {
1180 	.dev_configure = qede_dev_configure,
1181 	.dev_infos_get = qede_dev_info_get,
1182 	.rx_queue_setup = qede_rx_queue_setup,
1183 	.rx_queue_release = qede_rx_queue_release,
1184 	.tx_queue_setup = qede_tx_queue_setup,
1185 	.tx_queue_release = qede_tx_queue_release,
1186 	.dev_start = qede_dev_start,
1187 	.dev_set_link_up = qede_dev_set_link_up,
1188 	.dev_set_link_down = qede_dev_set_link_down,
1189 	.link_update = qede_link_update,
1190 	.promiscuous_enable = qede_promiscuous_enable,
1191 	.promiscuous_disable = qede_promiscuous_disable,
1192 	.allmulticast_enable = qede_allmulticast_enable,
1193 	.allmulticast_disable = qede_allmulticast_disable,
1194 	.dev_stop = qede_dev_stop,
1195 	.dev_close = qede_dev_close,
1196 	.stats_get = qede_get_stats,
1197 	.stats_reset = qede_reset_stats,
1198 	.xstats_get = qede_get_xstats,
1199 	.xstats_reset = qede_reset_xstats,
1200 	.xstats_get_names = qede_get_xstats_names,
1201 	.vlan_offload_set = qede_vlan_offload_set,
1202 	.vlan_filter_set = qede_vlan_filter_set,
1203 	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
1204 	.rss_hash_update = qede_rss_hash_update,
1205 	.rss_hash_conf_get = qede_rss_hash_conf_get,
1206 	.reta_update  = qede_rss_reta_update,
1207 	.reta_query  = qede_rss_reta_query,
1208 	.mtu_set = qede_set_mtu,
1209 };
1210 
1211 static void qede_update_pf_params(struct ecore_dev *edev)
1212 {
1213 	struct ecore_pf_params pf_params;
1214 	/* 32 rx + 32 tx */
1215 	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
1216 	pf_params.eth_pf_params.num_cons = 64;
1217 	qed_ops->common->update_pf_params(edev, &pf_params);
1218 }
1219 
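/* Common PF/VF init path: probe the device through the qed ops, start
 * the slowpath, retrieve device info, allocate the MAC address table
 * and set up the primary MAC and dev_ops.
 */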
1220 static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
1221 {
1222 	struct rte_pci_device *pci_dev;
1223 	struct rte_pci_addr pci_addr;
1224 	struct qede_dev *adapter;
1225 	struct ecore_dev *edev;
1226 	struct qed_dev_eth_info dev_info;
1227 	struct qed_slowpath_params params;
1228 	uint32_t qed_ver;
1229 	static bool do_once = true;
1230 	uint8_t bulletin_change;
1231 	uint8_t vf_mac[ETHER_ADDR_LEN];
1232 	uint8_t is_mac_forced;
1233 	bool is_mac_exist;
1234 	/* Fix up ecore debug level */
1235 	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
1236 	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
1237 	uint32_t max_mac_addrs;
1238 	int rc;
1239 
1240 	/* Extract key data structures */
1241 	adapter = eth_dev->data->dev_private;
1242 	edev = &adapter->edev;
1243 	pci_addr = eth_dev->pci_dev->addr;
1244 
1245 	PMD_INIT_FUNC_TRACE(edev);
1246 
1247 	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
1248 		 pci_addr.bus, pci_addr.devid, pci_addr.function,
1249 		 eth_dev->data->port_id);
1250 
1251 	eth_dev->rx_pkt_burst = qede_recv_pkts;
1252 	eth_dev->tx_pkt_burst = qede_xmit_pkts;
1253 
1254 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1255 		DP_NOTICE(edev, false,
1256 			  "Skipping device init from secondary process\n");
1257 		return 0;
1258 	}
1259 
1260 	pci_dev = eth_dev->pci_dev;
1261 
1262 	rte_eth_copy_pci_info(eth_dev, pci_dev);
1263 
1264 	qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
1265 
1266 	qed_ops = qed_get_eth_ops();
1267 	if (!qed_ops) {
1268 		DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
1269 		return -EINVAL;
1270 	}
1271 
1272 	DP_INFO(edev, "Starting qede probe\n");
1273 
1274 	rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
1275 				    dp_module, dp_level, is_vf);
1276 
1277 	if (rc != 0) {
1278 		DP_ERR(edev, "qede probe failed rc %d\n", rc);
1279 		return -ENODEV;
1280 	}
1281 
1282 	qede_update_pf_params(edev);
1283 
1284 	rte_intr_callback_register(&eth_dev->pci_dev->intr_handle,
1285 				   qede_interrupt_handler, (void *)eth_dev);
1286 
1287 	if (rte_intr_enable(&eth_dev->pci_dev->intr_handle)) {
1288 		DP_ERR(edev, "rte_intr_enable() failed\n");
1289 		return -ENODEV;
1290 	}
1291 
1292 	/* Start the Slowpath-process */
1293 	memset(&params, 0, sizeof(struct qed_slowpath_params));
1294 	params.int_mode = ECORE_INT_MODE_MSIX;
1295 	params.drv_major = QEDE_MAJOR_VERSION;
1296 	params.drv_minor = QEDE_MINOR_VERSION;
1297 	params.drv_rev = QEDE_REVISION_VERSION;
1298 	params.drv_eng = QEDE_ENGINEERING_VERSION;
1299 	strncpy((char *)params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
1300 
1301 	/* For CMT mode device do periodic polling for slowpath events.
1302 	 * This is required since uio device uses only one MSI-x
1303 	 * interrupt vector but we need one for each engine.
1304 	 */
1305 	if (edev->num_hwfns > 1) {
1306 		rc = rte_eal_alarm_set(timer_period * US_PER_S,
1307 				       qede_poll_sp_sb_cb,
1308 				       (void *)eth_dev);
1309 		if (rc != 0) {
1310 			DP_ERR(edev, "Unable to start periodic"
1311 				     " timer rc %d\n", rc);
1312 			return -EINVAL;
1313 		}
1314 	}
1315 
1316 	rc = qed_ops->common->slowpath_start(edev, &params);
1317 	if (rc) {
1318 		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
1319 		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
1320 				     (void *)eth_dev);
1321 		return -ENODEV;
1322 	}
1323 
1324 	rc = qed_ops->fill_dev_info(edev, &dev_info);
1325 	if (rc) {
1326 		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
1327 		qed_ops->common->slowpath_stop(edev);
1328 		qed_ops->common->remove(edev);
1329 		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
1330 				     (void *)eth_dev);
1331 		return -ENODEV;
1332 	}
1333 
1334 	qede_alloc_etherdev(adapter, &dev_info);
1335 
1336 	adapter->ops->common->set_id(edev, edev->name, QEDE_DRV_MODULE_VERSION);
1337 
1338 	if (!is_vf)
1339 		adapter->dev_info.num_mac_addrs =
1340 			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
1341 					    ECORE_MAC);
1342 	else
1343 		ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
1344 					     &adapter->dev_info.num_mac_addrs);
1345 
1346 	/* Allocate memory for storing MAC addr */
1347 	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
1348 					(ETHER_ADDR_LEN *
1349 					adapter->dev_info.num_mac_addrs),
1350 					RTE_CACHE_LINE_SIZE);
1351 
1352 	if (eth_dev->data->mac_addrs == NULL) {
1353 		DP_ERR(edev, "Failed to allocate MAC address\n");
1354 		qed_ops->common->slowpath_stop(edev);
1355 		qed_ops->common->remove(edev);
1356 		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
1357 				     (void *)eth_dev);
1358 		return -ENOMEM;
1359 	}
1360 
1361 	if (!is_vf) {
1362 		ether_addr_copy((struct ether_addr *)edev->hwfns[0].
1363 				hw_info.hw_mac_addr,
1364 				&eth_dev->data->mac_addrs[0]);
1365 		ether_addr_copy(&eth_dev->data->mac_addrs[0],
1366 				&adapter->primary_mac);
1367 	} else {
1368 		ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
1369 				       &bulletin_change);
1370 		if (bulletin_change) {
1371 			is_mac_exist =
1372 			    ecore_vf_bulletin_get_forced_mac(
1373 						ECORE_LEADING_HWFN(edev),
1374 						vf_mac,
1375 						&is_mac_forced);
1376 			if (is_mac_exist && is_mac_forced) {
1377 				DP_INFO(edev, "VF macaddr received from PF\n");
1378 				ether_addr_copy((struct ether_addr *)&vf_mac,
1379 						&eth_dev->data->mac_addrs[0]);
1380 				ether_addr_copy(&eth_dev->data->mac_addrs[0],
1381 						&adapter->primary_mac);
1382 			} else {
1383 				DP_NOTICE(edev, false,
1384 					  "No VF macaddr assigned\n");
1385 			}
1386 		}
1387 	}
1388 
1389 	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
1390 
1391 	if (do_once) {
1392 		qede_print_adapter_info(adapter);
1393 		do_once = false;
1394 	}
1395 
1396 	DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
1397 		  adapter->primary_mac.addr_bytes[0],
1398 		  adapter->primary_mac.addr_bytes[1],
1399 		  adapter->primary_mac.addr_bytes[2],
1400 		  adapter->primary_mac.addr_bytes[3],
1401 		  adapter->primary_mac.addr_bytes[4],
1402 		  adapter->primary_mac.addr_bytes[5]);
1403 
1404 	return rc;
1405 }
1406 
1407 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
1408 {
1409 	return qede_common_dev_init(eth_dev, 1);
1410 }
1411 
1412 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
1413 {
1414 	return qede_common_dev_init(eth_dev, 0);
1415 }
1416 
1417 static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
1418 {
1419 	/* only uninitialize in the primary process */
1420 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1421 		return 0;
1422 
1423 	/* safe to close dev here */
1424 	qede_dev_close(eth_dev);
1425 
1426 	eth_dev->dev_ops = NULL;
1427 	eth_dev->rx_pkt_burst = NULL;
1428 	eth_dev->tx_pkt_burst = NULL;
1429 
1430 	if (eth_dev->data->mac_addrs)
1431 		rte_free(eth_dev->data->mac_addrs);
1432 
1433 	eth_dev->data->mac_addrs = NULL;
1434 
1435 	return 0;
1436 }
1437 
1438 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
1439 {
1440 	return qede_dev_common_uninit(eth_dev);
1441 }
1442 
1443 static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
1444 {
1445 	return qede_dev_common_uninit(eth_dev);
1446 }
1447 
1448 static struct rte_pci_id pci_id_qedevf_map[] = {
1449 #define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
1450 	{
1451 		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_VF)
1452 	},
1453 	{
1454 		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_IOV)
1455 	},
1456 	{.vendor_id = 0,}
1457 };
1458 
1459 static struct rte_pci_id pci_id_qede_map[] = {
1460 #define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
1461 	{
1462 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980E)
1463 	},
1464 	{
1465 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980S)
1466 	},
1467 	{
1468 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_40)
1469 	},
1470 	{
1471 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_25)
1472 	},
1473 	{
1474 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_100)
1475 	},
1476 	{.vendor_id = 0,}
1477 };
1478 
1479 static struct eth_driver rte_qedevf_pmd = {
1480 	.pci_drv = {
1481 		    .name = "rte_qedevf_pmd",
1482 		    .id_table = pci_id_qedevf_map,
1483 		    .drv_flags =
1484 		    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1485 		    },
1486 	.eth_dev_init = qedevf_eth_dev_init,
1487 	.eth_dev_uninit = qedevf_eth_dev_uninit,
1488 	.dev_private_size = sizeof(struct qede_dev),
1489 };
1490 
1491 static struct eth_driver rte_qede_pmd = {
1492 	.pci_drv = {
1493 		    .name = "rte_qede_pmd",
1494 		    .id_table = pci_id_qede_map,
1495 		    .drv_flags =
1496 		    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1497 		    },
1498 	.eth_dev_init = qede_eth_dev_init,
1499 	.eth_dev_uninit = qede_eth_dev_uninit,
1500 	.dev_private_size = sizeof(struct qede_dev),
1501 };
1502 
1503 static int
1504 rte_qedevf_pmd_init(const char *name __rte_unused,
1505 		    const char *params __rte_unused)
1506 {
1507 	rte_eth_driver_register(&rte_qedevf_pmd);
1508 
1509 	return 0;
1510 }
1511 
1512 static int
1513 rte_qede_pmd_init(const char *name __rte_unused,
1514 		  const char *params __rte_unused)
1515 {
1516 	rte_eth_driver_register(&rte_qede_pmd);
1517 
1518 	return 0;
1519 }
1520 
1521 static struct rte_driver rte_qedevf_driver = {
1522 	.type = PMD_PDEV,
1523 	.init = rte_qedevf_pmd_init
1524 };
1525 
1526 static struct rte_driver rte_qede_driver = {
1527 	.type = PMD_PDEV,
1528 	.init = rte_qede_pmd_init
1529 };
1530 
1531 PMD_REGISTER_DRIVER(rte_qede_driver, net_qede);
1532 DRIVER_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
1533 PMD_REGISTER_DRIVER(rte_qedevf_driver, net_qede_vf);
1534 DRIVER_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
1535