xref: /dpdk/drivers/net/qede/qede_ethdev.c (revision 2ea6f76aff402be2c7a19cd244e83b46641bced5)
1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8 
9 #include "qede_ethdev.h"
10 
/* Globals */
/* Ethernet ops table supplied by the qed base driver; set at probe time */
static const struct qed_eth_ops *qed_ops;
/* Driver name exposed through the ethdev API */
static const char *drivername = "qede pmd";
14 
/* Run the ecore slowpath DPC for one HW function to service an interrupt */
static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
}
19 
20 static void
21 qede_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param)
22 {
23 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
24 	struct qede_dev *qdev = eth_dev->data->dev_private;
25 	struct ecore_dev *edev = &qdev->edev;
26 
27 	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
28 	if (rte_intr_enable(&eth_dev->pci_dev->intr_handle))
29 		DP_ERR(edev, "rte_intr_enable failed\n");
30 }
31 
32 static void
33 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
34 {
35 	rte_memcpy(&qdev->dev_info, info, sizeof(*info));
36 	qdev->num_tc = qdev->dev_info.num_tc;
37 	qdev->ops = qed_ops;
38 }
39 
40 static void qede_print_adapter_info(struct qede_dev *qdev)
41 {
42 	struct ecore_dev *edev = &qdev->edev;
43 	struct qed_dev_info *info = &qdev->dev_info.common;
44 	static char ver_str[QED_DRV_VER_STR_SIZE];
45 
46 	DP_INFO(edev, "*********************************\n");
47 	DP_INFO(edev, " Chip details : %s%d\n",
48 		ECORE_IS_BB(edev) ? "BB" : "AH",
49 		CHIP_REV_IS_A0(edev) ? 0 : 1);
50 
51 	sprintf(ver_str, "%s %s_%d.%d.%d.%d", QEDE_PMD_VER_PREFIX,
52 		edev->ver_str, QEDE_PMD_VERSION_MAJOR, QEDE_PMD_VERSION_MINOR,
53 		QEDE_PMD_VERSION_REVISION, QEDE_PMD_VERSION_PATCH);
54 	strcpy(qdev->drv_ver, ver_str);
55 	DP_INFO(edev, " Driver version : %s\n", ver_str);
56 
57 	sprintf(ver_str, "%d.%d.%d.%d", info->fw_major, info->fw_minor,
58 		info->fw_rev, info->fw_eng);
59 	DP_INFO(edev, " Firmware version : %s\n", ver_str);
60 
61 	sprintf(ver_str, "%d.%d.%d.%d",
62 		(info->mfw_rev >> 24) & 0xff,
63 		(info->mfw_rev >> 16) & 0xff,
64 		(info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
65 	DP_INFO(edev, " Management firmware version : %s\n", ver_str);
66 
67 	DP_INFO(edev, " Firmware file : %s\n", fw_file);
68 
69 	DP_INFO(edev, "*********************************\n");
70 }
71 
72 static int
73 qede_set_ucast_rx_mac(struct qede_dev *qdev,
74 		      enum qed_filter_xcast_params_type opcode,
75 		      uint8_t mac[ETHER_ADDR_LEN])
76 {
77 	struct ecore_dev *edev = &qdev->edev;
78 	struct qed_filter_params filter_cmd;
79 
80 	memset(&filter_cmd, 0, sizeof(filter_cmd));
81 	filter_cmd.type = QED_FILTER_TYPE_UCAST;
82 	filter_cmd.filter.ucast.type = opcode;
83 	filter_cmd.filter.ucast.mac_valid = 1;
84 	rte_memcpy(&filter_cmd.filter.ucast.mac[0], &mac[0], ETHER_ADDR_LEN);
85 	return qdev->ops->filter_config(edev, &filter_cmd);
86 }
87 
88 static void
89 qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
90 		  uint32_t index, __rte_unused uint32_t pool)
91 {
92 	struct qede_dev *qdev = eth_dev->data->dev_private;
93 	struct ecore_dev *edev = &qdev->edev;
94 	int rc;
95 
96 	PMD_INIT_FUNC_TRACE(edev);
97 
98 	if (index >= qdev->dev_info.num_mac_addrs) {
99 		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
100 		       index, qdev->dev_info.num_mac_addrs);
101 		return;
102 	}
103 
104 	/* Adding macaddr even though promiscuous mode is set */
105 	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
106 		DP_INFO(edev, "Port is in promisc mode, yet adding it\n");
107 
108 	/* Add MAC filters according to the unicast secondary macs */
109 	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
110 				   mac_addr->addr_bytes);
111 	if (rc)
112 		DP_ERR(edev, "Unable to add macaddr rc=%d\n", rc);
113 }
114 
115 static void
116 qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
117 {
118 	struct qede_dev *qdev = eth_dev->data->dev_private;
119 	struct ecore_dev *edev = &qdev->edev;
120 	struct ether_addr mac_addr;
121 	int rc;
122 
123 	PMD_INIT_FUNC_TRACE(edev);
124 
125 	if (index >= qdev->dev_info.num_mac_addrs) {
126 		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
127 		       index, qdev->dev_info.num_mac_addrs);
128 		return;
129 	}
130 
131 	/* Use the index maintained by rte */
132 	ether_addr_copy(&eth_dev->data->mac_addrs[index], &mac_addr);
133 	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
134 				   mac_addr.addr_bytes);
135 	if (rc)
136 		DP_ERR(edev, "Unable to remove macaddr rc=%d\n", rc);
137 }
138 
139 static void
140 qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
141 {
142 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
143 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
144 	int rc;
145 
146 	/* First remove the primary mac */
147 	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
148 				   qdev->primary_mac.addr_bytes);
149 
150 	if (rc) {
151 		DP_ERR(edev, "Unable to remove current macaddr"
152 			     " Reverting to previous default mac\n");
153 		ether_addr_copy(&qdev->primary_mac,
154 				&eth_dev->data->mac_addrs[0]);
155 		return;
156 	}
157 
158 	/* Add new MAC */
159 	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
160 				   mac_addr->addr_bytes);
161 
162 	if (rc)
163 		DP_ERR(edev, "Unable to add new default mac\n");
164 	else
165 		ether_addr_copy(mac_addr, &qdev->primary_mac);
166 }
167 
168 
169 
170 
171 static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
172 {
173 	struct ecore_dev *edev = &qdev->edev;
174 	struct qed_update_vport_params params = {
175 		.vport_id = 0,
176 		.accept_any_vlan = action,
177 		.update_accept_any_vlan_flg = 1,
178 	};
179 	int rc;
180 
181 	/* Proceed only if action actually needs to be performed */
182 	if (qdev->accept_any_vlan == action)
183 		return;
184 
185 	rc = qdev->ops->vport_update(edev, &params);
186 	if (rc) {
187 		DP_ERR(edev, "Failed to %s accept-any-vlan\n",
188 		       action ? "enable" : "disable");
189 	} else {
190 		DP_INFO(edev, "%s accept-any-vlan\n",
191 			action ? "enabled" : "disabled");
192 		qdev->accept_any_vlan = action;
193 	}
194 }
195 
196 void qede_config_rx_mode(struct rte_eth_dev *eth_dev)
197 {
198 	struct qede_dev *qdev = eth_dev->data->dev_private;
199 	struct ecore_dev *edev = &qdev->edev;
200 	/* TODO: - QED_FILTER_TYPE_UCAST */
201 	enum qed_filter_rx_mode_type accept_flags =
202 			QED_FILTER_RX_MODE_TYPE_REGULAR;
203 	struct qed_filter_params rx_mode;
204 	int rc;
205 
206 	/* Configure the struct for the Rx mode */
207 	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
208 	rx_mode.type = QED_FILTER_TYPE_RX_MODE;
209 
210 	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_REPLACE,
211 				   eth_dev->data->mac_addrs[0].addr_bytes);
212 	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
213 		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
214 	} else {
215 		rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
216 					   eth_dev->data->
217 					   mac_addrs[0].addr_bytes);
218 		if (rc) {
219 			DP_ERR(edev, "Unable to add filter\n");
220 			return;
221 		}
222 	}
223 
224 	/* take care of VLAN mode */
225 	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
226 		qede_config_accept_any_vlan(qdev, true);
227 	} else if (!qdev->non_configured_vlans) {
228 		/* If we dont have non-configured VLANs and promisc
229 		 * is not set, then check if we need to disable
230 		 * accept_any_vlan mode.
231 		 * Because in this case, accept_any_vlan mode is set
232 		 * as part of IFF_RPOMISC flag handling.
233 		 */
234 		qede_config_accept_any_vlan(qdev, false);
235 	}
236 	rx_mode.filter.accept_flags = accept_flags;
237 	rc = qdev->ops->filter_config(edev, &rx_mode);
238 	if (rc)
239 		DP_ERR(edev, "Filter config failed rc=%d\n", rc);
240 }
241 
242 static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
243 {
244 	struct qed_update_vport_params vport_update_params;
245 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
246 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
247 	int rc;
248 
249 	memset(&vport_update_params, 0, sizeof(vport_update_params));
250 	vport_update_params.vport_id = 0;
251 	vport_update_params.update_inner_vlan_removal_flg = 1;
252 	vport_update_params.inner_vlan_removal_flg = set_stripping;
253 	rc = qdev->ops->vport_update(edev, &vport_update_params);
254 	if (rc) {
255 		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
256 		return rc;
257 	}
258 
259 	return 0;
260 }
261 
262 static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
263 {
264 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
265 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
266 
267 	if (mask & ETH_VLAN_STRIP_MASK) {
268 		if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
269 			(void)qede_vlan_stripping(eth_dev, 1);
270 		else
271 			(void)qede_vlan_stripping(eth_dev, 0);
272 	}
273 
274 	DP_INFO(edev, "vlan offload mask %d vlan-strip %d\n",
275 		mask, eth_dev->data->dev_conf.rxmode.hw_vlan_strip);
276 }
277 
278 static int qede_set_ucast_rx_vlan(struct qede_dev *qdev,
279 				  enum qed_filter_xcast_params_type opcode,
280 				  uint16_t vid)
281 {
282 	struct qed_filter_params filter_cmd;
283 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
284 
285 	memset(&filter_cmd, 0, sizeof(filter_cmd));
286 	filter_cmd.type = QED_FILTER_TYPE_UCAST;
287 	filter_cmd.filter.ucast.type = opcode;
288 	filter_cmd.filter.ucast.vlan_valid = 1;
289 	filter_cmd.filter.ucast.vlan = vid;
290 
291 	return qdev->ops->filter_config(edev, &filter_cmd);
292 }
293 
294 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
295 				uint16_t vlan_id, int on)
296 {
297 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
298 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
299 	struct qed_dev_eth_info *dev_info = &qdev->dev_info;
300 	int rc;
301 
302 	if (vlan_id != 0 &&
303 	    qdev->configured_vlans == dev_info->num_vlan_filters) {
304 		DP_NOTICE(edev, false, "Reached max VLAN filter limit"
305 				     " enabling accept_any_vlan\n");
306 		qede_config_accept_any_vlan(qdev, true);
307 		return 0;
308 	}
309 
310 	if (on) {
311 		rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_ADD,
312 					    vlan_id);
313 		if (rc)
314 			DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
315 			       rc);
316 		else
317 			if (vlan_id != 0)
318 				qdev->configured_vlans++;
319 	} else {
320 		rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_DEL,
321 					    vlan_id);
322 		if (rc)
323 			DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
324 			       vlan_id, rc);
325 		else
326 			if (vlan_id != 0)
327 				qdev->configured_vlans--;
328 	}
329 
330 	DP_INFO(edev, "vlan_id %u on %u rc %d configured_vlans %u\n",
331 			vlan_id, on, rc, qdev->configured_vlans);
332 
333 	return rc;
334 }
335 
336 static int qede_dev_configure(struct rte_eth_dev *eth_dev)
337 {
338 	struct qede_dev *qdev = eth_dev->data->dev_private;
339 	struct ecore_dev *edev = &qdev->edev;
340 	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
341 
342 	PMD_INIT_FUNC_TRACE(edev);
343 
344 	if (eth_dev->data->nb_rx_queues != eth_dev->data->nb_tx_queues) {
345 		DP_NOTICE(edev, false,
346 			  "Unequal number of rx/tx queues "
347 			  "is not supported RX=%u TX=%u\n",
348 			  eth_dev->data->nb_rx_queues,
349 			  eth_dev->data->nb_tx_queues);
350 		return -EINVAL;
351 	}
352 
353 	qdev->num_rss = eth_dev->data->nb_rx_queues;
354 
355 	/* Initial state */
356 	qdev->state = QEDE_CLOSE;
357 
358 	/* Sanity checks and throw warnings */
359 
360 	if (rxmode->enable_scatter == 1) {
361 		DP_ERR(edev, "RX scatter packets is not supported\n");
362 		return -EINVAL;
363 	}
364 
365 	if (rxmode->enable_lro == 1) {
366 		DP_INFO(edev, "LRO is not supported\n");
367 		return -EINVAL;
368 	}
369 
370 	if (!rxmode->hw_strip_crc)
371 		DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
372 
373 	if (!rxmode->hw_ip_checksum)
374 		DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
375 			      "in hw\n");
376 
377 
378 	DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
379 		QEDE_RSS_CNT(qdev), qdev->num_tc);
380 
381 	DP_INFO(edev, "my_id %u rel_pf_id %u abs_pf_id %u"
382 		" port %u first_on_engine %d\n",
383 		edev->hwfns[0].my_id,
384 		edev->hwfns[0].rel_pf_id,
385 		edev->hwfns[0].abs_pf_id,
386 		edev->hwfns[0].port_id, edev->hwfns[0].first_on_engine);
387 
388 	return 0;
389 }
390 
/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
	.nb_max = NUM_RX_BDS_MAX,
	.nb_min = 128,
	.nb_align = 128	/* lowest common multiple */
};

/* Tx descriptor ring limits */
static const struct rte_eth_desc_lim qede_tx_desc_lim = {
	.nb_max = NUM_TX_BDS_MAX,
	.nb_min = 256,
	.nb_align = 256
};
403 
404 static void
405 qede_dev_info_get(struct rte_eth_dev *eth_dev,
406 		  struct rte_eth_dev_info *dev_info)
407 {
408 	struct qede_dev *qdev = eth_dev->data->dev_private;
409 	struct ecore_dev *edev = &qdev->edev;
410 
411 	PMD_INIT_FUNC_TRACE(edev);
412 
413 	dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
414 					      QEDE_ETH_OVERHEAD);
415 	dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
416 	dev_info->rx_desc_lim = qede_rx_desc_lim;
417 	dev_info->tx_desc_lim = qede_tx_desc_lim;
418 	dev_info->max_rx_queues = (uint16_t)QEDE_MAX_RSS_CNT(qdev);
419 	dev_info->max_tx_queues = dev_info->max_rx_queues;
420 	dev_info->max_mac_addrs = qdev->dev_info.num_mac_addrs;
421 	dev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev);
422 	dev_info->driver_name = qdev->drv_ver;
423 	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
424 
425 	dev_info->default_txconf = (struct rte_eth_txconf) {
426 		.txq_flags = QEDE_TXQ_FLAGS,
427 	};
428 
429 	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
430 				     DEV_RX_OFFLOAD_IPV4_CKSUM |
431 				     DEV_RX_OFFLOAD_UDP_CKSUM |
432 				     DEV_RX_OFFLOAD_TCP_CKSUM);
433 	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
434 				     DEV_TX_OFFLOAD_IPV4_CKSUM |
435 				     DEV_TX_OFFLOAD_UDP_CKSUM |
436 				     DEV_TX_OFFLOAD_TCP_CKSUM);
437 
438 	dev_info->speed_capa = ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G;
439 }
440 
441 /* return 0 means link status changed, -1 means not changed */
442 static int
443 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
444 {
445 	struct qede_dev *qdev = eth_dev->data->dev_private;
446 	struct ecore_dev *edev = &qdev->edev;
447 	uint16_t link_duplex;
448 	struct qed_link_output link;
449 	struct rte_eth_link *curr = &eth_dev->data->dev_link;
450 
451 	memset(&link, 0, sizeof(struct qed_link_output));
452 	qdev->ops->common->get_link(edev, &link);
453 
454 	/* Link Speed */
455 	curr->link_speed = link.speed;
456 
457 	/* Link Mode */
458 	switch (link.duplex) {
459 	case QEDE_DUPLEX_HALF:
460 		link_duplex = ETH_LINK_HALF_DUPLEX;
461 		break;
462 	case QEDE_DUPLEX_FULL:
463 		link_duplex = ETH_LINK_FULL_DUPLEX;
464 		break;
465 	case QEDE_DUPLEX_UNKNOWN:
466 	default:
467 		link_duplex = -1;
468 	}
469 	curr->link_duplex = link_duplex;
470 
471 	/* Link Status */
472 	curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
473 
474 	/* AN */
475 	curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
476 			     ETH_LINK_AUTONEG : ETH_LINK_FIXED;
477 
478 	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
479 		curr->link_speed, curr->link_duplex,
480 		curr->link_autoneg, curr->link_status);
481 
482 	/* return 0 means link status changed, -1 means not changed */
483 	return ((curr->link_status == link.link_up) ? -1 : 0);
484 }
485 
486 static void
487 qede_rx_mode_setting(struct rte_eth_dev *eth_dev,
488 		     enum qed_filter_rx_mode_type accept_flags)
489 {
490 	struct qede_dev *qdev = eth_dev->data->dev_private;
491 	struct ecore_dev *edev = &qdev->edev;
492 	struct qed_filter_params rx_mode;
493 
494 	DP_INFO(edev, "%s mode %u\n", __func__, accept_flags);
495 
496 	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
497 	rx_mode.type = QED_FILTER_TYPE_RX_MODE;
498 	rx_mode.filter.accept_flags = accept_flags;
499 	qdev->ops->filter_config(edev, &rx_mode);
500 }
501 
502 static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
503 {
504 	struct qede_dev *qdev = eth_dev->data->dev_private;
505 	struct ecore_dev *edev = &qdev->edev;
506 
507 	PMD_INIT_FUNC_TRACE(edev);
508 
509 	enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
510 
511 	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
512 		type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
513 
514 	qede_rx_mode_setting(eth_dev, type);
515 }
516 
517 static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
518 {
519 	struct qede_dev *qdev = eth_dev->data->dev_private;
520 	struct ecore_dev *edev = &qdev->edev;
521 
522 	PMD_INIT_FUNC_TRACE(edev);
523 
524 	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
525 		qede_rx_mode_setting(eth_dev,
526 				     QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
527 	else
528 		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
529 }
530 
/* .dev_close handler: stop the datapath if still running, then release
 * every resource (fastpath memory, slowpath context, interrupt hookup)
 * so the device can be probed again from scratch.
 */
static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);

	/* dev_stop() shall cleanup fp resources in hw but without releasing
	 * dma memories and sw structures so that dev_start() can be called
	 * by the app without reconfiguration. However, in dev_close() we
	 * can release all the resources and device can be brought up newly
	 */
	if (qdev->state != QEDE_STOP)
		qede_dev_stop(eth_dev);
	else
		DP_INFO(edev, "Device is already stopped\n");

	/* Release fastpath DMA memory and the SW fastpath arrays */
	qede_free_mem_load(qdev);

	qede_free_fp_arrays(qdev);

	/* Drop the physical link before tearing down the slowpath */
	qede_dev_set_link_state(eth_dev, false);

	qdev->ops->common->slowpath_stop(edev);

	qdev->ops->common->remove(edev);

	/* Unhook the interrupt handler last, after HW teardown */
	rte_intr_disable(&eth_dev->pci_dev->intr_handle);

	rte_intr_callback_unregister(&eth_dev->pci_dev->intr_handle,
				     qede_interrupt_handler, (void *)eth_dev);

	qdev->state = QEDE_CLOSE;
}
565 
566 static void
567 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
568 {
569 	struct qede_dev *qdev = eth_dev->data->dev_private;
570 	struct ecore_dev *edev = &qdev->edev;
571 	struct ecore_eth_stats stats;
572 
573 	qdev->ops->get_vport_stats(edev, &stats);
574 
575 	/* RX Stats */
576 	eth_stats->ipackets = stats.rx_ucast_pkts +
577 	    stats.rx_mcast_pkts + stats.rx_bcast_pkts;
578 
579 	eth_stats->ibytes = stats.rx_ucast_bytes +
580 	    stats.rx_mcast_bytes + stats.rx_bcast_bytes;
581 
582 	eth_stats->ierrors = stats.rx_crc_errors +
583 	    stats.rx_align_errors +
584 	    stats.rx_carrier_errors +
585 	    stats.rx_oversize_packets +
586 	    stats.rx_jabbers + stats.rx_undersize_packets;
587 
588 	eth_stats->rx_nombuf = stats.no_buff_discards;
589 
590 	eth_stats->imissed = stats.mftag_filter_discards +
591 	    stats.mac_filter_discards +
592 	    stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;
593 
594 	/* TX stats */
595 	eth_stats->opackets = stats.tx_ucast_pkts +
596 	    stats.tx_mcast_pkts + stats.tx_bcast_pkts;
597 
598 	eth_stats->obytes = stats.tx_ucast_bytes +
599 	    stats.tx_mcast_bytes + stats.tx_bcast_bytes;
600 
601 	eth_stats->oerrors = stats.tx_err_drop_pkts;
602 
603 	DP_INFO(edev,
604 		"no_buff_discards=%" PRIu64 ""
605 		" mac_filter_discards=%" PRIu64 ""
606 		" brb_truncates=%" PRIu64 ""
607 		" brb_discards=%" PRIu64 "\n",
608 		stats.no_buff_discards,
609 		stats.mac_filter_discards,
610 		stats.brb_truncates, stats.brb_discards);
611 }
612 
613 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
614 {
615 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
616 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
617 	struct qed_link_params link_params;
618 	int rc;
619 
620 	DP_INFO(edev, "setting link state %d\n", link_up);
621 	memset(&link_params, 0, sizeof(link_params));
622 	link_params.link_up = link_up;
623 	rc = qdev->ops->common->set_link(edev, &link_params);
624 	if (rc != ECORE_SUCCESS)
625 		DP_ERR(edev, "Unable to set link state %d\n", link_up);
626 
627 	return rc;
628 }
629 
/* .dev_set_link_up handler: thin wrapper over qede_dev_set_link_state */
static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, true);
}
634 
/* .dev_set_link_down handler: thin wrapper over qede_dev_set_link_state */
static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, false);
}
639 
640 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
641 {
642 	enum qed_filter_rx_mode_type type =
643 	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
644 
645 	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
646 		type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
647 
648 	qede_rx_mode_setting(eth_dev, type);
649 }
650 
651 static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
652 {
653 	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
654 		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_PROMISC);
655 	else
656 		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
657 }
658 
659 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
660 			      struct rte_eth_fc_conf *fc_conf)
661 {
662 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
663 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
664 	struct qed_link_output current_link;
665 	struct qed_link_params params;
666 
667 	memset(&current_link, 0, sizeof(current_link));
668 	qdev->ops->common->get_link(edev, &current_link);
669 
670 	memset(&params, 0, sizeof(params));
671 	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
672 	if (fc_conf->autoneg) {
673 		if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
674 			DP_ERR(edev, "Autoneg not supported\n");
675 			return -EINVAL;
676 		}
677 		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
678 	}
679 
680 	/* Pause is assumed to be supported (SUPPORTED_Pause) */
681 	if (fc_conf->mode == RTE_FC_FULL)
682 		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
683 					QED_LINK_PAUSE_RX_ENABLE);
684 	if (fc_conf->mode == RTE_FC_TX_PAUSE)
685 		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
686 	if (fc_conf->mode == RTE_FC_RX_PAUSE)
687 		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
688 
689 	params.link_up = true;
690 	(void)qdev->ops->common->set_link(edev, &params);
691 
692 	return 0;
693 }
694 
695 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
696 			      struct rte_eth_fc_conf *fc_conf)
697 {
698 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
699 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
700 	struct qed_link_output current_link;
701 
702 	memset(&current_link, 0, sizeof(current_link));
703 	qdev->ops->common->get_link(edev, &current_link);
704 
705 	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
706 		fc_conf->autoneg = true;
707 
708 	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
709 					 QED_LINK_PAUSE_TX_ENABLE))
710 		fc_conf->mode = RTE_FC_FULL;
711 	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
712 		fc_conf->mode = RTE_FC_RX_PAUSE;
713 	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
714 		fc_conf->mode = RTE_FC_TX_PAUSE;
715 	else
716 		fc_conf->mode = RTE_FC_NONE;
717 
718 	return 0;
719 }
720 
721 static const uint32_t *
722 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
723 {
724 	static const uint32_t ptypes[] = {
725 		RTE_PTYPE_L3_IPV4,
726 		RTE_PTYPE_L3_IPV6,
727 		RTE_PTYPE_UNKNOWN
728 	};
729 
730 	if (eth_dev->rx_pkt_burst == qede_recv_pkts)
731 		return ptypes;
732 
733 	return NULL;
734 }
735 
/* ethdev callback table shared by PF and VF ports */
static const struct eth_dev_ops qede_eth_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.flow_ctrl_set = qede_flow_ctrl_set,
	.flow_ctrl_get = qede_flow_ctrl_get,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
};
763 
764 static void qede_update_pf_params(struct ecore_dev *edev)
765 {
766 	struct ecore_pf_params pf_params;
767 	/* 32 rx + 32 tx */
768 	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
769 	pf_params.eth_pf_params.num_cons = 64;
770 	qed_ops->common->update_pf_params(edev, &pf_params);
771 }
772 
773 static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
774 {
775 	struct rte_pci_device *pci_dev;
776 	struct rte_pci_addr pci_addr;
777 	struct qede_dev *adapter;
778 	struct ecore_dev *edev;
779 	struct qed_dev_eth_info dev_info;
780 	struct qed_slowpath_params params;
781 	uint32_t qed_ver;
782 	static bool do_once = true;
783 	uint8_t bulletin_change;
784 	uint8_t vf_mac[ETHER_ADDR_LEN];
785 	uint8_t is_mac_forced;
786 	bool is_mac_exist;
787 	/* Fix up ecore debug level */
788 	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
789 	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
790 	uint32_t max_mac_addrs;
791 	int rc;
792 
793 	/* Extract key data structures */
794 	adapter = eth_dev->data->dev_private;
795 	edev = &adapter->edev;
796 	pci_addr = eth_dev->pci_dev->addr;
797 
798 	PMD_INIT_FUNC_TRACE(edev);
799 
800 	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
801 		 pci_addr.bus, pci_addr.devid, pci_addr.function,
802 		 eth_dev->data->port_id);
803 
804 	eth_dev->rx_pkt_burst = qede_recv_pkts;
805 	eth_dev->tx_pkt_burst = qede_xmit_pkts;
806 
807 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
808 		DP_NOTICE(edev, false,
809 			  "Skipping device init from secondary process\n");
810 		return 0;
811 	}
812 
813 	pci_dev = eth_dev->pci_dev;
814 
815 	rte_eth_copy_pci_info(eth_dev, pci_dev);
816 
817 	DP_INFO(edev, "Starting qede probe\n");
818 
819 	rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
820 				    dp_module, dp_level, is_vf);
821 
822 	if (rc != 0) {
823 		DP_ERR(edev, "qede probe failed rc %d\n", rc);
824 		return -ENODEV;
825 	}
826 
827 	qede_update_pf_params(edev);
828 
829 	rte_intr_callback_register(&eth_dev->pci_dev->intr_handle,
830 				   qede_interrupt_handler, (void *)eth_dev);
831 
832 	if (rte_intr_enable(&eth_dev->pci_dev->intr_handle)) {
833 		DP_ERR(edev, "rte_intr_enable() failed\n");
834 		return -ENODEV;
835 	}
836 
837 	/* Start the Slowpath-process */
838 	memset(&params, 0, sizeof(struct qed_slowpath_params));
839 	params.int_mode = ECORE_INT_MODE_MSIX;
840 	params.drv_major = QEDE_MAJOR_VERSION;
841 	params.drv_minor = QEDE_MINOR_VERSION;
842 	params.drv_rev = QEDE_REVISION_VERSION;
843 	params.drv_eng = QEDE_ENGINEERING_VERSION;
844 	strncpy((char *)params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
845 
846 	rc = qed_ops->common->slowpath_start(edev, &params);
847 	if (rc) {
848 		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
849 		return -ENODEV;
850 	}
851 
852 	rc = qed_ops->fill_dev_info(edev, &dev_info);
853 	if (rc) {
854 		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
855 		qed_ops->common->slowpath_stop(edev);
856 		qed_ops->common->remove(edev);
857 		return -ENODEV;
858 	}
859 
860 	qede_alloc_etherdev(adapter, &dev_info);
861 
862 	adapter->ops->common->set_id(edev, edev->name, QEDE_DRV_MODULE_VERSION);
863 
864 	if (!is_vf)
865 		adapter->dev_info.num_mac_addrs =
866 			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
867 					    ECORE_MAC);
868 	else
869 		adapter->dev_info.num_mac_addrs = 1;
870 
871 	/* Allocate memory for storing MAC addr */
872 	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
873 					(ETHER_ADDR_LEN *
874 					adapter->dev_info.num_mac_addrs),
875 					RTE_CACHE_LINE_SIZE);
876 
877 	if (eth_dev->data->mac_addrs == NULL) {
878 		DP_ERR(edev, "Failed to allocate MAC address\n");
879 		qed_ops->common->slowpath_stop(edev);
880 		qed_ops->common->remove(edev);
881 		return -ENOMEM;
882 	}
883 
884 	ether_addr_copy((struct ether_addr *)edev->hwfns[0].
885 				hw_info.hw_mac_addr,
886 				&eth_dev->data->mac_addrs[0]);
887 
888 	eth_dev->dev_ops = &qede_eth_dev_ops;
889 
890 	if (do_once) {
891 		qede_print_adapter_info(adapter);
892 		do_once = false;
893 	}
894 
895 	DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
896 		  adapter->primary_mac.addr_bytes[0],
897 		  adapter->primary_mac.addr_bytes[1],
898 		  adapter->primary_mac.addr_bytes[2],
899 		  adapter->primary_mac.addr_bytes[3],
900 		  adapter->primary_mac.addr_bytes[4],
901 		  adapter->primary_mac.addr_bytes[5]);
902 
903 	return rc;
904 }
905 
/* VF init entry: defer to the common path with is_vf = true */
static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, true);
}
910 
/* PF init entry: defer to the common path with is_vf = false */
static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, false);
}
915 
916 static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
917 {
918 	/* only uninitialize in the primary process */
919 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
920 		return 0;
921 
922 	/* safe to close dev here */
923 	qede_dev_close(eth_dev);
924 
925 	eth_dev->dev_ops = NULL;
926 	eth_dev->rx_pkt_burst = NULL;
927 	eth_dev->tx_pkt_burst = NULL;
928 
929 	if (eth_dev->data->mac_addrs)
930 		rte_free(eth_dev->data->mac_addrs);
931 
932 	eth_dev->data->mac_addrs = NULL;
933 
934 	return 0;
935 }
936 
/* PF uninit entry: the common teardown covers everything */
static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}
941 
/* VF uninit entry: the common teardown covers everything */
static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}
946 
/* PCI device IDs claimed by the VF driver */
static struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_VF)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_IOV)
	},
	{.vendor_id = 0,}	/* sentinel: end of table */
};
957 
/* PCI device IDs claimed by the PF driver */
static struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980E)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980S)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_40)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_25)
	},
	{.vendor_id = 0,}	/* sentinel: end of table */
};
974 
/* Ethernet driver descriptor for VF devices */
static struct eth_driver rte_qedevf_pmd = {
	.pci_drv = {
		    .name = "rte_qedevf_pmd",
		    .id_table = pci_id_qedevf_map,
		    .drv_flags =
		    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
		    },
	.eth_dev_init = qedevf_eth_dev_init,
	.eth_dev_uninit = qedevf_eth_dev_uninit,
	.dev_private_size = sizeof(struct qede_dev),
};

/* Ethernet driver descriptor for PF devices */
static struct eth_driver rte_qede_pmd = {
	.pci_drv = {
		    .name = "rte_qede_pmd",
		    .id_table = pci_id_qede_map,
		    .drv_flags =
		    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
		    },
	.eth_dev_init = qede_eth_dev_init,
	.eth_dev_uninit = qede_eth_dev_uninit,
	.dev_private_size = sizeof(struct qede_dev),
};
998 
/* EAL init hook for the VF PMD: register it with the ethdev layer */
static int
rte_qedevf_pmd_init(const char *name __rte_unused,
		    const char *params __rte_unused)
{
	rte_eth_driver_register(&rte_qedevf_pmd);

	return 0;
}
1007 
/* EAL init hook for the PF PMD: register it with the ethdev layer */
static int
rte_qede_pmd_init(const char *name __rte_unused,
		  const char *params __rte_unused)
{
	rte_eth_driver_register(&rte_qede_pmd);

	return 0;
}
1016 
1017 static struct rte_driver rte_qedevf_driver = {
1018 	.type = PMD_PDEV,
1019 	.init = rte_qede_pmd_init
1020 };
1021 
1022 static struct rte_driver rte_qede_driver = {
1023 	.type = PMD_PDEV,
1024 	.init = rte_qedevf_pmd_init
1025 };
1026 
/* Register both the PF and VF drivers with the EAL */
PMD_REGISTER_DRIVER(rte_qede_driver);
PMD_REGISTER_DRIVER(rte_qedevf_driver);
1029