xref: /dpdk/drivers/net/qede/qede_main.c (revision 0857b942113874c69dc3db5df11a828ee3cc9b6b)
/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include <limits.h>
#include <time.h>
#include <rte_alarm.h>

#include "qede_ethdev.h"

/* VF bulletin polling interval (microseconds) */
#define QEDE_ALARM_TIMEOUT_US 100000

/* Global variable to hold absolute path of fw file */
char fw_file[PATH_MAX];

const char *QEDE_DEFAULT_FIRMWARE =
	"/lib/firmware/qed/qed_init_values-8.18.9.0.bin";

static void
qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
{
	int i;

	for (i = 0; i < edev->num_hwfns; i++) {
		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}

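/* Map the PCI BARs the PMD needs: BAR0 holds the register view,
 * BAR2 the doorbell space.
 */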
static void qed_init_pci(struct ecore_dev *edev, struct rte_pci_device *pci_dev)
{
	edev->regview = pci_dev->mem_resource[0].addr;
	edev->doorbells = pci_dev->mem_resource[2].addr;
}

static int
qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
	  enum qed_protocol protocol, uint32_t dp_module,
	  uint8_t dp_level, bool is_vf)
{
	struct ecore_hw_prepare_params hw_prepare_params;
	struct qede_dev *qdev = (struct qede_dev *)edev;
	int rc;

	ecore_init_struct(edev);
	edev->drv_type = DRV_ID_DRV_TYPE_LINUX;
	qdev->protocol = protocol;

	if (is_vf)
		edev->b_is_vf = true;

	ecore_init_dp(edev, dp_module, dp_level, NULL);
	qed_init_pci(edev, pci_dev);

	memset(&hw_prepare_params, 0, sizeof(hw_prepare_params));
	hw_prepare_params.personality = ECORE_PCI_ETH;
	hw_prepare_params.drv_resc_alloc = false;
	hw_prepare_params.chk_reg_fifo = false;
	hw_prepare_params.initiate_pf_flr = true;
	hw_prepare_params.epoch = (u32)time(NULL);
	rc = ecore_hw_prepare(edev, &hw_prepare_params);
	if (rc) {
		DP_ERR(edev, "hw prepare failed\n");
		return rc;
	}

	return 0;
}

static int qed_nic_setup(struct ecore_dev *edev)
{
	int rc;

	rc = ecore_resc_alloc(edev);
	if (rc)
		return rc;

	DP_INFO(edev, "Allocated qed resources\n");
	ecore_resc_setup(edev);

	return 0;
}

#ifdef CONFIG_ECORE_ZIPPED_FW
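/* Allocate a per-hwfn workspace used when inflating the zipped
 * firmware image.
 */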
static int qed_alloc_stream_mem(struct ecore_dev *edev)
{
	int i;

	for_each_hwfn(edev, i) {
		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];

		p_hwfn->stream = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
					     sizeof(*p_hwfn->stream));
		if (!p_hwfn->stream)
			return -ENOMEM;
	}

	return 0;
}

static void qed_free_stream_mem(struct ecore_dev *edev)
{
	int i;

	for_each_hwfn(edev, i) {
		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];

		/* Streams are allocated in hwfn order, so the first
		 * missing one means the rest were never allocated.
		 */
		if (!p_hwfn->stream)
			return;

		OSAL_FREE(p_hwfn->p_dev, p_hwfn->stream);
	}
}
#endif

#ifdef CONFIG_ECORE_BINARY_FW
static int qed_load_firmware_data(struct ecore_dev *edev)
{
	int fd;
	struct stat st;
	const char *fw = RTE_LIBRTE_QEDE_FW;

	if (strcmp(fw, "") == 0)
		strcpy(fw_file, QEDE_DEFAULT_FIRMWARE);
	else
		strcpy(fw_file, fw);

	fd = open(fw_file, O_RDONLY);
	if (fd < 0) {
		DP_NOTICE(edev, false, "Can't open firmware file\n");
		return -ENOENT;
	}

	if (fstat(fd, &st) < 0) {
		DP_NOTICE(edev, false, "Can't stat firmware file\n");
		close(fd);
		return -1;
	}

	edev->firmware = rte_zmalloc("qede_fw", st.st_size,
				     RTE_CACHE_LINE_SIZE);
	if (!edev->firmware) {
		DP_NOTICE(edev, false, "Can't allocate memory for firmware\n");
		close(fd);
		return -ENOMEM;
	}

	if (read(fd, edev->firmware, st.st_size) != st.st_size) {
		DP_NOTICE(edev, false, "Can't read firmware data\n");
		rte_free(edev->firmware);
		edev->firmware = NULL;
		close(fd);
		return -1;
	}

	edev->fw_len = st.st_size;
	if (edev->fw_len < 104) {
		DP_NOTICE(edev, false, "Invalid fw size: %" PRIu64 "\n",
			  edev->fw_len);
		rte_free(edev->firmware);
		edev->firmware = NULL;
		close(fd);
		return -EINVAL;
	}

	close(fd);
	return 0;
}
#endif

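/* Apply changes advertised by the PF through the VF bulletin:
 * a forced MAC, if present, and the current link configuration.
 */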
static void qed_handle_bulletin_change(struct ecore_hwfn *hwfn)
{
	uint8_t mac[ETH_ALEN], is_mac_exist, is_mac_forced;

	is_mac_exist = ecore_vf_bulletin_get_forced_mac(hwfn, mac,
							&is_mac_forced);
	if (is_mac_exist && is_mac_forced)
		rte_memcpy(hwfn->hw_info.hw_mac_addr, mac, ETH_ALEN);

	/* Always update link configuration according to bulletin */
	qed_link_update(hwfn);
}

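/* Periodic VF task: polls the bulletin board for changes and
 * re-arms itself every QEDE_ALARM_TIMEOUT_US microseconds.
 */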
static void qede_vf_task(void *arg)
{
	struct ecore_hwfn *p_hwfn = arg;
	uint8_t change = 0;

	/* Read the bulletin board, and re-schedule the task */
	ecore_vf_read_bulletin(p_hwfn, &change);
	if (change)
		qed_handle_bulletin_change(p_hwfn);

	rte_eal_alarm_set(QEDE_ALARM_TIMEOUT_US, qede_vf_task, p_hwfn);
}

static void qed_start_iov_task(struct ecore_dev *edev)
{
	struct ecore_hwfn *p_hwfn;
	int i;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		if (!IS_PF(edev))
			rte_eal_alarm_set(QEDE_ALARM_TIMEOUT_US, qede_vf_task,
					  p_hwfn);
	}
}

static void qed_stop_iov_task(struct ecore_dev *edev)
{
	struct ecore_hwfn *p_hwfn;
	int i;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		if (!IS_PF(edev))
			rte_eal_alarm_cancel(qede_vf_task, p_hwfn);
	}
}

static int qed_slowpath_start(struct ecore_dev *edev,
			      struct qed_slowpath_params *params)
{
	const uint8_t *data = NULL;
	struct ecore_hwfn *hwfn;
	struct ecore_mcp_drv_version drv_version;
	struct ecore_hw_init_params hw_init_params;
	struct ecore_ptt *p_ptt;
	int rc;

	if (IS_PF(edev)) {
#ifdef CONFIG_ECORE_BINARY_FW
		rc = qed_load_firmware_data(edev);
		if (rc) {
			DP_ERR(edev, "Failed to find fw file %s\n", fw_file);
			goto err;
		}
#endif
		hwfn = ECORE_LEADING_HWFN(edev);
		if (edev->num_hwfns == 1) { /* skip aRFS for 100G device */
			p_ptt = ecore_ptt_acquire(hwfn);
			if (p_ptt) {
				ECORE_LEADING_HWFN(edev)->p_arfs_ptt = p_ptt;
			} else {
				DP_ERR(edev, "Failed to acquire PTT for flowdir\n");
				rc = -ENOMEM;
				goto err;
			}
		}
	}

	rc = qed_nic_setup(edev);
	if (rc)
		goto err;

	/* set int_coalescing_mode */
	edev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;

#ifdef CONFIG_ECORE_ZIPPED_FW
	if (IS_PF(edev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(edev);
		if (rc) {
			DP_NOTICE(edev, true,
				  "Failed to allocate stream memory\n");
			goto err2;
		}
	}
#endif

	/* Start the periodic IOV task regardless of zipped-FW support */
	qed_start_iov_task(edev);

#ifdef CONFIG_ECORE_BINARY_FW
	/* The init-values payload follows the file's leading dword */
	if (IS_PF(edev))
		data = (const uint8_t *)edev->firmware + sizeof(u32);
#endif

	/* Start the slowpath */
	memset(&hw_init_params, 0, sizeof(hw_init_params));
	hw_init_params.b_hw_start = true;
	hw_init_params.int_mode = ECORE_INT_MODE_MSIX;
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = data;
	hw_init_params.mfw_timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
	hw_init_params.avoid_eng_reset = false;
	rc = ecore_hw_init(edev, &hw_init_params);
	if (rc) {
		DP_ERR(edev, "ecore_hw_init failed\n");
		goto err2;
	}

	DP_INFO(edev, "HW inited and function started\n");

	if (IS_PF(edev)) {
		hwfn = ECORE_LEADING_HWFN(edev);
		/* One byte per field: major.minor.rev.eng,
		 * e.g. 8.18.9.0 -> 0x08120900.
		 */
		drv_version.version = (params->drv_major << 24) |
		    (params->drv_minor << 16) |
		    (params->drv_rev << 8) | (params->drv_eng);
		/* TBD: strlcpy() */
		strncpy((char *)drv_version.name, (const char *)params->name,
			MCP_DRV_VER_STR_SIZE - 4);
		rc = ecore_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
						&drv_version);
		if (rc) {
			DP_NOTICE(edev, true,
				  "Failed sending drv version command\n");
			return rc;
		}
	}

	ecore_reset_vport_stats(edev);

	return 0;

err2:
	ecore_resc_free(edev);
err:
#ifdef CONFIG_ECORE_BINARY_FW
	if (IS_PF(edev)) {
		if (edev->firmware)
			rte_free(edev->firmware);
		edev->firmware = NULL;
	}
#endif
	qed_stop_iov_task(edev);

	return rc;
}

static int
qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
{
	struct ecore_ptt *ptt = NULL;
	struct ecore_tunnel_info *tun = &edev->tunnel;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	if (tun->vxlan.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN &&
	    tun->vxlan.b_mode_enabled)
		dev_info->vxlan_enable = true;

	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
	    tun->l2_gre.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_gre.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN)
		dev_info->gre_enable = true;

	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
	    tun->l2_geneve.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_geneve.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN)
		dev_info->geneve_enable = true;

	dev_info->num_hwfns = edev->num_hwfns;
	dev_info->is_mf_default = IS_MF_DEFAULT(&edev->hwfns[0]);
	dev_info->mtu = ECORE_LEADING_HWFN(edev)->hw_info.mtu;

	rte_memcpy(&dev_info->hw_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
		   ETHER_ADDR_LEN);

	if (IS_PF(edev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->mf_mode = edev->mf_mode;
		dev_info->tx_switching = false;

		ptt = ecore_ptt_acquire(ECORE_LEADING_HWFN(edev));
		if (ptt) {
			ecore_mcp_get_mfw_ver(ECORE_LEADING_HWFN(edev), ptt,
					      &dev_info->mfw_rev, NULL);

			ecore_mcp_get_flash_size(ECORE_LEADING_HWFN(edev), ptt,
						 &dev_info->flash_size);

			/* Workaround to allow PHY-read commands for
			 * B0 bringup.
			 */
			if (ECORE_IS_BB_B0(edev))
				dev_info->flash_size = 0xffffffff;

			ecore_ptt_release(ECORE_LEADING_HWFN(edev), ptt);
		}
	} else {
		ecore_vf_get_fw_version(&edev->hwfns[0], &dev_info->fw_major,
					&dev_info->fw_minor, &dev_info->fw_rev,
					&dev_info->fw_eng);

		/* ptt is left NULL; the VF path of ecore_mcp_get_mfw_ver
		 * does not use it.
		 */
		ecore_mcp_get_mfw_ver(ECORE_LEADING_HWFN(edev), ptt,
				      &dev_info->mfw_rev, NULL);
	}

	return 0;
}

int
qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
{
	uint8_t queues = 0;
	int i;

	memset(info, 0, sizeof(*info));

	info->num_tc = 1 /* @@@TBD aelior MULTI_COS */;

	if (IS_PF(edev)) {
		int max_vf_vlan_filters = 0;

		info->num_queues = 0;
		for_each_hwfn(edev, i)
			info->num_queues +=
				FEAT_NUM(&edev->hwfns[i], ECORE_PF_L2_QUE);

		if (edev->p_iov_info)
			max_vf_vlan_filters = edev->p_iov_info->total_vfs *
					      ECORE_ETH_VF_NUM_VLAN_FILTERS;
		info->num_vlan_filters = RESC_NUM(&edev->hwfns[0], ECORE_VLAN) -
					 max_vf_vlan_filters;

		rte_memcpy(&info->port_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
			   ETHER_ADDR_LEN);
	} else {
		ecore_vf_get_num_rxqs(ECORE_LEADING_HWFN(edev),
				      &info->num_queues);
		if (edev->num_hwfns > 1) {
			ecore_vf_get_num_rxqs(&edev->hwfns[1], &queues);
			info->num_queues += queues;
		}

		ecore_vf_get_num_vlan_filters(&edev->hwfns[0],
					      (u8 *)&info->num_vlan_filters);

		ecore_vf_get_port_mac(&edev->hwfns[0],
				      (uint8_t *)&info->port_mac);

		info->is_legacy = ecore_vf_get_pre_fp_hsi(&edev->hwfns[0]);
	}

	qed_fill_dev_info(edev, &info->common);

	if (IS_VF(edev))
		memset(&info->common.hw_mac, 0, ETHER_ADDR_LEN);

	return 0;
}

static void qed_set_name(struct ecore_dev *edev, char name[NAME_SIZE])
{
	int i;

	rte_memcpy(edev->name, name, NAME_SIZE);
	for_each_hwfn(edev, i) {
		snprintf(edev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
	}
}

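/* L2-queue status blocks are interleaved across the engines of a CMT
 * device; e.g. with two hwfns, sb_id 5 maps to hwfn 1 as relative SB 2.
 */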
static uint32_t
qed_sb_init(struct ecore_dev *edev, struct ecore_sb_info *sb_info,
	    void *sb_virt_addr, dma_addr_t sb_phy_addr,
	    uint16_t sb_id, enum qed_sb_type type)
{
	struct ecore_hwfn *p_hwfn;
	int hwfn_index;
	uint16_t rel_sb_id;
	uint8_t n_hwfns;
	uint32_t rc;

	/* RoCE uses a single engine and CMT uses two engines. When using
	 * both we force only a single engine. Storage uses only engine 0 too.
	 */
	if (type == QED_SB_TYPE_L2_QUEUE)
		n_hwfns = edev->num_hwfns;
	else
		n_hwfns = 1;

	hwfn_index = sb_id % n_hwfns;
	p_hwfn = &edev->hwfns[hwfn_index];
	rel_sb_id = sb_id / n_hwfns;

	DP_INFO(edev, "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		hwfn_index, rel_sb_id, sb_id);

	rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
			       sb_virt_addr, sb_phy_addr, rel_sb_id);

	return rc;
}

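/* Gather the current link state: a PF reads the MCP copies, a VF its
 * bulletin board.
 */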
static void qed_fill_link(struct ecore_hwfn *hwfn,
			  struct qed_link_output *if_link)
{
	struct ecore_mcp_link_params params;
	struct ecore_mcp_link_state link;
	struct ecore_mcp_link_capabilities link_caps;
	uint8_t change = 0;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (IS_PF(hwfn->p_dev)) {
		rte_memcpy(&params, ecore_mcp_get_link_params(hwfn),
			   sizeof(params));
		rte_memcpy(&link, ecore_mcp_get_link_state(hwfn), sizeof(link));
		rte_memcpy(&link_caps, ecore_mcp_get_link_capabilities(hwfn),
			   sizeof(link_caps));
	} else {
		ecore_vf_read_bulletin(hwfn, &change);
		ecore_vf_get_link_params(hwfn, &params);
		ecore_vf_get_link_state(hwfn, &link);
		ecore_vf_get_link_caps(hwfn, &link_caps);
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up) {
		if_link->link_up = true;
		if_link->speed = link.speed;
	}

	if_link->duplex = QEDE_DUPLEX_FULL;

	/* Fill up the native advertised speed cap mask */
	if_link->adv_speed = params.speed.advertised_speeds;

	if (params.speed.autoneg)
		if_link->supported_caps |= QEDE_SUPPORTED_AUTONEG;

	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		if_link->supported_caps |= QEDE_SUPPORTED_PAUSE;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;

	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;

	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
}

static void
qed_get_current_link(struct ecore_dev *edev, struct qed_link_output *if_link)
{
#ifdef CONFIG_QED_SRIOV
	int i;
#endif

	qed_fill_link(&edev->hwfns[0], if_link);

#ifdef CONFIG_QED_SRIOV
	for_each_hwfn(edev, i)
		qed_inform_vf_link_state(&edev->hwfns[i]);
#endif
}

static int qed_set_link(struct ecore_dev *edev, struct qed_link_params *params)
{
	struct ecore_hwfn *hwfn;
	struct ecore_ptt *ptt;
	struct ecore_mcp_link_params *link_params;
	int rc;

	if (IS_VF(edev))
		return 0;

	/* The link should be set only once per PF */
	hwfn = &edev->hwfns[0];

	ptt = ecore_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = ecore_mcp_get_link_params(hwfn);
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		link_params->speed.autoneg = params->autoneg;

	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		link_params->pause.autoneg =
			!!(params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE);
		link_params->pause.forced_rx =
			!!(params->pause_config & QED_LINK_PAUSE_RX_ENABLE);
		link_params->pause.forced_tx =
			!!(params->pause_config & QED_LINK_PAUSE_TX_ENABLE);
	}

	rc = ecore_mcp_set_link(hwfn, ptt, params->link_up);

	ecore_ptt_release(hwfn, ptt);

	return rc;
}

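/* Re-read the link state following a bulletin change */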
void qed_link_update(struct ecore_hwfn *hwfn)
{
	struct qed_link_output if_link;

	qed_fill_link(hwfn, &if_link);
}

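/* Ask the management FW to drain pending NIG transactions on every
 * hwfn; a VF has nothing to drain.
 */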
static int qed_drain(struct ecore_dev *edev)
{
	struct ecore_hwfn *hwfn;
	struct ecore_ptt *ptt;
	int i, rc;

	if (IS_VF(edev))
		return 0;

	for_each_hwfn(edev, i) {
		hwfn = &edev->hwfns[i];
		ptt = ecore_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, true, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = ecore_mcp_drain(hwfn, ptt);
		/* Release the PTT before bailing out on error */
		ecore_ptt_release(hwfn, ptt);
		if (rc)
			return rc;
	}

	return 0;
}

static int qed_nic_stop(struct ecore_dev *edev)
{
	int i, rc;

	rc = ecore_hw_stop(edev);
	for (i = 0; i < edev->num_hwfns; i++) {
		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled)
			p_hwfn->b_sp_dpc_enabled = false;
	}
	return rc;
}

static int qed_slowpath_stop(struct ecore_dev *edev)
{
	if (!edev)
		return -ENODEV;

	if (IS_PF(edev)) {
#ifdef CONFIG_ECORE_ZIPPED_FW
		qed_free_stream_mem(edev);
#endif

#ifdef CONFIG_QED_SRIOV
		if (IS_QED_ETH_IF(edev))
			qed_sriov_disable(edev, true);
#endif
	}

	qed_nic_stop(edev);

	ecore_resc_free(edev);
	qed_stop_iov_task(edev);

	return 0;
}

static void qed_remove(struct ecore_dev *edev)
{
	if (!edev)
		return;

	ecore_hw_remove(edev);
}

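/* Report the driver's active/disabled state to the management FW */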
static int qed_send_drv_state(struct ecore_dev *edev, bool active)
{
	struct ecore_hwfn *hwfn = ECORE_LEADING_HWFN(edev);
	struct ecore_ptt *ptt;
	int status = 0;

	ptt = ecore_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = ecore_mcp_ov_update_driver_state(hwfn, ptt, active ?
						  ECORE_OV_DRIVER_STATE_ACTIVE :
						  ECORE_OV_DRIVER_STATE_DISABLED);

	ecore_ptt_release(hwfn, ptt);

	return status;
}

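/* Snapshot a status block's producer/consumer state for debugging */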
static int qed_get_sb_info(struct ecore_dev *edev, struct ecore_sb_info *sb,
			   u16 qid, struct ecore_sb_info_dbg *sb_dbg)
{
	struct ecore_hwfn *hwfn = &edev->hwfns[qid % edev->num_hwfns];
	struct ecore_ptt *ptt;
	int rc;

	if (IS_VF(edev))
		return -EINVAL;

	ptt = ecore_ptt_acquire(hwfn);
	if (!ptt) {
		DP_NOTICE(hwfn, true, "Can't acquire PTT\n");
		return -EAGAIN;
	}

	memset(sb_dbg, 0, sizeof(*sb_dbg));
	rc = ecore_int_get_sb_dbg(hwfn, ptt, sb, sb_dbg);

	ecore_ptt_release(hwfn, ptt);
	return rc;
}

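/* Callback table through which the qede ethdev layer drives the
 * common qed operations above.
 */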
const struct qed_common_ops qed_common_ops_pass = {
	INIT_STRUCT_FIELD(probe, &qed_probe),
	INIT_STRUCT_FIELD(update_pf_params, &qed_update_pf_params),
	INIT_STRUCT_FIELD(slowpath_start, &qed_slowpath_start),
	INIT_STRUCT_FIELD(set_name, &qed_set_name),
	INIT_STRUCT_FIELD(chain_alloc, &ecore_chain_alloc),
	INIT_STRUCT_FIELD(chain_free, &ecore_chain_free),
	INIT_STRUCT_FIELD(sb_init, &qed_sb_init),
	INIT_STRUCT_FIELD(get_link, &qed_get_current_link),
	INIT_STRUCT_FIELD(set_link, &qed_set_link),
	INIT_STRUCT_FIELD(drain, &qed_drain),
	INIT_STRUCT_FIELD(slowpath_stop, &qed_slowpath_stop),
	INIT_STRUCT_FIELD(remove, &qed_remove),
	INIT_STRUCT_FIELD(send_drv_state, &qed_send_drv_state),
};
739