xref: /dpdk/drivers/net/qede/base/ecore_sriov.c (revision d80e42cce4c7017ed8c99dabb8ae444a492acc1c)
1 /*
2  * Copyright (c) 2016 - 2018 Cavium Inc.
3  * All rights reserved.
4  * www.cavium.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8 
9 #include "bcm_osal.h"
10 #include "ecore.h"
11 #include "reg_addr.h"
12 #include "ecore_sriov.h"
13 #include "ecore_status.h"
14 #include "ecore_hw.h"
15 #include "ecore_hw_defs.h"
16 #include "ecore_int.h"
17 #include "ecore_hsi_eth.h"
18 #include "ecore_l2.h"
19 #include "ecore_vfpf_if.h"
20 #include "ecore_rt_defs.h"
21 #include "ecore_init_ops.h"
22 #include "ecore_gtt_reg_addr.h"
23 #include "ecore_iro.h"
24 #include "ecore_mcp.h"
25 #include "ecore_cxt.h"
26 #include "ecore_vf.h"
27 #include "ecore_init_fw_funcs.h"
28 #include "ecore_sp_commands.h"
29 
30 static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
31 						  u8 opcode,
32 						  __le16 echo,
33 						  union event_ring_data *data,
34 						  u8 fw_return_code);
35 
36 const char *ecore_channel_tlvs_string[] = {
37 	"CHANNEL_TLV_NONE",	/* ends tlv sequence */
38 	"CHANNEL_TLV_ACQUIRE",
39 	"CHANNEL_TLV_VPORT_START",
40 	"CHANNEL_TLV_VPORT_UPDATE",
41 	"CHANNEL_TLV_VPORT_TEARDOWN",
42 	"CHANNEL_TLV_START_RXQ",
43 	"CHANNEL_TLV_START_TXQ",
44 	"CHANNEL_TLV_STOP_RXQ",
45 	"CHANNEL_TLV_STOP_TXQ",
46 	"CHANNEL_TLV_UPDATE_RXQ",
47 	"CHANNEL_TLV_INT_CLEANUP",
48 	"CHANNEL_TLV_CLOSE",
49 	"CHANNEL_TLV_RELEASE",
50 	"CHANNEL_TLV_LIST_END",
51 	"CHANNEL_TLV_UCAST_FILTER",
52 	"CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
53 	"CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
54 	"CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
55 	"CHANNEL_TLV_VPORT_UPDATE_MCAST",
56 	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
57 	"CHANNEL_TLV_VPORT_UPDATE_RSS",
58 	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
59 	"CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
60 	"CHANNEL_TLV_UPDATE_TUNN_PARAM",
61 	"CHANNEL_TLV_COALESCE_UPDATE",
62 	"CHANNEL_TLV_QID",
63 	"CHANNEL_TLV_COALESCE_READ",
64 	"CHANNEL_TLV_MAX"
65 };
66 
67 static u8 ecore_vf_calculate_legacy(struct ecore_vf_info *p_vf)
68 {
69 	u8 legacy = 0;
70 
71 	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
72 	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
73 		legacy |= ECORE_QCID_LEGACY_VF_RX_PROD;
74 
75 	if (!(p_vf->acquire.vfdev_info.capabilities &
76 	     VFPF_ACQUIRE_CAP_QUEUE_QIDS))
77 		legacy |= ECORE_QCID_LEGACY_VF_CID;
78 
79 	return legacy;
80 }
81 
82 /* IOV ramrods */
83 static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
84 					      struct ecore_vf_info *p_vf)
85 {
86 	struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
87 	struct ecore_spq_entry *p_ent = OSAL_NULL;
88 	struct ecore_sp_init_data init_data;
89 	enum _ecore_status_t rc = ECORE_NOTIMPL;
90 	u8 fp_minor;
91 
92 	/* Get SPQ entry */
93 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
94 	init_data.cid = ecore_spq_get_cid(p_hwfn);
95 	init_data.opaque_fid = p_vf->opaque_fid;
96 	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
97 
98 	rc = ecore_sp_init_request(p_hwfn, &p_ent,
99 				   COMMON_RAMROD_VF_START,
100 				   PROTOCOLID_COMMON, &init_data);
101 	if (rc != ECORE_SUCCESS)
102 		return rc;
103 
104 	p_ramrod = &p_ent->ramrod.vf_start;
105 
106 	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
107 	p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid);
108 
109 	switch (p_hwfn->hw_info.personality) {
110 	case ECORE_PCI_ETH:
111 		p_ramrod->personality = PERSONALITY_ETH;
112 		break;
113 	case ECORE_PCI_ETH_ROCE:
114 	case ECORE_PCI_ETH_IWARP:
115 		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
116 		break;
117 	default:
118 		DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
119 			  p_hwfn->hw_info.personality);
120 		return ECORE_INVAL;
121 	}
122 
123 	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
124 	if (fp_minor > ETH_HSI_VER_MINOR &&
125 	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
126 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
127 			   "VF [%d] - Requested fp hsi %02x.%02x which is"
128 			   " slightly newer than PF's %02x.%02x; Configuring"
129 			   " PFs version\n",
130 			   p_vf->abs_vf_id,
131 			   ETH_HSI_VER_MAJOR, fp_minor,
132 			   ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
133 		fp_minor = ETH_HSI_VER_MINOR;
134 	}
135 
136 	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
137 	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
138 
139 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
140 		   "VF[%d] - Starting using HSI %02x.%02x\n",
141 		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
142 
143 	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
144 }
145 
146 static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
147 					     u32 concrete_vfid,
148 					     u16 opaque_vfid)
149 {
150 	struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
151 	struct ecore_spq_entry *p_ent = OSAL_NULL;
152 	struct ecore_sp_init_data init_data;
153 	enum _ecore_status_t rc = ECORE_NOTIMPL;
154 
155 	/* Get SPQ entry */
156 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
157 	init_data.cid = ecore_spq_get_cid(p_hwfn);
158 	init_data.opaque_fid = opaque_vfid;
159 	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
160 
161 	rc = ecore_sp_init_request(p_hwfn, &p_ent,
162 				   COMMON_RAMROD_VF_STOP,
163 				   PROTOCOLID_COMMON, &init_data);
164 	if (rc != ECORE_SUCCESS)
165 		return rc;
166 
167 	p_ramrod = &p_ent->ramrod.vf_stop;
168 
169 	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
170 
171 	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
172 }
173 
174 bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
175 			     bool b_enabled_only, bool b_non_malicious)
176 {
177 	if (!p_hwfn->pf_iov_info) {
178 		DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
179 		return false;
180 	}
181 
182 	if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) ||
183 	    (rel_vf_id < 0))
184 		return false;
185 
186 	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
187 	    b_enabled_only)
188 		return false;
189 
190 	if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
191 	    b_non_malicious)
192 		return false;
193 
194 	return true;
195 }
196 
197 struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
198 					    u16 relative_vf_id,
199 					    bool b_enabled_only)
200 {
201 	struct ecore_vf_info *vf = OSAL_NULL;
202 
203 	if (!p_hwfn->pf_iov_info) {
204 		DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
205 		return OSAL_NULL;
206 	}
207 
208 	if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id,
209 				    b_enabled_only, false))
210 		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
211 	else
212 		DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
213 		       relative_vf_id);
214 
215 	return vf;
216 }
217 
218 static struct ecore_queue_cid *
219 ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue)
220 {
221 	int i;
222 
223 	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
224 		if (p_queue->cids[i].p_cid &&
225 		    !p_queue->cids[i].b_is_tx)
226 			return p_queue->cids[i].p_cid;
227 	}
228 
229 	return OSAL_NULL;
230 }
231 
232 enum ecore_iov_validate_q_mode {
233 	ECORE_IOV_VALIDATE_Q_NA,
234 	ECORE_IOV_VALIDATE_Q_ENABLE,
235 	ECORE_IOV_VALIDATE_Q_DISABLE,
236 };
237 
238 static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf,
239 					  u16 qid,
240 					  enum ecore_iov_validate_q_mode mode,
241 					  bool b_is_tx)
242 {
243 	int i;
244 
245 	if (mode == ECORE_IOV_VALIDATE_Q_NA)
246 		return true;
247 
248 	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
249 		struct ecore_vf_queue_cid *p_qcid;
250 
251 		p_qcid = &p_vf->vf_queues[qid].cids[i];
252 
253 		if (p_qcid->p_cid == OSAL_NULL)
254 			continue;
255 
256 		if (p_qcid->b_is_tx != b_is_tx)
257 			continue;
258 
259 		/* Found. It's enabled. */
260 		return (mode == ECORE_IOV_VALIDATE_Q_ENABLE);
261 	}
262 
263 	/* In case we haven't found any valid cid, then it's disabled */
264 	return (mode == ECORE_IOV_VALIDATE_Q_DISABLE);
265 }
266 
267 static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
268 				   struct ecore_vf_info *p_vf,
269 				   u16 rx_qid,
270 				   enum ecore_iov_validate_q_mode mode)
271 {
272 	if (rx_qid >= p_vf->num_rxqs) {
273 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
274 			   "VF[0x%02x] - can't touch Rx queue[%04x];"
275 			   " Only 0x%04x are allocated\n",
276 			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
277 		return false;
278 	}
279 
280 	return ecore_iov_validate_queue_mode(p_vf, rx_qid, mode, false);
281 }
282 
283 static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
284 				   struct ecore_vf_info *p_vf,
285 				   u16 tx_qid,
286 				   enum ecore_iov_validate_q_mode mode)
287 {
288 	if (tx_qid >= p_vf->num_txqs) {
289 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
290 			   "VF[0x%02x] - can't touch Tx queue[%04x];"
291 			   " Only 0x%04x are allocated\n",
292 			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
293 		return false;
294 	}
295 
296 	return ecore_iov_validate_queue_mode(p_vf, tx_qid, mode, true);
297 }
298 
299 static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
300 				  struct ecore_vf_info *p_vf,
301 				  u16 sb_idx)
302 {
303 	int i;
304 
305 	for (i = 0; i < p_vf->num_sbs; i++)
306 		if (p_vf->igu_sbs[i] == sb_idx)
307 			return true;
308 
309 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
310 		   "VF[0%02x] - tried using sb_idx %04x which doesn't exist as"
311 		   " one of its 0x%02x SBs\n",
312 		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
313 
314 	return false;
315 }
316 
317 /* Is there at least 1 queue open? */
318 static bool ecore_iov_validate_active_rxq(struct ecore_vf_info *p_vf)
319 {
320 	u8 i;
321 
322 	for (i = 0; i < p_vf->num_rxqs; i++)
323 		if (ecore_iov_validate_queue_mode(p_vf, i,
324 						  ECORE_IOV_VALIDATE_Q_ENABLE,
325 						  false))
326 			return true;
327 
328 	return false;
329 }
330 
331 static bool ecore_iov_validate_active_txq(struct ecore_vf_info *p_vf)
332 {
333 	u8 i;
334 
335 	for (i = 0; i < p_vf->num_txqs; i++)
336 		if (ecore_iov_validate_queue_mode(p_vf, i,
337 						  ECORE_IOV_VALIDATE_Q_ENABLE,
338 						  true))
339 			return true;
340 
341 	return false;
342 }
343 
344 enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
345 						int vfid,
346 						struct ecore_ptt *p_ptt)
347 {
348 	struct ecore_bulletin_content *p_bulletin;
349 	int crc_size = sizeof(p_bulletin->crc);
350 	struct ecore_dmae_params params;
351 	struct ecore_vf_info *p_vf;
352 
353 	p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
354 	if (!p_vf)
355 		return ECORE_INVAL;
356 
357 	/* TODO - check VF is in a state where it can accept message */
358 	if (!p_vf->vf_bulletin)
359 		return ECORE_INVAL;
360 
361 	p_bulletin = p_vf->bulletin.p_virt;
362 
363 	/* Increment bulletin board version and compute crc */
364 	p_bulletin->version++;
365 	p_bulletin->crc = OSAL_CRC32(0, (u8 *)p_bulletin + crc_size,
366 				     p_vf->bulletin.size - crc_size);
367 
368 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
369 		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
370 		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
371 
372 	/* propagate bulletin board via dmae to vm memory */
373 	OSAL_MEMSET(&params, 0, sizeof(params));
374 	params.flags = ECORE_DMAE_FLAG_VF_DST;
375 	params.dst_vfid = p_vf->abs_vf_id;
376 	return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
377 				    p_vf->vf_bulletin, p_vf->bulletin.size / 4,
378 				    &params);
379 }
380 
381 static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
382 {
383 	struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;
384 	int pos = iov->pos;
385 
386 	DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
387 	OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
388 
389 	OSAL_PCI_READ_CONFIG_WORD(p_dev,
390 				  pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
391 	OSAL_PCI_READ_CONFIG_WORD(p_dev,
392 				  pos + PCI_SRIOV_INITIAL_VF,
393 				  &iov->initial_vfs);
394 
395 	OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
396 	if (iov->num_vfs) {
397 		/* @@@TODO - in future we might want to add an OSAL here to
398 		 * allow each OS to decide on its own how to act.
399 		 */
400 		DP_VERBOSE(p_dev, ECORE_MSG_IOV,
401 			   "Number of VFs are already set to non-zero value."
402 			   " Ignoring PCI configuration value\n");
403 		iov->num_vfs = 0;
404 	}
405 
406 	OSAL_PCI_READ_CONFIG_WORD(p_dev,
407 				  pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
408 
409 	OSAL_PCI_READ_CONFIG_WORD(p_dev,
410 				  pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
411 
412 	OSAL_PCI_READ_CONFIG_WORD(p_dev,
413 				  pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
414 
415 	OSAL_PCI_READ_CONFIG_DWORD(p_dev,
416 				   pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
417 
418 	OSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + PCI_SRIOV_CAP, &iov->cap);
419 
420 	OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
421 
422 	DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info: nres %d, cap 0x%x,"
423 		   "ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
424 		   " stride %d, page size 0x%x\n",
425 		   iov->nres, iov->cap, iov->ctrl,
426 		   iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
427 		   iov->offset, iov->stride, iov->pgsz);
428 
429 	/* Some sanity checks */
430 	if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
431 	    iov->total_vfs > NUM_OF_VFS(p_dev)) {
432 		/* This can happen only due to a bug. In this case we set
433 		 * num_vfs to zero to avoid memory corruption in the code that
434 		 * assumes the maximum number of VFs
435 		 */
436 		DP_NOTICE(p_dev, false,
437 			  "IOV: Unexpected number of vfs set: %d"
438 			  " setting num_vf to zero\n",
439 			  iov->num_vfs);
440 
441 		iov->num_vfs = 0;
442 		iov->total_vfs = 0;
443 	}
444 
445 	return ECORE_SUCCESS;
446 }
447 
448 static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
449 {
450 	struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
451 	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
452 	struct ecore_bulletin_content *p_bulletin_virt;
453 	dma_addr_t req_p, rply_p, bulletin_p;
454 	union pfvf_tlvs *p_reply_virt_addr;
455 	union vfpf_tlvs *p_req_virt_addr;
456 	u8 idx = 0;
457 
458 	OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
459 
460 	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
461 	req_p = p_iov_info->mbx_msg_phys_addr;
462 	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
463 	rply_p = p_iov_info->mbx_reply_phys_addr;
464 	p_bulletin_virt = p_iov_info->p_bulletins;
465 	bulletin_p = p_iov_info->bulletins_phys;
466 	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
467 		DP_ERR(p_hwfn,
468 		       "ecore_iov_setup_vfdb called without alloc mem first\n");
469 		return;
470 	}
471 
472 	for (idx = 0; idx < p_iov->total_vfs; idx++) {
473 		struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
474 		u32 concrete;
475 
476 		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
477 		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
478 		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
479 		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
480 
481 #ifdef CONFIG_ECORE_SW_CHANNEL
482 		vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
483 		vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
484 #endif
485 		vf->state = VF_STOPPED;
486 		vf->b_init = false;
487 
488 		vf->bulletin.phys = idx *
489 		    sizeof(struct ecore_bulletin_content) + bulletin_p;
490 		vf->bulletin.p_virt = p_bulletin_virt + idx;
491 		vf->bulletin.size = sizeof(struct ecore_bulletin_content);
492 
493 		vf->relative_vf_id = idx;
494 		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
495 		concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
496 		vf->concrete_fid = concrete;
497 		/* TODO - need to devise a better way of getting opaque */
498 		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
499 		    (vf->abs_vf_id << 8);
500 
501 		vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
502 		vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
503 	}
504 }
505 
506 static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
507 {
508 	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
509 	void **p_v_addr;
510 	u16 num_vfs = 0;
511 
512 	num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
513 
514 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
515 		   "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);
516 
517 	/* Allocate PF Mailbox buffer (per-VF) */
518 	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
519 	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
520 	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
521 					    &p_iov_info->mbx_msg_phys_addr,
522 					    p_iov_info->mbx_msg_size);
523 	if (!*p_v_addr)
524 		return ECORE_NOMEM;
525 
526 	/* Allocate PF Mailbox Reply buffer (per-VF) */
527 	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
528 	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
529 	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
530 					    &p_iov_info->mbx_reply_phys_addr,
531 					    p_iov_info->mbx_reply_size);
532 	if (!*p_v_addr)
533 		return ECORE_NOMEM;
534 
535 	p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
536 	    num_vfs;
537 	p_v_addr = &p_iov_info->p_bulletins;
538 	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
539 					    &p_iov_info->bulletins_phys,
540 					    p_iov_info->bulletins_size);
541 	if (!*p_v_addr)
542 		return ECORE_NOMEM;
543 
544 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
545 		   "PF's Requests mailbox [%p virt 0x%lx phys],  "
546 		   "Response mailbox [%p virt 0x%lx phys] Bulletinsi"
547 		   " [%p virt 0x%lx phys]\n",
548 		   p_iov_info->mbx_msg_virt_addr,
549 		   (unsigned long)p_iov_info->mbx_msg_phys_addr,
550 		   p_iov_info->mbx_reply_virt_addr,
551 		   (unsigned long)p_iov_info->mbx_reply_phys_addr,
552 		   p_iov_info->p_bulletins,
553 		   (unsigned long)p_iov_info->bulletins_phys);
554 
555 	return ECORE_SUCCESS;
556 }
557 
558 static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
559 {
560 	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
561 
562 	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
563 		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
564 				       p_iov_info->mbx_msg_virt_addr,
565 				       p_iov_info->mbx_msg_phys_addr,
566 				       p_iov_info->mbx_msg_size);
567 
568 	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
569 		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
570 				       p_iov_info->mbx_reply_virt_addr,
571 				       p_iov_info->mbx_reply_phys_addr,
572 				       p_iov_info->mbx_reply_size);
573 
574 	if (p_iov_info->p_bulletins)
575 		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
576 				       p_iov_info->p_bulletins,
577 				       p_iov_info->bulletins_phys,
578 				       p_iov_info->bulletins_size);
579 }
580 
581 enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
582 {
583 	struct ecore_pf_iov *p_sriov;
584 
585 	if (!IS_PF_SRIOV(p_hwfn)) {
586 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
587 			   "No SR-IOV - no need for IOV db\n");
588 		return ECORE_SUCCESS;
589 	}
590 
591 	p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
592 	if (!p_sriov) {
593 		DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sriov'\n");
594 		return ECORE_NOMEM;
595 	}
596 
597 	p_hwfn->pf_iov_info = p_sriov;
598 
599 	ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
600 				    ecore_sriov_eqe_event);
601 
602 	return ecore_iov_allocate_vfdb(p_hwfn);
603 }
604 
605 void ecore_iov_setup(struct ecore_hwfn *p_hwfn)
606 {
607 	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
608 		return;
609 
610 	ecore_iov_setup_vfdb(p_hwfn);
611 }
612 
613 void ecore_iov_free(struct ecore_hwfn *p_hwfn)
614 {
615 	ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
616 
617 	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
618 		ecore_iov_free_vfdb(p_hwfn);
619 		OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
620 	}
621 }
622 
623 void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
624 {
625 	OSAL_FREE(p_dev, p_dev->p_iov_info);
626 }
627 
628 enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
629 {
630 	struct ecore_dev *p_dev = p_hwfn->p_dev;
631 	int pos;
632 	enum _ecore_status_t rc;
633 
634 	if (IS_VF(p_hwfn->p_dev))
635 		return ECORE_SUCCESS;
636 
637 	/* Learn the PCI configuration */
638 	pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
639 					   PCI_EXT_CAP_ID_SRIOV);
640 	if (!pos) {
641 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
642 		return ECORE_SUCCESS;
643 	}
644 
645 	/* Allocate a new struct for IOV information */
646 	/* TODO - can change to VALLOC when its available */
647 	p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
648 					sizeof(*p_dev->p_iov_info));
649 	if (!p_dev->p_iov_info) {
650 		DP_NOTICE(p_hwfn, false,
651 			  "Can't support IOV due to lack of memory\n");
652 		return ECORE_NOMEM;
653 	}
654 	p_dev->p_iov_info->pos = pos;
655 
656 	rc = ecore_iov_pci_cfg_info(p_dev);
657 	if (rc)
658 		return rc;
659 
660 	/* We want PF IOV to be synonymous with the existence of p_iov_info;
661 	 * In case the capability is published but there are no VFs, simply
662 	 * de-allocate the struct.
663 	 */
664 	if (!p_dev->p_iov_info->total_vfs) {
665 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
666 			   "IOV capabilities, but no VFs are published\n");
667 		OSAL_FREE(p_dev, p_dev->p_iov_info);
668 		return ECORE_SUCCESS;
669 	}
670 
671 	/* First VF index based on offset is tricky:
672 	 *  - If ARI is supported [likely], offset - (16 - pf_id) would
673 	 *    provide the number for eng0. The 2nd engine's VFs would begin
674 	 *    after the first engine's VFs.
675 	 *  - If !ARI, VFs would start on the next device,
676 	 *    so offset - (256 - pf_id) would provide the number.
677 	 * Utilize the fact that (256 - pf_id) is achieved only by the latter
678 	 * to differentiate between the two.
679 	 */
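	/* Editor's note - an illustrative example of the calculation below,
	 * assuming a typical ARI layout where a PF's VFs are exposed starting
	 * at function 16: with abs_pf_id = 0 and offset = 16 the ARI branch
	 * gives first_vf_in_pf = 16 + 0 - 16 = 0. In the !ARI branch,
	 * abs_pf_id = 0 with offset = 256 likewise gives 256 + 0 - 256 = 0.
	 */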
680 
681 	if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
682 		u32 first = p_hwfn->p_dev->p_iov_info->offset +
683 			    p_hwfn->abs_pf_id - 16;
684 
685 		p_dev->p_iov_info->first_vf_in_pf = first;
686 
687 		if (ECORE_PATH_ID(p_hwfn))
688 			p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
689 	} else {
690 		u32 first = p_hwfn->p_dev->p_iov_info->offset +
691 			    p_hwfn->abs_pf_id - 256;
692 
693 		p_dev->p_iov_info->first_vf_in_pf = first;
694 	}
695 
696 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
697 		   "First VF in hwfn 0x%08x\n",
698 		   p_dev->p_iov_info->first_vf_in_pf);
699 
700 	return ECORE_SUCCESS;
701 }
702 
703 static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
704 				       bool b_fail_malicious)
705 {
706 	/* Check PF supports sriov */
707 	if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
708 	    !IS_PF_SRIOV_ALLOC(p_hwfn))
709 		return false;
710 
711 	/* Check VF validity */
712 	if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
713 		return false;
714 
715 	return true;
716 }
717 
718 bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
719 {
720 	return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true);
721 }
722 
723 void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
724 				 u16 rel_vf_id, u8 to_disable)
725 {
726 	struct ecore_vf_info *vf;
727 	int i;
728 
729 	for_each_hwfn(p_dev, i) {
730 		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
731 
732 		vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
733 		if (!vf)
734 			continue;
735 
736 		vf->to_disable = to_disable;
737 	}
738 }
739 
740 void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
741 				  u8 to_disable)
742 {
743 	u16 i;
744 
745 	if (!IS_ECORE_SRIOV(p_dev))
746 		return;
747 
748 	for (i = 0; i < p_dev->p_iov_info->total_vfs; i++)
749 		ecore_iov_set_vf_to_disable(p_dev, i, to_disable);
750 }
751 
752 #ifndef LINUX_REMOVE
753 /* @@@TBD Consider taking outside of ecore... */
754 enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
755 					  u16		    vf_id,
756 					  void		    *ctx)
757 {
758 	enum _ecore_status_t rc = ECORE_SUCCESS;
759 	struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);
760 
761 	if (vf != OSAL_NULL) {
762 		vf->ctx = ctx;
763 #ifdef CONFIG_ECORE_SW_CHANNEL
764 		vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
765 #endif
766 	} else {
767 		rc = ECORE_UNKNOWN_ERROR;
768 	}
769 	return rc;
770 }
771 #endif
772 
773 static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn      *p_hwfn,
774 					 struct ecore_ptt	*p_ptt,
775 					 u8			abs_vfid)
776 {
777 	ecore_wr(p_hwfn, p_ptt,
778 		 PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
779 		 1 << (abs_vfid & 0x1f));
780 }
781 
782 static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
783 				   struct ecore_ptt *p_ptt,
784 				   struct ecore_vf_info *vf)
785 {
786 	int i;
787 
788 	/* Set VF masks and configuration - pretend */
789 	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
790 
791 	ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
792 
793 	/* unpretend */
794 	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
795 
796 	/* iterate over all queues, clear sb consumer */
797 	for (i = 0; i < vf->num_sbs; i++)
798 		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
799 						  vf->igu_sbs[i],
800 						  vf->opaque_fid, true);
801 }
802 
803 static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
804 				     struct ecore_ptt *p_ptt,
805 				     struct ecore_vf_info *vf, bool enable)
806 {
807 	u32 igu_vf_conf;
808 
809 	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
810 
811 	igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
812 
813 	if (enable)
814 		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
815 	else
816 		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
817 
818 	ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
819 
820 	/* unpretend */
821 	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
822 }
823 
824 static enum _ecore_status_t
825 ecore_iov_enable_vf_access_msix(struct ecore_hwfn *p_hwfn,
826 				struct ecore_ptt *p_ptt,
827 				u8 abs_vf_id,
828 				u8 num_sbs)
829 {
830 	u8 current_max = 0;
831 	int i;
832 
833 	/* If client overrides this, don't do anything */
834 	if (p_hwfn->p_dev->b_dont_override_vf_msix)
835 		return ECORE_SUCCESS;
836 
837 	/* For AH onward, configuration is per-PF. Find the maximum SB count
838 	 * among all currently enabled child VFs, and set the number to that.
839 	 */
840 	if (!ECORE_IS_BB(p_hwfn->p_dev)) {
841 		ecore_for_each_vf(p_hwfn, i) {
842 			struct ecore_vf_info *p_vf;
843 
844 			p_vf  = ecore_iov_get_vf_info(p_hwfn, (u16)i, true);
845 			if (!p_vf)
846 				continue;
847 
848 			current_max = OSAL_MAX_T(u8, current_max,
849 						 p_vf->num_sbs);
850 		}
851 	}
852 
853 	if (num_sbs > current_max)
854 		return ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
855 						abs_vf_id, num_sbs);
856 
857 	return ECORE_SUCCESS;
858 }
859 
860 static enum _ecore_status_t
861 ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
862 			   struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
863 {
864 	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
865 	enum _ecore_status_t rc = ECORE_SUCCESS;
866 
867 	/* It's possible VF was previously considered malicious -
868 	 * clear the indication even if we're only going to disable VF.
869 	 */
870 	vf->b_malicious = false;
871 
872 	if (vf->to_disable)
873 		return ECORE_SUCCESS;
874 
875 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
876 		   "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id,
877 		   ECORE_VF_ABS_ID(p_hwfn, vf));
878 
879 	ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
880 				     ECORE_VF_ABS_ID(p_hwfn, vf));
881 
882 	ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
883 
884 	rc = ecore_iov_enable_vf_access_msix(p_hwfn, p_ptt,
885 					     vf->abs_vf_id, vf->num_sbs);
886 	if (rc != ECORE_SUCCESS)
887 		return rc;
888 
889 	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
890 
891 	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
892 	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
893 
894 	ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
895 		       p_hwfn->hw_info.hw_mode);
896 
897 	/* unpretend */
898 	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
899 
900 	vf->state = VF_FREE;
901 
902 	return rc;
903 }
904 
905 /**
906  *
907  * @brief ecore_iov_config_perm_table - configure the permission
908  *      zone table.
909  *      In E4, queue zone permission table size is 320x9. There
910  *      are 320 VF queues for single engine device (256 for dual
911  *      engine device), and each entry has the following format:
912  *      {Valid, VF[7:0]}
913  * @param p_hwfn
914  * @param p_ptt
915  * @param vf
916  * @param enable
917  */
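/* Editor's note - an illustrative entry value derived from the code below:
 * enabling queue-zone access for a VF whose abs_vf_id is 5 writes 0x105
 * (bit 8 = valid, low byte = VF id) to that queue zone's entry, while
 * disabling writes 0.
 */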
918 static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
919 					struct ecore_ptt *p_ptt,
920 					struct ecore_vf_info *vf, u8 enable)
921 {
922 	u32 reg_addr, val;
923 	u16 qzone_id = 0;
924 	int qid;
925 
926 	for (qid = 0; qid < vf->num_rxqs; qid++) {
927 		ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
928 				  &qzone_id);
929 
930 		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
931 		val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
932 		ecore_wr(p_hwfn, p_ptt, reg_addr, val);
933 	}
934 }
935 
936 static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
937 					struct ecore_ptt *p_ptt,
938 					struct ecore_vf_info *vf)
939 {
940 	/* Reset vf in IGU - interrupts are still disabled */
941 	ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
942 
943 	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
944 
945 	/* Permission Table */
946 	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
947 }
948 
949 static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
950 				     struct ecore_ptt *p_ptt,
951 				     struct ecore_vf_info *vf,
952 				     u16 num_rx_queues)
953 {
954 	struct ecore_igu_block *p_block;
955 	struct cau_sb_entry sb_entry;
956 	int qid = 0;
957 	u32 val = 0;
958 
959 	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
960 		num_rx_queues =
961 		(u16)p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
962 	p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;
963 
964 	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
965 	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
966 	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
967 
968 	for (qid = 0; qid < num_rx_queues; qid++) {
969 		p_block = ecore_get_igu_free_sb(p_hwfn, false);
970 		vf->igu_sbs[qid] = p_block->igu_sb_id;
971 		p_block->status &= ~ECORE_IGU_STATUS_FREE;
972 		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
973 
974 		ecore_wr(p_hwfn, p_ptt,
975 			 IGU_REG_MAPPING_MEMORY +
976 			 sizeof(u32) * p_block->igu_sb_id, val);
977 
978 		/* Configure igu sb in CAU which were marked valid */
979 		ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
980 					p_hwfn->rel_pf_id,
981 					vf->abs_vf_id, 1);
982 		ecore_dmae_host2grc(p_hwfn, p_ptt,
983 				    (u64)(osal_uintptr_t)&sb_entry,
984 				    CAU_REG_SB_VAR_MEMORY +
985 				    p_block->igu_sb_id * sizeof(u64), 2, 0);
986 	}
987 
988 	vf->num_sbs = (u8)num_rx_queues;
989 
990 	return vf->num_sbs;
991 }
992 
993 /**
994  *
995  * @brief The function invalidates all the VF entries;
996  *        technically this isn't required, but it is added for
997  *        cleanliness and ease of debugging in case a VF attempts to
998  *        produce an interrupt after it has been taken down.
999  *
1000  * @param p_hwfn
1001  * @param p_ptt
1002  * @param vf
1003  */
1004 static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
1005 				      struct ecore_ptt *p_ptt,
1006 				      struct ecore_vf_info *vf)
1007 {
1008 	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
1009 	int idx, igu_id;
1010 	u32 addr, val;
1011 
1012 	/* Invalidate igu CAM lines and mark them as free */
1013 	for (idx = 0; idx < vf->num_sbs; idx++) {
1014 		igu_id = vf->igu_sbs[idx];
1015 		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
1016 
1017 		val = ecore_rd(p_hwfn, p_ptt, addr);
1018 		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
1019 		ecore_wr(p_hwfn, p_ptt, addr, val);
1020 
1021 		p_info->entry[igu_id].status |= ECORE_IGU_STATUS_FREE;
1022 		p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
1023 	}
1024 
1025 	vf->num_sbs = 0;
1026 }
1027 
1028 void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
1029 			u16 vfid,
1030 			struct ecore_mcp_link_params *params,
1031 			struct ecore_mcp_link_state *link,
1032 			struct ecore_mcp_link_capabilities *p_caps)
1033 {
1034 	struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
1035 	struct ecore_bulletin_content *p_bulletin;
1036 
1037 	if (!p_vf)
1038 		return;
1039 
1040 	p_bulletin = p_vf->bulletin.p_virt;
1041 	p_bulletin->req_autoneg = params->speed.autoneg;
1042 	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
1043 	p_bulletin->req_forced_speed = params->speed.forced_speed;
1044 	p_bulletin->req_autoneg_pause = params->pause.autoneg;
1045 	p_bulletin->req_forced_rx = params->pause.forced_rx;
1046 	p_bulletin->req_forced_tx = params->pause.forced_tx;
1047 	p_bulletin->req_loopback = params->loopback_mode;
1048 
1049 	p_bulletin->link_up = link->link_up;
1050 	p_bulletin->speed = link->speed;
1051 	p_bulletin->full_duplex = link->full_duplex;
1052 	p_bulletin->autoneg = link->an;
1053 	p_bulletin->autoneg_complete = link->an_complete;
1054 	p_bulletin->parallel_detection = link->parallel_detection;
1055 	p_bulletin->pfc_enabled = link->pfc_enabled;
1056 	p_bulletin->partner_adv_speed = link->partner_adv_speed;
1057 	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
1058 	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
1059 	p_bulletin->partner_adv_pause = link->partner_adv_pause;
1060 	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
1061 
1062 	p_bulletin->capability_speed = p_caps->speed_capabilities;
1063 }
1064 
1065 enum _ecore_status_t
1066 ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
1067 			 struct ecore_ptt *p_ptt,
1068 			 struct ecore_iov_vf_init_params *p_params)
1069 {
1070 	struct ecore_mcp_link_capabilities link_caps;
1071 	struct ecore_mcp_link_params link_params;
1072 	struct ecore_mcp_link_state link_state;
1073 	u8 num_of_vf_available_chains  = 0;
1074 	struct ecore_vf_info *vf = OSAL_NULL;
1075 	u16 qid, num_irqs;
1076 	enum _ecore_status_t rc = ECORE_SUCCESS;
1077 	u32 cids;
1078 	u8 i;
1079 
1080 	vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
1081 	if (!vf) {
1082 		DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
1083 		return ECORE_UNKNOWN_ERROR;
1084 	}
1085 
1086 	if (vf->b_init) {
1087 		DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
1088 			  p_params->rel_vf_id);
1089 		return ECORE_INVAL;
1090 	}
1091 
1092 	/* Perform sanity checking on the requested vport/rss */
1093 	if (p_params->vport_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
1094 		DP_NOTICE(p_hwfn, true, "VF[%d] - can't use VPORT %02x\n",
1095 			  p_params->rel_vf_id, p_params->vport_id);
1096 		return ECORE_INVAL;
1097 	}
1098 
1099 	if ((p_params->num_queues > 1) &&
1100 	    (p_params->rss_eng_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG))) {
1101 		DP_NOTICE(p_hwfn, true, "VF[%d] - can't use RSS_ENG %02x\n",
1102 			  p_params->rel_vf_id, p_params->rss_eng_id);
1103 		return ECORE_INVAL;
1104 	}
1105 
1106 	/* TODO - remove this once we get confidence of change */
1107 	if (!p_params->vport_id) {
1108 		DP_NOTICE(p_hwfn, false,
1109 			  "VF[%d] - Unlikely that VF uses vport0. Forgotten?\n",
1110 			  p_params->rel_vf_id);
1111 	}
1112 	if ((!p_params->rss_eng_id) && (p_params->num_queues > 1)) {
1113 		DP_NOTICE(p_hwfn, false,
1114 			  "VF[%d] - Unlikely that VF uses RSS_eng0. Forgotten?\n",
1115 			  p_params->rel_vf_id);
1116 	}
1117 	vf->vport_id = p_params->vport_id;
1118 	vf->rss_eng_id = p_params->rss_eng_id;
1119 
1120 	/* Since it's possible to relocate SBs, it's a bit difficult to check
1121 	 * things here. Simply check whether the index falls in the range
1122 	 * belonging to the PF.
1123 	 */
1124 	for (i = 0; i < p_params->num_queues; i++) {
1125 		qid = p_params->req_rx_queue[i];
1126 		if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
1127 			DP_NOTICE(p_hwfn, true,
1128 				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0,,...,0x%04x] available\n",
1129 				  qid, p_params->rel_vf_id,
1130 				  (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
1131 			return ECORE_INVAL;
1132 		}
1133 
1134 		qid = p_params->req_tx_queue[i];
1135 		if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
1136 			DP_NOTICE(p_hwfn, true,
1137 				  "Can't enable Tx qid [%04x] for VF[%d]: qids [0,,...,0x%04x] available\n",
1138 				  qid, p_params->rel_vf_id,
1139 				  (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
1140 			return ECORE_INVAL;
1141 		}
1142 	}
1143 
1144 	/* Limit number of queues according to number of CIDs */
1145 	ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
1146 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1147 		   "VF[%d] - requesting to initialize for 0x%04x queues"
1148 		   " [0x%04x CIDs available]\n",
1149 		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
1150 	num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));
1151 
1152 	num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
1153 							       p_ptt,
1154 							       vf,
1155 							       num_irqs);
1156 	if (num_of_vf_available_chains == 0) {
1157 		DP_ERR(p_hwfn, "no available igu sbs\n");
1158 		return ECORE_NOMEM;
1159 	}
1160 
1161 	/* Choose queue number and index ranges */
1162 	vf->num_rxqs = num_of_vf_available_chains;
1163 	vf->num_txqs = num_of_vf_available_chains;
1164 
1165 	for (i = 0; i < vf->num_rxqs; i++) {
1166 		struct ecore_vf_queue *p_queue = &vf->vf_queues[i];
1167 
1168 		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
1169 		p_queue->fw_tx_qid = p_params->req_tx_queue[i];
1170 
1171 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1172 			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
1173 			   vf->relative_vf_id, i, vf->igu_sbs[i],
1174 			   p_queue->fw_rx_qid, p_queue->fw_tx_qid);
1175 	}
1176 
1177 	/* Update the link configuration in bulletin.
1178 	 */
1179 	OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
1180 		    sizeof(link_params));
1181 	OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
1182 		    sizeof(link_state));
1183 	OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
1184 		    sizeof(link_caps));
1185 	ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
1186 			   &link_params, &link_state, &link_caps);
1187 
1188 	rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
1189 
1190 	if (rc == ECORE_SUCCESS) {
1191 		vf->b_init = true;
1192 		p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
1193 			(1ULL << (vf->relative_vf_id % 64));
1194 
1195 		if (IS_LEAD_HWFN(p_hwfn))
1196 			p_hwfn->p_dev->p_iov_info->num_vfs++;
1197 	}
1198 
1199 	return rc;
1200 }
1201 
1202 enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
1203 						 struct ecore_ptt *p_ptt,
1204 						 u16 rel_vf_id)
1205 {
1206 	struct ecore_mcp_link_capabilities caps;
1207 	struct ecore_mcp_link_params params;
1208 	struct ecore_mcp_link_state link;
1209 	struct ecore_vf_info *vf = OSAL_NULL;
1210 
1211 	vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
1212 	if (!vf) {
1213 		DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n");
1214 		return ECORE_UNKNOWN_ERROR;
1215 	}
1216 
1217 	if (vf->bulletin.p_virt)
1218 		OSAL_MEMSET(vf->bulletin.p_virt, 0,
1219 			    sizeof(*vf->bulletin.p_virt));
1220 
1221 	OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
1222 
1223 	/* Get the link configuration back in bulletin so
1224 	 * that when VFs are re-enabled they get the actual
1225 	 * link configuration.
1226 	 */
1227 	OSAL_MEMCPY(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
1228 	OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
1229 	OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
1230 		    sizeof(caps));
1231 	ecore_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
1232 
1233 	/* Forget the VF's acquisition message */
1234 	OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));
1235 
1236 	/* Disabling interrupts and resetting the permission table was done
1237 	 * during vf-close; however, we could get here without going through vf_close
1238 	 */
1239 	/* Disable Interrupts for VF */
1240 	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
1241 
1242 	/* Reset Permission table */
1243 	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
1244 
1245 	vf->num_rxqs = 0;
1246 	vf->num_txqs = 0;
1247 	ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
1248 
1249 	if (vf->b_init) {
1250 		vf->b_init = false;
1251 		p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
1252 					~(1ULL << (vf->relative_vf_id % 64));
1253 
1254 		if (IS_LEAD_HWFN(p_hwfn))
1255 			p_hwfn->p_dev->p_iov_info->num_vfs--;
1256 	}
1257 
1258 	return ECORE_SUCCESS;
1259 }
1260 
1261 static bool ecore_iov_tlv_supported(u16 tlvtype)
1262 {
1263 	return tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX;
1264 }
1265 
1266 static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
1267 					 struct ecore_vf_info *vf, u16 tlv)
1268 {
1269 	/* lock the channel */
1270 	/* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */
1271 
1272 	/* record the locking op */
1273 	/* vf->op_current = tlv; @@@TBD MichalK */
1274 
1275 	/* log the lock */
1276 	if (ecore_iov_tlv_supported(tlv))
1277 		DP_VERBOSE(p_hwfn,
1278 			   ECORE_MSG_IOV,
1279 			   "VF[%d]: vf pf channel locked by %s\n",
1280 			   vf->abs_vf_id,
1281 			   ecore_channel_tlvs_string[tlv]);
1282 	else
1283 		DP_VERBOSE(p_hwfn,
1284 			   ECORE_MSG_IOV,
1285 			   "VF[%d]: vf pf channel locked by %04x\n",
1286 			   vf->abs_vf_id, tlv);
1287 }
1288 
1289 static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
1290 					   struct ecore_vf_info *vf,
1291 					   u16 expected_tlv)
1292 {
1293 	/* log the unlock */
1294 	if (ecore_iov_tlv_supported(expected_tlv))
1295 		DP_VERBOSE(p_hwfn,
1296 			   ECORE_MSG_IOV,
1297 			   "VF[%d]: vf pf channel unlocked by %s\n",
1298 			   vf->abs_vf_id,
1299 			   ecore_channel_tlvs_string[expected_tlv]);
1300 	else
1301 		DP_VERBOSE(p_hwfn,
1302 			   ECORE_MSG_IOV,
1303 			   "VF[%d]: vf pf channel unlocked by %04x\n",
1304 			   vf->abs_vf_id, expected_tlv);
1305 
1306 	/* record the locking op */
1307 	/* vf->op_current = CHANNEL_TLV_NONE; */
1308 }
1309 
1310 /* place a given tlv on the tlv buffer, continuing current tlv list */
1311 void *ecore_add_tlv(u8 **offset, u16 type, u16 length)
1312 {
1313 	struct channel_tlv *tl = (struct channel_tlv *)*offset;
1314 
1315 	tl->type = type;
1316 	tl->length = length;
1317 
1318 	/* Offset should keep pointing to next TLV (the end of the last) */
1319 	*offset += length;
1320 
1321 	/* Return a pointer to the start of the added tlv */
1322 	return *offset - length;
1323 }
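/* Editor's note - a hypothetical sketch of how the helper above is used
 * (see ecore_iov_prepare_resp() further down for a real caller):
 *     tlv = ecore_add_tlv(&offset, type, length);
 *     ecore_add_tlv(&offset, CHANNEL_TLV_LIST_END,
 *                   sizeof(struct channel_list_end_tlv));
 * Each call returns the start of its own TLV and leaves 'offset' pointing
 * just past it, so successive TLVs are laid back to back in the buffer.
 */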
1324 
1325 /* list the types and lengths of the tlvs on the buffer */
1326 void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
1327 {
1328 	u16 i = 1, total_length = 0;
1329 	struct channel_tlv *tlv;
1330 
1331 	do {
1332 		/* cast current tlv list entry to channel tlv header */
1333 		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
1334 
1335 		/* output tlv */
1336 		if (ecore_iov_tlv_supported(tlv->type))
1337 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1338 				   "TLV number %d: type %s, length %d\n",
1339 				   i, ecore_channel_tlvs_string[tlv->type],
1340 				   tlv->length);
1341 		else
1342 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1343 				   "TLV number %d: type %d, length %d\n",
1344 				   i, tlv->type, tlv->length);
1345 
1346 		if (tlv->type == CHANNEL_TLV_LIST_END)
1347 			return;
1348 
1349 		/* Validate entry - protect against malicious VFs */
1350 		if (!tlv->length) {
1351 			DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");
1352 			return;
1353 		}
1354 		total_length += tlv->length;
1355 		if (total_length >= sizeof(struct tlv_buffer_size)) {
1356 			DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
1357 			return;
1358 		}
1359 
1360 		i++;
1361 	} while (1);
1362 }
1363 
1364 static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
1365 				    struct ecore_ptt *p_ptt,
1366 				    struct ecore_vf_info *p_vf,
1367 #ifdef CONFIG_ECORE_SW_CHANNEL
1368 				    u16 length,
1369 #else
1370 				    u16 OSAL_UNUSED length,
1371 #endif
1372 				    u8 status)
1373 {
1374 	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
1375 	struct ecore_dmae_params params;
1376 	u8 eng_vf_id;
1377 
1378 	mbx->reply_virt->default_resp.hdr.status = status;
1379 
1380 	ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);
1381 
1382 #ifdef CONFIG_ECORE_SW_CHANNEL
1383 	mbx->sw_mbx.response_size =
1384 	    length + sizeof(struct channel_list_end_tlv);
1385 
1386 	if (!p_vf->b_hw_channel)
1387 		return;
1388 #endif
1389 
1390 	eng_vf_id = p_vf->abs_vf_id;
1391 
1392 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
1393 	params.flags = ECORE_DMAE_FLAG_VF_DST;
1394 	params.dst_vfid = eng_vf_id;
1395 
1396 	ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
1397 			     mbx->req_virt->first_tlv.reply_address +
1398 			     sizeof(u64),
1399 			     (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
1400 			     &params);
1401 
1402 	/* Once the PF copies the rc to the VF, the latter can continue
1403 	 * and send an additional message, so we have to make sure the
1404 	 * channel is re-set to ready prior to that.
1405 	 */
1406 	REG_WR(p_hwfn,
1407 	       GTT_BAR0_MAP_REG_USDM_RAM +
1408 	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
1409 
1410 	ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
1411 			     mbx->req_virt->first_tlv.reply_address,
1412 			     sizeof(u64) / 4, &params);
1413 
1414 	OSAL_IOV_PF_RESP_TYPE(p_hwfn, p_vf->relative_vf_id, status);
1415 }
1416 
1417 static u16 ecore_iov_vport_to_tlv(enum ecore_iov_vport_update_flag flag)
1418 {
1419 	switch (flag) {
1420 	case ECORE_IOV_VP_UPDATE_ACTIVATE:
1421 		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
1422 	case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
1423 		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
1424 	case ECORE_IOV_VP_UPDATE_TX_SWITCH:
1425 		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
1426 	case ECORE_IOV_VP_UPDATE_MCAST:
1427 		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
1428 	case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
1429 		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
1430 	case ECORE_IOV_VP_UPDATE_RSS:
1431 		return CHANNEL_TLV_VPORT_UPDATE_RSS;
1432 	case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
1433 		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
1434 	case ECORE_IOV_VP_UPDATE_SGE_TPA:
1435 		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
1436 	default:
1437 		return 0;
1438 	}
1439 }
1440 
1441 static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
1442 					      struct ecore_vf_info *p_vf,
1443 					      struct ecore_iov_vf_mbx *p_mbx,
1444 					      u8 status, u16 tlvs_mask,
1445 					      u16 tlvs_accepted)
1446 {
1447 	struct pfvf_def_resp_tlv *resp;
1448 	u16 size, total_len, i;
1449 
1450 	OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
1451 	p_mbx->offset = (u8 *)p_mbx->reply_virt;
1452 	size = sizeof(struct pfvf_def_resp_tlv);
1453 	total_len = size;
1454 
1455 	ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
1456 
1457 	/* Prepare response for all extended tlvs if they are found by PF */
1458 	for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
1459 		if (!(tlvs_mask & (1 << i)))
1460 			continue;
1461 
1462 		resp = ecore_add_tlv(&p_mbx->offset, ecore_iov_vport_to_tlv(i),
1463 				     size);
1464 
1465 		if (tlvs_accepted & (1 << i))
1466 			resp->hdr.status = status;
1467 		else
1468 			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
1469 
1470 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1471 			   "VF[%d] - vport_update resp: TLV %d, status %02x\n",
1472 			   p_vf->relative_vf_id,
1473 			   ecore_iov_vport_to_tlv(i),
1474 			   resp->hdr.status);
1475 
1476 		total_len += size;
1477 	}
1478 
1479 	ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_LIST_END,
1480 		      sizeof(struct channel_list_end_tlv));
1481 
1482 	return total_len;
1483 }
1484 
1485 static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
1486 				   struct ecore_ptt *p_ptt,
1487 				   struct ecore_vf_info *vf_info,
1488 				   u16 type, u16 length, u8 status)
1489 {
1490 	struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;
1491 
1492 	mbx->offset = (u8 *)mbx->reply_virt;
1493 
1494 	ecore_add_tlv(&mbx->offset, type, length);
1495 	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
1496 		      sizeof(struct channel_list_end_tlv));
1497 
1498 	ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
1499 }
1500 
1501 struct ecore_public_vf_info
1502 *ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
1503 			      u16 relative_vf_id,
1504 			      bool b_enabled_only)
1505 {
1506 	struct ecore_vf_info *vf = OSAL_NULL;
1507 
1508 	vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
1509 	if (!vf)
1510 		return OSAL_NULL;
1511 
1512 	return &vf->p_vf_info;
1513 }
1514 
1515 static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
1516 				 struct ecore_vf_info *p_vf)
1517 {
1518 	u32 i, j;
1519 	p_vf->vf_bulletin = 0;
1520 	p_vf->vport_instance = 0;
1521 	p_vf->configured_features = 0;
1522 
1523 	/* If VF previously requested fewer resources, go back to default */
1524 	p_vf->num_rxqs = p_vf->num_sbs;
1525 	p_vf->num_txqs = p_vf->num_sbs;
1526 
1527 	p_vf->num_active_rxqs = 0;
1528 
1529 	for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
1530 		struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
1531 
1532 		for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
1533 			if (!p_queue->cids[j].p_cid)
1534 				continue;
1535 
1536 			ecore_eth_queue_cid_release(p_hwfn,
1537 						    p_queue->cids[j].p_cid);
1538 			p_queue->cids[j].p_cid = OSAL_NULL;
1539 		}
1540 	}
1541 
1542 	OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
1543 	OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
1544 	OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
1545 }
1546 
1547 /* Returns either 0, or log2 of the VF doorbell BAR size */
1548 static u32 ecore_iov_vf_db_bar_size(struct ecore_hwfn *p_hwfn,
1549 				    struct ecore_ptt *p_ptt)
1550 {
1551 	u32 val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);
1552 
1553 	if (val)
1554 		return val + 11;
1555 	return 0;
1556 }
1557 
1558 static void
1559 ecore_iov_vf_mbx_acquire_resc_cids(struct ecore_hwfn *p_hwfn,
1560 				   struct ecore_ptt *p_ptt,
1561 				   struct ecore_vf_info *p_vf,
1562 				   struct vf_pf_resc_request *p_req,
1563 				   struct pf_vf_resc *p_resp)
1564 {
1565 	u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
1566 	u8 db_size = DB_ADDR_VF(1, DQ_DEMS_LEGACY) -
1567 		     DB_ADDR_VF(0, DQ_DEMS_LEGACY);
1568 	u32 bar_size;
1569 
1570 	p_resp->num_cids = OSAL_MIN_T(u8, p_req->num_cids, num_vf_cons);
1571 
1572 	/* If the VF didn't bother asking for QIDs then don't bother limiting
1573 	 * number of CIDs. The VF doesn't care about the number, and this
1574 	 * has the likely result of causing an additional acquisition.
1575 	 */
1576 	if (!(p_vf->acquire.vfdev_info.capabilities &
1577 	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
1578 		return;
1579 
1580 	/* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
1581 	 * that would make sure doorbells for all CIDs fall within the bar.
1582 	 * If it doesn't, make sure regview window is sufficient.
1583 	 */
1584 	if (p_vf->acquire.vfdev_info.capabilities &
1585 	    VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
1586 		bar_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt);
1587 		if (bar_size)
1588 			bar_size = 1 << bar_size;
1589 
1590 		if (ECORE_IS_CMT(p_hwfn->p_dev))
1591 			bar_size /= 2;
1592 	} else {
1593 		bar_size = PXP_VF_BAR0_DQ_LENGTH;
1594 	}
1595 
1596 	if (bar_size / db_size < 256)
1597 		p_resp->num_cids = OSAL_MIN_T(u8, p_resp->num_cids,
1598 					      (u8)(bar_size / db_size));
1599 }
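/* Editor's note - a rough numerical illustration of the CID limiting above,
 * under the assumption of a 64-byte per-CID doorbell stride (the real stride
 * is DB_ADDR_VF(1, DQ_DEMS_LEGACY) - DB_ADDR_VF(0, DQ_DEMS_LEGACY)): a
 * 32 KiB doorbell bar fits 32768 / 64 = 512 doorbells, so no extra limit is
 * applied, whereas an 8 KiB bar fits only 128 and num_cids gets capped at
 * 128. On CMT devices the usable bar is halved before this check.
 */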
1600 
1601 static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
1602 					struct ecore_ptt *p_ptt,
1603 					struct ecore_vf_info *p_vf,
1604 					struct vf_pf_resc_request *p_req,
1605 					struct pf_vf_resc *p_resp)
1606 {
1607 	u8 i;
1608 
1609 	/* Queue related information */
1610 	p_resp->num_rxqs = p_vf->num_rxqs;
1611 	p_resp->num_txqs = p_vf->num_txqs;
1612 	p_resp->num_sbs = p_vf->num_sbs;
1613 
1614 	for (i = 0; i < p_resp->num_sbs; i++) {
1615 		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
1616 		/* TODO - what's this sb_qid field? Is it deprecated?
1617 		 * or is there an ecore_client that looks at this?
1618 		 */
1619 		p_resp->hw_sbs[i].sb_qid = 0;
1620 	}
1621 
1622 	/* These fields are filled for backward compatibility.
1623 	 * Unused by modern vfs.
1624 	 */
1625 	for (i = 0; i < p_resp->num_rxqs; i++) {
1626 		ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
1627 				  (u16 *)&p_resp->hw_qid[i]);
1628 		p_resp->cid[i] = i;
1629 	}
1630 
1631 	/* Filter related information */
1632 	p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
1633 					     p_req->num_mac_filters);
1634 	p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
1635 					      p_req->num_vlan_filters);
1636 
1637 	ecore_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);
1638 
1639 	/* This isn't really needed/enforced, but some legacy VFs might depend
1640 	 * on the correct filling of this field.
1641 	 */
1642 	p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;
1643 
1644 	/* Validate sufficient resources for VF */
1645 	if (p_resp->num_rxqs < p_req->num_rxqs ||
1646 	    p_resp->num_txqs < p_req->num_txqs ||
1647 	    p_resp->num_sbs < p_req->num_sbs ||
1648 	    p_resp->num_mac_filters < p_req->num_mac_filters ||
1649 	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
1650 	    p_resp->num_mc_filters < p_req->num_mc_filters ||
1651 	    p_resp->num_cids < p_req->num_cids) {
1652 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1653 			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
1654 			   p_vf->abs_vf_id,
1655 			   p_req->num_rxqs, p_resp->num_rxqs,
1656 			   p_req->num_txqs, p_resp->num_txqs,
1657 			   p_req->num_sbs, p_resp->num_sbs,
1658 			   p_req->num_mac_filters, p_resp->num_mac_filters,
1659 			   p_req->num_vlan_filters, p_resp->num_vlan_filters,
1660 			   p_req->num_mc_filters, p_resp->num_mc_filters,
1661 			   p_req->num_cids, p_resp->num_cids);
1662 
1663 		/* Some legacy OSes are incapable of correctly handling this
1664 		 * failure.
1665 		 */
1666 		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
1667 		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
1668 		    (p_vf->acquire.vfdev_info.os_type ==
1669 		     VFPF_ACQUIRE_OS_WINDOWS))
1670 			return PFVF_STATUS_SUCCESS;
1671 
1672 		return PFVF_STATUS_NO_RESOURCE;
1673 	}
1674 
1675 	return PFVF_STATUS_SUCCESS;
1676 }
1677 
1678 static void ecore_iov_vf_mbx_acquire_stats(struct pfvf_stats_info *p_stats)
1679 {
1680 	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
1681 				  OFFSETOF(struct mstorm_vf_zone,
1682 					   non_trigger.eth_queue_stat);
1683 	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
1684 	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
1685 				  OFFSETOF(struct ustorm_vf_zone,
1686 					   non_trigger.eth_queue_stat);
1687 	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
1688 	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
1689 				  OFFSETOF(struct pstorm_vf_zone,
1690 					   non_trigger.eth_queue_stat);
1691 	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
1692 	p_stats->tstats.address = 0;
1693 	p_stats->tstats.len = 0;
1694 }
1695 
1696 static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
1697 				     struct ecore_ptt	     *p_ptt,
1698 				     struct ecore_vf_info    *vf)
1699 {
1700 	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
1701 	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
1702 	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
1703 	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
1704 	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1705 	struct pf_vf_resc *resc = &resp->resc;
1706 	enum _ecore_status_t rc;
1707 
1708 	OSAL_MEMSET(resp, 0, sizeof(*resp));
1709 
1710 	/* Write the PF version so the VF knows which version is supported -
1711 	 * it might be overridden later. This guarantees the VF can recognize
1712 	 * a legacy PF by the lack of versions in the reply.
1713 	 */
1714 	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
1715 	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
1716 
1717 	/* TODO - not doing anything is bad since we'll assert, but this isn't
1718 	 * necessarily the right behavior - perhaps we should have allowed some
1719 	 * versatility here.
1720 	 */
1721 	if (vf->state != VF_FREE &&
1722 	    vf->state != VF_STOPPED) {
1723 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1724 			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
1725 			   vf->abs_vf_id, vf->state);
1726 		goto out;
1727 	}
1728 
1729 	/* Validate FW compatibility */
1730 	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
1731 		if (req->vfdev_info.capabilities &
1732 		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
1733 			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
1734 
1735 			/* This legacy support would need to be removed once
1736 			 * the major has changed.
1737 			 */
1738 			OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);
1739 
1740 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1741 				   "VF[%d] is pre-fastpath HSI\n",
1742 				   vf->abs_vf_id);
1743 			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
1744 			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
1745 		} else {
1746 			DP_INFO(p_hwfn,
1747 				"VF[%d] needs fastpath HSI %02x.%02x, which is"
1748 				" incompatible with loaded FW's fastpath"
1749 				" HSI %02x.%02x\n",
1750 				vf->abs_vf_id,
1751 				req->vfdev_info.eth_fp_hsi_major,
1752 				req->vfdev_info.eth_fp_hsi_minor,
1753 				ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
1754 
1755 			goto out;
1756 		}
1757 	}
1758 
1759 	/* On 100g PFs, prevent old VFs from loading */
1760 	if (ECORE_IS_CMT(p_hwfn->p_dev) &&
1761 	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
1762 		DP_INFO(p_hwfn,
1763 			"VF[%d] is running an old driver that doesn't support"
1764 			" 100g\n",
1765 			vf->abs_vf_id);
1766 		goto out;
1767 	}
1768 
1769 #ifndef __EXTRACT__LINUX__
1770 	if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
1771 		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1772 		goto out;
1773 	}
1774 #endif
1775 
1776 	/* Store the acquire message */
1777 	OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire));
1778 
1779 	vf->opaque_fid = req->vfdev_info.opaque_fid;
1780 
1781 	vf->vf_bulletin = req->bulletin_addr;
1782 	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
1783 	    vf->bulletin.size : req->bulletin_size;
1784 
1785 	/* fill in pfdev info */
1786 	pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
1787 	pfdev_info->db_size = 0;	/* @@@ TBD MichalK Vf Doorbells */
1788 	pfdev_info->indices_per_sb = PIS_PER_SB_E4;
1789 
1790 	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
1791 				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
1792 	if (ECORE_IS_CMT(p_hwfn->p_dev))
1793 		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
1794 
1795 	/* Share our ability to use multiple queue-ids only with VFs
1796 	 * that request it.
1797 	 */
1798 	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
1799 		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
1800 
1801 	/* Share the sizes of the bars with VF */
1802 	resp->pfdev_info.bar_size = (u8)ecore_iov_vf_db_bar_size(p_hwfn,
1803 							     p_ptt);
1804 
1805 	ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info);
1806 
1807 	OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
1808 		    ETH_ALEN);
1809 
1810 	pfdev_info->fw_major = FW_MAJOR_VERSION;
1811 	pfdev_info->fw_minor = FW_MINOR_VERSION;
1812 	pfdev_info->fw_rev = FW_REVISION_VERSION;
1813 	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
1814 
1815 	/* Incorrect when legacy, but doesn't matter as legacy isn't reading
1816 	 * this field.
1817 	 */
1818 	pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR,
1819 					      req->vfdev_info.eth_fp_hsi_minor);
1820 	pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
1821 	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver,
1822 			      OSAL_NULL);
1823 
1824 	pfdev_info->dev_type = p_hwfn->p_dev->type;
1825 	pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;
1826 
1827 	/* Fill resources available to VF; Make sure there are enough to
1828 	 * satisfy the VF's request.
1829 	 */
1830 	vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
1831 						    &req->resc_request, resc);
1832 	if (vfpf_status != PFVF_STATUS_SUCCESS)
1833 		goto out;
1834 
1835 	/* Start the VF in FW */
1836 	rc = ecore_sp_vf_start(p_hwfn, vf);
1837 	if (rc != ECORE_SUCCESS) {
1838 		DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
1839 			  vf->abs_vf_id);
1840 		vfpf_status = PFVF_STATUS_FAILURE;
1841 		goto out;
1842 	}
1843 
1844 	/* Fill agreed size of bulletin board in response, and post
1845 	 * an initial image to the bulletin board.
1846 	 */
1847 	resp->bulletin_size = vf->bulletin.size;
1848 	ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
1849 
1850 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1851 		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
1852 		   " db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
1853 		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
1854 		   " n_vlans-%d\n",
1855 		   vf->abs_vf_id, resp->pfdev_info.chip_num,
1856 		   resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
1857 		   (unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs,
1858 		   resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
1859 		   resc->num_vlan_filters);
1860 
1861 	vf->state = VF_ACQUIRED;
1862 
1863 out:
1864 	/* Prepare Response */
1865 	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
1866 			       sizeof(struct pfvf_acquire_resp_tlv),
1867 			       vfpf_status);
1868 }
1869 
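/* Configure anti-spoofing on the VF's vport via a vport-update ramrod
 * and cache the value that was actually applied.
 */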
1870 static enum _ecore_status_t
1871 __ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
1872 			 struct ecore_vf_info *p_vf, bool val)
1873 {
1874 	struct ecore_sp_vport_update_params params;
1875 	enum _ecore_status_t rc;
1876 
1877 	if (val == p_vf->spoof_chk) {
1878 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1879 			   "Spoofchk value[%d] is already configured\n", val);
1880 		return ECORE_SUCCESS;
1881 	}
1882 
1883 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));
1884 	params.opaque_fid = p_vf->opaque_fid;
1885 	params.vport_id = p_vf->vport_id;
1886 	params.update_anti_spoofing_en_flg = 1;
1887 	params.anti_spoofing_en = val;
1888 
1889 	rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
1890 				   OSAL_NULL);
1891 	if (rc == ECORE_SUCCESS) {
1892 		p_vf->spoof_chk = val;
1893 		p_vf->req_spoofchk_val = p_vf->spoof_chk;
1894 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1895 			   "Spoofchk val[%d] configured\n", val);
1896 	} else {
1897 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1898 			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
1899 			   val, p_vf->relative_vf_id);
1900 	}
1901 
1902 	return rc;
1903 }
1904 
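/* Re-add all VLAN filters recorded in the VF's shadow configuration to
 * the VF's vport.
 */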
1905 static enum _ecore_status_t
1906 ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
1907 				   struct ecore_vf_info *p_vf)
1908 {
1909 	struct ecore_filter_ucast filter;
1910 	enum _ecore_status_t rc = ECORE_SUCCESS;
1911 	int i;
1912 
1913 	OSAL_MEMSET(&filter, 0, sizeof(filter));
1914 	filter.is_rx_filter = 1;
1915 	filter.is_tx_filter = 1;
1916 	filter.vport_to_add_to = p_vf->vport_id;
1917 	filter.opcode = ECORE_FILTER_ADD;
1918 
1919 	/* Reconfigure vlans */
1920 	for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
1921 		if (!p_vf->shadow_config.vlans[i].used)
1922 			continue;
1923 
1924 		filter.type = ECORE_FILTER_VLAN;
1925 		filter.vlan = p_vf->shadow_config.vlans[i].vid;
1926 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1927 			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
1928 			   filter.vlan, p_vf->relative_vf_id);
1929 		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1930 					       &filter, ECORE_SPQ_MODE_CB,
1931 					       OSAL_NULL);
1932 		if (rc) {
1933 			DP_NOTICE(p_hwfn, true,
1934 				  "Failed to configure VLAN [%04x]"
1935 				  " to VF [%04x]\n",
1936 				  filter.vlan, p_vf->relative_vf_id);
1937 			break;
1938 		}
1939 	}
1940 
1941 	return rc;
1942 }
1943 
1944 static enum _ecore_status_t
1945 ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
1946 				     struct ecore_vf_info *p_vf, u64 events)
1947 {
1948 	enum _ecore_status_t rc = ECORE_SUCCESS;
1949 
1950 	/*TODO - what about MACs? */
1951 
1952 	if ((events & (1 << VLAN_ADDR_FORCED)) &&
1953 	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
1954 		rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
1955 
1956 	return rc;
1957 }
1958 
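/* Apply hypervisor-forced configuration [forced MAC and/or forced VLAN]
 * on the VF's vport - update the unicast filters, vport settings and Rx
 * queues, and re-apply the shadow configuration once a forced feature
 * is removed.
 */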
1959 static enum _ecore_status_t
1960 ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
1961 				 struct ecore_vf_info *p_vf,
1962 				 u64 events)
1963 {
1964 	enum _ecore_status_t rc = ECORE_SUCCESS;
1965 	struct ecore_filter_ucast filter;
1966 
1967 	if (!p_vf->vport_instance)
1968 		return ECORE_INVAL;
1969 
1970 	if ((events & (1 << MAC_ADDR_FORCED)) ||
1971 	    p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) {
1972 		/* Since there's no way [currently] of removing the MAC,
1973 		 * we can always assume this means we need to force it.
1974 		 */
1975 		OSAL_MEMSET(&filter, 0, sizeof(filter));
1976 		filter.type = ECORE_FILTER_MAC;
1977 		filter.opcode = ECORE_FILTER_REPLACE;
1978 		filter.is_rx_filter = 1;
1979 		filter.is_tx_filter = 1;
1980 		filter.vport_to_add_to = p_vf->vport_id;
1981 		OSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN);
1982 
1983 		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1984 					       &filter,
1985 					       ECORE_SPQ_MODE_CB, OSAL_NULL);
1986 		if (rc) {
1987 			DP_NOTICE(p_hwfn, true,
1988 				  "PF failed to configure MAC for VF\n");
1989 			return rc;
1990 		}
1991 
1992 		if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
1993 			p_vf->configured_features |=
1994 				1 << VFPF_BULLETIN_MAC_ADDR;
1995 		else
1996 			p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
1997 	}
1998 
1999 	if (events & (1 << VLAN_ADDR_FORCED)) {
2000 		struct ecore_sp_vport_update_params vport_update;
2001 		u8 removal;
2002 		int i;
2003 
2004 		OSAL_MEMSET(&filter, 0, sizeof(filter));
2005 		filter.type = ECORE_FILTER_VLAN;
2006 		filter.is_rx_filter = 1;
2007 		filter.is_tx_filter = 1;
2008 		filter.vport_to_add_to = p_vf->vport_id;
2009 		filter.vlan = p_vf->bulletin.p_virt->pvid;
2010 		filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :
2011 		    ECORE_FILTER_FLUSH;
2012 
2013 		/* Send the ramrod */
2014 		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
2015 					       &filter,
2016 					       ECORE_SPQ_MODE_CB, OSAL_NULL);
2017 		if (rc) {
2018 			DP_NOTICE(p_hwfn, true,
2019 				  "PF failed to configure VLAN for VF\n");
2020 			return rc;
2021 		}
2022 
2023 		/* Update the default-vlan & silent vlan stripping */
2024 		OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
2025 		vport_update.opaque_fid = p_vf->opaque_fid;
2026 		vport_update.vport_id = p_vf->vport_id;
2027 		vport_update.update_default_vlan_enable_flg = 1;
2028 		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
2029 		vport_update.update_default_vlan_flg = 1;
2030 		vport_update.default_vlan = filter.vlan;
2031 
2032 		vport_update.update_inner_vlan_removal_flg = 1;
2033 		removal = filter.vlan ?
2034 		    1 : p_vf->shadow_config.inner_vlan_removal;
2035 		vport_update.inner_vlan_removal_flg = removal;
2036 		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
2037 		rc = ecore_sp_vport_update(p_hwfn, &vport_update,
2038 					   ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
2039 		if (rc) {
2040 			DP_NOTICE(p_hwfn, true,
2041 				  "PF failed to configure VF vport for vlan\n");
2042 			return rc;
2043 		}
2044 
2045 		/* Update all the Rx queues */
2046 		for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
2047 			struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
2048 			struct ecore_queue_cid *p_cid = OSAL_NULL;
2049 
2050 			/* There can be at most one Rx queue per qzone. Find it */
2051 			p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
2052 			if (p_cid == OSAL_NULL)
2053 				continue;
2054 
2055 			rc = ecore_sp_eth_rx_queues_update(p_hwfn,
2056 							   (void **)&p_cid,
2057 						   1, 0, 1,
2058 						   ECORE_SPQ_MODE_EBLOCK,
2059 						   OSAL_NULL);
2060 			if (rc) {
2061 				DP_NOTICE(p_hwfn, true,
2062 					  "Failed to send Rx update"
2063 					  " for queue[0x%04x]\n",
2064 					  p_cid->rel.queue_id);
2065 				return rc;
2066 			}
2067 		}
2068 
2069 		if (filter.vlan)
2070 			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
2071 		else
2072 			p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
2073 	}
2074 
2075 	/* If forced features are terminated, we need to configure the shadow
2076 	 * configuration back again.
2077 	 */
2078 	if (events)
2079 		ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
2080 
2081 	return rc;
2082 }
2083 
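/* Handle the VF's VPORT_START mailbox request - configure the VF's
 * status blocks in CAU, start the vport in FW and apply any forced
 * configuration from the bulletin board.
 */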
2084 static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
2085 					 struct ecore_ptt *p_ptt,
2086 					 struct ecore_vf_info *vf)
2087 {
2088 	struct ecore_sp_vport_start_params params = { 0 };
2089 	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2090 	struct vfpf_vport_start_tlv *start;
2091 	u8 status = PFVF_STATUS_SUCCESS;
2092 	struct ecore_vf_info *vf_info;
2093 	u64 *p_bitmap;
2094 	int sb_id;
2095 	enum _ecore_status_t rc;
2096 
2097 	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
2098 	if (!vf_info) {
2099 		DP_NOTICE(p_hwfn->p_dev, true,
2100 			  "Failed to get VF info, invalid vfid [%d]\n",
2101 			  vf->relative_vf_id);
2102 		return;
2103 	}
2104 
2105 	vf->state = VF_ENABLED;
2106 	start = &mbx->req_virt->start_vport;
2107 
2108 	ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
2109 
2110 	/* Initialize Status block in CAU */
2111 	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
2112 		if (!start->sb_addr[sb_id]) {
2113 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2114 				   "VF[%d] did not fill the address of SB %d\n",
2115 				   vf->relative_vf_id, sb_id);
2116 			break;
2117 		}
2118 
2119 		ecore_int_cau_conf_sb(p_hwfn, p_ptt,
2120 				      start->sb_addr[sb_id],
2121 				      vf->igu_sbs[sb_id],
2122 				      vf->abs_vf_id, 1);
2123 	}
2124 
2125 	vf->mtu = start->mtu;
2126 	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
2127 
2128 	/* Take into consideration configuration forced by hypervisor;
2129 	 * If none is configured, use the supplied VF values [for old
2130 	 * vfs that would still be fine, since they passed '0' as padding].
2131 	 */
2132 	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
2133 	if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
2134 		u8 vf_req = start->only_untagged;
2135 
2136 		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
2137 		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
2138 	}
2139 
2140 	params.tpa_mode = start->tpa_mode;
2141 	params.remove_inner_vlan = start->inner_vlan_removal;
2142 	params.tx_switching = true;
2143 
2144 #ifndef ASIC_ONLY
2145 	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2146 		DP_NOTICE(p_hwfn, false,
2147 			  "FPGA: Don't config VF for Tx-switching [no pVFC]\n");
2148 		params.tx_switching = false;
2149 	}
2150 #endif
2151 
2152 	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
2153 	params.drop_ttl0 = false;
2154 	params.concrete_fid = vf->concrete_fid;
2155 	params.opaque_fid = vf->opaque_fid;
2156 	params.vport_id = vf->vport_id;
2157 	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
2158 	params.mtu = vf->mtu;
2159 	params.check_mac = true;
2160 
2161 	rc = ecore_sp_eth_vport_start(p_hwfn, &params);
2162 	if (rc != ECORE_SUCCESS) {
2163 		DP_ERR(p_hwfn,
2164 		       "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
2165 		status = PFVF_STATUS_FAILURE;
2166 	} else {
2167 		vf->vport_instance++;
2168 
2169 		/* Force configuration if needed on the newly opened vport */
2170 		ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
2171 		OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
2172 					  vf->vport_id, vf->opaque_fid);
2173 		__ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
2174 	}
2175 
2176 	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
2177 			       sizeof(struct pfvf_def_resp_tlv), status);
2178 }
2179 
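/* Handle the VF's VPORT_TEARDOWN mailbox request - mark the VF as
 * malicious if it still has active queues, otherwise stop the vport and
 * forget its configuration.
 */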
2180 static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
2181 					struct ecore_ptt *p_ptt,
2182 					struct ecore_vf_info *vf)
2183 {
2184 	u8 status = PFVF_STATUS_SUCCESS;
2185 	enum _ecore_status_t rc;
2186 
2187 	OSAL_IOV_VF_VPORT_STOP(p_hwfn, vf);
2188 	vf->vport_instance--;
2189 	vf->spoof_chk = false;
2190 
2191 	if ((ecore_iov_validate_active_rxq(vf)) ||
2192 	    (ecore_iov_validate_active_txq(vf))) {
2193 		vf->b_malicious = true;
2194 		DP_NOTICE(p_hwfn, false,
2195 			  "VF [%02x] - considered malicious;"
2196 			  " Unable to stop RX/TX queues\n",
2197 			  vf->abs_vf_id);
2198 		status = PFVF_STATUS_MALICIOUS;
2199 		goto out;
2200 	}
2201 
2202 	rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
2203 	if (rc != ECORE_SUCCESS) {
2204 		DP_ERR(p_hwfn,
2205 		       "ecore_iov_vf_mbx_stop_vport returned error %d\n", rc);
2206 		status = PFVF_STATUS_FAILURE;
2207 	}
2208 
2209 	/* Forget the configuration on the vport */
2210 	vf->configured_features = 0;
2211 	OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
2212 
2213 out:
2214 	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
2215 			       sizeof(struct pfvf_def_resp_tlv), status);
2216 }
2217 
2218 static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
2219 					    struct ecore_ptt *p_ptt,
2220 					    struct ecore_vf_info *vf,
2221 					    u8 status, bool b_legacy)
2222 {
2223 	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2224 	struct pfvf_start_queue_resp_tlv *p_tlv;
2225 	struct vfpf_start_rxq_tlv *req;
2226 	u16 length;
2227 
2228 	mbx->offset = (u8 *)mbx->reply_virt;
2229 
2230 	/* Taking a bigger struct instead of adding a TLV to the list was a
2231 	 * mistake, but one which we're now stuck with, as some older
2232 	 * clients assume the size of the previous response.
2233 	 */
2234 	if (!b_legacy)
2235 		length = sizeof(*p_tlv);
2236 	else
2237 		length = sizeof(struct pfvf_def_resp_tlv);
2238 
2239 	p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_RXQ, length);
2240 	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2241 		      sizeof(struct channel_list_end_tlv));
2242 
2243 	/* Update the TLV with the response */
2244 	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2245 		req = &mbx->req_virt->start_rxq;
2246 		p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
2247 				OFFSETOF(struct mstorm_vf_zone,
2248 					 non_trigger.eth_rx_queue_producers) +
2249 				sizeof(struct eth_rx_prod_data) * req->rx_qid;
2250 	}
2251 
2252 	ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2253 }
2254 
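/* Return the qid-usage index for the current mailbox request - the
 * legacy Rx/Tx index for VFs without the queue-qids capability, or the
 * value provided by the VF in the CHANNEL_TLV_QID TLV.
 */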
2255 static u8 ecore_iov_vf_mbx_qid(struct ecore_hwfn *p_hwfn,
2256 			       struct ecore_vf_info *p_vf, bool b_is_tx)
2257 {
2258 	struct ecore_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
2259 	struct vfpf_qid_tlv *p_qid_tlv;
2260 
2261 	/* Search for the qid TLV only if the VF indicated it would provide one */
2262 	if (!(p_vf->acquire.vfdev_info.capabilities &
2263 	      VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
2264 		if (b_is_tx)
2265 			return ECORE_IOV_LEGACY_QID_TX;
2266 		else
2267 			return ECORE_IOV_LEGACY_QID_RX;
2268 	}
2269 
2270 	p_qid_tlv = (struct vfpf_qid_tlv *)
2271 		    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2272 					       CHANNEL_TLV_QID);
2273 	if (p_qid_tlv == OSAL_NULL) {
2274 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2275 			   "VF[%2x]: Failed to provide qid\n",
2276 			   p_vf->relative_vf_id);
2277 
2278 		return ECORE_IOV_QID_INVALID;
2279 	}
2280 
2281 	if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
2282 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2283 			   "VF[%02x]: Provided qid out-of-bounds %02x\n",
2284 			   p_vf->relative_vf_id, p_qid_tlv->qid);
2285 		return ECORE_IOV_QID_INVALID;
2286 	}
2287 
2288 	return p_qid_tlv->qid;
2289 }
2290 
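/* Handle the VF's START_RXQ mailbox request - validate the queue and SB
 * indices, acquire a queue-cid, clean the Rx producer for non-legacy
 * VFs and start the Rx queue via ramrod.
 */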
2291 static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
2292 				       struct ecore_ptt *p_ptt,
2293 				       struct ecore_vf_info *vf)
2294 {
2295 	struct ecore_queue_start_common_params params;
2296 	struct ecore_queue_cid_vf_params vf_params;
2297 	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2298 	u8 status = PFVF_STATUS_NO_RESOURCE;
2299 	u8 qid_usage_idx, vf_legacy = 0;
2300 	struct ecore_vf_queue *p_queue;
2301 	struct vfpf_start_rxq_tlv *req;
2302 	struct ecore_queue_cid *p_cid;
2303 	struct ecore_sb_info sb_dummy;
2304 	enum _ecore_status_t rc;
2305 
2306 	req = &mbx->req_virt->start_rxq;
2307 
2308 	if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
2309 				    ECORE_IOV_VALIDATE_Q_DISABLE) ||
2310 	    !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2311 		goto out;
2312 
2313 	qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2314 	if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2315 		goto out;
2316 
2317 	p_queue = &vf->vf_queues[req->rx_qid];
2318 	if (p_queue->cids[qid_usage_idx].p_cid)
2319 		goto out;
2320 
2321 	vf_legacy = ecore_vf_calculate_legacy(vf);
2322 
2323 	/* Acquire a new queue-cid */
2324 	OSAL_MEMSET(&params, 0, sizeof(params));
2325 	params.queue_id = (u8)p_queue->fw_rx_qid;
2326 	params.vport_id = vf->vport_id;
2327 	params.stats_id = vf->abs_vf_id + 0x10;
2328 
2329 	/* Since IGU index is passed via sb_info, construct a dummy one */
2330 	OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
2331 	sb_dummy.igu_sb_id = req->hw_sb;
2332 	params.p_sb = &sb_dummy;
2333 	params.sb_idx = req->sb_index;
2334 
2335 	OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
2336 	vf_params.vfid = vf->relative_vf_id;
2337 	vf_params.vf_qid = (u8)req->rx_qid;
2338 	vf_params.vf_legacy = vf_legacy;
2339 	vf_params.qid_usage_idx = qid_usage_idx;
2340 
2341 	p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2342 				       &params, true, &vf_params);
2343 	if (p_cid == OSAL_NULL)
2344 		goto out;
2345 
2346 	/* Legacy VFs have their Producers in a different location, which they
2347 	 * calculate on their own; they clean the producer prior to this.
2348 	 */
2349 	if (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD))
2350 		REG_WR(p_hwfn,
2351 		       GTT_BAR0_MAP_REG_MSDM_RAM +
2352 		       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
2353 		       0);
2354 
2355 	rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
2356 					req->bd_max_bytes,
2357 					req->rxq_addr,
2358 					req->cqe_pbl_addr,
2359 					req->cqe_pbl_size);
2360 	if (rc != ECORE_SUCCESS) {
2361 		status = PFVF_STATUS_FAILURE;
2362 		ecore_eth_queue_cid_release(p_hwfn, p_cid);
2363 	} else {
2364 		p_queue->cids[qid_usage_idx].p_cid = p_cid;
2365 		p_queue->cids[qid_usage_idx].b_is_tx = false;
2366 		status = PFVF_STATUS_SUCCESS;
2367 		vf->num_active_rxqs++;
2368 	}
2369 
2370 out:
2371 	ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
2372 					!!(vf_legacy &
2373 					   ECORE_QCID_LEGACY_VF_RX_PROD));
2374 }
2375 
2376 static void
2377 ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
2378 				 struct ecore_tunnel_info *p_tun,
2379 				 u16 tunn_feature_mask)
2380 {
2381 	p_resp->tunn_feature_mask = tunn_feature_mask;
2382 	p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
2383 	p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
2384 	p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
2385 	p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
2386 	p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
2387 	p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
2388 	p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
2389 	p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
2390 	p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
2391 	p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
2392 	p_resp->geneve_udp_port = p_tun->geneve_port.port;
2393 	p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
2394 }
2395 
2396 static void
2397 __ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2398 				struct ecore_tunn_update_type *p_tun,
2399 				enum ecore_tunn_mode mask, u8 tun_cls)
2400 {
2401 	if (p_req->tun_mode_update_mask & (1 << mask)) {
2402 		p_tun->b_update_mode = true;
2403 
2404 		if (p_req->tunn_mode & (1 << mask))
2405 			p_tun->b_mode_enabled = true;
2406 	}
2407 
2408 	p_tun->tun_cls = tun_cls;
2409 }
2410 
2411 static void
2412 ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2413 			      struct ecore_tunn_update_type *p_tun,
2414 			      struct ecore_tunn_update_udp_port *p_port,
2415 			      enum ecore_tunn_mode mask,
2416 			      u8 tun_cls, u8 update_port, u16 port)
2417 {
2418 	if (update_port) {
2419 		p_port->b_update_port = true;
2420 		p_port->port = port;
2421 	}
2422 
2423 	__ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
2424 }
2425 
2426 static bool
2427 ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
2428 {
2429 	bool b_update_requested = false;
2430 
2431 	if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
2432 	    p_req->update_geneve_port || p_req->update_vxlan_port)
2433 		b_update_requested = true;
2434 
2435 	return b_update_requested;
2436 }
2437 
2438 static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
2439 					       struct ecore_ptt *p_ptt,
2440 					       struct ecore_vf_info *p_vf)
2441 {
2442 	struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
2443 	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2444 	struct pfvf_update_tunn_param_tlv *p_resp;
2445 	struct vfpf_update_tunn_param_tlv *p_req;
2446 	enum _ecore_status_t rc = ECORE_SUCCESS;
2447 	u8 status = PFVF_STATUS_SUCCESS;
2448 	bool b_update_required = false;
2449 	struct ecore_tunnel_info tunn;
2450 	u16 tunn_feature_mask = 0;
2451 	int i;
2452 
2453 	mbx->offset = (u8 *)mbx->reply_virt;
2454 
2455 	OSAL_MEM_ZERO(&tunn, sizeof(tunn));
2456 	p_req = &mbx->req_virt->tunn_param_update;
2457 
2458 	if (!ecore_iov_pf_validate_tunn_param(p_req)) {
2459 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2460 			   "No tunnel update requested by VF\n");
2461 		status = PFVF_STATUS_FAILURE;
2462 		goto send_resp;
2463 	}
2464 
2465 	tunn.b_update_rx_cls = p_req->update_tun_cls;
2466 	tunn.b_update_tx_cls = p_req->update_tun_cls;
2467 
2468 	ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
2469 				      ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
2470 				      p_req->update_vxlan_port,
2471 				      p_req->vxlan_port);
2472 	ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
2473 				      ECORE_MODE_L2GENEVE_TUNN,
2474 				      p_req->l2geneve_clss,
2475 				      p_req->update_geneve_port,
2476 				      p_req->geneve_port);
2477 	__ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
2478 					ECORE_MODE_IPGENEVE_TUNN,
2479 					p_req->ipgeneve_clss);
2480 	__ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
2481 					ECORE_MODE_L2GRE_TUNN,
2482 					p_req->l2gre_clss);
2483 	__ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
2484 					ECORE_MODE_IPGRE_TUNN,
2485 					p_req->ipgre_clss);
2486 
2487 	/* Even if the PF modifies the VF's request, it should still return an
2488 	 * error in case of a partial or modified configuration, as opposed to
2489 	 * the requested one.
2490 	 */
2491 	rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
2492 						 &b_update_required, &tunn);
2493 
2494 	if (rc != ECORE_SUCCESS)
2495 		status = PFVF_STATUS_FAILURE;
2496 
2497 	/* Proceed only if the ECORE client is willing to update anything */
2498 	if (b_update_required) {
2499 		u16 geneve_port;
2500 
2501 		rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
2502 						 ECORE_SPQ_MODE_EBLOCK,
2503 						 OSAL_NULL);
2504 		if (rc != ECORE_SUCCESS)
2505 			status = PFVF_STATUS_FAILURE;
2506 
2507 		geneve_port = p_tun->geneve_port.port;
2508 		ecore_for_each_vf(p_hwfn, i) {
2509 			ecore_iov_bulletin_set_udp_ports(p_hwfn, i,
2510 							 p_tun->vxlan_port.port,
2511 							 geneve_port);
2512 		}
2513 	}
2514 
2515 send_resp:
2516 	p_resp = ecore_add_tlv(&mbx->offset,
2517 			       CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2518 
2519 	ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2520 	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2521 		      sizeof(struct channel_list_end_tlv));
2522 
2523 	ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
2524 }
2525 
2526 static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
2527 					    struct ecore_ptt *p_ptt,
2528 					    struct ecore_vf_info *p_vf,
2529 					    u32 cid,
2530 					    u8 status)
2531 {
2532 	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2533 	struct pfvf_start_queue_resp_tlv *p_tlv;
2534 	bool b_legacy = false;
2535 	u16 length;
2536 
2537 	mbx->offset = (u8 *)mbx->reply_virt;
2538 
2539 	/* Taking a bigger struct instead of adding a TLV to the list was a
2540 	 * mistake, but one which we're now stuck with, as some older
2541 	 * clients assume the size of the previous response.
2542 	 */
2543 	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2544 	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
2545 		b_legacy = true;
2546 
2547 	if (!b_legacy)
2548 		length = sizeof(*p_tlv);
2549 	else
2550 		length = sizeof(struct pfvf_def_resp_tlv);
2551 
2552 	p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_TXQ, length);
2553 	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2554 		      sizeof(struct channel_list_end_tlv));
2555 
2556 	/* Update the TLV with the response */
2557 	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
2558 		p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
2559 
2560 	ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
2561 }
2562 
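/* Handle the VF's START_TXQ mailbox request - validate the queue and SB
 * indices, acquire a queue-cid and start the Tx queue via ramrod,
 * returning the doorbell offset on success.
 */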
2563 static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
2564 				       struct ecore_ptt *p_ptt,
2565 				       struct ecore_vf_info *vf)
2566 {
2567 	struct ecore_queue_start_common_params params;
2568 	struct ecore_queue_cid_vf_params vf_params;
2569 	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2570 	u8 status = PFVF_STATUS_NO_RESOURCE;
2571 	struct ecore_vf_queue *p_queue;
2572 	struct vfpf_start_txq_tlv *req;
2573 	struct ecore_queue_cid *p_cid;
2574 	struct ecore_sb_info sb_dummy;
2575 	u8 qid_usage_idx, vf_legacy;
2576 	u32 cid = 0;
2577 	enum _ecore_status_t rc;
2578 	u16 pq;
2579 
2580 	OSAL_MEMSET(&params, 0, sizeof(params));
2581 	req = &mbx->req_virt->start_txq;
2582 
2583 	if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid,
2584 				    ECORE_IOV_VALIDATE_Q_NA) ||
2585 	    !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2586 		goto out;
2587 
2588 	qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
2589 	if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2590 		goto out;
2591 
2592 	p_queue = &vf->vf_queues[req->tx_qid];
2593 	if (p_queue->cids[qid_usage_idx].p_cid)
2594 		goto out;
2595 
2596 	vf_legacy = ecore_vf_calculate_legacy(vf);
2597 
2598 	/* Acquire a new queue-cid */
2599 	params.queue_id = p_queue->fw_tx_qid;
2600 	params.vport_id = vf->vport_id;
2601 	params.stats_id = vf->abs_vf_id + 0x10;
2602 
2603 	/* Since IGU index is passed via sb_info, construct a dummy one */
2604 	OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
2605 	sb_dummy.igu_sb_id = req->hw_sb;
2606 	params.p_sb = &sb_dummy;
2607 	params.sb_idx = req->sb_index;
2608 
2609 	OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
2610 	vf_params.vfid = vf->relative_vf_id;
2611 	vf_params.vf_qid = (u8)req->tx_qid;
2612 	vf_params.vf_legacy = vf_legacy;
2613 	vf_params.qid_usage_idx = qid_usage_idx;
2614 
2615 	p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2616 				       &params, false, &vf_params);
2617 	if (p_cid == OSAL_NULL)
2618 		goto out;
2619 
2620 	pq = ecore_get_cm_pq_idx_vf(p_hwfn,
2621 				    vf->relative_vf_id);
2622 	rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
2623 					req->pbl_addr, req->pbl_size, pq);
2624 	if (rc != ECORE_SUCCESS) {
2625 		status = PFVF_STATUS_FAILURE;
2626 		ecore_eth_queue_cid_release(p_hwfn, p_cid);
2627 	} else {
2628 		status = PFVF_STATUS_SUCCESS;
2629 		p_queue->cids[qid_usage_idx].p_cid = p_cid;
2630 		p_queue->cids[qid_usage_idx].b_is_tx = true;
2631 		cid = p_cid->cid;
2632 	}
2633 
2634 out:
2635 	ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf,
2636 					cid, status);
2637 }
2638 
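/* Close a single VF Rx queue after validating that the given queue index
 * and qid-usage index indeed refer to an active Rx queue.
 */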
2639 static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
2640 						   struct ecore_vf_info *vf,
2641 						   u16 rxq_id,
2642 						   u8 qid_usage_idx,
2643 						   bool cqe_completion)
2644 {
2645 	struct ecore_vf_queue *p_queue;
2646 	enum _ecore_status_t rc = ECORE_SUCCESS;
2647 
2648 	if (!ecore_iov_validate_rxq(p_hwfn, vf, rxq_id,
2649 				    ECORE_IOV_VALIDATE_Q_NA)) {
2650 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2651 			   "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
2652 			   vf->relative_vf_id, rxq_id, qid_usage_idx);
2653 		return ECORE_INVAL;
2654 	}
2655 
2656 	p_queue = &vf->vf_queues[rxq_id];
2657 
2658 	/* We've validated the index and the existence of the active RXQ -
2659 	 * now we need to make sure that it's using the correct qid.
2660 	 */
2661 	if (!p_queue->cids[qid_usage_idx].p_cid ||
2662 	    p_queue->cids[qid_usage_idx].b_is_tx) {
2663 		struct ecore_queue_cid *p_cid;
2664 
2665 		p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
2666 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2667 			   "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
2668 			    vf->relative_vf_id, rxq_id, qid_usage_idx,
2669 			    rxq_id, p_cid->qid_usage_idx);
2670 		return ECORE_INVAL;
2671 	}
2672 
2673 	/* Now that we know we have a valid Rx-queue - close it */
2674 	rc = ecore_eth_rx_queue_stop(p_hwfn,
2675 				     p_queue->cids[qid_usage_idx].p_cid,
2676 				     false, cqe_completion);
2677 	if (rc != ECORE_SUCCESS)
2678 		return rc;
2679 
2680 	p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
2681 	vf->num_active_rxqs--;
2682 
2683 	return ECORE_SUCCESS;
2684 }
2685 
2686 static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
2687 						   struct ecore_vf_info *vf,
2688 						   u16 txq_id,
2689 						   u8 qid_usage_idx)
2690 {
2691 	struct ecore_vf_queue *p_queue;
2692 	enum _ecore_status_t rc = ECORE_SUCCESS;
2693 
2694 	if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
2695 				    ECORE_IOV_VALIDATE_Q_NA))
2696 		return ECORE_INVAL;
2697 
2698 	p_queue = &vf->vf_queues[txq_id];
2699 	if (!p_queue->cids[qid_usage_idx].p_cid ||
2700 	    !p_queue->cids[qid_usage_idx].b_is_tx)
2701 		return ECORE_INVAL;
2702 
2703 	rc = ecore_eth_tx_queue_stop(p_hwfn,
2704 				     p_queue->cids[qid_usage_idx].p_cid);
2705 	if (rc != ECORE_SUCCESS)
2706 		return rc;
2707 
2708 	p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
2709 	return ECORE_SUCCESS;
2710 }
2711 
2712 static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
2713 				       struct ecore_ptt *p_ptt,
2714 				       struct ecore_vf_info *vf)
2715 {
2716 	u16 length = sizeof(struct pfvf_def_resp_tlv);
2717 	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2718 	u8 status = PFVF_STATUS_FAILURE;
2719 	struct vfpf_stop_rxqs_tlv *req;
2720 	u8 qid_usage_idx;
2721 	enum _ecore_status_t rc;
2722 
2723 	/* Starting with CHANNEL_TLV_QID, it's assumed the 'num_rxqs'
2724 	 * would be one. Since no older ecore passed multiple queues
2725 	 * using this API, sanitize the value.
2726 	 */
2727 	req = &mbx->req_virt->stop_rxqs;
2728 	if (req->num_rxqs != 1) {
2729 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2730 			   "Odd; VF[%d] tried stopping multiple Rx queues\n",
2731 			   vf->relative_vf_id);
2732 		status = PFVF_STATUS_NOT_SUPPORTED;
2733 		goto out;
2734 	}
2735 
2736 	/* Find which qid-index is associated with the queue */
2737 	qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2738 	if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2739 		goto out;
2740 
2741 	rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2742 				    qid_usage_idx, req->cqe_completion);
2743 	if (rc == ECORE_SUCCESS)
2744 		status = PFVF_STATUS_SUCCESS;
2745 out:
2746 	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2747 			       length, status);
2748 }
2749 
2750 static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
2751 				       struct ecore_ptt *p_ptt,
2752 				       struct ecore_vf_info *vf)
2753 {
2754 	u16 length = sizeof(struct pfvf_def_resp_tlv);
2755 	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2756 	u8 status = PFVF_STATUS_FAILURE;
2757 	struct vfpf_stop_txqs_tlv *req;
2758 	u8 qid_usage_idx;
2759 	enum _ecore_status_t rc;
2760 
2761 	/* Starting with CHANNEL_TLV_QID, it's assumed the 'num_txqs'
2762 	 * would be one. Since no older ecore passed multiple queues
2763 	 * using this API, sanitize the value.
2764 	 */
2765 	req = &mbx->req_virt->stop_txqs;
2766 	if (req->num_txqs != 1) {
2767 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2768 			   "Odd; VF[%d] tried stopping multiple Tx queues\n",
2769 			   vf->relative_vf_id);
2770 		status = PFVF_STATUS_NOT_SUPPORTED;
2771 		goto out;
2772 	}
2773 
2774 	/* Find which qid-index is associated with the queue */
2775 	qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
2776 	if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2777 		goto out;
2778 
2779 	rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid,
2780 				    qid_usage_idx);
2781 	if (rc == ECORE_SUCCESS)
2782 		status = PFVF_STATUS_SUCCESS;
2783 
2784 out:
2785 	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2786 			       length, status);
2787 }
2788 
2789 static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
2790 					 struct ecore_ptt *p_ptt,
2791 					 struct ecore_vf_info *vf)
2792 {
2793 	struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
2794 	u16 length = sizeof(struct pfvf_def_resp_tlv);
2795 	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2796 	struct vfpf_update_rxq_tlv *req;
2797 	u8 status = PFVF_STATUS_FAILURE;
2798 	u8 complete_event_flg;
2799 	u8 complete_cqe_flg;
2800 	u8 qid_usage_idx;
2801 	enum _ecore_status_t rc;
2802 	u16 i;
2803 
2804 	req = &mbx->req_virt->update_rxq;
2805 	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2806 	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2807 
2808 	qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2809 	if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2810 		goto out;
2811 
2812 	/* Starting with the addition of CHANNEL_TLV_QID, this API started
2813 	 * expecting a single queue at a time. Validate this.
2814 	 */
2815 	if ((vf->acquire.vfdev_info.capabilities &
2816 	     VFPF_ACQUIRE_CAP_QUEUE_QIDS) &&
2817 	     req->num_rxqs != 1) {
2818 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2819 			   "VF[%d] supports QIDs but sends multiple queues\n",
2820 			   vf->relative_vf_id);
2821 		goto out;
2822 	}
2823 
2824 	/* Validate inputs - for the legacy case this is still true since
2825 	 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
2826 	 */
2827 	for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
2828 		if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
2829 					    ECORE_IOV_VALIDATE_Q_NA) ||
2830 		    !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
2831 		    vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
2832 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2833 				   "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2834 				   vf->relative_vf_id, req->rx_qid,
2835 				   req->num_rxqs);
2836 			goto out;
2837 		}
2838 	}
2839 
2840 	for (i = 0; i < req->num_rxqs; i++) {
2841 		u16 qid = req->rx_qid + i;
2842 
2843 		handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
2844 	}
2845 
2846 	rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2847 					   req->num_rxqs,
2848 					   complete_cqe_flg,
2849 					   complete_event_flg,
2850 					   ECORE_SPQ_MODE_EBLOCK,
2851 					   OSAL_NULL);
2852 	if (rc != ECORE_SUCCESS)
2853 		goto out;
2854 
2855 	status = PFVF_STATUS_SUCCESS;
2856 out:
2857 	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2858 			       length, status);
2859 }
2860 
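/* Walk the TLV list in a request buffer and return the first TLV of the
 * requested type, or OSAL_NULL if it isn't found or the list is
 * malformed.
 */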
2861 void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
2862 				 void *p_tlvs_list, u16 req_type)
2863 {
2864 	struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2865 	int len = 0;
2866 
2867 	do {
2868 		if (!p_tlv->length) {
2869 			DP_NOTICE(p_hwfn, true, "Zero length TLV found\n");
2870 			return OSAL_NULL;
2871 		}
2872 
2873 		if (p_tlv->type == req_type) {
2874 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2875 				   "Extended tlv type %s, length %d found\n",
2876 				   ecore_channel_tlvs_string[p_tlv->type],
2877 				   p_tlv->length);
2878 			return p_tlv;
2879 		}
2880 
2881 		len += p_tlv->length;
2882 		p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2883 
2884 		if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2885 			DP_NOTICE(p_hwfn, true,
2886 				  "TLVs have overrun the buffer size\n");
2887 			return OSAL_NULL;
2888 		}
2889 	} while (p_tlv->type != CHANNEL_TLV_LIST_END);
2890 
2891 	return OSAL_NULL;
2892 }
2893 
2894 static void
2895 ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
2896 			      struct ecore_sp_vport_update_params *p_data,
2897 			      struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2898 {
2899 	struct vfpf_vport_update_activate_tlv *p_act_tlv;
2900 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2901 
2902 	p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2903 	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2904 	if (!p_act_tlv)
2905 		return;
2906 
2907 	p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2908 	p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2909 	p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2910 	p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2911 	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
2912 }
2913 
2914 static void
2915 ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
2916 			       struct ecore_sp_vport_update_params *p_data,
2917 			       struct ecore_vf_info *p_vf,
2918 			       struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2919 {
2920 	struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2921 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2922 
2923 	p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2924 	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2925 	if (!p_vlan_tlv)
2926 		return;
2927 
2928 	p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2929 
2930 	/* Ignore the VF request if we're forcing a vlan */
2931 	if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
2932 		p_data->update_inner_vlan_removal_flg = 1;
2933 		p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2934 	}
2935 
2936 	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;
2937 }
2938 
2939 static void
2940 ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
2941 			      struct ecore_sp_vport_update_params *p_data,
2942 			      struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2943 {
2944 	struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2945 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2946 
2947 	p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2948 	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2949 	if (!p_tx_switch_tlv)
2950 		return;
2951 
2952 #ifndef ASIC_ONLY
2953 	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2954 		DP_NOTICE(p_hwfn, false,
2955 			  "FPGA: Ignore tx-switching configuration originating"
2956 			  " from VFs\n");
2957 		return;
2958 	}
2959 #endif
2960 
2961 	p_data->update_tx_switching_flg = 1;
2962 	p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
2963 	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
2964 }
2965 
2966 static void
2967 ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
2968 				    struct ecore_sp_vport_update_params *p_data,
2969 				    struct ecore_iov_vf_mbx *p_mbx,
2970 				    u16 *tlvs_mask)
2971 {
2972 	struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
2973 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
2974 
2975 	p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
2976 	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2977 	if (!p_mcast_tlv)
2978 		return;
2979 
2980 	p_data->update_approx_mcast_flg = 1;
2981 	OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
2982 		    sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2983 	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
2984 }
2985 
2986 static void
2987 ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
2988 				struct ecore_sp_vport_update_params *p_data,
2989 				struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2990 {
2991 	struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags;
2992 	struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
2993 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
2994 
2995 	p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
2996 	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2997 	if (!p_accept_tlv)
2998 		return;
2999 
3000 	p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
3001 	p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
3002 	p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
3003 	p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
3004 	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
3005 }
3006 
3007 static void
3008 ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
3009 				    struct ecore_sp_vport_update_params *p_data,
3010 				    struct ecore_iov_vf_mbx *p_mbx,
3011 				    u16 *tlvs_mask)
3012 {
3013 	struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
3014 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
3015 
3016 	p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
3017 	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3018 	if (!p_accept_any_vlan)
3019 		return;
3020 
3021 	p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
3022 	p_data->update_accept_any_vlan_flg =
3023 			p_accept_any_vlan->update_accept_any_vlan_flg;
3024 	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
3025 }
3026 
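/* Translate the VF's RSS TLV into ecore RSS parameters, validating that
 * every indirection-table entry refers to an enabled VF Rx queue before
 * accepting the request.
 */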
3027 static void
3028 ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
3029 			      struct ecore_vf_info *vf,
3030 			      struct ecore_sp_vport_update_params *p_data,
3031 			      struct ecore_rss_params *p_rss,
3032 			      struct ecore_iov_vf_mbx *p_mbx,
3033 			      u16 *tlvs_mask, u16 *tlvs_accepted)
3034 {
3035 	struct vfpf_vport_update_rss_tlv *p_rss_tlv;
3036 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
3037 	bool b_reject = false;
3038 	u16 table_size;
3039 	u16 i, q_idx;
3040 
3041 	p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
3042 	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3043 	if (!p_rss_tlv) {
3044 		p_data->rss_params = OSAL_NULL;
3045 		return;
3046 	}
3047 
3048 	OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
3049 
3050 	p_rss->update_rss_config =
3051 	    !!(p_rss_tlv->update_rss_flags &
3052 		VFPF_UPDATE_RSS_CONFIG_FLAG);
3053 	p_rss->update_rss_capabilities =
3054 	    !!(p_rss_tlv->update_rss_flags &
3055 		VFPF_UPDATE_RSS_CAPS_FLAG);
3056 	p_rss->update_rss_ind_table =
3057 	    !!(p_rss_tlv->update_rss_flags &
3058 		VFPF_UPDATE_RSS_IND_TABLE_FLAG);
3059 	p_rss->update_rss_key =
3060 	    !!(p_rss_tlv->update_rss_flags &
3061 		VFPF_UPDATE_RSS_KEY_FLAG);
3062 
3063 	p_rss->rss_enable = p_rss_tlv->rss_enable;
3064 	p_rss->rss_eng_id = vf->rss_eng_id;
3065 	p_rss->rss_caps = p_rss_tlv->rss_caps;
3066 	p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
3067 	OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
3068 		    sizeof(p_rss->rss_key));
3069 
3070 	table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
3071 				(1 << p_rss_tlv->rss_table_size_log));
3072 
3073 	for (i = 0; i < table_size; i++) {
3074 		struct ecore_queue_cid *p_cid;
3075 
3076 		q_idx = p_rss_tlv->rss_ind_table[i];
3077 		if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx,
3078 					    ECORE_IOV_VALIDATE_Q_ENABLE)) {
3079 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3080 				   "VF[%d]: Omitting RSS due to wrong queue %04x\n",
3081 				   vf->relative_vf_id, q_idx);
3082 			b_reject = true;
3083 			goto out;
3084 		}
3085 
3086 		p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
3087 		p_rss->rss_ind_table[i] = p_cid;
3088 	}
3089 
3090 	p_data->rss_params = p_rss;
3091 out:
3092 	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
3093 	if (!b_reject)
3094 		*tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
3095 }
3096 
3097 static void
3098 ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
3099 				  struct ecore_sp_vport_update_params *p_data,
3100 				  struct ecore_sge_tpa_params *p_sge_tpa,
3101 				  struct ecore_iov_vf_mbx *p_mbx,
3102 				  u16 *tlvs_mask)
3103 {
3104 	struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
3105 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
3106 
3107 	p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
3108 	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3109 
3110 	if (!p_sge_tpa_tlv) {
3111 		p_data->sge_tpa_params = OSAL_NULL;
3112 		return;
3113 	}
3114 
3115 	OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));
3116 
3117 	p_sge_tpa->update_tpa_en_flg =
3118 	    !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
3119 	p_sge_tpa->update_tpa_param_flg =
3120 	    !!(p_sge_tpa_tlv->update_sge_tpa_flags &
3121 		VFPF_UPDATE_TPA_PARAM_FLAG);
3122 
3123 	p_sge_tpa->tpa_ipv4_en_flg =
3124 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
3125 	p_sge_tpa->tpa_ipv6_en_flg =
3126 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
3127 	p_sge_tpa->tpa_pkt_split_flg =
3128 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
3129 	p_sge_tpa->tpa_hdr_data_split_flg =
3130 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
3131 	p_sge_tpa->tpa_gro_consistent_flg =
3132 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
3133 
3134 	p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
3135 	p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
3136 	p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
3137 	p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
3138 	p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
3139 
3140 	p_data->sge_tpa_params = p_sge_tpa;
3141 
3142 	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
3143 }
3144 
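/* Handle the VF's VPORT_UPDATE mailbox request - collect all extended
 * TLVs into a single vport-update ramrod, letting the upper layer
 * accept or reject individual TLVs.
 */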
3145 static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
3146 					  struct ecore_ptt *p_ptt,
3147 					  struct ecore_vf_info *vf)
3148 {
3149 	struct ecore_rss_params *p_rss_params = OSAL_NULL;
3150 	struct ecore_sp_vport_update_params params;
3151 	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3152 	struct ecore_sge_tpa_params sge_tpa_params;
3153 	u16 tlvs_mask = 0, tlvs_accepted = 0;
3154 	u8 status = PFVF_STATUS_SUCCESS;
3155 	u16 length;
3156 	enum _ecore_status_t rc;
3157 
3158 	/* Validate the PF can send such a request */
3159 	if (!vf->vport_instance) {
3160 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3161 			   "No VPORT instance available for VF[%d],"
3162 			   " failing vport update\n",
3163 			   vf->abs_vf_id);
3164 		status = PFVF_STATUS_FAILURE;
3165 		goto out;
3166 	}
3167 
3168 	p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
3169 	if (p_rss_params == OSAL_NULL) {
3170 		status = PFVF_STATUS_FAILURE;
3171 		goto out;
3172 	}
3173 
3174 	OSAL_MEMSET(&params, 0, sizeof(params));
3175 	params.opaque_fid = vf->opaque_fid;
3176 	params.vport_id = vf->vport_id;
3177 	params.rss_params = OSAL_NULL;
3178 
3179 	/* Search for extended tlvs list and update values
3180 	 * from VF in struct ecore_sp_vport_update_params.
3181 	 */
3182 	ecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
3183 	ecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
3184 	ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
3185 	ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
3186 	ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
3187 	ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
3188 	ecore_iov_vp_update_sge_tpa_param(p_hwfn, &params,
3189 					  &sge_tpa_params, mbx, &tlvs_mask);
3190 
3191 	tlvs_accepted = tlvs_mask;
3192 
3193 	/* Some of the extended TLVs need to be validated first; In that case,
3194 	 * they can update the mask without updating the accepted [so the PF
3195 	 * can communicate to the VF that it has rejected the request].
3196 	 */
3197 	ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
3198 				      mbx, &tlvs_mask, &tlvs_accepted);
3199 
3200 	/* Just log a message if there isn't a single extended TLV in the buffer.
3201 	 * Once the VF requests every vport-update ramrod feature as extended
3202 	 * TLVs in the buffer, an error can be returned in the response when no
3203 	 * extended TLV is present.
3204 	 */
3205 	if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
3206 				     &params, &tlvs_accepted) !=
3207 	    ECORE_SUCCESS) {
3208 		tlvs_accepted = 0;
3209 		status = PFVF_STATUS_NOT_SUPPORTED;
3210 		goto out;
3211 	}
3212 
3213 	if (!tlvs_accepted) {
3214 		if (tlvs_mask)
3215 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3216 				   "Upper-layer prevents said VF"
3217 				   " configuration\n");
3218 		else
3219 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3220 				   "No feature tlvs found for vport update\n");
3221 		status = PFVF_STATUS_NOT_SUPPORTED;
3222 		goto out;
3223 	}
3224 
3225 	rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
3226 				   OSAL_NULL);
3227 
3228 	if (rc)
3229 		status = PFVF_STATUS_FAILURE;
3230 
3231 out:
3232 	OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
3233 	length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
3234 						    tlvs_mask, tlvs_accepted);
3235 	ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
3236 }
3237 
3238 static enum _ecore_status_t
3239 ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
3240 				struct ecore_vf_info *p_vf,
3241 				struct ecore_filter_ucast *p_params)
3242 {
3243 	int i;
3244 
3245 	/* First remove entries and then add new ones */
3246 	if (p_params->opcode == ECORE_FILTER_REMOVE) {
3247 		for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3248 			if (p_vf->shadow_config.vlans[i].used &&
3249 			    p_vf->shadow_config.vlans[i].vid ==
3250 			    p_params->vlan) {
3251 				p_vf->shadow_config.vlans[i].used = false;
3252 				break;
3253 			}
3254 		if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3255 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3256 				   "VF [%d] - Tries to remove a non-existent"
3257 				   " vlan\n",
3258 				   p_vf->relative_vf_id);
3259 			return ECORE_INVAL;
3260 		}
3261 	} else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3262 		   p_params->opcode == ECORE_FILTER_FLUSH) {
3263 		for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3264 			p_vf->shadow_config.vlans[i].used = false;
3265 	}
3266 
3267 	/* In forced mode, we're willing to remove entries - but we don't add
3268 	 * new ones.
3269 	 */
3270 	if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
3271 		return ECORE_SUCCESS;
3272 
3273 	if (p_params->opcode == ECORE_FILTER_ADD ||
3274 	    p_params->opcode == ECORE_FILTER_REPLACE) {
3275 		for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
3276 			if (p_vf->shadow_config.vlans[i].used)
3277 				continue;
3278 
3279 			p_vf->shadow_config.vlans[i].used = true;
3280 			p_vf->shadow_config.vlans[i].vid = p_params->vlan;
3281 			break;
3282 		}
3283 
3284 		if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3285 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3286 				   "VF [%d] - Tries to configure more than %d"
3287 				   " vlan filters\n",
3288 				   p_vf->relative_vf_id,
3289 				   ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
3290 			return ECORE_INVAL;
3291 		}
3292 	}
3293 
3294 	return ECORE_SUCCESS;
3295 }
3296 
3297 static enum _ecore_status_t
3298 ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
3299 			       struct ecore_vf_info *p_vf,
3300 			       struct ecore_filter_ucast *p_params)
3301 {
3302 	char empty_mac[ETH_ALEN];
3303 	int i;
3304 
3305 	OSAL_MEM_ZERO(empty_mac, ETH_ALEN);
3306 
3307 	/* If we're in forced-mode, we don't allow any change */
3308 	/* TODO - this would change if we were ever to implement logic for
3309 	 * removing a forced MAC altogether [in which case, like for vlans,
3310 	 * we should be able to re-trace previous configuration].
3311 	 */
3312 	if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
3313 		return ECORE_SUCCESS;
3314 
3315 	/* First remove entries and then add new ones */
3316 	if (p_params->opcode == ECORE_FILTER_REMOVE) {
3317 		for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3318 			if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3319 					 p_params->mac, ETH_ALEN)) {
3320 				OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
3321 					      ETH_ALEN);
3322 				break;
3323 			}
3324 		}
3325 
3326 		if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3327 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3328 				   "MAC isn't configured\n");
3329 			return ECORE_INVAL;
3330 		}
3331 	} else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3332 		   p_params->opcode == ECORE_FILTER_FLUSH) {
3333 		for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
3334 			OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
3335 	}
3336 
3337 	/* List the new MAC address */
3338 	if (p_params->opcode != ECORE_FILTER_ADD &&
3339 	    p_params->opcode != ECORE_FILTER_REPLACE)
3340 		return ECORE_SUCCESS;
3341 
3342 	for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3343 		if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3344 				 empty_mac, ETH_ALEN)) {
3345 			OSAL_MEMCPY(p_vf->shadow_config.macs[i],
3346 				    p_params->mac, ETH_ALEN);
3347 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3348 				   "Added MAC at shadow entry %d\n", i);
3349 			break;
3350 		}
3351 	}
3352 
3353 	if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3354 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3355 			   "No available place for MAC\n");
3356 		return ECORE_INVAL;
3357 	}
3358 
3359 	return ECORE_SUCCESS;
3360 }
3361 
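/* Dispatch a unicast filter request to the MAC and/or VLAN shadow update,
 * depending on the filter type.
 */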
3362 static enum _ecore_status_t
3363 ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
3364 				   struct ecore_vf_info *p_vf,
3365 				   struct ecore_filter_ucast *p_params)
3366 {
3367 	enum _ecore_status_t rc = ECORE_SUCCESS;
3368 
3369 	if (p_params->type == ECORE_FILTER_MAC) {
3370 		rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
3371 		if (rc != ECORE_SUCCESS)
3372 			return rc;
3373 	}
3374 
3375 	if (p_params->type == ECORE_FILTER_VLAN)
3376 		rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
3377 
3378 	return rc;
3379 }
3380 
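/* Handle the VF's CHANNEL_TLV_UCAST_FILTER request: update the shadow
 * configuration, enforce forced-MAC/forced-VLAN restrictions from the
 * bulletin, and only then configure the filter via a ucast ramrod.
 */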
3381 static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
3382 					  struct ecore_ptt *p_ptt,
3383 					  struct ecore_vf_info *vf)
3384 {
3385 	struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
3386 	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3387 	struct vfpf_ucast_filter_tlv *req;
3388 	u8 status = PFVF_STATUS_SUCCESS;
3389 	struct ecore_filter_ucast params;
3390 	enum _ecore_status_t rc;
3391 
3392 	/* Prepare the unicast filter params */
3393 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
3394 	req = &mbx->req_virt->ucast_filter;
3395 	params.opcode = (enum ecore_filter_opcode)req->opcode;
3396 	params.type = (enum ecore_filter_ucast_type)req->type;
3397 
3398 	/* @@@TBD - We might need logic on HV side in determining this */
3399 	params.is_rx_filter = 1;
3400 	params.is_tx_filter = 1;
3401 	params.vport_to_remove_from = vf->vport_id;
3402 	params.vport_to_add_to = vf->vport_id;
3403 	OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
3404 	params.vlan = req->vlan;
3405 
3406 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3407 		   "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x]"
3408 		   " MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
3409 		   vf->abs_vf_id, params.opcode, params.type,
3410 		   params.is_rx_filter ? "RX" : "",
3411 		   params.is_tx_filter ? "TX" : "",
3412 		   params.vport_to_add_to,
3413 		   params.mac[0], params.mac[1], params.mac[2],
3414 		   params.mac[3], params.mac[4], params.mac[5], params.vlan);
3415 
3416 	if (!vf->vport_instance) {
3417 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3418 			   "No VPORT instance available for VF[%d],"
3419 			   " failing ucast MAC configuration\n",
3420 			   vf->abs_vf_id);
3421 		status = PFVF_STATUS_FAILURE;
3422 		goto out;
3423 	}
3424 
3425 	/* Update the shadow copy of the VF configuration. If the shadow
3426 	 * indicates the action should be blocked, return success to the VF
3427 	 * to imitate the firmware behaviour in such a case.
3428 	 */
3429 	if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
3430 	    ECORE_SUCCESS)
3431 		goto out;
3432 
3433 	/* Determine if the unicast filtering is acceptable to the PF */
3434 	if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
3435 	    (params.type == ECORE_FILTER_VLAN ||
3436 	     params.type == ECORE_FILTER_MAC_VLAN)) {
3437 		/* Once VLAN is forced or PVID is set, do not allow
3438 		 * to add/replace any further VLANs.
3439 		 */
3440 		if (params.opcode == ECORE_FILTER_ADD ||
3441 		    params.opcode == ECORE_FILTER_REPLACE)
3442 			status = PFVF_STATUS_FORCED;
3443 		goto out;
3444 	}
3445 
3446 	if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
3447 	    (params.type == ECORE_FILTER_MAC ||
3448 	     params.type == ECORE_FILTER_MAC_VLAN)) {
3449 		if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
3450 		    (params.opcode != ECORE_FILTER_ADD &&
3451 		     params.opcode != ECORE_FILTER_REPLACE))
3452 			status = PFVF_STATUS_FORCED;
3453 		goto out;
3454 	}
3455 
3456 	rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params);
3457 	if (rc == ECORE_EXISTS) {
3458 		goto out;
3459 	} else if (rc == ECORE_INVAL) {
3460 		status = PFVF_STATUS_FAILURE;
3461 		goto out;
3462 	}
3463 
3464 	rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
3465 				       ECORE_SPQ_MODE_CB, OSAL_NULL);
3466 	if (rc)
3467 		status = PFVF_STATUS_FAILURE;
3468 
3469 out:
3470 	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3471 			       sizeof(struct pfvf_def_resp_tlv), status);
3472 }
3473 
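/* Handle CHANNEL_TLV_INT_CLEANUP - re-initialize the VF's IGU status blocks
 * to their pure runtime state and acknowledge the VF.
 */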
3474 static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
3475 					 struct ecore_ptt *p_ptt,
3476 					 struct ecore_vf_info *vf)
3477 {
3478 	int i;
3479 
3480 	/* Reset the SBs */
3481 	for (i = 0; i < vf->num_sbs; i++)
3482 		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3483 						  vf->igu_sbs[i],
3484 						  vf->opaque_fid, false);
3485 
3486 	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3487 			       sizeof(struct pfvf_def_resp_tlv),
3488 			       PFVF_STATUS_SUCCESS);
3489 }
3490 
3491 static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
3492 				   struct ecore_ptt *p_ptt,
3493 				   struct ecore_vf_info *vf)
3494 {
3495 	u16 length = sizeof(struct pfvf_def_resp_tlv);
3496 	u8 status = PFVF_STATUS_SUCCESS;
3497 
3498 	/* Disable Interrupts for VF */
3499 	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3500 
3501 	/* Reset Permission table */
3502 	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3503 
3504 	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3505 			       length, status);
3506 }
3507 
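/* Handle CHANNEL_TLV_RELEASE - clean up the VF's software state and, if the
 * VF was started, send a VF-stop ramrod before acknowledging the request.
 */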
3508 static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
3509 				     struct ecore_ptt *p_ptt,
3510 				     struct ecore_vf_info *p_vf)
3511 {
3512 	u16 length = sizeof(struct pfvf_def_resp_tlv);
3513 	u8 status = PFVF_STATUS_SUCCESS;
3514 	enum _ecore_status_t rc = ECORE_SUCCESS;
3515 
3516 	ecore_iov_vf_cleanup(p_hwfn, p_vf);
3517 
3518 	if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3519 		/* Stopping the VF */
3520 		rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3521 				      p_vf->opaque_fid);
3522 
3523 		if (rc != ECORE_SUCCESS) {
3524 			DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
3525 			       rc);
3526 			status = PFVF_STATUS_FAILURE;
3527 		}
3528 
3529 		p_vf->state = VF_STOPPED;
3530 	}
3531 
3532 	ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
3533 			       length, status);
3534 }
3535 
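/* Handle CHANNEL_TLV_COALESCE_READ - read the current Rx or Tx interrupt
 * coalescing value for the queue requested by the VF and return it in the
 * response TLV.
 */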
3536 static void ecore_iov_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
3537 					 struct ecore_ptt *p_ptt,
3538 					 struct ecore_vf_info *p_vf)
3539 {
3540 	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
3541 	struct pfvf_read_coal_resp_tlv *p_resp;
3542 	struct vfpf_read_coal_req_tlv *req;
3543 	u8 status = PFVF_STATUS_FAILURE;
3544 	struct ecore_vf_queue *p_queue;
3545 	struct ecore_queue_cid *p_cid;
3546 	enum _ecore_status_t rc = ECORE_SUCCESS;
3547 	u16 coal = 0, qid, i;
3548 	bool b_is_rx;
3549 
3550 	mbx->offset = (u8 *)mbx->reply_virt;
3551 	req = &mbx->req_virt->read_coal_req;
3552 
3553 	qid = req->qid;
3554 	b_is_rx = req->is_rx ? true : false;
3555 
3556 	if (b_is_rx) {
3557 		if (!ecore_iov_validate_rxq(p_hwfn, p_vf, qid,
3558 					    ECORE_IOV_VALIDATE_Q_ENABLE)) {
3559 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3560 				   "VF[%d]: Invalid Rx queue_id = %d\n",
3561 				   p_vf->abs_vf_id, qid);
3562 			goto send_resp;
3563 		}
3564 
3565 		p_cid = ecore_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
3566 		rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
3567 		if (rc != ECORE_SUCCESS)
3568 			goto send_resp;
3569 	} else {
3570 		if (!ecore_iov_validate_txq(p_hwfn, p_vf, qid,
3571 					    ECORE_IOV_VALIDATE_Q_ENABLE)) {
3572 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3573 				   "VF[%d]: Invalid Tx queue_id = %d\n",
3574 				   p_vf->abs_vf_id, qid);
3575 			goto send_resp;
3576 		}
3577 		for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3578 			p_queue = &p_vf->vf_queues[qid];
3579 			if ((p_queue->cids[i].p_cid == OSAL_NULL) ||
3580 			    (!p_queue->cids[i].b_is_tx))
3581 				continue;
3582 
3583 			p_cid = p_queue->cids[i].p_cid;
3584 
3585 			rc = ecore_get_txq_coalesce(p_hwfn, p_ptt,
3586 						    p_cid, &coal);
3587 			if (rc != ECORE_SUCCESS)
3588 				goto send_resp;
3589 			break;
3590 		}
3591 	}
3592 
3593 	status = PFVF_STATUS_SUCCESS;
3594 
3595 send_resp:
3596 	p_resp = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_COALESCE_READ,
3597 			       sizeof(*p_resp));
3598 	p_resp->coal = coal;
3599 
3600 	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
3601 		      sizeof(struct channel_list_end_tlv));
3602 
3603 	ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
3604 }
3605 
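/* Handle CHANNEL_TLV_COALESCE_UPDATE - validate the queue id and apply the
 * requested Rx/Tx coalescing values to the VF's queue CIDs.
 */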
3606 static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
3607 					 struct ecore_ptt *p_ptt,
3608 					 struct ecore_vf_info *vf)
3609 {
3610 	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3611 	enum _ecore_status_t rc = ECORE_SUCCESS;
3612 	struct vfpf_update_coalesce *req;
3613 	u8 status = PFVF_STATUS_FAILURE;
3614 	struct ecore_queue_cid *p_cid;
3615 	u16 rx_coal, tx_coal;
3616 	u16 qid;
3617 	int i;
3618 
3619 	req = &mbx->req_virt->update_coalesce;
3620 
3621 	rx_coal = req->rx_coal;
3622 	tx_coal = req->tx_coal;
3623 	qid = req->qid;
3624 
3625 	if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
3626 				    ECORE_IOV_VALIDATE_Q_ENABLE) &&
3627 	    rx_coal) {
3628 		DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
3629 		       vf->abs_vf_id, qid);
3630 		goto out;
3631 	}
3632 
3633 	if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
3634 				    ECORE_IOV_VALIDATE_Q_ENABLE) &&
3635 	    tx_coal) {
3636 		DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
3637 		       vf->abs_vf_id, qid);
3638 		goto out;
3639 	}
3640 
3641 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3642 		   "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3643 		   vf->abs_vf_id, rx_coal, tx_coal, qid);
3644 
3645 	if (rx_coal) {
3646 		p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3647 
3648 		rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3649 		if (rc != ECORE_SUCCESS) {
3650 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3651 				   "VF[%d]: Unable to set rx queue = %d coalesce\n",
3652 				   vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3653 			goto out;
3654 		}
3655 		vf->rx_coal = rx_coal;
3656 	}
3657 
3658 	/* TODO - in future, it might be possible to pass this in a per-cid
3659 	 * granularity. For now, do this for all Tx queues.
3660 	 */
3661 	if (tx_coal) {
3662 		struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
3663 
3664 		for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3665 			if (p_queue->cids[i].p_cid == OSAL_NULL)
3666 				continue;
3667 
3668 			if (!p_queue->cids[i].b_is_tx)
3669 				continue;
3670 
3671 			rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3672 						    p_queue->cids[i].p_cid);
3673 			if (rc != ECORE_SUCCESS) {
3674 				DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3675 					   "VF[%d]: Unable to set tx queue coalesce\n",
3676 					   vf->abs_vf_id);
3677 				goto out;
3678 			}
3679 		}
3680 		vf->tx_coal = tx_coal;
3681 	}
3682 
3683 	status = PFVF_STATUS_SUCCESS;
3684 out:
3685 	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
3686 			       sizeof(struct pfvf_def_resp_tlv), status);
3687 }
3688 
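/* PF-side API for setting a VF queue's interrupt coalescing without going
 * through the VF-PF channel. A minimal usage sketch, with hypothetical
 * values (assumed to be in usec, as with the regular coalesce config):
 *
 *	enum _ecore_status_t rc;
 *
 *	rc = ecore_iov_pf_configure_vf_queue_coalesce(p_hwfn, 64, 128,
 *						      vf_id, qid);
 *	if (rc != ECORE_SUCCESS)
 *		DP_NOTICE(p_hwfn, false, "Coalesce config failed\n");
 */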
3689 enum _ecore_status_t
3690 ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
3691 					 u16 rx_coal, u16 tx_coal,
3692 					 u16 vf_id, u16 qid)
3693 {
3694 	struct ecore_queue_cid *p_cid;
3695 	struct ecore_vf_info *vf;
3696 	struct ecore_ptt *p_ptt;
3697 	int i, rc = 0;
3698 
3699 	if (!ecore_iov_is_valid_vfid(p_hwfn, vf_id, true, true)) {
3700 		DP_NOTICE(p_hwfn, true,
3701 			  "VF[%d] - Can not set coalescing: VF is not active\n",
3702 			  vf_id);
3703 		return ECORE_INVAL;
3704 	}
3705 
3706 	vf = &p_hwfn->pf_iov_info->vfs_array[vf_id];
3707 	p_ptt = ecore_ptt_acquire(p_hwfn);
3708 	if (!p_ptt)
3709 		return ECORE_AGAIN;
3710 
3711 	if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
3712 				    ECORE_IOV_VALIDATE_Q_ENABLE) &&
3713 	    rx_coal) {
3714 		DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
3715 		       vf->abs_vf_id, qid);
3716 		goto out;
3717 	}
3718 
3719 	if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
3720 				    ECORE_IOV_VALIDATE_Q_ENABLE) &&
3721 	    tx_coal) {
3722 		DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
3723 		       vf->abs_vf_id, qid);
3724 		goto out;
3725 	}
3726 
3727 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3728 		   "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3729 		   vf->abs_vf_id, rx_coal, tx_coal, qid);
3730 
3731 	if (rx_coal) {
3732 		p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3733 
3734 		rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3735 		if (rc != ECORE_SUCCESS) {
3736 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3737 				   "VF[%d]: Unable to set rx queue = %d coalesce\n",
3738 				   vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3739 			goto out;
3740 		}
3741 		vf->rx_coal = rx_coal;
3742 	}
3743 
3744 	/* TODO - in future, it might be possible to pass this in a per-cid
3745 	 * granularity. For now, do this for all Tx queues.
3746 	 */
3747 	if (tx_coal) {
3748 		struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
3749 
3750 		for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3751 			if (p_queue->cids[i].p_cid == OSAL_NULL)
3752 				continue;
3753 
3754 			if (!p_queue->cids[i].b_is_tx)
3755 				continue;
3756 
3757 			rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3758 						    p_queue->cids[i].p_cid);
3759 			if (rc != ECORE_SUCCESS) {
3760 				DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3761 					   "VF[%d]: Unable to set tx queue coalesce\n",
3762 					   vf->abs_vf_id);
3763 				goto out;
3764 			}
3765 		}
3766 		vf->tx_coal = tx_coal;
3767 	}
3768 
3769 out:
3770 	ecore_ptt_release(p_hwfn, p_ptt);
3771 
3772 	return rc;
3773 }
3774 
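/* FLR helper: pretend to the VF's FID and poll the DORQ usage counter until
 * it drains (up to ~1 second), so no doorbells are in flight before cleanup.
 */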
3775 static enum _ecore_status_t
3776 ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
3777 			   struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3778 {
3779 	int cnt;
3780 	u32 val;
3781 
3782 	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
3783 
3784 	for (cnt = 0; cnt < 50; cnt++) {
3785 		val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
3786 		if (!val)
3787 			break;
3788 		OSAL_MSLEEP(20);
3789 	}
3790 	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
3791 
3792 	if (cnt == 50) {
3793 		DP_ERR(p_hwfn,
3794 		       "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3795 		       p_vf->abs_vf_id, val);
3796 		return ECORE_TIMEOUT;
3797 	}
3798 
3799 	return ECORE_SUCCESS;
3800 }
3801 
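/* FLR helper: sample the PBF per-VOQ producer/consumer counters and wait
 * until the consumers advance by the initially observed distance on every
 * VOQ, i.e. until traffic queued before the FLR has been flushed.
 */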
3802 static enum _ecore_status_t
3803 ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
3804 			  struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3805 {
3806 	u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
3807 	int i, cnt;
3808 
3809 	/* Read initial consumers & producers */
3810 	for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
3811 		u32 prod;
3812 
3813 		cons[i] = ecore_rd(p_hwfn, p_ptt,
3814 				   PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3815 				   i * 0x40);
3816 		prod = ecore_rd(p_hwfn, p_ptt,
3817 				PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
3818 				i * 0x40);
3819 		distance[i] = prod - cons[i];
3820 	}
3821 
3822 	/* Wait for consumers to pass the producers */
3823 	i = 0;
3824 	for (cnt = 0; cnt < 50; cnt++) {
3825 		for (; i < MAX_NUM_VOQS_E4; i++) {
3826 			u32 tmp;
3827 
3828 			tmp = ecore_rd(p_hwfn, p_ptt,
3829 				       PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3830 				       i * 0x40);
3831 			if (distance[i] > tmp - cons[i])
3832 				break;
3833 		}
3834 
3835 		if (i == MAX_NUM_VOQS_E4)
3836 			break;
3837 
3838 		OSAL_MSLEEP(20);
3839 	}
3840 
3841 	if (cnt == 50) {
3842 		DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
3843 		       p_vf->abs_vf_id, i);
3844 		return ECORE_TIMEOUT;
3845 	}
3846 
3847 	return ECORE_SUCCESS;
3848 }
3849 
3850 static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
3851 						  struct ecore_vf_info *p_vf,
3852 						  struct ecore_ptt *p_ptt)
3853 {
3854 	enum _ecore_status_t rc;
3855 
3856 	/* TODO - add SRC and TM polling once we add storage IOV */
3857 
3858 	rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
3859 	if (rc)
3860 		return rc;
3861 
3862 	rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
3863 	if (rc)
3864 		return rc;
3865 
3866 	return ECORE_SUCCESS;
3867 }
3868 
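/* Execute the FLR flow for a single VF: clean its SW state, poll the HW
 * blocks, issue final-cleanup, re-open the VF-PF channel, re-enable VF
 * access and mark the VF for FLR ACK towards the MFW.
 */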
3869 static enum _ecore_status_t
3870 ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3871 				 struct ecore_ptt *p_ptt,
3872 				 u16 rel_vf_id, u32 *ack_vfs)
3873 {
3874 	struct ecore_vf_info *p_vf;
3875 	enum _ecore_status_t rc = ECORE_SUCCESS;
3876 
3877 	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
3878 	if (!p_vf)
3879 		return ECORE_SUCCESS;
3880 
3881 	if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3882 	    (1ULL << (rel_vf_id % 64))) {
3883 		u16 vfid = p_vf->abs_vf_id;
3884 
3885 		/* TODO - should we lock channel? */
3886 
3887 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3888 			   "VF[%d] - Handling FLR\n", vfid);
3889 
3890 		ecore_iov_vf_cleanup(p_hwfn, p_vf);
3891 
3892 		/* If VF isn't active, no need for anything but SW */
3893 		if (!p_vf->b_init)
3894 			goto cleanup;
3895 
3896 		/* TODO - what to do in case of failure? */
3897 		rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
3898 		if (rc != ECORE_SUCCESS)
3899 			goto cleanup;
3900 
3901 		rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
3902 		if (rc) {
3903 			/* TODO - how should we recover here? */
3904 			DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
3905 			return rc;
3906 		}
3907 
3908 		/* Workaround to make VF-PF channel ready, as FW
3909 		 * doesn't do that as a part of FLR.
3910 		 */
3911 		REG_WR(p_hwfn,
3912 		       GTT_BAR0_MAP_REG_USDM_RAM +
3913 		       USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
3914 
3915 		/* VF_STOPPED has to be set only after final cleanup
3916 		 * but prior to re-enabling the VF.
3917 		 */
3918 		p_vf->state = VF_STOPPED;
3919 
3920 		rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
3921 		if (rc) {
3922 			/* TODO - again, no clean recovery path here */
3923 			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
3924 			       vfid);
3925 			return rc;
3926 		}
3927 cleanup:
3928 		/* Mark VF for ack and clean pending state */
3929 		if (p_vf->state == VF_RESET)
3930 			p_vf->state = VF_STOPPED;
3931 		ack_vfs[vfid / 32] |= (1 << (vfid % 32));
3932 		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
3933 		    ~(1ULL << (rel_vf_id % 64));
3934 		p_vf->vf_mbx.b_pending_msg = false;
3935 	}
3936 
3937 	return rc;
3938 }
3939 
3940 enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3941 					      struct ecore_ptt *p_ptt)
3942 {
3943 	u32 ack_vfs[VF_MAX_STATIC / 32];
3944 	enum _ecore_status_t rc = ECORE_SUCCESS;
3945 	u16 i;
3946 
3947 	OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3948 
3949 	/* Since BRB <-> PRS interface can't be tested as part of the flr
3950 	 * polling due to HW limitations, simply sleep a bit. And since
3951 	 * there's no need to wait per-vf, do it before looping.
3952 	 */
3953 	OSAL_MSLEEP(100);
3954 
3955 	for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
3956 		ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
3957 
3958 	rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3959 	return rc;
3960 }
3961 
3962 enum _ecore_status_t
3963 ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3964 				struct ecore_ptt *p_ptt, u16 rel_vf_id)
3965 {
3966 	u32 ack_vfs[VF_MAX_STATIC / 32];
3967 	enum _ecore_status_t rc = ECORE_SUCCESS;
3968 
3969 	OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3970 
3971 	/* Wait instead of polling the BRB <-> PRS interface */
3972 	OSAL_MSLEEP(100);
3973 
3974 	ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);
3975 
3976 	rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3977 	return rc;
3978 }
3979 
3980 bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
3981 {
3982 	bool found = false;
3983 	u16 i;
3984 
3985 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
3986 	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
3987 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3988 			   "[%08x,...,%08x]: %08x\n",
3989 			   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
3990 
3991 	if (!p_hwfn->p_dev->p_iov_info) {
3992 		DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
3993 		return false;
3994 	}
3995 
3996 	/* Mark VFs */
3997 	for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
3998 		struct ecore_vf_info *p_vf;
3999 		u8 vfid;
4000 
4001 		p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
4002 		if (!p_vf)
4003 			continue;
4004 
4005 		vfid = p_vf->abs_vf_id;
4006 		if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
4007 			u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
4008 			u16 rel_vf_id = p_vf->relative_vf_id;
4009 
4010 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4011 				   "VF[%d] [rel %d] got FLR-ed\n",
4012 				   vfid, rel_vf_id);
4013 
4014 			p_vf->state = VF_RESET;
4015 
4016 			/* No need to lock here, since pending_flr should
4017 			 * only change here and just before ACKing the MFW.
4018 			 * Since the MFW will not trigger an additional
4019 			 * attention for VF FLR until the ACK, we're safe.
4020 			 */
4021 			p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
4022 			found = true;
4023 		}
4024 	}
4025 
4026 	return found;
4027 }
4028 
4029 void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
4030 			u16 vfid,
4031 			struct ecore_mcp_link_params *p_params,
4032 			struct ecore_mcp_link_state *p_link,
4033 			struct ecore_mcp_link_capabilities *p_caps)
4034 {
4035 	struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
4036 	struct ecore_bulletin_content *p_bulletin;
4037 
4038 	if (!p_vf)
4039 		return;
4040 
4041 	p_bulletin = p_vf->bulletin.p_virt;
4042 
4043 	if (p_params)
4044 		__ecore_vf_get_link_params(p_params, p_bulletin);
4045 	if (p_link)
4046 		__ecore_vf_get_link_state(p_link, p_bulletin);
4047 	if (p_caps)
4048 		__ecore_vf_get_link_caps(p_caps, p_bulletin);
4049 }
4050 
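/* Main VF-PF mailbox dispatcher - called when a VF message is pending.
 * Locks the per-VF channel, routes known TLVs to their handlers, and replies
 * with MALICIOUS/NOT_SUPPORTED for misbehaving VFs or unknown TLVs.
 */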
4051 void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
4052 			       struct ecore_ptt *p_ptt, int vfid)
4053 {
4054 	struct ecore_iov_vf_mbx *mbx;
4055 	struct ecore_vf_info *p_vf;
4056 
4057 	p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4058 	if (!p_vf)
4059 		return;
4060 
4061 	mbx = &p_vf->vf_mbx;
4062 
4063 	/* ecore_iov_process_mbx_request */
4064 #ifndef CONFIG_ECORE_SW_CHANNEL
4065 	if (!mbx->b_pending_msg) {
4066 		DP_NOTICE(p_hwfn, true,
4067 			  "VF[%02x]: Trying to process mailbox message when none is pending\n",
4068 			  p_vf->abs_vf_id);
4069 		return;
4070 	}
4071 	mbx->b_pending_msg = false;
4072 #endif
4073 
4074 	mbx->first_tlv = mbx->req_virt->first_tlv;
4075 
4076 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4077 		   "VF[%02x]: Processing mailbox message [type %04x]\n",
4078 		   p_vf->abs_vf_id, mbx->first_tlv.tl.type);
4079 
4080 	OSAL_IOV_VF_MSG_TYPE(p_hwfn,
4081 			     p_vf->relative_vf_id,
4082 			     mbx->first_tlv.tl.type);
4083 
4084 	/* Lock the per vf op mutex and note the locker's identity.
4085 	 * The unlock will take place in mbx response.
4086 	 */
4087 	ecore_iov_lock_vf_pf_channel(p_hwfn,
4088 				     p_vf, mbx->first_tlv.tl.type);
4089 
4090 	/* check if tlv type is known */
4091 	if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) &&
4092 	    !p_vf->b_malicious) {
4093 		/* switch on the opcode */
4094 		switch (mbx->first_tlv.tl.type) {
4095 		case CHANNEL_TLV_ACQUIRE:
4096 			ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
4097 			break;
4098 		case CHANNEL_TLV_VPORT_START:
4099 			ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
4100 			break;
4101 		case CHANNEL_TLV_VPORT_TEARDOWN:
4102 			ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
4103 			break;
4104 		case CHANNEL_TLV_START_RXQ:
4105 			ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
4106 			break;
4107 		case CHANNEL_TLV_START_TXQ:
4108 			ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
4109 			break;
4110 		case CHANNEL_TLV_STOP_RXQS:
4111 			ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
4112 			break;
4113 		case CHANNEL_TLV_STOP_TXQS:
4114 			ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
4115 			break;
4116 		case CHANNEL_TLV_UPDATE_RXQ:
4117 			ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
4118 			break;
4119 		case CHANNEL_TLV_VPORT_UPDATE:
4120 			ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
4121 			break;
4122 		case CHANNEL_TLV_UCAST_FILTER:
4123 			ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
4124 			break;
4125 		case CHANNEL_TLV_CLOSE:
4126 			ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
4127 			break;
4128 		case CHANNEL_TLV_INT_CLEANUP:
4129 			ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
4130 			break;
4131 		case CHANNEL_TLV_RELEASE:
4132 			ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
4133 			break;
4134 		case CHANNEL_TLV_UPDATE_TUNN_PARAM:
4135 			ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
4136 			break;
4137 		case CHANNEL_TLV_COALESCE_UPDATE:
4138 			ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
4139 			break;
4140 		case CHANNEL_TLV_COALESCE_READ:
4141 			ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
4142 			break;
4143 		}
4144 	} else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
4145 		/* If we've received a message from a VF we consider malicious,
4146 		 * we ignore the message unless it's a RELEASE, in which
4147 		 * case we give it the benefit of the doubt, allowing the
4148 		 * next loaded driver to start again.
4149 		 */
4150 		if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) {
4151 			/* TODO - initiate FLR, remove malicious indication */
4152 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4153 				   "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n",
4154 				   p_vf->abs_vf_id);
4155 		} else {
4156 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4157 				   "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
4158 				   p_vf->abs_vf_id, mbx->first_tlv.tl.type);
4159 		}
4160 
4161 		ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
4162 				       mbx->first_tlv.tl.type,
4163 				       sizeof(struct pfvf_def_resp_tlv),
4164 				       PFVF_STATUS_MALICIOUS);
4165 	} else {
4166 		/* Unknown TLV - this may belong to a VF driver from the future,
4167 		 * i.e. a version written after this PF driver, supporting
4168 		 * features we don't yet know of and therefore can't honour.
4169 		 * Or it may simply be a buggy VF driver sending garbage over
4170 		 * the channel.
4171 		 */
4172 		DP_NOTICE(p_hwfn, false,
4173 			  "VF[%02x]: unknown TLV. type %04x length %04x"
4174 			  " padding %08x reply address %lu\n",
4175 			  p_vf->abs_vf_id,
4176 			  mbx->first_tlv.tl.type,
4177 			  mbx->first_tlv.tl.length,
4178 			  mbx->first_tlv.padding,
4179 			  (unsigned long)mbx->first_tlv.reply_address);
4180 
4181 		/* Try replying in case reply address matches the acquisition's
4182 		 * posted address.
4183 		 */
4184 		if (p_vf->acquire.first_tlv.reply_address &&
4185 		    (mbx->first_tlv.reply_address ==
4186 		     p_vf->acquire.first_tlv.reply_address))
4187 			ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
4188 					       mbx->first_tlv.tl.type,
4189 					       sizeof(struct pfvf_def_resp_tlv),
4190 					       PFVF_STATUS_NOT_SUPPORTED);
4191 		else
4192 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4193 				   "VF[%02x]: Can't respond to TLV -"
4194 				   " no valid reply address\n",
4195 				   p_vf->abs_vf_id);
4196 	}
4197 
4198 	ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf,
4199 				       mbx->first_tlv.tl.type);
4200 
4201 #ifdef CONFIG_ECORE_SW_CHANNEL
4202 	mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
4203 	mbx->sw_mbx.response_offset = 0;
4204 #endif
4205 }
4206 
4207 void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
4208 				     u64 *events)
4209 {
4210 	int i;
4211 
4212 	OSAL_MEM_ZERO(events, sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
4213 
4214 	ecore_for_each_vf(p_hwfn, i) {
4215 		struct ecore_vf_info *p_vf;
4216 
4217 		p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
4218 		if (p_vf->vf_mbx.b_pending_msg)
4219 			events[i / 64] |= 1ULL << (i % 64);
4220 	}
4221 }
4222 
4223 static struct ecore_vf_info *
4224 ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid)
4225 {
4226 	u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
4227 
4228 	if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
4229 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4230 			   "Got indication for VF [abs 0x%08x] that cannot be"
4231 			   " handled by PF\n",
4232 			   abs_vfid);
4233 		return OSAL_NULL;
4234 	}
4235 
4236 	return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
4237 }
4238 
4239 static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
4240 						 u16 abs_vfid,
4241 						 struct regpair *vf_msg)
4242 {
4243 	struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn,
4244 								   abs_vfid);
4245 
4246 	if (!p_vf)
4247 		return ECORE_SUCCESS;
4248 
4249 	/* Record the physical address of the request so that the handler
4250 	 * can later copy the message from it.
4251 	 */
4252 	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
4253 
4254 	p_vf->vf_mbx.b_pending_msg = true;
4255 
4256 	return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
4257 }
4258 
4259 static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
4260 				       struct malicious_vf_eqe_data *p_data)
4261 {
4262 	struct ecore_vf_info *p_vf;
4263 
4264 	p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
4265 
4266 	if (!p_vf)
4267 		return;
4268 
4269 	if (!p_vf->b_malicious) {
4270 		DP_NOTICE(p_hwfn, false,
4271 			  "VF [%d] - Malicious behavior [%02x]\n",
4272 			  p_vf->abs_vf_id, p_data->err_id);
4273 
4274 		p_vf->b_malicious = true;
4275 	} else {
4276 		DP_INFO(p_hwfn,
4277 			"VF [%d] - Malicious behavior [%02x]\n",
4278 			p_vf->abs_vf_id, p_data->err_id);
4279 	}
4280 
4281 	OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
4282 }
4283 
4284 static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
4285 						  u8 opcode,
4286 						  __le16 echo,
4287 						  union event_ring_data *data,
4288 						  u8 OSAL_UNUSED fw_return_code)
4289 {
4290 	switch (opcode) {
4291 	case COMMON_EVENT_VF_PF_CHANNEL:
4292 		return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo),
4293 					    &data->vf_pf_channel.msg_addr);
4294 	case COMMON_EVENT_VF_FLR:
4295 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4296 			   "VF-FLR is still not supported\n");
4297 		return ECORE_SUCCESS;
4298 	case COMMON_EVENT_MALICIOUS_VF:
4299 		ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
4300 		return ECORE_SUCCESS;
4301 	default:
4302 		DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
4303 			opcode);
4304 		return ECORE_INVAL;
4305 	}
4306 }
4307 
4308 bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4309 {
4310 	return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
4311 		   (1ULL << (rel_vf_id % 64)));
4312 }
4313 
4314 u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4315 {
4316 	struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
4317 	u16 i;
4318 
4319 	if (!p_iov)
4320 		goto out;
4321 
4322 	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
4323 		if (ecore_iov_is_valid_vfid(p_hwfn, i, true, false))
4324 			return i;
4325 
4326 out:
4327 	return MAX_NUM_VFS_E4;
4328 }
4329 
4330 enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
4331 					   struct ecore_ptt *ptt, int vfid)
4332 {
4333 	struct ecore_dmae_params params;
4334 	struct ecore_vf_info *vf_info;
4335 
4336 	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4337 	if (!vf_info)
4338 		return ECORE_INVAL;
4339 
4340 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
4341 	params.flags = ECORE_DMAE_FLAG_VF_SRC | ECORE_DMAE_FLAG_COMPLETION_DST;
4342 	params.src_vfid = vf_info->abs_vf_id;
4343 
4344 	if (ecore_dmae_host2host(p_hwfn, ptt,
4345 				 vf_info->vf_mbx.pending_req,
4346 				 vf_info->vf_mbx.req_phys,
4347 				 sizeof(union vfpf_tlvs) / 4, &params)) {
4348 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4349 			   "Failed to copy message from VF 0x%02x\n", vfid);
4350 
4351 		return ECORE_IO;
4352 	}
4353 
4354 	return ECORE_SUCCESS;
4355 }
4356 
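/* Publish a forced MAC address in the VF bulletin and apply it to the VF's
 * vport. A minimal usage sketch (hypothetical MAC, for illustration only):
 *
 *	u8 mac[ETH_ALEN] = { 0x00, 0x0e, 0x1e, 0x00, 0x00, 0x01 };
 *
 *	ecore_iov_bulletin_set_forced_mac(p_hwfn, mac, vfid);
 */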
4357 void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
4358 				       u8 *mac, int vfid)
4359 {
4360 	struct ecore_vf_info *vf_info;
4361 	u64 feature;
4362 
4363 	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4364 	if (!vf_info) {
4365 		DP_NOTICE(p_hwfn->p_dev, true,
4366 			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
4367 		return;
4368 	}
4369 	if (vf_info->b_malicious) {
4370 		DP_NOTICE(p_hwfn->p_dev, false,
4371 			  "Can't set forced MAC to malicious VF [%d]\n",
4372 			  vfid);
4373 		return;
4374 	}
4375 
4376 	if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
4377 		feature = 1 << VFPF_BULLETIN_MAC_ADDR;
4378 	else
4379 		feature = 1 << MAC_ADDR_FORCED;
4380 
4381 	OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
4382 
4383 	vf_info->bulletin.p_virt->valid_bitmap |= feature;
4384 	/* Forced MAC will disable MAC_ADDR */
4385 	vf_info->bulletin.p_virt->valid_bitmap &=
4386 	    ~(1 << VFPF_BULLETIN_MAC_ADDR);
4387 
4388 	ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4389 }
4390 
4391 enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
4392 						u8 *mac, int vfid)
4393 {
4394 	struct ecore_vf_info *vf_info;
4395 	u64 feature;
4396 
4397 	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4398 	if (!vf_info) {
4399 		DP_NOTICE(p_hwfn->p_dev, true,
4400 			  "Can not set MAC, invalid vfid [%d]\n", vfid);
4401 		return ECORE_INVAL;
4402 	}
4403 	if (vf_info->b_malicious) {
4404 		DP_NOTICE(p_hwfn->p_dev, false,
4405 			  "Can't set MAC to malicious VF [%d]\n",
4406 			  vfid);
4407 		return ECORE_INVAL;
4408 	}
4409 
4410 	if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
4411 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4412 			   "Can not set MAC, Forced MAC is configured\n");
4413 		return ECORE_INVAL;
4414 	}
4415 
4416 	feature = 1 << VFPF_BULLETIN_MAC_ADDR;
4417 	OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
4418 
4419 	vf_info->bulletin.p_virt->valid_bitmap |= feature;
4420 
4421 	if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
4422 		ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4423 
4424 	return ECORE_SUCCESS;
4425 }
4426 
4427 #ifndef LINUX_REMOVE
4428 enum _ecore_status_t
4429 ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
4430 					       bool b_untagged_only, int vfid)
4431 {
4432 	struct ecore_vf_info *vf_info;
4433 	u64 feature;
4434 
4435 	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4436 	if (!vf_info) {
4437 		DP_NOTICE(p_hwfn->p_dev, true,
4438 			  "Can not set untagged default, invalid vfid [%d]\n",
4439 			  vfid);
4440 		return ECORE_INVAL;
4441 	}
4442 	if (vf_info->b_malicious) {
4443 		DP_NOTICE(p_hwfn->p_dev, false,
4444 			  "Can't set untagged default to malicious VF [%d]\n",
4445 			  vfid);
4446 		return ECORE_INVAL;
4447 	}
4448 
4449 	/* Since this is configurable only during vport-start, don't take it
4450 	 * if we're past that point.
4451 	 */
4452 	if (vf_info->state == VF_ENABLED) {
4453 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4454 			   "Can't support untagged change for vfid[%d] -"
4455 			   " VF is already active\n",
4456 			   vfid);
4457 		return ECORE_INVAL;
4458 	}
4459 
4460 	/* Set configuration; This will later be taken into account during the
4461 	 * VF initialization.
4462 	 */
4463 	feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
4464 	    (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
4465 	vf_info->bulletin.p_virt->valid_bitmap |= feature;
4466 
4467 	vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1
4468 	    : 0;
4469 
4470 	return ECORE_SUCCESS;
4471 }
4472 
4473 void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
4474 				  u16 *opaque_fid)
4475 {
4476 	struct ecore_vf_info *vf_info;
4477 
4478 	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4479 	if (!vf_info)
4480 		return;
4481 
4482 	*opaque_fid = vf_info->opaque_fid;
4483 }
4484 #endif
4485 
4486 void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
4487 					u16 pvid, int vfid)
4488 {
4489 	struct ecore_vf_info *vf_info;
4490 	u64 feature;
4491 
4492 	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4493 	if (!vf_info) {
4494 		DP_NOTICE(p_hwfn->p_dev, true,
4495 			  "Can not set forced VLAN, invalid vfid [%d]\n",
4496 			  vfid);
4497 		return;
4498 	}
4499 	if (vf_info->b_malicious) {
4500 		DP_NOTICE(p_hwfn->p_dev, false,
4501 			  "Can't set forced vlan to malicious VF [%d]\n",
4502 			  vfid);
4503 		return;
4504 	}
4505 
4506 	feature = 1 << VLAN_ADDR_FORCED;
4507 	vf_info->bulletin.p_virt->pvid = pvid;
4508 	if (pvid)
4509 		vf_info->bulletin.p_virt->valid_bitmap |= feature;
4510 	else
4511 		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
4512 
4513 	ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4514 }
4515 
4516 void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn,
4517 				      int vfid, u16 vxlan_port, u16 geneve_port)
4518 {
4519 	struct ecore_vf_info *vf_info;
4520 
4521 	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4522 	if (!vf_info) {
4523 		DP_NOTICE(p_hwfn->p_dev, true,
4524 			  "Can not set udp ports, invalid vfid [%d]\n", vfid);
4525 		return;
4526 	}
4527 
4528 	if (vf_info->b_malicious) {
4529 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4530 			   "Can not set udp ports to malicious VF [%d]\n",
4531 			   vfid);
4532 		return;
4533 	}
4534 
4535 	vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
4536 	vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
4537 }
4538 
4539 bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
4540 {
4541 	struct ecore_vf_info *p_vf_info;
4542 
4543 	p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4544 	if (!p_vf_info)
4545 		return false;
4546 
4547 	return !!p_vf_info->vport_instance;
4548 }
4549 
4550 bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
4551 {
4552 	struct ecore_vf_info *p_vf_info;
4553 
4554 	p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4555 	if (!p_vf_info)
4556 		return true;
4557 
4558 	return p_vf_info->state == VF_STOPPED;
4559 }
4560 
4561 bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
4562 {
4563 	struct ecore_vf_info *vf_info;
4564 
4565 	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4566 	if (!vf_info)
4567 		return false;
4568 
4569 	return vf_info->spoof_chk;
4570 }
4571 
4572 enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
4573 					    int vfid, bool val)
4574 {
4575 	struct ecore_vf_info *vf;
4576 	enum _ecore_status_t rc = ECORE_INVAL;
4577 
4578 	if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
4579 		DP_NOTICE(p_hwfn, true,
4580 			  "SR-IOV sanity check failed, can't set spoofchk\n");
4581 		goto out;
4582 	}
4583 
4584 	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4585 	if (!vf)
4586 		goto out;
4587 
4588 	if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
4589 		/* After VF VPORT start PF will configure spoof check */
4590 		vf->req_spoofchk_val = val;
4591 		rc = ECORE_SUCCESS;
4592 		goto out;
4593 	}
4594 
4595 	rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);
4596 
4597 out:
4598 	return rc;
4599 }
4600 
4601 u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
4602 {
4603 	u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;
4604 
4605 	max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf
4606 	    : ECORE_MAX_VF_CHAINS_PER_PF;
4607 
4608 	return max_chains_per_vf;
4609 }
4610 
4611 void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
4612 					  u16 rel_vf_id,
4613 					  void **pp_req_virt_addr,
4614 					  u16 *p_req_virt_size)
4615 {
4616 	struct ecore_vf_info *vf_info =
4617 	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4618 
4619 	if (!vf_info)
4620 		return;
4621 
4622 	if (pp_req_virt_addr)
4623 		*pp_req_virt_addr = vf_info->vf_mbx.req_virt;
4624 
4625 	if (p_req_virt_size)
4626 		*p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
4627 }
4628 
4629 void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
4630 					    u16 rel_vf_id,
4631 					    void **pp_reply_virt_addr,
4632 					    u16 *p_reply_virt_size)
4633 {
4634 	struct ecore_vf_info *vf_info =
4635 	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4636 
4637 	if (!vf_info)
4638 		return;
4639 
4640 	if (pp_reply_virt_addr)
4641 		*pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;
4642 
4643 	if (p_reply_virt_size)
4644 		*p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
4645 }
4646 
4647 #ifdef CONFIG_ECORE_SW_CHANNEL
4648 struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
4649 						 u16 rel_vf_id)
4650 {
4651 	struct ecore_vf_info *vf_info =
4652 	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4653 
4654 	if (!vf_info)
4655 		return OSAL_NULL;
4656 
4657 	return &vf_info->vf_mbx.sw_mbx;
4658 }
4659 #endif
4660 
4661 bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
4662 {
4663 	return (length >= sizeof(struct vfpf_first_tlv) &&
4664 		(length <= sizeof(union vfpf_tlvs)));
4665 }
4666 
4667 u32 ecore_iov_pfvf_msg_length(void)
4668 {
4669 	return sizeof(union pfvf_tlvs);
4670 }
4671 
4672 u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn,
4673 				      u16 rel_vf_id)
4674 {
4675 	struct ecore_vf_info *p_vf;
4676 
4677 	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4678 	if (!p_vf || !p_vf->bulletin.p_virt)
4679 		return OSAL_NULL;
4680 
4681 	if (!(p_vf->bulletin.p_virt->valid_bitmap &
4682 		(1 << VFPF_BULLETIN_MAC_ADDR)))
4683 		return OSAL_NULL;
4684 
4685 	return p_vf->bulletin.p_virt->mac;
4686 }
4687 
4688 u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4689 {
4690 	struct ecore_vf_info *p_vf;
4691 
4692 	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4693 	if (!p_vf || !p_vf->bulletin.p_virt)
4694 		return OSAL_NULL;
4695 
4696 	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
4697 		return OSAL_NULL;
4698 
4699 	return p_vf->bulletin.p_virt->mac;
4700 }
4701 
4702 u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
4703 				       u16 rel_vf_id)
4704 {
4705 	struct ecore_vf_info *p_vf;
4706 
4707 	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4708 	if (!p_vf || !p_vf->bulletin.p_virt)
4709 		return 0;
4710 
4711 	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
4712 		return 0;
4713 
4714 	return p_vf->bulletin.p_virt->pvid;
4715 }
4716 
4717 enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
4718 						 struct ecore_ptt *p_ptt,
4719 						 int vfid, int val)
4720 {
4721 	struct ecore_mcp_link_state *p_link;
4722 	struct ecore_vf_info *vf;
4723 	u8 abs_vp_id = 0;
4724 	enum _ecore_status_t rc;
4725 
4726 	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4727 
4728 	if (!vf)
4729 		return ECORE_INVAL;
4730 
4731 	rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
4732 	if (rc != ECORE_SUCCESS)
4733 		return rc;
4734 
4735 	p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
4736 
4737 	return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
4738 				   p_link->speed);
4739 }
4740 
4741 enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
4742 					    struct ecore_ptt *p_ptt,
4743 					    int vfid,
4744 					    struct ecore_eth_stats *p_stats)
4745 {
4746 	struct ecore_vf_info *vf;
4747 
4748 	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4749 	if (!vf)
4750 		return ECORE_INVAL;
4751 
4752 	if (vf->state != VF_ENABLED)
4753 		return ECORE_INVAL;
4754 
4755 	__ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
4756 				vf->abs_vf_id + 0x10, false);
4757 
4758 	return ECORE_SUCCESS;
4759 }
4760 
4761 u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4762 {
4763 	struct ecore_vf_info *p_vf;
4764 
4765 	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4766 	if (!p_vf)
4767 		return 0;
4768 
4769 	return p_vf->num_rxqs;
4770 }
4771 
4772 u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4773 {
4774 	struct ecore_vf_info *p_vf;
4775 
4776 	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4777 	if (!p_vf)
4778 		return 0;
4779 
4780 	return p_vf->num_active_rxqs;
4781 }
4782 
4783 void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4784 {
4785 	struct ecore_vf_info *p_vf;
4786 
4787 	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4788 	if (!p_vf)
4789 		return OSAL_NULL;
4790 
4791 	return p_vf->ctx;
4792 }
4793 
4794 u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4795 {
4796 	struct ecore_vf_info *p_vf;
4797 
4798 	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4799 	if (!p_vf)
4800 		return 0;
4801 
4802 	return p_vf->num_sbs;
4803 }
4804 
4805 bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4806 {
4807 	struct ecore_vf_info *p_vf;
4808 
4809 	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4810 	if (!p_vf)
4811 		return false;
4812 
4813 	return (p_vf->state == VF_FREE);
4814 }
4815 
4816 bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
4817 					      u16 rel_vf_id)
4818 {
4819 	struct ecore_vf_info *p_vf;
4820 
4821 	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4822 	if (!p_vf)
4823 		return false;
4824 
4825 	return (p_vf->state == VF_ACQUIRED);
4826 }
4827 
4828 bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4829 {
4830 	struct ecore_vf_info *p_vf;
4831 
4832 	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4833 	if (!p_vf)
4834 		return false;
4835 
4836 	return (p_vf->state == VF_ENABLED);
4837 }
4838 
4839 bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
4840 			     u16 rel_vf_id)
4841 {
4842 	struct ecore_vf_info *p_vf;
4843 
4844 	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4845 	if (!p_vf)
4846 		return false;
4847 
4848 	return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
4849 }
4850 
4851 enum _ecore_status_t
4852 ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
4853 {
4854 	struct ecore_wfq_data *vf_vp_wfq;
4855 	struct ecore_vf_info *vf_info;
4856 
4857 	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4858 	if (!vf_info)
4859 		return 0;
4860 
4861 	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
4862 
4863 	if (vf_vp_wfq->configured)
4864 		return vf_vp_wfq->min_speed;
4865 	else
4866 		return 0;
4867 }
4868 
4869 #ifdef CONFIG_ECORE_SW_CHANNEL
4870 void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid,
4871 				 bool b_is_hw)
4872 {
4873 	struct ecore_vf_info *vf_info;
4874 
4875 	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4876 	if (!vf_info)
4877 		return;
4878 
4879 	vf_info->b_hw_channel = b_is_hw;
4880 }
4881 #endif
4882