1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2016 - 2018 Cavium Inc.
3  * All rights reserved.
4  * www.cavium.com
5  */
6 
7 #include "bcm_osal.h"
8 #include "ecore.h"
9 #include "ecore_hsi_eth.h"
10 #include "ecore_sriov.h"
11 #include "ecore_l2_api.h"
12 #include "ecore_vf.h"
13 #include "ecore_vfpf_if.h"
14 #include "ecore_status.h"
15 #include "reg_addr.h"
16 #include "ecore_int.h"
17 #include "ecore_l2.h"
18 #include "ecore_mcp_api.h"
19 #include "ecore_vf_api.h"
20 
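/* Overview of the VF -> PF channel as implemented below (descriptive only):
 * a caller builds a request with ecore_vf_pf_prep(), optionally appends
 * extra TLVs and a CHANNEL_TLV_LIST_END terminator via ecore_add_tlv(),
 * sends it to the PF with ecore_send_msg2pf(), and finally calls
 * ecore_vf_pf_req_end() to log the status and release the channel mutex.
 */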
21 static void *ecore_vf_pf_prep(struct ecore_hwfn *p_hwfn, u16 type, u16 length)
22 {
23 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
24 	void *p_tlv;
25 
26 	/* This lock is released in ecore_vf_pf_req_end(), once the PF's
27 	 * response has been received.
28 	 * So, ecore_vf_pf_prep() and ecore_vf_pf_req_end()
29 	 * must come in pairs.
30 	 */
31 	OSAL_MUTEX_ACQUIRE(&p_iov->mutex);
32 
33 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
34 		   "preparing to send %s tlv over vf pf channel\n",
35 		   qede_ecore_channel_tlvs_string[type]);
36 
37 	/* Reset Request offset */
38 	p_iov->offset = (u8 *)(p_iov->vf2pf_request);
39 
40 	/* Clear mailbox - both request and reply */
41 	OSAL_MEMSET(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
42 	OSAL_MEMSET(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
43 
44 	/* Init type and length */
45 	p_tlv = ecore_add_tlv(&p_iov->offset, type, length);
46 
47 	/* Init first tlv header */
48 	((struct vfpf_first_tlv *)p_tlv)->reply_address =
49 	    (u64)p_iov->pf2vf_reply_phys;
50 
51 	return p_tlv;
52 }
53 
54 static void ecore_vf_pf_req_end(struct ecore_hwfn *p_hwfn,
55 				 enum _ecore_status_t req_status)
56 {
57 	union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;
58 
59 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
60 		   "VF request status = 0x%x, PF reply status = 0x%x\n",
61 		   req_status, resp->default_resp.hdr.status);
62 
63 	OSAL_MUTEX_RELEASE(&p_hwfn->vf_iov_info->mutex);
64 }
65 
66 #ifdef CONFIG_ECORE_SW_CHANNEL
67 /* The SW channel implementation on Windows needs to know the 'exact'
68  * response size of any given message. That means that for future
69  * messages we'd be unable to send TLVs to the PF if it won't be able to
70  * answer them whenever |response| != |default response|.
71  * Any such message would need to be handshaked via the acquire capabilities.
72  */
73 #endif
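/* Sends the prepared request over the HW channel: the DMA address of the
 * vf2pf request is written to the USDM VF zone in BAR0, the trigger is then
 * rung, and the function polls the caller-supplied `done' byte (at most
 * 100 iterations of 25ms) for the PF's completion status.
 */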
74 static enum _ecore_status_t
75 ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
76 		  u8 *done, __rte_unused u32 resp_size)
77 {
78 	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
79 	struct ustorm_trigger_vf_zone trigger;
80 	struct ustorm_vf_zone *zone_data;
81 	enum _ecore_status_t rc = ECORE_SUCCESS;
82 	int time = 100;
83 
84 	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
85 
86 	/* output tlvs list */
87 	ecore_dp_tlv_list(p_hwfn, p_req);
88 
89 	/* Send TLVs over HW channel */
90 	OSAL_MEMSET(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
91 	trigger.vf_pf_msg_valid = 1;
92 
93 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
94 		   "VF -> PF [%02x] message: [%08x, %08x] --> %p,"
95 		   " %08x --> %p\n",
96 		   GET_FIELD(p_hwfn->hw_info.concrete_fid,
97 			     PXP_CONCRETE_FID_PFID),
98 		   U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys),
99 		   U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys),
100 		   &zone_data->non_trigger.vf_pf_msg_addr,
101 		   *((u32 *)&trigger), &zone_data->trigger);
102 
103 	REG_WR(p_hwfn,
104 	       (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
105 	       U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys));
106 
107 	REG_WR(p_hwfn,
108 	       (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
109 	       U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys));
110 
111 	/* The message data must be written first, to prevent the trigger from
112 	 * firing before the data is written.
113 	 */
114 	OSAL_WMB(p_hwfn->p_dev);
115 
116 	REG_WR(p_hwfn, (osal_uintptr_t)&zone_data->trigger,
117 	       *((u32 *)&trigger));
118 
119 	/* When the PF is done with the response, it writes back to the `done'
120 	 * address. Poll until then (at most 100 iterations of 25ms, ~2.5 sec).
121 	 */
122 	while ((!*done) && time) {
123 		OSAL_MSLEEP(25);
124 		time--;
125 	}
126 
127 	if (!*done) {
128 		DP_NOTICE(p_hwfn, true,
129 			  "VF <-- PF Timeout [Type %d]\n",
130 			  p_req->first_tlv.tl.type);
131 		rc = ECORE_TIMEOUT;
132 	} else {
133 		if ((*done != PFVF_STATUS_SUCCESS) &&
134 		    (*done != PFVF_STATUS_NO_RESOURCE))
135 			DP_NOTICE(p_hwfn, false,
136 				  "PF response: %d [Type %d]\n",
137 				  *done, p_req->first_tlv.tl.type);
138 		else
139 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
140 				   "PF response: %d [Type %d]\n",
141 				   *done, p_req->first_tlv.tl.type);
142 	}
143 
144 	return rc;
145 }
146 
147 static void ecore_vf_pf_add_qid(struct ecore_hwfn *p_hwfn,
148 				struct ecore_queue_cid *p_cid)
149 {
150 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
151 	struct vfpf_qid_tlv *p_qid_tlv;
152 
153 	/* Only add QIDs for the queue if it was negotiated with PF */
154 	if (!(p_iov->acquire_resp.pfdev_info.capabilities &
155 	      PFVF_ACQUIRE_CAP_QUEUE_QIDS))
156 		return;
157 
158 	p_qid_tlv = ecore_add_tlv(&p_iov->offset,
159 				  CHANNEL_TLV_QID, sizeof(*p_qid_tlv));
160 	p_qid_tlv->qid = p_cid->qid_usage_idx;
161 }
162 
163 enum _ecore_status_t _ecore_vf_pf_release(struct ecore_hwfn *p_hwfn,
164 					  bool b_final)
165 {
166 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
167 	struct pfvf_def_resp_tlv *resp;
168 	struct vfpf_first_tlv *req;
169 	u32 size;
170 	enum _ecore_status_t rc;
171 
172 	/* clear mailbox and prep first tlv */
173 	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
174 
175 	/* add list termination tlv */
176 	ecore_add_tlv(&p_iov->offset,
177 		      CHANNEL_TLV_LIST_END,
178 		      sizeof(struct channel_list_end_tlv));
179 
180 	resp = &p_iov->pf2vf_reply->default_resp;
181 	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
182 
183 	if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS)
184 		rc = ECORE_AGAIN;
185 
186 	ecore_vf_pf_req_end(p_hwfn, rc);
187 	if (!b_final)
188 		return rc;
189 
190 	p_hwfn->b_int_enabled = 0;
191 
192 	if (p_iov->vf2pf_request)
193 		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
194 				       p_iov->vf2pf_request,
195 				       p_iov->vf2pf_request_phys,
196 				       sizeof(union vfpf_tlvs));
197 	if (p_iov->pf2vf_reply)
198 		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
199 				       p_iov->pf2vf_reply,
200 				       p_iov->pf2vf_reply_phys,
201 				       sizeof(union pfvf_tlvs));
202 
203 	if (p_iov->bulletin.p_virt) {
204 		size = sizeof(struct ecore_bulletin_content);
205 		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
206 				       p_iov->bulletin.p_virt,
207 				       p_iov->bulletin.phys,
208 				       size);
209 	}
210 
211 #ifdef CONFIG_ECORE_LOCK_ALLOC
212 	OSAL_MUTEX_DEALLOC(&p_iov->mutex);
213 #endif
214 
215 	OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);
216 	p_hwfn->vf_iov_info = OSAL_NULL;
217 
218 	return rc;
219 }
220 
221 enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
222 {
223 	return _ecore_vf_pf_release(p_hwfn, true);
224 }
225 
226 static void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn,
227 					    struct vf_pf_resc_request *p_req,
228 					    struct pf_vf_resc *p_resp)
229 {
230 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
231 		   "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n",
232 		   p_req->num_rxqs, p_resp->num_rxqs,
233 		   p_req->num_txqs, p_resp->num_txqs,
234 		   p_req->num_sbs, p_resp->num_sbs,
235 		   p_req->num_mac_filters, p_resp->num_mac_filters,
236 		   p_req->num_vlan_filters, p_resp->num_vlan_filters,
237 		   p_req->num_mc_filters, p_resp->num_mc_filters,
238 		   p_req->num_cids, p_resp->num_cids);
239 
240 	/* humble our request */
241 	p_req->num_txqs = p_resp->num_txqs;
242 	p_req->num_rxqs = p_resp->num_rxqs;
243 	p_req->num_sbs = p_resp->num_sbs;
244 	p_req->num_mac_filters = p_resp->num_mac_filters;
245 	p_req->num_vlan_filters = p_resp->num_vlan_filters;
246 	p_req->num_mc_filters = p_resp->num_mc_filters;
247 	p_req->num_cids = p_resp->num_cids;
248 }
249 
250 static enum _ecore_status_t
251 ecore_vf_pf_soft_flr_acquire(struct ecore_hwfn *p_hwfn)
252 {
253 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
254 	struct pfvf_def_resp_tlv *resp;
255 	struct vfpf_soft_flr_tlv *req;
256 	enum _ecore_status_t rc;
257 
258 	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_SOFT_FLR, sizeof(*req));
259 
260 	/* add list termination tlv */
261 	ecore_add_tlv(&p_iov->offset,
262 		      CHANNEL_TLV_LIST_END,
263 		      sizeof(struct channel_list_end_tlv));
264 
265 	resp = &p_iov->pf2vf_reply->default_resp;
266 	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
267 
268 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "rc=0x%x\n", rc);
269 
270 	/* Release the mutex here, as ecore_vf_pf_acquire() takes it again */
271 	ecore_vf_pf_req_end(p_hwfn, ECORE_AGAIN);
272 
273 	/* As of today, there is no mechanism in place for the VF to know the
274 	 * FLR status, so wait long enough (worst-case time) for the FLR to
275 	 * complete, as the PF's mailbox request to the MFW to initiate the VF
276 	 * FLR and the PF's processing of that FLR can take time.
277 	 */
278 	OSAL_MSLEEP(3000);
279 
280 	return ecore_vf_pf_acquire(p_hwfn);
281 }
282 
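/* Resource acquisition negotiation, as implemented below: the VF first asks
 * for the maximum resources it can use. If the PF answers NO_RESOURCE, the
 * request is shrunk to the PF-recommended amounts and re-sent (up to
 * ECORE_VF_ACQUIRE_THRESH attempts). NOT_SUPPORTED triggers the fastpath-HSI
 * compatibility fallbacks, while ACQUIRED indicates the PF considers the VF
 * already acquired (e.g. by a previous instance), so a soft FLR is requested
 * before re-acquiring.
 */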
283 enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
284 {
285 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
286 	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
287 	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
288 	struct ecore_vf_acquire_sw_info vf_sw_info;
289 	struct ecore_dev *p_dev = p_hwfn->p_dev;
290 	u8 retry_cnt = p_iov->acquire_retry_cnt;
291 	struct vf_pf_resc_request *p_resc;
292 	bool resources_acquired = false;
293 	struct vfpf_acquire_tlv *req;
294 	int attempts = 0;
295 	enum _ecore_status_t rc = ECORE_SUCCESS;
296 
297 	/* clear mailbox and prep first tlv */
298 	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
299 	p_resc = &req->resc_request;
300 
301 	/* @@@ TBD: PF may not be ready bnx2x_get_vf_id... */
302 	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;
303 
304 	p_resc->num_rxqs = ECORE_MAX_VF_CHAINS_PER_PF;
305 	p_resc->num_txqs = ECORE_MAX_VF_CHAINS_PER_PF;
306 	p_resc->num_sbs = ECORE_MAX_VF_CHAINS_PER_PF;
307 	p_resc->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
308 	p_resc->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
309 	p_resc->num_cids = ECORE_ETH_VF_DEFAULT_NUM_CIDS;
310 
311 	OSAL_MEMSET(&vf_sw_info, 0, sizeof(vf_sw_info));
312 	OSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, &req->resc_request, &vf_sw_info);
313 
314 	req->vfdev_info.os_type = vf_sw_info.os_type;
315 	req->vfdev_info.driver_version = vf_sw_info.driver_version;
316 	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
317 	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
318 	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
319 	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
320 	req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
321 	req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;
322 
323 	/* Fill capability field with any non-deprecated config we support */
324 	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
325 
326 	/* If we've mapped the doorbell bar, try using queue qids */
327 	if (p_iov->b_doorbell_bar)
328 		req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR |
329 						VFPF_ACQUIRE_CAP_QUEUE_QIDS;
330 
331 	/* pf 2 vf bulletin board address */
332 	req->bulletin_addr = p_iov->bulletin.phys;
333 	req->bulletin_size = p_iov->bulletin.size;
334 
335 	/* add list termination tlv */
336 	ecore_add_tlv(&p_iov->offset,
337 		      CHANNEL_TLV_LIST_END,
338 		      sizeof(struct channel_list_end_tlv));
339 
340 	while (!resources_acquired) {
341 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
342 			   "attempting to acquire resources\n");
343 
344 		/* Clear response buffer, as this might be a re-send */
345 		OSAL_MEMSET(p_iov->pf2vf_reply, 0,
346 			    sizeof(union pfvf_tlvs));
347 
348 		/* send acquire request */
349 		rc = ecore_send_msg2pf(p_hwfn,
350 				       &resp->hdr.status, sizeof(*resp));
351 
352 		if (retry_cnt && rc == ECORE_TIMEOUT) {
353 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
354 				   "VF retrying to acquire due to VPC timeout\n");
355 			retry_cnt--;
356 			continue;
357 		}
358 
359 		if (rc != ECORE_SUCCESS)
360 			goto exit;
361 
362 		/* copy acquire response from buffer to p_hwfn */
363 		OSAL_MEMCPY(&p_iov->acquire_resp,
364 			    resp, sizeof(p_iov->acquire_resp));
365 
366 		attempts++;
367 
368 		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
369 			/* PF agrees to allocate our resources */
370 			if (!(resp->pfdev_info.capabilities &
371 			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
372 				/* It's possible legacy PF mistakenly accepted;
373 				 * but we don't care - simply mark it as
374 				 * legacy and continue.
375 				 */
376 				req->vfdev_info.capabilities |=
377 					VFPF_ACQUIRE_CAP_PRE_FP_HSI;
378 			}
379 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
380 				   "resources acquired\n");
381 			resources_acquired = true;
382 		} /* PF refuses to allocate our resources */
383 		else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
384 			 attempts < ECORE_VF_ACQUIRE_THRESH) {
385 			ecore_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
386 							&resp->resc);
387 
388 		} else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
389 			if (pfdev_info->major_fp_hsi &&
390 			    (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
391 				DP_NOTICE(p_hwfn, false,
392 					  "PF uses an incompatible fastpath HSI"
393 					  " %02x.%02x [VF requires %02x.%02x]."
394 					  " Please change to a VF driver using"
395 					  " %02x.xx.\n",
396 					  pfdev_info->major_fp_hsi,
397 					  pfdev_info->minor_fp_hsi,
398 					  ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR,
399 					  pfdev_info->major_fp_hsi);
400 				rc = ECORE_INVAL;
401 				goto exit;
402 			}
403 
404 			if (!pfdev_info->major_fp_hsi) {
405 				if (req->vfdev_info.capabilities &
406 				    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
407 					DP_NOTICE(p_hwfn, false,
408 						  "PF uses very old drivers."
409 						  " Please change to a VF"
410 						  " driver using no later than"
411 						  " 8.8.x.x.\n");
412 					rc = ECORE_INVAL;
413 					goto exit;
414 				} else {
415 					DP_INFO(p_hwfn,
416 						"PF is old - try re-acquire to"
417 						" see if it supports FW-version"
418 						" override\n");
419 					req->vfdev_info.capabilities |=
420 						VFPF_ACQUIRE_CAP_PRE_FP_HSI;
421 					continue;
422 				}
423 			}
424 
425 			/* If PF/VF are using the same major version, the PF
426 			 * must have had its reasons. Simply fail.
427 			 */
428 			DP_NOTICE(p_hwfn, false,
429 				  "PF rejected acquisition by VF\n");
430 			rc = ECORE_INVAL;
431 			goto exit;
432 		} else if (resp->hdr.status == PFVF_STATUS_ACQUIRED) {
433 			ecore_vf_pf_req_end(p_hwfn, ECORE_AGAIN);
434 			return ecore_vf_pf_soft_flr_acquire(p_hwfn);
435 		} else {
436 			DP_ERR(p_hwfn,
437 			       "PF returned err %d to VF acquisition request\n",
438 			       resp->hdr.status);
439 			rc = ECORE_AGAIN;
440 			goto exit;
441 		}
442 	}
443 
444 	/* Mark the PF as legacy, if needed */
445 	if (req->vfdev_info.capabilities &
446 	    VFPF_ACQUIRE_CAP_PRE_FP_HSI)
447 		p_iov->b_pre_fp_hsi = true;
448 
449 	/* In case PF doesn't support multi-queue Tx, update the number of
450 	 * CIDs to reflect the number of queues [older PFs didn't fill that
451 	 * field].
452 	 */
453 	if (!(resp->pfdev_info.capabilities &
454 	      PFVF_ACQUIRE_CAP_QUEUE_QIDS))
455 		resp->resc.num_cids = resp->resc.num_rxqs +
456 				      resp->resc.num_txqs;
457 
458 	rc = OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(p_hwfn, &resp->resc);
459 	if (rc) {
460 		DP_NOTICE(p_hwfn, true,
461 			  "VF_UPDATE_ACQUIRE_RESC_RESP Failed:"
462 			  " status = 0x%x.\n",
463 			  rc);
464 		rc = ECORE_AGAIN;
465 		goto exit;
466 	}
467 
468 	/* Update bulletin board size with response from PF */
469 	p_iov->bulletin.size = resp->bulletin_size;
470 
471 	/* get HW info */
472 	p_dev->type = resp->pfdev_info.dev_type;
473 	p_dev->chip_rev = (u8)resp->pfdev_info.chip_rev;
474 
475 	DP_INFO(p_hwfn, "Chip details - %s%d\n",
476 		ECORE_IS_BB(p_dev) ? "BB" : "AH",
477 		CHIP_REV_IS_A0(p_hwfn->p_dev) ? 0 : 1);
478 
479 	p_dev->chip_num = pfdev_info->chip_num & 0xffff;
480 
481 	/* Learn of the possibility of CMT */
482 	if (IS_LEAD_HWFN(p_hwfn)) {
483 		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
484 			DP_INFO(p_hwfn, "100g VF\n");
485 			p_dev->num_hwfns = 2;
486 		}
487 	}
488 
489 	/* @DPDK */
490 	if (((p_iov->b_pre_fp_hsi == true) &
491 	    ETH_HSI_VER_MINOR) &&
492 	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR))
493 		DP_INFO(p_hwfn,
494 			"PF is using older fastpath HSI;"
495 			" %02x.%02x is configured\n",
496 			ETH_HSI_VER_MAJOR,
497 			resp->pfdev_info.minor_fp_hsi);
498 
499 exit:
500 	ecore_vf_pf_req_end(p_hwfn, rc);
501 
502 	return rc;
503 }
504 
505 u32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn,
506 			 enum BAR_ID bar_id)
507 {
508 	u32 bar_size;
509 
510 	/* Regview size is fixed */
511 	if (bar_id == BAR_ID_0)
512 		return 1 << 17;
513 
514 	/* Doorbell is received from PF */
515 	bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size;
516 	if (bar_size)
517 		return 1 << bar_size;
518 	return 0;
519 }
520 
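/* Prepares a VF hw-function: reads the opaque/concrete FIDs from BAR0,
 * allocates the vf2pf/pf2vf DMA mailboxes and the bulletin board, and then
 * performs the acquire handshake. For CMT devices using a mapped doorbell
 * bar with a PF too old to report the bar size, acquisition is redone
 * without the mapped-bar indication (see the retry further below).
 */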
521 enum _ecore_status_t
522 ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn,
523 		    struct ecore_hw_prepare_params *p_params)
524 {
525 	struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_hwfn->p_dev);
526 	struct ecore_vf_iov *p_iov;
527 	u32 reg;
528 	enum _ecore_status_t rc;
529 
530 	/* Set number of hwfns - might be overridden once leading hwfn learns
531 	 * actual configuration from PF.
532 	 */
533 	if (IS_LEAD_HWFN(p_hwfn))
534 		p_hwfn->p_dev->num_hwfns = 1;
535 
536 	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
537 	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);
538 
539 	reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
540 	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);
541 
542 	/* Allocate vf sriov info */
543 	p_iov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_iov));
544 	if (!p_iov) {
545 		DP_NOTICE(p_hwfn, true,
546 			  "Failed to allocate `struct ecore_sriov'\n");
547 		return ECORE_NOMEM;
548 	}
549 
550 	/* Doorbells are tricky; the upper layer has already set the hwfn
551 	 * doorbell value, but there are several incompatibility scenarios
552 	 * where that would be incorrect and we'd need to override it.
553 	 */
554 	if (p_hwfn->doorbells == OSAL_NULL) {
555 		p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
556 						     PXP_VF_BAR0_START_DQ;
557 	} else if (p_hwfn == p_lead) {
558 		/* For the leading hw-function the value is always correct, but
559 		 * we still need to handle the scenario where a legacy PF does
560 		 * not support 100g mapped bars; that is handled later.
561 		 */
562 		p_iov->b_doorbell_bar = true;
563 	} else {
564 		/* here, value would be correct ONLY if the leading hwfn
565 		 * received indication that mapped-bars are supported.
566 		 */
567 		if (p_lead->vf_iov_info->b_doorbell_bar)
568 			p_iov->b_doorbell_bar = true;
569 		else
570 			p_hwfn->doorbells = (u8 OSAL_IOMEM *)
571 					    p_hwfn->regview +
572 					    PXP_VF_BAR0_START_DQ;
573 	}
574 
575 	/* Allocate vf2pf msg */
576 	p_iov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
577 							 &p_iov->
578 							 vf2pf_request_phys,
579 							 sizeof(union
580 								vfpf_tlvs));
581 	if (!p_iov->vf2pf_request) {
582 		DP_NOTICE(p_hwfn, true,
583 			 "Failed to allocate `vf2pf_request' DMA memory\n");
584 		goto free_p_iov;
585 	}
586 
587 	p_iov->pf2vf_reply = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
588 						       &p_iov->
589 						       pf2vf_reply_phys,
590 						       sizeof(union pfvf_tlvs));
591 	if (!p_iov->pf2vf_reply) {
592 		DP_NOTICE(p_hwfn, true,
593 			  "Failed to allocate `pf2vf_reply' DMA memory\n");
594 		goto free_vf2pf_request;
595 	}
596 
597 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
598 		   "VF's Request mailbox [%p virt 0x%lx phys], "
599 		   "Response mailbox [%p virt 0x%lx phys]\n",
600 		   p_iov->vf2pf_request,
601 		   (unsigned long)p_iov->vf2pf_request_phys,
602 		   p_iov->pf2vf_reply,
603 		   (unsigned long)p_iov->pf2vf_reply_phys);
604 
605 	/* Allocate Bulletin board */
606 	p_iov->bulletin.size = sizeof(struct ecore_bulletin_content);
607 	p_iov->bulletin.p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
608 							   &p_iov->bulletin.
609 							   phys,
610 							   p_iov->bulletin.
611 							   size);
612 	if (!p_iov->bulletin.p_virt) {
613 		DP_NOTICE(p_hwfn, false, "Failed to alloc bulletin memory\n");
614 		goto free_pf2vf_reply;
615 	}
616 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
617 		   "VF's bulletin Board [%p virt 0x%lx phys 0x%08x bytes]\n",
618 		   p_iov->bulletin.p_virt, (unsigned long)p_iov->bulletin.phys,
619 		   p_iov->bulletin.size);
620 
621 #ifdef CONFIG_ECORE_LOCK_ALLOC
622 	if (OSAL_MUTEX_ALLOC(p_hwfn, &p_iov->mutex)) {
623 		DP_NOTICE(p_hwfn, false, "Failed to allocate p_iov->mutex\n");
624 		goto free_bulletin_mem;
625 	}
626 #endif
627 	OSAL_MUTEX_INIT(&p_iov->mutex);
628 
629 	p_iov->acquire_retry_cnt = p_params->acquire_retry_cnt;
630 	p_hwfn->vf_iov_info = p_iov;
631 
632 	p_hwfn->hw_info.personality = ECORE_PCI_ETH;
633 
634 	rc = ecore_vf_pf_acquire(p_hwfn);
635 
636 	/* If VF is 100g using a mapped bar and PF is too old to support that,
637 	 * acquisition would succeed - but the VF would have no way of knowing
638 	 * the size of the doorbell bar configured in HW and thus will not
639 	 * know how to split it for 2nd hw-function.
640 	 * In this case we re-try without the indication of the mapped
641 	 * doorbell.
642 	 */
643 	if (rc == ECORE_SUCCESS &&
644 	    p_iov->b_doorbell_bar &&
645 	    !ecore_vf_hw_bar_size(p_hwfn, BAR_ID_1) &&
646 	    ECORE_IS_CMT(p_hwfn->p_dev)) {
647 		rc = _ecore_vf_pf_release(p_hwfn, false);
648 		if (rc != ECORE_SUCCESS)
649 			return rc;
650 
651 		p_iov->b_doorbell_bar = false;
652 		p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
653 						     PXP_VF_BAR0_START_DQ;
654 		rc = ecore_vf_pf_acquire(p_hwfn);
655 	}
656 
657 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
658 		   "Regview [%p], Doorbell [%p], Device-doorbell [%p]\n",
659 		   p_hwfn->regview, p_hwfn->doorbells,
660 		   p_hwfn->p_dev->doorbells);
661 
662 	return rc;
663 
664 #ifdef CONFIG_ECORE_LOCK_ALLOC
665 free_bulletin_mem:
666 	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->bulletin.p_virt,
667 			       p_iov->bulletin.phys,
668 			       p_iov->bulletin.size);
669 #endif
670 free_pf2vf_reply:
671 	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->pf2vf_reply,
672 			       p_iov->pf2vf_reply_phys,
673 			       sizeof(union pfvf_tlvs));
674 free_vf2pf_request:
675 	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->vf2pf_request,
676 			       p_iov->vf2pf_request_phys,
677 			       sizeof(union vfpf_tlvs));
678 free_p_iov:
679 	OSAL_FREE(p_hwfn->p_dev, p_iov);
680 
681 	return ECORE_NOMEM;
682 }
683 
684 /* @DPDK - changed enum ecore_tunn_clss to enum ecore_tunn_mode */
685 static void
686 __ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
687 			     struct ecore_tunn_update_type *p_src,
688 			     enum ecore_tunn_mode mask, u8 *p_cls)
689 {
690 	if (p_src->b_update_mode) {
691 		p_req->tun_mode_update_mask |= (1 << mask);
692 
693 		if (p_src->b_mode_enabled)
694 			p_req->tunn_mode |= (1 << mask);
695 	}
696 
697 	*p_cls = p_src->tun_cls;
698 }
699 
700 /* @DPDK - changed enum ecore_tunn_clss to enum ecore_tunn_mode */
701 static void
702 ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
703 			   struct ecore_tunn_update_type *p_src,
704 			   enum ecore_tunn_mode mask, u8 *p_cls,
705 			   struct ecore_tunn_update_udp_port *p_port,
706 			   u8 *p_update_port, u16 *p_udp_port)
707 {
708 	if (p_port->b_update_port) {
709 		*p_update_port = 1;
710 		*p_udp_port = p_port->port;
711 	}
712 
713 	__ecore_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
714 }
715 
716 void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun)
717 {
718 	if (p_tun->vxlan.b_mode_enabled)
719 		p_tun->vxlan.b_update_mode = true;
720 	if (p_tun->l2_geneve.b_mode_enabled)
721 		p_tun->l2_geneve.b_update_mode = true;
722 	if (p_tun->ip_geneve.b_mode_enabled)
723 		p_tun->ip_geneve.b_update_mode = true;
724 	if (p_tun->l2_gre.b_mode_enabled)
725 		p_tun->l2_gre.b_update_mode = true;
726 	if (p_tun->ip_gre.b_mode_enabled)
727 		p_tun->ip_gre.b_update_mode = true;
728 
729 	p_tun->b_update_rx_cls = true;
730 	p_tun->b_update_tx_cls = true;
731 }
732 
733 static void
734 __ecore_vf_update_tunn_param(struct ecore_tunn_update_type *p_tun,
735 			     u16 feature_mask, u8 tunn_mode, u8 tunn_cls,
736 			     enum ecore_tunn_mode val)
737 {
738 	if (feature_mask & (1 << val)) {
739 		p_tun->b_mode_enabled = tunn_mode;
740 		p_tun->tun_cls = tunn_cls;
741 	} else {
742 		p_tun->b_mode_enabled = false;
743 	}
744 }
745 
746 static void
747 ecore_vf_update_tunn_param(struct ecore_hwfn *p_hwfn,
748 			   struct ecore_tunnel_info *p_tun,
749 			   struct pfvf_update_tunn_param_tlv *p_resp)
750 {
751 	/* Update mode and classes provided by PF */
752 	u16 feat_mask = p_resp->tunn_feature_mask;
753 
754 	__ecore_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
755 				     p_resp->vxlan_mode, p_resp->vxlan_clss,
756 				     ECORE_MODE_VXLAN_TUNN);
757 	__ecore_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
758 				     p_resp->l2geneve_mode,
759 				     p_resp->l2geneve_clss,
760 				     ECORE_MODE_L2GENEVE_TUNN);
761 	__ecore_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
762 				     p_resp->ipgeneve_mode,
763 				     p_resp->ipgeneve_clss,
764 				     ECORE_MODE_IPGENEVE_TUNN);
765 	__ecore_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
766 				     p_resp->l2gre_mode, p_resp->l2gre_clss,
767 				     ECORE_MODE_L2GRE_TUNN);
768 	__ecore_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
769 				     p_resp->ipgre_mode, p_resp->ipgre_clss,
770 				     ECORE_MODE_IPGRE_TUNN);
771 	p_tun->geneve_port.port = p_resp->geneve_udp_port;
772 	p_tun->vxlan_port.port = p_resp->vxlan_udp_port;
773 
774 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
775 		   "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x",
776 		   p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
777 		   p_tun->ip_geneve.b_mode_enabled,
778 		   p_tun->l2_gre.b_mode_enabled,
779 		   p_tun->ip_gre.b_mode_enabled);
780 }
781 
782 enum _ecore_status_t
783 ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
784 				struct ecore_tunnel_info *p_src)
785 {
786 	struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
787 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
788 	struct pfvf_update_tunn_param_tlv *p_resp;
789 	struct vfpf_update_tunn_param_tlv *p_req;
790 	enum _ecore_status_t rc;
791 
792 	p_req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
793 				 sizeof(*p_req));
794 
795 	if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
796 		p_req->update_tun_cls = 1;
797 
798 	ecore_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, ECORE_MODE_VXLAN_TUNN,
799 				   &p_req->vxlan_clss, &p_src->vxlan_port,
800 				   &p_req->update_vxlan_port,
801 				   &p_req->vxlan_port);
802 	ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
803 				   ECORE_MODE_L2GENEVE_TUNN,
804 				   &p_req->l2geneve_clss, &p_src->geneve_port,
805 				   &p_req->update_geneve_port,
806 				   &p_req->geneve_port);
807 	__ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
808 				     ECORE_MODE_IPGENEVE_TUNN,
809 				     &p_req->ipgeneve_clss);
810 	__ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
811 				     ECORE_MODE_L2GRE_TUNN, &p_req->l2gre_clss);
812 	__ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
813 				     ECORE_MODE_IPGRE_TUNN, &p_req->ipgre_clss);
814 
815 	/* add list termination tlv */
816 	ecore_add_tlv(&p_iov->offset,
817 		      CHANNEL_TLV_LIST_END,
818 		      sizeof(struct channel_list_end_tlv));
819 
820 	p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
821 	rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
822 
823 	if (rc)
824 		goto exit;
825 
826 	if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) {
827 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
828 			   "Failed to update tunnel parameters\n");
829 		rc = ECORE_INVAL;
830 	}
831 
832 	ecore_vf_update_tunn_param(p_hwfn, p_tun, p_resp);
833 exit:
834 	ecore_vf_pf_req_end(p_hwfn, rc);
835 	return rc;
836 }
837 
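/* Rx queue start: with a modern PF the producer address is learned from the
 * response offset; with a legacy (pre-fp-hsi) PF the VF computes it from the
 * MSTORM queue zone and zeroes the producers itself.
 */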
838 enum _ecore_status_t
839 ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
840 		      struct ecore_queue_cid *p_cid,
841 		      u16 bd_max_bytes,
842 		      dma_addr_t bd_chain_phys_addr,
843 		      dma_addr_t cqe_pbl_addr,
844 		      u16 cqe_pbl_size,
845 		      void OSAL_IOMEM **pp_prod)
846 {
847 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
848 	struct pfvf_start_queue_resp_tlv *resp;
849 	struct vfpf_start_rxq_tlv *req;
850 	u16 rx_qid = p_cid->rel.queue_id;
851 	enum _ecore_status_t rc;
852 
853 	/* clear mailbox and prep first tlv */
854 	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));
855 
856 	req->rx_qid = rx_qid;
857 	req->cqe_pbl_addr = cqe_pbl_addr;
858 	req->cqe_pbl_size = cqe_pbl_size;
859 	req->rxq_addr = bd_chain_phys_addr;
860 	req->hw_sb = p_cid->sb_igu_id;
861 	req->sb_index = p_cid->sb_idx;
862 	req->bd_max_bytes = bd_max_bytes;
863 	req->stat_id = -1; /* Keep initialized, for future compatibility */
864 
865 	/* If PF is legacy, we'll need to calculate producers ourselves
866 	 * as well as clean them.
867 	 */
868 	if (p_iov->b_pre_fp_hsi) {
869 		u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
870 		u32 init_prod_val = 0;
871 
872 		*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
873 			   MSTORM_QZONE_START(p_hwfn->p_dev) +
874 			   (hw_qid) * MSTORM_QZONE_SIZE;
875 
876 		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
877 		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
878 				  (u32 *)(&init_prod_val));
879 	}
880 
881 	ecore_vf_pf_add_qid(p_hwfn, p_cid);
882 
883 	/* add list termination tlv */
884 	ecore_add_tlv(&p_iov->offset,
885 		      CHANNEL_TLV_LIST_END,
886 		      sizeof(struct channel_list_end_tlv));
887 
888 	resp = &p_iov->pf2vf_reply->queue_start;
889 	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
890 	if (rc)
891 		goto exit;
892 
893 	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
894 		rc = ECORE_INVAL;
895 		goto exit;
896 	}
897 
898 	/* Learn the address of the producer from the response */
899 	if (!p_iov->b_pre_fp_hsi) {
900 		u32 init_prod_val = 0;
901 
902 		*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview + resp->offset;
903 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
904 			   "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
905 			   rx_qid, *pp_prod, resp->offset);
906 
907 		/* Init the rcq, rx bd and rx sge (if valid) producers to 0.
908 		 * It was actually the PF's responsibility, but since some
909 		 * old PFs might fail to do so, we do this as well.
910 		 */
911 		OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);
912 		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
913 				  (u32 *)&init_prod_val);
914 	}
915 
916 exit:
917 	ecore_vf_pf_req_end(p_hwfn, rc);
918 
919 	return rc;
920 }
921 
922 enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
923 					  struct ecore_queue_cid *p_cid,
924 					  bool cqe_completion)
925 {
926 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
927 	struct vfpf_stop_rxqs_tlv *req;
928 	struct pfvf_def_resp_tlv *resp;
929 	enum _ecore_status_t rc;
930 
931 	/* clear mailbox and prep first tlv */
932 	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
933 
934 	req->rx_qid = p_cid->rel.queue_id;
935 	req->num_rxqs = 1;
936 	req->cqe_completion = cqe_completion;
937 
938 	ecore_vf_pf_add_qid(p_hwfn, p_cid);
939 
940 	/* add list termination tlv */
941 	ecore_add_tlv(&p_iov->offset,
942 		      CHANNEL_TLV_LIST_END,
943 		      sizeof(struct channel_list_end_tlv));
944 
945 	resp = &p_iov->pf2vf_reply->default_resp;
946 	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
947 	if (rc)
948 		goto exit;
949 
950 	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
951 		rc = ECORE_INVAL;
952 		goto exit;
953 	}
954 
955 exit:
956 	ecore_vf_pf_req_end(p_hwfn, rc);
957 
958 	return rc;
959 }
960 
961 enum _ecore_status_t
962 ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
963 		      struct ecore_queue_cid *p_cid,
964 		      dma_addr_t pbl_addr, u16 pbl_size,
965 		      void OSAL_IOMEM **pp_doorbell)
966 {
967 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
968 	struct pfvf_start_queue_resp_tlv *resp;
969 	struct vfpf_start_txq_tlv *req;
970 	u16 qid = p_cid->rel.queue_id;
971 	enum _ecore_status_t rc;
972 
973 	/* clear mailbox and prep first tlv */
974 	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
975 
976 	req->tx_qid = qid;
977 
978 	/* Tx */
979 	req->pbl_addr = pbl_addr;
980 	req->pbl_size = pbl_size;
981 	req->hw_sb = p_cid->sb_igu_id;
982 	req->sb_index = p_cid->sb_idx;
983 
984 	ecore_vf_pf_add_qid(p_hwfn, p_cid);
985 
986 	/* add list termination tlv */
987 	ecore_add_tlv(&p_iov->offset,
988 		      CHANNEL_TLV_LIST_END,
989 		      sizeof(struct channel_list_end_tlv));
990 
991 	resp  = &p_iov->pf2vf_reply->queue_start;
992 	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
993 	if (rc)
994 		goto exit;
995 
996 	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
997 		rc = ECORE_INVAL;
998 		goto exit;
999 	}
1000 
1001 	/* Modern PFs provide the actual offsets, while legacy PFs
1002 	 * provided only the queue id.
1003 	 */
1004 	if (!p_iov->b_pre_fp_hsi) {
1005 		*pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
1006 						resp->offset;
1007 	} else {
1008 		u8 cid = p_iov->acquire_resp.resc.cid[qid];
1009 
1010 		*pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
1011 						DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
1012 	}
1013 
1014 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1015 		   "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
1016 		   qid, *pp_doorbell, resp->offset);
1017 exit:
1018 	ecore_vf_pf_req_end(p_hwfn, rc);
1019 
1020 	return rc;
1021 }
1022 
1023 enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
1024 					  struct ecore_queue_cid *p_cid)
1025 {
1026 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1027 	struct vfpf_stop_txqs_tlv *req;
1028 	struct pfvf_def_resp_tlv *resp;
1029 	enum _ecore_status_t rc;
1030 
1031 	/* clear mailbox and prep first tlv */
1032 	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
1033 
1034 	req->tx_qid = p_cid->rel.queue_id;
1035 	req->num_txqs = 1;
1036 
1037 	ecore_vf_pf_add_qid(p_hwfn, p_cid);
1038 
1039 	/* add list termination tlv */
1040 	ecore_add_tlv(&p_iov->offset,
1041 		      CHANNEL_TLV_LIST_END,
1042 		      sizeof(struct channel_list_end_tlv));
1043 
1044 	resp = &p_iov->pf2vf_reply->default_resp;
1045 	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
1046 	if (rc)
1047 		goto exit;
1048 
1049 	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
1050 		rc = ECORE_INVAL;
1051 		goto exit;
1052 	}
1053 
1054 exit:
1055 	ecore_vf_pf_req_end(p_hwfn, rc);
1056 
1057 	return rc;
1058 }
1059 
1060 enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
1061 					     struct ecore_queue_cid **pp_cid,
1062 					     u8 num_rxqs,
1063 					     u8 comp_cqe_flg,
1064 					     u8 comp_event_flg)
1065 {
1066 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1067 	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
1068 	struct vfpf_update_rxq_tlv *req;
1069 	enum _ecore_status_t rc;
1070 
1071 	/* Starting with CHANNEL_TLV_QID and the need for additional queue
1072 	 * information, this API stopped supporting multiple rxqs.
1073 	 * TODO - remove this and change the API to accept a single queue-cid
1074 	 * in a follow-up patch.
1075 	 */
1076 	if (num_rxqs != 1) {
1077 		DP_NOTICE(p_hwfn, true,
1078 			  "VFs can no longer update more than a single queue\n");
1079 		return ECORE_INVAL;
1080 	}
1081 
1082 	/* clear mailbox and prep first tlv */
1083 	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_RXQ, sizeof(*req));
1084 
1085 	req->rx_qid = (*pp_cid)->rel.queue_id;
1086 	req->num_rxqs = 1;
1087 
1088 	if (comp_cqe_flg)
1089 		req->flags |= VFPF_RXQ_UPD_COMPLETE_CQE_FLAG;
1090 	if (comp_event_flg)
1091 		req->flags |= VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG;
1092 
1093 	ecore_vf_pf_add_qid(p_hwfn, *pp_cid);
1094 
1095 	/* add list termination tlv */
1096 	ecore_add_tlv(&p_iov->offset,
1097 		      CHANNEL_TLV_LIST_END,
1098 		      sizeof(struct channel_list_end_tlv));
1099 
1100 	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
1101 	if (rc)
1102 		goto exit;
1103 
1104 	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
1105 		rc = ECORE_INVAL;
1106 		goto exit;
1107 	}
1108 
1109 exit:
1110 	ecore_vf_pf_req_end(p_hwfn, rc);
1111 	return rc;
1112 }
1113 
1114 enum _ecore_status_t
1115 ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn, u8 vport_id,
1116 			u16 mtu, u8 inner_vlan_removal,
1117 			enum ecore_tpa_mode tpa_mode, u8 max_buffers_per_cqe,
1118 			u8 only_untagged)
1119 {
1120 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1121 	struct vfpf_vport_start_tlv *req;
1122 	struct pfvf_def_resp_tlv *resp;
1123 	enum _ecore_status_t rc;
1124 	int i;
1125 
1126 	/* clear mailbox and prep first tlv */
1127 	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));
1128 
1129 	req->mtu = mtu;
1130 	req->vport_id = vport_id;
1131 	req->inner_vlan_removal = inner_vlan_removal;
1132 	req->tpa_mode = tpa_mode;
1133 	req->max_buffers_per_cqe = max_buffers_per_cqe;
1134 	req->only_untagged = only_untagged;
1135 
1136 	/* status blocks */
1137 	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) {
1138 		struct ecore_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i];
1139 
1140 		if (p_sb)
1141 			req->sb_addr[i] = p_sb->sb_phys;
1142 	}
1143 
1144 	/* add list termination tlv */
1145 	ecore_add_tlv(&p_iov->offset,
1146 		      CHANNEL_TLV_LIST_END,
1147 		      sizeof(struct channel_list_end_tlv));
1148 
1149 	resp  = &p_iov->pf2vf_reply->default_resp;
1150 	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
1151 	if (rc)
1152 		goto exit;
1153 
1154 	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
1155 		rc = ECORE_INVAL;
1156 		goto exit;
1157 	}
1158 
1159 exit:
1160 	ecore_vf_pf_req_end(p_hwfn, rc);
1161 
1162 	return rc;
1163 }
1164 
1165 enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn)
1166 {
1167 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1168 	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
1169 	enum _ecore_status_t rc;
1170 
1171 	/* clear mailbox and prep first tlv */
1172 	ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
1173 			 sizeof(struct vfpf_first_tlv));
1174 
1175 	/* add list termination tlv */
1176 	ecore_add_tlv(&p_iov->offset,
1177 		      CHANNEL_TLV_LIST_END,
1178 		      sizeof(struct channel_list_end_tlv));
1179 
1180 	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
1181 	if (rc)
1182 		goto exit;
1183 
1184 	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
1185 		rc = ECORE_INVAL;
1186 		goto exit;
1187 	}
1188 
1189 exit:
1190 	ecore_vf_pf_req_end(p_hwfn, rc);
1191 
1192 	return rc;
1193 }
1194 
1195 static bool
1196 ecore_vf_handle_vp_update_is_needed(struct ecore_hwfn *p_hwfn,
1197 				    struct ecore_sp_vport_update_params *p_data,
1198 				    u16 tlv)
1199 {
1200 	switch (tlv) {
1201 	case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
1202 		return !!(p_data->update_vport_active_rx_flg ||
1203 			  p_data->update_vport_active_tx_flg);
1204 	case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
1205 #ifndef ASIC_ONLY
1206 		/* FPGA doesn't have PVFC and so can't support tx-switching */
1207 		return !!(p_data->update_tx_switching_flg &&
1208 			  !CHIP_REV_IS_FPGA(p_hwfn->p_dev));
1209 #else
1210 		return !!p_data->update_tx_switching_flg;
1211 #endif
1212 	case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
1213 		return !!p_data->update_inner_vlan_removal_flg;
1214 	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
1215 		return !!p_data->update_accept_any_vlan_flg;
1216 	case CHANNEL_TLV_VPORT_UPDATE_MCAST:
1217 		return !!p_data->update_approx_mcast_flg;
1218 	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
1219 		return !!(p_data->accept_flags.update_rx_mode_config ||
1220 			  p_data->accept_flags.update_tx_mode_config);
1221 	case CHANNEL_TLV_VPORT_UPDATE_RSS:
1222 		return !!p_data->rss_params;
1223 	case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
1224 		return !!p_data->sge_tpa_params;
1225 	default:
1226 		DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d] %s\n",
1227 			tlv, qede_ecore_channel_tlvs_string[tlv]);
1228 		return false;
1229 	}
1230 }
1231 
1232 static void
1233 ecore_vf_handle_vp_update_tlvs_resp(struct ecore_hwfn *p_hwfn,
1234 				    struct ecore_sp_vport_update_params *p_data)
1235 {
1236 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1237 	struct pfvf_def_resp_tlv *p_resp;
1238 	u16 tlv;
1239 
1240 	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
1241 	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX;
1242 	     tlv++) {
1243 		if (!ecore_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
1244 			continue;
1245 
1246 		p_resp = (struct pfvf_def_resp_tlv *)
1247 		    ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
1248 		if (p_resp && p_resp->hdr.status)
1249 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1250 				   "TLV[%d] type %s Configuration %s\n",
1251 				   tlv, qede_ecore_channel_tlvs_string[tlv],
1252 				   (p_resp && p_resp->hdr.status) ? "succeeded"
1253 								  : "failed");
1254 	}
1255 }
1256 
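/* Vport update aggregates all requested changes as optional TLVs in a single
 * message; resp_size grows by one default response per added TLV so the PF
 * can report per-TLV status, which is then parsed by
 * ecore_vf_handle_vp_update_tlvs_resp().
 */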
1257 enum _ecore_status_t
1258 ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
1259 			 struct ecore_sp_vport_update_params *p_params)
1260 {
1261 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1262 	struct vfpf_vport_update_tlv *req;
1263 	struct pfvf_def_resp_tlv *resp;
1264 	u8 update_rx, update_tx;
1265 	u32 resp_size = 0;
1266 	u16 size, tlv;
1267 	enum _ecore_status_t rc;
1268 
1269 	resp = &p_iov->pf2vf_reply->default_resp;
1270 	resp_size = sizeof(*resp);
1271 
1272 	update_rx = p_params->update_vport_active_rx_flg;
1273 	update_tx = p_params->update_vport_active_tx_flg;
1274 
1275 	/* clear mailbox and prep header tlv */
1276 	ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));
1277 
1278 	/* Prepare extended tlvs */
1279 	if (update_rx || update_tx) {
1280 		struct vfpf_vport_update_activate_tlv *p_act_tlv;
1281 
1282 		size = sizeof(struct vfpf_vport_update_activate_tlv);
1283 		p_act_tlv = ecore_add_tlv(&p_iov->offset,
1284 					  CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
1285 					  size);
1286 		resp_size += sizeof(struct pfvf_def_resp_tlv);
1287 
1288 		if (update_rx) {
1289 			p_act_tlv->update_rx = update_rx;
1290 			p_act_tlv->active_rx = p_params->vport_active_rx_flg;
1291 		}
1292 
1293 		if (update_tx) {
1294 			p_act_tlv->update_tx = update_tx;
1295 			p_act_tlv->active_tx = p_params->vport_active_tx_flg;
1296 		}
1297 	}
1298 
1299 	if (p_params->update_inner_vlan_removal_flg) {
1300 		struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
1301 
1302 		size = sizeof(struct vfpf_vport_update_vlan_strip_tlv);
1303 		p_vlan_tlv = ecore_add_tlv(&p_iov->offset,
1304 					   CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
1305 					   size);
1306 		resp_size += sizeof(struct pfvf_def_resp_tlv);
1307 
1308 		p_vlan_tlv->remove_vlan = p_params->inner_vlan_removal_flg;
1309 	}
1310 
1311 	if (p_params->update_tx_switching_flg) {
1312 		struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
1313 
1314 		size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
1315 		tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
1316 		p_tx_switch_tlv = ecore_add_tlv(&p_iov->offset,
1317 						tlv, size);
1318 		resp_size += sizeof(struct pfvf_def_resp_tlv);
1319 
1320 		p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
1321 	}
1322 
1323 	if (p_params->update_approx_mcast_flg) {
1324 		struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
1325 
1326 		size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
1327 		p_mcast_tlv = ecore_add_tlv(&p_iov->offset,
1328 					    CHANNEL_TLV_VPORT_UPDATE_MCAST,
1329 					    size);
1330 		resp_size += sizeof(struct pfvf_def_resp_tlv);
1331 
1332 		OSAL_MEMCPY(p_mcast_tlv->bins, p_params->bins,
1333 			    sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
1334 	}
1335 
1336 	update_rx = p_params->accept_flags.update_rx_mode_config;
1337 	update_tx = p_params->accept_flags.update_tx_mode_config;
1338 
1339 	if (update_rx || update_tx) {
1340 		struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
1341 
1342 		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
1343 		size = sizeof(struct vfpf_vport_update_accept_param_tlv);
1344 		p_accept_tlv = ecore_add_tlv(&p_iov->offset, tlv, size);
1345 		resp_size += sizeof(struct pfvf_def_resp_tlv);
1346 
1347 		if (update_rx) {
1348 			p_accept_tlv->update_rx_mode = update_rx;
1349 			p_accept_tlv->rx_accept_filter =
1350 			    p_params->accept_flags.rx_accept_filter;
1351 		}
1352 
1353 		if (update_tx) {
1354 			p_accept_tlv->update_tx_mode = update_tx;
1355 			p_accept_tlv->tx_accept_filter =
1356 			    p_params->accept_flags.tx_accept_filter;
1357 		}
1358 	}
1359 
1360 	if (p_params->rss_params) {
1361 		struct ecore_rss_params *rss_params = p_params->rss_params;
1362 		struct vfpf_vport_update_rss_tlv *p_rss_tlv;
1363 		int i, table_size;
1364 
1365 		size = sizeof(struct vfpf_vport_update_rss_tlv);
1366 		p_rss_tlv = ecore_add_tlv(&p_iov->offset,
1367 					  CHANNEL_TLV_VPORT_UPDATE_RSS, size);
1368 		resp_size += sizeof(struct pfvf_def_resp_tlv);
1369 
1370 		if (rss_params->update_rss_config)
1371 			p_rss_tlv->update_rss_flags |=
1372 			    VFPF_UPDATE_RSS_CONFIG_FLAG;
1373 		if (rss_params->update_rss_capabilities)
1374 			p_rss_tlv->update_rss_flags |=
1375 			    VFPF_UPDATE_RSS_CAPS_FLAG;
1376 		if (rss_params->update_rss_ind_table)
1377 			p_rss_tlv->update_rss_flags |=
1378 			    VFPF_UPDATE_RSS_IND_TABLE_FLAG;
1379 		if (rss_params->update_rss_key)
1380 			p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;
1381 
1382 		p_rss_tlv->rss_enable = rss_params->rss_enable;
1383 		p_rss_tlv->rss_caps = rss_params->rss_caps;
1384 		p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
1385 
1386 		table_size = OSAL_MIN_T(int, T_ETH_INDIRECTION_TABLE_SIZE,
1387 					1 << p_rss_tlv->rss_table_size_log);
1388 		for (i = 0; i < table_size; i++) {
1389 			struct ecore_queue_cid *p_queue;
1390 
1391 			p_queue = rss_params->rss_ind_table[i];
1392 			p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
1393 		}
1394 
1395 		OSAL_MEMCPY(p_rss_tlv->rss_key, rss_params->rss_key,
1396 			    sizeof(rss_params->rss_key));
1397 	}
1398 
1399 	if (p_params->update_accept_any_vlan_flg) {
1400 		struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;
1401 
1402 		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
1403 		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
1404 		p_any_vlan_tlv = ecore_add_tlv(&p_iov->offset, tlv, size);
1405 
1406 		resp_size += sizeof(struct pfvf_def_resp_tlv);
1407 		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
1408 		p_any_vlan_tlv->update_accept_any_vlan_flg =
1409 		    p_params->update_accept_any_vlan_flg;
1410 	}
1411 
1412 	if (p_params->sge_tpa_params) {
1413 		struct ecore_sge_tpa_params *sge_tpa_params;
1414 		struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
1415 
1416 		sge_tpa_params = p_params->sge_tpa_params;
1417 		size = sizeof(struct vfpf_vport_update_sge_tpa_tlv);
1418 		p_sge_tpa_tlv = ecore_add_tlv(&p_iov->offset,
1419 					      CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
1420 					      size);
1421 		resp_size += sizeof(struct pfvf_def_resp_tlv);
1422 
1423 		if (sge_tpa_params->update_tpa_en_flg)
1424 			p_sge_tpa_tlv->update_sge_tpa_flags |=
1425 			    VFPF_UPDATE_TPA_EN_FLAG;
1426 		if (sge_tpa_params->update_tpa_param_flg)
1427 			p_sge_tpa_tlv->update_sge_tpa_flags |=
1428 			    VFPF_UPDATE_TPA_PARAM_FLAG;
1429 
1430 		if (sge_tpa_params->tpa_ipv4_en_flg)
1431 			p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV4_EN_FLAG;
1432 		if (sge_tpa_params->tpa_ipv6_en_flg)
1433 			p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV6_EN_FLAG;
1434 		if (sge_tpa_params->tpa_pkt_split_flg)
1435 			p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_PKT_SPLIT_FLAG;
1436 		if (sge_tpa_params->tpa_hdr_data_split_flg)
1437 			p_sge_tpa_tlv->sge_tpa_flags |=
1438 			    VFPF_TPA_HDR_DATA_SPLIT_FLAG;
1439 		if (sge_tpa_params->tpa_gro_consistent_flg)
1440 			p_sge_tpa_tlv->sge_tpa_flags |=
1441 			    VFPF_TPA_GRO_CONSIST_FLAG;
1442 		if (sge_tpa_params->tpa_ipv4_tunn_en_flg)
1443 			p_sge_tpa_tlv->sge_tpa_flags |=
1444 			    VFPF_TPA_TUNN_IPV4_EN_FLAG;
1445 		if (sge_tpa_params->tpa_ipv6_tunn_en_flg)
1446 			p_sge_tpa_tlv->sge_tpa_flags |=
1447 			    VFPF_TPA_TUNN_IPV6_EN_FLAG;
1448 
1449 		p_sge_tpa_tlv->tpa_max_aggs_num =
1450 		    sge_tpa_params->tpa_max_aggs_num;
1451 		p_sge_tpa_tlv->tpa_max_size = sge_tpa_params->tpa_max_size;
1452 		p_sge_tpa_tlv->tpa_min_size_to_start =
1453 		    sge_tpa_params->tpa_min_size_to_start;
1454 		p_sge_tpa_tlv->tpa_min_size_to_cont =
1455 		    sge_tpa_params->tpa_min_size_to_cont;
1456 
1457 		p_sge_tpa_tlv->max_buffers_per_cqe =
1458 		    sge_tpa_params->max_buffers_per_cqe;
1459 	}
1460 
1461 	/* add list termination tlv */
1462 	ecore_add_tlv(&p_iov->offset,
1463 		      CHANNEL_TLV_LIST_END,
1464 		      sizeof(struct channel_list_end_tlv));
1465 
1466 	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
1467 	if (rc)
1468 		goto exit;
1469 
1470 	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
1471 		rc = ECORE_INVAL;
1472 		goto exit;
1473 	}
1474 
1475 	ecore_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);
1476 
1477 exit:
1478 	ecore_vf_pf_req_end(p_hwfn, rc);
1479 
1480 	return rc;
1481 }
1482 
1483 enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn)
1484 {
1485 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1486 	struct pfvf_def_resp_tlv *resp;
1487 	struct vfpf_first_tlv *req;
1488 	enum _ecore_status_t rc;
1489 
1490 	/* clear mailbox and prep first tlv */
1491 	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));
1492 
1493 	/* add list termination tlv */
1494 	ecore_add_tlv(&p_iov->offset,
1495 		      CHANNEL_TLV_LIST_END,
1496 		      sizeof(struct channel_list_end_tlv));
1497 
1498 	resp = &p_iov->pf2vf_reply->default_resp;
1499 	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
1500 	if (rc)
1501 		goto exit;
1502 
1503 	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
1504 		rc = ECORE_AGAIN;
1505 		goto exit;
1506 	}
1507 
1508 	p_hwfn->b_int_enabled = 0;
1509 
1510 exit:
1511 	ecore_vf_pf_req_end(p_hwfn, rc);
1512 
1513 	return rc;
1514 }
1515 
1516 void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
1517 			      struct ecore_filter_mcast *p_filter_cmd)
1518 {
1519 	struct ecore_sp_vport_update_params sp_params;
1520 	int i;
1521 
1522 	OSAL_MEMSET(&sp_params, 0, sizeof(sp_params));
1523 	sp_params.update_approx_mcast_flg = 1;
1524 
1525 	if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
1526 		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1527 			u32 bit;
1528 
1529 			bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1530 			sp_params.bins[bit / 32] |= 1 << (bit % 32);
1531 		}
1532 	}
1533 
1534 	ecore_vf_pf_vport_update(p_hwfn, &sp_params);
1535 }
1536 
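/* Illustrative caller sketch (hypothetical, not part of the driver) for the
 * unicast filter request below:
 *
 *	struct ecore_filter_ucast ucast;
 *
 *	OSAL_MEMSET(&ucast, 0, sizeof(ucast));
 *	ucast.opcode = ECORE_FILTER_ADD;
 *	ucast.type = ECORE_FILTER_MAC;
 *	OSAL_MEMCPY(ucast.mac, mac_addr, ETH_ALEN);
 *	rc = ecore_vf_pf_filter_ucast(p_hwfn, &ucast);
 *
 * The exact fields a real caller must set depend on the filter type; only
 * opcode, type, mac and vlan are forwarded to the PF below.
 */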
1537 enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,
1538 					      struct ecore_filter_ucast
1539 					      *p_ucast)
1540 {
1541 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1542 	struct vfpf_ucast_filter_tlv *req;
1543 	struct pfvf_def_resp_tlv *resp;
1544 	enum _ecore_status_t rc;
1545 
1546 	/* Sanitize */
1547 	if (p_ucast->opcode == ECORE_FILTER_MOVE) {
1548 		DP_NOTICE(p_hwfn, true,
1549 			  "VFs don't support Moving of filters\n");
1550 		return ECORE_INVAL;
1551 	}
1552 
1553 	/* clear mailbox and prep first tlv */
1554 	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
1555 	req->opcode = (u8)p_ucast->opcode;
1556 	req->type = (u8)p_ucast->type;
1557 	OSAL_MEMCPY(req->mac, p_ucast->mac, ETH_ALEN);
1558 	req->vlan = p_ucast->vlan;
1559 
1560 	/* add list termination tlv */
1561 	ecore_add_tlv(&p_iov->offset,
1562 		      CHANNEL_TLV_LIST_END,
1563 		      sizeof(struct channel_list_end_tlv));
1564 
1565 	resp = &p_iov->pf2vf_reply->default_resp;
1566 	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
1567 	if (rc)
1568 		goto exit;
1569 
1570 	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
1571 		rc = ECORE_AGAIN;
1572 		goto exit;
1573 	}
1574 
1575 exit:
1576 	ecore_vf_pf_req_end(p_hwfn, rc);
1577 
1578 	return rc;
1579 }
1580 
1581 enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn)
1582 {
1583 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1584 	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
1585 	enum _ecore_status_t rc;
1586 
1587 	/* clear mailbox and prep first tlv */
1588 	ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
1589 			 sizeof(struct vfpf_first_tlv));
1590 
1591 	/* add list termination tlv */
1592 	ecore_add_tlv(&p_iov->offset,
1593 		      CHANNEL_TLV_LIST_END,
1594 		      sizeof(struct channel_list_end_tlv));
1595 
1596 	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
1597 	if (rc)
1598 		goto exit;
1599 
1600 	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
1601 		rc = ECORE_INVAL;
1602 		goto exit;
1603 	}
1604 
1605 exit:
1606 	ecore_vf_pf_req_end(p_hwfn, rc);
1607 
1608 	return rc;
1609 }
1610 
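/* Read a queue's current interrupt coalescing value from the PF; the
 * Rx/Tx direction is taken from the queue cid.
 */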
1611 enum _ecore_status_t ecore_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
1612 					      u16 *p_coal,
1613 					      struct ecore_queue_cid *p_cid)
1614 {
1615 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1616 	struct pfvf_read_coal_resp_tlv *resp;
1617 	struct vfpf_read_coal_req_tlv *req;
1618 	enum _ecore_status_t rc;
1619 
1620 	/* clear mailbox and prep header tlv */
1621 	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ,
1622 			       sizeof(*req));
1623 	req->qid = p_cid->rel.queue_id;
1624 	req->is_rx = p_cid->b_is_rx ? 1 : 0;
1625 
1626 	ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_LIST_END,
1627 		      sizeof(struct channel_list_end_tlv));
1628 	resp = &p_iov->pf2vf_reply->read_coal_resp;
1629 
1630 	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
1631 	if (rc != ECORE_SUCCESS)
1632 		goto exit;
1633 
1634 	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
1635 		goto exit;
1636 
1637 	*p_coal = resp->coal;
1638 exit:
1639 	ecore_vf_pf_req_end(p_hwfn, rc);
1640 
1641 	return rc;
1642 }
1643 
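/* Request the PF to update the Rx/Tx interrupt coalescing of a queue;
 * the new values are cached in the device on success.
 */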
1644 enum _ecore_status_t
1645 ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal, u16 tx_coal,
1646 			 struct ecore_queue_cid     *p_cid)
1647 {
1648 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1649 	struct vfpf_update_coalesce *req;
1650 	struct pfvf_def_resp_tlv *resp;
1651 	enum _ecore_status_t rc;
1652 
1653 	/* clear mailbox and prep header tlv */
1654 	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE,
1655 			       sizeof(*req));
1656 
1657 	req->rx_coal = rx_coal;
1658 	req->tx_coal = tx_coal;
1659 	req->qid = p_cid->rel.queue_id;
1660 
1661 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1662 		   "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n",
1663 		   rx_coal, tx_coal, req->qid);
1664 
1665 	/* add list termination tlv */
1666 	ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_LIST_END,
1667 		      sizeof(struct channel_list_end_tlv));
1668 
1669 	resp = &p_iov->pf2vf_reply->default_resp;
1670 	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
1671 
1672 	if (rc != ECORE_SUCCESS)
1673 		goto exit;
1674 
1675 	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
1676 		goto exit;
1677 
1678 	p_hwfn->p_dev->rx_coalesce_usecs = rx_coal;
1679 	p_hwfn->p_dev->tx_coalesce_usecs = tx_coal;
1680 
1681 exit:
1682 	ecore_vf_pf_req_end(p_hwfn, rc);
1683 	return rc;
1684 }
1685 
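/* Notify the PF of a new MTU value. Fails with ECORE_INVAL if the MTU is
 * zero or the PF does not support the UPDATE_MTU request.
 */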
1686 enum _ecore_status_t
1687 ecore_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn, u16 mtu)
1688 {
1689 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1690 	struct vfpf_update_mtu_tlv *p_req;
1691 	struct pfvf_def_resp_tlv *p_resp;
1692 	enum _ecore_status_t rc;
1693 
1694 	if (!mtu)
1695 		return ECORE_INVAL;
1696 
1697 	/* clear mailbox and prep header tlv */
1698 	p_req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_MTU,
1699 				 sizeof(*p_req));
1700 	p_req->mtu = mtu;
1701 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1702 		   "Requesting MTU update to %d\n", mtu);
1703 
1704 	/* add list termination tlv */
1705 	ecore_add_tlv(&p_iov->offset,
1706 		      CHANNEL_TLV_LIST_END,
1707 		      sizeof(struct channel_list_end_tlv));
1708 
1709 	p_resp = &p_iov->pf2vf_reply->default_resp;
1710 	rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
1711 	if (p_resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED)
1712 		rc = ECORE_INVAL;
1713 
1714 	ecore_vf_pf_req_end(p_hwfn, rc);
1715 
1716 	return rc;
1717 }
1718 
1719 u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
1720 			   u16               sb_id)
1721 {
1722 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1723 
1724 	if (!p_iov) {
1725 		DP_NOTICE(p_hwfn, true, "vf_iov_info isn't initialized\n");
1726 		return 0;
1727 	}
1728 
1729 	return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
1730 }
1731 
1732 void ecore_vf_set_sb_info(struct ecore_hwfn *p_hwfn,
1733 			  u16 sb_id, struct ecore_sb_info *p_sb)
1734 {
1735 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1736 
1737 	if (!p_iov) {
1738 		DP_NOTICE(p_hwfn, true, "vf_iov_info isn't initialized\n");
1739 		return;
1740 	}
1741 
1742 	if (sb_id >= PFVF_MAX_SBS_PER_VF) {
1743 		DP_NOTICE(p_hwfn, true, "Can't configure SB %04x\n", sb_id);
1744 		return;
1745 	}
1746 
1747 	p_iov->sbs_info[sb_id] = p_sb;
1748 }
1749 
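/* Copy the PF's bulletin board into a local shadow; if its version changed,
 * validate the copy by CRC and store it as the new shadow on success.
 * *p_change is set when a new valid bulletin was accepted.
 */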
1750 enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
1751 					    u8 *p_change)
1752 {
1753 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1754 	struct ecore_bulletin_content shadow;
1755 	u32 crc, crc_size;
1756 
1757 	crc_size = sizeof(p_iov->bulletin.p_virt->crc);
1758 	*p_change = 0;
1759 
1760 	/* Need to guarantee PF is not in the middle of writing it */
1761 	OSAL_MEMCPY(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);
1762 
1763 	/* If version did not update, no need to do anything */
1764 	if (shadow.version == p_iov->bulletin_shadow.version)
1765 		return ECORE_SUCCESS;
1766 
1767 	/* Verify the bulletin we see is valid */
1768 	crc = OSAL_CRC32(0, (u8 *)&shadow + crc_size,
1769 			 p_iov->bulletin.size - crc_size);
1770 	if (crc != shadow.crc)
1771 		return ECORE_AGAIN;
1772 
1773 	/* Set the shadow bulletin and process it */
1774 	OSAL_MEMCPY(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);
1775 
1776 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1777 		   "Read a bulletin update %08x\n", shadow.version);
1778 
1779 	*p_change = 1;
1780 
1781 	return ECORE_SUCCESS;
1782 }
1783 
1784 void __ecore_vf_get_link_params(struct ecore_mcp_link_params *p_params,
1785 				struct ecore_bulletin_content *p_bulletin)
1786 {
1787 	OSAL_MEMSET(p_params, 0, sizeof(*p_params));
1788 
1789 	p_params->speed.autoneg = p_bulletin->req_autoneg;
1790 	p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
1791 	p_params->speed.forced_speed = p_bulletin->req_forced_speed;
1792 	p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
1793 	p_params->pause.forced_rx = p_bulletin->req_forced_rx;
1794 	p_params->pause.forced_tx = p_bulletin->req_forced_tx;
1795 	p_params->loopback_mode = p_bulletin->req_loopback;
1796 }
1797 
1798 void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
1799 			      struct ecore_mcp_link_params *params)
1800 {
1801 	__ecore_vf_get_link_params(params,
1802 				   &p_hwfn->vf_iov_info->bulletin_shadow);
1803 }
1804 
1805 void __ecore_vf_get_link_state(struct ecore_mcp_link_state *p_link,
1806 			       struct ecore_bulletin_content *p_bulletin)
1807 {
1808 	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
1809 
1810 	p_link->link_up = p_bulletin->link_up;
1811 	p_link->speed = p_bulletin->speed;
1812 	p_link->full_duplex = p_bulletin->full_duplex;
1813 	p_link->an = p_bulletin->autoneg;
1814 	p_link->an_complete = p_bulletin->autoneg_complete;
1815 	p_link->parallel_detection = p_bulletin->parallel_detection;
1816 	p_link->pfc_enabled = p_bulletin->pfc_enabled;
1817 	p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
1818 	p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
1819 	p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
1820 	p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
1821 	p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
1822 }
1823 
1824 void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
1825 			     struct ecore_mcp_link_state *link)
1826 {
1827 	__ecore_vf_get_link_state(link,
1828 				  &p_hwfn->vf_iov_info->bulletin_shadow);
1829 }
1830 
1831 void __ecore_vf_get_link_caps(struct ecore_mcp_link_capabilities *p_link_caps,
1832 			      struct ecore_bulletin_content *p_bulletin)
1833 {
1834 	OSAL_MEMSET(p_link_caps, 0, sizeof(*p_link_caps));
1835 	p_link_caps->speed_capabilities = p_bulletin->capability_speed;
1836 }
1837 
1838 void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
1839 			    struct ecore_mcp_link_capabilities *p_link_caps)
1840 {
1841 	__ecore_vf_get_link_caps(p_link_caps,
1842 				 &p_hwfn->vf_iov_info->bulletin_shadow);
1843 }
1844 
1845 void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs)
1846 {
1847 	*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
1848 }
1849 
1850 void ecore_vf_get_num_txqs(struct ecore_hwfn *p_hwfn,
1851 			   u8 *num_txqs)
1852 {
1853 	*num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs;
1854 }
1855 
1856 void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac)
1857 {
1858 	OSAL_MEMCPY(port_mac,
1859 		    p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac,
1860 		    ETH_ALEN);
1861 }
1862 
1863 void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
1864 				   u8 *num_vlan_filters)
1865 {
1866 	struct ecore_vf_iov *p_vf;
1867 
1868 	p_vf = p_hwfn->vf_iov_info;
1869 	*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
1870 }
1871 
1872 void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn,
1873 			  u32 *num_sbs)
1874 {
1875 	struct ecore_vf_iov *p_vf;
1876 
1877 	p_vf = p_hwfn->vf_iov_info;
1878 	*num_sbs = (u32)p_vf->acquire_resp.resc.num_sbs;
1879 }
1880 
1881 void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
1882 				  u32 *num_mac_filters)
1883 {
1884 	struct ecore_vf_iov *p_vf = p_hwfn->vf_iov_info;
1885 
1886 	*num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters;
1887 }
1888 
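/* The VF may only configure MAC filters while no PF-forced MAC is published
 * in the bulletin; with a forced MAC present the request is rejected.
 */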
1889 bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac)
1890 {
1891 	struct ecore_bulletin_content *bulletin;
1892 
1893 	bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
1894 	if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
1895 		return true;
1896 
1897 	/* Forbid VF from changing a MAC enforced by PF */
1898 	if (OSAL_MEMCMP(bulletin->mac, mac, ETH_ALEN))
1899 		return false;
1900 
1901 	return false;
1902 }
1903 
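/* Retrieve the MAC currently published in the bulletin, if any;
 * *p_is_forced distinguishes a PF-enforced MAC from a suggested one.
 */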
1904 bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,
1905 				      u8 *p_is_forced)
1906 {
1907 	struct ecore_bulletin_content *bulletin;
1908 
1909 	bulletin = &hwfn->vf_iov_info->bulletin_shadow;
1910 
1911 	if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
1912 		if (p_is_forced)
1913 			*p_is_forced = 1;
1914 	} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
1915 		if (p_is_forced)
1916 			*p_is_forced = 0;
1917 	} else {
1918 		return false;
1919 	}
1920 
1921 	OSAL_MEMCPY(dst_mac, bulletin->mac, ETH_ALEN);
1922 
1923 	return true;
1924 }
1925 
1926 void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn *p_hwfn,
1927 				     u16 *p_vxlan_port,
1928 				     u16 *p_geneve_port)
1929 {
1930 	struct ecore_bulletin_content *p_bulletin;
1931 
1932 	p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
1933 
1934 	*p_vxlan_port = p_bulletin->vxlan_udp_port;
1935 	*p_geneve_port = p_bulletin->geneve_udp_port;
1936 }
1937 
1938 bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid)
1939 {
1940 	struct ecore_bulletin_content *bulletin;
1941 
1942 	bulletin = &hwfn->vf_iov_info->bulletin_shadow;
1943 
1944 	if (!(bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
1945 		return false;
1946 
1947 	if (dst_pvid)
1948 		*dst_pvid = bulletin->pvid;
1949 
1950 	return true;
1951 }
1952 
1953 bool ecore_vf_get_pre_fp_hsi(struct ecore_hwfn *p_hwfn)
1954 {
1955 	return p_hwfn->vf_iov_info->b_pre_fp_hsi;
1956 }
1957 
1958 void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
1959 			     u16 *fw_major, u16 *fw_minor, u16 *fw_rev,
1960 			     u16 *fw_eng)
1961 {
1962 	struct pf_vf_pfdev_info *info;
1963 
1964 	info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;
1965 
1966 	*fw_major = info->fw_major;
1967 	*fw_minor = info->fw_minor;
1968 	*fw_rev = info->fw_rev;
1969 	*fw_eng = info->fw_eng;
1970 }
1971 
1972 #ifdef CONFIG_ECORE_SW_CHANNEL
1973 void ecore_vf_set_hw_channel(struct ecore_hwfn *p_hwfn, bool b_is_hw)
1974 {
1975 	p_hwfn->vf_iov_info->b_hw_channel = b_is_hw;
1976 }
1977 #endif
1978