/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#ifndef __ECORE_SRIOV_API_H__
#define __ECORE_SRIOV_API_H__

#include "common_hsi.h"
#include "ecore_status.h"

#define ECORE_ETH_VF_NUM_MAC_FILTERS 1
#define ECORE_ETH_VF_NUM_VLAN_FILTERS 2
#define ECORE_VF_ARRAY_LENGTH (3)

#define ECORE_VF_ARRAY_GET_VFID(arr, vfid)	\
	(((arr)[(vfid) / 64]) & (1ULL << ((vfid) % 64)))

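/* Illustrative sketch (not part of this API): ECORE_VF_ARRAY_GET_VFID treats
 * an array of ECORE_VF_ARRAY_LENGTH u64 words as a per-VF bitmap, indexing
 * word (vfid / 64) and testing bit (vfid % 64). A minimal usage sketch, with
 * the VF id chosen arbitrarily:
 *
 *	u64 bitmap[ECORE_VF_ARRAY_LENGTH] = {0};
 *	u16 rel_vf_id = 70;
 *	bool b_pending;
 *
 *	bitmap[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
 *	b_pending = !!ECORE_VF_ARRAY_GET_VFID(bitmap, rel_vf_id);
 */
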
#define IS_VF(p_dev)		((p_dev)->b_is_vf)
#define IS_PF(p_dev)		(!((p_dev)->b_is_vf))
#ifdef CONFIG_ECORE_SRIOV
#define IS_PF_SRIOV(p_hwfn)	(!!((p_hwfn)->p_dev->p_iov_info))
#else
#define IS_PF_SRIOV(p_hwfn)	(0)
#endif
#define IS_PF_SRIOV_ALLOC(p_hwfn)	(!!((p_hwfn)->pf_iov_info))
#define IS_PF_PDA(p_hwfn)	0 /* @@TBD Michalk */

/* @@@ TBD MichalK - what should this number be */
#define ECORE_MAX_VF_CHAINS_PER_PF 16

/* vport update extended feature tlvs flags */
enum ecore_iov_vport_update_flag {
	ECORE_IOV_VP_UPDATE_ACTIVATE		= 0,
	ECORE_IOV_VP_UPDATE_VLAN_STRIP		= 1,
	ECORE_IOV_VP_UPDATE_TX_SWITCH		= 2,
	ECORE_IOV_VP_UPDATE_MCAST		= 3,
	ECORE_IOV_VP_UPDATE_ACCEPT_PARAM	= 4,
	ECORE_IOV_VP_UPDATE_RSS			= 5,
	ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN	= 6,
	ECORE_IOV_VP_UPDATE_SGE_TPA		= 7,
	ECORE_IOV_VP_UPDATE_MAX			= 8,
};
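
/* Illustrative sketch (assumption: as the ordered values suggest, these flags
 * are used as bit positions in a bitmask describing which extended features
 * accompany a vport-update request). Building and testing such a mask might
 * look like:
 *
 *	u16 feature_mask = 0;
 *	bool b_rss_present;
 *
 *	feature_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
 *	feature_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
 *	b_rss_present = !!(feature_mask & (1 << ECORE_IOV_VP_UPDATE_RSS));
 */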

/* PF to VF STATUS is part of vfpf-channel API
 * and must be forward compatible
 */
enum ecore_iov_pf_to_vf_status {
	PFVF_STATUS_WAITING = 0,
	PFVF_STATUS_SUCCESS,
	PFVF_STATUS_FAILURE,
	PFVF_STATUS_NOT_SUPPORTED,
	PFVF_STATUS_NO_RESOURCE,
	PFVF_STATUS_FORCED,
	PFVF_STATUS_MALICIOUS,
	PFVF_STATUS_ACQUIRED,
};

struct ecore_mcp_link_params;
struct ecore_mcp_link_state;
struct ecore_mcp_link_capabilities;

/* These defines are used by the hw-channel; should never change order */
#define VFPF_ACQUIRE_OS_LINUX (0)
#define VFPF_ACQUIRE_OS_WINDOWS (1)
#define VFPF_ACQUIRE_OS_ESX (2)
#define VFPF_ACQUIRE_OS_SOLARIS (3)
#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)

struct ecore_vf_acquire_sw_info {
	u32 driver_version;
	u8 os_type;

	/* Several close driver releases use nearly the same FW with
	 * different version numbers, which makes them appear incompatible
	 * since the versioning scheme is still tied directly to the FW
	 * version; this flag allows overriding that check. Only those
	 * releases actually support this feature, so it does not break
	 * forward compatibility with newer HV drivers that are no longer
	 * suited.
	 */
	bool override_fw_version;
};

struct ecore_public_vf_info {
	/* These copies will later be reflected in the bulletin board,
	 * but this copy should be newer.
	 */
	u8 forced_mac[ETH_ALEN];
	u16 forced_vlan;

	/* Trusted VFs can configure promiscuous mode and set a MAC address
	 * even though the PF has set a forced MAC.
	 * Also store the shadow promisc configuration if needed.
	 */
	bool is_trusted_configured;
	bool is_trusted_request;
};

struct ecore_iov_vf_init_params {
	u16 rel_vf_id;

	/* Number of requested queues; currently, different numbers of
	 * Rx/Tx queues are not supported.
	 */
	/* TODO - remove this limitation */
	u16 num_queues;

	/* Allow the client to choose which qzones to use for Rx/Tx,
	 * and which queue_base to use for Tx queues on a per-queue basis.
	 * Note that values should be relative to the PF's resources.
	 */
	u16 req_rx_queue[ECORE_MAX_VF_CHAINS_PER_PF];
	u16 req_tx_queue[ECORE_MAX_VF_CHAINS_PER_PF];

	u8 vport_id;

	/* Should be set in case RSS is going to be used for the VF */
	u8 rss_eng_id;
};

#ifdef CONFIG_ECORE_SW_CHANNEL
/* This is SW channel related only... */
enum mbx_state {
	VF_PF_UNKNOWN_STATE			= 0,
	VF_PF_WAIT_FOR_START_REQUEST		= 1,
	VF_PF_WAIT_FOR_NEXT_CHUNK_OF_REQUEST	= 2,
	VF_PF_REQUEST_IN_PROCESSING		= 3,
	VF_PF_RESPONSE_READY			= 4,
};

struct ecore_iov_sw_mbx {
	enum mbx_state		mbx_state;

	u32			request_size;
	u32			request_offset;

	u32			response_size;
	u32			response_offset;
};

/**
 * @brief Get the vf sw mailbox params
 *
 * @param p_hwfn
 * @param rel_vf_id
 *
 * @return struct ecore_iov_sw_mbx*
 */
struct ecore_iov_sw_mbx*
ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
			u16 rel_vf_id);
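
/* Illustrative sketch (assumption about typical SW-channel usage): checking
 * whether a VF's software mailbox already holds a ready response.
 *
 *	struct ecore_iov_sw_mbx *p_mbx;
 *	bool b_response_ready;
 *
 *	p_mbx = ecore_iov_get_vf_sw_mbx(p_hwfn, rel_vf_id);
 *	b_response_ready = (p_mbx != OSAL_NULL &&
 *			    p_mbx->mbx_state == VF_PF_RESPONSE_READY);
 */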
#endif

/* This struct is part of ecore_dev and contains data relevant to all hwfns;
 * Initialized only if SR-IOV capability is exposed in PCIe config space.
 */
struct ecore_hw_sriov_info {
	/* standard SRIOV capability fields, mostly for debugging */
	int	pos;		/* capability position */
	int	nres;		/* number of resources */
	u32	cap;		/* SR-IOV Capabilities */
	u16	ctrl;		/* SR-IOV Control */
	u16	total_vfs;	/* total VFs associated with the PF */
	u16	num_vfs;        /* number of VFs that have been started */
	u16	initial_vfs;    /* initial VFs associated with the PF */
	u16	nr_virtfn;	/* number of VFs available */
	u16	offset;		/* first VF Routing ID offset */
	u16	stride;		/* following VF stride */
	u16	vf_device_id;	/* VF device id */
	u32	pgsz;		/* page size for BAR alignment */
	u8	link;		/* Function Dependency Link */

	u32	first_vf_in_pf;
};

#ifdef CONFIG_ECORE_SRIOV
#ifndef LINUX_REMOVE
/**
 * @brief mark/clear all VFs before/after an incoming PCIe SR-IOV
 *        disable.
 *
 * @param p_dev
 * @param to_disable
 */
void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
				  u8 to_disable);

/**
 * @brief mark/clear a chosen VF before/after an incoming PCIe
 *        SR-IOV disable.
 *
 * @param p_dev
 * @param rel_vf_id
 * @param to_disable
 */
void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
				 u16 rel_vf_id,
				 u8 to_disable);

/**
 * @brief ecore_iov_init_hw_for_vf - initialize the HW for
 *        enabling access by a VF. Also includes preparing the
 *        IGU for VF access. This needs to be called AFTER the HW is
 *        initialized and BEFORE the VF is loaded inside the VM.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      struct ecore_iov_vf_init_params
						     *p_params);
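
/* Illustrative sketch (not part of this API): enabling a single VF with two
 * queue pairs. The queue-zone and vport numbers are illustrative; real
 * values must come from the PF's own resources.
 *
 *	struct ecore_iov_vf_init_params params;
 *	enum _ecore_status_t rc;
 *
 *	OSAL_MEMSET(&params, 0, sizeof(params));
 *	params.rel_vf_id = 0;
 *	params.num_queues = 2;
 *	params.req_rx_queue[0] = 4;
 *	params.req_tx_queue[0] = 4;
 *	params.req_rx_queue[1] = 5;
 *	params.req_tx_queue[1] = 5;
 *	params.vport_id = 1;
 *
 *	rc = ecore_iov_init_hw_for_vf(p_hwfn, p_ptt, &params);
 */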

/**
 * @brief ecore_iov_process_mbx_req - process a request received
 *        from the VF
 *
 * @param p_hwfn
 * @param p_ptt
 * @param vfid
 */
void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       int vfid);

/**
 * @brief ecore_iov_release_hw_for_vf - called once the upper layer
 *        knows the VF is done with; any resources allocated for the
 *        VF can be released at this point. This must be done once
 *        we know the VF is no longer loaded in the VM.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param rel_vf_id
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u16 rel_vf_id);

/**
 * @brief ecore_iov_set_vf_ctx - set a context for a given VF
 *
 * @param p_hwfn
 * @param vf_id
 * @param ctx
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
					  u16 vf_id,
					  void *ctx);

/**
 * @brief FLR cleanup for all VFs
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt);

/**
 * @brief FLR cleanup for a single VF
 *
 * @param p_hwfn
 * @param p_ptt
 * @param rel_vf_id
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt,
				u16 rel_vf_id);

/**
 * @brief Update the bulletin with link information. Notice this does NOT
 *        send a bulletin update, only updates the PF's bulletin.
 *
 * @param p_hwfn
 * @param vfid
 * @param params - the link params to use for the VF link configuration
 * @param link - the link output to use for the VF link configuration
 * @param p_caps - the link default capabilities.
 */
void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
			u16 vfid,
			struct ecore_mcp_link_params *params,
			struct ecore_mcp_link_state *link,
			struct ecore_mcp_link_capabilities *p_caps);

/**
 * @brief Returns link information as perceived by the VF.
 *
 * @param p_hwfn
 * @param vfid
 * @param params - the link params visible to the VF.
 * @param link - the link state visible to the VF.
 * @param p_caps - the link default capabilities visible to the VF.
 */
void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
			u16 vfid,
			struct ecore_mcp_link_params *params,
			struct ecore_mcp_link_state *link,
			struct ecore_mcp_link_capabilities *p_caps);

/**
 * @brief Return whether the VF has a pending FLR
 *
 * @param p_hwfn
 * @param rel_vf_id
 *
 * @return bool
 */
bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn,
				 u16 rel_vf_id);

/**
 * @brief Check if the given VF ID @rel_vf_id is valid
 *        w.r.t. the @b_enabled_only value:
 *        if b_enabled_only = true - only an enabled VF id is valid;
 *        else any VF id less than max_vfs is valid.
 *
 * @param p_hwfn
 * @param rel_vf_id - Relative VF ID
 * @param b_enabled_only - consider only enabled VFs
 * @param b_non_malicious - true iff we want to validate the VF isn't malicious.
 *
 * @return bool - true for a valid VF ID
 */
bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn,
			     int rel_vf_id,
			     bool b_enabled_only, bool b_non_malicious);

/**
 * @brief Get a VF's public info structure
 *
 * @param p_hwfn
 * @param vfid - Relative VF ID
 * @param b_enabled_only - false to allow access even if the VF is disabled
 *
 * @return struct ecore_public_vf_info *
 */
struct ecore_public_vf_info*
ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
			     u16 vfid, bool b_enabled_only);

/**
 * @brief fills a bitmask of all VFs which have pending unhandled
 *        messages.
 *
 * @param p_hwfn
 * @param events - output bitmask of VFs with pending messages
 */
void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
				     u64 *events);
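
/* Illustrative sketch (assumption: the events buffer spans
 * ECORE_VF_ARRAY_LENGTH u64 words, matching ECORE_VF_ARRAY_GET_VFID):
 * servicing every active VF that has a pending mailbox request.
 *
 *	u64 events[ECORE_VF_ARRAY_LENGTH];
 *	u16 i;
 *
 *	ecore_iov_pf_get_pending_events(p_hwfn, events);
 *	ecore_for_each_vf(p_hwfn, i) {
 *		if (ECORE_VF_ARRAY_GET_VFID(events, i))
 *			ecore_iov_process_mbx_req(p_hwfn, p_ptt, i);
 *	}
 */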

/**
 * @brief Copy the VF's message to the PF's buffer
 *
 * @param p_hwfn
 * @param ptt
 * @param vfid
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *ptt,
					   int vfid);

/**
 * @brief Set a forced MAC address in the PF's copy of the bulletin board
 *        and configure FW/HW to support the configuration.
 *
 * @param p_hwfn
 * @param mac
 * @param vfid
 */
void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
				       u8 *mac, int vfid);

/**
 * @brief Set a MAC address in the PF's copy of the bulletin board without
 *        configuring FW/HW.
 *
 * @param p_hwfn
 * @param mac
 * @param vfid
 */
enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
						u8 *mac, int vfid);

/**
 * @brief Set the default behaviour of a VF, in case no VLANs are configured
 *        for it, regarding whether to accept only untagged traffic or all
 *        traffic. Must be called prior to the VF vport-start.
 *
 * @param p_hwfn
 * @param b_untagged_only
 * @param vfid
 *
 * @return ECORE_SUCCESS if configuration would stick.
 */
enum _ecore_status_t
ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
					       bool b_untagged_only,
					       int vfid);

/**
 * @brief Get the VF's opaque fid.
 *
 * @param p_hwfn
 * @param vfid
 * @param opaque_fid
 */
void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
				  u16 *opaque_fid);

/**
 * @brief Set a forced VLAN [pvid] in the PF's copy of the bulletin board
 *        and configure FW/HW to support the configuration.
 *        Setting a pvid of 0 clears the feature.
 *
 * @param p_hwfn
 * @param pvid
 * @param vfid
 */
void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
					u16 pvid, int vfid);

/**
 * @brief Check if the VF has a VPORT instance. This can be used
 *	  to check if the VPORT is active.
 *
 * @param p_hwfn
 * @param vfid
 */
bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid);

/**
 * @brief PF posts the bulletin to the VF
 *
 * @param p_hwfn
 * @param vfid
 * @param p_ptt
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
						int vfid,
						struct ecore_ptt *p_ptt);
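
/* Illustrative sketch (not part of this API): forcing a MAC and a pvid in
 * the PF's bulletin copy for a VF and then publishing the bulletin. The MAC
 * and VLAN values are arbitrary.
 *
 *	u8 mac[ETH_ALEN] = {0x00, 0x0e, 0x1e, 0x00, 0x00, 0x01};
 *
 *	ecore_iov_bulletin_set_forced_mac(p_hwfn, mac, vfid);
 *	ecore_iov_bulletin_set_forced_vlan(p_hwfn, 100, vfid);
 *	(void)ecore_iov_post_vf_bulletin(p_hwfn, vfid, p_ptt);
 */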

/**
 * @brief Check if the given VF (@vfid) is marked as stopped
 *
 * @param p_hwfn
 * @param vfid
 *
 * @return bool : true if stopped
 */
bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid);

/**
 * @brief Configure VF anti-spoofing
 *
 * @param p_hwfn
 * @param vfid
 * @param val - spoofchk value - true/false
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
					    int vfid, bool val);

/**
 * @brief Get the VF's configured spoofchk value.
 *
 * @param p_hwfn
 * @param vfid
 *
 * @return bool - spoofchk value - true/false
 */
bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid);

/**
 * @brief Check for SRIOV sanity by the PF.
 *
 * @param p_hwfn
 * @param vfid
 *
 * @return bool - true if sanity checks pass, else false
 */
bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid);

/**
 * @brief Get the number of VF chains.
 *
 * @param p_hwfn
 *
 * @return u8
 */
u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn);

/**
 * @brief Get the VF's request mailbox params
 *
 * @param p_hwfn
 * @param rel_vf_id
 * @param pp_req_virt_addr
 * @param p_req_virt_size
 */
void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
					  u16 rel_vf_id,
					  void **pp_req_virt_addr,
					  u16 *p_req_virt_size);

/**
 * @brief Get the VF's reply mailbox params
 *
 * @param p_hwfn
 * @param rel_vf_id
 * @param pp_reply_virt_addr
 * @param p_reply_virt_size
 */
void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
					    u16	rel_vf_id,
					    void **pp_reply_virt_addr,
					    u16	*p_reply_virt_size);
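
/* Illustrative sketch (not part of this API): fetching the VF's request and
 * reply mailbox buffers, e.g. for debug dumps.
 *
 *	void *p_req, *p_reply;
 *	u16 req_size, reply_size;
 *
 *	ecore_iov_get_vf_req_virt_mbx_params(p_hwfn, rel_vf_id,
 *					     &p_req, &req_size);
 *	ecore_iov_get_vf_reply_virt_mbx_params(p_hwfn, rel_vf_id,
 *					       &p_reply, &reply_size);
 */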

/**
 * @brief Validate if the given length is a valid vfpf message
 *        length
 *
 * @param length
 *
 * @return bool
 */
bool ecore_iov_is_valid_vfpf_msg_length(u32 length);

/**
 * @brief Return the max pfvf message length
 *
 * @return u32
 */
u32 ecore_iov_pfvf_msg_length(void);

/**
 * @brief Returns the MAC address if one is configured
 *
 * @param p_hwfn
 * @param rel_vf_id
 *
 * @return OSAL_NULL if the MAC isn't set; otherwise, returns the MAC.
 */
u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn,
			       u16 rel_vf_id);

/**
 * @brief Returns the forced MAC address if one is configured
 *
 * @param p_hwfn
 * @param rel_vf_id
 *
 * @return OSAL_NULL if the MAC isn't forced; otherwise, returns the MAC.
 */
u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn,
				      u16 rel_vf_id);

/**
 * @brief Returns the pvid if one is configured
 *
 * @param p_hwfn
 * @param rel_vf_id
 *
 * @return 0 if no pvid is configured, otherwise the pvid.
 */
u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
				       u16 rel_vf_id);

/**
 * @brief Configure a VF's tx rate
 *
 * @param p_hwfn
 * @param p_ptt
 * @param vfid
 * @param val - tx rate value in Mb/sec.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 int vfid, int val);

/**
 * @brief - Retrieves the statistics associated with a VF
 *
 * @param p_hwfn
 * @param p_ptt
 * @param vfid
 * @param p_stats - this will be filled with the VF statistics
 *
 * @return ECORE_SUCCESS iff statistics were retrieved. Error otherwise.
 */
enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    int vfid,
					    struct ecore_eth_stats *p_stats);
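
/* Illustrative sketch (not part of this API; assumes the full definition of
 * struct ecore_eth_stats is visible to the caller): reading a VF's
 * statistics and bailing out on failure.
 *
 *	struct ecore_eth_stats stats;
 *	enum _ecore_status_t rc;
 *
 *	rc = ecore_iov_get_vf_stats(p_hwfn, p_ptt, vfid, &stats);
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 */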

/**
 * @brief - Retrieves the number of Rx queue chains
 *
 * @param p_hwfn
 * @param rel_vf_id
 *
 * @return num of rxqs chains.
 */
u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn,
			     u16 rel_vf_id);

/**
 * @brief - Retrieves the number of active Rx queue chains
 *
 * @param p_hwfn
 * @param rel_vf_id
 *
 * @return
 */
u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn,
				    u16 rel_vf_id);

/**
 * @brief - Retrieves the ctx pointer
 *
 * @param p_hwfn
 * @param rel_vf_id
 *
 * @return
 */
void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn,
			   u16 rel_vf_id);

/**
 * @brief - Retrieves the VF's number of SBs
 *
 * @param p_hwfn
 * @param rel_vf_id
 *
 * @return
 */
u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn,
			    u16 rel_vf_id);

/**
 * @brief - Return true if the VF is waiting for acquire
 *
 * @param p_hwfn
 * @param rel_vf_id
 *
 * @return
 */
bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn,
				      u16 rel_vf_id);

/**
 * @brief - Return true if the VF is acquired but not initialized
 *
 * @param p_hwfn
 * @param rel_vf_id
 *
 * @return
 */
bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
					      u16 rel_vf_id);

/**
 * @brief - Return true if the VF is acquired and initialized
 *
 * @param p_hwfn
 * @param rel_vf_id
 *
 * @return
 */
bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn,
				 u16 rel_vf_id);

/**
 * @brief - Return true if the VF has started in FW
 *
 * @param p_hwfn
 * @param rel_vf_id
 *
 * @return
 */
bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
			     u16 rel_vf_id);

/**
 * @brief - Get the VF's configured vport min rate.
 * @param p_hwfn
 * @param vfid
 *
 * @return - rate in Mbps
 */
int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);

/**
 * @brief - Configure the min rate for a VF's vport.
 * @param p_dev
 * @param vfid
 * @param rate - rate in Mbps
 *
 * @return
 */
enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
						     int vfid, u32 rate);
#endif

/**
 * @brief ecore_iov_pf_configure_vf_queue_coalesce - PF configures the
 *    coalesce parameters of a VF's Rx and Tx queue.
 *    While the API allows setting coalescing per-qid, all queues sharing a SB
 *    should be in the same range [i.e., either 0-0x7f, 0x80-0xff or
 *    0x100-0x1ff], otherwise the configuration would break.
 *
 * @param p_hwfn
 * @param rx_coal - Rx coalesce value in microseconds.
 * @param tx_coal - Tx coalesce value in microseconds.
 * @param vf_id
 * @param qid
 *
 * @return enum _ecore_status_t
 **/
enum _ecore_status_t
ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
					 u16 rx_coal, u16 tx_coal,
					 u16 vf_id, u16 qid);
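
/* Illustrative sketch (not part of this API): setting 60us Rx / 120us Tx
 * coalescing for the first queue of a VF.
 *
 *	enum _ecore_status_t rc;
 *
 *	rc = ecore_iov_pf_configure_vf_queue_coalesce(p_hwfn, 60, 120,
 *						      vf_id, 0);
 */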

/**
 * @brief - Given a VF index, return the index of the next [including that]
 *          active VF.
 *
 * @param p_hwfn
 * @param rel_vf_id
 *
 * @return MAX_NUM_VFS_K2 in case there are no further active VFs, otherwise
 *         the index.
 */
u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);

void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn, int vfid,
				      u16 vxlan_port, u16 geneve_port);

#ifdef CONFIG_ECORE_SW_CHANNEL
/**
 * @brief Set whether the PF should communicate with the VF using the SW/HW
 *        channel. Needs to be called for an enabled VF before acquire is over
 *        [the latest good point for doing that is OSAL_IOV_VF_ACQUIRE()]
 *
 * @param p_hwfn
 * @param vfid - relative vf index
 * @param b_is_hw - true iff the PF is to use the HW channel for communication
 */
void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid,
				 bool b_is_hw);
#endif
#endif /* CONFIG_ECORE_SRIOV */

#define ecore_for_each_vf(_p_hwfn, _i)					\
	for (_i = ecore_iov_get_next_active_vf(_p_hwfn, 0);		\
	     _i < MAX_NUM_VFS_K2;					\
	     _i = ecore_iov_get_next_active_vf(_p_hwfn, _i + 1))
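
/* Illustrative sketch (not part of this API): walking all currently active
 * VFs of a hw-function and counting those already started in FW.
 *
 *	u16 i;
 *	u16 num_started = 0;
 *
 *	ecore_for_each_vf(p_hwfn, i) {
 *		if (ecore_iov_is_vf_started(p_hwfn, i))
 *			num_started++;
 *	}
 */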

#endif