/*
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#ifndef __ECORE_DEV_API_H__
#define __ECORE_DEV_API_H__

#include "ecore_status.h"
#include "ecore_chain.h"
#include "ecore_int_api.h"

/**
 * @brief ecore_init_dp - initialize the debug level
 *
 * @param p_dev
 * @param dp_module
 * @param dp_level
 * @param dp_ctx
 */
void ecore_init_dp(struct ecore_dev *p_dev,
		   u32 dp_module,
		   u8 dp_level,
		   void *dp_ctx);

/**
 * @brief ecore_init_struct - initialize the device structure to
 *        its defaults
 *
 * @param p_dev
 */
enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev);
/**
 * @brief ecore_resc_free - free the device's resources
 *
 * @param p_dev
 */
void ecore_resc_free(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_alloc - allocate the device's resources
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_setup - setup the allocated resources
 *
 * @param p_dev
 */
void ecore_resc_setup(struct ecore_dev *p_dev);

enum ecore_mfw_timeout_fallback {
	ECORE_TO_FALLBACK_TO_NONE,
	ECORE_TO_FALLBACK_TO_DEFAULT,
	ECORE_TO_FALLBACK_FAIL_LOAD,
};

enum ecore_override_force_load {
	ECORE_OVERRIDE_FORCE_LOAD_NONE,
	ECORE_OVERRIDE_FORCE_LOAD_ALWAYS,
	ECORE_OVERRIDE_FORCE_LOAD_NEVER,
};

struct ecore_drv_load_params {
	/* Indicates whether the driver is running over a crash kernel.
	 * As part of the load request, this will be used for providing the
	 * driver role to the MFW.
	 * In case of a crash kernel over PDA - this should be set to false.
	 */
	bool is_crash_kernel;

	/* The timeout value that the MFW should use when locking the engine for
	 * the driver load process.
	 * A value of '0' means the default value, and '255' means no timeout.
	 */
	u8 mfw_timeout_val;
#define ECORE_LOAD_REQ_LOCK_TO_DEFAULT	0
#define ECORE_LOAD_REQ_LOCK_TO_NONE	255

	/* Action to take in case the MFW doesn't support timeout values other
	 * than default and none.
	 */
	enum ecore_mfw_timeout_fallback mfw_timeout_fallback;

	/* Avoid engine reset when first PF loads on it */
	bool avoid_eng_reset;

	/* Allow overriding the default force load behavior */
	enum ecore_override_force_load override_force_load;
};

struct ecore_hw_init_params {
	/* Tunneling parameters */
	struct ecore_tunnel_info *p_tunn;

	bool b_hw_start;

	/* Interrupt mode [msix, inta, etc.] to use */
	enum ecore_int_mode int_mode;

	/* NPAR tx switching to be used for vports configured for tx-switching
	 */
	bool allow_npar_tx_switch;

	/* Binary fw data pointer in binary fw file */
	const u8 *bin_fw_data;

	/* Driver load parameters */
	struct ecore_drv_load_params *p_drv_load_params;

	/* SPQ block timeout in msec */
	u32 spq_timeout_ms;
};

/**
 * @brief ecore_hw_init - initialize and, optionally, start the HW
 *
 * @param p_dev
 * @param p_params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
				   struct ecore_hw_init_params *p_params);
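
/* Example - a minimal load-flow sketch; `fw_data' is assumed to point at the
 * firmware image provided by the driver, and error handling is elided:
 *
 *	struct ecore_drv_load_params load_params = { 0 };
 *	struct ecore_hw_init_params init_params = { 0 };
 *
 *	load_params.mfw_timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
 *	load_params.mfw_timeout_fallback = ECORE_TO_FALLBACK_TO_DEFAULT;
 *	load_params.override_force_load = ECORE_OVERRIDE_FORCE_LOAD_NONE;
 *
 *	init_params.b_hw_start = true;
 *	init_params.int_mode = ECORE_INT_MODE_MSIX;
 *	init_params.allow_npar_tx_switch = true;
 *	init_params.bin_fw_data = fw_data;
 *	init_params.p_drv_load_params = &load_params;
 *
 *	if (ecore_hw_init(p_dev, &init_params) != ECORE_SUCCESS)
 *		goto err;
 */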

/**
 * @brief ecore_hw_timers_stop_all - stop the HW timers on all hwfns
 *
 * @param p_dev
 *
 * @return void
 */
void ecore_hw_timers_stop_all(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_stop - stop the HW
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_stop_fastpath - should be called in case
 *        slowpath is still required for the device,
 *        but fastpath is not.
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev);

#ifndef LINUX_REMOVE
/**
 * @brief ecore_prepare_hibernate - should be called when
 *        the system is going into the hibernate state
 *
 * @param p_dev
 *
 */
void ecore_prepare_hibernate(struct ecore_dev *p_dev);

enum ecore_db_rec_width {
	DB_REC_WIDTH_32B,
	DB_REC_WIDTH_64B,
};

enum ecore_db_rec_space {
	DB_REC_KERNEL,
	DB_REC_USER,
};

/**
 * @brief db_recovery_add - add doorbell information to the doorbell
 * recovery mechanism.
 *
 * @param p_dev
 * @param db_addr - doorbell address
 * @param db_data - address of where db_data is stored
 * @param db_width - doorbell is 32b or 64b
 * @param db_space - doorbell recovery addresses are user or kernel space
 */
enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
					   void OSAL_IOMEM *db_addr,
					   void *db_data,
					   enum ecore_db_rec_width db_width,
					   enum ecore_db_rec_space db_space);

/**
 * @brief db_recovery_del - remove doorbell information from the doorbell
 * recovery mechanism. db_data serves as key (db_addr is not unique).
 *
 * @param p_dev
 * @param db_addr - doorbell address
 * @param db_data - address where db_data is stored. Serves as key for the
 *                  entry to delete.
 */
enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
					   void OSAL_IOMEM *db_addr,
					   void *db_data);
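
/* Example - registering a doorbell with the recovery mechanism and removing
 * it again; a sketch. `db_addr' and `db_data' are assumed to come from the
 * caller's queue setup, and the db_data storage must stay valid while
 * registered since it serves as the lookup key:
 *
 *	rc = ecore_db_recovery_add(p_dev, db_addr, &db_data,
 *				   DB_REC_WIDTH_32B, DB_REC_KERNEL);
 *	...
 *	rc = ecore_db_recovery_del(p_dev, db_addr, &db_data);
 */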

static OSAL_INLINE bool ecore_is_mf_ufp(struct ecore_hwfn *p_hwfn)
{
	return !!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits);
}

#endif

/**
 * @brief ecore_hw_start_fastpath - restart fastpath traffic,
 *        only if hw_stop_fastpath was called
 *
 * @param p_hwfn
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);

enum ecore_hw_prepare_result {
	ECORE_HW_PREPARE_SUCCESS,

	/* FAILED results indicate probe has failed & cleaned up */
	ECORE_HW_PREPARE_FAILED_ENG2,
	ECORE_HW_PREPARE_FAILED_ME,
	ECORE_HW_PREPARE_FAILED_MEM,
	ECORE_HW_PREPARE_FAILED_DEV,
	ECORE_HW_PREPARE_FAILED_NVM,

	/* BAD results indicate probe has passed even though something went
	 * wrong; trying to actually use the device [i.e., calling hw_init()]
	 * might have dire repercussions.
	 */
	ECORE_HW_PREPARE_BAD_IOV,
	ECORE_HW_PREPARE_BAD_MCP,
	ECORE_HW_PREPARE_BAD_IGU,
};

struct ecore_hw_prepare_params {
	/* Personality to initialize */
	int personality;

	/* Force the driver's default resource allocation */
	bool drv_resc_alloc;

	/* Check the reg_fifo after any register access */
	bool chk_reg_fifo;

	/* Request the MFW to initiate PF FLR */
	bool initiate_pf_flr;

	/* The OS Epoch time in seconds */
	u32 epoch;

	/* Allow the MFW to collect a crash dump */
	bool allow_mdump;

	/* Allow prepare to pass even if some initializations are failing.
	 * If set, the `p_relaxed_res' field will be set with the result,
	 * and might allow probe to pass even if there are certain issues.
	 */
	bool b_relaxed_probe;
	enum ecore_hw_prepare_result p_relaxed_res;

	/* Enable/disable request by ecore client for pacing */
	bool b_en_pacing;
};

/**
 * @brief ecore_hw_prepare - probe-time preparation of the device
 *
 * @param p_dev
 * @param p_params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
				      struct ecore_hw_prepare_params *p_params);
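
/* Example - a relaxed-probe sketch; `os_epoch' and `handle_degraded_probe()'
 * are placeholders for driver-specific code, and ECORE_PCI_DEFAULT comes from
 * ecore.h. With b_relaxed_probe set, prepare may pass even on a BAD result,
 * so p_relaxed_res should be inspected afterwards:
 *
 *	struct ecore_hw_prepare_params params = { 0 };
 *
 *	params.personality = ECORE_PCI_DEFAULT;
 *	params.initiate_pf_flr = true;
 *	params.epoch = os_epoch;
 *	params.allow_mdump = true;
 *	params.b_relaxed_probe = true;
 *
 *	if (ecore_hw_prepare(p_dev, &params) == ECORE_SUCCESS &&
 *	    params.p_relaxed_res != ECORE_HW_PREPARE_SUCCESS)
 *		handle_degraded_probe(params.p_relaxed_res);
 */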

/**
 * @brief ecore_hw_remove - remove the device; counterpart of ecore_hw_prepare
 *
 * @param p_dev
 */
void ecore_hw_remove(struct ecore_dev *p_dev);

/**
 * @brief ecore_ptt_acquire - Allocate a PTT window
 *
 * Should be called at the entry point to the driver (at the beginning of an
 * exported function)
 *
 * @param p_hwfn
 *
 * @return struct ecore_ptt *
 */
struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn);

/**
 * @brief ecore_ptt_release - Release PTT Window
 *
 * Should be called at the end of a flow - at the end of the function that
 * acquired the PTT.
 *
 * @param p_hwfn
 * @param p_ptt
 */
void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt);
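
/* Example - the acquire/use/release discipline described above, as a sketch;
 * `mac_addr' is assumed to be the caller's MAC. ecore_ptt_acquire() can fail
 * when no PTT window is free, so the result must be checked:
 *
 *	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
 *
 *	if (!p_ptt)
 *		return ECORE_AGAIN;
 *	rc = ecore_llh_add_mac_filter(p_hwfn, p_ptt, mac_addr);
 *	ecore_ptt_release(p_hwfn, p_ptt);
 */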

struct ecore_eth_stats_common {
	u64 no_buff_discards;
	u64 packet_too_big_discard;
	u64 ttl0_discard;
	u64 rx_ucast_bytes;
	u64 rx_mcast_bytes;
	u64 rx_bcast_bytes;
	u64 rx_ucast_pkts;
	u64 rx_mcast_pkts;
	u64 rx_bcast_pkts;
	u64 mftag_filter_discards;
	u64 mac_filter_discards;
	u64 tx_ucast_bytes;
	u64 tx_mcast_bytes;
	u64 tx_bcast_bytes;
	u64 tx_ucast_pkts;
	u64 tx_mcast_pkts;
	u64 tx_bcast_pkts;
	u64 tx_err_drop_pkts;
	u64 tpa_coalesced_pkts;
	u64 tpa_coalesced_events;
	u64 tpa_aborts_num;
	u64 tpa_not_coalesced_pkts;
	u64 tpa_coalesced_bytes;

	/* port */
	u64 rx_64_byte_packets;
	u64 rx_65_to_127_byte_packets;
	u64 rx_128_to_255_byte_packets;
	u64 rx_256_to_511_byte_packets;
	u64 rx_512_to_1023_byte_packets;
	u64 rx_1024_to_1518_byte_packets;
	u64 rx_crc_errors;
	u64 rx_mac_crtl_frames;
	u64 rx_pause_frames;
	u64 rx_pfc_frames;
	u64 rx_align_errors;
	u64 rx_carrier_errors;
	u64 rx_oversize_packets;
	u64 rx_jabbers;
	u64 rx_undersize_packets;
	u64 rx_fragments;
	u64 tx_64_byte_packets;
	u64 tx_65_to_127_byte_packets;
	u64 tx_128_to_255_byte_packets;
	u64 tx_256_to_511_byte_packets;
	u64 tx_512_to_1023_byte_packets;
	u64 tx_1024_to_1518_byte_packets;
	u64 tx_pause_frames;
	u64 tx_pfc_frames;
	u64 brb_truncates;
	u64 brb_discards;
	u64 rx_mac_bytes;
	u64 rx_mac_uc_packets;
	u64 rx_mac_mc_packets;
	u64 rx_mac_bc_packets;
	u64 rx_mac_frames_ok;
	u64 tx_mac_bytes;
	u64 tx_mac_uc_packets;
	u64 tx_mac_mc_packets;
	u64 tx_mac_bc_packets;
	u64 tx_mac_ctrl_frames;
	u64 link_change_count;
};

struct ecore_eth_stats_bb {
	u64 rx_1519_to_1522_byte_packets;
	u64 rx_1519_to_2047_byte_packets;
	u64 rx_2048_to_4095_byte_packets;
	u64 rx_4096_to_9216_byte_packets;
	u64 rx_9217_to_16383_byte_packets;
	u64 tx_1519_to_2047_byte_packets;
	u64 tx_2048_to_4095_byte_packets;
	u64 tx_4096_to_9216_byte_packets;
	u64 tx_9217_to_16383_byte_packets;
	u64 tx_lpi_entry_count;
	u64 tx_total_collisions;
};

struct ecore_eth_stats_ah {
	u64 rx_1519_to_max_byte_packets;
	u64 tx_1519_to_max_byte_packets;
};

struct ecore_eth_stats {
	struct ecore_eth_stats_common common;
	union {
		struct ecore_eth_stats_bb bb;
		struct ecore_eth_stats_ah ah;
	};
};

enum ecore_dmae_address_type_t {
	ECORE_DMAE_ADDRESS_HOST_VIRT,
	ECORE_DMAE_ADDRESS_HOST_PHYS,
	ECORE_DMAE_ADDRESS_GRC
};

/* Flags values. If the ECORE_DMAE_FLAG_RW_REPL_SRC flag is set, the
 * source is a block of length DMAE_MAX_RW_SIZE, and the destination
 * is larger, the source block will be duplicated as many times as
 * required to fill the destination block. This is used mostly to
 * write a zeroed buffer to a destination address using DMA.
 */
#define ECORE_DMAE_FLAG_RW_REPL_SRC	0x00000001
#define ECORE_DMAE_FLAG_VF_SRC		0x00000002
#define ECORE_DMAE_FLAG_VF_DST		0x00000004
#define ECORE_DMAE_FLAG_COMPLETION_DST	0x00000008

struct ecore_dmae_params {
	u32 flags; /* consists of ECORE_DMAE_FLAG_* values */
	u8 src_vfid;
	u8 dst_vfid;
};

/**
 * @brief ecore_dmae_host2grc - copy data from a host source address to a
 * GRC address using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_addr
 * @param grc_addr (dmae_data_offset)
 * @param size_in_dwords
 * @param flags (one of the flags defined above)
 */
enum _ecore_status_t
ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt,
		    u64 source_addr,
		    u32 grc_addr,
		    u32 size_in_dwords,
		    u32 flags);
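
/* Example - zeroing a GRC region via ECORE_DMAE_FLAG_RW_REPL_SRC, as
 * described above; a sketch where `zero_buf_phys' is assumed to be the DMA
 * address of a zeroed host buffer of DMAE_MAX_RW_SIZE dwords, and
 * `grc_dest_addr'/`region_size_in_dwords' stand in for the caller's target
 * window:
 *
 *	rc = ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)zero_buf_phys,
 *				 grc_dest_addr, region_size_in_dwords,
 *				 ECORE_DMAE_FLAG_RW_REPL_SRC);
 */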

/**
 * @brief ecore_dmae_grc2host - read data from a GRC address into a host
 * destination address using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param grc_addr (dmae_data_offset)
 * @param dest_addr
 * @param size_in_dwords
 * @param flags - one of the flags defined above
 */
enum _ecore_status_t
ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt,
		    u32 grc_addr,
		    dma_addr_t dest_addr,
		    u32 size_in_dwords,
		    u32 flags);

/**
 * @brief ecore_dmae_host2host - copy data from a source address to a
 * destination address (for SRIOV) using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_addr
 * @param dest_addr
 * @param size_in_dwords
 * @param params
 */
enum _ecore_status_t
ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt,
		     dma_addr_t source_addr,
		     dma_addr_t dest_addr,
		     u32 size_in_dwords,
		     struct ecore_dmae_params *p_params);

/**
 * @brief ecore_chain_alloc - Allocate and initialize a chain
 *
 * @param p_dev
 * @param intended_use
 * @param mode
 * @param cnt_type
 * @param num_elems
 * @param elem_size
 * @param p_chain
 * @param ext_pbl
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_chain_alloc(struct ecore_dev *p_dev,
		  enum ecore_chain_use_mode intended_use,
		  enum ecore_chain_mode mode,
		  enum ecore_chain_cnt_type cnt_type,
		  u32 num_elems,
		  osal_size_t elem_size,
		  struct ecore_chain *p_chain,
		  struct ecore_chain_ext_pbl *ext_pbl);
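
/* Example - allocating a PBL-based chain and freeing it again; a sketch
 * where the element count is illustrative and `struct my_elem' is a
 * placeholder for the caller's element type:
 *
 *	struct ecore_chain chain;
 *
 *	rc = ecore_chain_alloc(p_dev,
 *			       ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
 *			       ECORE_CHAIN_MODE_PBL,
 *			       ECORE_CHAIN_CNT_TYPE_U16,
 *			       512, sizeof(struct my_elem),
 *			       &chain, OSAL_NULL);
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 *	...
 *	ecore_chain_free(p_dev, &chain);
 */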

/**
 * @brief ecore_chain_free - Free chain DMA memory
 *
 * @param p_dev
 * @param p_chain
 */
void ecore_chain_free(struct ecore_dev *p_dev,
		      struct ecore_chain *p_chain);

/**
 * @brief ecore_fw_l2_queue - Get absolute L2 queue ID
 *
 *  @param p_hwfn
 *  @param src_id - relative to p_hwfn
 *  @param dst_id - absolute per engine
 *
 *  @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
				       u16 src_id,
				       u16 *dst_id);

/**
 * @brief ecore_fw_vport - Get absolute vport ID
 *
 *  @param p_hwfn
 *  @param src_id - relative to p_hwfn
 *  @param dst_id - absolute per engine
 *
 *  @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
				    u8 src_id,
				    u8 *dst_id);

/**
 * @brief ecore_fw_rss_eng - Get absolute RSS engine ID
 *
 *  @param p_hwfn
 *  @param src_id - relative to p_hwfn
 *  @param dst_id - absolute per engine
 *
 *  @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
				      u8 src_id,
				      u8 *dst_id);

/**
 * @brief ecore_llh_add_mac_filter - configures a MAC filter in llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_filter - MAC to add
 */
enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u8 *p_filter);

/**
 * @brief ecore_llh_remove_mac_filter - removes a MAC filter from llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_filter - MAC to remove
 */
void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     u8 *p_filter);

enum ecore_llh_port_filter_type_t {
	ECORE_LLH_FILTER_ETHERTYPE,
	ECORE_LLH_FILTER_TCP_SRC_PORT,
	ECORE_LLH_FILTER_TCP_DEST_PORT,
	ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT,
	ECORE_LLH_FILTER_UDP_SRC_PORT,
	ECORE_LLH_FILTER_UDP_DEST_PORT,
	ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT
};

/**
 * @brief ecore_llh_add_protocol_filter - configures a protocol filter in llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_port_or_eth_type - source port or ethertype to add
 * @param dest_port - destination port to add
 * @param type - type of filters and comparing
 */
enum _ecore_status_t
ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      u16 source_port_or_eth_type,
			      u16 dest_port,
			      enum ecore_llh_port_filter_type_t type);

/**
 * @brief ecore_llh_remove_protocol_filter - removes a protocol filter from llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_port_or_eth_type - source port or ethertype to remove
 * @param dest_port - destination port to remove
 * @param type - type of filters and comparing
 */
void
ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 u16 source_port_or_eth_type,
				 u16 dest_port,
				 enum ecore_llh_port_filter_type_t type);
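
/* Example - adding and later removing a UDP destination-port filter; a
 * sketch, with 4789 (VXLAN) as an illustrative port. Since only the
 * destination port is compared for this filter type, the source-port
 * argument is passed as 0:
 *
 *	rc = ecore_llh_add_protocol_filter(p_hwfn, p_ptt, 0, 4789,
 *					   ECORE_LLH_FILTER_UDP_DEST_PORT);
 *	...
 *	ecore_llh_remove_protocol_filter(p_hwfn, p_ptt, 0, 4789,
 *					 ECORE_LLH_FILTER_UDP_DEST_PORT);
 */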

/**
 * @brief ecore_llh_clear_all_filters - removes all MAC filters from llh
 *
 * @param p_hwfn
 * @param p_ptt
 */
void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt);

/**
 * @brief ecore_llh_set_function_as_default - set function as default per port
 *
 * @param p_hwfn
 * @param p_ptt
 */
enum _ecore_status_t
ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt);

/**
 * @brief ecore_final_cleanup - cleanup of previous driver remains prior to load
 *
 * @param p_hwfn
 * @param p_ptt
 * @param id - For PF, engine-relative. For VF, PF-relative.
 * @param is_vf - true iff cleanup is made for a VF.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn	*p_hwfn,
					 struct ecore_ptt	*p_ptt,
					 u16			id,
					 bool			is_vf);

/**
 * @brief ecore_get_queue_coalesce - Retrieve coalesce value for a given queue.
 *
 * @param p_hwfn
 * @param coal - pointer to store the coalesce value read from the hardware.
 * @param handle
 *
 * @return enum _ecore_status_t
 **/
enum _ecore_status_t
ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *coal,
			 void *handle);

/**
 * @brief ecore_set_queue_coalesce - Configure coalesce parameters for an Rx
 *    and a Tx queue. Coalescing can be configured up to 511 usec, but with
 *    varying accuracy [the bigger the value, the less accurate] up to an
 *    error of 3 usec for the highest values.
 *    While the API allows setting coalescing per-qid, all queues sharing an
 *    SB should be in the same range [i.e., either 0-0x7f, 0x80-0xff or
 *    0x100-0x1ff], otherwise the configuration would break.
 *
 * @param p_hwfn
 * @param rx_coal - Rx Coalesce value in micro seconds.
 * @param tx_coal - TX Coalesce value in micro seconds.
 * @param p_handle
 *
 * @return enum _ecore_status_t
 **/
enum _ecore_status_t
ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal,
			 u16 tx_coal, void *p_handle);
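
/* Example - configuring 24/48 usec Rx/Tx coalescing for a queue and reading
 * the value back; a sketch where `p_queue_handle' is assumed to be the queue
 * handle the ecore client received when opening the queue. Both values fall
 * in the same 0-0x7f range, per the constraint above:
 *
 *	u16 coal;
 *
 *	rc = ecore_set_queue_coalesce(p_hwfn, 24, 48, p_queue_handle);
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 *	rc = ecore_get_queue_coalesce(p_hwfn, &coal, p_queue_handle);
 */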

/**
 * @brief ecore_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER
 *
 * @param p_hwfn
 * @param p_ptt
 * @param b_enable - true/false
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt,
						  bool b_enable);
#endif