/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#ifndef __ECORE_DEV_API_H__
#define __ECORE_DEV_API_H__

#include "ecore_status.h"
#include "ecore_chain.h"
#include "ecore_int_api.h"

/**
 * @brief ecore_init_dp - initialize the debug level
 *
 * @param p_dev
 * @param dp_module
 * @param dp_level
 * @param dp_ctx
 */
void ecore_init_dp(struct ecore_dev *p_dev,
		   u32 dp_module,
		   u8 dp_level,
		   void *dp_ctx);

/**
 * @brief ecore_init_struct - initialize the device structure to
 *        its defaults
 *
 * @param p_dev
 */
enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_free - free device resources
 *
 * @param p_dev
 */
void ecore_resc_free(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_alloc - allocate device resources
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_setup - setup device resources
 *
 * @param p_dev
 */
void ecore_resc_setup(struct ecore_dev *p_dev);

enum ecore_mfw_timeout_fallback {
	ECORE_TO_FALLBACK_TO_NONE,
	ECORE_TO_FALLBACK_TO_DEFAULT,
	ECORE_TO_FALLBACK_FAIL_LOAD,
};

enum ecore_override_force_load {
	ECORE_OVERRIDE_FORCE_LOAD_NONE,
	ECORE_OVERRIDE_FORCE_LOAD_ALWAYS,
	ECORE_OVERRIDE_FORCE_LOAD_NEVER,
};

struct ecore_drv_load_params {
	/* Indicates whether the driver is running over a crash kernel.
	 * As part of the load request, this will be used for providing the
	 * driver role to the MFW.
	 * In case of a crash kernel over PDA - this should be set to false.
	 */
	bool is_crash_kernel;

	/* The timeout value that the MFW should use when locking the engine for
	 * the driver load process.
	 * A value of '0' means the default value, and '255' means no timeout.
	 */
	u8 mfw_timeout_val;
#define ECORE_LOAD_REQ_LOCK_TO_DEFAULT	0
#define ECORE_LOAD_REQ_LOCK_TO_NONE	255

	/* Action to take in case the MFW doesn't support timeout values other
	 * than default and none.
	 */
	enum ecore_mfw_timeout_fallback mfw_timeout_fallback;

	/* Avoid engine reset when first PF loads on it */
	bool avoid_eng_reset;

	/* Allow overriding the default force load behavior */
	enum ecore_override_force_load override_force_load;
};
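
/* Illustrative usage sketch (an example of this documentation, not part of
 * the API): a driver would typically zero the structure and override
 * selected fields before handing it to ecore_hw_init() through
 * ecore_hw_init_params.p_drv_load_params:
 *
 *	struct ecore_drv_load_params load_params = { 0 };
 *
 *	load_params.is_crash_kernel = false;
 *	load_params.mfw_timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
 *	load_params.mfw_timeout_fallback = ECORE_TO_FALLBACK_TO_DEFAULT;
 *	load_params.override_force_load = ECORE_OVERRIDE_FORCE_LOAD_NONE;
 */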

struct ecore_hw_init_params {
	/* Tunneling parameters */
	struct ecore_tunnel_info *p_tunn;

	bool b_hw_start;

	/* Interrupt mode [msix, inta, etc.] to use */
	enum ecore_int_mode int_mode;

	/* NPAR tx switching to be used for vports configured for tx-switching
	 */
	bool allow_npar_tx_switch;

	/* Binary fw data pointer in binary fw file */
	const u8 *bin_fw_data;

	/* Driver load parameters */
	struct ecore_drv_load_params *p_drv_load_params;

	/* SPQ block timeout in msec */
	u32 spq_timeout_ms;
};

/**
 * @brief ecore_hw_init - initialize the device's hardware
 *
 * @param p_dev
 * @param p_params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
				   struct ecore_hw_init_params *p_params);
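
/* Illustrative bring-up sketch (not part of the API), assuming `p_dev' was
 * already probed via ecore_hw_prepare() and `fw_data' points at the binary
 * firmware image; error handling is omitted:
 *
 *	struct ecore_hw_init_params init_params = { 0 };
 *	struct ecore_drv_load_params load_params = { 0 };
 *
 *	load_params.mfw_timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
 *	init_params.b_hw_start = true;
 *	init_params.int_mode = ECORE_INT_MODE_MSIX;
 *	init_params.allow_npar_tx_switch = true;
 *	init_params.bin_fw_data = fw_data;
 *	init_params.p_drv_load_params = &load_params;
 *
 *	rc = ecore_hw_init(p_dev, &init_params);
 */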

/**
 * @brief ecore_hw_timers_stop_all - stop the HW timers
 *
 * @param p_dev
 *
 * @return void
 */
void ecore_hw_timers_stop_all(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_stop - stop the HW
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_stop_fastpath - should be called in case
 *        slowpath is still required for the device,
 *        but fastpath is not.
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev);

#ifndef LINUX_REMOVE
/**
 * @brief ecore_prepare_hibernate - should be called when
 *        the system is going into the hibernate state
 *
 * @param p_dev
 *
 */
void ecore_prepare_hibernate(struct ecore_dev *p_dev);

enum ecore_db_rec_width {
	DB_REC_WIDTH_32B,
	DB_REC_WIDTH_64B,
};

enum ecore_db_rec_space {
	DB_REC_KERNEL,
	DB_REC_USER,
};

/**
 * @brief db_recovery_add - add doorbell information to the doorbell
 * recovery mechanism.
 *
 * @param p_dev
 * @param db_addr - doorbell address
 * @param db_data - address of where db_data is stored
 * @param db_width - doorbell is 32b or 64b
 * @param db_space - doorbell recovery addresses are user or kernel space
 */
enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
					   void OSAL_IOMEM *db_addr,
					   void *db_data,
					   enum ecore_db_rec_width db_width,
					   enum ecore_db_rec_space db_space);

/**
 * @brief db_recovery_del - remove doorbell information from the doorbell
 * recovery mechanism. db_data serves as key (db_addr is not unique).
 *
 * @param p_dev
 * @param db_addr - doorbell address
 * @param db_data - address where db_data is stored. Serves as key for the
 *                  entry to delete.
 */
enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
					   void OSAL_IOMEM *db_addr,
					   void *db_data);
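
/* Illustrative sketch: a kernel-space 32b doorbell is registered once its
 * data location is known, and later removed using the same db_data key;
 * `db_addr' and `db_data' are assumed to come from the caller's queue setup:
 *
 *	rc = ecore_db_recovery_add(p_dev, db_addr, db_data,
 *				   DB_REC_WIDTH_32B, DB_REC_KERNEL);
 *	...
 *	rc = ecore_db_recovery_del(p_dev, db_addr, db_data);
 */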

static OSAL_INLINE bool ecore_is_mf_ufp(struct ecore_hwfn *p_hwfn)
{
	return !!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits);
}

#endif

/**
 * @brief ecore_hw_start_fastpath - restart fastpath traffic,
 *        only if hw_stop_fastpath was called
 *
 * @param p_hwfn
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);

enum ecore_hw_prepare_result {
	ECORE_HW_PREPARE_SUCCESS,

	/* FAILED results indicate probe has failed & cleaned up */
	ECORE_HW_PREPARE_FAILED_ENG2,
	ECORE_HW_PREPARE_FAILED_ME,
	ECORE_HW_PREPARE_FAILED_MEM,
	ECORE_HW_PREPARE_FAILED_DEV,
	ECORE_HW_PREPARE_FAILED_NVM,

	/* BAD results indicate probe is passed even though some wrongness
	 * has occurred; Trying to actually use [i.e., hw_init()] might have
	 * dire repercussions.
	 */
	ECORE_HW_PREPARE_BAD_IOV,
	ECORE_HW_PREPARE_BAD_MCP,
	ECORE_HW_PREPARE_BAD_IGU,
};

struct ecore_hw_prepare_params {
	/* Personality to initialize */
	int personality;

	/* Force the driver's default resource allocation */
	bool drv_resc_alloc;

	/* Check the reg_fifo after any register access */
	bool chk_reg_fifo;

	/* Request the MFW to initiate PF FLR */
	bool initiate_pf_flr;

	/* The OS Epoch time in seconds */
	u32 epoch;

	/* Allow the MFW to collect a crash dump */
	bool allow_mdump;

	/* Allow prepare to pass even if some initializations are failing.
	 * If set, the `p_relaxed_res' field would be set with the return,
	 * and might allow probe to pass even if there are certain issues.
	 */
	bool b_relaxed_probe;
	enum ecore_hw_prepare_result p_relaxed_res;

	/* Enable/disable request by ecore client for pacing */
	bool b_en_pacing;

	/* Indicates whether this PF serves a storage target */
	bool b_is_target;
};

/**
 * @brief ecore_hw_prepare - prepare the device for initialization
 *
 * @param p_dev
 * @param p_params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
				      struct ecore_hw_prepare_params *p_params);
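
/* Illustrative relaxed-probe sketch: prepare is allowed to pass with a
 * BAD_* result, which the caller inspects before continuing (the
 * ECORE_PCI_DEFAULT personality is assumed to come from ecore.h):
 *
 *	struct ecore_hw_prepare_params params = { 0 };
 *
 *	params.personality = ECORE_PCI_DEFAULT;
 *	params.initiate_pf_flr = true;
 *	params.b_relaxed_probe = true;
 *	rc = ecore_hw_prepare(p_dev, &params);
 *	if (rc == ECORE_SUCCESS &&
 *	    params.p_relaxed_res != ECORE_HW_PREPARE_SUCCESS)
 *		DP_NOTICE(p_dev, false, "probe passed with warnings\n");
 */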

/**
 * @brief ecore_hw_remove - remove the device and free its resources
 *
 * @param p_dev
 */
void ecore_hw_remove(struct ecore_dev *p_dev);

/**
 * @brief ecore_ptt_acquire - Allocate a PTT window
 *
 * Should be called at the entry point to the driver (at the beginning of an
 * exported function)
 *
 * @param p_hwfn
 *
 * @return struct ecore_ptt *
 */
struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn);

/**
 * @brief ecore_ptt_release - Release PTT Window
 *
 * Should be called at the end of a flow - at the end of the function that
 * acquired the PTT.
 *
 * @param p_hwfn
 * @param p_ptt
 */
void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt);
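
/* Illustrative sketch of the expected acquire/use/release pattern inside an
 * exported function:
 *
 *	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
 *
 *	if (p_ptt == OSAL_NULL)
 *		return ECORE_AGAIN;
 *	rc = ecore_llh_set_function_as_default(p_hwfn, p_ptt);
 *	ecore_ptt_release(p_hwfn, p_ptt);
 *	return rc;
 */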

struct ecore_eth_stats_common {
	u64 no_buff_discards;
	u64 packet_too_big_discard;
	u64 ttl0_discard;
	u64 rx_ucast_bytes;
	u64 rx_mcast_bytes;
	u64 rx_bcast_bytes;
	u64 rx_ucast_pkts;
	u64 rx_mcast_pkts;
	u64 rx_bcast_pkts;
	u64 mftag_filter_discards;
	u64 mac_filter_discards;
	u64 tx_ucast_bytes;
	u64 tx_mcast_bytes;
	u64 tx_bcast_bytes;
	u64 tx_ucast_pkts;
	u64 tx_mcast_pkts;
	u64 tx_bcast_pkts;
	u64 tx_err_drop_pkts;
	u64 tpa_coalesced_pkts;
	u64 tpa_coalesced_events;
	u64 tpa_aborts_num;
	u64 tpa_not_coalesced_pkts;
	u64 tpa_coalesced_bytes;

	/* port */
	u64 rx_64_byte_packets;
	u64 rx_65_to_127_byte_packets;
	u64 rx_128_to_255_byte_packets;
	u64 rx_256_to_511_byte_packets;
	u64 rx_512_to_1023_byte_packets;
	u64 rx_1024_to_1518_byte_packets;
	u64 rx_crc_errors;
	u64 rx_mac_crtl_frames;
	u64 rx_pause_frames;
	u64 rx_pfc_frames;
	u64 rx_align_errors;
	u64 rx_carrier_errors;
	u64 rx_oversize_packets;
	u64 rx_jabbers;
	u64 rx_undersize_packets;
	u64 rx_fragments;
	u64 tx_64_byte_packets;
	u64 tx_65_to_127_byte_packets;
	u64 tx_128_to_255_byte_packets;
	u64 tx_256_to_511_byte_packets;
	u64 tx_512_to_1023_byte_packets;
	u64 tx_1024_to_1518_byte_packets;
	u64 tx_pause_frames;
	u64 tx_pfc_frames;
	u64 brb_truncates;
	u64 brb_discards;
	u64 rx_mac_bytes;
	u64 rx_mac_uc_packets;
	u64 rx_mac_mc_packets;
	u64 rx_mac_bc_packets;
	u64 rx_mac_frames_ok;
	u64 tx_mac_bytes;
	u64 tx_mac_uc_packets;
	u64 tx_mac_mc_packets;
	u64 tx_mac_bc_packets;
	u64 tx_mac_ctrl_frames;
	u64 link_change_count;
};

struct ecore_eth_stats_bb {
	u64 rx_1519_to_1522_byte_packets;
	u64 rx_1519_to_2047_byte_packets;
	u64 rx_2048_to_4095_byte_packets;
	u64 rx_4096_to_9216_byte_packets;
	u64 rx_9217_to_16383_byte_packets;
	u64 tx_1519_to_2047_byte_packets;
	u64 tx_2048_to_4095_byte_packets;
	u64 tx_4096_to_9216_byte_packets;
	u64 tx_9217_to_16383_byte_packets;
	u64 tx_lpi_entry_count;
	u64 tx_total_collisions;
};

struct ecore_eth_stats_ah {
	u64 rx_1519_to_max_byte_packets;
	u64 tx_1519_to_max_byte_packets;
};

struct ecore_eth_stats {
	struct ecore_eth_stats_common common;
	union {
		struct ecore_eth_stats_bb bb;
		struct ecore_eth_stats_ah ah;
	};
};

enum ecore_dmae_address_type_t {
	ECORE_DMAE_ADDRESS_HOST_VIRT,
	ECORE_DMAE_ADDRESS_HOST_PHYS,
	ECORE_DMAE_ADDRESS_GRC
};

/* Values for the flags field. If the ECORE_DMAE_FLAG_RW_REPL_SRC flag is set
 * and the source is a block of length DMAE_MAX_RW_SIZE while the destination
 * is larger, the source block will be duplicated as many times as required to
 * fill the destination block. This is used mostly to write a zeroed buffer to
 * a destination address using DMA.
 */
#define ECORE_DMAE_FLAG_RW_REPL_SRC	0x00000001
#define ECORE_DMAE_FLAG_VF_SRC		0x00000002
#define ECORE_DMAE_FLAG_VF_DST		0x00000004
#define ECORE_DMAE_FLAG_COMPLETION_DST	0x00000008

struct ecore_dmae_params {
	u32 flags; /* consists of ECORE_DMAE_FLAG_* values */
	u8 src_vfid;
	u8 dst_vfid;
};
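
/* Illustrative sketch: zeroing a large VF-owned buffer by replicating a
 * small zeroed source block into the destination; `zero_buf_phys',
 * `dest_phys', `vfid' and the size are assumptions of the example:
 *
 *	struct ecore_dmae_params params = { 0 };
 *
 *	params.flags = ECORE_DMAE_FLAG_RW_REPL_SRC | ECORE_DMAE_FLAG_VF_DST;
 *	params.dst_vfid = vfid;
 *	rc = ecore_dmae_host2host(p_hwfn, p_ptt, zero_buf_phys, dest_phys,
 *				  size_in_dwords, &params);
 */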

/**
 * @brief ecore_dmae_host2grc - copy data from source addr to
 * dmae registers using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_addr
 * @param grc_addr (dmae_data_offset)
 * @param size_in_dwords
 * @param flags (one of the flags defined above)
 */
enum _ecore_status_t
ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt,
		    u64 source_addr,
		    u32 grc_addr,
		    u32 size_in_dwords,
		    u32 flags);

/**
 * @brief ecore_dmae_grc2host - Read data from dmae data offset
 * to the destination address using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param grc_addr (dmae_data_offset)
 * @param dest_addr
 * @param size_in_dwords
 * @param flags - one of the flags defined above
 */
enum _ecore_status_t
ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt,
		    u32 grc_addr,
		    dma_addr_t dest_addr,
		    u32 size_in_dwords,
		    u32 flags);

/**
 * @brief ecore_dmae_host2host - copy data from a source address
 * to a destination address (for SRIOV) using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_addr
 * @param dest_addr
 * @param size_in_dwords
 * @param p_params
 */
enum _ecore_status_t
ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt,
		     dma_addr_t source_addr,
		     dma_addr_t dest_addr,
		     u32 size_in_dwords,
		     struct ecore_dmae_params *p_params);

/**
 * @brief ecore_chain_alloc - Allocate and initialize a chain
 *
 * @param p_dev
 * @param intended_use
 * @param mode
 * @param cnt_type
 * @param num_elems
 * @param elem_size
 * @param p_chain
 * @param ext_pbl
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_chain_alloc(struct ecore_dev *p_dev,
		  enum ecore_chain_use_mode intended_use,
		  enum ecore_chain_mode mode,
		  enum ecore_chain_cnt_type cnt_type,
		  u32 num_elems,
		  osal_size_t elem_size,
		  struct ecore_chain *p_chain,
		  struct ecore_chain_ext_pbl *ext_pbl);
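
/* Illustrative sketch of allocating and freeing a 128-element, PBL-based,
 * 16-bit-counter chain (element count and size are example values):
 *
 *	struct ecore_chain chain;
 *
 *	rc = ecore_chain_alloc(p_dev, ECORE_CHAIN_USE_TO_PRODUCE,
 *			       ECORE_CHAIN_MODE_PBL, ECORE_CHAIN_CNT_TYPE_U16,
 *			       128, sizeof(u64), &chain, OSAL_NULL);
 *	...
 *	ecore_chain_free(p_dev, &chain);
 */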

/**
 * @brief ecore_chain_free - Free chain DMA memory
 *
 * @param p_dev
 * @param p_chain
 */
void ecore_chain_free(struct ecore_dev *p_dev,
		      struct ecore_chain *p_chain);

/**
 * @brief ecore_fw_l2_queue - Get absolute L2 queue ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
				       u16 src_id,
				       u16 *dst_id);

/**
 * @brief ecore_fw_vport - Get absolute vport ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
				    u8 src_id,
				    u8 *dst_id);

/**
 * @brief ecore_fw_rss_eng - Get absolute RSS engine ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
				      u8 src_id,
				      u8 *dst_id);

/**
 * @brief ecore_llh_add_mac_filter - configures a MAC filter in llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_filter - MAC to add
 */
enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u8 *p_filter);

/**
 * @brief ecore_llh_remove_mac_filter - removes a MAC filter from llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_filter - MAC to remove
 */
void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     u8 *p_filter);
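
/* Illustrative sketch: adding and later removing a unicast MAC in the LLH
 * (the address itself is an example value):
 *
 *	u8 mac[6] = { 0x00, 0x0e, 0x1e, 0x00, 0x00, 0x01 };
 *
 *	rc = ecore_llh_add_mac_filter(p_hwfn, p_ptt, mac);
 *	...
 *	ecore_llh_remove_mac_filter(p_hwfn, p_ptt, mac);
 */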

enum ecore_llh_port_filter_type_t {
	ECORE_LLH_FILTER_ETHERTYPE,
	ECORE_LLH_FILTER_TCP_SRC_PORT,
	ECORE_LLH_FILTER_TCP_DEST_PORT,
	ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT,
	ECORE_LLH_FILTER_UDP_SRC_PORT,
	ECORE_LLH_FILTER_UDP_DEST_PORT,
	ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT
};

/**
 * @brief ecore_llh_add_protocol_filter - configures a protocol filter in llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_port_or_eth_type - source port or ethertype to add
 * @param dest_port - destination port to add
 * @param type - type of filters and comparing
 */
enum _ecore_status_t
ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      u16 source_port_or_eth_type,
			      u16 dest_port,
			      enum ecore_llh_port_filter_type_t type);

/**
 * @brief ecore_llh_remove_protocol_filter - remove a protocol filter from llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_port_or_eth_type - source port or ethertype to remove
 * @param dest_port - destination port to remove
 * @param type - type of filters and comparing
 */
void
ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 u16 source_port_or_eth_type,
				 u16 dest_port,
				 enum ecore_llh_port_filter_type_t type);
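
/* Illustrative sketch: steering traffic by TCP destination port 3260 (an
 * example value), passing 0 for the source-port/ethertype argument, which
 * is assumed unused for this filter type:
 *
 *	rc = ecore_llh_add_protocol_filter(p_hwfn, p_ptt, 0, 3260,
 *					   ECORE_LLH_FILTER_TCP_DEST_PORT);
 *	...
 *	ecore_llh_remove_protocol_filter(p_hwfn, p_ptt, 0, 3260,
 *					 ECORE_LLH_FILTER_TCP_DEST_PORT);
 */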

/**
 * @brief ecore_llh_clear_all_filters - removes all MAC filters from llh
 *
 * @param p_hwfn
 * @param p_ptt
 */
void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt);

/**
 * @brief ecore_llh_set_function_as_default - set function as default per port
 *
 * @param p_hwfn
 * @param p_ptt
 */
enum _ecore_status_t
ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt);

/**
 * @brief Cleanup of previous driver remains prior to load
 *
 * @param p_hwfn
 * @param p_ptt
 * @param id - For PF, engine-relative. For VF, PF-relative.
 * @param is_vf - true iff cleanup is made for a VF.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn	*p_hwfn,
					 struct ecore_ptt	*p_ptt,
					 u16			id,
					 bool			is_vf);

/**
 * @brief ecore_get_queue_coalesce - Retrieve coalesce value for a given queue.
 *
 * @param p_hwfn
 * @param coal - store coalesce value read from the hardware.
 * @param handle
 *
 * @return enum _ecore_status_t
 **/
enum _ecore_status_t
ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *coal,
			 void *handle);

/**
 * @brief ecore_set_queue_coalesce - Configure coalesce parameters for Rx and
 *    Tx queue. Coalescing can be configured up to 511 usec, but with varying
 *    accuracy [the bigger the value the less accurate], up to an error of
 *    3 usec for the highest values.
 *    While the API allows setting coalescing per-qid, all queues sharing a SB
 *    should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff],
 *    otherwise the configuration would break.
 *
 * @param p_hwfn
 * @param rx_coal - Rx Coalesce value in micro seconds.
 * @param tx_coal - TX Coalesce value in micro seconds.
 * @param p_handle
 *
 * @return enum _ecore_status_t
 **/
enum _ecore_status_t
ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal,
			 u16 tx_coal, void *p_handle);
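
/* Illustrative sketch: reading a queue's current coalescing and raising it,
 * where `p_handle' is assumed to be the opaque queue handle the client got
 * when the queue was started; note both values stay in the same SB range:
 *
 *	u16 coal;
 *
 *	rc = ecore_get_queue_coalesce(p_hwfn, &coal, p_handle);
 *	if (rc == ECORE_SUCCESS && coal < 64)
 *		rc = ecore_set_queue_coalesce(p_hwfn, 64, 64, p_handle);
 */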

/**
 * @brief ecore_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER
 *
 * @param p_hwfn
 * @param p_ptt
 * @param b_enable - true/false
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt,
						  bool b_enable);
#endif