/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#ifndef __ECORE_DEV_API_H__
#define __ECORE_DEV_API_H__

#include "ecore_status.h"
#include "ecore_chain.h"
#include "ecore_int_api.h"

/**
 * @brief ecore_init_dp - initialize the debug level
 *
 * @param p_dev
 * @param dp_module
 * @param dp_level
 * @param dp_ctx
 */
void ecore_init_dp(struct ecore_dev *p_dev,
		   u32 dp_module,
		   u8 dp_level,
		   void *dp_ctx);

/**
 * @brief ecore_init_struct - initialize the device structure to
 *        its defaults
 *
 * @param p_dev
 */
void ecore_init_struct(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_free - free the device's resources
 *
 * @param p_dev
 */
void ecore_resc_free(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_alloc - allocate the device's resources
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_setup - setup the allocated resources
 *
 * @param p_dev
 */
void ecore_resc_setup(struct ecore_dev *p_dev);

struct ecore_hw_init_params {
	/* Tunnelling parameters */
	struct ecore_tunnel_info *p_tunn;

	bool b_hw_start;

	/* Interrupt mode [msix, inta, etc.] to use */
	enum ecore_int_mode int_mode;

	/* NPAR tx switching to be used for vports configured for tx-switching
	 */
	bool allow_npar_tx_switch;

	/* Binary fw data pointer in binary fw file */
	const u8 *bin_fw_data;

	/* Indicates whether the driver is running over a crash kernel.
	 * As part of the load request, this will be used for providing the
	 * driver role to the MFW.
	 * In case of a crash kernel over PDA - this should be set to false.
	 */
	bool is_crash_kernel;

	/* The timeout value that the MFW should use when locking the engine
	 * for the driver load process.
	 * A value of '0' means the default value, and '255' means no timeout.
	 */
	u8 mfw_timeout_val;
#define ECORE_LOAD_REQ_LOCK_TO_DEFAULT	0
#define ECORE_LOAD_REQ_LOCK_TO_NONE	255

	/* Avoid engine reset when first PF loads on it */
	bool avoid_eng_reset;
};

/**
 * @brief ecore_hw_init - initialize the hardware
 *
 * @param p_dev
 * @param p_params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
				   struct ecore_hw_init_params *p_params);
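
/* Illustrative usage sketch (an addition, not part of the original API
 * documentation): a minimal slowpath load via ecore_hw_init(). The probe
 * context and the `fw_data' buffer are assumptions for illustration.
 *
 *	struct ecore_hw_init_params params;
 *
 *	OSAL_MEMSET(&params, 0, sizeof(params));
 *	params.p_tunn = OSAL_NULL;
 *	params.b_hw_start = true;
 *	params.int_mode = ECORE_INT_MODE_MSIX;
 *	params.allow_npar_tx_switch = true;
 *	params.bin_fw_data = fw_data;
 *	params.mfw_timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
 *
 *	if (ecore_hw_init(p_dev, &params) != ECORE_SUCCESS)
 *		goto err;
 */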

/**
 * @brief ecore_hw_timers_stop_all - stop all HW timers
 *
 * @param p_dev
 *
 * @return void
 */
void ecore_hw_timers_stop_all(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_stop - stop the hardware
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_stop_fastpath - should be called in case
 *        slowpath is still required for the device,
 *        but fastpath is not.
 *
 * @param p_dev
 *
 */
void ecore_hw_stop_fastpath(struct ecore_dev *p_dev);

#ifndef LINUX_REMOVE
/**
 * @brief ecore_prepare_hibernate - should be called when
 *        the system is going into the hibernate state
 *
 * @param p_dev
 *
 */
void ecore_prepare_hibernate(struct ecore_dev *p_dev);
#endif

/**
 * @brief ecore_hw_start_fastpath - restart fastpath traffic,
 *        only if hw_stop_fastpath was called
 *
 * @param p_hwfn
 *
 */
void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);

enum ecore_hw_prepare_result {
	ECORE_HW_PREPARE_SUCCESS,

	/* FAILED results indicate probe has failed & cleaned up */
	ECORE_HW_PREPARE_FAILED_ENG2,
	ECORE_HW_PREPARE_FAILED_ME,
	ECORE_HW_PREPARE_FAILED_MEM,
	ECORE_HW_PREPARE_FAILED_DEV,
	ECORE_HW_PREPARE_FAILED_NVM,

	/* BAD results indicate that the probe has passed even though some
	 * wrongness has occurred; trying to actually use the device
	 * [i.e., hw_init()] might have dire repercussions.
	 */
	ECORE_HW_PREPARE_BAD_IOV,
	ECORE_HW_PREPARE_BAD_MCP,
	ECORE_HW_PREPARE_BAD_IGU,
};

struct ecore_hw_prepare_params {
	/* Personality to initialize */
	int personality;

	/* Force the driver's default resource allocation */
	bool drv_resc_alloc;

	/* Check the reg_fifo after any register access */
	bool chk_reg_fifo;

	/* Request the MFW to initiate PF FLR */
	bool initiate_pf_flr;

	/* The OS Epoch time in seconds */
	u32 epoch;

	/* Allow the MFW to collect a crash dump */
	bool allow_mdump;

	/* Allow prepare to pass even if some initializations are failing.
	 * If set, the `p_relaxed_res' field will be set with the result,
	 * and might allow probe to pass even if there are certain issues.
	 */
	bool b_relaxed_probe;
	enum ecore_hw_prepare_result p_relaxed_res;
};

/**
 * @brief ecore_hw_prepare - prepare the device for initialization
 *
 * @param p_dev
 * @param p_params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
				      struct ecore_hw_prepare_params *p_params);
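
/* Illustrative usage sketch (an addition, not part of the original API
 * documentation): a relaxed probe that inspects `p_relaxed_res' on return.
 * Field values other than b_relaxed_probe are assumptions for illustration.
 *
 *	struct ecore_hw_prepare_params params;
 *
 *	OSAL_MEMSET(&params, 0, sizeof(params));
 *	params.personality = ECORE_PCI_DEFAULT;
 *	params.initiate_pf_flr = true;
 *	params.b_relaxed_probe = true;
 *
 *	if (ecore_hw_prepare(p_dev, &params) == ECORE_SUCCESS &&
 *	    params.p_relaxed_res != ECORE_HW_PREPARE_SUCCESS)
 *		DP_NOTICE(p_dev, false, "Probe passed with result %d\n",
 *			  params.p_relaxed_res);
 */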

/**
 * @brief ecore_hw_remove - remove the device
 *
 * @param p_dev
 */
void ecore_hw_remove(struct ecore_dev *p_dev);

/**
 * @brief ecore_ptt_acquire - Allocate a PTT window
 *
 * Should be called at the entry point to the driver (at the beginning of an
 * exported function)
 *
 * @param p_hwfn
 *
 * @return struct ecore_ptt *
 */
struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn);

/**
 * @brief ecore_ptt_release - Release PTT Window
 *
 * Should be called at the end of a flow - at the end of the function that
 * acquired the PTT.
 *
 * @param p_hwfn
 * @param p_ptt
 */
void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt);
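
/* Illustrative usage sketch (an addition, not part of the original API
 * documentation): the canonical acquire/use/release pattern for a PTT
 * window. `do_hw_access' is a hypothetical placeholder for the actual
 * register work.
 *
 *	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
 *
 *	if (p_ptt == OSAL_NULL)
 *		return ECORE_AGAIN;
 *	do_hw_access(p_hwfn, p_ptt);
 *	ecore_ptt_release(p_hwfn, p_ptt);
 */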

#ifndef __EXTRACT__LINUX__
struct ecore_eth_stats_common {
	u64 no_buff_discards;
	u64 packet_too_big_discard;
	u64 ttl0_discard;
	u64 rx_ucast_bytes;
	u64 rx_mcast_bytes;
	u64 rx_bcast_bytes;
	u64 rx_ucast_pkts;
	u64 rx_mcast_pkts;
	u64 rx_bcast_pkts;
	u64 mftag_filter_discards;
	u64 mac_filter_discards;
	u64 tx_ucast_bytes;
	u64 tx_mcast_bytes;
	u64 tx_bcast_bytes;
	u64 tx_ucast_pkts;
	u64 tx_mcast_pkts;
	u64 tx_bcast_pkts;
	u64 tx_err_drop_pkts;
	u64 tpa_coalesced_pkts;
	u64 tpa_coalesced_events;
	u64 tpa_aborts_num;
	u64 tpa_not_coalesced_pkts;
	u64 tpa_coalesced_bytes;

	/* port */
	u64 rx_64_byte_packets;
	u64 rx_65_to_127_byte_packets;
	u64 rx_128_to_255_byte_packets;
	u64 rx_256_to_511_byte_packets;
	u64 rx_512_to_1023_byte_packets;
	u64 rx_1024_to_1518_byte_packets;
	u64 rx_crc_errors;
	u64 rx_mac_crtl_frames;
	u64 rx_pause_frames;
	u64 rx_pfc_frames;
	u64 rx_align_errors;
	u64 rx_carrier_errors;
	u64 rx_oversize_packets;
	u64 rx_jabbers;
	u64 rx_undersize_packets;
	u64 rx_fragments;
	u64 tx_64_byte_packets;
	u64 tx_65_to_127_byte_packets;
	u64 tx_128_to_255_byte_packets;
	u64 tx_256_to_511_byte_packets;
	u64 tx_512_to_1023_byte_packets;
	u64 tx_1024_to_1518_byte_packets;
	u64 tx_pause_frames;
	u64 tx_pfc_frames;
	u64 brb_truncates;
	u64 brb_discards;
	u64 rx_mac_bytes;
	u64 rx_mac_uc_packets;
	u64 rx_mac_mc_packets;
	u64 rx_mac_bc_packets;
	u64 rx_mac_frames_ok;
	u64 tx_mac_bytes;
	u64 tx_mac_uc_packets;
	u64 tx_mac_mc_packets;
	u64 tx_mac_bc_packets;
	u64 tx_mac_ctrl_frames;
};

struct ecore_eth_stats_bb {
	u64 rx_1519_to_1522_byte_packets;
	u64 rx_1519_to_2047_byte_packets;
	u64 rx_2048_to_4095_byte_packets;
	u64 rx_4096_to_9216_byte_packets;
	u64 rx_9217_to_16383_byte_packets;
	u64 tx_1519_to_2047_byte_packets;
	u64 tx_2048_to_4095_byte_packets;
	u64 tx_4096_to_9216_byte_packets;
	u64 tx_9217_to_16383_byte_packets;
	u64 tx_lpi_entry_count;
	u64 tx_total_collisions;
};

struct ecore_eth_stats_ah {
	u64 rx_1519_to_max_byte_packets;
	u64 tx_1519_to_max_byte_packets;
};

struct ecore_eth_stats {
	struct ecore_eth_stats_common common;
	union {
		struct ecore_eth_stats_bb bb;
		struct ecore_eth_stats_ah ah;
	};
};
#endif

enum ecore_dmae_address_type_t {
	ECORE_DMAE_ADDRESS_HOST_VIRT,
	ECORE_DMAE_ADDRESS_HOST_PHYS,
	ECORE_DMAE_ADDRESS_GRC
};

/* Flag values: if the ECORE_DMAE_FLAG_RW_REPL_SRC flag is set, the source
 * is a block of length DMAE_MAX_RW_SIZE and the destination is larger, the
 * source block will be duplicated as many times as required to fill the
 * destination block. This is used mostly to write a zeroed buffer to a
 * destination address using DMA.
 */
#define ECORE_DMAE_FLAG_RW_REPL_SRC	0x00000001
#define ECORE_DMAE_FLAG_VF_SRC		0x00000002
#define ECORE_DMAE_FLAG_VF_DST		0x00000004
#define ECORE_DMAE_FLAG_COMPLETION_DST	0x00000008

struct ecore_dmae_params {
	u32 flags; /* consists of ECORE_DMAE_FLAG_* values */
	u8 src_vfid;
	u8 dst_vfid;
};

/**
 * @brief ecore_dmae_host2grc - copy data from a source address to
 * a GRC address (dmae_data_offset) using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_addr
 * @param grc_addr (dmae_data_offset)
 * @param size_in_dwords
 * @param flags - one of the flags defined above
 */
enum _ecore_status_t
ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt,
		    u64 source_addr,
		    u32 grc_addr,
		    u32 size_in_dwords,
		    u32 flags);
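
/* Illustrative usage sketch (an addition, not part of the original API
 * documentation): zeroing a GRC region by replicating a small zeroed source
 * block, per the ECORE_DMAE_FLAG_RW_REPL_SRC description above.
 * `zero_buf_phys' (DMA address of a zeroed buffer) and `grc_dest' are
 * assumptions for illustration.
 *
 *	rc = ecore_dmae_host2grc(p_hwfn, p_ptt, zero_buf_phys, grc_dest,
 *				 size_in_dwords,
 *				 ECORE_DMAE_FLAG_RW_REPL_SRC);
 */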

/**
 * @brief ecore_dmae_grc2host - read data from a GRC address
 * (dmae_data_offset) into a destination address using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param grc_addr (dmae_data_offset)
 * @param dest_addr
 * @param size_in_dwords
 * @param flags - one of the flags defined above
 */
enum _ecore_status_t
ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt,
		    u32 grc_addr,
		    dma_addr_t dest_addr,
		    u32 size_in_dwords,
		    u32 flags);

/**
 * @brief ecore_dmae_host2host - copy data from a source address
 * to a destination address (for SRIOV) using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_addr
 * @param dest_addr
 * @param size_in_dwords
 * @param p_params
 */
enum _ecore_status_t
ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt,
		     dma_addr_t source_addr,
		     dma_addr_t dest_addr,
		     u32 size_in_dwords,
		     struct ecore_dmae_params *p_params);
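
/* Illustrative usage sketch (an addition, not part of the original API
 * documentation): a PF copying a buffer into a VF's memory, marking the
 * destination as VF-owned through the params flags. `vf_id', `src_phys' and
 * `dst_phys' are assumptions for illustration.
 *
 *	struct ecore_dmae_params params;
 *
 *	OSAL_MEMSET(&params, 0, sizeof(params));
 *	params.flags = ECORE_DMAE_FLAG_VF_DST;
 *	params.dst_vfid = vf_id;
 *	rc = ecore_dmae_host2host(p_hwfn, p_ptt, src_phys, dst_phys,
 *				  size_in_dwords, &params);
 */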

/**
 * @brief ecore_chain_alloc - Allocate and initialize a chain
 *
 * @param p_dev
 * @param intended_use
 * @param mode
 * @param cnt_type
 * @param num_elems
 * @param elem_size
 * @param p_chain
 * @param ext_pbl
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_chain_alloc(struct ecore_dev *p_dev,
		  enum ecore_chain_use_mode intended_use,
		  enum ecore_chain_mode mode,
		  enum ecore_chain_cnt_type cnt_type,
		  u32 num_elems,
		  osal_size_t elem_size,
		  struct ecore_chain *p_chain,
		  struct ecore_chain_ext_pbl *ext_pbl);

/**
 * @brief ecore_chain_free - Free chain DMA memory
 *
 * @param p_dev
 * @param p_chain
 */
void ecore_chain_free(struct ecore_dev *p_dev,
		      struct ecore_chain *p_chain);
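
/* Illustrative usage sketch (an addition, not part of the original API
 * documentation): allocating a produce/consume PBL chain and freeing it.
 * `num_elems', `elem_size' and the surrounding flow are assumptions for
 * illustration.
 *
 *	struct ecore_chain chain;
 *
 *	rc = ecore_chain_alloc(p_dev, ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
 *			       ECORE_CHAIN_MODE_PBL, ECORE_CHAIN_CNT_TYPE_U16,
 *			       num_elems, elem_size, &chain, OSAL_NULL);
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 *	...
 *	ecore_chain_free(p_dev, &chain);
 */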

/**
 * @brief ecore_fw_l2_queue - Get absolute L2 queue ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
				       u16 src_id,
				       u16 *dst_id);

/**
 * @brief ecore_fw_vport - Get absolute vport ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
				    u8 src_id,
				    u8 *dst_id);

/**
 * @brief ecore_fw_rss_eng - Get absolute RSS engine ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
				      u8 src_id,
				      u8 *dst_id);

/**
 * @brief ecore_llh_add_mac_filter - configures a MAC filter in llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_filter - MAC to add
 */
enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u8 *p_filter);
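
/* Illustrative usage sketch (an addition, not part of the original API
 * documentation): adding a unicast MAC to the llh. The MAC value is an
 * assumption for illustration.
 *
 *	u8 mac[6] = { 0x00, 0x0e, 0x1e, 0x00, 0x00, 0x01 };
 *
 *	rc = ecore_llh_add_mac_filter(p_hwfn, p_ptt, mac);
 */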

/**
 * @brief ecore_llh_remove_mac_filter - removes a MAC filter from llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_filter - MAC to remove
 */
void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 u8 *p_filter);

enum ecore_llh_port_filter_type_t {
	ECORE_LLH_FILTER_ETHERTYPE,
	ECORE_LLH_FILTER_TCP_SRC_PORT,
	ECORE_LLH_FILTER_TCP_DEST_PORT,
	ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT,
	ECORE_LLH_FILTER_UDP_SRC_PORT,
	ECORE_LLH_FILTER_UDP_DEST_PORT,
	ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT
};

/**
 * @brief ecore_llh_add_protocol_filter - configures a protocol filter in llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_port_or_eth_type - source port or ethertype to add
 * @param dest_port - destination port to add
 * @param type - type of filter and comparison to use
 */
enum _ecore_status_t
ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      u16 source_port_or_eth_type,
			      u16 dest_port,
			      enum ecore_llh_port_filter_type_t type);
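
/* Illustrative usage sketch (an addition, not part of the original API
 * documentation): steering traffic by UDP destination port; 4789 (the
 * IANA-assigned VXLAN port) is used here purely as an example value, and the
 * source port is simply passed as 0.
 *
 *	rc = ecore_llh_add_protocol_filter(p_hwfn, p_ptt, 0, 4789,
 *					   ECORE_LLH_FILTER_UDP_DEST_PORT);
 */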

/**
 * @brief ecore_llh_remove_protocol_filter - removes a protocol filter from llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_port_or_eth_type - source port or ethertype to remove
 * @param dest_port - destination port to remove
 * @param type - type of filter and comparison to use
 */
void
ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 u16 source_port_or_eth_type,
				 u16 dest_port,
				 enum ecore_llh_port_filter_type_t type);

/**
 * @brief ecore_llh_clear_all_filters - removes all MAC filters from llh
 *
 * @param p_hwfn
 * @param p_ptt
 */
void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt);

/**
 * @brief ecore_llh_set_function_as_default - set function as default per port
 *
 * @param p_hwfn
 * @param p_ptt
 */
enum _ecore_status_t
ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt);

/**
 * @brief ecore_final_cleanup - cleanup of previous driver remains
 *        prior to load
 *
 * @param p_hwfn
 * @param p_ptt
 * @param id - For PF, engine-relative. For VF, PF-relative.
 * @param is_vf - true iff cleanup is made for a VF.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn	*p_hwfn,
					 struct ecore_ptt	*p_ptt,
					 u16			id,
					 bool			is_vf);
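
/* Illustrative usage sketch (an addition, not part of the original API
 * documentation): requesting a final cleanup for this PF using its
 * engine-relative id (`rel_pf_id' is assumed here to come from
 * struct ecore_hwfn).
 *
 *	rc = ecore_final_cleanup(p_hwfn, p_ptt, p_hwfn->rel_pf_id, false);
 */
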
/**
 * @brief ecore_set_queue_coalesce - Configure coalesce parameters for Rx and
 *    Tx queues. Coalescing can be configured up to 511 microseconds, but with
 *    decreasing accuracy [the bigger the value the less accurate], up to an
 *    error of 3 usec for the highest values.
 *    While the API allows setting coalescing per-qid, all queues sharing a SB
 *    should be in the same range [i.e., either 0-0x7f, 0x80-0xff or
 *    0x100-0x1ff], otherwise the configuration would break.
 *
 * @param p_hwfn
 * @param rx_coal - Rx Coalesce value in microseconds.
 * @param tx_coal - Tx Coalesce value in microseconds.
 * @param p_handle
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal,
			 u16 tx_coal, void *p_handle);
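
/* Illustrative usage sketch (an addition, not part of the original API
 * documentation): setting Rx/Tx coalescing on a queue, with both values kept
 * inside the same 0-0x7f range per the constraint above. `p_queue_handle' is
 * an assumption for illustration.
 *
 *	rc = ecore_set_queue_coalesce(p_hwfn, 48, 96, p_queue_handle);
 */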

/**
 * @brief ecore_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER
 *
 * @param p_hwfn
 * @param p_ptt
 * @param b_enable - true/false
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt,
						  bool b_enable);
#endif