/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#ifndef __ECORE_DEV_API_H__
#define __ECORE_DEV_API_H__

#include "ecore_status.h"
#include "ecore_chain.h"
#include "ecore_int_api.h"

/**
 * @brief ecore_init_dp - initialize the debug level
 *
 * @param p_dev
 * @param dp_module - bitmask selecting which debug modules are verbose
 * @param dp_level - debug verbosity level
 * @param dp_ctx - opaque context handed back to the OSAL debug-print hook
 */
void ecore_init_dp(struct ecore_dev *p_dev,
		   u32 dp_module,
		   u8 dp_level,
		   void *dp_ctx);

/**
 * @brief ecore_init_struct - initialize the device structure to
 *        its defaults
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_free - free the resources allocated by
 *        ecore_resc_alloc()
 *
 * @param p_dev
 */
void ecore_resc_free(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_alloc - allocate the device resources
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_setup - set up the previously allocated resources
 *
 * @param p_dev
 */
void ecore_resc_setup(struct ecore_dev *p_dev);

/* Action to take when the MFW doesn't support the requested timeout value
 * for the driver-load engine lock (see ecore_drv_load_params).
 */
enum ecore_mfw_timeout_fallback {
	ECORE_TO_FALLBACK_TO_NONE,	/* fall back to no timeout */
	ECORE_TO_FALLBACK_TO_DEFAULT,	/* fall back to the default timeout */
	ECORE_TO_FALLBACK_FAIL_LOAD,	/* fail the load request */
};

/* Override of the default force-load behavior during the load request */
enum ecore_override_force_load {
	ECORE_OVERRIDE_FORCE_LOAD_NONE,		/* keep the default behavior */
	ECORE_OVERRIDE_FORCE_LOAD_ALWAYS,	/* always force the load */
	ECORE_OVERRIDE_FORCE_LOAD_NEVER,	/* never force the load */
};

/* Parameters describing how the driver-load request is sent to the MFW */
struct ecore_drv_load_params {
	/* Indicates whether the driver is running over a crash kernel.
	 * As part of the load request, this will be used for providing the
	 * driver role to the MFW.
	 * In case of a crash kernel over PDA - this should be set to false.
	 */
	bool is_crash_kernel;

	/* The timeout value that the MFW should use when locking the engine for
	 * the driver load process.
	 * A value of '0' means the default value, and '255' means no timeout.
	 */
	u8 mfw_timeout_val;
#define ECORE_LOAD_REQ_LOCK_TO_DEFAULT	0
#define ECORE_LOAD_REQ_LOCK_TO_NONE	255

	/* Action to take in case the MFW doesn't support timeout values other
	 * than default and none.
	 */
	enum ecore_mfw_timeout_fallback mfw_timeout_fallback;

	/* Avoid engine reset when first PF loads on it */
	bool avoid_eng_reset;

	/* Allow overriding the default force load behavior */
	enum ecore_override_force_load override_force_load;
};

/* Parameters for ecore_hw_init() */
struct ecore_hw_init_params {
	/* Tunneling parameters */
	struct ecore_tunnel_info *p_tunn;

	/* Whether to actually start the HW after the init sequence */
	bool b_hw_start;

	/* Interrupt mode [msix, inta, etc.] to use */
	enum ecore_int_mode int_mode;

	/* NPAR tx switching to be used for vports configured for tx-switching
	 */
	bool allow_npar_tx_switch;

	/* Binary fw data pointer in binary fw file */
	const u8 *bin_fw_data;

	/* Driver load parameters */
	struct ecore_drv_load_params *p_drv_load_params;

	/* Avoid engine affinity for RoCE/storage in case of CMT mode */
	bool avoid_eng_affin;

	/* SPQ block timeout in msec */
	u32 spq_timeout_ms;
};

/**
 * @brief ecore_hw_init - initialize the HW according to p_params
 *
 * @param p_dev
 * @param p_params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
				   struct ecore_hw_init_params *p_params);

/**
 * @brief ecore_hw_timers_stop_all - stop the HW timers on all hwfns
 *
 * @param p_dev
 *
 * @return void
 */
void ecore_hw_timers_stop_all(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_stop - stop the HW
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_stop_fastpath - should be called in case
 *        slowpath is still required for the device,
 *        but fastpath is not.
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev);

#ifndef LINUX_REMOVE
/**
 * @brief ecore_prepare_hibernate - should be called when
 *        the system is going into the hibernate state
 *
 * @param p_dev
 *
 */
void ecore_prepare_hibernate(struct ecore_dev *p_dev);

/* Width of a doorbell entry tracked by the doorbell recovery mechanism */
enum ecore_db_rec_width {
	DB_REC_WIDTH_32B,
	DB_REC_WIDTH_64B,
};

/* Address space that holds the tracked doorbell data */
enum ecore_db_rec_space {
	DB_REC_KERNEL,
	DB_REC_USER,
};

/**
 * @brief db_recovery_add - add doorbell information to the doorbell
 *        recovery mechanism.
 *
 * @param p_dev
 * @param db_addr - doorbell address
 * @param db_data - address of where db_data is stored
 * @param db_width - doorbell is 32b or 64b
 * @param db_space - doorbell recovery addresses are user or kernel space
 */
enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
					   void OSAL_IOMEM *db_addr,
					   void *db_data,
					   enum ecore_db_rec_width db_width,
					   enum ecore_db_rec_space db_space);

/**
 * @brief db_recovery_del - remove doorbell information from the doorbell
 *        recovery mechanism. db_data serves as key (db_addr is not unique).
 *
 * @param p_dev
 * @param db_addr - doorbell address
 * @param db_data - address where db_data is stored. Serves as key for the
 *                  entry to delete.
 */
enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
					   void OSAL_IOMEM *db_addr,
					   void *db_data);

ecore_is_mf_ufp(struct ecore_hwfn * p_hwfn)213 static OSAL_INLINE bool ecore_is_mf_ufp(struct ecore_hwfn *p_hwfn)
214 {
215 return !!OSAL_GET_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits);
216 }
217
218 #endif
219
/**
 * @brief ecore_hw_start_fastpath - restart fastpath traffic,
 *        only if hw_stop_fastpath was called
 *
 * @param p_hwfn
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);

/* Outcome of ecore_hw_prepare(), reported via p_relaxed_res when the
 * relaxed-probe mode is requested.
 */
enum ecore_hw_prepare_result {
	ECORE_HW_PREPARE_SUCCESS,

	/* FAILED results indicate probe has failed & cleaned up */
	ECORE_HW_PREPARE_FAILED_ENG2,
	ECORE_HW_PREPARE_FAILED_ME,
	ECORE_HW_PREPARE_FAILED_MEM,
	ECORE_HW_PREPARE_FAILED_DEV,
	ECORE_HW_PREPARE_FAILED_NVM,

	/* BAD results indicate probe is passed even though some wrongness
	 * has occurred; Trying to actually use [I.e., hw_init()] might have
	 * dire repercussions.
	 */
	ECORE_HW_PREPARE_BAD_IOV,
	ECORE_HW_PREPARE_BAD_MCP,
	ECORE_HW_PREPARE_BAD_IGU,
};

/* Parameters for ecore_hw_prepare() */
struct ecore_hw_prepare_params {
	/* Personality to initialize */
	int personality;

	/* Force the driver's default resource allocation */
	bool drv_resc_alloc;

	/* Check the reg_fifo after any register access */
	bool chk_reg_fifo;

	/* Request the MFW to initiate PF FLR */
	bool initiate_pf_flr;

	/* The OS Epoch time in seconds */
	u32 epoch;

	/* Allow the MFW to collect a crash dump */
	bool allow_mdump;

	/* Allow prepare to pass even if some initializations are failing.
	 * If set, the `p_relaxed_res' field would be set with the return,
	 * and might allow probe to pass even if there are certain issues.
	 */
	bool b_relaxed_probe;
	enum ecore_hw_prepare_result p_relaxed_res;

	/* Enable/disable request by ecore client for pacing */
	bool b_en_pacing;

	/* Indicates whether this PF serves a storage target */
	bool b_is_target;

	/* retry count for VF acquire on channel timeout */
	u8 acquire_retry_cnt;
};

/**
 * @brief ecore_hw_prepare - probe and prepare the device for use
 *
 * @param p_dev
 * @param p_params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
				      struct ecore_hw_prepare_params *p_params);

/**
 * @brief ecore_hw_remove - undo ecore_hw_prepare()
 *
 * @param p_dev
 */
void ecore_hw_remove(struct ecore_dev *p_dev);

/**
 * @brief ecore_ptt_acquire - Allocate a PTT window
 *
 * Should be called at the entry point to the driver (at the beginning of an
 * exported function)
 *
 * @param p_hwfn
 *
 * @return struct ecore_ptt
 */
struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn);

/**
 * @brief ecore_ptt_release - Release PTT Window
 *
 * Should be called at the end of a flow - at the end of the function that
 * acquired the PTT.
 *
 *
 * @param p_hwfn
 * @param p_ptt
 */
void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt);

/* L2 statistics common to all device generations */
struct ecore_eth_stats_common {
	u64 no_buff_discards;
	u64 packet_too_big_discard;
	u64 ttl0_discard;
	u64 rx_ucast_bytes;
	u64 rx_mcast_bytes;
	u64 rx_bcast_bytes;
	u64 rx_ucast_pkts;
	u64 rx_mcast_pkts;
	u64 rx_bcast_pkts;
	u64 mftag_filter_discards;
	u64 mac_filter_discards;
	u64 gft_filter_drop;
	u64 tx_ucast_bytes;
	u64 tx_mcast_bytes;
	u64 tx_bcast_bytes;
	u64 tx_ucast_pkts;
	u64 tx_mcast_pkts;
	u64 tx_bcast_pkts;
	u64 tx_err_drop_pkts;
	u64 tpa_coalesced_pkts;
	u64 tpa_coalesced_events;
	u64 tpa_aborts_num;
	u64 tpa_not_coalesced_pkts;
	u64 tpa_coalesced_bytes;

	/* port */
	u64 rx_64_byte_packets;
	u64 rx_65_to_127_byte_packets;
	u64 rx_128_to_255_byte_packets;
	u64 rx_256_to_511_byte_packets;
	u64 rx_512_to_1023_byte_packets;
	u64 rx_1024_to_1518_byte_packets;
	u64 rx_crc_errors;
	u64 rx_mac_crtl_frames; /* NOTE: misspelled name kept for compatibility */
	u64 rx_pause_frames;
	u64 rx_pfc_frames;
	u64 rx_align_errors;
	u64 rx_carrier_errors;
	u64 rx_oversize_packets;
	u64 rx_jabbers;
	u64 rx_undersize_packets;
	u64 rx_fragments;
	u64 tx_64_byte_packets;
	u64 tx_65_to_127_byte_packets;
	u64 tx_128_to_255_byte_packets;
	u64 tx_256_to_511_byte_packets;
	u64 tx_512_to_1023_byte_packets;
	u64 tx_1024_to_1518_byte_packets;
	u64 tx_pause_frames;
	u64 tx_pfc_frames;
	u64 brb_truncates;
	u64 brb_discards;
	u64 rx_mac_bytes;
	u64 rx_mac_uc_packets;
	u64 rx_mac_mc_packets;
	u64 rx_mac_bc_packets;
	u64 rx_mac_frames_ok;
	u64 tx_mac_bytes;
	u64 tx_mac_uc_packets;
	u64 tx_mac_mc_packets;
	u64 tx_mac_bc_packets;
	u64 tx_mac_ctrl_frames;
	u64 link_change_count;
};

/* L2 statistics specific to BB-generation devices */
struct ecore_eth_stats_bb {
	u64 rx_1519_to_1522_byte_packets;
	u64 rx_1519_to_2047_byte_packets;
	u64 rx_2048_to_4095_byte_packets;
	u64 rx_4096_to_9216_byte_packets;
	u64 rx_9217_to_16383_byte_packets;
	u64 tx_1519_to_2047_byte_packets;
	u64 tx_2048_to_4095_byte_packets;
	u64 tx_4096_to_9216_byte_packets;
	u64 tx_9217_to_16383_byte_packets;
	u64 tx_lpi_entry_count;
	u64 tx_total_collisions;
};

/* L2 statistics specific to AH-generation devices */
struct ecore_eth_stats_ah {
	u64 rx_1519_to_max_byte_packets;
	u64 tx_1519_to_max_byte_packets;
};

/* Complete L2 statistics: common part plus the per-generation part */
struct ecore_eth_stats {
	struct ecore_eth_stats_common common;
	union {
		struct ecore_eth_stats_bb bb;
		struct ecore_eth_stats_ah ah;
	};
};

/**
 * @brief ecore_chain_alloc - Allocate and initialize a chain
 *
 * @param p_dev
 * @param intended_use
 * @param mode
 * @param cnt_type
 * @param num_elems
 * @param elem_size
 * @param p_chain
 * @param ext_pbl - optional externally-allocated PBL
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_chain_alloc(struct ecore_dev *p_dev,
		  enum ecore_chain_use_mode intended_use,
		  enum ecore_chain_mode mode,
		  enum ecore_chain_cnt_type cnt_type,
		  u32 num_elems,
		  osal_size_t elem_size,
		  struct ecore_chain *p_chain,
		  struct ecore_chain_ext_pbl *ext_pbl);

/**
 * @brief ecore_chain_free - Free chain DMA memory
 *
 * @param p_dev
 * @param p_chain
 */
void ecore_chain_free(struct ecore_dev *p_dev,
		      struct ecore_chain *p_chain);

/**
 * @brief ecore_fw_l2_queue - Get absolute L2 queue ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
				       u16 src_id,
				       u16 *dst_id);

/**
 * @brief ecore_fw_vport - Get absolute vport ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
				    u8 src_id,
				    u8 *dst_id);

/**
 * @brief ecore_fw_rss_eng - Get absolute RSS engine ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
				      u8 src_id,
				      u8 *dst_id);

/**
 * @brief ecore_llh_get_num_ppfid - Return the allocated number of LLH filter
 *        banks that are allocated to the PF.
 *
 * @param p_dev
 *
 * @return u8 - Number of LLH filter banks
 */
u8 ecore_llh_get_num_ppfid(struct ecore_dev *p_dev);

/* Engine selector used for LLH/RoCE affinity configuration */
enum ecore_eng {
	ECORE_ENG0,
	ECORE_ENG1,
	ECORE_BOTH_ENG,
};

/**
 * @brief ecore_llh_get_l2_affinity_hint - Return the hint for the L2 affinity
 *
 * @param p_dev
 *
 * @return enum ecore_eng - L2 affinity hint
 */
enum ecore_eng ecore_llh_get_l2_affinity_hint(struct ecore_dev *p_dev);

/**
 * @brief ecore_llh_set_ppfid_affinity - Set the engine affinity for the given
 *        LLH filter bank.
 *
 * @param p_dev
 * @param ppfid - relative within the allocated ppfids ('0' is the default one).
 * @param eng
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_llh_set_ppfid_affinity(struct ecore_dev *p_dev,
						  u8 ppfid, enum ecore_eng eng);

/**
 * @brief ecore_llh_set_roce_affinity - Set the RoCE engine affinity
 *
 * @param p_dev
 * @param eng
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_llh_set_roce_affinity(struct ecore_dev *p_dev,
						 enum ecore_eng eng);

/**
 * @brief ecore_llh_add_mac_filter - Add a LLH MAC filter into the given filter
 *        bank.
 *
 * @param p_dev
 * @param ppfid - relative within the allocated ppfids ('0' is the default one).
 * @param mac_addr - MAC to add
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
					      u8 mac_addr[ETH_ALEN]);

/**
 * @brief ecore_llh_remove_mac_filter - Remove a LLH MAC filter from the given
 *        filter bank.
 *
 * @param p_dev
 * @param ppfid - relative within the allocated ppfids ('0' is the default one).
 * @param mac_addr - MAC to remove
 */
void ecore_llh_remove_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
				 u8 mac_addr[ETH_ALEN]);

/* Type of an LLH protocol filter: which header fields are compared */
enum ecore_llh_prot_filter_type_t {
	ECORE_LLH_FILTER_ETHERTYPE,
	ECORE_LLH_FILTER_TCP_SRC_PORT,
	ECORE_LLH_FILTER_TCP_DEST_PORT,
	ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT,
	ECORE_LLH_FILTER_UDP_SRC_PORT,
	ECORE_LLH_FILTER_UDP_DEST_PORT,
	ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT
};

/**
 * @brief ecore_llh_add_protocol_filter - Add a LLH protocol filter into the
 *        given filter bank.
 *
 * @param p_dev
 * @param ppfid - relative within the allocated ppfids ('0' is the default one).
 * @param type - type of filters and comparing
 * @param source_port_or_eth_type - source port or ethertype to add
 * @param dest_port - destination port to add
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_llh_add_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
			      enum ecore_llh_prot_filter_type_t type,
			      u16 source_port_or_eth_type, u16 dest_port);

/**
 * @brief ecore_llh_remove_protocol_filter - Remove a LLH protocol filter from
 *        the given filter bank.
 *
 * @param p_dev
 * @param ppfid - relative within the allocated ppfids ('0' is the default one).
 * @param type - type of filters and comparing
 * @param source_port_or_eth_type - source port or ethertype to remove
 * @param dest_port - destination port to remove
 */
void ecore_llh_remove_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
				      enum ecore_llh_prot_filter_type_t type,
				      u16 source_port_or_eth_type,
				      u16 dest_port);

/**
 * @brief ecore_llh_clear_ppfid_filters - Remove all LLH filters from the given
 *        filter bank.
 *
 * @param p_dev
 * @param ppfid - relative within the allocated ppfids ('0' is the default one).
 */
void ecore_llh_clear_ppfid_filters(struct ecore_dev *p_dev, u8 ppfid);

/**
 * @brief ecore_llh_clear_all_filters - Remove all LLH filters
 *
 * @param p_dev
 */
void ecore_llh_clear_all_filters(struct ecore_dev *p_dev);

/**
 * @brief ecore_llh_set_function_as_default - set function as default per port
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt);

/**
 * @brief Cleanup of previous driver remains prior to load
 *
 * @param p_hwfn
 * @param p_ptt
 * @param id - For PF, engine-relative. For VF, PF-relative.
 * @param is_vf - true iff cleanup is made for a VF.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 id,
					 bool is_vf);

/**
 * @brief ecore_get_queue_coalesce - Retrieve coalesce value for a given queue.
 *
 * @param p_hwfn
 * @param coal - store coalesce value read from the hardware.
 * @param handle - opaque queue handle
 *
 * @return enum _ecore_status_t
 **/
enum _ecore_status_t
ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *coal,
			 void *handle);

/**
 * @brief ecore_set_queue_coalesce - Configure coalesce parameters for Rx and
 *        Tx queue. The fact that we can configure coalescing to up to 511, but
 *        on varying accuracy [the bigger the value the less accurate] up to a
 *        mistake of 3usec for the highest values.
 *        While the API allows setting coalescing per-qid, all queues sharing a
 *        SB should be in same range [i.e., either 0-0x7f, 0x80-0xff or
 *        0x100-0x1ff] otherwise configuration would break.
 *
 * @param p_hwfn
 * @param rx_coal - Rx Coalesce value in micro seconds.
 * @param tx_coal - TX Coalesce value in micro seconds.
 * @param p_handle - opaque queue handle
 *
 * @return enum _ecore_status_t
 **/
enum _ecore_status_t
ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal,
			 u16 tx_coal, void *p_handle);

/**
 * @brief ecore_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER
 *
 * @param p_hwfn
 * @param p_ptt
 * @param b_enable - true/false
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt,
						  bool b_enable);

/**
 * @brief Whether FIP discovery fallback special mode is enabled or not.
 *
 * @param p_dev
 *
 * @return true if device is in FIP special mode, false otherwise.
 */
bool ecore_is_mf_fip_special(struct ecore_dev *p_dev);
#endif
