/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#ifndef __ECORE_DEV_API_H__
#define __ECORE_DEV_API_H__

#include "ecore_status.h"
#include "ecore_chain.h"
#include "ecore_int_api.h"

/**
 * @brief ecore_init_dp - initialize the debug level
 *
 * @param p_dev
 * @param dp_module
 * @param dp_level
 * @param dp_ctx
 */
void ecore_init_dp(struct ecore_dev *p_dev,
		   u32 dp_module,
		   u8 dp_level,
		   void *dp_ctx);

/**
 * @brief ecore_init_struct - initialize the device structure to
 *        its defaults
 *
 * @param p_dev
 */
enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_free - free the device resources
 *
 * @param p_dev
 */
void ecore_resc_free(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_alloc - allocate the device resources
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_setup - setup the previously allocated resources
 *
 * @param p_dev
 */
void ecore_resc_setup(struct ecore_dev *p_dev);

enum ecore_mfw_timeout_fallback {
	ECORE_TO_FALLBACK_TO_NONE,
	ECORE_TO_FALLBACK_TO_DEFAULT,
	ECORE_TO_FALLBACK_FAIL_LOAD,
};

enum ecore_override_force_load {
	ECORE_OVERRIDE_FORCE_LOAD_NONE,
	ECORE_OVERRIDE_FORCE_LOAD_ALWAYS,
	ECORE_OVERRIDE_FORCE_LOAD_NEVER,
};

struct ecore_drv_load_params {
	/* Indicates whether the driver is running over a crash kernel.
	 * As part of the load request, this will be used for providing the
	 * driver role to the MFW.
	 * In case of a crash kernel over PDA - this should be set to false.
	 */
	bool is_crash_kernel;

	/* The timeout value that the MFW should use when locking the engine
	 * for the driver load process.
	 * A value of '0' means the default value, and '255' means no timeout.
	 */
	u8 mfw_timeout_val;
#define ECORE_LOAD_REQ_LOCK_TO_DEFAULT	0
#define ECORE_LOAD_REQ_LOCK_TO_NONE	255

	/* Action to take in case the MFW doesn't support timeout values other
	 * than default and none.
	 */
	enum ecore_mfw_timeout_fallback mfw_timeout_fallback;

	/* Avoid engine reset when the first PF loads on it */
	bool avoid_eng_reset;

	/* Allow overriding the default force-load behavior */
	enum ecore_override_force_load override_force_load;
};
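
/* Illustrative sketch (not part of the API, hence guarded out): one
 * plausible way a driver could fill 'struct ecore_drv_load_params' before
 * referencing it from 'struct ecore_hw_init_params' below. OSAL_MEM_ZERO
 * is assumed to be the usual OSAL zeroing helper; 'is_kdump' is a
 * hypothetical caller-provided flag.
 */
#if 0
static void example_fill_load_params(struct ecore_drv_load_params *p_params,
				     bool is_kdump)
{
	OSAL_MEM_ZERO(p_params, sizeof(*p_params));

	/* Tell the MFW whether we are a crash-kernel instance */
	p_params->is_crash_kernel = is_kdump;

	/* Use the MFW default engine-lock timeout; fall back to the
	 * default if the MFW cannot honor other values.
	 */
	p_params->mfw_timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
	p_params->mfw_timeout_fallback = ECORE_TO_FALLBACK_TO_DEFAULT;

	/* Keep the stock force-load behavior and allow engine reset */
	p_params->override_force_load = ECORE_OVERRIDE_FORCE_LOAD_NONE;
	p_params->avoid_eng_reset = false;
}
#endif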

struct ecore_hw_init_params {
	/* Tunneling parameters */
	struct ecore_tunnel_info *p_tunn;

	bool b_hw_start;

	/* Interrupt mode [msix, inta, etc.] to use */
	enum ecore_int_mode int_mode;

	/* NPAR tx switching to be used for vports configured for
	 * tx-switching
	 */
	bool allow_npar_tx_switch;

	/* Binary fw data pointer in binary fw file */
	const u8 *bin_fw_data;

	/* Driver load parameters */
	struct ecore_drv_load_params *p_drv_load_params;

	/* Avoid engine affinity for RoCE/storage in case of CMT mode */
	bool avoid_eng_affin;

	/* SPQ block timeout in msec */
	u32 spq_timeout_ms;
};

/**
 * @brief ecore_hw_init - initialize the HW
 *
 * @param p_dev
 * @param p_params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
				   struct ecore_hw_init_params *p_params);

/**
 * @brief ecore_hw_timers_stop_all - stop the timers HW block
 *
 * @param p_dev
 *
 * @return void
 */
void ecore_hw_timers_stop_all(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_stop - stop the HW
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_stop_fastpath - should be called in case
 *        slowpath is still required for the device,
 *        but fastpath is not.
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev);

#ifndef LINUX_REMOVE
/**
 * @brief ecore_prepare_hibernate - should be called when
 *        the system is going into the hibernate state
 *
 * @param p_dev
 */
void ecore_prepare_hibernate(struct ecore_dev *p_dev);

enum ecore_db_rec_width {
	DB_REC_WIDTH_32B,
	DB_REC_WIDTH_64B,
};

enum ecore_db_rec_space {
	DB_REC_KERNEL,
	DB_REC_USER,
};

/**
 * @brief ecore_db_recovery_add - add doorbell information to the doorbell
 *        recovery mechanism.
 *
 * @param p_dev
 * @param db_addr - doorbell address
 * @param db_data - address of where db_data is stored
 * @param db_width - doorbell is 32b or 64b
 * @param db_space - doorbell recovery addresses are user or kernel space
 */
enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
					   void OSAL_IOMEM *db_addr,
					   void *db_data,
					   enum ecore_db_rec_width db_width,
					   enum ecore_db_rec_space db_space);

/**
 * @brief ecore_db_recovery_del - remove doorbell information from the
 *        doorbell recovery mechanism. db_data serves as the key
 *        (db_addr is not unique).
 *
 * @param p_dev
 * @param db_addr - doorbell address
 * @param db_data - address where db_data is stored. Serves as the key for
 *                  the entry to delete.
 */
enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
					   void OSAL_IOMEM *db_addr,
					   void *db_data);
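
/* Illustrative sketch (not part of the API, hence guarded out):
 * registering a doorbell with the recovery mechanism when a queue is
 * created and removing it on teardown. Since db_addr is not unique,
 * db_data is the key for deletion. 'struct my_queue' is a hypothetical
 * driver-side structure.
 */
#if 0
struct my_queue {
	void OSAL_IOMEM *db_addr;	/* mapped doorbell register */
	u64 db_data;			/* last doorbell value written */
};

static enum _ecore_status_t example_queue_start(struct ecore_dev *p_dev,
						struct my_queue *q)
{
	/* A 64b doorbell whose shadow value lives in kernel space */
	return ecore_db_recovery_add(p_dev, q->db_addr, &q->db_data,
				     DB_REC_WIDTH_64B, DB_REC_KERNEL);
}

static void example_queue_stop(struct ecore_dev *p_dev, struct my_queue *q)
{
	/* &q->db_data serves as the lookup key for the entry */
	ecore_db_recovery_del(p_dev, q->db_addr, &q->db_data);
}
#endif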

static OSAL_INLINE bool ecore_is_mf_ufp(struct ecore_hwfn *p_hwfn)
{
	return !!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits);
}

#endif

/**
 * @brief ecore_hw_start_fastpath - restart fastpath traffic,
 *        only if hw_stop_fastpath was called
 *
 * @param p_hwfn
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);

enum ecore_hw_prepare_result {
	ECORE_HW_PREPARE_SUCCESS,

	/* FAILED results indicate probe has failed & cleaned up */
	ECORE_HW_PREPARE_FAILED_ENG2,
	ECORE_HW_PREPARE_FAILED_ME,
	ECORE_HW_PREPARE_FAILED_MEM,
	ECORE_HW_PREPARE_FAILED_DEV,
	ECORE_HW_PREPARE_FAILED_NVM,

	/* BAD results indicate probe is passed even though some wrongness
	 * has occurred; Trying to actually use the device [i.e., hw_init()]
	 * might have dire repercussions.
	 */
	ECORE_HW_PREPARE_BAD_IOV,
	ECORE_HW_PREPARE_BAD_MCP,
	ECORE_HW_PREPARE_BAD_IGU,
};

struct ecore_hw_prepare_params {
	/* Personality to initialize */
	int personality;

	/* Force the driver's default resource allocation */
	bool drv_resc_alloc;

	/* Check the reg_fifo after any register access */
	bool chk_reg_fifo;

	/* Request the MFW to initiate PF FLR */
	bool initiate_pf_flr;

	/* The OS Epoch time in seconds */
	u32 epoch;

	/* Allow the MFW to collect a crash dump */
	bool allow_mdump;

	/* Allow prepare to pass even if some initializations are failing.
	 * If set, the `p_relaxed_res' field would be set with the return,
	 * and might allow probe to pass even if there are certain issues.
	 */
	bool b_relaxed_probe;
	enum ecore_hw_prepare_result p_relaxed_res;

	/* Enable/disable request by ecore client for pacing */
	bool b_en_pacing;

	/* Indicates whether this PF serves a storage target */
	bool b_is_target;
};

/**
 * @brief ecore_hw_prepare - prepare the device for the load flow
 *
 * @param p_dev
 * @param p_params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
				      struct ecore_hw_prepare_params *p_params);

/**
 * @brief ecore_hw_remove - remove the device
 *
 * @param p_dev
 */
void ecore_hw_remove(struct ecore_dev *p_dev);

/**
 * @brief ecore_ptt_acquire - Allocate a PTT window
 *
 * Should be called at the entry point to the driver (at the beginning of an
 * exported function)
 *
 * @param p_hwfn
 *
 * @return struct ecore_ptt *
 */
struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn);

/**
 * @brief ecore_ptt_release - Release a PTT window
 *
 * Should be called at the end of a flow - at the end of the function that
 * acquired the PTT.
 *
 * @param p_hwfn
 * @param p_ptt
 */
void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt);
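
/* Illustrative sketch (not part of the API, hence guarded out): the
 * expected PTT usage pattern - acquire at the entry point of an exported
 * flow, release at its end. ecore_wr() is assumed here as a
 * representative register accessor that requires a PTT; REG_ADDR is a
 * hypothetical register offset.
 */
#if 0
static enum _ecore_status_t example_reg_write(struct ecore_hwfn *p_hwfn,
					      u32 val)
{
	struct ecore_ptt *p_ptt;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (p_ptt == OSAL_NULL)
		return ECORE_AGAIN;	/* no free PTT window */

	ecore_wr(p_hwfn, p_ptt, REG_ADDR, val);

	ecore_ptt_release(p_hwfn, p_ptt);
	return ECORE_SUCCESS;
}
#endif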

struct ecore_eth_stats_common {
	u64 no_buff_discards;
	u64 packet_too_big_discard;
	u64 ttl0_discard;
	u64 rx_ucast_bytes;
	u64 rx_mcast_bytes;
	u64 rx_bcast_bytes;
	u64 rx_ucast_pkts;
	u64 rx_mcast_pkts;
	u64 rx_bcast_pkts;
	u64 mftag_filter_discards;
	u64 mac_filter_discards;
	u64 gft_filter_drop;
	u64 tx_ucast_bytes;
	u64 tx_mcast_bytes;
	u64 tx_bcast_bytes;
	u64 tx_ucast_pkts;
	u64 tx_mcast_pkts;
	u64 tx_bcast_pkts;
	u64 tx_err_drop_pkts;
	u64 tpa_coalesced_pkts;
	u64 tpa_coalesced_events;
	u64 tpa_aborts_num;
	u64 tpa_not_coalesced_pkts;
	u64 tpa_coalesced_bytes;

	/* port */
	u64 rx_64_byte_packets;
	u64 rx_65_to_127_byte_packets;
	u64 rx_128_to_255_byte_packets;
	u64 rx_256_to_511_byte_packets;
	u64 rx_512_to_1023_byte_packets;
	u64 rx_1024_to_1518_byte_packets;
	u64 rx_crc_errors;
	u64 rx_mac_crtl_frames;
	u64 rx_pause_frames;
	u64 rx_pfc_frames;
	u64 rx_align_errors;
	u64 rx_carrier_errors;
	u64 rx_oversize_packets;
	u64 rx_jabbers;
	u64 rx_undersize_packets;
	u64 rx_fragments;
	u64 tx_64_byte_packets;
	u64 tx_65_to_127_byte_packets;
	u64 tx_128_to_255_byte_packets;
	u64 tx_256_to_511_byte_packets;
	u64 tx_512_to_1023_byte_packets;
	u64 tx_1024_to_1518_byte_packets;
	u64 tx_pause_frames;
	u64 tx_pfc_frames;
	u64 brb_truncates;
	u64 brb_discards;
	u64 rx_mac_bytes;
	u64 rx_mac_uc_packets;
	u64 rx_mac_mc_packets;
	u64 rx_mac_bc_packets;
	u64 rx_mac_frames_ok;
	u64 tx_mac_bytes;
	u64 tx_mac_uc_packets;
	u64 tx_mac_mc_packets;
	u64 tx_mac_bc_packets;
	u64 tx_mac_ctrl_frames;
	u64 link_change_count;
};

struct ecore_eth_stats_bb {
	u64 rx_1519_to_1522_byte_packets;
	u64 rx_1519_to_2047_byte_packets;
	u64 rx_2048_to_4095_byte_packets;
	u64 rx_4096_to_9216_byte_packets;
	u64 rx_9217_to_16383_byte_packets;
	u64 tx_1519_to_2047_byte_packets;
	u64 tx_2048_to_4095_byte_packets;
	u64 tx_4096_to_9216_byte_packets;
	u64 tx_9217_to_16383_byte_packets;
	u64 tx_lpi_entry_count;
	u64 tx_total_collisions;
};

struct ecore_eth_stats_ah {
	u64 rx_1519_to_max_byte_packets;
	u64 tx_1519_to_max_byte_packets;
};

struct ecore_eth_stats {
	struct ecore_eth_stats_common common;
	union {
		struct ecore_eth_stats_bb bb;
		struct ecore_eth_stats_ah ah;
	};
};
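
/* Illustrative sketch (not part of the API, hence guarded out): the
 * 'bb'/'ah' union members of struct ecore_eth_stats are chip-specific,
 * so a reader picks one based on the device family. ECORE_IS_BB() is
 * assumed here as the usual chip-type predicate.
 */
#if 0
static u64 example_rx_jumbo_packets(struct ecore_dev *p_dev,
				    struct ecore_eth_stats *stats)
{
	if (ECORE_IS_BB(p_dev))
		return stats->bb.rx_2048_to_4095_byte_packets +
		       stats->bb.rx_4096_to_9216_byte_packets;

	/* AH exposes a single bucket above 1518 bytes */
	return stats->ah.rx_1519_to_max_byte_packets;
}
#endif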

enum ecore_dmae_address_type_t {
	ECORE_DMAE_ADDRESS_HOST_VIRT,
	ECORE_DMAE_ADDRESS_HOST_PHYS,
	ECORE_DMAE_ADDRESS_GRC
};

/* Values of the 'flags' field. If the ECORE_DMAE_FLAG_RW_REPL_SRC flag is
 * set, the source is a block of length DMAE_MAX_RW_SIZE and the
 * destination is larger, the source block will be duplicated as many
 * times as required to fill the destination block. This is used mostly
 * to write a zeroed buffer to a destination address using DMA.
 */
#define ECORE_DMAE_FLAG_RW_REPL_SRC	0x00000001
#define ECORE_DMAE_FLAG_VF_SRC		0x00000002
#define ECORE_DMAE_FLAG_VF_DST		0x00000004
#define ECORE_DMAE_FLAG_COMPLETION_DST	0x00000008
#define ECORE_DMAE_FLAG_PORT		0x00000010
#define ECORE_DMAE_FLAG_PF_SRC		0x00000020
#define ECORE_DMAE_FLAG_PF_DST		0x00000040

struct ecore_dmae_params {
	u32 flags;	/* consists of ECORE_DMAE_FLAG_* values */
	u8 src_vfid;
	u8 dst_vfid;
	u8 port_id;
	u8 src_pfid;
	u8 dst_pfid;
};

/**
 * @brief ecore_dmae_host2grc - copy data from the source address to a
 *        GRC address using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_addr
 * @param grc_addr (dmae_data_offset)
 * @param size_in_dwords
 * @param p_params (default parameters will be used in case of OSAL_NULL)
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt,
		    u64 source_addr,
		    u32 grc_addr,
		    u32 size_in_dwords,
		    struct ecore_dmae_params *p_params);

/**
 * @brief ecore_dmae_grc2host - Read data from a GRC address into the
 *        destination address using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param grc_addr (dmae_data_offset)
 * @param dest_addr
 * @param size_in_dwords
 * @param p_params (default parameters will be used in case of OSAL_NULL)
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt,
		    u32 grc_addr,
		    dma_addr_t dest_addr,
		    u32 size_in_dwords,
		    struct ecore_dmae_params *p_params);

/**
 * @brief ecore_dmae_host2host - copy data from a source address to a
 *        destination address (for SRIOV) using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_addr
 * @param dest_addr
 * @param size_in_dwords
 * @param p_params (default parameters will be used in case of OSAL_NULL)
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt,
		     dma_addr_t source_addr,
		     dma_addr_t dest_addr,
		     u32 size_in_dwords,
		     struct ecore_dmae_params *p_params);

/**
 * @brief ecore_chain_alloc - Allocate and initialize a chain
 *
 * @param p_dev
 * @param intended_use
 * @param mode
 * @param cnt_type
 * @param num_elems
 * @param elem_size
 * @param p_chain
 * @param ext_pbl
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_chain_alloc(struct ecore_dev *p_dev,
		  enum ecore_chain_use_mode intended_use,
		  enum ecore_chain_mode mode,
		  enum ecore_chain_cnt_type cnt_type,
		  u32 num_elems,
		  osal_size_t elem_size,
		  struct ecore_chain *p_chain,
		  struct ecore_chain_ext_pbl *ext_pbl);

/**
 * @brief ecore_chain_free - Free chain DMA memory
 *
 * @param p_dev
 * @param p_chain
 */
void ecore_chain_free(struct ecore_dev *p_dev,
		      struct ecore_chain *p_chain);
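
/* Illustrative sketch (not part of the API, hence guarded out): a DMAE
 * copy using the default parameters (OSAL_NULL), and a chain allocated in
 * PBL mode together with its mandatory free. The mode/count-type values
 * come from ecore_chain.h; the element size and count are hypothetical.
 */
#if 0
static enum _ecore_status_t example_dmae_and_chain(struct ecore_dev *p_dev,
						   struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt,
						   u64 src, u32 grc_addr)
{
	struct ecore_chain chain;
	enum _ecore_status_t rc;

	/* Copy 16 dwords from host memory to a GRC address; OSAL_NULL
	 * requests the default ecore_dmae_params.
	 */
	rc = ecore_dmae_host2grc(p_hwfn, p_ptt, src, grc_addr, 16, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* A produce/consume chain of 1024 16-byte elements, PBL mode,
	 * 16-bit element counters, no external PBL.
	 */
	rc = ecore_chain_alloc(p_dev, ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
			       ECORE_CHAIN_MODE_PBL, ECORE_CHAIN_CNT_TYPE_U16,
			       1024, 16, &chain, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* ... use the chain ... */

	ecore_chain_free(p_dev, &chain);
	return ECORE_SUCCESS;
}
#endif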

/**
 * @brief ecore_fw_l2_queue - Get absolute L2 queue ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
				       u16 src_id,
				       u16 *dst_id);

/**
 * @brief ecore_fw_vport - Get absolute vport ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
				    u8 src_id,
				    u8 *dst_id);

/**
 * @brief ecore_fw_rss_eng - Get absolute RSS engine ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
				      u8 src_id,
				      u8 *dst_id);

/**
 * @brief ecore_llh_get_num_ppfid - Return the number of LLH filter banks
 *        that are allocated to the PF.
 *
 * @param p_dev
 *
 * @return u8 - Number of LLH filter banks
 */
u8 ecore_llh_get_num_ppfid(struct ecore_dev *p_dev);

enum ecore_eng {
	ECORE_ENG0,
	ECORE_ENG1,
	ECORE_BOTH_ENG,
};

/**
 * @brief ecore_llh_get_l2_affinity_hint - Return the hint for the L2
 *        affinity
 *
 * @param p_dev
 *
 * @return enum ecore_eng - L2 affinity hint
 */
enum ecore_eng ecore_llh_get_l2_affinity_hint(struct ecore_dev *p_dev);

/**
 * @brief ecore_llh_set_ppfid_affinity - Set the engine affinity for the
 *        given LLH filter bank.
 *
 * @param p_dev
 * @param ppfid - relative within the allocated ppfids ('0' is the default
 *                one).
 * @param eng
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_llh_set_ppfid_affinity(struct ecore_dev *p_dev,
						  u8 ppfid, enum ecore_eng eng);

/**
 * @brief ecore_llh_set_roce_affinity - Set the RoCE engine affinity
 *
 * @param p_dev
 * @param eng
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_llh_set_roce_affinity(struct ecore_dev *p_dev,
						 enum ecore_eng eng);

/**
 * @brief ecore_llh_add_mac_filter - Add a LLH MAC filter into the given
 *        filter bank.
 *
 * @param p_dev
 * @param ppfid - relative within the allocated ppfids ('0' is the default
 *                one).
 * @param mac_addr - MAC to add
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
					      u8 mac_addr[ETH_ALEN]);

/**
 * @brief ecore_llh_remove_mac_filter - Remove a LLH MAC filter from the
 *        given filter bank.
 *
 * @param p_dev
 * @param ppfid - relative within the allocated ppfids ('0' is the default
 *                one).
 * @param mac_addr - MAC to remove
 */
void ecore_llh_remove_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
				 u8 mac_addr[ETH_ALEN]);
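
/* Illustrative sketch (not part of the API, hence guarded out): adding a
 * unicast MAC to the default LLH filter bank (ppfid 0) and removing it on
 * teardown.
 */
#if 0
static enum _ecore_status_t example_llh_mac(struct ecore_dev *p_dev,
					    u8 mac[ETH_ALEN], bool add)
{
	if (add)
		return ecore_llh_add_mac_filter(p_dev, 0 /* default ppfid */,
						mac);

	ecore_llh_remove_mac_filter(p_dev, 0 /* default ppfid */, mac);
	return ECORE_SUCCESS;
}
#endif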

enum ecore_llh_prot_filter_type_t {
	ECORE_LLH_FILTER_ETHERTYPE,
	ECORE_LLH_FILTER_TCP_SRC_PORT,
	ECORE_LLH_FILTER_TCP_DEST_PORT,
	ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT,
	ECORE_LLH_FILTER_UDP_SRC_PORT,
	ECORE_LLH_FILTER_UDP_DEST_PORT,
	ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT
};

/**
 * @brief ecore_llh_add_protocol_filter - Add a LLH protocol filter into
 *        the given filter bank.
 *
 * @param p_dev
 * @param ppfid - relative within the allocated ppfids ('0' is the default
 *                one).
 * @param type - type of filters and comparing
 * @param source_port_or_eth_type - source port or ethertype to add
 * @param dest_port - destination port to add
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_llh_add_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
			      enum ecore_llh_prot_filter_type_t type,
			      u16 source_port_or_eth_type, u16 dest_port);

/**
 * @brief ecore_llh_remove_protocol_filter - Remove a LLH protocol filter
 *        from the given filter bank.
 *
 * @param p_dev
 * @param ppfid - relative within the allocated ppfids ('0' is the default
 *                one).
 * @param type - type of filters and comparing
 * @param source_port_or_eth_type - source port or ethertype to remove
 * @param dest_port - destination port to remove
 */
void ecore_llh_remove_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
				      enum ecore_llh_prot_filter_type_t type,
				      u16 source_port_or_eth_type,
				      u16 dest_port);

/**
 * @brief ecore_llh_clear_ppfid_filters - Remove all LLH filters from the
 *        given filter bank.
 *
 * @param p_dev
 * @param ppfid - relative within the allocated ppfids ('0' is the default
 *                one).
 */
void ecore_llh_clear_ppfid_filters(struct ecore_dev *p_dev, u8 ppfid);

/**
 * @brief ecore_llh_clear_all_filters - Remove all LLH filters
 *
 * @param p_dev
 */
void ecore_llh_clear_all_filters(struct ecore_dev *p_dev);

/**
 * @brief ecore_llh_set_function_as_default - set function as default
 *        per port
 *
 * @param p_hwfn
 * @param p_ptt
 */
enum _ecore_status_t
ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt);

/**
 * @brief ecore_final_cleanup - Cleanup of previous driver remains prior
 *        to load
 *
 * @param p_hwfn
 * @param p_ptt
 * @param id - For PF, engine-relative. For VF, PF-relative.
 * @param is_vf - true iff cleanup is made for a VF.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 id,
					 bool is_vf);

/**
 * @brief ecore_get_queue_coalesce - Retrieve the coalesce value for a
 *        given queue.
 *
 * @param p_hwfn
 * @param p_coal - store coalesce value read from the hardware.
 * @param p_handle
 *
 * @return enum _ecore_status_t
 **/
enum _ecore_status_t
ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *p_coal,
			 void *p_handle);

/**
 * @brief ecore_set_queue_coalesce - Configure coalesce parameters for Rx
 *        and Tx queue. Coalescing can be configured up to 511usec, but
 *        with decreasing accuracy [the bigger the value, the less
 *        accurate], up to an error of 3usec for the highest values.
 *        While the API allows setting coalescing per-qid, all queues
 *        sharing a SB should be in the same range [i.e., either 0-0x7f,
 *        0x80-0xff or 0x100-0x1ff], otherwise the configuration would
 *        break.
 *
 * @param p_hwfn
 * @param rx_coal - Rx Coalesce value in micro seconds.
 * @param tx_coal - Tx Coalesce value in micro seconds.
 * @param p_handle
 *
 * @return enum _ecore_status_t
 **/
enum _ecore_status_t
ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal,
			 u16 tx_coal, void *p_handle);
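
/* Illustrative sketch (not part of the API, hence guarded out): reading
 * back and updating the coalescing of a queue. Per the note above, all
 * queues sharing a SB must stay within the same coalescing range;
 * 'p_rx_handle' is a hypothetical queue handle obtained from the L2
 * queue-start flow.
 */
#if 0
static enum _ecore_status_t example_coalesce(struct ecore_hwfn *p_hwfn,
					     void *p_rx_handle)
{
	u16 coal = 0;
	enum _ecore_status_t rc;

	rc = ecore_get_queue_coalesce(p_hwfn, &coal, p_rx_handle);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Set both Rx and Tx to 64usec - inside the 0-0x7f range */
	return ecore_set_queue_coalesce(p_hwfn, 64, 64, p_rx_handle);
}
#endif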

/**
 * @brief ecore_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER
 *
 * @param p_hwfn
 * @param p_ptt
 * @param b_enable - true/false
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt,
						  bool b_enable);

/**
 * @brief ecore_is_mf_fip_special - Whether FIP discovery fallback special
 *        mode is enabled or not.
 *
 * @param p_dev
 *
 * @return true if the device is in FIP special mode, false otherwise.
 */
bool ecore_is_mf_fip_special(struct ecore_dev *p_dev);
#endif