/* 
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#ifndef __ECORE_L2_API_H__
#define __ECORE_L2_API_H__

#include "ecore_status.h"
#include "ecore_sp_api.h"
#include "ecore_int_api.h"

/* L2 (Ethernet) API of the ecore core driver: VPort lifecycle, Rx/Tx queue
 * start/stop ramrods, unicast/multicast/accept filtering, RSS/TPA
 * configuration and per-vport statistics.
 */

#ifndef __EXTRACT__LINUX__
/* RSS hash-capability bit flags; values may be OR'd together into
 * ecore_rss_params.rss_caps.
 */
enum ecore_rss_caps {
	ECORE_RSS_IPV4 = 0x1,
	ECORE_RSS_IPV6 = 0x2,
	ECORE_RSS_IPV4_TCP = 0x4,
	ECORE_RSS_IPV6_TCP = 0x8,
	ECORE_RSS_IPV4_UDP = 0x10,
	ECORE_RSS_IPV6_UDP = 0x20,
};

/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
#define ECORE_RSS_IND_TABLE_SIZE 128
#define ECORE_RSS_KEY_SIZE 10 /* size in 32b chunks */
#endif

/* Common input parameters for starting either an Rx or a Tx queue. */
struct ecore_queue_start_common_params {
	/* Should always be relative to entity sending this. */
	u8 vport_id;
	u16 queue_id;

	/* Relative, but relevant only for PFs */
	u8 stats_id;

	/* Status block this queue's completions are bound to */
	struct ecore_sb_info *p_sb;
	u8 sb_idx;
};

/* Outputs of a successful Rx-queue start. */
struct ecore_rxq_start_ret_params {
	void OSAL_IOMEM *p_prod;	/* producer update register (IOMEM) */
	void *p_handle;			/* opaque queue handle for later calls */
};

/* Outputs of a successful Tx-queue start. */
struct ecore_txq_start_ret_params {
	void OSAL_IOMEM *p_doorbell;	/* doorbell register (IOMEM) */
	void *p_handle;			/* opaque queue handle for later calls */
};

/* RSS configuration; the update_* flags select which sub-fields the
 * vport-update ramrod should actually apply.
 */
struct ecore_rss_params {
	u8 update_rss_config;
	u8 rss_enable;
	u8 rss_eng_id;
	u8 update_rss_capabilities;
	u8 update_rss_ind_table;
	u8 update_rss_key;
	u8 rss_caps;		/* combination of enum ecore_rss_caps bits */
	u8 rss_table_size_log;	/* The table size is 2 ^ rss_table_size_log */

	/* Indirection table consists of rx queue handles */
	void *rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
	u32 rss_key[ECORE_RSS_KEY_SIZE];
};

/* TPA (aggregation) configuration; the update_* flags select which group of
 * fields below them is to be applied.
 */
struct ecore_sge_tpa_params {
	u8 max_buffers_per_cqe;

	u8 update_tpa_en_flg;		/* applies the 4 *_en_flg below */
	u8 tpa_ipv4_en_flg;
	u8 tpa_ipv6_en_flg;
	u8 tpa_ipv4_tunn_en_flg;
	u8 tpa_ipv6_tunn_en_flg;

	u8 update_tpa_param_flg;	/* applies the remaining fields */
	u8 tpa_pkt_split_flg;
	u8 tpa_hdr_data_split_flg;
	u8 tpa_gro_consistent_flg;
	u8 tpa_max_aggs_num;
	u16 tpa_max_size;
	u16 tpa_min_size_to_start;
	u16 tpa_min_size_to_cont;
};

enum ecore_filter_opcode {
	ECORE_FILTER_ADD,
	ECORE_FILTER_REMOVE,
	ECORE_FILTER_MOVE,
	ECORE_FILTER_REPLACE,	/* Delete all MACs and add new one instead */
	ECORE_FILTER_FLUSH,	/* Removes all filters */
};

enum ecore_filter_ucast_type {
	ECORE_FILTER_MAC,
	ECORE_FILTER_VLAN,
	ECORE_FILTER_MAC_VLAN,
	ECORE_FILTER_INNER_MAC,
	ECORE_FILTER_INNER_VLAN,
	ECORE_FILTER_INNER_PAIR,
	ECORE_FILTER_INNER_MAC_VNI_PAIR,
	ECORE_FILTER_MAC_VNI_PAIR,
	ECORE_FILTER_VNI,
	ECORE_FILTER_UNUSED, /* @DPDK */
};

/* A single unicast filter command; which of mac/vlan/vni is meaningful
 * depends on 'type'.
 */
struct ecore_filter_ucast {
	enum ecore_filter_opcode opcode;
	enum ecore_filter_ucast_type type;
	u8 is_rx_filter;
	u8 is_tx_filter;
	u8 vport_to_add_to;
	u8 vport_to_remove_from;
	unsigned char mac[ETH_ALEN];
	u8 assert_on_error;
	u16 vlan;
	u32 vni;
};

struct ecore_filter_mcast {
	/* MOVE is not supported for multicast */
	enum ecore_filter_opcode opcode;
	u8 vport_to_add_to;
	u8 vport_to_remove_from;
	u8 num_mc_addrs;	/* number of valid entries in mac[] */
#define ECORE_MAX_MC_ADDRS	64
	unsigned char mac[ECORE_MAX_MC_ADDRS][ETH_ALEN];
};

/* Rx/Tx accept-mode configuration. The rx/tx_accept_filter fields hold a
 * combination of the ECORE_ACCEPT_* bits below; the update_* flags select
 * which direction is applied.
 */
struct ecore_filter_accept_flags {
	u8 update_rx_mode_config;
	u8 update_tx_mode_config;
	u8 rx_accept_filter;
	u8 tx_accept_filter;
#define ECORE_ACCEPT_NONE		0x01
#define ECORE_ACCEPT_UCAST_MATCHED	0x02
#define ECORE_ACCEPT_UCAST_UNMATCHED	0x04
#define ECORE_ACCEPT_MCAST_MATCHED	0x08
#define ECORE_ACCEPT_MCAST_UNMATCHED	0x10
#define ECORE_ACCEPT_BCAST		0x20
};

/* Inputs for ecore_arfs_mode_configure(). */
struct ecore_arfs_config_params {
	bool tcp;
	bool udp;
	bool ipv4;
	bool ipv6;
	bool arfs_enable;	/* Enable or disable arfs mode */
};

/* Add / remove / move / remove-all unicast MAC-VLAN filters.
 * FW will assert in the following cases, so driver should take care...:
 * 1. Adding a filter to a full table.
 * 2. Adding a filter which already exists on that vport.
 * 3. Removing a filter which doesn't exist.
 */
enum _ecore_status_t
ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
		       struct ecore_filter_ucast *p_filter_cmd,
		       enum spq_mode comp_mode,
		       struct ecore_spq_comp_cb *p_comp_data);

/* Add / remove / move multicast MAC filters. */
enum _ecore_status_t
ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
		       struct ecore_filter_mcast *p_filter_cmd,
		       enum spq_mode comp_mode,
		       struct ecore_spq_comp_cb *p_comp_data);

/* Set "accept" filters */
enum _ecore_status_t
ecore_filter_accept_cmd(struct ecore_dev *p_dev,
			u8 vport,
			struct ecore_filter_accept_flags accept_flags,
			u8 update_accept_any_vlan,
			u8 accept_any_vlan,
			enum spq_mode comp_mode,
			struct ecore_spq_comp_cb *p_comp_data);

/**
 * @brief ecore_eth_rx_queue_start - RX Queue Start Ramrod
 *
 * This ramrod initializes an RX Queue for a VPort. An Assert is generated if
 * the VPort ID is not currently initialized.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param p_params Inputs; Relative for PF [SB being an exception]
 * @param bd_max_bytes Maximum bytes that can be placed on a BD
 * @param bd_chain_phys_addr Physical address of BDs for receive.
 * @param cqe_pbl_addr Physical address of the CQE PBL Table.
 * @param cqe_pbl_size Size of the CQE PBL Table
 * @param p_ret_params Pointed struct to be filled with outputs.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
			 u16 opaque_fid,
			 struct ecore_queue_start_common_params *p_params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 struct ecore_rxq_start_ret_params *p_ret_params);

/**
 * @brief ecore_eth_rx_queue_stop - This ramrod closes an Rx queue
 *
 * @param p_hwfn
 * @param p_rxq			Handler of queue to close
 * @param eq_completion_only	If True completion will be on
 *				EQe, if False completion will be
 *				on EQe if p_hwfn opaque
 *				different from the RXQ opaque
 *				otherwise on CQe.
 * @param cqe_completion	If True completion will be
 *				receive on CQe.
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
			void *p_rxq,
			bool eq_completion_only,
			bool cqe_completion);

/**
 * @brief - TX Queue Start Ramrod
 *
 * This ramrod initializes a TX Queue for a VPort. An Assert is generated if
 * the VPort is not currently initialized.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param p_params
 * @param tc traffic class to use with this L2 txq
 * @param pbl_addr address of the pbl array
 * @param pbl_size number of entries in pbl
 * @param p_ret_params Pointer to fill the return parameters in.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
			 u16 opaque_fid,
			 struct ecore_queue_start_common_params *p_params,
			 u8 tc,
			 dma_addr_t pbl_addr,
			 u16 pbl_size,
			 struct ecore_txq_start_ret_params *p_ret_params);

/**
 * @brief ecore_eth_tx_queue_stop - closes a Tx queue
 *
 * @param p_hwfn
 * @param p_txq - handle to Tx queue needed to be closed
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
					     void *p_txq);

/* Aggregation (TPA) operating mode for a vport. */
enum ecore_tpa_mode {
	ECORE_TPA_MODE_NONE,
	ECORE_TPA_MODE_RSC,
	ECORE_TPA_MODE_GRO,
	ECORE_TPA_MODE_MAX
};

/* Inputs for ecore_sp_vport_start(). */
struct ecore_sp_vport_start_params {
	enum ecore_tpa_mode tpa_mode;
	bool remove_inner_vlan;	/* Inner VLAN removal is enabled */
	bool tx_switching;	/* Vport supports tx-switching */
	bool handle_ptp_pkts;	/* Handle PTP packets */
	bool only_untagged;	/* Untagged pkt control */
	bool drop_ttl0;		/* Drop packets with TTL = 0 */
	u8 max_buffers_per_cqe;
	u32 concrete_fid;
	u16 opaque_fid;
	u8 vport_id;		/* VPORT ID */
	u16 mtu;		/* VPORT MTU */
	bool zero_placement_offset;
	bool check_mac;
	bool check_ethtype;

	/* Strict behavior on transmission errors */
	bool b_err_illegal_vlan_mode;
	bool b_err_illegal_inband_mode;
	bool b_err_vlan_insert_with_inband;
	bool b_err_small_pkt;
	bool b_err_big_pkt;
	bool b_err_anti_spoof;
	bool b_err_ctrl_frame;
};

/**
 * @brief ecore_sp_vport_start -
 *
 * This ramrod initializes a VPort. An Assert is generated if the Function ID
 * of the VPort is not enabled.
 *
 * @param p_hwfn
 * @param p_params VPORT start params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
		     struct ecore_sp_vport_start_params *p_params);

/* Inputs for ecore_sp_vport_update(); each update_*_flg gates whether its
 * companion value field is applied.
 */
struct ecore_sp_vport_update_params {
	u16 opaque_fid;
	u8 vport_id;
	u8 update_vport_active_rx_flg;
	u8 vport_active_rx_flg;
	u8 update_vport_active_tx_flg;
	u8 vport_active_tx_flg;
	u8 update_inner_vlan_removal_flg;
	u8 inner_vlan_removal_flg;
	u8 silent_vlan_removal_flg;
	u8 update_default_vlan_enable_flg;
	u8 default_vlan_enable_flg;
	u8 update_default_vlan_flg;
	u16 default_vlan;
	u8 update_tx_switching_flg;
	u8 tx_switching_flg;
	u8 update_approx_mcast_flg;
	u8 update_anti_spoofing_en_flg;
	u8 anti_spoofing_en;
	u8 update_accept_any_vlan_flg;
	u8 accept_any_vlan;
	u32 bins[8];	/* approximate-multicast bins; used when
			 * update_approx_mcast_flg is set
			 */
	struct ecore_rss_params *rss_params;		/* optional; NULL = no RSS change */
	struct ecore_filter_accept_flags accept_flags;
	struct ecore_sge_tpa_params *sge_tpa_params;	/* optional; NULL = no TPA change */
	/* MTU change - notice this requires the vport to be disabled.
	 * If non-zero, value would be used.
	 */
	u16 mtu;
};

/**
 * @brief ecore_sp_vport_update -
 *
 * This ramrod updates the parameters of the VPort. Every field can be updated
 * independently, according to flags.
 *
 * This ramrod is also used to set the VPort state to active after creation.
 * An Assert is generated if the VPort does not contain an RX queue.
 *
 * @param p_hwfn
 * @param p_params
 * @param comp_mode
 * @param p_comp_data
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
		      struct ecore_sp_vport_update_params *p_params,
		      enum spq_mode comp_mode,
		      struct ecore_spq_comp_cb *p_comp_data);

/**
 * @brief ecore_sp_vport_stop -
 *
 * This ramrod closes a VPort after all its RX and TX queues are terminated.
 * An Assert is generated if any queues are left open.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param vport_id VPort ID
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
					 u16 opaque_fid,
					 u8 vport_id);

/* Per-hwfn variant of the unicast filter command; see
 * ecore_filter_ucast_cmd() for the per-device wrapper.
 */
enum _ecore_status_t
ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct ecore_filter_ucast *p_filter_cmd,
			  enum spq_mode comp_mode,
			  struct ecore_spq_comp_cb *p_comp_data);

/**
 * @brief ecore_sp_eth_rx_queues_update -
 *
 * This ramrod updates an RX queue. It is used for setting the active state
 * of the queue and updating the TPA and SGE parameters.
 *
 * @note Final phase API.
 *
 * @param p_hwfn
 * @param pp_rxq_handlers	An array of queue handlers to be updated.
 * @param num_rxqs		number of queues to update.
 * @param complete_cqe_flg	Post completion to the CQE Ring if set
 * @param complete_event_flg	Post completion to the Event Ring if set
 * @param comp_mode
 * @param p_comp_data
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
			      void **pp_rxq_handlers,
			      u8 num_rxqs,
			      u8 complete_cqe_flg,
			      u8 complete_event_flg,
			      enum spq_mode comp_mode,
			      struct ecore_spq_comp_cb *p_comp_data);

/* Per-hwfn statistics read; requires an acquired PTT window. */
void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     struct ecore_eth_stats *stats,
			     u16 statistics_bin, bool b_get_port_stats);

/* Fill 'stats' with the device's vport statistics. */
void ecore_get_vport_stats(struct ecore_dev *p_dev,
			   struct ecore_eth_stats *stats);

/* Reset the device's vport statistics counters. */
void ecore_reset_vport_stats(struct ecore_dev *p_dev);

/**
 * @brief ecore_arfs_mode_configure -
 *
 * Enable or disable rfs mode. It must accept at least one of tcp or udp true
 * and at least one of ipv4 or ipv6 true to enable rfs mode.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_cfg_params arfs mode configuration parameters.
 *
 */
void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct ecore_arfs_config_params *p_cfg_params);

/**
 * @brief ecore_configure_rfs_ntuple_filter -
 *
 * This ramrod should be used to add or remove arfs hw filter
 *
 * @param p_hwfn
 * @param p_cb	Used for ECORE_SPQ_MODE_CB, where client would initialize
 *		it with cookie and callback function address, if not
 *		using this mode then client must pass NULL.
 * @param p_addr p_addr is an actual packet header that needs to be
 *		filtered. It has to be mapped with IO to read prior to
 *		calling this, [contains 4 tuples - src ip, dest ip,
 *		src port, dest port].
 * @param length length of p_addr header up to past the transport header.
 * @param qid	receive packet will be directed to this queue.
 * @param vport_id
 * @param b_is_add flag to add or remove filter.
 *
 */
enum _ecore_status_t
ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
				  struct ecore_spq_comp_cb *p_cb,
				  dma_addr_t p_addr, u16 length,
				  u16 qid, u8 vport_id,
				  bool b_is_add);
#endif /* __ECORE_L2_API_H__ */