xref: /dpdk/drivers/net/qede/base/ecore_l2_api.h (revision 3b307c55f2ac7f3f4146bd0dc9b474e1f3076f97)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2016 - 2018 Cavium Inc.
3  * All rights reserved.
4  * www.cavium.com
5  */
6 
7 #ifndef __ECORE_L2_API_H__
8 #define __ECORE_L2_API_H__
9 
10 #include "ecore_status.h"
11 #include "ecore_sp_api.h"
12 #include "ecore_int_api.h"
13 
#ifndef __EXTRACT__LINUX__
/* Bitmask of the protocol/transport combinations an RSS engine may hash on. */
enum ecore_rss_caps {
	ECORE_RSS_IPV4		= 1 << 0,
	ECORE_RSS_IPV6		= 1 << 1,
	ECORE_RSS_IPV4_TCP	= 1 << 2,
	ECORE_RSS_IPV6_TCP	= 1 << 3,
	ECORE_RSS_IPV4_UDP	= 1 << 4,
	ECORE_RSS_IPV6_UDP	= 1 << 5,
};

/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
#define ECORE_RSS_IND_TABLE_SIZE 128
#define ECORE_RSS_KEY_SIZE 10 /* size in 32b chunks */
#endif
28 
/* Input parameters common to the Rx and Tx queue start ramrods. */
struct ecore_queue_start_common_params {
	/* Should always be relative to entity sending this. */
	u8 vport_id;
	u16 queue_id;

	/* Relative, but relevant only for PFs */
	u8 stats_id;

	/* Status block the queue is associated with */
	struct ecore_sb_info *p_sb;
	u8 sb_idx;	/* Index within that status block */
};
40 
/* Outputs of a successful Rx queue start ramrod. */
struct ecore_rxq_start_ret_params {
	void OSAL_IOMEM *p_prod;	/* I/O-mapped Rx producer address */
	void *p_handle;			/* Opaque handle for later queue API calls */
};
45 
/* Outputs of a successful Tx queue start ramrod. */
struct ecore_txq_start_ret_params {
	void OSAL_IOMEM *p_doorbell;	/* I/O-mapped Tx doorbell address */
	void *p_handle;			/* Opaque handle for later queue API calls */
};
50 
/* RSS configuration; each 'update_*' flag gates whether the associated
 * value(s) are applied by the vport-update ramrod.
 */
struct ecore_rss_params {
	u8 update_rss_config;
	u8 rss_enable;
	u8 rss_eng_id;		/* RSS engine ID */
	u8 update_rss_capabilities;
	u8 update_rss_ind_table;
	u8 update_rss_key;
	u8 rss_caps;		/* Bitmask built from enum ecore_rss_caps */
	u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */

	/* Indirection table consist of rx queue handles */
	void *rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
	u32 rss_key[ECORE_RSS_KEY_SIZE];
};
65 
/* TPA (aggregation) tuning parameters; the 'update_*' flags select which
 * sub-group of fields below is applied.
 */
struct ecore_sge_tpa_params {
	u8 max_buffers_per_cqe;

	u8 update_tpa_en_flg;		/* Apply the tpa_*_en_flg group */
	u8 tpa_ipv4_en_flg;
	u8 tpa_ipv6_en_flg;
	u8 tpa_ipv4_tunn_en_flg;
	u8 tpa_ipv6_tunn_en_flg;

	u8 update_tpa_param_flg;	/* Apply the aggregation tuning group */
	u8 tpa_pkt_split_flg;
	u8 tpa_hdr_data_split_flg;
	u8 tpa_gro_consistent_flg;
	u8 tpa_max_aggs_num;
	u16 tpa_max_size;		/* Maximal aggregated packet size */
	u16 tpa_min_size_to_start;	/* Minimal packet size to start aggregation */
	u16 tpa_min_size_to_cont;	/* Minimal packet size to continue aggregation */
};
84 
/* Operation to perform on an L2 classification filter. */
enum ecore_filter_opcode {
	ECORE_FILTER_ADD,
	ECORE_FILTER_REMOVE,
	ECORE_FILTER_MOVE,
	ECORE_FILTER_REPLACE, /* Delete all MACs and add new one instead */
	ECORE_FILTER_FLUSH, /* Removes all filters */
};
92 
/* Match criterion of a unicast filter; selects which of the mac/vlan/vni
 * fields in struct ecore_filter_ucast are relevant.
 */
enum ecore_filter_ucast_type {
	ECORE_FILTER_MAC,
	ECORE_FILTER_VLAN,
	ECORE_FILTER_MAC_VLAN,
	ECORE_FILTER_INNER_MAC,
	ECORE_FILTER_INNER_VLAN,
	ECORE_FILTER_INNER_PAIR,
	ECORE_FILTER_INNER_MAC_VNI_PAIR,
	ECORE_FILTER_MAC_VNI_PAIR,
	ECORE_FILTER_VNI,
	ECORE_FILTER_UNUSED, /* @DPDK */
};
105 
/* A single unicast filter command. Which of mac/vlan/vni apply is
 * determined by 'type'.
 */
struct ecore_filter_ucast {
	enum ecore_filter_opcode opcode;
	enum ecore_filter_ucast_type type;
	u8 is_rx_filter;		/* Apply on the Rx side */
	u8 is_tx_filter;		/* Apply on the Tx side */
	u8 vport_to_add_to;
	u8 vport_to_remove_from;
	unsigned char mac[ETH_ALEN];
	u8 assert_on_error;
	u16 vlan;
	u32 vni;
};
118 
/* Multicast filter command covering a batch of MAC addresses. */
struct ecore_filter_mcast {
	/* MOVE is not supported for multicast */
	enum ecore_filter_opcode opcode;
	u8 vport_to_add_to;
	u8 vport_to_remove_from;
	u8	num_mc_addrs;	/* Number of valid entries in mac[] */
#define ECORE_MAX_MC_ADDRS	64
	unsigned char mac[ECORE_MAX_MC_ADDRS][ETH_ALEN];
};
128 
/* Rx/Tx accept-mode configuration. The *_accept_filter fields are bitmasks
 * built from the ECORE_ACCEPT_* values below.
 */
struct ecore_filter_accept_flags {
	u8 update_rx_mode_config;	/* Apply rx_accept_filter */
	u8 update_tx_mode_config;	/* Apply tx_accept_filter */
	u8 rx_accept_filter;
	u8 tx_accept_filter;
#define	ECORE_ACCEPT_NONE		0x01
#define ECORE_ACCEPT_UCAST_MATCHED	0x02
#define ECORE_ACCEPT_UCAST_UNMATCHED	0x04
#define ECORE_ACCEPT_MCAST_MATCHED	0x08
#define ECORE_ACCEPT_MCAST_UNMATCHED	0x10
#define ECORE_ACCEPT_BCAST		0x20
#define ECORE_ACCEPT_ANY_VNI		0x40
};
142 
/* Classification mode used by the RFS/ntuple hardware filters. */
enum ecore_filter_config_mode {
	ECORE_FILTER_CONFIG_MODE_DISABLE	= 0,
	ECORE_FILTER_CONFIG_MODE_5_TUPLE	= 1,
	ECORE_FILTER_CONFIG_MODE_L4_PORT	= 2,
	ECORE_FILTER_CONFIG_MODE_IP_DEST	= 3,
	ECORE_FILTER_CONFIG_MODE_TUNN_TYPE	= 4,
	ECORE_FILTER_CONFIG_MODE_IP_SRC		= 5,
};
151 
/* Protocol selection and classification mode used when (re)configuring
 * aRFS via ecore_arfs_mode_configure().
 */
struct ecore_arfs_config_params {
	bool tcp;
	bool udp;
	bool ipv4;
	bool ipv6;
	enum ecore_filter_config_mode mode;
};
159 
/**
 * @brief ecore_filter_ucast_cmd - Add / remove / move / remove-all unicast
 *        MAC-VLAN filters.
 *
 * FW will assert in the following cases, so driver should take care...:
 * 1. Adding a filter to a full table.
 * 2. Adding a filter which already exists on that vport.
 * 3. Removing a filter which doesn't exist.
 *
 * @param p_dev
 * @param p_filter_cmd	the filter operation to perform
 * @param comp_mode	ramrod completion mode (see enum spq_mode)
 * @param p_comp_data	completion data; relevant for ECORE_SPQ_MODE_CB
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
		       struct ecore_filter_ucast *p_filter_cmd,
		       enum spq_mode comp_mode,
		       struct ecore_spq_comp_cb *p_comp_data);
172 
/**
 * @brief ecore_filter_mcast_cmd - Add / remove / move multicast MAC filters.
 *
 * @param p_dev
 * @param p_filter_cmd	the filter operation to perform
 * @param comp_mode	ramrod completion mode (see enum spq_mode)
 * @param p_comp_data	completion data; relevant for ECORE_SPQ_MODE_CB
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
		       struct ecore_filter_mcast *p_filter_cmd,
		       enum spq_mode comp_mode,
		       struct ecore_spq_comp_cb *p_comp_data);
179 
/**
 * @brief ecore_filter_accept_cmd - Set "accept" filters
 *
 * @param p_dev
 * @param vport			relative vport ID
 * @param accept_flags		Rx/Tx accept masks to apply
 * @param update_accept_any_vlan	whether accept_any_vlan is applied
 * @param accept_any_vlan
 * @param comp_mode		ramrod completion mode (see enum spq_mode)
 * @param p_comp_data		completion data; relevant for ECORE_SPQ_MODE_CB
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_filter_accept_cmd(
	struct ecore_dev		 *p_dev,
	u8				 vport,
	struct ecore_filter_accept_flags accept_flags,
	u8				 update_accept_any_vlan,
	u8				 accept_any_vlan,
	enum spq_mode			 comp_mode,
	struct ecore_spq_comp_cb	 *p_comp_data);
190 
/**
 * @brief ecore_eth_rx_queue_start - RX Queue Start Ramrod
 *
 * This ramrod initializes an RX Queue for a VPort. An Assert is generated if
 * the VPort ID is not currently initialized.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param p_params		Inputs; Relative for PF [SB being an exception]
 * @param bd_max_bytes		Maximum bytes that can be placed on a BD
 * @param bd_chain_phys_addr	Physical address of BDs for receive.
 * @param cqe_pbl_addr		Physical address of the CQE PBL Table.
 * @param cqe_pbl_size		Size of the CQE PBL Table
 * @param p_ret_params		Pointed struct to be filled with outputs.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
			 u16 opaque_fid,
			 struct ecore_queue_start_common_params *p_params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 struct ecore_rxq_start_ret_params *p_ret_params);
217 
/**
 * @brief ecore_eth_rx_queue_stop - This ramrod closes an Rx queue
 *
 * @param p_hwfn
 * @param p_rxq			Handle of the queue to close
 * @param eq_completion_only	If True completion will be on
 *				EQe, if False completion will be
 *				on EQe if p_hwfn opaque
 *				different from the RXQ opaque
 *				otherwise on CQe.
 * @param cqe_completion	If True completion will be
 *				received on CQe.
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
			void *p_rxq,
			bool eq_completion_only,
			bool cqe_completion);
237 
/**
 * @brief ecore_eth_tx_queue_start - TX Queue Start Ramrod
 *
 * This ramrod initializes a TX Queue for a VPort. An Assert is generated if
 * the VPort is not currently initialized.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param p_params		Inputs; Relative for PF [SB being an exception]
 * @param tc			traffic class to use with this L2 txq
 * @param pbl_addr		address of the pbl array
 * @param pbl_size		number of entries in pbl
 * @param p_ret_params		Pointer to fill the return parameters in.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
			 u16 opaque_fid,
			 struct ecore_queue_start_common_params *p_params,
			 u8 tc,
			 dma_addr_t pbl_addr,
			 u16 pbl_size,
			 struct ecore_txq_start_ret_params *p_ret_params);
262 
/**
 * @brief ecore_eth_tx_queue_stop - closes a Tx queue
 *
 * @param p_hwfn
 * @param p_txq - handle to the Tx queue to be closed
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
					     void *p_txq);
273 
/* TPA (transparent packet aggregation) operating mode of a vport. */
enum ecore_tpa_mode {
	ECORE_TPA_MODE_NONE	= 0,	/* Aggregation disabled */
	ECORE_TPA_MODE_RSC	= 1,
	ECORE_TPA_MODE_GRO	= 2,
	ECORE_TPA_MODE_MAX	= 3
};
280 
/* Parameters for the vport-start ramrod (ecore_sp_vport_start). */
struct ecore_sp_vport_start_params {
	enum ecore_tpa_mode tpa_mode;
	bool remove_inner_vlan;	/* Inner VLAN removal is enabled */
	bool tx_switching;	/* Vport supports tx-switching */
	bool handle_ptp_pkts;	/* Handle PTP packets */
	bool only_untagged;	/* Untagged pkt control */
	bool drop_ttl0;		/* Drop packets with TTL = 0 */
	u8 max_buffers_per_cqe;
	u32 concrete_fid;
	u16 opaque_fid;
	u8 vport_id;		/* VPORT ID */
	u16 mtu;		/* VPORT MTU */
	bool zero_placement_offset;
	bool check_mac;
	bool check_ethtype;

	/* Strict behavior on transmission errors */
	bool b_err_illegal_vlan_mode;
	bool b_err_illegal_inband_mode;
	bool b_err_vlan_insert_with_inband;
	bool b_err_small_pkt;
	bool b_err_big_pkt;
	bool b_err_anti_spoof;
	bool b_err_ctrl_frame;
	bool b_en_rgfs;
	bool b_en_tgfs;
};
308 
/**
 * @brief ecore_sp_vport_start -
 *
 * This ramrod initializes a VPort. An Assert is generated if the Function ID
 * of the VPort is not enabled.
 *
 * @param p_hwfn
 * @param p_params		VPORT start params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
		     struct ecore_sp_vport_start_params *p_params);
323 
/* Parameters for the vport-update ramrod; each 'update_*' flag gates whether
 * its associated value is applied. Optional sub-configurations (RSS, TPA)
 * are applied only when their pointer is non-NULL.
 */
struct ecore_sp_vport_update_params {
	u16			opaque_fid;
	u8			vport_id;
	u8			update_vport_active_rx_flg;
	u8			vport_active_rx_flg;
	u8			update_vport_active_tx_flg;
	u8			vport_active_tx_flg;
	u8			update_inner_vlan_removal_flg;
	u8			inner_vlan_removal_flg;
	u8			silent_vlan_removal_flg;
	u8			update_default_vlan_enable_flg;
	u8			default_vlan_enable_flg;
	u8			update_default_vlan_flg;
	u16			default_vlan;
	u8			update_tx_switching_flg;
	u8			tx_switching_flg;
	u8			update_approx_mcast_flg;
	u8			update_anti_spoofing_en_flg;
	u8			anti_spoofing_en;
	u8			update_accept_any_vlan_flg;
	u8			accept_any_vlan;
	/* Approximate-mcast bins; applied with update_approx_mcast_flg */
	u32			bins[8];
	struct ecore_rss_params	*rss_params;
	struct ecore_filter_accept_flags accept_flags;
	struct ecore_sge_tpa_params *sge_tpa_params;
	/* MTU change - notice this requires the vport to be disabled.
	 * If non-zero, value would be used.
	 */
	u16                     mtu;
	u8			update_ctl_frame_check;
	u8			mac_chk_en;
	u8			ethtype_chk_en;
};
357 
/**
 * @brief ecore_sp_vport_update -
 *
 * This ramrod updates the parameters of the VPort. Every field can be updated
 * independently, according to flags.
 *
 * This ramrod is also used to set the VPort state to active after creation.
 * An Assert is generated if the VPort does not contain an RX queue.
 *
 * @param p_hwfn
 * @param p_params
 * @param comp_mode	ramrod completion mode (see enum spq_mode)
 * @param p_comp_data	completion data; relevant for ECORE_SPQ_MODE_CB
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
		      struct ecore_sp_vport_update_params *p_params,
		      enum spq_mode comp_mode,
		      struct ecore_spq_comp_cb *p_comp_data);
/**
 * @brief ecore_sp_vport_stop -
 *
 * This ramrod closes a VPort after all its RX and TX queues are terminated.
 * An Assert is generated if any queues are left open.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param vport_id VPort ID
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
					 u16 opaque_fid,
					 u8 vport_id);
392 
/**
 * @brief ecore_sp_eth_filter_ucast - per-hwfn unicast filter configuration
 *        (ecore_filter_ucast_cmd is the device-wide wrapper; this variant
 *        takes an explicit opaque_fid).
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param p_filter_cmd	the filter operation to perform
 * @param comp_mode	ramrod completion mode (see enum spq_mode)
 * @param p_comp_data	completion data; relevant for ECORE_SPQ_MODE_CB
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct ecore_filter_ucast *p_filter_cmd,
			  enum spq_mode comp_mode,
			  struct ecore_spq_comp_cb *p_comp_data);
399 
/**
 * @brief ecore_sp_eth_rx_queues_update -
 *
 * This ramrod updates an RX queue. It is used for setting the active state
 * of the queue and updating the TPA and SGE parameters.
 *
 * @note Final phase API.
 *
 * @param p_hwfn
 * @param pp_rxq_handlers	An array of queue handlers to be updated.
 * @param num_rxqs              number of queues to update.
 * @param complete_cqe_flg	Post completion to the CQE Ring if set
 * @param complete_event_flg	Post completion to the Event Ring if set
 * @param comp_mode
 * @param p_comp_data
 *
 * @return enum _ecore_status_t
 */

enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
			      void **pp_rxq_handlers,
			      u8 num_rxqs,
			      u8 complete_cqe_flg,
			      u8 complete_event_flg,
			      enum spq_mode comp_mode,
			      struct ecore_spq_comp_cb *p_comp_data);
427 
/* Per-hwfn statistics read for a single statistics bin; optionally also
 * collects port-level statistics. Caller supplies the PTT window.
 */
void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     struct ecore_eth_stats *stats,
			     u16 statistics_bin, bool b_get_port_stats);

/* Fill 'stats' with the device's vport statistics. */
void ecore_get_vport_stats(struct ecore_dev *p_dev,
			   struct ecore_eth_stats *stats);

/* Reset the device's vport statistics counters. */
void ecore_reset_vport_stats(struct ecore_dev *p_dev);
437 
/**
 * @brief ecore_arfs_mode_configure -
 *
 * Enable or disable RFS mode. It must accept at least one of tcp or udp true
 * and at least one of ipv4 or ipv6 true to enable RFS mode.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_cfg_params		arfs mode configuration parameters.
 *
 */
void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct ecore_arfs_config_params *p_cfg_params);
452 
/* Description of a single n-tuple (RFS) filter add/remove operation. */
struct ecore_ntuple_filter_params {
	/* Physically mapped address containing header of buffer to be used
	 * as filter.
	 */
	dma_addr_t addr;

	/* Length of header in bytes */
	u16 length;

	/* Relative queue-id to receive classified packet */
	#define ECORE_RFS_NTUPLE_QID_RSS ((u16)-1)
	u16 qid;

	/* Identifier can either be according to vport-id or vfid */
	bool b_is_vf;
	u8 vport_id;
	u8 vf_id;

	/* true if this filter is to be added. Else to be removed */
	bool b_is_add;

	/* If packet needs to be dropped */
	bool b_is_drop;
};
477 
/**
 * @brief - ecore_configure_rfs_ntuple_filter
 *
 * This ramrod should be used to add or remove arfs hw filter
 *
 * @param p_hwfn
 * @param p_cb		Used for ECORE_SPQ_MODE_CB,where client would initialize
 *			it with cookie and callback function address, if not
 *			using this mode then client must pass NULL.
 * @param p_params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
				  struct ecore_spq_comp_cb *p_cb,
				  struct ecore_ntuple_filter_params *p_params);
493 
/**
 * @brief - ecore_update_eth_rss_ind_table_entry
 *
 * This function being used to update RSS indirection table entry to FW RAM
 * instead of using the SP vport update ramrod with rss params.
 *
 * Notice:
 * This function supports only one outstanding command per engine. Ecore
 * clients which use this function should call ecore_mcp_ind_table_lock() prior
 * to it and ecore_mcp_ind_table_unlock() after it.
 *
 * @param p_hwfn
 * @param vport_id		relative vport ID
 * @param ind_table_index	entry index within the indirection table
 * @param ind_table_value	new value for that entry
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,
				     u8 vport_id,
				     u8 ind_table_index,
				     u16 ind_table_value);
517 #endif
518