xref: /dpdk/drivers/net/intel/ice/ice_ethdev.h (revision 552979dfb1c98a939b0f8b087547386d3c32ac00)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4 
5 #ifndef _ICE_ETHDEV_H_
6 #define _ICE_ETHDEV_H_
7 
8 #include <rte_compat.h>
9 #include <rte_kvargs.h>
10 #include <rte_time.h>
11 
12 #include <ethdev_driver.h>
13 #include <rte_tm_driver.h>
14 
15 #include "base/ice_common.h"
16 #include "base/ice_adminq_cmd.h"
17 #include "base/ice_flow.h"
18 #include "base/ice_sched.h"
19 
20 #define ICE_ADMINQ_LEN               32
21 #define ICE_SBIOQ_LEN                32
22 #define ICE_MAILBOXQ_LEN             32
23 #define ICE_SBQ_LEN                  64
24 #define ICE_ADMINQ_BUF_SZ            4096
25 #define ICE_SBIOQ_BUF_SZ             4096
26 #define ICE_MAILBOXQ_BUF_SZ          4096
27 /* Number of queues per TC should be one of 1, 2, 4, 8, 16, 32, 64, 128, 256 */
28 #define ICE_MAX_Q_PER_TC         256
29 #define ICE_NUM_DESC_DEFAULT     512
30 #define ICE_BUF_SIZE_MIN         1024
31 #define ICE_FRAME_SIZE_MAX       9728
32 #define ICE_QUEUE_BASE_ADDR_UNIT 128
33 /* number of VSIs and queue default setting */
34 #define ICE_MAX_QP_NUM_PER_VF    16
35 #define ICE_DEFAULT_QP_NUM_FDIR  1
36 #define ICE_UINT32_BIT_SIZE      (CHAR_BIT * sizeof(uint32_t))
37 #define ICE_VFTA_SIZE            (4096 / ICE_UINT32_BIT_SIZE)
/* Maximum number of MAC addresses */
39 #define ICE_NUM_MACADDR_MAX       64
40 /* Maximum number of VFs */
41 #define ICE_MAX_VF               128
42 #define ICE_MAX_INTR_QUEUE_NUM   256
43 
44 #define ICE_MISC_VEC_ID          RTE_INTR_VEC_ZERO_OFFSET
45 #define ICE_RX_VEC_ID            RTE_INTR_VEC_RXTX_OFFSET
46 
47 #define ICE_MAX_PKT_TYPE  1024
48 
49 /* DDP package search path */
50 #define ICE_PKG_FILE_DEFAULT "/lib/firmware/intel/ice/ddp/ice.pkg"
51 #define ICE_PKG_FILE_UPDATES "/lib/firmware/updates/intel/ice/ddp/ice.pkg"
52 #define ICE_PKG_FILE_SEARCH_PATH_DEFAULT "/lib/firmware/intel/ice/ddp/"
53 #define ICE_PKG_FILE_SEARCH_PATH_UPDATES "/lib/firmware/updates/intel/ice/ddp/"
54 #define ICE_PKG_FILE_CUSTOMIZED_PATH "/sys/module/firmware_class/parameters/path"
55 #define ICE_MAX_PKG_FILENAME_SIZE   256
56 
57 #define MAX_ACL_NORMAL_ENTRIES    256
58 
59 /**
60  * vlan_id is a 12 bit number.
61  * The VFTA array is actually a 4096 bit array, 128 of 32bit elements.
62  * 2^5 = 32. The val of lower 5 bits specifies the bit in the 32bit element.
63  * The higher 7 bit val specifies VFTA array index.
64  */
65 #define ICE_VFTA_BIT(vlan_id)    (1 << ((vlan_id) & 0x1F))
66 #define ICE_VFTA_IDX(vlan_id)    ((vlan_id) >> 5)
67 
68 /* Default TC traffic in case DCB is not enabled */
69 #define ICE_DEFAULT_TCMAP        0x1
70 #define ICE_FDIR_QUEUE_ID        0
71 
72 /* Always assign pool 0 to main VSI, VMDQ will start from 1 */
73 #define ICE_VMDQ_POOL_BASE       1
74 
75 #define ICE_DEFAULT_RX_FREE_THRESH  32
76 #define ICE_DEFAULT_RX_PTHRESH      8
77 #define ICE_DEFAULT_RX_HTHRESH      8
78 #define ICE_DEFAULT_RX_WTHRESH      0
79 
80 #define ICE_DEFAULT_TX_FREE_THRESH  32
81 #define ICE_DEFAULT_TX_PTHRESH      32
82 #define ICE_DEFAULT_TX_HTHRESH      0
83 #define ICE_DEFAULT_TX_WTHRESH      0
84 #define ICE_DEFAULT_TX_RSBIT_THRESH 32
85 
86 /* Bit shift and mask */
87 #define ICE_4_BIT_WIDTH  (CHAR_BIT / 2)
88 #define ICE_4_BIT_MASK   RTE_LEN2MASK(ICE_4_BIT_WIDTH, uint8_t)
89 #define ICE_8_BIT_WIDTH  CHAR_BIT
90 #define ICE_8_BIT_MASK   UINT8_MAX
91 #define ICE_16_BIT_WIDTH (CHAR_BIT * 2)
92 #define ICE_16_BIT_MASK  UINT16_MAX
93 #define ICE_32_BIT_WIDTH (CHAR_BIT * 4)
94 #define ICE_32_BIT_MASK  UINT32_MAX
95 #define ICE_40_BIT_WIDTH (CHAR_BIT * 5)
96 #define ICE_40_BIT_MASK  RTE_LEN2MASK(ICE_40_BIT_WIDTH, uint64_t)
97 #define ICE_48_BIT_WIDTH (CHAR_BIT * 6)
98 #define ICE_48_BIT_MASK  RTE_LEN2MASK(ICE_48_BIT_WIDTH, uint64_t)
99 
100 #define ICE_FLAG_RSS                   BIT_ULL(0)
101 #define ICE_FLAG_DCB                   BIT_ULL(1)
102 #define ICE_FLAG_VMDQ                  BIT_ULL(2)
103 #define ICE_FLAG_SRIOV                 BIT_ULL(3)
104 #define ICE_FLAG_HEADER_SPLIT_DISABLED BIT_ULL(4)
105 #define ICE_FLAG_HEADER_SPLIT_ENABLED  BIT_ULL(5)
106 #define ICE_FLAG_FDIR                  BIT_ULL(6)
107 #define ICE_FLAG_VXLAN                 BIT_ULL(7)
108 #define ICE_FLAG_RSS_AQ_CAPABLE        BIT_ULL(8)
109 #define ICE_FLAG_VF_MAC_BY_PF          BIT_ULL(9)
110 #define ICE_FLAG_ALL  (ICE_FLAG_RSS | \
111 		       ICE_FLAG_DCB | \
112 		       ICE_FLAG_VMDQ | \
113 		       ICE_FLAG_SRIOV | \
114 		       ICE_FLAG_HEADER_SPLIT_DISABLED | \
115 		       ICE_FLAG_HEADER_SPLIT_ENABLED | \
116 		       ICE_FLAG_FDIR | \
117 		       ICE_FLAG_VXLAN | \
118 		       ICE_FLAG_RSS_AQ_CAPABLE | \
119 		       ICE_FLAG_VF_MAC_BY_PF)
120 
121 #define ICE_RSS_OFFLOAD_ALL ( \
122 	RTE_ETH_RSS_IPV4 | \
123 	RTE_ETH_RSS_FRAG_IPV4 | \
124 	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
125 	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
126 	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
127 	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
128 	RTE_ETH_RSS_IPV6 | \
129 	RTE_ETH_RSS_FRAG_IPV6 | \
130 	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
131 	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
132 	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
133 	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
134 	RTE_ETH_RSS_L2_PAYLOAD)
135 
136 /**
137  * The overhead from MTU to max frame size.
138  * Considering QinQ packet, the VLAN tag needs to be counted twice.
139  */
140 #define ICE_ETH_OVERHEAD \
141 	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + RTE_VLAN_HLEN * 2)
142 #define ICE_ETH_MAX_LEN (RTE_ETHER_MTU + ICE_ETH_OVERHEAD)
143 
144 #define ICE_RXTX_BYTES_HIGH(bytes) ((bytes) & ~ICE_40_BIT_MASK)
145 #define ICE_RXTX_BYTES_LOW(bytes) ((bytes) & ICE_40_BIT_MASK)
146 
147 /* Max number of flexible descriptor rxdid */
148 #define ICE_FLEX_DESC_RXDID_MAX_NUM 64
149 
150 #define ICE_I2C_EEPROM_DEV_ADDR		0xA0
151 #define ICE_I2C_EEPROM_DEV_ADDR2	0xA2
152 #define ICE_MODULE_TYPE_SFP		0x03
153 #define ICE_MODULE_TYPE_QSFP_PLUS	0x0D
154 #define ICE_MODULE_TYPE_QSFP28		0x11
155 #define ICE_MODULE_SFF_ADDR_MODE	0x04
156 #define ICE_MODULE_SFF_DIAG_CAPAB	0x40
157 #define ICE_MODULE_REVISION_ADDR	0x01
158 #define ICE_MODULE_SFF_8472_COMP	0x5E
159 #define ICE_MODULE_SFF_8472_SWAP	0x5C
160 #define ICE_MODULE_QSFP_MAX_LEN		640
161 
162 /* EEPROM Standards for plug in modules */
163 #define ICE_MODULE_SFF_8079		0x1
164 #define ICE_MODULE_SFF_8079_LEN		256
165 #define ICE_MODULE_SFF_8472		0x2
166 #define ICE_MODULE_SFF_8472_LEN		512
167 #define ICE_MODULE_SFF_8636		0x3
168 #define ICE_MODULE_SFF_8636_LEN		256
169 #define ICE_MODULE_SFF_8636_MAX_LEN     640
170 #define ICE_MODULE_SFF_8436		0x4
171 #define ICE_MODULE_SFF_8436_LEN		256
172 #define ICE_MODULE_SFF_8436_MAX_LEN     640
173 
174 
175 /* Per-channel register definitions */
176 #define GLTSYN_AUX_OUT(_chan, _idx)     (GLTSYN_AUX_OUT_0(_idx) + ((_chan) * 8))
177 #define GLTSYN_CLKO(_chan, _idx)        (GLTSYN_CLKO_0(_idx) + ((_chan) * 8))
178 #define GLTSYN_TGT_L(_chan, _idx)       (GLTSYN_TGT_L_0(_idx) + ((_chan) * 16))
179 #define GLTSYN_TGT_H(_chan, _idx)       (GLTSYN_TGT_H_0(_idx) + ((_chan) * 16))
180 
181 /* DDP package type */
enum ice_pkg_type {
	ICE_PKG_TYPE_UNKNOWN,    /* package type could not be determined */
	ICE_PKG_TYPE_OS_DEFAULT, /* standard OS-default DDP package */
	ICE_PKG_TYPE_COMMS,      /* comms-specific DDP package */
};
187 
/* Pulse-per-second output pin mode selected via devargs */
enum pps_type {
	PPS_NONE, /* PPS output disabled */
	PPS_PIN,  /* PPS output on a selected pin */
	PPS_MAX,  /* sentinel, not a valid mode */
};
193 
194 struct ice_adapter;
195 
196 /**
197  * MAC filter structure
198  */
199 struct ice_mac_filter_info {
200 	struct rte_ether_addr mac_addr;
201 };
202 
203 TAILQ_HEAD(ice_mac_filter_list, ice_mac_filter);
204 
205 /* MAC filter list structure */
206 struct ice_mac_filter {
207 	TAILQ_ENTRY(ice_mac_filter) next;
208 	struct ice_mac_filter_info mac_info;
209 };
210 
/* A VLAN tag: TPID (e.g. 0x8100/0x88a8) plus 12-bit VLAN ID */
struct ice_vlan {
	uint16_t tpid; /* Tag Protocol Identifier */
	uint16_t vid;  /* VLAN ID */
};

/* Convenience constructor for a struct ice_vlan compound literal */
#define ICE_VLAN(tpid, vid) \
	((struct ice_vlan){ tpid, vid })

/**
 * VLAN filter structure
 */
struct ice_vlan_filter_info {
	struct ice_vlan vlan; /* VLAN tag this filter matches */
};

TAILQ_HEAD(ice_vlan_filter_list, ice_vlan_filter);

/* VLAN filter list structure (one node per installed VLAN filter) */
struct ice_vlan_filter {
	TAILQ_ENTRY(ice_vlan_filter) next; /* linkage in ice_vlan_filter_list */
	struct ice_vlan_filter_info vlan_info;
};
233 
/* One contiguous range [base, base + len) of pooled resources */
struct pool_entry {
	LIST_ENTRY(pool_entry) next;
	uint16_t base; /* first index of the range */
	uint16_t len;  /* number of entries in the range */
};

LIST_HEAD(res_list, pool_entry);

/* Simple range-based allocator used for queue pairs and MSIX vectors */
struct ice_res_pool_info {
	uint32_t base;              /* Resource start index */
	uint32_t num_alloc;         /* Allocated resource number */
	uint32_t num_free;          /* Total available resource number */
	struct res_list alloc_list; /* Allocated resource list */
	struct res_list free_list;  /* Available resource list */
};
249 
TAILQ_HEAD(ice_vsi_list_head, ice_vsi_list);

struct ice_vsi;

/* VSI list structure: one node per downlink VSI hanging off an uplink VSI */
struct ice_vsi_list {
	TAILQ_ENTRY(ice_vsi_list) list;
	struct ice_vsi *vsi; /* the VSI this node refers to */
};
259 
260 struct ice_rx_queue;
261 struct ci_tx_queue;
262 
263 /**
264  * Structure that defines a VSI, associated with a adapter.
265  */
/**
 * Structure that defines a VSI, associated with an adapter.
 */
struct ice_vsi {
	struct ice_adapter *adapter; /* Backreference to associated adapter */
	struct ice_aqc_vsi_props info; /* VSI properties */
	/**
	 * When the driver is loaded, only a default main VSI exists. In case
	 * a new VSI needs to be added, HW needs to know the layout that VSIs
	 * are organized in. Besides that, a VSI is an element and can't switch
	 * packets, which requires adding a new component, a VEB, to perform
	 * switching. So, a new VSI needs to specify the uplink VSI (parent
	 * VSI) before being created. The uplink VSI will check whether it has
	 * a VEB to switch packets. If not, it will try to create one. Then,
	 * the uplink VSI will move the new VSI into its sib_vsi_list to manage
	 * all the downlink VSIs.
	 *  sib_vsi_list: the VSI list that shares the same uplink VSI.
	 *  parent_vsi  : the uplink VSI. It's NULL for the main VSI.
	 *  veb         : the VEB associated with the VSI.
	 */
	struct ice_vsi_list sib_vsi_list; /* sibling vsi list */
	struct ice_vsi *parent_vsi;
	enum ice_vsi_type type; /* VSI types */
	uint16_t vlan_num;       /* Total VLAN number */
	uint16_t mac_num;        /* Total mac number */
	struct ice_mac_filter_list mac_list; /* macvlan filter list */
	struct ice_vlan_filter_list vlan_list; /* vlan filter list */
	uint16_t nb_qps;         /* Number of queue pairs VSI can occupy */
	uint16_t nb_used_qps;    /* Number of queue pairs VSI uses */
	uint16_t max_macaddrs;   /* Maximum number of MAC addresses */
	uint16_t base_queue;     /* The first queue index of this VSI */
	uint16_t vsi_id;         /* Hardware Id */
	uint16_t idx;            /* vsi_handle: SW index in hw->vsi_ctx */
	/* VF number to which the VSI connects, valid when VSI is VF type */
	uint8_t vf_num;
	uint16_t msix_intr; /* The MSIX interrupt binds to VSI */
	uint16_t nb_msix;   /* The max number of msix vector */
	uint8_t enabled_tc; /* The traffic class enabled */
	uint8_t vlan_anti_spoof_on; /* The VLAN anti-spoofing enabled */
	uint8_t vlan_filter_on; /* The VLAN filter enabled */
	/* information about rss configuration */
	u32 rss_key_size;
	u32 rss_lut_size;
	uint8_t *rss_lut; /* RSS lookup table, rss_lut_size entries */
	uint8_t *rss_key; /* RSS hash key, rss_key_size bytes */
	struct ice_eth_stats eth_stats_offset; /* stats snapshot used as baseline */
	struct ice_eth_stats eth_stats;
	bool offset_loaded; /* true once eth_stats_offset has been captured */
	uint64_t old_rx_bytes; /* previous reading, for 40-bit counter rollover */
	uint64_t old_tx_bytes; /* previous reading, for 40-bit counter rollover */
};
313 
/* Protocol field to extract into the mbuf dynamic field (proto_xtr devarg) */
enum proto_xtr_type {
	PROTO_XTR_NONE,
	PROTO_XTR_VLAN,
	PROTO_XTR_IPV4,
	PROTO_XTR_IPV6,
	PROTO_XTR_IPV6_FLOW,
	PROTO_XTR_TCP,
	PROTO_XTR_IP_OFFSET,
	PROTO_XTR_MAX /* The last one */
};
324 
/* Tunnel encapsulation a flow-director rule applies to */
enum ice_fdir_tunnel_type {
	ICE_FDIR_TUNNEL_TYPE_NONE = 0, /* non-tunneled traffic */
	ICE_FDIR_TUNNEL_TYPE_VXLAN,
	ICE_FDIR_TUNNEL_TYPE_GTPU,
	ICE_FDIR_TUNNEL_TYPE_GTPU_EH, /* GTP-U with extension header */
};
331 
332 struct rte_flow;
333 TAILQ_HEAD(ice_flow_list, rte_flow);
334 
335 struct ice_flow_parser_node;
336 TAILQ_HEAD(ice_parser_list, ice_flow_parser_node);
337 
/* Full configuration of one flow-director filter rule */
struct ice_fdir_filter_conf {
	struct ice_fdir_fltr input; /* match fields and action */
	enum ice_fdir_tunnel_type tunnel_type;

	struct ice_fdir_counter *counter; /* flow specific counter context */
	struct rte_flow_action_count act_count;

	uint64_t input_set_o; /* used for non-tunnel or tunnel outer fields */
	uint64_t input_set_i; /* only for tunnel inner fields */
	uint32_t mark_flag; /* non-zero when rule sets an FDIR mark ID */

	struct ice_parser_profile *prof; /* raw-pattern parser profile */
	bool parser_ena; /* true when the raw packet parser path is used */
	u8 *pkt_buf; /* training packet for the raw parser path */
	u8 pkt_len;  /* length of pkt_buf in bytes */
};
354 
/* Key used to hash/deduplicate flow-director filter patterns */
struct ice_fdir_fltr_pattern {
	enum ice_fltr_ptype flow_type;

	union {
		struct ice_fdir_v4 v4;
		struct ice_fdir_v6 v6;
	} ip, mask; /* L3 match values and their masks */

	struct ice_fdir_udp_gtp gtpu_data; /* GTP-U header match values */
	struct ice_fdir_udp_gtp gtpu_mask;

	struct ice_fdir_extra ext_data; /* extra (non-L3/L4) match values */
	struct ice_fdir_extra ext_mask;

	enum ice_fdir_tunnel_type tunnel_type;
};
371 
372 #define ICE_FDIR_COUNTER_DEFAULT_POOL_SIZE	1
373 #define ICE_FDIR_COUNTER_MAX_POOL_SIZE		32
374 #define ICE_FDIR_COUNTERS_PER_BLOCK		256
375 #define ICE_FDIR_COUNTER_INDEX(base_idx) \
376 				((base_idx) * ICE_FDIR_COUNTERS_PER_BLOCK)
377 struct ice_fdir_counter_pool;
378 
/* One HW flow-director counter, possibly shared by several rules */
struct ice_fdir_counter {
	TAILQ_ENTRY(ice_fdir_counter) next;
	struct ice_fdir_counter_pool *pool; /* owning pool */
	uint8_t shared;    /* non-zero if counter may be shared across rules */
	uint32_t ref_cnt;  /* number of rules referencing this counter */
	uint32_t id;       /* user-visible counter id */
	uint64_t hits;     /* last read packet count */
	uint64_t bytes;    /* last read byte count */
	uint32_t hw_index; /* index of the counter in hardware */
};
389 
390 TAILQ_HEAD(ice_fdir_counter_list, ice_fdir_counter);
391 
392 struct ice_fdir_counter_pool {
393 	TAILQ_ENTRY(ice_fdir_counter_pool) next;
394 	struct ice_fdir_counter_list counter_list;
395 	struct ice_fdir_counter counters[0];
396 };
397 
398 TAILQ_HEAD(ice_fdir_counter_pool_list, ice_fdir_counter_pool);
399 
/* Tracks all allocated counter pools for one PF */
struct ice_fdir_counter_pool_container {
	struct ice_fdir_counter_pool_list pool_list;
	struct ice_fdir_counter_pool *pools[ICE_FDIR_COUNTER_MAX_POOL_SIZE];
	uint8_t index_free; /* next free slot in pools[] */
};
405 
406 /**
407  *  A structure used to define fields of a FDIR related info.
408  */
/**
 *  A structure used to define fields of a FDIR related info.
 */
struct ice_fdir_info {
	struct ice_vsi *fdir_vsi;     /* pointer to fdir VSI structure */
	struct ci_tx_queue *txq;      /* dedicated queue for programming packets */
	struct ice_rx_queue *rxq;     /* dedicated queue for programming status */
	void *prg_pkt;                 /* memory for fdir program packet */
	uint64_t dma_addr;             /* physical address of packet memory */
	const struct rte_memzone *mz;  /* memzone backing prg_pkt */
	struct ice_fdir_filter_conf conf; /* scratch config for rule creation */

	struct ice_fdir_filter_conf **hash_map; /* pattern-hash -> rule lookup */
	struct rte_hash *hash_table;

	struct ice_fdir_counter_pool_container counter;
};
423 
424 #define ICE_HASH_GTPU_CTX_EH_IP		0
425 #define ICE_HASH_GTPU_CTX_EH_IP_UDP	1
426 #define ICE_HASH_GTPU_CTX_EH_IP_TCP	2
427 #define ICE_HASH_GTPU_CTX_UP_IP		3
428 #define ICE_HASH_GTPU_CTX_UP_IP_UDP	4
429 #define ICE_HASH_GTPU_CTX_UP_IP_TCP	5
430 #define ICE_HASH_GTPU_CTX_DW_IP		6
431 #define ICE_HASH_GTPU_CTX_DW_IP_UDP	7
432 #define ICE_HASH_GTPU_CTX_DW_IP_TCP	8
433 #define ICE_HASH_GTPU_CTX_MAX		9
434 
/* Per-IP-version cache of GTP-U RSS configs, indexed by ICE_HASH_GTPU_CTX_* */
struct ice_hash_gtpu_ctx {
	struct ice_rss_hash_cfg ctx[ICE_HASH_GTPU_CTX_MAX];
};

/* RSS hash context cached per PF for GTP-U over IPv4 and IPv6 */
struct ice_hash_ctx {
	struct ice_hash_gtpu_ctx gtpu4;
	struct ice_hash_gtpu_ctx gtpu6;
};
443 
/* Scratch configuration used while building one ACL rule */
struct ice_acl_conf {
	struct ice_fdir_fltr input; /* match fields and action */
	uint64_t input_set;         /* bitmap of fields present in the rule */
};
448 
449 /**
450  * A structure used to define fields of ACL related info.
451  */
/**
 * A structure used to define fields of ACL related info.
 */
struct ice_acl_info {
	struct ice_acl_conf conf;   /* scratch config for rule creation */
	struct rte_bitmap *slots;   /* allocation bitmap of ACL entry slots */
	uint64_t hw_entry_id[MAX_ACL_NORMAL_ENTRIES]; /* slot -> HW entry id */
};
457 
458 TAILQ_HEAD(ice_shaper_profile_list, ice_tm_shaper_profile);
459 TAILQ_HEAD(ice_tm_node_list, ice_tm_node);
460 
461 #define ICE_TM_MAX_LAYERS ICE_SCHED_9_LAYERS
462 
/* One rte_tm shaper profile registered by the application */
struct ice_tm_shaper_profile {
	TAILQ_ENTRY(ice_tm_shaper_profile) node;
	uint32_t shaper_profile_id; /* application-chosen profile id */
	uint32_t reference_count;   /* number of TM nodes using this profile */
	struct rte_tm_shaper_params profile; /* rate/burst parameters */
};
469 
470 /* Struct to store Traffic Manager node configuration. */
/* Struct to store Traffic Manager node configuration. */
struct ice_tm_node {
	TAILQ_ENTRY(ice_tm_node) node;
	uint32_t id;        /* application-chosen node id */
	uint32_t priority;  /* scheduling priority among siblings */
	uint32_t weight;    /* WFQ weight among siblings */
	uint32_t level;     /* hierarchy level of this node */
	uint32_t reference_count; /* number of child nodes */
	struct ice_tm_node *parent;    /* NULL for the root node */
	struct ice_tm_node **children; /* array of reference_count children */
	struct ice_tm_shaper_profile *shaper_profile; /* may be NULL */
	struct rte_tm_node_params params; /* parameters given by the app */
	struct ice_sched_node *sched_node; /* matching HW scheduler node */
};
484 
485 /* Struct to store all the Traffic Manager configuration. */
/* Struct to store all the Traffic Manager configuration. */
struct ice_tm_conf {
	struct ice_shaper_profile_list shaper_profile_list;
	struct ice_tm_node *root; /* root node - port */
	uint8_t hidden_layers;    /* the number of hierarchy layers hidden from app */
	bool committed;           /* true once the hierarchy has been committed */
	bool clear_on_fail;       /* app requested clearing config on commit failure */
};
493 
/* Software counters kept by the mbuf-check (tx diagnostics) path */
struct ice_mbuf_stats {
	uint64_t tx_pkt_errors; /* mbufs rejected by the tx mbuf checks */
};
497 
/* Per-physical-function state of the ice PMD */
struct ice_pf {
	struct ice_adapter *adapter; /* The adapter this PF associates to */
	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
	/* Used for next free software vsi idx.
	 * To save the effort, we don't recycle the index.
	 * Suppose the indexes are more than enough.
	 */
	uint16_t next_vsi_idx;
	uint16_t vsis_allocated;   /* number of VSIs HW granted */
	uint16_t vsis_unallocated; /* number of VSIs still available in HW */
	struct ice_res_pool_info qp_pool;    /* Queue pair pool */
	struct ice_res_pool_info msix_pool;  /* MSIX interrupt pool */
	struct rte_eth_dev_data *dev_data; /* Pointer to the device data */
	struct rte_ether_addr dev_addr; /* PF device mac address */
	uint64_t flags; /* PF feature flags (ICE_FLAG_*) */
	uint16_t hash_lut_size; /* The size of hash lookup table */
	uint16_t lan_nb_qp_max; /* max queue pairs available for LAN */
	uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
	uint16_t base_queue; /* The base queue pairs index in the device */
	uint8_t *proto_xtr; /* Protocol extraction type for all queues */
	uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
	uint16_t fdir_qp_offset; /* first FDIR queue pair index */
	struct ice_fdir_info fdir; /* flow director info */
	struct ice_acl_info acl; /* ACL info */
	struct ice_hash_ctx hash_ctx; /* cached GTP-U RSS configuration */
	uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
	uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
	struct ice_hw_port_stats stats_offset; /* baseline for stats reporting */
	struct ice_hw_port_stats stats;
	struct ice_mbuf_stats mbuf_stats;
	/* internal packet statistics, it should be excluded from the total */
	struct ice_eth_stats internal_stats_offset;
	struct ice_eth_stats internal_stats;
	bool offset_loaded; /* true once stats offsets have been captured */
	bool adapter_stopped;
	struct ice_flow_list flow_list; /* rte_flow rules created on this PF */
	rte_spinlock_t flow_ops_lock;   /* serializes rte_flow operations */
	bool init_link_up;
	uint64_t old_rx_bytes; /* previous reading, for counter rollover */
	uint64_t old_tx_bytes; /* previous reading, for counter rollover */
	uint64_t supported_rxdid; /* bitmap for supported RXDID */
	uint64_t rss_hf; /* currently configured RSS hash functions */
	struct ice_tm_conf tm_conf; /* traffic manager configuration */
	uint16_t outer_ethertype; /* TPID used for outer VLAN offloads */
	/* lock prevent race condition between lsc interrupt handler
	 * and link status update during dev_start.
	 */
	rte_spinlock_t link_lock;
};
547 
548 #define ICE_MAX_QUEUE_NUM  2048
549 #define ICE_MAX_PIN_NUM   4
550 
551 /**
552  * Cache devargs parse result.
553  */
struct ice_devargs {
	int rx_low_latency;       /* "rx_low_latency" devarg */
	int safe_mode_support;    /* "safe-mode-support" devarg */
	uint8_t proto_xtr_dflt;   /* default protocol extraction type */
	uint8_t default_mac_disable; /* disable the default MAC filter */
	uint8_t proto_xtr[ICE_MAX_QUEUE_NUM]; /* per-queue extraction type */
	uint8_t pin_idx;          /* PPS output pin index */
	uint8_t pps_out_ena;      /* enable PPS output */
	uint8_t ddp_load_sched;   /* load scheduler topology from DDP */
	uint8_t tm_exposed_levels; /* number of TM levels exposed to the app */
	int xtr_field_offs;       /* mbuf dynfield offset for extracted data */
	uint8_t xtr_flag_offs[PROTO_XTR_MAX]; /* mbuf dynflag bits per type */
	/* Name of the field. */
	char xtr_field_name[RTE_MBUF_DYN_NAMESIZE];
	uint64_t mbuf_check;      /* mbuf-check flags (ICE_MBUF_CHECK_F_*) */
	const char *ddp_filename; /* explicit DDP package path, or NULL */
};
571 
572 /**
573  * Structure to store fdir fv entry.
574  */
/**
 * Structure to store fdir fv entry.
 */
struct ice_fdir_prof_info {
	struct ice_parser_profile prof; /* parser profile for raw FDIR rules */
	u64 fdir_actived_cnt;           /* number of active rules on profile */
};
579 
580 /**
581  * Structure to store rss fv entry.
582  */
/**
 * Structure to store rss fv entry.
 */
struct ice_rss_prof_info {
	struct ice_parser_profile prof; /* parser profile for raw RSS config */
	bool symm;                      /* symmetric hashing enabled */
};
587 
588 #define ICE_MBUF_CHECK_F_TX_MBUF        (1ULL << 0)
589 #define ICE_MBUF_CHECK_F_TX_SIZE        (1ULL << 1)
590 #define ICE_MBUF_CHECK_F_TX_SEGMENT     (1ULL << 2)
591 #define ICE_MBUF_CHECK_F_TX_OFFLOAD     (1ULL << 3)
592 
593 /**
594  * Structure to store private data for each PF/VF instance.
595  */
/**
 * Structure to store private data for each PF/VF instance.
 */
struct ice_adapter {
	/* Common for both PF and VF */
	struct ice_hw hw; /* shared-code HW handle */
	struct ice_pf pf; /* PF-specific state */
	bool rx_bulk_alloc_allowed; /* rx bulk-alloc path usable */
	bool rx_vec_allowed;        /* vector rx path usable */
	bool tx_vec_allowed;        /* vector tx path usable */
	bool tx_simple_allowed;     /* simple tx path usable */
	/* ptype mapping table */
	alignas(RTE_CACHE_LINE_MIN_SIZE) uint32_t ptype_tbl[ICE_MAX_PKT_TYPE];
	bool is_safe_mode; /* running without a DDP package */
	struct ice_devargs devargs; /* cached devargs parse result */
	enum ice_pkg_type active_pkg_type; /* loaded ddp package type */
	uint16_t fdir_ref_cnt;
	/* For vector PMD */
	eth_rx_burst_t tx_pkt_burst;
	/* For PTP */
	uint8_t ptp_tx_block;
	uint8_t ptp_tx_index;
	bool ptp_ena; /* PTP timestamping enabled */
	uint64_t time_hw;
	struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
	struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS];
	/* True if DCF state of the associated PF is on */
	RTE_ATOMIC(bool) dcf_state_on;
	/* Set bit if the engine is disabled */
	unsigned long disabled_engine_mask;
	struct ice_parser *psr; /* raw-flow parser instance, may be NULL */
	/* used only on X86, zero on other Archs */
	bool rx_use_avx2;
	bool rx_use_avx512;
	bool tx_use_avx2;
	bool tx_use_avx512;
	bool rx_vec_offload_support;
};
631 
/* Port VLAN (PVID) configuration for a VSI */
struct ice_vsi_vlan_pvid_info {
	uint16_t on;		/* Enable or disable pvid */
	union {
		uint16_t pvid;	/* Valid in case 'on' is set to set pvid */
		struct {
			/* Valid in case 'on' is cleared. 'tagged' will reject
			 * tagged packets, while 'untagged' will reject
			 * untagged packets.
			 */
			uint8_t tagged;
			uint8_t untagged;
		} reject;
	} config;
};
646 
647 #define ICE_DEV_TO_PCI(eth_dev) \
648 	RTE_DEV_TO_PCI((eth_dev)->device)
649 
650 /* ICE_DEV_PRIVATE_TO */
651 #define ICE_DEV_PRIVATE_TO_PF(adapter) \
652 	(&((struct ice_adapter *)adapter)->pf)
653 #define ICE_DEV_PRIVATE_TO_HW(adapter) \
654 	(&((struct ice_adapter *)adapter)->hw)
655 #define ICE_DEV_PRIVATE_TO_ADAPTER(adapter) \
656 	((struct ice_adapter *)adapter)
657 
658 /* ICE_VSI_TO */
659 #define ICE_VSI_TO_HW(vsi) \
660 	(&(((struct ice_vsi *)vsi)->adapter->hw))
661 #define ICE_VSI_TO_PF(vsi) \
662 	(&(((struct ice_vsi *)vsi)->adapter->pf))
663 
664 /* ICE_PF_TO */
665 #define ICE_PF_TO_HW(pf) \
666 	(&((pf)->adapter->hw))
667 #define ICE_PF_TO_ADAPTER(pf) \
668 	((struct ice_adapter *)(pf)->adapter)
669 #define ICE_PF_TO_ETH_DEV(pf) \
670 	(((struct ice_pf *)pf)->adapter->eth_dev)
671 
672 bool is_ice_supported(struct rte_eth_dev *dev);
673 int
674 ice_load_pkg(struct ice_adapter *adapter, bool use_dsn, uint64_t dsn);
675 struct ice_vsi *
676 ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type);
677 int
678 ice_release_vsi(struct ice_vsi *vsi);
679 void ice_vsi_enable_queues_intr(struct ice_vsi *vsi);
680 void ice_vsi_disable_queues_intr(struct ice_vsi *vsi);
681 void ice_vsi_queues_bind_intr(struct ice_vsi *vsi);
682 int ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
683 			 struct ice_rss_hash_cfg *cfg);
684 int ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
685 			 struct ice_rss_hash_cfg *cfg);
686 void ice_tm_conf_init(struct rte_eth_dev *dev);
687 void ice_tm_conf_uninit(struct rte_eth_dev *dev);
688 extern const struct rte_tm_ops ice_tm_ops;
689 
/**
 * Round @n down to the nearest power of two.
 *
 * Returns 0 when @n is 0, otherwise the largest power of two <= @n.
 * Callers are expected to pass non-negative values.
 */
static inline int
ice_align_floor(int n)
{
	int pow2 = 1;

	if (n == 0)
		return 0;
	while (n > 1) {
		n >>= 1;
		pow2 <<= 1;
	}
	return pow2;
}
697 
698 #define ICE_PHY_TYPE_SUPPORT_50G(phy_type) \
699 	(((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_CR2) || \
700 	((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_SR2) || \
701 	((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_LR2) || \
702 	((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_KR2) || \
703 	((phy_type) & ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC) || \
704 	((phy_type) & ICE_PHY_TYPE_LOW_50G_LAUI2) || \
705 	((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC) || \
706 	((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI2) || \
707 	((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_CP) || \
708 	((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_SR) || \
709 	((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_FR) || \
710 	((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_LR) || \
711 	((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) || \
712 	((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC) || \
713 	((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI1))
714 
715 #define ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type) \
716 	(((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_CR4) || \
717 	((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_SR4) || \
718 	((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_LR4) || \
719 	((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_KR4) || \
720 	((phy_type) & ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC) || \
721 	((phy_type) & ICE_PHY_TYPE_LOW_100G_CAUI4) || \
722 	((phy_type) & ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC) || \
723 	((phy_type) & ICE_PHY_TYPE_LOW_100G_AUI4) || \
724 	((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4) || \
725 	((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4) || \
726 	((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_CP2) || \
727 	((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_SR2) || \
728 	((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_DR))
729 
730 #define ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type) \
731 	(((phy_type) & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) || \
732 	((phy_type) & ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC) || \
733 	((phy_type) & ICE_PHY_TYPE_HIGH_100G_CAUI2) || \
734 	((phy_type) & ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC) || \
735 	((phy_type) & ICE_PHY_TYPE_HIGH_100G_AUI2))
736 
737 #define ICE_PHY_TYPE_SUPPORT_200G_HIGH(phy_type) \
738 	(((phy_type) & ICE_PHY_TYPE_HIGH_200G_CR4_PAM4) || \
739 	((phy_type) & ICE_PHY_TYPE_HIGH_200G_SR4) || \
740 	((phy_type) & ICE_PHY_TYPE_HIGH_200G_FR4) || \
741 	((phy_type) & ICE_PHY_TYPE_HIGH_200G_LR4) || \
742 	((phy_type) & ICE_PHY_TYPE_HIGH_200G_DR4) || \
743 	((phy_type) & ICE_PHY_TYPE_HIGH_200G_KR4_PAM4) || \
744 	((phy_type) & ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC) || \
745 	((phy_type) & ICE_PHY_TYPE_HIGH_200G_AUI4) || \
746 	((phy_type) & ICE_PHY_TYPE_HIGH_200G_AUI8_AOC_ACC) || \
747 	((phy_type) & ICE_PHY_TYPE_HIGH_200G_AUI8))
748 
749 __rte_experimental
750 int rte_pmd_ice_dump_package(uint16_t port, uint8_t **buff, uint32_t *size);
751 
752 __rte_experimental
753 int rte_pmd_ice_dump_switch(uint16_t port, uint8_t **buff, uint32_t *size);
754 
755 __rte_experimental
756 int rte_pmd_ice_dump_txsched(uint16_t port, bool detail, FILE *stream);
757 
758 int
759 ice_tm_setup_txq_node(struct ice_pf *pf, struct ice_hw *hw, uint16_t qid, uint32_t node_teid);
760 
761 #endif /* _ICE_ETHDEV_H_ */
762