/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#ifndef __CNXK_ETHDEV_H__
#define __CNXK_ETHDEV_H__

#include <math.h>
#include <stdint.h>

#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_compat.h>
#include <rte_kvargs.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mempool.h>
#include <rte_mtr_driver.h>
#include <rte_security.h>
#include <rte_security_driver.h>
#include <rte_tailq.h>
#include <rte_time.h>
#include <rte_tm_driver.h>

#include "roc_api.h"
#include <cnxk_ethdev_dp.h>

#define CNXK_ETH_DEV_PMD_VERSION "1.0"

/* Used for struct cnxk_eth_dev::flags */
#define CNXK_LINK_CFG_IN_PROGRESS_F BIT_ULL(0)

/* VLAN tag inserted by NIX_TX_VTAG_ACTION.
 * In Tx, space for this is always reserved in the FRS.
 */
#define CNXK_NIX_MAX_VTAG_INS	   2
#define CNXK_NIX_MAX_VTAG_ACT_SIZE (4 * CNXK_NIX_MAX_VTAG_INS)

/* ETH_HLEN+ETH_FCS+2*VLAN_HLEN */
#define CNXK_NIX_L2_OVERHEAD (RTE_ETHER_HDR_LEN + \
			      RTE_ETHER_CRC_LEN + \
			      CNXK_NIX_MAX_VTAG_ACT_SIZE)
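
/*
 * Illustrative sketch (not part of this header): the overhead above is what
 * the driver adds on top of the MTU when sizing the hardware frame, e.g. a
 * 1500 B MTU plus 14 B Ethernet header, 4 B FCS and 2 x 4 B VLAN tags:
 *
 *   uint32_t frame_sz = 1500 + CNXK_NIX_L2_OVERHEAD; // 1500 + 14 + 4 + 8 = 1526
 */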

#define CNXK_NIX_RX_MIN_DESC	    16
#define CNXK_NIX_RX_MIN_DESC_ALIGN  16
#define CNXK_NIX_RX_NB_SEG_MAX	    6
#define CNXK_NIX_RX_DEFAULT_RING_SZ 4096
/* Max supported SQB count */
#define CNXK_NIX_TX_MAX_SQB 512
/* LPB & SPB */
#define CNXK_NIX_NUM_POOLS_MAX 2

#define CNXK_NIX_DEF_SQ_COUNT	512

#define CNXK_NIX_RSS_L3_L4_SRC_DST                                             \
	(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY |                   \
	 RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)

#define CNXK_NIX_RSS_OFFLOAD                                                   \
	(RTE_ETH_RSS_PORT | RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |                 \
	 RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_TUNNEL |             \
	 RTE_ETH_RSS_L2_PAYLOAD | CNXK_NIX_RSS_L3_L4_SRC_DST |                 \
	 RTE_ETH_RSS_LEVEL_MASK | RTE_ETH_RSS_C_VLAN)
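
/*
 * Illustrative sketch (application side, not part of this header): an
 * application requests a subset of CNXK_NIX_RSS_OFFLOAD through the standard
 * ethdev configuration; rss_hf bits outside this mask are expected to be
 * rejected at configure time:
 *
 *   struct rte_eth_conf conf = {0};
 *
 *   conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
 *   conf.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP;
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */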

#define CNXK_NIX_TX_OFFLOAD_CAPA                                               \
	(RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |          \
	 RTE_ETH_TX_OFFLOAD_VLAN_INSERT | RTE_ETH_TX_OFFLOAD_QINQ_INSERT |             \
	 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |    \
	 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM |                 \
	 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_TSO |                  \
	 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |        \
	 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_MULTI_SEGS |              \
	 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_SECURITY)

#define CNXK_NIX_RX_OFFLOAD_CAPA                                               \
	(RTE_ETH_RX_OFFLOAD_CHECKSUM | RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |         \
	 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_SCATTER |    \
	 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_RSS_HASH |    \
	 RTE_ETH_RX_OFFLOAD_TIMESTAMP | RTE_ETH_RX_OFFLOAD_VLAN_STRIP |        \
	 RTE_ETH_RX_OFFLOAD_SECURITY)

#define RSS_IPV4_ENABLE                                                        \
	(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |                            \
	 RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV4_TCP |         \
	 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)

#define RSS_IPV6_ENABLE                                                        \
	(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |                            \
	 RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_NONFRAG_IPV6_TCP |         \
	 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)

#define RSS_IPV6_EX_ENABLE                                                     \
	(RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_IPV6_TCP_EX | RTE_ETH_RSS_IPV6_UDP_EX)

#define RSS_MAX_LEVELS 3

#define RSS_IPV4_INDEX 0
#define RSS_IPV6_INDEX 1
#define RSS_TCP_INDEX  2
#define RSS_UDP_INDEX  3
#define RSS_SCTP_INDEX 4
#define RSS_DMAC_INDEX 5

/* Max supported traffic meter count */
#define CNXK_NIX_MTR_COUNT_MAX	      73 /* 64(leaf) + 8(mid) + 1(top) */

/* Default cycle counter mask */
#define CNXK_CYCLECOUNTER_MASK     0xffffffffffffffffULL

/* Fastpath lookup */
#define CNXK_NIX_FASTPATH_LOOKUP_MEM "cnxk_nix_fastpath_lookup_mem"

struct cnxk_fc_cfg {
	enum rte_eth_fc_mode mode;
	uint8_t rx_pause;
	uint8_t tx_pause;
};

struct cnxk_pfc_cfg {
	uint16_t class_en;
	uint16_t pause_time;
	uint16_t rx_pause_en;
	uint16_t tx_pause_en;
};

struct cnxk_eth_qconf {
	union {
		struct rte_eth_txconf tx;
		struct rte_eth_rxconf rx;
	} conf;
	struct rte_mempool *mp;
	uint16_t nb_desc;
	uint8_t valid;
};

struct cnxk_meter_node {
#define MAX_PRV_MTR_NODES 10
	TAILQ_ENTRY(cnxk_meter_node) next;
	/**< Pointer to the next flow meter structure. */
	uint32_t id; /**< Usr mtr id. */
	struct cnxk_mtr_profile_node *profile;
	struct cnxk_mtr_policy_node *policy;
	uint32_t bpf_id; /**< Hw mtr id. */
	uint32_t rq_num;
	uint32_t *rq_id;
	uint16_t level;
	uint32_t prev_id[MAX_PRV_MTR_NODES]; /**< Prev mtr id for chaining */
	uint32_t prev_cnt;
	uint32_t next_id; /**< Next mtr id for chaining */
	bool is_prev;
	bool is_next;
	struct rte_mtr_params params;
	struct roc_nix_bpf_objs profs;
	bool is_used;
	uint32_t ref_cnt;
};

struct action_rss {
	enum rte_eth_hash_function func;
	uint32_t level;
	uint64_t types;
	uint32_t key_len;
	uint32_t queue_num;
	uint8_t *key;
	uint16_t *queue;
};

struct policy_actions {
	uint32_t action_fate;
	union {
		uint16_t queue;
		uint32_t mtr_id;
		struct action_rss *rss_desc;
		bool skip_red;
	};
};

struct cnxk_mtr_policy_node {
	TAILQ_ENTRY(cnxk_mtr_policy_node) next;
	/**< Pointer to the next flow meter structure. */
	uint32_t id;	 /**< Policy id */
	uint32_t mtr_id; /**< Meter id */
	struct rte_mtr_meter_policy_params policy;
	struct policy_actions actions[RTE_COLORS];
	uint32_t ref_cnt;
};

struct cnxk_mtr_profile_node {
	TAILQ_ENTRY(cnxk_mtr_profile_node) next;
	struct rte_mtr_meter_profile profile; /**< Profile detail. */
	uint32_t ref_cnt;		      /**< Use count. */
	uint32_t id;			      /**< Profile id. */
};

TAILQ_HEAD(cnxk_mtr_profiles, cnxk_mtr_profile_node);
TAILQ_HEAD(cnxk_mtr_policy, cnxk_mtr_policy_node);
TAILQ_HEAD(cnxk_mtr, cnxk_meter_node);

/* Security session private data */
struct cnxk_eth_sec_sess {
	/* List entry */
	TAILQ_ENTRY(cnxk_eth_sec_sess) entry;

	/* Inbound SA is from NIX_RX_IPSEC_SA_BASE or
	 * Outbound SA from roc_nix_inl_outb_sa_base_get()
	 */
	void *sa;

	/* SA index */
	uint32_t sa_idx;

	/* SPI */
	uint32_t spi;

	/* Back pointer to session */
	struct rte_security_session *sess;

	/* Inbound */
	bool inb;

	/* Inbound session on inl dev */
	bool inl_dev;

	/* Out-Of-Place processing */
	bool inb_oop;
};

TAILQ_HEAD(cnxk_eth_sec_sess_list, cnxk_eth_sec_sess);

/* Inbound security data */
struct cnxk_eth_dev_sec_inb {
	/* IPSec inbound min SPI */
	uint32_t min_spi;

	/* IPSec inbound max SPI */
	uint32_t max_spi;

	/* Using inbound with inline device */
	bool inl_dev;

	/* Device argument to disable inline device usage for inb */
	bool no_inl_dev;

	/* Active sessions */
	uint16_t nb_sess;

	/* List of sessions */
	struct cnxk_eth_sec_sess_list list;

	/* DPTR for WRITE_SA microcode op */
	void *sa_dptr;

	/* Number of oop sessions */
	uint16_t nb_oop;

	/* Reassembly enabled */
	bool reass_en;

	/* Lock to synchronize sa setup/release */
	rte_spinlock_t lock;

	/* Disable custom meta aura */
	bool custom_meta_aura_dis;

	/* Inline device CPT queue info */
	struct roc_nix_inl_dev_q *inl_dev_q;
};

/* Outbound security data */
struct cnxk_eth_dev_sec_outb {
	/* IPSec outbound max SA */
	uint16_t max_sa;

	/* Per CPT LF descriptor count */
	uint32_t nb_desc;

	/* SA Bitmap */
	struct plt_bitmap *sa_bmap;

	/* SA bitmap memory */
	void *sa_bmap_mem;

	/* SA base */
	uint64_t sa_base;

	/* CPT LF base */
	struct roc_cpt_lf *lf_base;

	/* Crypto queues => CPT lf count */
	uint16_t nb_crypto_qs;

	/* FC sw mem */
	uint64_t *fc_sw_mem;

	/* Active sessions */
	uint16_t nb_sess;

	/* List of sessions */
	struct cnxk_eth_sec_sess_list list;

	/* DPTR for WRITE_SA microcode op */
	void *sa_dptr;

	/* Lock to synchronize sa setup/release */
	rte_spinlock_t lock;

	/* Engine caps */
	uint64_t cpt_eng_caps;
};

/* MACsec session private data */
struct cnxk_macsec_sess {
	/* List entry */
	TAILQ_ENTRY(cnxk_macsec_sess) entry;

	/* Back pointer to session */
	struct rte_security_session *sess;
	enum mcs_direction dir;
	uint64_t sci;
	uint8_t secy_id;
	uint8_t sc_id;
	uint8_t flow_id;
};
TAILQ_HEAD(cnxk_macsec_sess_list, cnxk_macsec_sess);

struct cnxk_eth_dev {
	/* ROC NIX */
	struct roc_nix nix;

	/* ROC NPC */
	struct roc_npc npc;

	/* ROC RQs, SQs and CQs */
	struct roc_nix_rq *rqs;
	struct roc_nix_sq *sqs;
	struct roc_nix_cq *cqs;

	/* Configured queue count */
	uint16_t nb_rxq;
	uint16_t nb_txq;
	uint16_t nb_rxq_sso;
	uint8_t configured;

	/* Max macfilter entries */
	uint8_t dmac_filter_count;
	uint8_t max_mac_entries;
	bool dmac_filter_enable;
	int *dmac_idx_map;

	uint16_t flags;
	uint8_t ptype_disable;
	bool scalar_ena;
	bool tx_compl_ena;
	bool tx_mark;
	bool ptp_en;

	/* Pointer back to rte */
	struct rte_eth_dev *eth_dev;

	/* HW capabilities / Limitations */
	union {
		struct {
			uint64_t cq_min_4k : 1;
			uint64_t ipsecd_drop_re_dis : 1;
			uint64_t vec_drop_re_dis : 1;
		};
		uint64_t hwcap;
	};

	/* Rx and Tx offload capabilities */
	uint64_t rx_offload_capa;
	uint64_t tx_offload_capa;
	uint32_t speed_capa;
	/* Configured Rx and Tx offloads */
	uint64_t rx_offloads;
	uint64_t tx_offloads;
	/* Platform specific offload flags */
	uint16_t rx_offload_flags;
	uint16_t tx_offload_flags;

	/* ETHDEV RSS HF bitmask */
	uint64_t ethdev_rss_hf;

	/* Saved qconf before lf realloc */
	struct cnxk_eth_qconf *tx_qconf;
	struct cnxk_eth_qconf *rx_qconf;

	/* Flow control configuration */
	struct cnxk_pfc_cfg pfc_cfg;
	struct cnxk_fc_cfg fc_cfg;

	/* PTP Counters */
	struct cnxk_timesync_info tstamp;
	struct rte_timecounter systime_tc;
	struct rte_timecounter rx_tstamp_tc;
	struct rte_timecounter tx_tstamp_tc;
	double clk_freq_mult;
	uint64_t clk_delta;

	/* Ingress policer */
	enum roc_nix_bpf_color precolor_tbl[ROC_NIX_BPF_PRECOLOR_TBL_SIZE_DSCP];
	enum rte_mtr_color_in_protocol proto;
	struct cnxk_mtr_profiles mtr_profiles;
	struct cnxk_mtr_policy mtr_policy;
	struct cnxk_mtr mtr;

	/* Congestion Management */
	struct rte_eth_cman_config cman_cfg;

	/* Rx burst for cleanup (only primary) */
	eth_rx_burst_t rx_pkt_burst_no_offload;

	/* Default mac address */
	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];

	/* LSO Tunnel format indices */
	uint64_t lso_tun_fmt;

	/* Per queue statistics counters */
	uint32_t txq_stat_map[RTE_ETHDEV_QUEUE_STAT_CNTRS];
	uint32_t rxq_stat_map[RTE_ETHDEV_QUEUE_STAT_CNTRS];

	/* Security data */
	struct cnxk_eth_dev_sec_inb inb;
	struct cnxk_eth_dev_sec_outb outb;

	/* Reassembly dynfield/flag offsets */
	int reass_dynfield_off;
	int reass_dynflag_bit;

	/* MCS device */
	struct cnxk_mcs_dev *mcs_dev;
	struct cnxk_macsec_sess_list mcs_list;

	/* Inject packets */
	struct cnxk_ethdev_inj_cfg inj_cfg;

	/* Eswitch domain ID */
	uint16_t switch_domain_id;

	/* SSO event dev */
	void *evdev_priv;

	/* SSO event dev PTP timestamp callback */
	void (*cnxk_sso_ptp_tstamp_cb)
	     (uint16_t port_id, uint16_t flags, bool ptp_en);
};

struct cnxk_eth_rxq_sp {
	struct cnxk_eth_dev *dev;
	struct cnxk_eth_qconf qconf;
	uint16_t qid;
	uint8_t tx_pause;
	uint8_t tc;
} __plt_cache_aligned;

struct cnxk_eth_txq_sp {
	struct cnxk_eth_dev *dev;
	struct cnxk_eth_qconf qconf;
	uint16_t qid;
} __plt_cache_aligned;

static inline struct cnxk_eth_dev *
cnxk_eth_pmd_priv(const struct rte_eth_dev *eth_dev)
{
	return eth_dev->data->dev_private;
}

static inline struct cnxk_eth_rxq_sp *
cnxk_eth_rxq_to_sp(void *__rxq)
{
	return ((struct cnxk_eth_rxq_sp *)__rxq) - 1;
}

static inline struct cnxk_eth_txq_sp *
cnxk_eth_txq_to_sp(void *__txq)
{
	return ((struct cnxk_eth_txq_sp *)__txq) - 1;
}
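
/*
 * Layout assumption behind the "- 1" pointer arithmetic above (sketch, not a
 * definitive description): each slow-path context is expected to sit
 * immediately before the fast-path queue structure in the same allocation,
 * so stepping back one element recovers it from the datapath handle:
 *
 *   [ struct cnxk_eth_rxq_sp ][ fast-path RX queue ... ]
 *   ^                         ^__ pointer handed to the datapath (__rxq)
 *   |__ cnxk_eth_rxq_to_sp(__rxq)
 *
 *   struct cnxk_eth_rxq_sp *sp = cnxk_eth_rxq_to_sp(rxq);
 *   struct cnxk_eth_dev *dev = sp->dev;  // back-reference to the port
 */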

static inline int
cnxk_nix_tx_queue_count(uint64_t *mem, uint16_t sqes_per_sqb_log2)
{
	uint64_t val;

	val = rte_atomic_load_explicit((RTE_ATOMIC(uint64_t)*)mem, rte_memory_order_relaxed);
	val = (val << sqes_per_sqb_log2) - val;

	return (val & 0xFFFF);
}
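
/*
 * Worked example (semantics inferred from the expression above, not stated
 * by this header): *mem is treated as the number of SQBs in use and each SQB
 * holds (2^sqes_per_sqb_log2 - 1) usable SQEs, so the estimate is
 * val * (2^sqes_per_sqb_log2 - 1). With 3 SQBs and sqes_per_sqb_log2 = 5:
 *
 *   (3 << 5) - 3 = 96 - 3 = 93 descriptors reported in use
 */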

static inline int
cnxk_nix_tx_queue_sec_count(uint64_t *mem, uint16_t sqes_per_sqb_log2, uint64_t *sec_fc)
{
	uint64_t sq_cnt, sec_cnt, val;

	sq_cnt = rte_atomic_load_explicit((RTE_ATOMIC(uint64_t)*)mem, rte_memory_order_relaxed);
	sq_cnt = (sq_cnt << sqes_per_sqb_log2) - sq_cnt;
	sec_cnt = rte_atomic_load_explicit((RTE_ATOMIC(uint64_t)*)sec_fc,
					   rte_memory_order_relaxed);
	val = RTE_MAX(sq_cnt, sec_cnt);

	return (val & 0xFFFF);
}

static inline int
cnxk_nix_inl_fc_check(uint64_t *fc, int32_t *fc_sw, uint32_t nb_desc, uint16_t nb_inst)
{
	uint8_t retry_count = 32;
	int32_t val, newval;

	/* Check if there is any CPT instruction to submit */
	if (!nb_inst)
		return -EINVAL;

retry:
	val = rte_atomic_fetch_sub_explicit((RTE_ATOMIC(int32_t)*)fc_sw, nb_inst,
					    rte_memory_order_relaxed) - nb_inst;
	if (likely(val >= 0))
		return 0;

	newval = (int64_t)nb_desc - rte_atomic_load_explicit((RTE_ATOMIC(uint64_t)*)fc,
							     rte_memory_order_relaxed);
	newval -= nb_inst;

	if (!rte_atomic_compare_exchange_strong_explicit((RTE_ATOMIC(int32_t)*)fc_sw, &val, newval,
							 rte_memory_order_release,
							 rte_memory_order_relaxed)) {
		if (retry_count) {
			retry_count--;
			goto retry;
		} else {
			return -EAGAIN;
		}
	}
	if (unlikely(newval < 0))
		return -EAGAIN;

	return 0;
}
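
/*
 * Illustrative sketch (hypothetical caller, field names invented for the
 * example): a Tx path submitting nb_inst inline-IPsec instructions would
 * first reserve room against the software flow-control counter:
 *
 *   if (cnxk_nix_inl_fc_check(txq->cpt_fc, &txq->cpt_fc_sw,
 *                             txq->cpt_desc, nb_inst))
 *           return 0;  // no room or CAS contention, retry later
 *   // ... enqueue nb_inst instructions to the CPT LF ...
 *
 * On the slow path the helper re-derives the free count as
 * nb_desc - *fc (hardware counter) via compare-and-swap, retrying the
 * reservation up to 32 times before giving up with -EAGAIN.
 */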

/* Common ethdev ops */
extern struct eth_dev_ops cnxk_eth_dev_ops;

/* Common flow ops */
extern struct rte_flow_ops cnxk_flow_ops;

/* Common security ops */
extern struct rte_security_ops cnxk_eth_sec_ops;

/* Common tm ops */
extern struct rte_tm_ops cnxk_tm_ops;

/* Platform specific rte pmd cnxk ops */
typedef uint16_t (*cnxk_inl_dev_submit_cb_t)(struct roc_nix_inl_dev_q *q, void *inst,
					     uint16_t nb_inst);

struct cnxk_ethdev_pmd_ops {
	cnxk_inl_dev_submit_cb_t inl_dev_submit;
};
extern struct cnxk_ethdev_pmd_ops cnxk_pmd_ops;
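
/*
 * Illustrative sketch (hypothetical hook-up, not mandated by this header): a
 * platform layer would point cnxk_pmd_ops at its own CPT submission routine:
 *
 *   static uint16_t
 *   my_inl_dev_submit(struct roc_nix_inl_dev_q *q, void *inst, uint16_t nb_inst)
 *   {
 *           // issue nb_inst prepared instructions to queue q
 *           return nb_inst;  // number actually submitted
 *   }
 *
 *   cnxk_pmd_ops.inl_dev_submit = my_inl_dev_submit;
 */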

/* Ops */
int cnxk_nix_probe(struct rte_pci_driver *pci_drv,
		   struct rte_pci_device *pci_dev);
int cnxk_nix_remove(struct rte_pci_device *pci_dev);
int cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu);
int cnxk_nix_sq_flush(struct rte_eth_dev *eth_dev);
int cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
				    struct rte_ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr);
int cnxk_nix_mac_addr_add(struct rte_eth_dev *eth_dev,
			  struct rte_ether_addr *addr, uint32_t index,
			  uint32_t pool);
void cnxk_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index);
int cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev,
			  struct rte_ether_addr *addr);
int cnxk_nix_promisc_enable(struct rte_eth_dev *eth_dev);
int cnxk_nix_promisc_disable(struct rte_eth_dev *eth_dev);
int cnxk_nix_allmulticast_enable(struct rte_eth_dev *eth_dev);
int cnxk_nix_allmulticast_disable(struct rte_eth_dev *eth_dev);
int cnxk_nix_info_get(struct rte_eth_dev *eth_dev,
		      struct rte_eth_dev_info *dev_info);
int cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
			       struct rte_eth_burst_mode *mode);
int cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
			       struct rte_eth_burst_mode *mode);
int cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
			   struct rte_eth_fc_conf *fc_conf);
int cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
			   struct rte_eth_fc_conf *fc_conf);
int cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
					     struct rte_eth_pfc_queue_conf *pfc_conf);
int cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
					       struct rte_eth_pfc_queue_info *pfc_info);
int cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev);
int cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev);
int cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
			     struct rte_eth_dev_module_info *modinfo);
int cnxk_nix_get_module_eeprom(struct rte_eth_dev *eth_dev,
			       struct rte_dev_eeprom_info *info);
int cnxk_nix_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
				  uint16_t rx_queue_id);
int cnxk_nix_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
				   uint16_t rx_queue_id);
int cnxk_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool);
int cnxk_nix_tx_done_cleanup(void *txq, uint32_t free_cnt);
int cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
			  const struct rte_flow_ops **ops);
int cnxk_nix_configure(struct rte_eth_dev *eth_dev);
int cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			    uint16_t nb_desc, uint16_t fp_tx_q_sz,
			    const struct rte_eth_txconf *tx_conf);
int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			    uint32_t nb_desc, uint16_t fp_rx_q_sz,
			    const struct rte_eth_rxconf *rx_conf,
			    struct rte_mempool *mp);
int cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid);
void cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid);
int cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid);
int cnxk_nix_dev_start(struct rte_eth_dev *eth_dev);
int cnxk_nix_timesync_enable(struct rte_eth_dev *eth_dev);
int cnxk_nix_timesync_disable(struct rte_eth_dev *eth_dev);
int cnxk_nix_timesync_read_rx_timestamp(struct rte_eth_dev *eth_dev,
					struct timespec *timestamp,
					uint32_t flags);
int cnxk_nix_timesync_read_tx_timestamp(struct rte_eth_dev *eth_dev,
					struct timespec *timestamp);
int cnxk_nix_timesync_read_time(struct rte_eth_dev *eth_dev,
				struct timespec *ts);
int cnxk_nix_timesync_write_time(struct rte_eth_dev *eth_dev,
				 const struct timespec *ts);
int cnxk_nix_timesync_adjust_time(struct rte_eth_dev *eth_dev, int64_t delta);
int cnxk_nix_tsc_convert(struct cnxk_eth_dev *dev);
int cnxk_nix_read_clock(struct rte_eth_dev *eth_dev, uint64_t *clock);

uint64_t cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev);
int cnxk_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *ops);
int cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
				     uint16_t queue_idx, uint32_t tx_rate);
int cnxk_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
			      int mark_yellow, int mark_red,
			      struct rte_tm_error *error);
int cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
			    int mark_yellow, int mark_red,
			    struct rte_tm_error *error);
int cnxk_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
			     int mark_yellow, int mark_red,
			     struct rte_tm_error *error);
int cnxk_nix_tx_descriptor_dump(const struct rte_eth_dev *eth_dev, uint16_t qid, uint16_t offset,
				uint16_t num, FILE *file);

/* MTR */
int cnxk_nix_mtr_ops_get(struct rte_eth_dev *dev, void *ops);

/* RSS */
uint32_t cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
				uint8_t rss_level);
int cnxk_nix_reta_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size);
int cnxk_nix_reta_query(struct rte_eth_dev *eth_dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
int cnxk_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
			     struct rte_eth_rss_conf *rss_conf);
int cnxk_nix_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
			       struct rte_eth_rss_conf *rss_conf);
int cnxk_nix_eth_dev_priv_dump(struct rte_eth_dev *eth_dev, FILE *file);

/* Link */
void cnxk_nix_toggle_flag_link_cfg(struct cnxk_eth_dev *dev, bool set);
void cnxk_eth_dev_link_status_cb(struct roc_nix *nix,
				 struct roc_nix_link_info *link);
void cnxk_eth_dev_link_status_get_cb(struct roc_nix *nix,
				     struct roc_nix_link_info *link);
void cnxk_eth_dev_q_err_cb(struct roc_nix *nix, void *data);
int cnxk_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete);
int cnxk_nix_queue_stats_mapping(struct rte_eth_dev *dev, uint16_t queue_id,
				 uint8_t stat_idx, uint8_t is_rx);
int cnxk_nix_stats_reset(struct rte_eth_dev *dev);
int cnxk_nix_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
int cnxk_nix_xstats_get(struct rte_eth_dev *eth_dev,
			struct rte_eth_xstat *xstats, unsigned int n);
int cnxk_nix_xstats_get_names(struct rte_eth_dev *eth_dev,
			      struct rte_eth_xstat_name *xstats_names,
			      unsigned int limit);
int cnxk_nix_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
				    const uint64_t *ids,
				    struct rte_eth_xstat_name *xstats_names,
				    unsigned int limit);
int cnxk_nix_xstats_get_by_id(struct rte_eth_dev *eth_dev, const uint64_t *ids,
			      uint64_t *values, unsigned int n);
int cnxk_nix_xstats_reset(struct rte_eth_dev *eth_dev);
int cnxk_nix_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
			    size_t fw_size);
void cnxk_nix_rxq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
			   struct rte_eth_rxq_info *qinfo);
void cnxk_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
			   struct rte_eth_txq_info *qinfo);

/* Queue status */
int cnxk_nix_rx_descriptor_status(void *rxq, uint16_t offset);
int cnxk_nix_tx_descriptor_status(void *txq, uint16_t offset);
uint32_t cnxk_nix_rx_queue_count(void *rxq);

/* Lookup configuration */
const uint32_t *cnxk_nix_supported_ptypes_get(struct rte_eth_dev *eth_dev,
					      size_t *no_of_elements);
void *cnxk_nix_fastpath_lookup_mem_get(void);

/* Devargs */
int cnxk_ethdev_parse_devargs(struct rte_devargs *devargs,
			      struct cnxk_eth_dev *dev);

/* Debug */
int cnxk_nix_dev_get_reg(struct rte_eth_dev *eth_dev,
			 struct rte_dev_reg_info *regs);
/* Security */
int cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p,
			     uint32_t spi);
int cnxk_eth_outb_sa_idx_put(struct cnxk_eth_dev *dev, uint32_t idx);
int cnxk_nix_lookup_mem_sa_base_set(struct cnxk_eth_dev *dev);
int cnxk_nix_lookup_mem_sa_base_clear(struct cnxk_eth_dev *dev);
int cnxk_nix_lookup_mem_metapool_set(struct cnxk_eth_dev *dev);
int cnxk_nix_lookup_mem_metapool_clear(struct cnxk_eth_dev *dev);
__rte_internal
int cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev);
typedef void (*cnxk_ethdev_rx_offload_cb_t)(uint16_t port_id, uint64_t flags);
__rte_internal
void cnxk_ethdev_rx_offload_cb_register(cnxk_ethdev_rx_offload_cb_t cb);

struct cnxk_eth_sec_sess *cnxk_eth_sec_sess_get_by_spi(struct cnxk_eth_dev *dev,
						       uint32_t spi, bool inb);
struct cnxk_eth_sec_sess *
cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
			      struct rte_security_session *sess);
int cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_sz,
			      uint32_t nb_bufs, bool destroy, const char *mempool_name);
int cnxk_nix_inl_custom_meta_pool_cb(uintptr_t pmpool, uintptr_t *mpool, const char *mempool_name,
				     uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bufs,
				     bool destroy);

/* Congestion Management */
int cnxk_nix_cman_info_get(struct rte_eth_dev *dev, struct rte_eth_cman_info *info);

int cnxk_nix_cman_config_init(struct rte_eth_dev *dev, struct rte_eth_cman_config *config);

int cnxk_nix_cman_config_set(struct rte_eth_dev *dev, const struct rte_eth_cman_config *config);

int cnxk_nix_cman_config_get(struct rte_eth_dev *dev, struct rte_eth_cman_config *config);

int cnxk_mcs_dev_init(struct cnxk_eth_dev *dev, uint8_t mcs_idx);
void cnxk_mcs_dev_fini(struct cnxk_eth_dev *dev);

struct cnxk_macsec_sess *cnxk_eth_macsec_sess_get_by_sess(struct cnxk_eth_dev *dev,
							  const struct rte_security_session *sess);
int cnxk_mcs_flow_configure(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
			    const struct rte_flow_item pattern[],
			    const struct rte_flow_action actions[], struct rte_flow_error *error,
			    void **mcs_flow);
int cnxk_mcs_flow_destroy(struct cnxk_eth_dev *eth_dev, void *mcs_flow);

/* Other private functions */
int nix_recalc_mtu(struct rte_eth_dev *eth_dev);
int nix_mtr_validate(struct rte_eth_dev *dev, uint32_t id);
int nix_mtr_policy_act_get(struct rte_eth_dev *eth_dev, uint32_t id,
			   struct cnxk_mtr_policy_node **policy);
int nix_mtr_rq_update(struct rte_eth_dev *eth_dev, uint32_t id,
		      uint32_t queue_num, const uint16_t *queue);
int nix_mtr_chain_update(struct rte_eth_dev *eth_dev, uint32_t cur_id,
			 uint32_t prev_id, uint32_t next_id);
int nix_mtr_chain_reset(struct rte_eth_dev *eth_dev, uint32_t cur_id);
struct cnxk_meter_node *nix_get_mtr(struct rte_eth_dev *eth_dev,
				    uint32_t cur_id);
int nix_mtr_level_update(struct rte_eth_dev *eth_dev, uint32_t id,
			 uint32_t level);
int nix_mtr_capabilities_init(struct rte_eth_dev *eth_dev);
int nix_mtr_configure(struct rte_eth_dev *eth_dev, uint32_t id);
int nix_mtr_connect(struct rte_eth_dev *eth_dev, uint32_t id);
int nix_mtr_destroy(struct rte_eth_dev *eth_dev, uint32_t id,
		    struct rte_mtr_error *error);
int nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id,
				  uint32_t *prev_id, uint32_t *next_id,
				  struct cnxk_mtr_policy_node *policy,
				  int *tree_level);
int nix_priority_flow_ctrl_rq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
				   uint8_t tx_pause, uint8_t tc);
int nix_priority_flow_ctrl_sq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
				   uint8_t rx_pause, uint8_t tc);

#endif /* __CNXK_ETHDEV_H__ */