xref: /dpdk/drivers/net/enic/enic.h (revision 00ce43111dc5b364722c882cdd37d3664d87b6cc)
12e99ea80SHyong Youb Kim /* SPDX-License-Identifier: BSD-3-Clause
22e99ea80SHyong Youb Kim  * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
372f3de30SBruce Richardson  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
472f3de30SBruce Richardson  */
572f3de30SBruce Richardson 
672f3de30SBruce Richardson #ifndef _ENIC_H_
772f3de30SBruce Richardson #define _ENIC_H_
872f3de30SBruce Richardson 
9512d873fSFlavia Musatescu #include <rte_vxlan.h>
10846ac76cSJohn Daley #include <rte_ether.h>
1172f3de30SBruce Richardson #include "vnic_enet.h"
1272f3de30SBruce Richardson #include "vnic_dev.h"
13ea7768b5SHyong Youb Kim #include "vnic_flowman.h"
1472f3de30SBruce Richardson #include "vnic_wq.h"
1572f3de30SBruce Richardson #include "vnic_rq.h"
1672f3de30SBruce Richardson #include "vnic_cq.h"
1772f3de30SBruce Richardson #include "vnic_intr.h"
1872f3de30SBruce Richardson #include "vnic_stats.h"
1972f3de30SBruce Richardson #include "vnic_nic.h"
2072f3de30SBruce Richardson #include "vnic_rss.h"
2172f3de30SBruce Richardson #include "enic_res.h"
22947d860cSJohn Daley #include "cq_enet_desc.h"
2393fb21fdSHyong Youb Kim #include <stdbool.h>
24da5f560bSNelson Escobar #include <sys/queue.h>
25da5f560bSNelson Escobar #include <rte_spinlock.h>
2672f3de30SBruce Richardson 
2772f3de30SBruce Richardson #define DRV_NAME		"enic_pmd"
2872f3de30SBruce Richardson #define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Poll-mode Driver"
2972f3de30SBruce Richardson #define DRV_COPYRIGHT		"Copyright 2008-2015 Cisco Systems, Inc"
3072f3de30SBruce Richardson 
3172f3de30SBruce Richardson #define VLAN_ETH_HLEN           18
3272f3de30SBruce Richardson 
3372f3de30SBruce Richardson #define ENICPMD_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
3472f3de30SBruce Richardson 
3572f3de30SBruce Richardson #define ENIC_CALC_IP_CKSUM      1
3672f3de30SBruce Richardson #define ENIC_CALC_TCP_UDP_CKSUM 2
3772f3de30SBruce Richardson #define ENIC_MAX_MTU            9000
3872f3de30SBruce Richardson #define ENIC_PAGE_SIZE          4096
3972f3de30SBruce Richardson #define PAGE_ROUND_UP(x) \
4072f3de30SBruce Richardson 	((((unsigned long)(x)) + ENIC_PAGE_SIZE-1) & (~(ENIC_PAGE_SIZE-1)))
411b4ce87dSJohn Daley 
4272f3de30SBruce Richardson #define ENICPMD_VFIO_PATH          "/dev/vfio/vfio"
4372f3de30SBruce Richardson /*#define ENIC_DESC_COUNT_MAKE_ODD (x) do{if ((~(x)) & 1) { (x)--; } }while(0)*/
4472f3de30SBruce Richardson 
4572f3de30SBruce Richardson #define PCI_DEVICE_ID_CISCO_VIC_ENET         0x0043  /* ethernet vnic */
4657bb45b3SJohn Daley /* enet SRIOV Standalone vNic VF */
4757bb45b3SJohn Daley #define PCI_DEVICE_ID_CISCO_VIC_ENET_SN      0x02B7
4872f3de30SBruce Richardson 
496ced1376SJohn Daley /* Special Filter id for non-specific packet flagging. Don't change value */
506ced1376SJohn Daley #define ENIC_MAGIC_FILTER_ID 0xffff
5172f3de30SBruce Richardson 
520f872d31SHyong Youb Kim /*
53*00ce4311SHyong Youb Kim  * Interrupt 0: LSC and errors / VF admin channel RQ
540f872d31SHyong Youb Kim  * Interrupt 1: rx queue 0
550f872d31SHyong Youb Kim  * Interrupt 2: rx queue 1
560f872d31SHyong Youb Kim  * ...
570f872d31SHyong Youb Kim  */
580f872d31SHyong Youb Kim #define ENICPMD_LSC_INTR_OFFSET 0
590f872d31SHyong Youb Kim #define ENICPMD_RXQ_INTR_OFFSET 1
600f872d31SHyong Youb Kim 
/*
 * Counters maintained in software by the driver (not read from NIC
 * hardware stats). All are atomics since RX/TX paths update them.
 */
struct enic_soft_stats {
	rte_atomic64_t rx_nombuf;        /* presumably RX drops due to mbuf alloc failure — matches rte_eth_stats naming */
	rte_atomic64_t rx_packet_errors; /* RX packets counted as errored */
	rte_atomic64_t tx_oversized;     /* TX packets rejected for size — see ENIC_MAX_MTU; TODO confirm */
};
6665b5434dSJohn Daley 
/*
 * One node of the driver's bookkeeping list of rte_memzone allocations
 * (linked into struct enic's memzone_list, guarded by memzone_list_lock).
 */
struct enic_memzone_entry {
	const struct rte_memzone *rz;              /* the tracked allocation */
	LIST_ENTRY(enic_memzone_entry) entries;    /* sys/queue.h list linkage */
};
71da5f560bSNelson Escobar 
72ea7768b5SHyong Youb Kim /* Defined in enic_fm_flow.c */
73ea7768b5SHyong Youb Kim struct enic_flowman;
74ea7768b5SHyong Youb Kim struct enic_fm_flow;
75ea7768b5SHyong Youb Kim 
/*
 * Driver-private representation of an rte_flow. A flow is backed either
 * by the filter API (enic_flow.c) or by the flow manager (enic_fm_flow.c);
 * each backend uses its own fields below.
 */
struct rte_flow {
	LIST_ENTRY(rte_flow) next;    /* linkage in struct enic's flows list */
	/* Data for filter API based flow (enic_flow.c) */
	uint16_t enic_filter_id;
	struct filter_v2 enic_filter;
	/* Data for flow manager based flow (enic_fm_flow.c) */
	struct enic_fm_flow *fm;
	int internal;                 /* presumably a driver-created flow rather than app-created — TODO confirm */
};
856ced1376SJohn Daley 
8672f3de30SBruce Richardson /* Per-instance private data structure */
/* Per-instance private data structure */
struct enic {
	struct rte_pci_device *pdev;
	struct vnic_enet_config config;
	struct vnic_dev_bar bar0;
	struct vnic_dev *vdev;

	/*
	 * mbuf_initializer contains 64 bits of mbuf rearm_data, used by
	 * the avx2 handler at this time.
	 */
	uint64_t mbuf_initializer;
	unsigned int port_id;
	bool overlay_offload;
	struct rte_eth_dev *rte_dev;
	struct rte_eth_dev_data *dev_data;
	char bdf_name[PCI_PRI_STR_SIZE]; /* PCI address string form */
	int dev_fd;
	int iommu_group_fd;   /* presumably vfio related — see ENICPMD_VFIO_PATH; TODO confirm */
	int iommu_groupid;
	int eventfd;
	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
	pthread_t err_intr_thread;
	int promisc;
	int allmulti;
	uint8_t ig_vlan_strip_en;
	int link_status;
	uint8_t hw_ip_checksum;
	uint16_t max_mtu;
	uint8_t adv_filters;
	uint32_t flow_filter_mode;
	uint8_t filter_actions; /* HW supported actions */
	uint64_t cq_entry_sizes; /* supported CQ entry sizes */
	bool geneve;
	bool vxlan;
	bool cq64;            /* actually using 64B CQ entry */
	bool cq64_request;    /* devargs cq64=1 */
	bool disable_overlay; /* devargs disable_overlay=1 */
	uint8_t enable_avx2_rx;  /* devargs enable-avx2-rx=1 */
	uint8_t geneve_opt_request;  /* devargs geneve-opt=1 */
	bool nic_cfg_chk;     /* NIC_CFG_CHK available */
	bool udp_rss_weak;    /* Bodega style UDP RSS */
	uint8_t ig_vlan_rewrite_mode; /* devargs ig-vlan-rewrite */
	uint16_t geneve_port; /* current geneve port pushed to NIC */
	uint16_t vxlan_port;  /* current vxlan port pushed to NIC */
	int use_simple_tx_handler;
	int use_noscatter_vec_rx_handler;

	unsigned int flags;
	unsigned int priv_flags;

	/* work queue (len = conf_wq_count) */
	struct vnic_wq *wq;
	unsigned int wq_count; /* equals eth_dev nb_tx_queues */

	/* receive queue (len = conf_rq_count) */
	struct vnic_rq *rq;
	unsigned int rq_count; /* equals eth_dev nb_rx_queues */

	/* completion queue (len = conf_cq_count) */
	struct vnic_cq *cq;
	unsigned int cq_count; /* equals rq_count + wq_count */

	/* interrupt vectors (len = conf_intr_count) */
	struct vnic_intr *intr;
	unsigned int intr_count; /* equals enabled interrupts (lsc + rxqs) */

	/* software counters */
	struct enic_soft_stats soft_stats;

	/* VF admin channel queues (VF <-> PF; used when enic_is_vf()) */
	struct vnic_wq admin_wq;
	struct vnic_rq admin_rq;
	struct vnic_cq admin_cq[2]; /* indexed by ENIC_ADMIN_WQ_CQ / ENIC_ADMIN_RQ_CQ */

	/* configured resources on vic */
	unsigned int conf_rq_count;
	unsigned int conf_wq_count;
	unsigned int conf_cq_count;
	unsigned int conf_intr_count;
	/* SR-IOV VF has queues for admin channel to PF */
	unsigned int conf_admin_rq_count;
	unsigned int conf_admin_wq_count;
	unsigned int conf_admin_cq_count;
	uint64_t admin_chan_msg_num;
	int admin_chan_vf_id;
	uint32_t admin_pf_cap_version;
	bool admin_chan_enabled;
	bool sriov_vf_soft_rx_stats;
	bool sriov_vf_compat_mode; /* when set, VF behaves as before; see enic_is_vf() */
	pthread_mutex_t admin_chan_lock;

	/* linked list storing memory allocations */
	LIST_HEAD(enic_memzone_list, enic_memzone_entry) memzone_list;
	rte_spinlock_t memzone_list_lock;
	rte_spinlock_t mtu_lock;

	LIST_HEAD(enic_flows, rte_flow) flows;

	/* RSS */
	uint16_t reta_size;
	uint8_t hash_key_size;
	uint64_t flow_type_rss_offloads; /* 0 indicates RSS not supported */
	/*
	 * Keep a copy of current RSS config for queries, as we cannot retrieve
	 * it from the NIC.
	 */
	uint8_t rss_hash_type; /* NIC_CFG_RSS_HASH_TYPE flags */
	uint8_t rss_enable;
	uint64_t rss_hf; /* RTE_ETH_RSS flags */
	union vnic_rss_key rss_key;
	union vnic_rss_cpu rss_cpu;

	uint64_t rx_offload_capa; /* DEV_RX_OFFLOAD flags */
	uint64_t tx_offload_capa; /* DEV_TX_OFFLOAD flags */
	uint64_t tx_queue_offload_capa; /* DEV_TX_OFFLOAD flags */
	uint64_t tx_offload_mask; /* PKT_TX flags accepted */

	/* Multicast MAC addresses added to the NIC */
	uint32_t mc_count;
	struct rte_ether_addr mc_addrs[ENIC_MULTICAST_PERFECT_FILTERS];

	/* Flow manager API */
	struct enic_flowman *fm;
	uint64_t fm_vnic_handle;
	uint32_t fm_vnic_uif;
	/* switchdev */
	uint8_t switchdev_mode;
	uint16_t switch_domain_id;
	uint16_t max_vf_id;
	/* Number of queues needed for VF representor paths */
	uint32_t vf_required_wq;
	uint32_t vf_required_cq;
	uint32_t vf_required_rq;
	/*
	 * Lock to serialize devcmds from PF, VF representors as they all share
	 * the same PF devcmd instance in firmware.
	 */
	rte_spinlock_t devcmd_lock;
};
22572f3de30SBruce Richardson 
/*
 * Per-VF-representor private data. Embeds a struct enic as its first
 * member so generic code can recover the representor via container_of
 * (see VF_ENIC_TO_VF_REP). The representor datapath borrows queues from
 * the PF; the pf_* indices below identify them.
 */
struct enic_vf_representor {
	struct enic enic;               /* embedded; must stay first for VF_ENIC_TO_VF_REP */
	struct vnic_enet_config config;
	struct rte_eth_dev *eth_dev;
	struct rte_ether_addr mac_addr;
	struct rte_pci_addr bdf;
	struct enic *pf;                /* owning PF instance */
	uint16_t switch_domain_id;
	uint16_t vf_id;
	int allmulti;
	int promisc;
	/* Representor path uses PF queues. These are reserved during init */
	uint16_t pf_wq_idx;      /* WQ dedicated to VF rep */
	uint16_t pf_wq_cq_idx;   /* CQ for WQ */
	uint16_t pf_rq_sop_idx;  /* SOP RQ dedicated to VF rep */
	uint16_t pf_rq_data_idx; /* Data RQ */
	/* Representor flows managed by flowman */
	struct rte_flow *vf2rep_flow[2];
	struct rte_flow *rep2vf_flow[2];
};
24639cf83f1SHyong Youb Kim 
247*00ce4311SHyong Youb Kim #define ENIC_ADMIN_WQ_CQ 0
248*00ce4311SHyong Youb Kim #define ENIC_ADMIN_RQ_CQ 1
249*00ce4311SHyong Youb Kim #define ENIC_ADMIN_BUF_SIZE 1024
250*00ce4311SHyong Youb Kim 
251*00ce4311SHyong Youb Kim static inline bool enic_is_vf(struct enic *enic)
252*00ce4311SHyong Youb Kim {
253*00ce4311SHyong Youb Kim 	return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_SN &&
254*00ce4311SHyong Youb Kim 		!enic->sriov_vf_compat_mode;
255*00ce4311SHyong Youb Kim }
256*00ce4311SHyong Youb Kim 
25739cf83f1SHyong Youb Kim #define VF_ENIC_TO_VF_REP(vf_enic) \
25839cf83f1SHyong Youb Kim 	container_of(vf_enic, struct enic_vf_representor, enic)
25939cf83f1SHyong Youb Kim 
260422ba917SHyong Youb Kim /* Compute ethdev's max packet size from MTU */
261422ba917SHyong Youb Kim static inline uint32_t enic_mtu_to_max_rx_pktlen(uint32_t mtu)
262422ba917SHyong Youb Kim {
263c09eb935SHyong Youb Kim 	/* ethdev max size includes eth whereas NIC MTU does not */
26435b2d13fSOlivier Matz 	return mtu + RTE_ETHER_HDR_LEN;
265422ba917SHyong Youb Kim }
266422ba917SHyong Youb Kim 
/* Get the CQ index from a Start of Packet(SOP) RQ index */
static inline unsigned int enic_sop_rq_idx_to_cq_idx(unsigned int sop_idx)
{
	/* Identity: crI occupies CQ index I (see the queue layout comment) */
	return sop_idx;
}
272aa07bf8fSJohn Daley 
/* Get the RTE RQ index from a Start of Packet(SOP) RQ index */
static inline unsigned int enic_sop_rq_idx_to_rte_idx(unsigned int sop_idx)
{
	/* Identity: SOP RQs occupy indices 0..R-1, matching RTE numbering */
	return sop_idx;
}
278c3e09182SJohn Daley 
/* Get the Start of Packet(SOP) RQ index from a RTE RQ index */
static inline unsigned int enic_rte_rq_idx_to_sop_idx(unsigned int rte_idx)
{
	/* Identity: inverse of enic_sop_rq_idx_to_rte_idx() */
	return rte_idx;
}
284856d7ba7SNelson Escobar 
/* Get the Data RQ index from a RTE RQ index */
static inline unsigned int enic_rte_rq_idx_to_data_idx(unsigned int rte_idx,
						       struct enic *enic)
{
	/* Data RQs start right after the R SOP RQs: dI is at index R + I */
	return enic->rq_count + rte_idx;
}
291856d7ba7SNelson Escobar 
292856d7ba7SNelson Escobar static inline unsigned int enic_vnic_rq_count(struct enic *enic)
293856d7ba7SNelson Escobar {
294856d7ba7SNelson Escobar 	return enic->rq_count * 2;
295856d7ba7SNelson Escobar }
296856d7ba7SNelson Escobar 
/* Get the CQ index serving a given RQ; RQ CQs come first in the CQ array */
static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned int rq)
{
	/* Identity: crI occupies CQ index I (see the queue layout comment) */
	return rq;
}
30172f3de30SBruce Richardson 
30272f3de30SBruce Richardson static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
30372f3de30SBruce Richardson {
30472f3de30SBruce Richardson 	return enic->rq_count + wq;
30572f3de30SBruce Richardson }
30672f3de30SBruce Richardson 
307edd08548SHyong Youb Kim /*
308edd08548SHyong Youb Kim  * WQ, RQ, CQ allocation scheme. Firmware gives the driver an array of
 * WQs, an array of RQs, and an array of CQs. For now, these are
 * statically allocated between PF app send/receive queues and VF
 * representor app send/receive queues. VF representor supports only 1
 * send and 1 receive queue. The number of PF app queues is not known
313edd08548SHyong Youb Kim  * until the queue setup time.
314edd08548SHyong Youb Kim  *
315edd08548SHyong Youb Kim  * R = number of receive queues for PF app
316edd08548SHyong Youb Kim  * S = number of send queues for PF app
317edd08548SHyong Youb Kim  * V = number of VF representors
318edd08548SHyong Youb Kim  *
319edd08548SHyong Youb Kim  * wI = WQ for PF app send queue I
320edd08548SHyong Youb Kim  * rI = SOP RQ for PF app receive queue I
321edd08548SHyong Youb Kim  * dI = Data RQ for rI
322edd08548SHyong Youb Kim  * cwI = CQ for wI
323edd08548SHyong Youb Kim  * crI = CQ for rI
324edd08548SHyong Youb Kim  * vwI = WQ for VF representor send queue I
325edd08548SHyong Youb Kim  * vrI = SOP RQ for VF representor receive queue I
326edd08548SHyong Youb Kim  * vdI = Data RQ for vrI
327edd08548SHyong Youb Kim  * vcwI = CQ for vwI
328edd08548SHyong Youb Kim  * vcrI = CQ for vrI
329edd08548SHyong Youb Kim  *
330edd08548SHyong Youb Kim  * WQ array: | w0 |..| wS-1 |..| vwV-1 |..| vw0 |
331edd08548SHyong Youb Kim  *             ^         ^         ^         ^
332edd08548SHyong Youb Kim  *    index    0        S-1       W-V       W-1    W=len(WQ array)
333edd08548SHyong Youb Kim  *
334edd08548SHyong Youb Kim  * RQ array: | r0  |..| rR-1  |d0 |..|dR-1|  ..|vdV-1 |..| vd0 |vrV-1 |..|vr0 |
335edd08548SHyong Youb Kim  *             ^         ^     ^       ^         ^          ^     ^        ^
336edd08548SHyong Youb Kim  *    index    0        R-1    R      2R-1      X-2V    X-(V+1)  X-V      X-1
337edd08548SHyong Youb Kim  * X=len(RQ array)
338edd08548SHyong Youb Kim  *
339edd08548SHyong Youb Kim  * CQ array: | cr0 |..| crR-1 |cw0|..|cwS-1|..|vcwV-1|..| vcw0|vcrV-1|..|vcr0|..
340edd08548SHyong Youb Kim  *              ^         ^     ^       ^        ^         ^      ^        ^
341edd08548SHyong Youb Kim  *    index     0        R-1    R     R+S-1     X-2V    X-(V+1)  X-V      X-1
342edd08548SHyong Youb Kim  * X is not a typo. It really is len(RQ array) to accommodate enic_cq_rq() used
343edd08548SHyong Youb Kim  * throughout RX handlers. The current scheme requires
344edd08548SHyong Youb Kim  * len(CQ array) >= len(RQ array).
345edd08548SHyong Youb Kim  */
346edd08548SHyong Youb Kim 
/* CQ index for a VF representor's WQ, taken from the tail of the CQ array */
static inline unsigned int vf_wq_cq_idx(struct enic_vf_representor *vf)
{
	/* rq is not a typo. index(vcwI) coincides with index(vdI) */
	return vf->pf->conf_rq_count - (vf->pf->max_vf_id + vf->vf_id + 2);
}
352edd08548SHyong Youb Kim 
/* WQ index for VF representor I: vwI sits at W-1-I, i.e. end of WQ array */
static inline unsigned int vf_wq_idx(struct enic_vf_representor *vf)
{
	return vf->pf->conf_wq_count - vf->vf_id - 1;
}
357edd08548SHyong Youb Kim 
/* SOP RQ index for VF representor I: vrI sits at X-1-I, end of RQ array */
static inline unsigned int vf_rq_sop_idx(struct enic_vf_representor *vf)
{
	return vf->pf->conf_rq_count - vf->vf_id - 1;
}
362edd08548SHyong Youb Kim 
/* Data RQ index for VF representor I: vdI sits at X-(V+1)-I (V = max_vf_id+1) */
static inline unsigned int vf_rq_data_idx(struct enic_vf_representor *vf)
{
	return vf->pf->conf_rq_count - (vf->pf->max_vf_id + vf->vf_id + 2);
}
367edd08548SHyong Youb Kim 
/* Return the driver-private struct enic stored in the ethdev data */
static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)
{
	return eth_dev->data->dev_private;
}
37272f3de30SBruce Richardson 
/*
 * Advance ring position i0 by i1, wrapping at n_descriptors.
 * Assumes i0 and i1 are each within the ring, so at most one wrap occurs.
 */
static inline uint32_t
enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
{
	uint32_t pos = i0 + i1;

	if (pos >= n_descriptors)
		pos -= n_descriptors;
	return pos;
}
380a3b1e955SJohn Daley 
/*
 * Distance from ring position i0 forward to i1, wrapping at
 * n_descriptors (i.e. (i1 - i0) mod n_descriptors for in-ring indices).
 */
static inline uint32_t
enic_ring_sub(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
{
	uint32_t diff = i1 - i0;

	if ((int32_t)diff < 0)
		diff += n_descriptors;
	return diff;
}
387a3b1e955SJohn Daley 
/* Advance a ring index by one, wrapping back to 0 at n_descriptors */
static inline uint32_t
enic_ring_incr(uint32_t n_descriptors, uint32_t idx)
{
	uint32_t next = idx + 1;

	return (next == n_descriptors) ? 0 : next;
}
396a3b1e955SJohn Daley 
397ea7768b5SHyong Youb Kim int dev_is_enic(struct rte_eth_dev *dev);
398d98f9d5cSJohn Daley void enic_free_wq(void *txq);
399d98f9d5cSJohn Daley int enic_alloc_intr_resources(struct enic *enic);
400d98f9d5cSJohn Daley int enic_setup_finish(struct enic *enic);
401d98f9d5cSJohn Daley int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
40272f3de30SBruce Richardson 		  unsigned int socket_id, uint16_t nb_desc);
403d98f9d5cSJohn Daley void enic_start_wq(struct enic *enic, uint16_t queue_idx);
404d98f9d5cSJohn Daley int enic_stop_wq(struct enic *enic, uint16_t queue_idx);
405d98f9d5cSJohn Daley void enic_start_rq(struct enic *enic, uint16_t queue_idx);
406d98f9d5cSJohn Daley int enic_stop_rq(struct enic *enic, uint16_t queue_idx);
407d98f9d5cSJohn Daley void enic_free_rq(void *rxq);
408d98f9d5cSJohn Daley int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
40972f3de30SBruce Richardson 		  unsigned int socket_id, struct rte_mempool *mp,
410ce16fd70SJohn Daley 		  uint16_t nb_desc, uint16_t free_thresh);
411d98f9d5cSJohn Daley int enic_set_vnic_res(struct enic *enic);
412c2fec27bSHyong Youb Kim int enic_init_rss_nic_cfg(struct enic *enic);
413c2fec27bSHyong Youb Kim int enic_set_rss_conf(struct enic *enic,
414c2fec27bSHyong Youb Kim 		      struct rte_eth_rss_conf *rss_conf);
415c2fec27bSHyong Youb Kim int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu);
416c2fec27bSHyong Youb Kim int enic_set_vlan_strip(struct enic *enic);
417d98f9d5cSJohn Daley int enic_enable(struct enic *enic);
418d98f9d5cSJohn Daley int enic_disable(struct enic *enic);
419d98f9d5cSJohn Daley void enic_remove(struct enic *enic);
420d98f9d5cSJohn Daley int enic_get_link_status(struct enic *enic);
421d98f9d5cSJohn Daley int enic_dev_stats_get(struct enic *enic,
42272f3de30SBruce Richardson 		       struct rte_eth_stats *r_stats);
4239970a9adSIgor Romanov int enic_dev_stats_clear(struct enic *enic);
4249039c812SAndrew Rybchenko int enic_add_packet_filter(struct enic *enic);
4256d01e580SWei Dai int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr);
426740f5bf1SDavid Marchand int enic_del_mac_address(struct enic *enic, int mac_index);
427d98f9d5cSJohn Daley unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq);
428d98f9d5cSJohn Daley void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
42972f3de30SBruce Richardson 		   struct rte_mbuf *tx_pkt, unsigned short len,
430d739ba4cSJohn Daley 		   uint8_t sop, uint8_t eop, uint8_t cq_entry,
43172f3de30SBruce Richardson 		   uint16_t ol_flags, uint16_t vlan_tag);
432d739ba4cSJohn Daley 
433d98f9d5cSJohn Daley void enic_post_wq_index(struct vnic_wq *wq);
434d98f9d5cSJohn Daley int enic_probe(struct enic *enic);
435ea7768b5SHyong Youb Kim int enic_fm_init(struct enic *enic);
436ea7768b5SHyong Youb Kim void enic_fm_destroy(struct enic *enic);
437ea7768b5SHyong Youb Kim void *enic_alloc_consistent(void *priv, size_t size, dma_addr_t *dma_handle,
43804e8ec74SJohn Daley 			    uint8_t *name);
439ea7768b5SHyong Youb Kim void enic_free_consistent(void *priv, size_t size, void *vaddr,
440ea7768b5SHyong Youb Kim 			  dma_addr_t dma_handle);
441947d860cSJohn Daley uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
442947d860cSJohn Daley 			uint16_t nb_pkts);
4438b428cb5SHyong Youb Kim uint16_t enic_recv_pkts_64(void *rx_queue, struct rte_mbuf **rx_pkts,
4448b428cb5SHyong Youb Kim 			   uint16_t nb_pkts);
44535e2cb6aSJohn Daley uint16_t enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
44635e2cb6aSJohn Daley 				  uint16_t nb_pkts);
447d309bdc2SJohn Daley uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
448606adbd5SJohn Daley 			uint16_t nb_pkts);
449ed933c35SHyong Youb Kim uint16_t enic_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
450ed933c35SHyong Youb Kim 			       uint16_t nb_pkts);
4511e81dbb5SHyong Youb Kim uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
4521e81dbb5SHyong Youb Kim 			uint16_t nb_pkts);
453396a6d71SJohn Daley int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
454c655c547SHyong Youb Kim int enic_link_update(struct rte_eth_dev *eth_dev);
455e92a4b41SHyong Youb Kim bool enic_use_vector_rx_handler(struct rte_eth_dev *eth_dev);
456e92a4b41SHyong Youb Kim void enic_pick_rx_handler(struct rte_eth_dev *eth_dev);
457e92a4b41SHyong Youb Kim void enic_pick_tx_handler(struct rte_eth_dev *eth_dev);
45839cf83f1SHyong Youb Kim int enic_vf_representor_init(struct rte_eth_dev *eth_dev, void *init_params);
45939cf83f1SHyong Youb Kim int enic_vf_representor_uninit(struct rte_eth_dev *ethdev);
46039cf83f1SHyong Youb Kim int enic_fm_allocate_switch_domain(struct enic *pf);
461859540e7SHyong Youb Kim int enic_fm_add_rep2vf_flow(struct enic_vf_representor *vf);
462859540e7SHyong Youb Kim int enic_fm_add_vf2rep_flow(struct enic_vf_representor *vf);
463edd08548SHyong Youb Kim int enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq);
464edd08548SHyong Youb Kim void enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq);
465edd08548SHyong Youb Kim void enic_free_wq_buf(struct rte_mbuf **buf);
466edd08548SHyong Youb Kim void enic_free_rq_buf(struct rte_mbuf **mbuf);
4670f766680SJohn Daley extern const struct rte_flow_ops enic_flow_ops;
468ea7768b5SHyong Youb Kim extern const struct rte_flow_ops enic_fm_flow_ops;
46939cf83f1SHyong Youb Kim 
47072f3de30SBruce Richardson #endif /* _ENIC_H_ */
471