/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#ifndef _ENIC_H_
#define _ENIC_H_

#include <rte_vxlan.h>
#include <rte_ether.h>
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_flowman.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_nic.h"
#include "vnic_rss.h"
#include "enic_res.h"
#include "cq_enet_desc.h"
#include <stdbool.h>
#include <sys/queue.h>
#include <rte_spinlock.h>

#define DRV_NAME		"enic_pmd"
#define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Poll-mode Driver"
#define DRV_COPYRIGHT		"Copyright 2008-2015 Cisco Systems, Inc"

#define VLAN_ETH_HLEN           18	/* ETH header (14) + VLAN tag (4) */

#define ENICPMD_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
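/*
 * For illustration only: assuming vnic_enet.h defines VENETF_RSS, the
 * macro above lets one write ENICPMD_SETTING(enic, RSS) to test that flag.
 */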

#define ENICPMD_BDF_LENGTH      13   /* "0000:00:00.0" plus terminating NUL */
#define ENIC_CALC_IP_CKSUM      1
#define ENIC_CALC_TCP_UDP_CKSUM 2
#define ENIC_MAX_MTU            9000
#define ENIC_PAGE_SIZE          4096
#define PAGE_ROUND_UP(x) \
	((((unsigned long)(x)) + ENIC_PAGE_SIZE-1) & (~(ENIC_PAGE_SIZE-1)))
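/*
 * For example, PAGE_ROUND_UP(1) == 4096, PAGE_ROUND_UP(4096) == 4096,
 * and PAGE_ROUND_UP(4097) == 8192.
 */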

#define ENICPMD_VFIO_PATH          "/dev/vfio/vfio"
/*#define ENIC_DESC_COUNT_MAKE_ODD (x) do{if ((~(x)) & 1) { (x)--; } }while(0)*/

#define PCI_DEVICE_ID_CISCO_VIC_ENET         0x0043  /* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF      0x0071  /* enet SRIOV VF */
/* enet SRIOV Standalone vNic VF */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_SN      0x02B7

/* Special filter ID for non-specific packet flagging. Don't change the value */
#define ENIC_MAGIC_FILTER_ID 0xffff

/*
 * Interrupt 0: LSC and errors
 * Interrupt 1: rx queue 0
 * Interrupt 2: rx queue 1
 * ...
 */
#define ENICPMD_LSC_INTR_OFFSET 0
#define ENICPMD_RXQ_INTR_OFFSET 1
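/*
 * For example, under this layout rx queue N uses interrupt vector
 * ENICPMD_RXQ_INTR_OFFSET + N when per-queue interrupts are enabled.
 */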

struct enic_soft_stats {
	rte_atomic64_t rx_nombuf;
	rte_atomic64_t rx_packet_errors;
	rte_atomic64_t tx_oversized;
};

struct enic_memzone_entry {
	const struct rte_memzone *rz;
	LIST_ENTRY(enic_memzone_entry) entries;
};

/* Defined in enic_fm_flow.c */
struct enic_flowman;
struct enic_fm_flow;

struct rte_flow {
	LIST_ENTRY(rte_flow) next;
	/* Data for filter API based flow (enic_flow.c) */
	uint16_t enic_filter_id;
	struct filter_v2 enic_filter;
	/* Data for flow manager based flow (enic_fm_flow.c) */
	struct enic_fm_flow *fm;
	int internal;
};

/* Per-instance private data structure */
struct enic {
	struct rte_pci_device *pdev;
	struct vnic_enet_config config;
	struct vnic_dev_bar bar0;
	struct vnic_dev *vdev;

	/*
	 * mbuf_initializer contains 64 bits of mbuf rearm_data, used by
	 * the avx2 handler at this time.
	 */
	uint64_t mbuf_initializer;
	unsigned int port_id;
	bool overlay_offload;
	struct rte_eth_dev *rte_dev;
	struct rte_eth_dev_data *dev_data;
	char bdf_name[ENICPMD_BDF_LENGTH];
	int dev_fd;
	int iommu_group_fd;
	int iommu_groupid;
	int eventfd;
	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
	pthread_t err_intr_thread;
	int promisc;
	int allmulti;
	uint8_t ig_vlan_strip_en;
	int link_status;
	uint8_t hw_ip_checksum;
	uint16_t max_mtu;
	uint8_t adv_filters;
	uint32_t flow_filter_mode;
	uint8_t filter_actions; /* HW supported actions */
	uint64_t cq_entry_sizes; /* supported CQ entry sizes */
	bool vxlan;
	bool cq64;            /* actually using 64B CQ entry */
	bool cq64_request;    /* devargs cq64=1 */
	bool disable_overlay; /* devargs disable_overlay=1 */
	uint8_t enable_avx2_rx;  /* devargs enable-avx2-rx=1 */
	uint8_t geneve_opt_avail;    /* Geneve with options offload available */
	uint8_t geneve_opt_enabled;  /* Geneve with options offload enabled */
	uint8_t geneve_opt_request;  /* devargs geneve-opt=1 */
	bool nic_cfg_chk;     /* NIC_CFG_CHK available */
	bool udp_rss_weak;    /* Bodega style UDP RSS */
	uint8_t ig_vlan_rewrite_mode; /* devargs ig-vlan-rewrite */
	uint16_t vxlan_port;  /* current vxlan port pushed to NIC */
	int use_simple_tx_handler;
	int use_noscatter_vec_rx_handler;

	unsigned int flags;
	unsigned int priv_flags;

	/* work queue (len = conf_wq_count) */
	struct vnic_wq *wq;
	unsigned int wq_count; /* equals eth_dev nb_tx_queues */

	/* receive queue (len = conf_rq_count) */
	struct vnic_rq *rq;
	unsigned int rq_count; /* equals eth_dev nb_rx_queues */

	/* completion queue (len = conf_cq_count) */
	struct vnic_cq *cq;
	unsigned int cq_count; /* equals rq_count + wq_count */

	/* interrupt vectors (len = conf_intr_count) */
	struct vnic_intr *intr;
	unsigned int intr_count; /* equals enabled interrupts (lsc + rxqs) */

	/* software counters */
	struct enic_soft_stats soft_stats;

	/* configured resources on vic */
	unsigned int conf_rq_count;
	unsigned int conf_wq_count;
	unsigned int conf_cq_count;
	unsigned int conf_intr_count;

	/* linked list storing memory allocations */
	LIST_HEAD(enic_memzone_list, enic_memzone_entry) memzone_list;
	rte_spinlock_t memzone_list_lock;
	rte_spinlock_t mtu_lock;

	LIST_HEAD(enic_flows, rte_flow) flows;

	/* RSS */
	uint16_t reta_size;
	uint8_t hash_key_size;
	uint64_t flow_type_rss_offloads; /* 0 indicates RSS not supported */
	/*
	 * Keep a copy of current RSS config for queries, as we cannot retrieve
	 * it from the NIC.
	 */
	uint8_t rss_hash_type; /* NIC_CFG_RSS_HASH_TYPE flags */
	uint8_t rss_enable;
	uint64_t rss_hf; /* ETH_RSS flags */
	union vnic_rss_key rss_key;
	union vnic_rss_cpu rss_cpu;

	uint64_t rx_offload_capa; /* DEV_RX_OFFLOAD flags */
	uint64_t tx_offload_capa; /* DEV_TX_OFFLOAD flags */
	uint64_t tx_queue_offload_capa; /* DEV_TX_OFFLOAD flags */
	uint64_t tx_offload_mask; /* PKT_TX flags accepted */

	/* Multicast MAC addresses added to the NIC */
	uint32_t mc_count;
	struct rte_ether_addr mc_addrs[ENIC_MULTICAST_PERFECT_FILTERS];

	/* Flow manager API */
	struct enic_flowman *fm;
	uint64_t fm_vnic_handle;
	uint32_t fm_vnic_uif;
	/* switchdev */
	uint8_t switchdev_mode;
	uint16_t switch_domain_id;
	uint16_t max_vf_id;
	/* Number of queues needed for VF representor paths */
	uint32_t vf_required_wq;
	uint32_t vf_required_cq;
	uint32_t vf_required_rq;
	/*
	 * Lock to serialize devcmds from the PF and VF representors, as they
	 * all share the same PF devcmd instance in firmware.
	 */
	rte_spinlock_t devcmd_lock;
};

struct enic_vf_representor {
	struct enic enic;
	struct vnic_enet_config config;
	struct rte_eth_dev *eth_dev;
	struct rte_ether_addr mac_addr;
	struct rte_pci_addr bdf;
	struct enic *pf;
	uint16_t switch_domain_id;
	uint16_t vf_id;
	int allmulti;
	int promisc;
	/* Representor path uses PF queues. These are reserved during init */
	uint16_t pf_wq_idx;      /* WQ dedicated to VF rep */
	uint16_t pf_wq_cq_idx;   /* CQ for WQ */
	uint16_t pf_rq_sop_idx;  /* SOP RQ dedicated to VF rep */
	uint16_t pf_rq_data_idx; /* Data RQ */
	/* Representor flows managed by flowman */
	struct rte_flow *vf2rep_flow[2];
	struct rte_flow *rep2vf_flow[2];
};

#define VF_ENIC_TO_VF_REP(vf_enic) \
	container_of(vf_enic, struct enic_vf_representor, enic)

static inline int enic_is_vf_rep(struct enic *enic)
{
	return !!(enic->rte_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR);
}

/* Compute ethdev's max packet size from MTU */
static inline uint32_t enic_mtu_to_max_rx_pktlen(uint32_t mtu)
{
	/* ethdev max size includes the Ethernet header; the NIC MTU does not */
	return mtu + RTE_ETHER_HDR_LEN;
}
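/*
 * For example, an MTU of 1500 yields a max packet length of
 * 1514 (1500 + the 14-byte Ethernet header).
 */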

/* Get the CQ index from a Start of Packet (SOP) RQ index */
static inline unsigned int enic_sop_rq_idx_to_cq_idx(unsigned int sop_idx)
{
	return sop_idx;
}

/* Get the RTE RQ index from a Start of Packet (SOP) RQ index */
static inline unsigned int enic_sop_rq_idx_to_rte_idx(unsigned int sop_idx)
{
	return sop_idx;
}

/* Get the Start of Packet (SOP) RQ index from an RTE RQ index */
static inline unsigned int enic_rte_rq_idx_to_sop_idx(unsigned int rte_idx)
{
	return rte_idx;
}

/*
 * Get the Data RQ index from an RTE RQ index. Data RQ dI sits at
 * index R + I in the RQ array (see the layout diagram below).
 */
static inline unsigned int enic_rte_rq_idx_to_data_idx(unsigned int rte_idx,
						       struct enic *enic)
{
	return enic->rq_count + rte_idx;
}

/* Each app RQ uses two VIC RQs: one SOP and one Data */
static inline unsigned int enic_vnic_rq_count(struct enic *enic)
{
	return enic->rq_count * 2;
}

/* CQ for SOP RQ I is at CQ index I (see the layout diagram below) */
static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned int rq)
{
	return rq;
}

/* CQ for WQ I is at CQ index R + I, after the RQ CQs */
static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
{
	return enic->rq_count + wq;
}

/*
 * WQ, RQ, CQ allocation scheme. Firmware gives the driver an array of
 * WQs, an array of RQs, and an array of CQs. For now, these are
 * statically allocated between PF app send/receive queues and VF
 * representor app send/receive queues. Each VF representor supports
 * only one send queue and one receive queue. The number of PF app
 * queues is not known until queue setup time.
 *
 * R = number of receive queues for PF app
 * S = number of send queues for PF app
 * V = number of VF representors
 *
 * wI = WQ for PF app send queue I
 * rI = SOP RQ for PF app receive queue I
 * dI = Data RQ for rI
 * cwI = CQ for wI
 * crI = CQ for rI
 * vwI = WQ for VF representor send queue I
 * vrI = SOP RQ for VF representor receive queue I
 * vdI = Data RQ for vrI
 * vcwI = CQ for vwI
 * vcrI = CQ for vrI
 *
 * WQ array: | w0 |..| wS-1 |..| vwV-1 |..| vw0 |
 *             ^         ^         ^         ^
 *    index    0        S-1       W-V       W-1    W=len(WQ array)
 *
 * RQ array: | r0  |..| rR-1  |d0 |..|dR-1|  ..|vdV-1 |..| vd0 |vrV-1 |..|vr0 |
 *             ^         ^     ^       ^         ^          ^     ^        ^
 *    index    0        R-1    R      2R-1      X-2V    X-(V+1)  X-V      X-1
 * X=len(RQ array)
 *
 * CQ array: | cr0 |..| crR-1 |cw0|..|cwS-1|..|vcwV-1|..| vcw0|vcrV-1|..|vcr0|..
 *              ^         ^     ^       ^        ^         ^      ^        ^
 *    index     0        R-1    R     R+S-1     X-2V    X-(V+1)  X-V      X-1
 * X is not a typo. It really is len(RQ array) to accommodate enic_cq_rq() used
 * throughout RX handlers. The current scheme requires
 * len(CQ array) >= len(RQ array).
 */
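
/*
 * Worked example (hypothetical sizes, for illustration only):
 * R = 2, S = 2, V = 2, with conf_wq_count = 8 and conf_rq_count = 8
 * (so W = X = 8):
 *
 *   WQ array: | w0 | w1 | -- | -- | -- | -- | vw1 | vw0 |
 *   RQ array: | r0 | r1 | d0 | d1 | vd1 | vd0 | vr1 | vr0 |
 *
 * With max_vf_id = 1, VF 0 gets vw0 = WQ index 7, vr0 = RQ index 7, and
 * vd0 = RQ index 8 - (1 + 0 + 2) = 5, matching the helpers below.
 */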

static inline unsigned int vf_wq_cq_idx(struct enic_vf_representor *vf)
{
	/* rq is not a typo. index(vcwI) coincides with index(vdI) */
	return vf->pf->conf_rq_count - (vf->pf->max_vf_id + vf->vf_id + 2);
}

static inline unsigned int vf_wq_idx(struct enic_vf_representor *vf)
{
	return vf->pf->conf_wq_count - vf->vf_id - 1;
}

static inline unsigned int vf_rq_sop_idx(struct enic_vf_representor *vf)
{
	return vf->pf->conf_rq_count - vf->vf_id - 1;
}

static inline unsigned int vf_rq_data_idx(struct enic_vf_representor *vf)
{
	return vf->pf->conf_rq_count - (vf->pf->max_vf_id + vf->vf_id + 2);
}

static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)
{
	return eth_dev->data->dev_private;
}

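/* Advance ring index i0 by i1, wrapping at n_descriptors; assumes both < n */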
static inline uint32_t
enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
{
	uint32_t d = i0 + i1;
	d -= (d >= n_descriptors) ? n_descriptors : 0;
	return d;
}

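/* Ring distance from index i0 forward to index i1, wrapping at n_descriptors */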
static inline uint32_t
enic_ring_sub(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
{
	int32_t d = i1 - i0;
	return (uint32_t)((d < 0) ? ((int32_t)n_descriptors + d) : d);
}

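/* Increment a ring index by one, wrapping back to 0 at n_descriptors */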
static inline uint32_t
enic_ring_incr(uint32_t n_descriptors, uint32_t idx)
{
	idx++;
	if (unlikely(idx == n_descriptors))
		idx = 0;
	return idx;
}

int dev_is_enic(struct rte_eth_dev *dev);
void enic_free_wq(void *txq);
int enic_alloc_intr_resources(struct enic *enic);
int enic_setup_finish(struct enic *enic);
int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
		  unsigned int socket_id, uint16_t nb_desc);
void enic_start_wq(struct enic *enic, uint16_t queue_idx);
int enic_stop_wq(struct enic *enic, uint16_t queue_idx);
void enic_start_rq(struct enic *enic, uint16_t queue_idx);
int enic_stop_rq(struct enic *enic, uint16_t queue_idx);
void enic_free_rq(void *rxq);
int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
		  unsigned int socket_id, struct rte_mempool *mp,
		  uint16_t nb_desc, uint16_t free_thresh);
int enic_set_vnic_res(struct enic *enic);
int enic_init_rss_nic_cfg(struct enic *enic);
int enic_set_rss_conf(struct enic *enic,
		      struct rte_eth_rss_conf *rss_conf);
int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu);
int enic_set_vlan_strip(struct enic *enic);
int enic_enable(struct enic *enic);
int enic_disable(struct enic *enic);
void enic_remove(struct enic *enic);
int enic_get_link_status(struct enic *enic);
int enic_dev_stats_get(struct enic *enic,
		       struct rte_eth_stats *r_stats);
int enic_dev_stats_clear(struct enic *enic);
int enic_add_packet_filter(struct enic *enic);
int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr);
int enic_del_mac_address(struct enic *enic, int mac_index);
unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq);
void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
		   struct rte_mbuf *tx_pkt, unsigned short len,
		   uint8_t sop, uint8_t eop, uint8_t cq_entry,
		   uint16_t ol_flags, uint16_t vlan_tag);

void enic_post_wq_index(struct vnic_wq *wq);
int enic_probe(struct enic *enic);
int enic_fm_init(struct enic *enic);
void enic_fm_destroy(struct enic *enic);
void *enic_alloc_consistent(void *priv, size_t size, dma_addr_t *dma_handle,
			    uint8_t *name);
void enic_free_consistent(void *priv, size_t size, void *vaddr,
			  dma_addr_t dma_handle);
uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts);
uint16_t enic_recv_pkts_64(void *rx_queue, struct rte_mbuf **rx_pkts,
			   uint16_t nb_pkts);
uint16_t enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts);
uint16_t enic_dummy_recv_pkts(void *rx_queue,
			      struct rte_mbuf **rx_pkts,
			      uint16_t nb_pkts);
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
uint16_t enic_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			       uint16_t nb_pkts);
uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
int enic_link_update(struct rte_eth_dev *eth_dev);
bool enic_use_vector_rx_handler(struct rte_eth_dev *eth_dev);
void enic_pick_rx_handler(struct rte_eth_dev *eth_dev);
void enic_pick_tx_handler(struct rte_eth_dev *eth_dev);
int enic_vf_representor_init(struct rte_eth_dev *eth_dev, void *init_params);
int enic_vf_representor_uninit(struct rte_eth_dev *ethdev);
int enic_fm_allocate_switch_domain(struct enic *pf);
int enic_fm_add_rep2vf_flow(struct enic_vf_representor *vf);
int enic_fm_add_vf2rep_flow(struct enic_vf_representor *vf);
int enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq);
void enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq);
void enic_free_wq_buf(struct rte_mbuf **buf);
void enic_free_rq_buf(struct rte_mbuf **mbuf);
extern const struct rte_flow_ops enic_flow_ops;
extern const struct rte_flow_ops enic_fm_flow_ops;

#endif /* _ENIC_H_ */