/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#ifndef _ENIC_H_
#define _ENIC_H_

#include <rte_vxlan.h>
#include <rte_ether.h>
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_flowman.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_nic.h"
#include "vnic_rss.h"
#include "enic_res.h"
#include "cq_enet_desc.h"
#include <stdbool.h>
#include <sys/queue.h>
#include <rte_spinlock.h>

#define DRV_NAME		"enic_pmd"
#define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Poll-mode Driver"
#define DRV_COPYRIGHT		"Copyright 2008-2015 Cisco Systems, Inc"

#define VLAN_ETH_HLEN           18

#define ENICPMD_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
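/*
 * Illustrative usage (editor's example, assuming a VENETF_RSS flag in
 * vnic_enet.h): ENICPMD_SETTING(enic, RSS) tests the VENETF_RSS bit in
 * the firmware-provided config flags.
 */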

#define ENIC_CALC_IP_CKSUM      1
#define ENIC_CALC_TCP_UDP_CKSUM 2
#define ENIC_MAX_MTU            9000
#define ENIC_PAGE_SIZE          4096
#define PAGE_ROUND_UP(x) \
	((((unsigned long)(x)) + ENIC_PAGE_SIZE-1) & (~(ENIC_PAGE_SIZE-1)))
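/*
 * Example (editor's note): PAGE_ROUND_UP(5000) == 8192, i.e. 5000
 * rounded up to the next multiple of ENIC_PAGE_SIZE (4096).
 */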

#define ENICPMD_VFIO_PATH          "/dev/vfio/vfio"

#define PCI_DEVICE_ID_CISCO_VIC_ENET         0x0043  /* ethernet vnic */
/* enet SRIOV Standalone vNic VF */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_SN      0x02B7

/* Special filter ID for non-specific packet flagging. Do not change this value */
#define ENIC_MAGIC_FILTER_ID 0xffff

/*
 * Interrupt 0: LSC and errors / VF admin channel RQ
 * Interrupt 1: rx queue 0
 * Interrupt 2: rx queue 1
 * ...
 */
#define ENICPMD_LSC_INTR_OFFSET 0
#define ENICPMD_RXQ_INTR_OFFSET 1
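/*
 * Illustrative mapping (editor's example): with two Rx queues, vector 0
 * handles link state changes and errors, vector 1 handles rxq 0, and
 * vector 2 handles rxq 1; rxq N uses vector ENICPMD_RXQ_INTR_OFFSET + N.
 */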

struct enic_soft_stats {
	rte_atomic64_t rx_nombuf;
	rte_atomic64_t rx_packet_errors;
	rte_atomic64_t tx_oversized;
};

struct enic_memzone_entry {
	const struct rte_memzone *rz;
	LIST_ENTRY(enic_memzone_entry) entries;
};

/* Defined in enic_fm_flow.c */
struct enic_flowman;
struct enic_fm_flow;

struct rte_flow {
	LIST_ENTRY(rte_flow) next;
	/* Data for filter API based flow (enic_flow.c) */
	uint16_t enic_filter_id;
	struct filter_v2 enic_filter;
	/* Data for flow manager based flow (enic_fm_flow.c) */
	struct enic_fm_flow *fm;
	int internal;
};

/* Per-instance private data structure */
struct enic {
	struct rte_pci_device *pdev;
	struct vnic_enet_config config;
	struct vnic_dev_bar bar0;
	struct vnic_dev *vdev;

	/*
	 * mbuf_initializer contains 64 bits of mbuf rearm_data, currently
	 * used only by the avx2 handler.
	 */
	uint64_t mbuf_initializer;
	unsigned int port_id;
	bool overlay_offload;
	struct rte_eth_dev *rte_dev;
	struct rte_eth_dev_data *dev_data;
	char bdf_name[PCI_PRI_STR_SIZE];
	int dev_fd;
	int iommu_group_fd;
	int iommu_groupid;
	int eventfd;
	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
	pthread_t err_intr_thread;
	int promisc;
	int allmulti;
	uint8_t ig_vlan_strip_en;
	int link_status;
	uint8_t hw_ip_checksum;
	uint16_t max_mtu;
	uint8_t adv_filters;
	uint32_t flow_filter_mode;
	uint8_t filter_actions; /* HW supported actions */
	uint64_t cq_entry_sizes; /* supported CQ entry sizes */
	bool geneve;
	bool vxlan;
	bool cq64;            /* actually using 64B CQ entry */
	bool cq64_request;    /* devargs cq64=1 */
	bool disable_overlay; /* devargs disable_overlay=1 */
	uint8_t enable_avx2_rx;  /* devargs enable-avx2-rx=1 */
	uint8_t geneve_opt_request;  /* devargs geneve-opt=1 */
	bool nic_cfg_chk;     /* NIC_CFG_CHK available */
	bool udp_rss_weak;    /* Bodega style UDP RSS */
	uint8_t ig_vlan_rewrite_mode; /* devargs ig-vlan-rewrite */
	uint16_t geneve_port; /* current geneve port pushed to NIC */
	uint16_t vxlan_port;  /* current vxlan port pushed to NIC */
	int use_simple_tx_handler;
	int use_noscatter_vec_rx_handler;

	unsigned int flags;
	unsigned int priv_flags;

	/* work queue (len = conf_wq_count) */
	struct vnic_wq *wq;
	unsigned int wq_count; /* equals eth_dev nb_tx_queues */

	/* receive queue (len = conf_rq_count) */
	struct vnic_rq *rq;
	unsigned int rq_count; /* equals eth_dev nb_rx_queues */

	/* completion queue (len = conf_cq_count) */
	struct vnic_cq *cq;
	unsigned int cq_count; /* equals rq_count + wq_count */

	/* interrupt vectors (len = conf_intr_count) */
	struct vnic_intr *intr;
	unsigned int intr_count; /* equals enabled interrupts (lsc + rxqs) */

	/* software counters */
	struct enic_soft_stats soft_stats;

	struct vnic_wq admin_wq;
	struct vnic_rq admin_rq;
	struct vnic_cq admin_cq[2];

	/* configured resources on VIC */
	unsigned int conf_rq_count;
	unsigned int conf_wq_count;
	unsigned int conf_cq_count;
	unsigned int conf_intr_count;
	/* SR-IOV VF has queues for the admin channel to the PF */
	unsigned int conf_admin_rq_count;
	unsigned int conf_admin_wq_count;
	unsigned int conf_admin_cq_count;
	uint64_t admin_chan_msg_num;
	int admin_chan_vf_id;
	uint32_t admin_pf_cap_version;
	bool admin_chan_enabled;
	bool sriov_vf_soft_rx_stats;
	bool sriov_vf_compat_mode;
	pthread_mutex_t admin_chan_lock;

	/* linked list storing memory allocations */
	LIST_HEAD(enic_memzone_list, enic_memzone_entry) memzone_list;
	rte_spinlock_t memzone_list_lock;
	rte_spinlock_t mtu_lock;

	LIST_HEAD(enic_flows, rte_flow) flows;

	/* RSS */
	uint16_t reta_size;
	uint8_t hash_key_size;
	uint64_t flow_type_rss_offloads; /* 0 indicates RSS not supported */
	/*
	 * Keep a copy of the current RSS config for queries, as we cannot
	 * retrieve it from the NIC.
	 */
	uint8_t rss_hash_type; /* NIC_CFG_RSS_HASH_TYPE flags */
	uint8_t rss_enable;
	uint64_t rss_hf; /* RTE_ETH_RSS flags */
	union vnic_rss_key rss_key;
	union vnic_rss_cpu rss_cpu;

	uint64_t rx_offload_capa; /* DEV_RX_OFFLOAD flags */
	uint64_t tx_offload_capa; /* DEV_TX_OFFLOAD flags */
	uint64_t tx_queue_offload_capa; /* DEV_TX_OFFLOAD flags */
	uint64_t tx_offload_mask; /* PKT_TX flags accepted */

	/* Multicast MAC addresses added to the NIC */
	uint32_t mc_count;
	struct rte_ether_addr mc_addrs[ENIC_MULTICAST_PERFECT_FILTERS];

	/* Flow manager API */
	struct enic_flowman *fm;
	uint64_t fm_vnic_handle;
	uint32_t fm_vnic_uif;
	/* switchdev */
	uint8_t switchdev_mode;
	uint16_t switch_domain_id;
	uint16_t max_vf_id;
	/* Number of queues needed for VF representor paths */
	uint32_t vf_required_wq;
	uint32_t vf_required_cq;
	uint32_t vf_required_rq;
	/*
	 * Lock to serialize devcmds from the PF and VF representors, as they
	 * all share the same PF devcmd instance in firmware.
	 */
	rte_spinlock_t devcmd_lock;
};

struct enic_vf_representor {
	struct enic enic;
	struct vnic_enet_config config;
	struct rte_eth_dev *eth_dev;
	struct rte_ether_addr mac_addr;
	struct rte_pci_addr bdf;
	struct enic *pf;
	uint16_t switch_domain_id;
	uint16_t vf_id;
	int allmulti;
	int promisc;
	/* Representor path uses PF queues. These are reserved during init */
	uint16_t pf_wq_idx;      /* WQ dedicated to VF rep */
	uint16_t pf_wq_cq_idx;   /* CQ for WQ */
	uint16_t pf_rq_sop_idx;  /* SOP RQ dedicated to VF rep */
	uint16_t pf_rq_data_idx; /* Data RQ */
	/* Representor flows managed by flowman */
	struct rte_flow *vf2rep_flow[2];
	struct rte_flow *rep2vf_flow[2];
};

#define ENIC_ADMIN_WQ_CQ 0
#define ENIC_ADMIN_RQ_CQ 1
#define ENIC_ADMIN_BUF_SIZE 1024

static inline bool enic_is_vf(struct enic *enic)
{
	return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_SN &&
		!enic->sriov_vf_compat_mode;
}

#define VF_ENIC_TO_VF_REP(vf_enic) \
	container_of(vf_enic, struct enic_vf_representor, enic)
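/*
 * Illustrative usage (editor's example): given the embedded enic of a
 * representor, recover the enclosing struct via container_of():
 *
 *   struct enic_vf_representor *vf = VF_ENIC_TO_VF_REP(vf_enic);
 */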

/* Compute ethdev's max packet size from MTU */
static inline uint32_t enic_mtu_to_max_rx_pktlen(uint32_t mtu)
{
	/* ethdev max size includes the Ethernet header, whereas NIC MTU does not */
	return mtu + RTE_ETHER_HDR_LEN;
}
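
/*
 * Example (editor's note): for the standard 1500-byte MTU,
 * enic_mtu_to_max_rx_pktlen(1500) == 1514 (RTE_ETHER_HDR_LEN is 14).
 */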

/* Get the CQ index from a Start of Packet (SOP) RQ index */
static inline unsigned int enic_sop_rq_idx_to_cq_idx(unsigned int sop_idx)
{
	return sop_idx;
}

/* Get the RTE RQ index from a Start of Packet (SOP) RQ index */
static inline unsigned int enic_sop_rq_idx_to_rte_idx(unsigned int sop_idx)
{
	return sop_idx;
}

/* Get the Start of Packet (SOP) RQ index from an RTE RQ index */
static inline unsigned int enic_rte_rq_idx_to_sop_idx(unsigned int rte_idx)
{
	return rte_idx;
}

/* Get the Data RQ index from an RTE RQ index */
static inline unsigned int enic_rte_rq_idx_to_data_idx(unsigned int rte_idx,
						       struct enic *enic)
{
	return enic->rq_count + rte_idx;
}

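/*
 * Each application RQ consumes a SOP RQ plus a Data RQ pair, hence twice
 * rq_count vNIC RQs in total (see the allocation scheme below).
 */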
static inline unsigned int enic_vnic_rq_count(struct enic *enic)
{
	return enic->rq_count * 2;
}

static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned int rq)
{
	return rq;
}

static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
{
	return enic->rq_count + wq;
}

/*
 * WQ, RQ, CQ allocation scheme. Firmware gives the driver an array of
 * WQs, an array of RQs, and an array of CQs. For now, these are
 * statically allocated between PF app send/receive queues and VF
 * representor app send/receive queues. A VF representor supports only 1
 * send and 1 receive queue. The number of PF app queues is not known
 * until queue setup time.
 *
 * R = number of receive queues for PF app
 * S = number of send queues for PF app
 * V = number of VF representors
 *
 * wI = WQ for PF app send queue I
 * rI = SOP RQ for PF app receive queue I
 * dI = Data RQ for rI
 * cwI = CQ for wI
 * crI = CQ for rI
 * vwI = WQ for VF representor send queue I
 * vrI = SOP RQ for VF representor receive queue I
 * vdI = Data RQ for vrI
 * vcwI = CQ for vwI
 * vcrI = CQ for vrI
 *
 * WQ array: | w0 |..| wS-1 |..| vwV-1 |..| vw0 |
 *             ^         ^         ^         ^
 *    index    0        S-1       W-V       W-1    W=len(WQ array)
 *
 * RQ array: | r0  |..| rR-1  |d0 |..|dR-1|  ..|vdV-1 |..| vd0 |vrV-1 |..|vr0 |
 *             ^         ^     ^       ^         ^          ^     ^        ^
 *    index    0        R-1    R      2R-1      X-2V    X-(V+1)  X-V      X-1
 * X=len(RQ array)
 *
 * CQ array: | cr0 |..| crR-1 |cw0|..|cwS-1|..|vcwV-1|..| vcw0|vcrV-1|..|vcr0|..
 *              ^         ^     ^       ^        ^         ^      ^        ^
 *    index     0        R-1    R     R+S-1     X-2V    X-(V+1)  X-V      X-1
 * X is not a typo. It really is len(RQ array), to accommodate enic_cq_rq()
 * as used throughout the Rx handlers. The current scheme requires
 * len(CQ array) >= len(RQ array).
 */
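
/*
 * Worked example (editor's illustration): with R=2, S=2, V=1
 * (so max_vf_id = 0):
 *   WQ array (len 3): w0=0, w1=1, vw0=2
 *   RQ array (len 6): r0=0, r1=1, d0=2, d1=3, vd0=4, vr0=5
 *   CQ array:         cr0=0, cr1=1, cw0=2, cw1=3, vcw0=4, vcr0=5
 */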

static inline unsigned int vf_wq_cq_idx(struct enic_vf_representor *vf)
{
	/* conf_rq_count is not a typo: index(vcwI) coincides with index(vdI) */
	return vf->pf->conf_rq_count - (vf->pf->max_vf_id + vf->vf_id + 2);
}

static inline unsigned int vf_wq_idx(struct enic_vf_representor *vf)
{
	return vf->pf->conf_wq_count - vf->vf_id - 1;
}

static inline unsigned int vf_rq_sop_idx(struct enic_vf_representor *vf)
{
	return vf->pf->conf_rq_count - vf->vf_id - 1;
}

static inline unsigned int vf_rq_data_idx(struct enic_vf_representor *vf)
{
	return vf->pf->conf_rq_count - (vf->pf->max_vf_id + vf->vf_id + 2);
}
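
/*
 * Continuing the example above (conf_wq_count=3, conf_rq_count=6,
 * max_vf_id=0, vf_id=0): vf_wq_idx()=2, vf_rq_sop_idx()=5, and both
 * vf_wq_cq_idx() and vf_rq_data_idx() evaluate to 4, matching vcw0/vd0.
 */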

static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)
{
	return eth_dev->data->dev_private;
}

static inline uint32_t
enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
{
	uint32_t d = i0 + i1;
	d -= (d >= n_descriptors) ? n_descriptors : 0;
	return d;
}

static inline uint32_t
enic_ring_sub(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
{
	int32_t d = i1 - i0;
	return (uint32_t)((d < 0) ? ((int32_t)n_descriptors + d) : d);
}

static inline uint32_t
enic_ring_incr(uint32_t n_descriptors, uint32_t idx)
{
	idx++;
	if (unlikely(idx == n_descriptors))
		idx = 0;
	return idx;
}
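
/*
 * Example (editor's note): on a 16-descriptor ring,
 * enic_ring_add(16, 15, 3) == 2, enic_ring_sub(16, 15, 3) == 4, and
 * enic_ring_incr(16, 15) wraps to 0.
 */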

int dev_is_enic(struct rte_eth_dev *dev);
void enic_free_wq(void *txq);
int enic_alloc_intr_resources(struct enic *enic);
int enic_setup_finish(struct enic *enic);
int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
		  unsigned int socket_id, uint16_t nb_desc);
void enic_start_wq(struct enic *enic, uint16_t queue_idx);
int enic_stop_wq(struct enic *enic, uint16_t queue_idx);
void enic_start_rq(struct enic *enic, uint16_t queue_idx);
int enic_stop_rq(struct enic *enic, uint16_t queue_idx);
void enic_free_rq(void *rxq);
int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
		  unsigned int socket_id, struct rte_mempool *mp,
		  uint16_t nb_desc, uint16_t free_thresh);
int enic_set_vnic_res(struct enic *enic);
int enic_init_rss_nic_cfg(struct enic *enic);
int enic_set_rss_conf(struct enic *enic,
		      struct rte_eth_rss_conf *rss_conf);
int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu);
int enic_set_vlan_strip(struct enic *enic);
int enic_enable(struct enic *enic);
int enic_disable(struct enic *enic);
void enic_remove(struct enic *enic);
int enic_get_link_status(struct enic *enic);
int enic_dev_stats_get(struct enic *enic,
		       struct rte_eth_stats *r_stats);
int enic_dev_stats_clear(struct enic *enic);
int enic_add_packet_filter(struct enic *enic);
int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr);
int enic_del_mac_address(struct enic *enic, int mac_index);
unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq);
void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
		   struct rte_mbuf *tx_pkt, unsigned short len,
		   uint8_t sop, uint8_t eop, uint8_t cq_entry,
		   uint16_t ol_flags, uint16_t vlan_tag);

void enic_post_wq_index(struct vnic_wq *wq);
int enic_probe(struct enic *enic);
int enic_fm_init(struct enic *enic);
void enic_fm_destroy(struct enic *enic);
void *enic_alloc_consistent(void *priv, size_t size, dma_addr_t *dma_handle,
			    uint8_t *name);
void enic_free_consistent(void *priv, size_t size, void *vaddr,
			  dma_addr_t dma_handle);
uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts);
uint16_t enic_recv_pkts_64(void *rx_queue, struct rte_mbuf **rx_pkts,
			   uint16_t nb_pkts);
uint16_t enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts);
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
uint16_t enic_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			       uint16_t nb_pkts);
uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
int enic_link_update(struct rte_eth_dev *eth_dev);
bool enic_use_vector_rx_handler(struct rte_eth_dev *eth_dev);
void enic_pick_rx_handler(struct rte_eth_dev *eth_dev);
void enic_pick_tx_handler(struct rte_eth_dev *eth_dev);
int enic_vf_representor_init(struct rte_eth_dev *eth_dev, void *init_params);
int enic_vf_representor_uninit(struct rte_eth_dev *ethdev);
int enic_fm_allocate_switch_domain(struct enic *pf);
int enic_fm_add_rep2vf_flow(struct enic_vf_representor *vf);
int enic_fm_add_vf2rep_flow(struct enic_vf_representor *vf);
int enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq);
void enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq);
void enic_free_wq_buf(struct rte_mbuf **buf);
void enic_free_rq_buf(struct rte_mbuf **mbuf);
extern const struct rte_flow_ops enic_flow_ops;
extern const struct rte_flow_ops enic_fm_flow_ops;

#endif /* _ENIC_H_ */