xref: /dpdk/drivers/net/enic/enic_main.c (revision c103585df76017fedd5b0ea2f4769fb9ee42f31f)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include <stdio.h>

#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>

#include <rte_pci.h>
#include <bus_pci_driver.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <ethdev_driver.h>
#include <rte_geneve.h>

#include "enic_compat.h"
#include "enic.h"
#include "enic_sriov.h"
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_nic.h"

void
enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
{
	uint16_t i;

	if (!rq || !rq->mbuf_ring) {
		dev_debug(enic, "Pointer to rq or mbuf_ring is NULL");
		return;
	}

	for (i = 0; i < rq->ring.desc_count; i++) {
		if (rq->mbuf_ring[i]) {
			rte_pktmbuf_free_seg(rq->mbuf_ring[i]);
			rq->mbuf_ring[i] = NULL;
		}
	}
}

void enic_free_wq_buf(struct rte_mbuf **buf)
{
	struct rte_mbuf *mbuf = *buf;

	rte_pktmbuf_free_seg(mbuf);
	*buf = NULL;
}

static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	uint32_t error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			dev_err(enic, "WQ[%d] error_status %d\n", i,
				error_status);
	}

	for (i = 0; i < enic_vnic_rq_count(enic); i++) {
		if (!enic->rq[i].in_use)
			continue;
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			dev_err(enic, "RQ[%d] error_status %d\n", i,
				error_status);
	}
}

static void enic_clear_soft_stats(struct enic *enic)
{
	struct enic_soft_stats *soft_stats = &enic->soft_stats;
	rte_atomic64_clear(&soft_stats->rx_nombuf);
	rte_atomic64_clear(&soft_stats->rx_packet_errors);
	rte_atomic64_clear(&soft_stats->tx_oversized);
}

static void enic_init_soft_stats(struct enic *enic)
{
	struct enic_soft_stats *soft_stats = &enic->soft_stats;
	rte_atomic64_init(&soft_stats->rx_nombuf);
	rte_atomic64_init(&soft_stats->rx_packet_errors);
	rte_atomic64_init(&soft_stats->tx_oversized);
	enic_clear_soft_stats(enic);
}

int enic_dev_stats_clear(struct enic *enic)
{
	int ret;

	ret = vnic_dev_stats_clear(enic->vdev);
	if (ret != 0) {
		dev_err(enic, "Error in clearing stats\n");
		return ret;
	}
	enic_clear_soft_stats(enic);

	return 0;
}

int enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
{
	struct vnic_stats *stats;
	struct enic_soft_stats *soft_stats = &enic->soft_stats;
	int64_t rx_truncated;
	uint64_t rx_packet_errors;
	int ret = vnic_dev_stats_dump(enic->vdev, &stats);

	if (ret) {
		dev_err(enic, "Error in getting stats\n");
		return ret;
	}

	/* The number of truncated packets can only be calculated by
	 * subtracting a hardware counter from the error packets received
	 * by the driver. Note: this causes transient inaccuracies in the
	 * ipackets count. Also, the lengths of truncated packets are
	 * counted in ibytes even though truncated packets are dropped,
	 * which can make ibytes slightly higher than it should be.
	 */
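	/*
	 * Illustrative example (values assumed): if the driver's soft
	 * counter saw 10 error packets but the HW rx_errors counter
	 * reports 7, then rx_truncated = 10 - 7 = 3; those 3 packets are
	 * subtracted from ipackets and added to imissed below.
	 */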
	rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors);
	rx_truncated = rx_packet_errors - stats->rx.rx_errors;

	r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated;
	r_stats->opackets = stats->tx.tx_frames_ok;

	r_stats->ibytes = stats->rx.rx_bytes_ok;
	r_stats->obytes = stats->tx.tx_bytes_ok;

	r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
	r_stats->oerrors = stats->tx.tx_errors
			   + rte_atomic64_read(&soft_stats->tx_oversized);

	r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;

	r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
	return 0;
}

int enic_del_mac_address(struct enic *enic, int mac_index)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes;

	return enic_dev_del_addr(enic, mac_addr);
}

int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
{
	int err;

	err = enic_dev_add_addr(enic, mac_addr);
	if (err)
		dev_err(enic, "add mac addr failed\n");
	return err;
}

void enic_free_rq_buf(struct rte_mbuf **mbuf)
{
	if (*mbuf == NULL)
		return;

	rte_pktmbuf_free(*mbuf);
	*mbuf = NULL;
}

void enic_init_vnic_resources(struct enic *enic)
{
	unsigned int error_interrupt_enable = 1;
	unsigned int error_interrupt_offset = 0;
	unsigned int rxq_interrupt_enable = 0;
	unsigned int rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET;
	unsigned int index = 0;
	unsigned int cq_idx;
	struct vnic_rq *data_rq;

	if (enic->rte_dev->data->dev_conf.intr_conf.rxq)
		rxq_interrupt_enable = 1;

	for (index = 0; index < enic->rq_count; index++) {
		cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index));

		vnic_rq_init(&enic->rq[enic_rte_rq_idx_to_sop_idx(index)],
			cq_idx,
			error_interrupt_enable,
			error_interrupt_offset);

		data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index, enic)];
		if (data_rq->in_use)
			vnic_rq_init(data_rq,
				     cq_idx,
				     error_interrupt_enable,
				     error_interrupt_offset);
		vnic_cq_init(&enic->cq[cq_idx],
			0 /* flow_control_enable */,
			1 /* color_enable */,
			0 /* cq_head */,
			0 /* cq_tail */,
			1 /* cq_tail_color */,
			rxq_interrupt_enable,
			1 /* cq_entry_enable */,
			0 /* cq_message_enable */,
			rxq_interrupt_offset,
			0 /* cq_message_addr */);
		if (rxq_interrupt_enable)
			rxq_interrupt_offset++;
	}

	for (index = 0; index < enic->wq_count; index++) {
		vnic_wq_init(&enic->wq[index],
			enic_cq_wq(enic, index),
			error_interrupt_enable,
			error_interrupt_offset);
		/* Compute unsupported ol flags for enic_prep_pkts() */
		enic->wq[index].tx_offload_notsup_mask =
			RTE_MBUF_F_TX_OFFLOAD_MASK ^ enic->tx_offload_mask;

		cq_idx = enic_cq_wq(enic, index);
		vnic_cq_init(&enic->cq[cq_idx],
			0 /* flow_control_enable */,
			1 /* color_enable */,
			0 /* cq_head */,
			0 /* cq_tail */,
			1 /* cq_tail_color */,
			0 /* interrupt_enable */,
			0 /* cq_entry_enable */,
			1 /* cq_message_enable */,
			0 /* interrupt offset */,
			(uint64_t)enic->wq[index].cqmsg_rz->iova);
	}

	for (index = 0; index < enic->intr_count; index++) {
		vnic_intr_init(&enic->intr[index],
			       enic->config.intr_timer_usec,
			       enic->config.intr_timer_type,
			       /*mask_on_assertion*/1);
	}
}


int
enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
{
	struct rte_mbuf *mb;
	struct rq_enet_desc *rqd = rq->ring.descs;
	unsigned i;
	dma_addr_t dma_addr;
	uint32_t max_rx_pktlen;
	uint16_t rq_buf_len;

	if (!rq->in_use)
		return 0;

	dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
		  rq->ring.desc_count);

	/*
	 * If *not* using scatter and the mbuf size is greater than the
	 * requested max packet size (mtu + eth overhead), then reduce the
	 * posted buffer size to max packet size. HW still receives packets
	 * larger than max packet size, but they will be truncated, which we
	 * drop in the rx handler. Not ideal, but better than returning
	 * large packets when the user is not expecting them.
	 */
	max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu);
	rq_buf_len = rte_pktmbuf_data_room_size(rq->mp) - RTE_PKTMBUF_HEADROOM;
	if (max_rx_pktlen < rq_buf_len && !rq->data_queue_enable)
		rq_buf_len = max_rx_pktlen;
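	/*
	 * Illustrative example (values assumed): with an MTU of 1500 the
	 * max packet size is roughly 1518 bytes including the Ethernet
	 * header and CRC. If the mempool provides 2048-byte data rooms
	 * and scatter is off, rq_buf_len drops from 2048 to ~1518, so
	 * larger frames are truncated by HW and later dropped in the
	 * rx handler.
	 */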
	for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
		mb = rte_mbuf_raw_alloc(rq->mp);
		if (mb == NULL) {
			dev_err(enic, "RX mbuf alloc failed queue_id=%u\n",
			(unsigned)rq->index);
			return -ENOMEM;
		}

		mb->data_off = RTE_PKTMBUF_HEADROOM;
		dma_addr = (dma_addr_t)(mb->buf_iova
			   + RTE_PKTMBUF_HEADROOM);
		rq_enet_desc_enc(rqd, dma_addr,
				(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
				: RQ_ENET_TYPE_NOT_SOP),
				rq_buf_len);
		rq->mbuf_ring[i] = mb;
	}
	/*
	 * Do not post the buffers to the NIC until we enable the RQ via
	 * enic_start_rq().
	 */
	rq->need_initial_post = true;
	/* Initialize fetch index while RQ is disabled */
	iowrite32(0, &rq->ctrl->fetch_index);
	return 0;
}

/*
 * Post the Rx buffers for the first time. enic_alloc_rx_queue_mbufs() has
 * allocated the buffers and filled the RQ descriptor ring. Just need to push
 * the post index to the NIC.
 */
static void
enic_initial_post_rx(struct enic *enic, struct vnic_rq *rq)
{
	if (!rq->in_use || !rq->need_initial_post)
		return;

	/* make sure all prior writes are complete before doing the PIO write */
	rte_rmb();

	/* Post all but the last buffer to VIC. */
	rq->posted_index = rq->ring.desc_count - 1;
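	/*
	 * Keeping one descriptor unposted is assumed to prevent the
	 * posted index from becoming equal to the fetch index, which
	 * would make a full ring indistinguishable from an empty one.
	 */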

	rq->rx_nb_hold = 0;

	dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
		enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
	iowrite32(rq->posted_index, &rq->ctrl->posted_index);
	rte_rmb();
	rq->need_initial_post = false;
}

void *
enic_alloc_consistent(void *priv, size_t size,
	dma_addr_t *dma_handle, uint8_t *name)
{
	void *vaddr;
	const struct rte_memzone *rz;
	*dma_handle = 0;
	struct enic *enic = (struct enic *)priv;
	struct enic_memzone_entry *mze;

	rz = rte_memzone_reserve_aligned((const char *)name, size,
			SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ENIC_PAGE_SIZE);
	if (!rz) {
		pr_err("%s : Failed to allocate memory requested for %s\n",
			__func__, name);
		return NULL;
	}

	vaddr = rz->addr;
	*dma_handle = (dma_addr_t)rz->iova;

	mze = rte_malloc("enic memzone entry",
			 sizeof(struct enic_memzone_entry), 0);

	if (!mze) {
		pr_err("%s : Failed to allocate memory for memzone list\n",
		       __func__);
		rte_memzone_free(rz);
		return NULL;
	}

	mze->rz = rz;

	rte_spinlock_lock(&enic->memzone_list_lock);
	LIST_INSERT_HEAD(&enic->memzone_list, mze, entries);
	rte_spinlock_unlock(&enic->memzone_list_lock);

	return vaddr;
}

void
enic_free_consistent(void *priv,
		     __rte_unused size_t size,
		     void *vaddr,
		     dma_addr_t dma_handle)
{
	struct enic_memzone_entry *mze;
	struct enic *enic = (struct enic *)priv;

	rte_spinlock_lock(&enic->memzone_list_lock);
	LIST_FOREACH(mze, &enic->memzone_list, entries) {
		if (mze->rz->addr == vaddr &&
		    mze->rz->iova == dma_handle)
			break;
	}
	if (mze == NULL) {
		rte_spinlock_unlock(&enic->memzone_list_lock);
		dev_warning(enic,
			    "Tried to free memory, but couldn't find it in the memzone list\n");
		return;
	}
	LIST_REMOVE(mze, entries);
	rte_spinlock_unlock(&enic->memzone_list_lock);
	rte_memzone_free(mze->rz);
	rte_free(mze);
}

int enic_link_update(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	link.link_status = enic_get_link_status(enic);
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_speed = vnic_dev_port_speed(enic->vdev);

	return rte_eth_linkstatus_set(eth_dev, &link);
}

static void
enic_intr_handler(void *arg)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();

	vnic_intr_return_all_credits(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);

	if (enic_is_vf(enic)) {
		/*
		 * When using the admin channel, the VF receives link
		 * status changes from the PF. enic_poll_vf_admin_chan()
		 * raises the RTE_ETH_EVENT_INTR_LSC event.
		 */
		enic_poll_vf_admin_chan(enic);
		return;
	}

	enic_link_update(dev);
	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	enic_log_q_error(enic);
	/* Re-enable irq in case of INTx */
	rte_intr_ack(enic->pdev->intr_handle);
}

static int enic_rxq_intr_init(struct enic *enic)
{
	struct rte_intr_handle *intr_handle;
	uint32_t rxq_intr_count, i;
	int err;

	intr_handle = enic->rte_dev->intr_handle;
	if (!enic->rte_dev->data->dev_conf.intr_conf.rxq)
		return 0;
	/*
	 * Rx queue interrupts only work when we have MSI-X interrupts,
	 * one per queue. Sharing one interrupt is technically
	 * possible with VIC, but it is not worth the complications it brings.
	 */
	if (!rte_intr_cap_multiple(intr_handle)) {
		dev_err(enic, "Rx queue interrupts require MSI-X interrupts"
			" (vfio-pci driver)\n");
		return -ENOTSUP;
	}
	rxq_intr_count = enic->intr_count - ENICPMD_RXQ_INTR_OFFSET;
	err = rte_intr_efd_enable(intr_handle, rxq_intr_count);
	if (err) {
		dev_err(enic, "Failed to enable event fds for Rx queue"
			" interrupts\n");
		return err;
	}

	if (rte_intr_vec_list_alloc(intr_handle, "enic_intr_vec",
					   rxq_intr_count)) {
		dev_err(enic, "Failed to allocate intr_vec\n");
		return -ENOMEM;
	}
	for (i = 0; i < rxq_intr_count; i++)
		if (rte_intr_vec_list_index_set(intr_handle, i,
						   i + ENICPMD_RXQ_INTR_OFFSET))
			return -rte_errno;
	return 0;
}

static void enic_rxq_intr_deinit(struct enic *enic)
{
	struct rte_intr_handle *intr_handle;

	intr_handle = enic->rte_dev->intr_handle;
	rte_intr_efd_disable(intr_handle);

	rte_intr_vec_list_free(intr_handle);
}

static void enic_prep_wq_for_simple_tx(struct enic *enic, uint16_t queue_idx)
{
	struct wq_enet_desc *desc;
	struct vnic_wq *wq;
	unsigned int i;

	/*
	 * Fill WQ descriptor fields that never change. Every descriptor is
	 * one packet, so set EOP. Also set CQ_ENTRY every ENIC_WQ_CQ_THRESH
	 * descriptors (i.e. request one completion update every 32 packets).
	 */
	wq = &enic->wq[queue_idx];
	desc = (struct wq_enet_desc *)wq->ring.descs;
	for (i = 0; i < wq->ring.desc_count; i++, desc++) {
		desc->header_length_flags = 1 << WQ_ENET_FLAGS_EOP_SHIFT;
		if (i % ENIC_WQ_CQ_THRESH == ENIC_WQ_CQ_THRESH - 1)
			desc->header_length_flags |=
				(1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT);
	}
}

/*
 * The 'strong' version is in enic_rxtx_vec_avx2.c. This weak version is
 * used when that file is not compiled.
 */
__rte_weak bool
enic_use_vector_rx_handler(__rte_unused struct rte_eth_dev *eth_dev)
{
	return false;
}

void enic_pick_rx_handler(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (enic->cq64) {
		ENICPMD_LOG(DEBUG, " use the normal Rx handler for 64B CQ entry");
		eth_dev->rx_pkt_burst = &enic_recv_pkts_64;
		return;
	}
	/*
	 * Preference order:
	 * 1. The vectorized handler if possible and requested.
	 * 2. The non-scatter, simplified handler if scatter Rx is not used.
	 * 3. The default handler as a fallback.
	 */
	if (enic_use_vector_rx_handler(eth_dev))
		return;
	if (enic->rq_count > 0 && enic->rq[0].data_queue_enable == 0) {
		ENICPMD_LOG(DEBUG, " use the non-scatter Rx handler");
		eth_dev->rx_pkt_burst = &enic_noscatter_recv_pkts;
	} else {
		ENICPMD_LOG(DEBUG, " use the normal Rx handler");
		eth_dev->rx_pkt_burst = &enic_recv_pkts;
	}
}

/* Secondary process uses this to set the Tx handler */
void enic_pick_tx_handler(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (enic->use_simple_tx_handler) {
		ENICPMD_LOG(DEBUG, " use the simple tx handler");
		eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts;
	} else {
		ENICPMD_LOG(DEBUG, " use the default tx handler");
		eth_dev->tx_pkt_burst = &enic_xmit_pkts;
	}
}

int enic_enable(struct enic *enic)
{
	unsigned int index;
	int err;
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	uint64_t simple_tx_offloads;
	uintptr_t p;

	if (enic->enable_avx2_rx) {
		struct rte_mbuf mb_def = { .buf_addr = 0 };

		/*
		 * mbuf_initializer contains const-after-init fields of
		 * receive mbufs (i.e. 64 bits of fields from rearm_data).
		 * It is currently used by the vectorized handler.
		 */
		mb_def.nb_segs = 1;
		mb_def.data_off = RTE_PKTMBUF_HEADROOM;
		mb_def.port = enic->port_id;
		rte_mbuf_refcnt_set(&mb_def, 1);
		rte_compiler_barrier();
		p = (uintptr_t)&mb_def.rearm_data;
		enic->mbuf_initializer = *(uint64_t *)p;
	}

	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
	eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;

	/* vnic notification of link status has already been turned on in
	 * enic_dev_init() which is called during probe time.  Here we are
	 * just turning on interrupt vector 0 if needed.
	 */
	if (eth_dev->data->dev_conf.intr_conf.lsc)
		vnic_dev_notify_set(enic->vdev, 0);

	err = enic_rxq_intr_init(enic);
	if (err)
		return err;

	/* Initialize flowman if not already initialized during probe */
	if (enic->fm == NULL && enic_fm_init(enic))
		dev_warning(enic, "Init of flowman failed.\n");

	for (index = 0; index < enic->rq_count; index++) {
		err = enic_alloc_rx_queue_mbufs(enic,
			&enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
		if (err) {
			dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
			return err;
		}
		err = enic_alloc_rx_queue_mbufs(enic,
			&enic->rq[enic_rte_rq_idx_to_data_idx(index, enic)]);
		if (err) {
			/* release the allocated mbufs for the sop rq */
			enic_rxmbuf_queue_release(enic,
				&enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);

			dev_err(enic, "Failed to alloc data RX queue mbufs\n");
			return err;
		}
	}

	/*
	 * Use the simple TX handler if possible. Only checksum offloads
	 * and vlan insertion are supported.
	 */
	simple_tx_offloads = enic->tx_offload_capa &
		(RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		 RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
		 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
		 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
		 RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
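	/*
	 * Subset test sketch (illustrative): enabling only
	 * RTE_ETH_TX_OFFLOAD_IPV4_CKSUM leaves no bits outside
	 * simple_tx_offloads, so the simple handler is chosen below;
	 * enabling RTE_ETH_TX_OFFLOAD_TCP_TSO sets a bit outside the
	 * mask, so the default handler is used instead.
	 */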
	if ((eth_dev->data->dev_conf.txmode.offloads &
	     ~simple_tx_offloads) == 0) {
		ENICPMD_LOG(DEBUG, " use the simple tx handler");
		eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts;
		for (index = 0; index < enic->wq_count; index++)
			enic_prep_wq_for_simple_tx(enic, index);
		enic->use_simple_tx_handler = 1;
	} else {
		ENICPMD_LOG(DEBUG, " use the default tx handler");
		eth_dev->tx_pkt_burst = &enic_xmit_pkts;
	}

	enic_pick_rx_handler(eth_dev);

	for (index = 0; index < enic->wq_count; index++)
		enic_start_wq(enic, index);
	for (index = 0; index < enic->rq_count; index++)
		enic_start_rq(enic, index);

	enic_dev_add_addr(enic, enic->mac_addr);

	vnic_dev_enable_wait(enic->vdev);

	/* Register and enable error interrupt */
	rte_intr_callback_register(enic->pdev->intr_handle,
		enic_intr_handler, (void *)enic->rte_dev);
	rte_intr_enable(enic->pdev->intr_handle);
	/* Unmask LSC interrupt */
	vnic_intr_unmask(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);

	return 0;
}

int enic_alloc_intr_resources(struct enic *enic)
{
	int err;
	unsigned int i;

	dev_info(enic, "vNIC resources used:  "\
		"wq %d rq %d cq %d intr %d\n",
		enic->wq_count, enic_vnic_rq_count(enic),
		enic->cq_count, enic->intr_count);

	if (enic_is_vf(enic)) {
		dev_info(enic, "vNIC admin channel resources used: wq %d rq %d cq %d\n",
			 enic->conf_admin_wq_count, enic->conf_admin_rq_count,
			 enic->conf_admin_cq_count);
	}

	for (i = 0; i < enic->intr_count; i++) {
		err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
		if (err) {
			enic_free_vnic_resources(enic);
			return err;
		}
	}

	return 0;
}

void enic_free_rq(void *rxq)
{
	struct vnic_rq *rq_sop, *rq_data;
	struct enic *enic;

	if (rxq == NULL)
		return;

	rq_sop = (struct vnic_rq *)rxq;
	enic = vnic_dev_priv(rq_sop->vdev);
	rq_data = &enic->rq[rq_sop->data_queue_idx];

	if (rq_sop->free_mbufs) {
		struct rte_mbuf **mb;
		int i;

		mb = rq_sop->free_mbufs;
		for (i = ENIC_RX_BURST_MAX - rq_sop->num_free_mbufs;
		     i < ENIC_RX_BURST_MAX; i++)
			rte_pktmbuf_free(mb[i]);
		rte_free(rq_sop->free_mbufs);
		rq_sop->free_mbufs = NULL;
		rq_sop->num_free_mbufs = 0;
	}

	enic_rxmbuf_queue_release(enic, rq_sop);
	if (rq_data->in_use)
		enic_rxmbuf_queue_release(enic, rq_data);

	rte_free(rq_sop->mbuf_ring);
	if (rq_data->in_use)
		rte_free(rq_data->mbuf_ring);

	rq_sop->mbuf_ring = NULL;
	rq_data->mbuf_ring = NULL;

	vnic_rq_free(rq_sop);
	if (rq_data->in_use)
		vnic_rq_free(rq_data);

	vnic_cq_free(&enic->cq[enic_sop_rq_idx_to_cq_idx(rq_sop->index)]);

	rq_sop->in_use = 0;
	rq_data->in_use = 0;
}

void enic_start_wq(struct enic *enic, uint16_t queue_idx)
{
	struct rte_eth_dev_data *data = enic->dev_data;
	vnic_wq_enable(&enic->wq[queue_idx]);
	data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}

int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
{
	struct rte_eth_dev_data *data = enic->dev_data;
	int ret;

	ret = vnic_wq_disable(&enic->wq[queue_idx]);
	if (ret)
		return ret;

	data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

void enic_start_rq(struct enic *enic, uint16_t queue_idx)
{
	struct rte_eth_dev_data *data = enic->dev_data;
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;
	rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
	rq_data = &enic->rq[rq_sop->data_queue_idx];

	if (rq_data->in_use) {
		vnic_rq_enable(rq_data);
		enic_initial_post_rx(enic, rq_data);
	}
	rte_mb();
	vnic_rq_enable(rq_sop);
	enic_initial_post_rx(enic, rq_sop);
	data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}

int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
{
	struct rte_eth_dev_data *data = enic->dev_data;
	int ret1 = 0, ret2 = 0;
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;
	rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
	rq_data = &enic->rq[rq_sop->data_queue_idx];

	ret2 = vnic_rq_disable(rq_sop);
	rte_mb();
	if (rq_data->in_use)
		ret1 = vnic_rq_disable(rq_data);

	if (ret2)
		return ret2;
	else if (ret1)
		return ret1;

	data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
	unsigned int socket_id, struct rte_mempool *mp,
	uint16_t nb_desc, uint16_t free_thresh)
{
	struct enic_vf_representor *vf;
	int rc;
	uint16_t sop_queue_idx;
	uint16_t data_queue_idx;
	uint16_t cq_idx;
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;
	unsigned int mbuf_size, mbufs_per_pkt;
	unsigned int nb_sop_desc, nb_data_desc;
	uint16_t min_sop, max_sop, min_data, max_data;
	uint32_t max_rx_pktlen;

	/*
	 * Representor uses a reserved PF queue. Translate representor
	 * queue number to PF queue number.
	 */
	if (rte_eth_dev_is_repr(enic->rte_dev)) {
		RTE_ASSERT(queue_idx == 0);
		vf = VF_ENIC_TO_VF_REP(enic);
		sop_queue_idx = vf->pf_rq_sop_idx;
		data_queue_idx = vf->pf_rq_data_idx;
		enic = vf->pf;
		queue_idx = sop_queue_idx;
	} else {
		sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
		data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx, enic);
	}
	cq_idx = enic_cq_rq(enic, sop_queue_idx);
	rq_sop = &enic->rq[sop_queue_idx];
	rq_data = &enic->rq[data_queue_idx];
	rq_sop->is_sop = 1;
	rq_sop->data_queue_idx = data_queue_idx;
	rq_data->is_sop = 0;
	rq_data->data_queue_idx = 0;
	rq_sop->socket_id = socket_id;
	rq_sop->mp = mp;
	rq_data->socket_id = socket_id;
	rq_data->mp = mp;
	rq_sop->in_use = 1;
	rq_sop->rx_free_thresh = free_thresh;
	rq_data->rx_free_thresh = free_thresh;
	dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
		  free_thresh);

	mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
			       RTE_PKTMBUF_HEADROOM);
	/* max_rx_pktlen includes the ethernet header and CRC. */
	max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu);

	if (enic->rte_dev->data->dev_conf.rxmode.offloads &
	    RTE_ETH_RX_OFFLOAD_SCATTER) {
		dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
		/* ceil((max pkt len)/mbuf_size) */
		mbufs_per_pkt = (max_rx_pktlen + mbuf_size - 1) / mbuf_size;
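		/*
		 * Example (values assumed): a 9018-byte max packet with
		 * 2048-byte mbufs gives (9018 + 2047) / 2048 = 5 mbufs/pkt.
		 */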
	} else {
		dev_info(enic, "Scatter rx mode disabled\n");
		mbufs_per_pkt = 1;
		if (max_rx_pktlen > mbuf_size) {
			dev_warning(enic, "The maximum Rx packet size (%u) is"
				    " larger than the mbuf size (%u), and"
				    " scatter is disabled. Larger packets will"
				    " be truncated.\n",
				    max_rx_pktlen, mbuf_size);
		}
	}

	if (mbufs_per_pkt > 1) {
		dev_info(enic, "Rq %u Scatter rx mode in use\n", queue_idx);
		rq_sop->data_queue_enable = 1;
		rq_data->in_use = 1;
		/*
		 * HW does not directly support MTU. HW always
		 * receives packet sizes up to the "max" MTU.
		 * If not using scatter, we can achieve the effect of dropping
		 * larger packets by reducing the size of posted buffers.
		 * See enic_alloc_rx_queue_mbufs().
		 */
		if (enic->rte_dev->data->mtu < enic->max_mtu) {
			dev_warning(enic,
				"mtu is ignored when scatter rx mode is in use.\n");
		}
	} else {
		dev_info(enic, "Rq %u Scatter rx mode not being used\n",
			 queue_idx);
		rq_sop->data_queue_enable = 0;
		rq_data->in_use = 0;
	}

	/* The number of descriptors must be a multiple of 32 */
	nb_sop_desc = (nb_desc / mbufs_per_pkt) & ENIC_ALIGN_DESCS_MASK;
	nb_data_desc = (nb_desc - nb_sop_desc) & ENIC_ALIGN_DESCS_MASK;
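	/*
	 * Example (values assumed, ENIC_ALIGN_DESCS_MASK taken to round
	 * down to a multiple of 32): nb_desc = 512 and mbufs_per_pkt = 3
	 * give nb_sop_desc = (512 / 3) & mask = 160 and
	 * nb_data_desc = (512 - 160) & mask = 352.
	 */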

	rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
	rq_data->max_mbufs_per_pkt = mbufs_per_pkt;

	if (mbufs_per_pkt > 1) {
		min_sop = ENIC_RX_BURST_MAX;
		max_sop = ((enic->config.rq_desc_count /
			    (mbufs_per_pkt - 1)) & ENIC_ALIGN_DESCS_MASK);
		min_data = min_sop * (mbufs_per_pkt - 1);
		max_data = enic->config.rq_desc_count;
	} else {
		min_sop = ENIC_RX_BURST_MAX;
		max_sop = enic->config.rq_desc_count;
		min_data = 0;
		max_data = 0;
	}

	if (nb_desc < (min_sop + min_data)) {
		dev_warning(enic,
			    "Number of rx descs too low, adjusting to minimum\n");
		nb_sop_desc = min_sop;
		nb_data_desc = min_data;
	} else if (nb_desc > (max_sop + max_data)) {
		dev_warning(enic,
			    "Number of rx_descs too high, adjusting to maximum\n");
		nb_sop_desc = max_sop;
		nb_data_desc = max_data;
	}
	if (mbufs_per_pkt > 1) {
		dev_info(enic, "For max packet size %u and mbuf size %u valid"
			 " rx descriptor range is %u to %u\n",
			 max_rx_pktlen, mbuf_size, min_sop + min_data,
			 max_sop + max_data);
	}
	dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
		 nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc);

	/* Allocate sop queue resources */
	rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx,
		nb_sop_desc, sizeof(struct rq_enet_desc));
	if (rc) {
		dev_err(enic, "error in allocation of sop rq\n");
		goto err_exit;
	}
	nb_sop_desc = rq_sop->ring.desc_count;

	if (rq_data->in_use) {
		/* Allocate data queue resources */
		rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx,
				   nb_data_desc,
				   sizeof(struct rq_enet_desc));
		if (rc) {
			dev_err(enic, "error in allocation of data rq\n");
			goto err_free_rq_sop;
		}
		nb_data_desc = rq_data->ring.desc_count;
	}
	/* Enable 64B CQ entry if requested */
	if (enic->cq64 && vnic_dev_set_cq_entry_size(enic->vdev,
				sop_queue_idx, VNIC_RQ_CQ_ENTRY_SIZE_64)) {
		dev_err(enic, "failed to enable 64B CQ entry on sop rq\n");
		goto err_free_rq_data;
	}
	if (rq_data->in_use && enic->cq64 &&
	    vnic_dev_set_cq_entry_size(enic->vdev, data_queue_idx,
		VNIC_RQ_CQ_ENTRY_SIZE_64)) {
		dev_err(enic, "failed to enable 64B CQ entry on data rq\n");
		goto err_free_rq_data;
	}

	rc = vnic_cq_alloc(enic->vdev, &enic->cq[cq_idx], cq_idx,
			   socket_id, nb_sop_desc + nb_data_desc,
			   enic->cq64 ? sizeof(struct cq_enet_rq_desc_64) :
			   sizeof(struct cq_enet_rq_desc));
	if (rc) {
		dev_err(enic, "error in allocation of cq for rq\n");
		goto err_free_rq_data;
	}

	/* Allocate the mbuf rings */
	rq_sop->mbuf_ring = (struct rte_mbuf **)
		rte_zmalloc_socket("rq->mbuf_ring",
				   sizeof(struct rte_mbuf *) * nb_sop_desc,
				   RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
	if (rq_sop->mbuf_ring == NULL)
		goto err_free_cq;

	if (rq_data->in_use) {
		rq_data->mbuf_ring = (struct rte_mbuf **)
			rte_zmalloc_socket("rq->mbuf_ring",
				sizeof(struct rte_mbuf *) * nb_data_desc,
				RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
		if (rq_data->mbuf_ring == NULL)
			goto err_free_sop_mbuf;
	}

	rq_sop->free_mbufs = (struct rte_mbuf **)
		rte_zmalloc_socket("rq->free_mbufs",
				   sizeof(struct rte_mbuf *) *
				   ENIC_RX_BURST_MAX,
				   RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
	if (rq_sop->free_mbufs == NULL)
		goto err_free_data_mbuf;
	rq_sop->num_free_mbufs = 0;

	rq_sop->tot_nb_desc = nb_desc; /* squirrel away for the MTU update function */

	return 0;

err_free_data_mbuf:
	rte_free(rq_data->mbuf_ring);
err_free_sop_mbuf:
	rte_free(rq_sop->mbuf_ring);
err_free_cq:
	/* cleanup on error */
	vnic_cq_free(&enic->cq[cq_idx]);
err_free_rq_data:
	if (rq_data->in_use)
		vnic_rq_free(rq_data);
err_free_rq_sop:
	vnic_rq_free(rq_sop);
err_exit:
	return -ENOMEM;
}

void enic_free_wq(void *txq)
{
	struct vnic_wq *wq;
	struct enic *enic;

	if (txq == NULL)
		return;

	wq = (struct vnic_wq *)txq;
	enic = vnic_dev_priv(wq->vdev);
	rte_memzone_free(wq->cqmsg_rz);
	vnic_wq_free(wq);
	vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
}

int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
	unsigned int socket_id, uint16_t nb_desc)
{
	struct enic_vf_representor *vf;
	int err;
	struct vnic_wq *wq;
	unsigned int cq_index;
	char name[RTE_MEMZONE_NAMESIZE];
	static int instance;

	/*
	 * Representor uses a reserved PF queue. Translate representor
	 * queue number to PF queue number.
	 */
	if (rte_eth_dev_is_repr(enic->rte_dev)) {
		RTE_ASSERT(queue_idx == 0);
		vf = VF_ENIC_TO_VF_REP(enic);
		queue_idx = vf->pf_wq_idx;
		cq_index = vf->pf_wq_cq_idx;
		enic = vf->pf;
	} else {
		cq_index = enic_cq_wq(enic, queue_idx);
	}
	wq = &enic->wq[queue_idx];
	wq->socket_id = socket_id;
	/*
	 * rte_eth_tx_queue_setup() checks min, max, and alignment. So just
	 * print an info message for diagnostics.
	 */
	dev_info(enic, "TX Queues - effective number of descs:%d\n", nb_desc);

	/* Allocate queue resources */
	err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
		nb_desc,
		sizeof(struct wq_enet_desc));
	if (err) {
		dev_err(enic, "error in allocation of wq\n");
		return err;
	}

	err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
		socket_id, nb_desc,
		sizeof(struct cq_enet_wq_desc));
107972f3de30SBruce Richardson 	if (err) {
108072f3de30SBruce Richardson 		vnic_wq_free(wq);
108172f3de30SBruce Richardson 		dev_err(enic, "error in allocation of cq for wq\n");
		return err;
108272f3de30SBruce Richardson 	}
108372f3de30SBruce Richardson 
1084fc2c8c06SJohn Daley 	/* set up the CQ message area (the CQ writes completion notifications here) */
1085fc2c8c06SJohn Daley 	snprintf((char *)name, sizeof(name),
1086fc2c8c06SJohn Daley 		 "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx,
1087fc2c8c06SJohn Daley 		instance++);
1088fc2c8c06SJohn Daley 
1089fc2c8c06SJohn Daley 	wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
109046e4fb12SAnatoly Burakov 			sizeof(uint32_t), SOCKET_ID_ANY,
109104e8ec74SJohn Daley 			RTE_MEMZONE_IOVA_CONTIG, ENIC_PAGE_SIZE);
1092fc2c8c06SJohn Daley 	if (!wq->cqmsg_rz)
1093fc2c8c06SJohn Daley 		return -ENOMEM;
1094fc2c8c06SJohn Daley 
109572f3de30SBruce Richardson 	return err;
109672f3de30SBruce Richardson }
109772f3de30SBruce Richardson 
109872f3de30SBruce Richardson int enic_disable(struct enic *enic)
109972f3de30SBruce Richardson {
110072f3de30SBruce Richardson 	unsigned int i;
110172f3de30SBruce Richardson 	int err;
110272f3de30SBruce Richardson 
11030f872d31SHyong Youb Kim 	for (i = 0; i < enic->intr_count; i++) {
11040f872d31SHyong Youb Kim 		vnic_intr_mask(&enic->intr[i]);
11050f872d31SHyong Youb Kim 		(void)vnic_intr_masked(&enic->intr[i]); /* flush write */
11060f872d31SHyong Youb Kim 	}
11070f872d31SHyong Youb Kim 	enic_rxq_intr_deinit(enic);
1108d61138d4SHarman Kalra 	rte_intr_disable(enic->pdev->intr_handle);
1109d61138d4SHarman Kalra 	rte_intr_callback_unregister(enic->pdev->intr_handle,
1110667b8a3bSNelson Escobar 				     enic_intr_handler,
1111667b8a3bSNelson Escobar 				     (void *)enic->rte_dev);
111272f3de30SBruce Richardson 
111372f3de30SBruce Richardson 	vnic_dev_disable(enic->vdev);
111472f3de30SBruce Richardson 
1115ea7768b5SHyong Youb Kim 	enic_fm_destroy(enic);
111672f3de30SBruce Richardson 
1117*00ce4311SHyong Youb Kim 	enic_dev_del_addr(enic, enic->mac_addr);
111872f3de30SBruce Richardson 
111972f3de30SBruce Richardson 	for (i = 0; i < enic->wq_count; i++) {
112072f3de30SBruce Richardson 		err = vnic_wq_disable(&enic->wq[i]);
112172f3de30SBruce Richardson 		if (err)
112272f3de30SBruce Richardson 			return err;
112372f3de30SBruce Richardson 	}
1124856d7ba7SNelson Escobar 	for (i = 0; i < enic_vnic_rq_count(enic); i++) {
1125856d7ba7SNelson Escobar 		if (enic->rq[i].in_use) {
112672f3de30SBruce Richardson 			err = vnic_rq_disable(&enic->rq[i]);
112772f3de30SBruce Richardson 			if (err)
112872f3de30SBruce Richardson 				return err;
112972f3de30SBruce Richardson 		}
1130856d7ba7SNelson Escobar 	}
113172f3de30SBruce Richardson 
113253fa8cc0SNelson Escobar 	/* If we were using interrupts, set the interrupt vector to -1
11337be78d02SJosh Soref 	 * to disable interrupts.  We are not disabling link notifications,
113453fa8cc0SNelson Escobar 	 * though, as we want the polling of link status to continue working.
113553fa8cc0SNelson Escobar 	 */
113653fa8cc0SNelson Escobar 	if (enic->rte_dev->data->dev_conf.intr_conf.lsc)
113753fa8cc0SNelson Escobar 		vnic_dev_notify_set(enic->vdev, -1);
113853fa8cc0SNelson Escobar 
113972f3de30SBruce Richardson 	vnic_dev_set_reset_flag(enic->vdev, 1);
114072f3de30SBruce Richardson 
114172f3de30SBruce Richardson 	for (i = 0; i < enic->wq_count; i++)
114272f3de30SBruce Richardson 		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
1143947d860cSJohn Daley 
1144856d7ba7SNelson Escobar 	for (i = 0; i < enic_vnic_rq_count(enic); i++)
1145856d7ba7SNelson Escobar 		if (enic->rq[i].in_use)
114672f3de30SBruce Richardson 			vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
114772f3de30SBruce Richardson 	for (i = 0; i < enic->cq_count; i++)
114872f3de30SBruce Richardson 		vnic_cq_clean(&enic->cq[i]);
11490f872d31SHyong Youb Kim 	for (i = 0; i < enic->intr_count; i++)
11500f872d31SHyong Youb Kim 		vnic_intr_clean(&enic->intr[i]);
115172f3de30SBruce Richardson 
1152*00ce4311SHyong Youb Kim 	if (enic_is_vf(enic))
1153*00ce4311SHyong Youb Kim 		enic_disable_vf_admin_chan(enic, true);
115472f3de30SBruce Richardson 	return 0;
115572f3de30SBruce Richardson }
115672f3de30SBruce Richardson 
115772f3de30SBruce Richardson static int enic_dev_wait(struct vnic_dev *vdev,
115872f3de30SBruce Richardson 	int (*start)(struct vnic_dev *, int),
115972f3de30SBruce Richardson 	int (*finished)(struct vnic_dev *, int *),
116072f3de30SBruce Richardson 	int arg)
116172f3de30SBruce Richardson {
116272f3de30SBruce Richardson 	int done;
116372f3de30SBruce Richardson 	int err;
116472f3de30SBruce Richardson 	int i;
116572f3de30SBruce Richardson 
116672f3de30SBruce Richardson 	err = start(vdev, arg);
116772f3de30SBruce Richardson 	if (err)
116872f3de30SBruce Richardson 		return err;
116972f3de30SBruce Richardson 
117072f3de30SBruce Richardson 	/* Wait for func to complete...2 seconds max */
117172f3de30SBruce Richardson 	for (i = 0; i < 2000; i++) {
117272f3de30SBruce Richardson 		err = finished(vdev, &done);
117372f3de30SBruce Richardson 		if (err)
117472f3de30SBruce Richardson 			return err;
117572f3de30SBruce Richardson 		if (done)
117672f3de30SBruce Richardson 			return 0;
117772f3de30SBruce Richardson 		usleep(1000);
117872f3de30SBruce Richardson 	}
117972f3de30SBruce Richardson 	return -ETIMEDOUT;
118072f3de30SBruce Richardson }
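
/*
 * Illustrative sketch, not used by the driver: the callback contract
 * enic_dev_wait() expects. "example_start" and "example_done" are
 * hypothetical names; real callers pass devcmd wrappers such as
 * vnic_dev_open/vnic_dev_open_done, as enic_dev_open() does below.
 */
static __rte_unused int
example_start(__rte_unused struct vnic_dev *vdev, __rte_unused int arg)
{
	/* issue the command; a nonzero return aborts the wait */
	return 0;
}

static __rte_unused int
example_done(__rte_unused struct vnic_dev *vdev, int *done)
{
	/* poll for completion; set *done nonzero once finished */
	*done = 1;
	return 0;
}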
118172f3de30SBruce Richardson 
118272f3de30SBruce Richardson static int enic_dev_open(struct enic *enic)
118372f3de30SBruce Richardson {
118472f3de30SBruce Richardson 	int err;
1185fe26a3bbSHyong Youb Kim 	int flags = CMD_OPENF_IG_DESCCACHE;
118672f3de30SBruce Richardson 
118772f3de30SBruce Richardson 	err = enic_dev_wait(enic->vdev, vnic_dev_open,
1188fe26a3bbSHyong Youb Kim 		vnic_dev_open_done, flags);
118972f3de30SBruce Richardson 	if (err)
119072f3de30SBruce Richardson 		dev_err(enic_get_dev(enic),
119172f3de30SBruce Richardson 			"vNIC device open failed, err %d\n", err);
119272f3de30SBruce Richardson 
119372f3de30SBruce Richardson 	return err;
119472f3de30SBruce Richardson }
119572f3de30SBruce Richardson 
1196c2fec27bSHyong Youb Kim static int enic_set_rsskey(struct enic *enic, uint8_t *user_key)
119772f3de30SBruce Richardson {
119872f3de30SBruce Richardson 	dma_addr_t rss_key_buf_pa;
119972f3de30SBruce Richardson 	union vnic_rss_key *rss_key_buf_va = NULL;
1200c2fec27bSHyong Youb Kim 	int err, i;
120104e8ec74SJohn Daley 	uint8_t name[RTE_MEMZONE_NAMESIZE];
120272f3de30SBruce Richardson 
1203c2fec27bSHyong Youb Kim 	RTE_ASSERT(user_key != NULL);
1204846ac76cSJohn Daley 	snprintf((char *)name, sizeof(name), "rss_key-%s", enic->bdf_name);
120572f3de30SBruce Richardson 	rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
120672f3de30SBruce Richardson 		&rss_key_buf_pa, name);
120772f3de30SBruce Richardson 	if (!rss_key_buf_va)
120872f3de30SBruce Richardson 		return -ENOMEM;
120972f3de30SBruce Richardson 
1210c2fec27bSHyong Youb Kim 	for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++)
1211c2fec27bSHyong Youb Kim 		rss_key_buf_va->key[i / 10].b[i % 10] = user_key[i];
121272f3de30SBruce Richardson 
121372f3de30SBruce Richardson 	err = enic_set_rss_key(enic,
121472f3de30SBruce Richardson 		rss_key_buf_pa,
121572f3de30SBruce Richardson 		sizeof(union vnic_rss_key));
121672f3de30SBruce Richardson 
1217c2fec27bSHyong Youb Kim 	/* Save for later queries */
1218c2fec27bSHyong Youb Kim 	if (!err) {
1219c2fec27bSHyong Youb Kim 		rte_memcpy(&enic->rss_key, rss_key_buf_va,
1220c2fec27bSHyong Youb Kim 			   sizeof(union vnic_rss_key));
1221c2fec27bSHyong Youb Kim 	}
1222da5f560bSNelson Escobar 	enic_free_consistent(enic, sizeof(union vnic_rss_key),
122372f3de30SBruce Richardson 		rss_key_buf_va, rss_key_buf_pa);
122472f3de30SBruce Richardson 
122572f3de30SBruce Richardson 	return err;
122672f3de30SBruce Richardson }
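
/*
 * A minimal sketch of the key scatter above, assuming the base code
 * lays out union vnic_rss_key as four 10-byte groups (key[0..3].b[0..9])
 * holding the 40-byte hash key. Under that assumption, the i / 10 and
 * i % 10 indexing is simply a grouped byte copy:
 */
static __rte_unused void
example_scatter_rss_key(union vnic_rss_key *dst, const uint8_t *src)
{
	int i;

	for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++)	/* 40 bytes */
		dst->key[i / 10].b[i % 10] = src[i];	/* group i/10, byte i%10 */
}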
122772f3de30SBruce Richardson 
1228c2fec27bSHyong Youb Kim int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu)
122972f3de30SBruce Richardson {
123072f3de30SBruce Richardson 	dma_addr_t rss_cpu_buf_pa;
123172f3de30SBruce Richardson 	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
123272f3de30SBruce Richardson 	int err;
123304e8ec74SJohn Daley 	uint8_t name[RTE_MEMZONE_NAMESIZE];
123472f3de30SBruce Richardson 
1235846ac76cSJohn Daley 	snprintf((char *)name, sizeof(name), "rss_cpu-%s", enic->bdf_name);
123672f3de30SBruce Richardson 	rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),
123772f3de30SBruce Richardson 		&rss_cpu_buf_pa, name);
123872f3de30SBruce Richardson 	if (!rss_cpu_buf_va)
123972f3de30SBruce Richardson 		return -ENOMEM;
124072f3de30SBruce Richardson 
1241c2fec27bSHyong Youb Kim 	rte_memcpy(rss_cpu_buf_va, rss_cpu, sizeof(union vnic_rss_cpu));
124272f3de30SBruce Richardson 
124372f3de30SBruce Richardson 	err = enic_set_rss_cpu(enic,
124472f3de30SBruce Richardson 		rss_cpu_buf_pa,
124572f3de30SBruce Richardson 		sizeof(union vnic_rss_cpu));
124672f3de30SBruce Richardson 
1247da5f560bSNelson Escobar 	enic_free_consistent(enic, sizeof(union vnic_rss_cpu),
124872f3de30SBruce Richardson 		rss_cpu_buf_va, rss_cpu_buf_pa);
124972f3de30SBruce Richardson 
1250c2fec27bSHyong Youb Kim 	/* Save for later queries */
1251c2fec27bSHyong Youb Kim 	if (!err)
1252c2fec27bSHyong Youb Kim 		rte_memcpy(&enic->rss_cpu, rss_cpu, sizeof(union vnic_rss_cpu));
125372f3de30SBruce Richardson 	return err;
125472f3de30SBruce Richardson }
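
/*
 * Illustrative application-side sketch (assumes a configured port):
 * rewriting the indirection table through the ethdev API lands in the
 * driver's reta_update handler and ultimately enic_set_rss_reta()
 * above. This hypothetical helper steers every RETA entry to Rx
 * queue 0.
 */
static __rte_unused int
example_reta_all_to_queue0(uint16_t port_id, uint16_t reta_size)
{
	/* four groups cover up to 256 entries; VIC RETAs are smaller */
	struct rte_eth_rss_reta_entry64 conf[4];
	uint16_t i;

	if (reta_size > RTE_DIM(conf) * RTE_ETH_RETA_GROUP_SIZE)
		return -EINVAL;
	memset(conf, 0, sizeof(conf));	/* zeroed reta[] entries -> queue 0 */
	for (i = 0; i < reta_size; i++)
		conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
			UINT64_C(1) << (i % RTE_ETH_RETA_GROUP_SIZE);
	return rte_eth_dev_rss_reta_update(port_id, conf, reta_size);
}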
125572f3de30SBruce Richardson 
125604e8ec74SJohn Daley static int enic_set_niccfg(struct enic *enic, uint8_t rss_default_cpu,
125704e8ec74SJohn Daley 	uint8_t rss_hash_type, uint8_t rss_hash_bits, uint8_t rss_base_cpu,
125804e8ec74SJohn Daley 	uint8_t rss_enable)
125972f3de30SBruce Richardson {
126004e8ec74SJohn Daley 	const uint8_t tso_ipid_split_en = 0;
126172f3de30SBruce Richardson 	int err;
126272f3de30SBruce Richardson 
126372f3de30SBruce Richardson 	err = enic_set_nic_cfg(enic,
126472f3de30SBruce Richardson 		rss_default_cpu, rss_hash_type,
126572f3de30SBruce Richardson 		rss_hash_bits, rss_base_cpu,
126672f3de30SBruce Richardson 		rss_enable, tso_ipid_split_en,
126772f3de30SBruce Richardson 		enic->ig_vlan_strip_en);
126872f3de30SBruce Richardson 
126972f3de30SBruce Richardson 	return err;
127072f3de30SBruce Richardson }
127172f3de30SBruce Richardson 
1272c2fec27bSHyong Youb Kim /* Initialize RSS with defaults, called from dev_configure */
1273c2fec27bSHyong Youb Kim int enic_init_rss_nic_cfg(struct enic *enic)
127472f3de30SBruce Richardson {
1275c2fec27bSHyong Youb Kim 	static uint8_t default_rss_key[] = {
1276c2fec27bSHyong Youb Kim 		85, 67, 83, 97, 119, 101, 115, 111, 109, 101,
1277c2fec27bSHyong Youb Kim 		80, 65, 76, 79, 117, 110, 105, 113, 117, 101,
1278c2fec27bSHyong Youb Kim 		76, 73, 78, 85, 88, 114, 111, 99, 107, 115,
1279c2fec27bSHyong Youb Kim 		69, 78, 73, 67, 105, 115, 99, 111, 111, 108,
1280c2fec27bSHyong Youb Kim 	};
1281c2fec27bSHyong Youb Kim 	struct rte_eth_rss_conf rss_conf;
1282c2fec27bSHyong Youb Kim 	union vnic_rss_cpu rss_cpu;
1283c2fec27bSHyong Youb Kim 	int ret, i;
128472f3de30SBruce Richardson 
1285c2fec27bSHyong Youb Kim 	rss_conf = enic->rte_dev->data->dev_conf.rx_adv_conf.rss_conf;
1286c2fec27bSHyong Youb Kim 	/*
1287c2fec27bSHyong Youb Kim 	 * If setting key for the first time, and the user gives us none, then
1288c2fec27bSHyong Youb Kim 	 * push the default key to NIC.
1289c2fec27bSHyong Youb Kim 	 */
1290c2fec27bSHyong Youb Kim 	if (rss_conf.rss_key == NULL) {
1291c2fec27bSHyong Youb Kim 		rss_conf.rss_key = default_rss_key;
1292c2fec27bSHyong Youb Kim 		rss_conf.rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
129372f3de30SBruce Richardson 	}
1294c2fec27bSHyong Youb Kim 	ret = enic_set_rss_conf(enic, &rss_conf);
1295c2fec27bSHyong Youb Kim 	if (ret) {
1296c2fec27bSHyong Youb Kim 		dev_err(enic, "Failed to configure RSS\n");
1297c2fec27bSHyong Youb Kim 		return ret;
129872f3de30SBruce Richardson 	}
1299c2fec27bSHyong Youb Kim 	if (enic->rss_enable) {
1300c2fec27bSHyong Youb Kim 		/* If enabling RSS, use the default reta */
1301c2fec27bSHyong Youb Kim 		for (i = 0; i < ENIC_RSS_RETA_SIZE; i++) {
1302c2fec27bSHyong Youb Kim 			rss_cpu.cpu[i / 4].b[i % 4] =
1303c2fec27bSHyong Youb Kim 				enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);
130472f3de30SBruce Richardson 		}
1305c2fec27bSHyong Youb Kim 		ret = enic_set_rss_reta(enic, &rss_cpu);
1306c2fec27bSHyong Youb Kim 		if (ret)
1307c2fec27bSHyong Youb Kim 			dev_err(enic, "Failed to set RSS indirection table\n");
1308c2fec27bSHyong Youb Kim 	}
1309c2fec27bSHyong Youb Kim 	return ret;
131072f3de30SBruce Richardson }
131172f3de30SBruce Richardson 
131272f3de30SBruce Richardson int enic_setup_finish(struct enic *enic)
131372f3de30SBruce Richardson {
1314*00ce4311SHyong Youb Kim 	int err;
1315*00ce4311SHyong Youb Kim 
1316*00ce4311SHyong Youb Kim 	ENICPMD_FUNC_TRACE();
131765b5434dSJohn Daley 	enic_init_soft_stats(enic);
131865b5434dSJohn Daley 
1319*00ce4311SHyong Youb Kim 	/*
1320*00ce4311SHyong Youb Kim 	 * Enable the admin channel so certain devcmds, such as
1321*00ce4311SHyong Youb Kim 	 * vnic_dev_packet_filter(), can be issued over it.
1322*00ce4311SHyong Youb Kim 	 */
1323*00ce4311SHyong Youb Kim 	if (enic_is_vf(enic)) {
1324*00ce4311SHyong Youb Kim 		err = enic_enable_vf_admin_chan(enic);
1325*00ce4311SHyong Youb Kim 		if (err)
1326*00ce4311SHyong Youb Kim 			return err;
1327*00ce4311SHyong Youb Kim 	}
1328*00ce4311SHyong Youb Kim 
132939cf83f1SHyong Youb Kim 	/* switchdev: enable promisc mode on PF */
133039cf83f1SHyong Youb Kim 	if (enic->switchdev_mode) {
1331*00ce4311SHyong Youb Kim 		RTE_VERIFY(!enic_is_vf(enic));
133239cf83f1SHyong Youb Kim 		vnic_dev_packet_filter(enic->vdev,
133339cf83f1SHyong Youb Kim 				       0 /* directed  */,
133439cf83f1SHyong Youb Kim 				       0 /* multicast */,
133539cf83f1SHyong Youb Kim 				       0 /* broadcast */,
133639cf83f1SHyong Youb Kim 				       1 /* promisc   */,
133739cf83f1SHyong Youb Kim 				       0 /* allmulti  */);
133839cf83f1SHyong Youb Kim 		enic->promisc = 1;
133939cf83f1SHyong Youb Kim 		enic->allmulti = 0;
134039cf83f1SHyong Youb Kim 		return 0;
134139cf83f1SHyong Youb Kim 	}
134272f3de30SBruce Richardson 	/* Default conf */
1343*00ce4311SHyong Youb Kim 	err = enic_dev_packet_filter(enic,
134472f3de30SBruce Richardson 		1 /* directed  */,
134572f3de30SBruce Richardson 		1 /* multicast */,
134672f3de30SBruce Richardson 		1 /* broadcast */,
134772f3de30SBruce Richardson 		0 /* promisc   */,
134872f3de30SBruce Richardson 		1 /* allmulti  */);
134972f3de30SBruce Richardson 
135072f3de30SBruce Richardson 	enic->promisc = 0;
135172f3de30SBruce Richardson 	enic->allmulti = 1;
135272f3de30SBruce Richardson 
1353*00ce4311SHyong Youb Kim 	return err;
135472f3de30SBruce Richardson }
135572f3de30SBruce Richardson 
1356c2fec27bSHyong Youb Kim static int enic_rss_conf_valid(struct enic *enic,
1357c2fec27bSHyong Youb Kim 			       struct rte_eth_rss_conf *rss_conf)
1358c2fec27bSHyong Youb Kim {
1359c2fec27bSHyong Youb Kim 	/* RSS is disabled per VIC settings. Ignore rss_conf. */
1360c2fec27bSHyong Youb Kim 	if (enic->flow_type_rss_offloads == 0)
1361c2fec27bSHyong Youb Kim 		return 0;
1362c2fec27bSHyong Youb Kim 	if (rss_conf->rss_key != NULL &&
1363c2fec27bSHyong Youb Kim 	    rss_conf->rss_key_len != ENIC_RSS_HASH_KEY_SIZE) {
1364c2fec27bSHyong Youb Kim 		dev_err(enic, "Given rss_key is %d bytes, it must be %d\n",
1365c2fec27bSHyong Youb Kim 			rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
1366c2fec27bSHyong Youb Kim 		return -EINVAL;
1367c2fec27bSHyong Youb Kim 	}
1368c2fec27bSHyong Youb Kim 	if (rss_conf->rss_hf != 0 &&
1369c2fec27bSHyong Youb Kim 	    (rss_conf->rss_hf & enic->flow_type_rss_offloads) == 0) {
1370c2fec27bSHyong Youb Kim 		dev_err(enic, "Given rss_hf contains none of the supported"
1371c2fec27bSHyong Youb Kim 			" types\n");
1372c2fec27bSHyong Youb Kim 		return -EINVAL;
1373c2fec27bSHyong Youb Kim 	}
1374c2fec27bSHyong Youb Kim 	return 0;
1375c2fec27bSHyong Youb Kim }
1376c2fec27bSHyong Youb Kim 
1377c2fec27bSHyong Youb Kim /* Set hash type and key according to rss_conf */
1378c2fec27bSHyong Youb Kim int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
1379c2fec27bSHyong Youb Kim {
1380c2fec27bSHyong Youb Kim 	struct rte_eth_dev *eth_dev;
1381c2fec27bSHyong Youb Kim 	uint64_t rss_hf;
138204e8ec74SJohn Daley 	uint8_t rss_hash_type;
138304e8ec74SJohn Daley 	uint8_t rss_enable;
1384c2fec27bSHyong Youb Kim 	int ret;
1385c2fec27bSHyong Youb Kim 
1386c2fec27bSHyong Youb Kim 	RTE_ASSERT(rss_conf != NULL);
1387c2fec27bSHyong Youb Kim 	ret = enic_rss_conf_valid(enic, rss_conf);
1388c2fec27bSHyong Youb Kim 	if (ret) {
1389c2fec27bSHyong Youb Kim 		dev_err(enic, "RSS configuration (rss_conf) is invalid\n");
1390c2fec27bSHyong Youb Kim 		return ret;
1391c2fec27bSHyong Youb Kim 	}
1392c2fec27bSHyong Youb Kim 
1393c2fec27bSHyong Youb Kim 	eth_dev = enic->rte_dev;
1394c2fec27bSHyong Youb Kim 	rss_hash_type = 0;
1395c2fec27bSHyong Youb Kim 	rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads;
1396c2fec27bSHyong Youb Kim 	if (enic->rq_count > 1 &&
1397295968d1SFerruh Yigit 	    (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) &&
1398c2fec27bSHyong Youb Kim 	    rss_hf != 0) {
1399c2fec27bSHyong Youb Kim 		rss_enable = 1;
1400295968d1SFerruh Yigit 		if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
1401295968d1SFerruh Yigit 			      RTE_ETH_RSS_NONFRAG_IPV4_OTHER))
1402c2fec27bSHyong Youb Kim 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
1403295968d1SFerruh Yigit 		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
1404c2fec27bSHyong Youb Kim 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
1405295968d1SFerruh Yigit 		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
140694c35189SHyong Youb Kim 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV4;
14075bc989e6SHyong Youb Kim 			if (enic->udp_rss_weak) {
14089bd04182SJohn Daley 				/*
140994c35189SHyong Youb Kim 				 * 'TCP' is not a typo. The "weak" version of
141094c35189SHyong Youb Kim 				 * UDP RSS requires both the TCP and UDP bits
141194c35189SHyong Youb Kim 				 * to be set. It enables TCP RSS as well.
14129bd04182SJohn Daley 				 */
14139bd04182SJohn Daley 				rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
14149bd04182SJohn Daley 			}
141594c35189SHyong Youb Kim 		}
1416295968d1SFerruh Yigit 		if (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_IPV6_EX |
1417295968d1SFerruh Yigit 			      RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER))
1418c2fec27bSHyong Youb Kim 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
1419295968d1SFerruh Yigit 		if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX))
1420c2fec27bSHyong Youb Kim 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
1421295968d1SFerruh Yigit 		if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX)) {
142294c35189SHyong Youb Kim 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV6;
14235bc989e6SHyong Youb Kim 			if (enic->udp_rss_weak)
14249bd04182SJohn Daley 				rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
14259bd04182SJohn Daley 		}
1426c2fec27bSHyong Youb Kim 	} else {
1427c2fec27bSHyong Youb Kim 		rss_enable = 0;
1428c2fec27bSHyong Youb Kim 		rss_hf = 0;
1429c2fec27bSHyong Youb Kim 	}
1430c2fec27bSHyong Youb Kim 
1431c2fec27bSHyong Youb Kim 	/* Set the hash key if provided */
1432c2fec27bSHyong Youb Kim 	if (rss_enable && rss_conf->rss_key) {
1433c2fec27bSHyong Youb Kim 		ret = enic_set_rsskey(enic, rss_conf->rss_key);
1434c2fec27bSHyong Youb Kim 		if (ret) {
1435c2fec27bSHyong Youb Kim 			dev_err(enic, "Failed to set RSS key\n");
1436c2fec27bSHyong Youb Kim 			return ret;
1437c2fec27bSHyong Youb Kim 		}
1438c2fec27bSHyong Youb Kim 	}
1439c2fec27bSHyong Youb Kim 
1440c2fec27bSHyong Youb Kim 	ret = enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, rss_hash_type,
1441c2fec27bSHyong Youb Kim 			      ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU,
1442c2fec27bSHyong Youb Kim 			      rss_enable);
1443c2fec27bSHyong Youb Kim 	if (!ret) {
1444c2fec27bSHyong Youb Kim 		enic->rss_hf = rss_hf;
1445c2fec27bSHyong Youb Kim 		enic->rss_hash_type = rss_hash_type;
1446c2fec27bSHyong Youb Kim 		enic->rss_enable = rss_enable;
14475bc989e6SHyong Youb Kim 	} else {
14485bc989e6SHyong Youb Kim 		dev_err(enic, "Failed to update RSS configurations."
14495bc989e6SHyong Youb Kim 			" hash=0x%x\n", rss_hash_type);
1450c2fec27bSHyong Youb Kim 	}
14515bc989e6SHyong Youb Kim 	return ret;
1452c2fec27bSHyong Youb Kim }
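
/*
 * Illustrative application-side sketch (port already configured):
 * updating the hash fields at runtime goes through
 * rte_eth_dev_rss_hash_update() and reaches enic_set_rss_conf() above
 * via the driver's rss_hash_update handler. The field selection here
 * is only an example.
 */
static __rte_unused int
example_enable_tcp_rss_only(uint16_t port_id)
{
	struct rte_eth_rss_conf conf = {
		.rss_key = NULL,	/* NULL keeps the current 40-byte key */
		.rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
			  RTE_ETH_RSS_NONFRAG_IPV6_TCP,
	};

	return rte_eth_dev_rss_hash_update(port_id, &conf);
}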
1453c2fec27bSHyong Youb Kim 
1454c2fec27bSHyong Youb Kim int enic_set_vlan_strip(struct enic *enic)
1455c2fec27bSHyong Youb Kim {
1456c2fec27bSHyong Youb Kim 	/*
1457c2fec27bSHyong Youb Kim 	 * Unfortunately, VLAN strip on/off and RSS on/off are configured
1458c2fec27bSHyong Youb Kim 	 * together. So, re-do niccfg, preserving the current RSS settings.
1459c2fec27bSHyong Youb Kim 	 */
1460c2fec27bSHyong Youb Kim 	return enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, enic->rss_hash_type,
1461c2fec27bSHyong Youb Kim 			       ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU,
1462c2fec27bSHyong Youb Kim 			       enic->rss_enable);
1463c2fec27bSHyong Youb Kim }
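
/*
 * Illustrative application-side sketch: toggling Rx VLAN stripping
 * through the ethdev API reaches enic_set_vlan_strip() via the
 * driver's vlan_offload_set handler. Note that the mask-based API
 * below sets the full VLAN offload state, not just the strip bit.
 */
static __rte_unused int
example_enable_vlan_strip(uint16_t port_id)
{
	return rte_eth_dev_set_vlan_offload(port_id,
					    RTE_ETH_VLAN_STRIP_OFFLOAD);
}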
1464c2fec27bSHyong Youb Kim 
14659039c812SAndrew Rybchenko int enic_add_packet_filter(struct enic *enic)
146672f3de30SBruce Richardson {
1467*00ce4311SHyong Youb Kim 	ENICPMD_FUNC_TRACE();
146839cf83f1SHyong Youb Kim 	/* switchdev ignores packet filters */
146939cf83f1SHyong Youb Kim 	if (enic->switchdev_mode) {
147039cf83f1SHyong Youb Kim 		ENICPMD_LOG(DEBUG, " switchdev: ignore packet filter");
147139cf83f1SHyong Youb Kim 		return 0;
147239cf83f1SHyong Youb Kim 	}
147372f3de30SBruce Richardson 	/* Args -> directed, multicast, broadcast, promisc, allmulti */
1474*00ce4311SHyong Youb Kim 	return enic_dev_packet_filter(enic, 1, 1, 1,
147572f3de30SBruce Richardson 		enic->promisc, enic->allmulti);
147672f3de30SBruce Richardson }
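
/*
 * Illustrative application-side sketch: enabling promiscuous mode
 * flips enic->promisc and re-applies the filter through
 * enic_add_packet_filter() above (a no-op in switchdev mode).
 */
static __rte_unused int
example_enable_promisc(uint16_t port_id)
{
	return rte_eth_promiscuous_enable(port_id);
}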
147772f3de30SBruce Richardson 
147872f3de30SBruce Richardson int enic_get_link_status(struct enic *enic)
147972f3de30SBruce Richardson {
148072f3de30SBruce Richardson 	return vnic_dev_link_status(enic->vdev);
148172f3de30SBruce Richardson }
148272f3de30SBruce Richardson 
148372f3de30SBruce Richardson static void enic_dev_deinit(struct enic *enic)
148472f3de30SBruce Richardson {
1485c98779abSNelson Escobar 	/* stop link status checking */
1486c98779abSNelson Escobar 	vnic_dev_notify_unset(enic->vdev);
1487c98779abSNelson Escobar 
14887f34bb52SHyong Youb Kim 	/* mac_addrs is freed by rte_eth_dev_release_port() */
14896c45c330SHyong Youb Kim 	rte_free(enic->cq);
14900f872d31SHyong Youb Kim 	rte_free(enic->intr);
14916c45c330SHyong Youb Kim 	rte_free(enic->rq);
14926c45c330SHyong Youb Kim 	rte_free(enic->wq);
149372f3de30SBruce Richardson }
149472f3de30SBruce Richardson 
149572f3de30SBruce Richardson 
149672f3de30SBruce Richardson int enic_set_vnic_res(struct enic *enic)
149772f3de30SBruce Richardson {
149872f3de30SBruce Richardson 	struct rte_eth_dev *eth_dev = enic->rte_dev;
1499b6d5fd2eSJohn Daley 	int rc = 0;
15000f872d31SHyong Youb Kim 	unsigned int required_rq, required_wq, required_cq, required_intr;
150172f3de30SBruce Richardson 
15026c45c330SHyong Youb Kim 	/* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */
15036c45c330SHyong Youb Kim 	required_rq = eth_dev->data->nb_rx_queues * 2;
15046c45c330SHyong Youb Kim 	required_wq = eth_dev->data->nb_tx_queues;
15056c45c330SHyong Youb Kim 	required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues;
15060f872d31SHyong Youb Kim 	required_intr = 1; /* 1 for LSC even if intr_conf.lsc is 0 */
15070f872d31SHyong Youb Kim 	if (eth_dev->data->dev_conf.intr_conf.rxq) {
15080f872d31SHyong Youb Kim 		required_intr += eth_dev->data->nb_rx_queues;
15090f872d31SHyong Youb Kim 	}
1510*00ce4311SHyong Youb Kim 	/* FW adds 2 interrupts for admin chan. Use 1 for RQ */
1511*00ce4311SHyong Youb Kim 	if (enic_is_vf(enic))
1512*00ce4311SHyong Youb Kim 		required_intr += 1;
1513edd08548SHyong Youb Kim 	ENICPMD_LOG(DEBUG, "Required queues for PF: rq %u wq %u cq %u",
1514edd08548SHyong Youb Kim 		    required_rq, required_wq, required_cq);
1515edd08548SHyong Youb Kim 	if (enic->vf_required_rq) {
1516edd08548SHyong Youb Kim 		/* Queues needed for VF representors */
1517edd08548SHyong Youb Kim 		required_rq += enic->vf_required_rq;
1518edd08548SHyong Youb Kim 		required_wq += enic->vf_required_wq;
1519edd08548SHyong Youb Kim 		required_cq += enic->vf_required_cq;
1520edd08548SHyong Youb Kim 		ENICPMD_LOG(DEBUG, "Required queues for VF representors: rq %u wq %u cq %u",
1521edd08548SHyong Youb Kim 			    enic->vf_required_rq, enic->vf_required_wq,
1522edd08548SHyong Youb Kim 			    enic->vf_required_cq);
1523edd08548SHyong Youb Kim 	}
15246c45c330SHyong Youb Kim 
15256c45c330SHyong Youb Kim 	if (enic->conf_rq_count < required_rq) {
1526856d7ba7SNelson Escobar 		dev_err(dev, "Not enough Receive queues. Requested:%u which uses %u RQs on VIC, Configured:%u\n",
1527856d7ba7SNelson Escobar 			eth_dev->data->nb_rx_queues,
15286c45c330SHyong Youb Kim 			required_rq, enic->conf_rq_count);
1529b6d5fd2eSJohn Daley 		rc = -EINVAL;
1530b6d5fd2eSJohn Daley 	}
15316c45c330SHyong Youb Kim 	if (enic->conf_wq_count < required_wq) {
1532b6d5fd2eSJohn Daley 		dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
1533ce93d3c3SNelson Escobar 			eth_dev->data->nb_tx_queues, enic->conf_wq_count);
1534b6d5fd2eSJohn Daley 		rc = -EINVAL;
153572f3de30SBruce Richardson 	}
153672f3de30SBruce Richardson 
15376c45c330SHyong Youb Kim 	if (enic->conf_cq_count < required_cq) {
1538b6d5fd2eSJohn Daley 		dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
15396c45c330SHyong Youb Kim 			required_cq, enic->conf_cq_count);
1540b6d5fd2eSJohn Daley 		rc = -EINVAL;
1541b6d5fd2eSJohn Daley 	}
15420f872d31SHyong Youb Kim 	if (enic->conf_intr_count < required_intr) {
15430f872d31SHyong Youb Kim 		dev_err(dev, "Not enough Interrupts to support Rx queue"
15440f872d31SHyong Youb Kim 			" interrupts. Required:%u, Configured:%u\n",
15450f872d31SHyong Youb Kim 			required_intr, enic->conf_intr_count);
15460f872d31SHyong Youb Kim 		rc = -EINVAL;
15470f872d31SHyong Youb Kim 	}
1548b6d5fd2eSJohn Daley 
1549b6d5fd2eSJohn Daley 	if (rc == 0) {
155072f3de30SBruce Richardson 		enic->rq_count = eth_dev->data->nb_rx_queues;
155172f3de30SBruce Richardson 		enic->wq_count = eth_dev->data->nb_tx_queues;
1552b6d5fd2eSJohn Daley 		enic->cq_count = enic->rq_count + enic->wq_count;
15530f872d31SHyong Youb Kim 		enic->intr_count = required_intr;
155472f3de30SBruce Richardson 	}
155572f3de30SBruce Richardson 
1556b6d5fd2eSJohn Daley 	return rc;
155772f3de30SBruce Richardson }
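
/*
 * A worked sketch of the arithmetic enic_set_vnic_res() applies, under
 * the same assumptions: two vNIC RQs per Rx queue (SOP + data), one CQ
 * per Rx or Tx queue, one LSC interrupt, one extra interrupt per Rx
 * queue when Rx interrupts are on, and one more on a VF for the admin
 * channel. For example, a PF with 4 Rx and 4 Tx queues and Rx
 * interrupts enabled needs rq=8, wq=4, cq=8, intr=5.
 */
static __rte_unused void
example_required_resources(uint16_t nb_rxq, uint16_t nb_txq, bool rxq_intr,
			   bool is_vf, unsigned int *rq, unsigned int *wq,
			   unsigned int *cq, unsigned int *intr)
{
	*rq = nb_rxq * 2;			/* SOP + data RQ per Rx queue */
	*wq = nb_txq;
	*cq = (unsigned int)nb_rxq + nb_txq;	/* one CQ per RQ pair or WQ */
	*intr = 1 + (rxq_intr ? nb_rxq : 0) + (is_vf ? 1 : 0);
}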
155872f3de30SBruce Richardson 
1559c3e09182SJohn Daley /* Re-initialize the CQ and RQs for an Rx queue, then post fresh mbufs */
1560c3e09182SJohn Daley static int
1561c3e09182SJohn Daley enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
1562c3e09182SJohn Daley {
1563c3e09182SJohn Daley 	struct vnic_rq *sop_rq, *data_rq;
1564ea5f15b1SJohn Daley 	unsigned int cq_idx;
1565c3e09182SJohn Daley 	int rc = 0;
1566c3e09182SJohn Daley 
1567aa07bf8fSJohn Daley 	sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
1568285fd7c4SJohn Daley 	data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx, enic)];
1569edd08548SHyong Youb Kim 	cq_idx = enic_cq_rq(enic, rq_idx);
1570c3e09182SJohn Daley 
1571c3e09182SJohn Daley 	vnic_cq_clean(&enic->cq[cq_idx]);
1572c3e09182SJohn Daley 	vnic_cq_init(&enic->cq[cq_idx],
1573c3e09182SJohn Daley 		     0 /* flow_control_enable */,
1574c3e09182SJohn Daley 		     1 /* color_enable */,
1575c3e09182SJohn Daley 		     0 /* cq_head */,
1576c3e09182SJohn Daley 		     0 /* cq_tail */,
1577c3e09182SJohn Daley 		     1 /* cq_tail_color */,
1578c3e09182SJohn Daley 		     0 /* interrupt_enable */,
1579c3e09182SJohn Daley 		     1 /* cq_entry_enable */,
1580c3e09182SJohn Daley 		     0 /* cq_message_enable */,
1581c3e09182SJohn Daley 		     0 /* interrupt offset */,
1582c3e09182SJohn Daley 		     0 /* cq_message_addr */);
1583c3e09182SJohn Daley 
1584c3e09182SJohn Daley 
1585aa07bf8fSJohn Daley 	vnic_rq_init_start(sop_rq, enic_cq_rq(enic,
1586aa07bf8fSJohn Daley 			   enic_rte_rq_idx_to_sop_idx(rq_idx)), 0,
1587aa07bf8fSJohn Daley 			   sop_rq->ring.desc_count - 1, 1, 0);
1588c3e09182SJohn Daley 	if (data_rq->in_use) {
1589c3e09182SJohn Daley 		vnic_rq_init_start(data_rq,
1590aa07bf8fSJohn Daley 				   enic_cq_rq(enic,
1591285fd7c4SJohn Daley 				   enic_rte_rq_idx_to_data_idx(rq_idx, enic)),
1592285fd7c4SJohn Daley 				   0, data_rq->ring.desc_count - 1, 1, 0);
1593c3e09182SJohn Daley 	}
1594c3e09182SJohn Daley 
1595c3e09182SJohn Daley 	rc = enic_alloc_rx_queue_mbufs(enic, sop_rq);
1596c3e09182SJohn Daley 	if (rc)
1597c3e09182SJohn Daley 		return rc;
1598c3e09182SJohn Daley 
1599c3e09182SJohn Daley 	if (data_rq->in_use) {
1600c3e09182SJohn Daley 		rc = enic_alloc_rx_queue_mbufs(enic, data_rq);
1601c3e09182SJohn Daley 		if (rc) {
1602c3e09182SJohn Daley 			enic_rxmbuf_queue_release(enic, sop_rq);
1603c3e09182SJohn Daley 			return rc;
1604c3e09182SJohn Daley 		}
1605c3e09182SJohn Daley 	}
1606c3e09182SJohn Daley 
1607c3e09182SJohn Daley 	return 0;
1608c3e09182SJohn Daley }
1609c3e09182SJohn Daley 
1610396a6d71SJohn Daley /* The Cisco NIC can send and receive packets up to a max packet size
1611396a6d71SJohn Daley  * determined by the NIC type and firmware. There is also an MTU
1612396a6d71SJohn Daley  * configured into the NIC via the CIMC/UCSM management interface
1613396a6d71SJohn Daley  * which can be overridden by this function (up to the max packet size).
1614396a6d71SJohn Daley  * Depending on the network setup, doing so may cause packet drops
1615396a6d71SJohn Daley  * and unexpected behavior.
1616396a6d71SJohn Daley  */
1617396a6d71SJohn Daley int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
1618396a6d71SJohn Daley {
1619c3e09182SJohn Daley 	unsigned int rq_idx;
1620c3e09182SJohn Daley 	struct vnic_rq *rq;
1621c3e09182SJohn Daley 	int rc = 0;
1622396a6d71SJohn Daley 	uint16_t old_mtu;	/* previous setting */
1623396a6d71SJohn Daley 	uint16_t config_mtu;	/* Value configured into NIC via CIMC/UCSM */
1624396a6d71SJohn Daley 	struct rte_eth_dev *eth_dev = enic->rte_dev;
1625396a6d71SJohn Daley 
1626396a6d71SJohn Daley 	old_mtu = eth_dev->data->mtu;
1627396a6d71SJohn Daley 	config_mtu = enic->config.mtu;
1628396a6d71SJohn Daley 
16290e804034SJohn Daley 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
16300e804034SJohn Daley 		return -E_RTE_SECONDARY;
16310e804034SJohn Daley 
1632396a6d71SJohn Daley 	if (new_mtu > enic->max_mtu) {
1633396a6d71SJohn Daley 		dev_err(enic,
1634396a6d71SJohn Daley 			"MTU not updated: requested (%u) greater than max (%u)\n",
1635396a6d71SJohn Daley 			new_mtu, enic->max_mtu);
1636396a6d71SJohn Daley 		return -EINVAL;
1637396a6d71SJohn Daley 	}
1638396a6d71SJohn Daley 	if (new_mtu < ENIC_MIN_MTU) {
1639396a6d71SJohn Daley 		dev_info(enic,
1640396a6d71SJohn Daley 			"MTU not updated: requested (%u) less than min (%u)\n",
1641396a6d71SJohn Daley 			new_mtu, ENIC_MIN_MTU);
1642396a6d71SJohn Daley 		return -EINVAL;
1643396a6d71SJohn Daley 	}
1644396a6d71SJohn Daley 	if (new_mtu > config_mtu)
1645396a6d71SJohn Daley 		dev_warning(enic,
1646396a6d71SJohn Daley 			"MTU (%u) is greater than value configured in NIC (%u)\n",
1647396a6d71SJohn Daley 			new_mtu, config_mtu);
1648396a6d71SJohn Daley 
164995faa2a9SHyong Youb Kim 	/*
165095faa2a9SHyong Youb Kim 	 * If the device has not started (enic_enable), nothing to do.
165195faa2a9SHyong Youb Kim 	 * Later, enic_enable() will set up RQs reflecting the new maximum
165295faa2a9SHyong Youb Kim 	 * packet length.
165395faa2a9SHyong Youb Kim 	 */
165495faa2a9SHyong Youb Kim 	if (!eth_dev->data->dev_started)
1655e90884a6SWeiguo Li 		return rc;
165695faa2a9SHyong Youb Kim 
165795faa2a9SHyong Youb Kim 	/*
165895faa2a9SHyong Youb Kim 	 * The device has started, re-do RQs on the fly. In the process, we
165995faa2a9SHyong Youb Kim 	 * pick up the new maximum packet length.
166095faa2a9SHyong Youb Kim 	 *
166195faa2a9SHyong Youb Kim 	 * Some applications rely on the ability to change MTU without stopping
166295faa2a9SHyong Youb Kim 	 * the device. So keep this behavior for now.
1663c3e09182SJohn Daley 	 */
1664c3e09182SJohn Daley 	rte_spinlock_lock(&enic->mtu_lock);
1665c3e09182SJohn Daley 
1666c3e09182SJohn Daley 	/* Stop traffic on all RQs */
1667c3e09182SJohn Daley 	for (rq_idx = 0; rq_idx < enic->rq_count * 2; rq_idx++) {
1668c3e09182SJohn Daley 		rq = &enic->rq[rq_idx];
1669c3e09182SJohn Daley 		if (rq->is_sop && rq->in_use) {
1670aa07bf8fSJohn Daley 			rc = enic_stop_rq(enic,
1671aa07bf8fSJohn Daley 					  enic_sop_rq_idx_to_rte_idx(rq_idx));
1672c3e09182SJohn Daley 			if (rc) {
1673c3e09182SJohn Daley 				dev_err(enic, "Failed to stop Rq %u\n", rq_idx);
1674c3e09182SJohn Daley 				goto set_mtu_done;
1675c3e09182SJohn Daley 			}
1676c3e09182SJohn Daley 		}
1677c3e09182SJohn Daley 	}
1678c3e09182SJohn Daley 
167998a7ea33SJerin Jacob 	/* replace Rx function with a no-op to avoid getting stale pkts */
1680a41f593fSFerruh Yigit 	eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
1681bcd68b68SHyong Youb Kim 	rte_eth_fp_ops[enic->port_id].rx_pkt_burst = eth_dev->rx_pkt_burst;
1682c3e09182SJohn Daley 	rte_mb();
1683c3e09182SJohn Daley 
1684c3e09182SJohn Daley 	/* Allow time for threads to exit the real Rx function. */
1685c3e09182SJohn Daley 	usleep(100000);
1686c3e09182SJohn Daley 
1687c3e09182SJohn Daley 	/* now it is safe to reconfigure the RQs */
1688c3e09182SJohn Daley 
1690c3e09182SJohn Daley 	/* free and reallocate RQs with the new MTU */
1691c3e09182SJohn Daley 	for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
1692aa07bf8fSJohn Daley 		rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
169333a2d659SJohn Daley 		if (!rq->in_use)
169433a2d659SJohn Daley 			continue;
1695c3e09182SJohn Daley 
1696c3e09182SJohn Daley 		enic_free_rq(rq);
1697c3e09182SJohn Daley 		rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
1698ce16fd70SJohn Daley 				   rq->tot_nb_desc, rq->rx_free_thresh);
1699c3e09182SJohn Daley 		if (rc) {
1700c3e09182SJohn Daley 			dev_err(enic,
1701c3e09182SJohn Daley 				"Fatal MTU alloc error- No traffic will pass\n");
1702c3e09182SJohn Daley 			goto set_mtu_done;
1703c3e09182SJohn Daley 		}
1704c3e09182SJohn Daley 
1705c3e09182SJohn Daley 		rc = enic_reinit_rq(enic, rq_idx);
1706c3e09182SJohn Daley 		if (rc) {
1707c3e09182SJohn Daley 			dev_err(enic,
1708c3e09182SJohn Daley 				"Fatal MTU RQ reinit- No traffic will pass\n");
1709c3e09182SJohn Daley 			goto set_mtu_done;
1710c3e09182SJohn Daley 		}
1711c3e09182SJohn Daley 	}
1712c3e09182SJohn Daley 
1713c3e09182SJohn Daley 	/* put back the real receive function */
1714c3e09182SJohn Daley 	rte_mb();
1715e92a4b41SHyong Youb Kim 	enic_pick_rx_handler(eth_dev);
1716bcd68b68SHyong Youb Kim 	rte_eth_fp_ops[enic->port_id].rx_pkt_burst = eth_dev->rx_pkt_burst;
1717c3e09182SJohn Daley 	rte_mb();
1718c3e09182SJohn Daley 
1719c3e09182SJohn Daley 	/* restart Rx traffic */
1720c3e09182SJohn Daley 	for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
1721aa07bf8fSJohn Daley 		rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
1722c3e09182SJohn Daley 		if (rq->is_sop && rq->in_use)
1723c3e09182SJohn Daley 			enic_start_rq(enic, rq_idx);
1724c3e09182SJohn Daley 	}
1725c3e09182SJohn Daley 
1726c3e09182SJohn Daley set_mtu_done:
1727396a6d71SJohn Daley 	dev_info(enic, "MTU changed from %u to %u\n",  old_mtu, new_mtu);
1728c3e09182SJohn Daley 	rte_spinlock_unlock(&enic->mtu_lock);
1729c3e09182SJohn Daley 	return rc;
1730396a6d71SJohn Daley }
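
/*
 * Illustrative application-side sketch: changing the MTU on a running
 * port reaches enic_set_mtu() above through the ethdev mtu_set handler
 * and briefly stalls Rx while the RQs are torn down and rebuilt. The
 * 9000-byte value is only an example and must not exceed enic->max_mtu.
 */
static __rte_unused int
example_set_jumbo_mtu(uint16_t port_id)
{
	return rte_eth_dev_set_mtu(port_id, 9000);
}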
1731396a6d71SJohn Daley 
173261c7b522SJohn Daley static void
173361c7b522SJohn Daley enic_disable_overlay_offload(struct enic *enic)
173461c7b522SJohn Daley {
173561c7b522SJohn Daley 	/*
173661c7b522SJohn Daley 	 * Disabling fails if the feature is provisioned but
173761c7b522SJohn Daley 	 * not enabled. So ignore result and do not log error.
173861c7b522SJohn Daley 	 */
173961c7b522SJohn Daley 	if (enic->vxlan) {
174061c7b522SJohn Daley 		vnic_dev_overlay_offload_ctrl(enic->vdev,
174161c7b522SJohn Daley 			OVERLAY_FEATURE_VXLAN, OVERLAY_OFFLOAD_DISABLE);
174261c7b522SJohn Daley 	}
174361c7b522SJohn Daley 	if (enic->geneve) {
174461c7b522SJohn Daley 		vnic_dev_overlay_offload_ctrl(enic->vdev,
174561c7b522SJohn Daley 			OVERLAY_FEATURE_GENEVE, OVERLAY_OFFLOAD_DISABLE);
174661c7b522SJohn Daley 	}
174761c7b522SJohn Daley }
174861c7b522SJohn Daley 
174961c7b522SJohn Daley static int
175061c7b522SJohn Daley enic_enable_overlay_offload(struct enic *enic)
175161c7b522SJohn Daley {
175261c7b522SJohn Daley 	if (enic->vxlan && vnic_dev_overlay_offload_ctrl(enic->vdev,
175361c7b522SJohn Daley 			OVERLAY_FEATURE_VXLAN, OVERLAY_OFFLOAD_ENABLE) != 0) {
175461c7b522SJohn Daley 		dev_err(NULL, "failed to enable VXLAN offload\n");
175561c7b522SJohn Daley 		return -EINVAL;
175661c7b522SJohn Daley 	}
175761c7b522SJohn Daley 	if (enic->geneve && vnic_dev_overlay_offload_ctrl(enic->vdev,
175861c7b522SJohn Daley 			OVERLAY_FEATURE_GENEVE, OVERLAY_OFFLOAD_ENABLE) != 0) {
175961c7b522SJohn Daley 		dev_err(NULL, "failed to enable Geneve offload\n");
176061c7b522SJohn Daley 		return -EINVAL;
176161c7b522SJohn Daley 	}
176261c7b522SJohn Daley 	enic->tx_offload_capa |=
1763295968d1SFerruh Yigit 		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1764295968d1SFerruh Yigit 		(enic->geneve ? RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO : 0) |
1765295968d1SFerruh Yigit 		(enic->vxlan ? RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO : 0);
176661c7b522SJohn Daley 	enic->tx_offload_mask |=
1767daa02b5cSOlivier Matz 		RTE_MBUF_F_TX_OUTER_IPV6 |
1768daa02b5cSOlivier Matz 		RTE_MBUF_F_TX_OUTER_IPV4 |
1769daa02b5cSOlivier Matz 		RTE_MBUF_F_TX_OUTER_IP_CKSUM |
1770daa02b5cSOlivier Matz 		RTE_MBUF_F_TX_TUNNEL_MASK;
177161c7b522SJohn Daley 	enic->overlay_offload = true;
177261c7b522SJohn Daley 
177361c7b522SJohn Daley 	if (enic->vxlan && enic->geneve)
177461c7b522SJohn Daley 		dev_info(NULL, "Overlay offload is enabled (VxLAN, Geneve)\n");
177561c7b522SJohn Daley 	else if (enic->vxlan)
177661c7b522SJohn Daley 		dev_info(NULL, "Overlay offload is enabled (VxLAN)\n");
177761c7b522SJohn Daley 	else
177861c7b522SJohn Daley 		dev_info(NULL, "Overlay offload is enabled (Geneve)\n");
177961c7b522SJohn Daley 
178061c7b522SJohn Daley 	return 0;
178161c7b522SJohn Daley }
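
/*
 * Illustrative application-side sketch: once overlay offload is
 * enabled above, the added Tx capabilities are visible through the
 * standard device-info query.
 */
static __rte_unused bool
example_has_vxlan_tso(uint16_t port_id)
{
	struct rte_eth_dev_info info;

	if (rte_eth_dev_info_get(port_id, &info) != 0)
		return false;
	return (info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) != 0;
}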
178261c7b522SJohn Daley 
178361c7b522SJohn Daley static int
178461c7b522SJohn Daley enic_reset_overlay_port(struct enic *enic)
178561c7b522SJohn Daley {
178661c7b522SJohn Daley 	if (enic->vxlan) {
178761c7b522SJohn Daley 		enic->vxlan_port = RTE_VXLAN_DEFAULT_PORT;
178861c7b522SJohn Daley 		/*
178961c7b522SJohn Daley 		 * Reset the vxlan port to the default, as the NIC firmware
179061c7b522SJohn Daley 		 * does not reset it automatically and keeps the old setting.
179161c7b522SJohn Daley 		 */
179261c7b522SJohn Daley 		if (vnic_dev_overlay_offload_cfg(enic->vdev,
179361c7b522SJohn Daley 						 OVERLAY_CFG_VXLAN_PORT_UPDATE,
179461c7b522SJohn Daley 						 RTE_VXLAN_DEFAULT_PORT)) {
179561c7b522SJohn Daley 			dev_err(enic, "failed to update vxlan port\n");
179661c7b522SJohn Daley 			return -EINVAL;
179761c7b522SJohn Daley 		}
179861c7b522SJohn Daley 	}
179961c7b522SJohn Daley 	if (enic->geneve) {
180061c7b522SJohn Daley 		enic->geneve_port = RTE_GENEVE_DEFAULT_PORT;
180161c7b522SJohn Daley 		if (vnic_dev_overlay_offload_cfg(enic->vdev,
180261c7b522SJohn Daley 						 OVERLAY_CFG_GENEVE_PORT_UPDATE,
180361c7b522SJohn Daley 						 RTE_GENEVE_DEFAULT_PORT)) {
180461c7b522SJohn Daley 			dev_err(enic, "failed to update geneve port\n");
180561c7b522SJohn Daley 			return -EINVAL;
180661c7b522SJohn Daley 		}
180761c7b522SJohn Daley 	}
180861c7b522SJohn Daley 	return 0;
180961c7b522SJohn Daley }
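
/*
 * Illustrative application-side sketch: after the ports are reset to
 * their defaults above, an application can still override the UDP port
 * the NIC parses as VXLAN via the ethdev tunnel-port API.
 */
static __rte_unused int
example_set_vxlan_port(uint16_t port_id, uint16_t udp_port)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = udp_port,
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}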
181061c7b522SJohn Daley 
181172f3de30SBruce Richardson static int enic_dev_init(struct enic *enic)
181272f3de30SBruce Richardson {
181372f3de30SBruce Richardson 	int err;
181472f3de30SBruce Richardson 	struct rte_eth_dev *eth_dev = enic->rte_dev;
181572f3de30SBruce Richardson 
181672f3de30SBruce Richardson 	vnic_dev_intr_coal_timer_info_default(enic->vdev);
181772f3de30SBruce Richardson 
181872f3de30SBruce Richardson 	/* Get vNIC configuration */
182072f3de30SBruce Richardson 	err = enic_get_vnic_config(enic);
182172f3de30SBruce Richardson 	if (err) {
182272f3de30SBruce Richardson 		dev_err(dev, "Get vNIC configuration failed, aborting\n");
182372f3de30SBruce Richardson 		return err;
182472f3de30SBruce Richardson 	}
182572f3de30SBruce Richardson 
1826b16e60abSNelson Escobar 	/* Get available resource counts */
1827b16e60abSNelson Escobar 	enic_get_res_counts(enic);
1828b16e60abSNelson Escobar 	if (enic->conf_rq_count == 1) {
1829b16e60abSNelson Escobar 		dev_err(enic, "Running with only 1 RQ configured in the vNIC is not supported.\n");
1830b16e60abSNelson Escobar 		dev_err(enic, "Please configure 2 RQs in the vNIC for each Rx queue used by DPDK.\n");
1831b16e60abSNelson Escobar 		dev_err(enic, "See the ENIC PMD guide for more information.\n");
1832b16e60abSNelson Escobar 		return -EINVAL;
1833b16e60abSNelson Escobar 	}
18346c45c330SHyong Youb Kim 	/* Queue counts may be zeros. rte_zmalloc returns NULL in that case. */
18356c45c330SHyong Youb Kim 	enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) *
18366c45c330SHyong Youb Kim 			       enic->conf_cq_count, 8);
18370f872d31SHyong Youb Kim 	enic->intr = rte_zmalloc("enic_vnic_intr", sizeof(struct vnic_intr) *
18380f872d31SHyong Youb Kim 				 enic->conf_intr_count, 8);
18396c45c330SHyong Youb Kim 	enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) *
18406c45c330SHyong Youb Kim 			       enic->conf_rq_count, 8);
18416c45c330SHyong Youb Kim 	enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) *
18426c45c330SHyong Youb Kim 			       enic->conf_wq_count, 8);
18436c45c330SHyong Youb Kim 	if (enic->conf_cq_count > 0 && enic->cq == NULL) {
18446c45c330SHyong Youb Kim 		dev_err(enic, "failed to allocate vnic_cq, aborting.\n");
18456c45c330SHyong Youb Kim 		return -1;
18466c45c330SHyong Youb Kim 	}
18470f872d31SHyong Youb Kim 	if (enic->conf_intr_count > 0 && enic->intr == NULL) {
18480f872d31SHyong Youb Kim 		dev_err(enic, "failed to allocate vnic_intr, aborting.\n");
18490f872d31SHyong Youb Kim 		return -1;
18500f872d31SHyong Youb Kim 	}
18516c45c330SHyong Youb Kim 	if (enic->conf_rq_count > 0 && enic->rq == NULL) {
18526c45c330SHyong Youb Kim 		dev_err(enic, "failed to allocate vnic_rq, aborting.\n");
18536c45c330SHyong Youb Kim 		return -1;
18546c45c330SHyong Youb Kim 	}
18556c45c330SHyong Youb Kim 	if (enic->conf_wq_count > 0 && enic->wq == NULL) {
18566c45c330SHyong Youb Kim 		dev_err(enic, "failed to allocate vnic_wq, aborting.\n");
18576c45c330SHyong Youb Kim 		return -1;
18586c45c330SHyong Youb Kim 	}
1859b16e60abSNelson Escobar 
18608d496995SHyong Youb Kim 	eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr",
18616d13ea8eSOlivier Matz 					sizeof(struct rte_ether_addr) *
18628d496995SHyong Youb Kim 					ENIC_UNICAST_PERFECT_FILTERS, 0);
186372f3de30SBruce Richardson 	if (!eth_dev->data->mac_addrs) {
186472f3de30SBruce Richardson 		dev_err(enic, "mac addr storage alloc failed, aborting.\n");
186572f3de30SBruce Richardson 		return -1;
186672f3de30SBruce Richardson 	}
1867*00ce4311SHyong Youb Kim 
1868*00ce4311SHyong Youb Kim 	/*
1869*00ce4311SHyong Youb Kim 	 * If the PF has not assigned a MAC address to the VF, generate a random one.
1870*00ce4311SHyong Youb Kim 	 */
1871*00ce4311SHyong Youb Kim 	if (enic_is_vf(enic)) {
1872*00ce4311SHyong Youb Kim 		struct rte_ether_addr ea;
1873*00ce4311SHyong Youb Kim 
1874*00ce4311SHyong Youb Kim 		memcpy(ea.addr_bytes, enic->mac_addr, RTE_ETHER_ADDR_LEN);
1875*00ce4311SHyong Youb Kim 		if (!rte_is_valid_assigned_ether_addr(&ea)) {
1876*00ce4311SHyong Youb Kim 			rte_eth_random_addr(ea.addr_bytes);
1877*00ce4311SHyong Youb Kim 			ENICPMD_LOG(INFO, "assigned random MAC address " RTE_ETHER_ADDR_PRT_FMT,
1878*00ce4311SHyong Youb Kim 				    RTE_ETHER_ADDR_BYTES(&ea));
1879*00ce4311SHyong Youb Kim 			memcpy(enic->mac_addr, ea.addr_bytes, RTE_ETHER_ADDR_LEN);
1880*00ce4311SHyong Youb Kim 		}
1881*00ce4311SHyong Youb Kim 	}
1882*00ce4311SHyong Youb Kim 
1883538da7a1SOlivier Matz 	rte_ether_addr_copy((struct rte_ether_addr *)enic->mac_addr,
1884bbab3d97SJohn Daley 			eth_dev->data->mac_addrs);
188572f3de30SBruce Richardson 
188672f3de30SBruce Richardson 	vnic_dev_set_reset_flag(enic->vdev, 0);
188772f3de30SBruce Richardson 
18886ced1376SJohn Daley 	LIST_INIT(&enic->flows);
18896ced1376SJohn Daley 
1890c98779abSNelson Escobar 	/* set up link status checking */
1891c98779abSNelson Escobar 	vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
1892c98779abSNelson Escobar 
189393fb21fdSHyong Youb Kim 	enic->overlay_offload = false;
1894308b514bSHyong Youb Kim 	/*
189561c7b522SJohn Daley 	 * First, explicitly disable overlay offload as the setting is
189661c7b522SJohn Daley 	 * sticky, and resetting vNIC may not disable it.
1897308b514bSHyong Youb Kim 	 */
189861c7b522SJohn Daley 	enic_disable_overlay_offload(enic);
189961c7b522SJohn Daley 	/* Then, enable overlay offload according to vNIC flags */
190061c7b522SJohn Daley 	if (!enic->disable_overlay && (enic->vxlan || enic->geneve)) {
190161c7b522SJohn Daley 		err = enic_enable_overlay_offload(enic);
190261c7b522SJohn Daley 		if (err) {
190361c7b522SJohn Daley 			dev_info(NULL, "failed to enable overlay offload\n");
190461c7b522SJohn Daley 			return err;
1905c02a96fcSHyong Youb Kim 		}
1906c02a96fcSHyong Youb Kim 	}
1907af3a1628SHyong Youb Kim 	/*
190861c7b522SJohn Daley 	 * Reset the vxlan/geneve ports if HW parsing is available.
1909af3a1628SHyong Youb Kim 	 * Parsing of these ports is always enabled, regardless of
1910af3a1628SHyong Youb Kim 	 * whether overlay offload itself is enabled or disabled.
1911af3a1628SHyong Youb Kim 	 */
191261c7b522SJohn Daley 	err = enic_reset_overlay_port(enic);
191361c7b522SJohn Daley 	if (err)
191461c7b522SJohn Daley 		return err;
191593fb21fdSHyong Youb Kim 
191639cf83f1SHyong Youb Kim 	if (enic_fm_init(enic))
191739cf83f1SHyong Youb Kim 		dev_warning(enic, "Init of flowman failed.\n");
191872f3de30SBruce Richardson 	return 0;
191972f3de30SBruce Richardson }
192072f3de30SBruce Richardson 
192139cf83f1SHyong Youb Kim static void lock_devcmd(void *priv)
192239cf83f1SHyong Youb Kim {
192339cf83f1SHyong Youb Kim 	struct enic *enic = priv;
192439cf83f1SHyong Youb Kim 
192539cf83f1SHyong Youb Kim 	rte_spinlock_lock(&enic->devcmd_lock);
192639cf83f1SHyong Youb Kim }
192739cf83f1SHyong Youb Kim 
192839cf83f1SHyong Youb Kim static void unlock_devcmd(void *priv)
192939cf83f1SHyong Youb Kim {
193039cf83f1SHyong Youb Kim 	struct enic *enic = priv;
193139cf83f1SHyong Youb Kim 
193239cf83f1SHyong Youb Kim 	rte_spinlock_unlock(&enic->devcmd_lock);
193339cf83f1SHyong Youb Kim }
193439cf83f1SHyong Youb Kim 
193572f3de30SBruce Richardson int enic_probe(struct enic *enic)
193672f3de30SBruce Richardson {
193772f3de30SBruce Richardson 	struct rte_pci_device *pdev = enic->pdev;
193872f3de30SBruce Richardson 	int err = -1;
193972f3de30SBruce Richardson 
1940d0c98d9eSJohn Daley 	dev_debug(enic, "Initializing ENIC PMD\n");
194172f3de30SBruce Richardson 
19420e804034SJohn Daley 	/* if this is a secondary process the hardware is already initialized */
19430e804034SJohn Daley 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
19440e804034SJohn Daley 		return 0;
19450e804034SJohn Daley 
194672f3de30SBruce Richardson 	enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;
194772f3de30SBruce Richardson 	enic->bar0.len = pdev->mem_resource[0].len;
194872f3de30SBruce Richardson 
194972f3de30SBruce Richardson 	/* Register vNIC device */
195072f3de30SBruce Richardson 	enic->vdev = vnic_dev_register(NULL, enic, enic->pdev, &enic->bar0, 1);
195172f3de30SBruce Richardson 	if (!enic->vdev) {
195272f3de30SBruce Richardson 		dev_err(enic, "vNIC registration failed, aborting\n");
195372f3de30SBruce Richardson 		goto err_out;
195472f3de30SBruce Richardson 	}
195572f3de30SBruce Richardson 
1956da5f560bSNelson Escobar 	LIST_INIT(&enic->memzone_list);
1957da5f560bSNelson Escobar 	rte_spinlock_init(&enic->memzone_list_lock);
1958da5f560bSNelson Escobar 
195972f3de30SBruce Richardson 	vnic_register_cbacks(enic->vdev,
196072f3de30SBruce Richardson 		enic_alloc_consistent,
196172f3de30SBruce Richardson 		enic_free_consistent);
196272f3de30SBruce Richardson 
19638d782f3fSHyong Youb Kim 	/*
1964d74111a9SJohn Daley 	 * Allocate the consistent memory for stats upfront so both primary and
1965d74111a9SJohn Daley 	 * secondary processes can dump stats.
19668d782f3fSHyong Youb Kim 	 */
19678d782f3fSHyong Youb Kim 	err = vnic_dev_alloc_stats_mem(enic->vdev);
19688d782f3fSHyong Youb Kim 	if (err) {
19698d782f3fSHyong Youb Kim 		dev_err(enic, "Failed to allocate cmd memory, aborting\n");
19708d782f3fSHyong Youb Kim 		goto err_out_unregister;
19718d782f3fSHyong Youb Kim 	}
197272f3de30SBruce Richardson 	/* Issue device open to get device in known state */
197372f3de30SBruce Richardson 	err = enic_dev_open(enic);
197472f3de30SBruce Richardson 	if (err) {
197572f3de30SBruce Richardson 		dev_err(enic, "vNIC dev open failed, aborting\n");
197672f3de30SBruce Richardson 		goto err_out_unregister;
197772f3de30SBruce Richardson 	}
197872f3de30SBruce Richardson 
197972f3de30SBruce Richardson 	/* Set ingress vlan rewrite mode before vnic initialization */
1980e39c2756SHyong Youb Kim 	dev_debug(enic, "Set ig_vlan_rewrite_mode=%u\n",
1981e39c2756SHyong Youb Kim 		  enic->ig_vlan_rewrite_mode);
198272f3de30SBruce Richardson 	err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
1983e39c2756SHyong Youb Kim 		enic->ig_vlan_rewrite_mode);
198472f3de30SBruce Richardson 	if (err) {
198572f3de30SBruce Richardson 		dev_err(enic,
198672f3de30SBruce Richardson 			"Failed to set ingress vlan rewrite mode, aborting.\n");
198772f3de30SBruce Richardson 		goto err_out_dev_close;
198872f3de30SBruce Richardson 	}
198972f3de30SBruce Richardson 
199072f3de30SBruce Richardson 	/* Issue device init to initialize the vnic-to-switch link.
199172f3de30SBruce Richardson 	 * We'll start with carrier off and wait for link UP
199272f3de30SBruce Richardson 	 * notification later to turn on carrier.  We don't need
199372f3de30SBruce Richardson 	 * to wait here for the vnic-to-switch link initialization
199472f3de30SBruce Richardson 	 * to complete; link UP notification is the indication that
199572f3de30SBruce Richardson 	 * the process is complete.
199672f3de30SBruce Richardson 	 */
199772f3de30SBruce Richardson 
199872f3de30SBruce Richardson 	err = vnic_dev_init(enic->vdev, 0);
199972f3de30SBruce Richardson 	if (err) {
200072f3de30SBruce Richardson 		dev_err(enic, "vNIC dev init failed, aborting\n");
200172f3de30SBruce Richardson 		goto err_out_dev_close;
200272f3de30SBruce Richardson 	}
200372f3de30SBruce Richardson 
200472f3de30SBruce Richardson 	err = enic_dev_init(enic);
200572f3de30SBruce Richardson 	if (err) {
200672f3de30SBruce Richardson 		dev_err(enic, "Device initialization failed, aborting\n");
200772f3de30SBruce Richardson 		goto err_out_dev_close;
200872f3de30SBruce Richardson 	}
200972f3de30SBruce Richardson 
201039cf83f1SHyong Youb Kim 	/* Use a PF spinlock to serialize devcmd from PF and VF representors */
201139cf83f1SHyong Youb Kim 	if (enic->switchdev_mode) {
201239cf83f1SHyong Youb Kim 		rte_spinlock_init(&enic->devcmd_lock);
201339cf83f1SHyong Youb Kim 		vnic_register_lock(enic->vdev, lock_devcmd, unlock_devcmd);
201439cf83f1SHyong Youb Kim 	}
201572f3de30SBruce Richardson 	return 0;
201672f3de30SBruce Richardson 
201772f3de30SBruce Richardson err_out_dev_close:
201872f3de30SBruce Richardson 	vnic_dev_close(enic->vdev);
201972f3de30SBruce Richardson err_out_unregister:
202072f3de30SBruce Richardson 	vnic_dev_unregister(enic->vdev);
202172f3de30SBruce Richardson err_out:
202272f3de30SBruce Richardson 	return err;
202372f3de30SBruce Richardson }
202472f3de30SBruce Richardson 
202572f3de30SBruce Richardson void enic_remove(struct enic *enic)
202672f3de30SBruce Richardson {
202772f3de30SBruce Richardson 	enic_dev_deinit(enic);
202872f3de30SBruce Richardson 	vnic_dev_close(enic->vdev);
202972f3de30SBruce Richardson 	vnic_dev_unregister(enic->vdev);
203072f3de30SBruce Richardson }
2031