/* xref: /dpdk/drivers/net/intel/ice/ice_dcf_ethdev.c
 * (revision c1d145834f287aa8cf53de914618a7312f2c360e)
 */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <errno.h>
#include <stdbool.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <unistd.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <ethdev_pci.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <dev_driver.h>

#include <iavf_devids.h>

#include "ice_generic_flow.h"
#include "ice_dcf_ethdev.h"
#include "ice_rxtx.h"

#define DCF_NUM_MACADDR_MAX      64

static int dcf_add_del_mc_addr_list(struct ice_dcf_hw *hw,
				    struct rte_ether_addr *mc_addrs,
				    uint32_t mc_addrs_num, bool add);

static int
ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
				struct rte_eth_udp_tunnel *udp_tunnel);
static int
ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
				struct rte_eth_udp_tunnel *udp_tunnel);

static int
ice_dcf_dev_init(struct rte_eth_dev *eth_dev);

static int
ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev);

static int
ice_dcf_cap_check_handler(__rte_unused const char *key,
			  const char *value, __rte_unused void *opaque);

static int
ice_dcf_engine_disabled_handler(__rte_unused const char *key,
				const char *value, __rte_unused void *opaque);

struct ice_devarg {
	enum ice_dcf_devrarg type;
	const char *key;
	int (*handler)(__rte_unused const char *key,
		       const char *value, __rte_unused void *opaque);
};

static const struct ice_devarg ice_devargs_table[] = {
	{ICE_DCF_DEVARG_CAP, "cap", ice_dcf_cap_check_handler},
	{ICE_DCF_DEVARG_ACL, "acl", ice_dcf_engine_disabled_handler},
};

struct rte_ice_dcf_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct rte_ice_dcf_xstats_name_off rte_ice_dcf_stats_strings[] = {
	{"rx_bytes", offsetof(struct ice_dcf_eth_stats, rx_bytes)},
	{"rx_unicast_packets", offsetof(struct ice_dcf_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct ice_dcf_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct ice_dcf_eth_stats, rx_broadcast)},
	{"rx_dropped_packets", offsetof(struct ice_dcf_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct ice_dcf_eth_stats,
		rx_unknown_protocol)},
	{"tx_bytes", offsetof(struct ice_dcf_eth_stats, tx_bytes)},
	{"tx_unicast_packets", offsetof(struct ice_dcf_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct ice_dcf_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct ice_dcf_eth_stats, tx_broadcast)},
	{"tx_dropped_packets", offsetof(struct ice_dcf_eth_stats, tx_discards)},
	{"tx_error_packets", offsetof(struct ice_dcf_eth_stats, tx_errors)},
};

#define ICE_DCF_NB_XSTATS (sizeof(rte_ice_dcf_stats_strings) / \
		sizeof(rte_ice_dcf_stats_strings[0]))
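/* i.e. RTE_DIM(rte_ice_dcf_stats_strings) */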

static uint16_t
ice_dcf_recv_pkts(__rte_unused void *rx_queue,
		  __rte_unused struct rte_mbuf **bufs,
		  __rte_unused uint16_t nb_pkts)
{
	return 0;
}

static uint16_t
ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
		  __rte_unused struct rte_mbuf **bufs,
		  __rte_unused uint16_t nb_pkts)
{
	return 0;
}

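/*
 * Size the Rx data buffers from the mempool element size and bound the
 * maximum packet length by both the scatter chain limit and the MTU.
 * As a rough worked example (assuming the default 128-byte
 * RTE_PKTMBUF_HEADROOM): a mempool created with RTE_MBUF_DEFAULT_BUF_SIZE
 * leaves a 2048-byte data room, already aligned to the
 * (1 << ICE_RLAN_CTX_DBUF_S) granularity, so rx_buf_len = 2048; with a
 * 1500-byte MTU, max_pkt_len = 1500 + ICE_ETH_OVERHEAD, far below the
 * ICE_SUPPORT_CHAIN_NUM * rx_buf_len chaining limit.
 */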
static int
ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct rte_eth_dev_data *dev_data = dev->data;
	struct iavf_hw *hw = &dcf_ad->real_hw.avf;
	uint16_t buf_size, max_pkt_len;

	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
	rxq->rx_hdr_len = 0;
	rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
	rxq->rx_buf_len = RTE_MIN(rxq->rx_buf_len, ICE_RX_MAX_DATA_BUF_SIZE);
	max_pkt_len = RTE_MIN(ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
			      dev->data->mtu + ICE_ETH_OVERHEAD);

	/* Check that the maximum packet length is set correctly. */
	if (max_pkt_len <= RTE_ETHER_MIN_LEN ||
	    max_pkt_len > ICE_FRAME_SIZE_MAX) {
		PMD_DRV_LOG(ERR,
			    "maximum packet length must be larger than %u and smaller than %u",
			    (uint32_t)RTE_ETHER_MIN_LEN,
			    (uint32_t)ICE_FRAME_SIZE_MAX);
		return -EINVAL;
	}

	rxq->max_pkt_len = max_pkt_len;
	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
	    (rxq->max_pkt_len + 2 * RTE_VLAN_HLEN) > buf_size) {
		dev_data->scattered_rx = 1;
	}
	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	IAVF_WRITE_FLUSH(hw);

	return 0;
}

static int
ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
{
	struct ice_rx_queue **rxq =
		(struct ice_rx_queue **)dev->data->rx_queues;
	int i, ret;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (!rxq[i] || !rxq[i]->q_set)
			continue;
		ret = ice_dcf_init_rxq(dev, rxq[i]);
		if (ret)
			return ret;
	}

	ice_set_rx_function(dev);
	ice_set_tx_function(dev);

	return 0;
}

#define IAVF_MISC_VEC_ID                RTE_INTR_VEC_ZERO_OFFSET
#define IAVF_RX_VEC_START               RTE_INTR_VEC_RXTX_OFFSET

#define IAVF_ITR_INDEX_DEFAULT          0
#define IAVF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
#define IAVF_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */

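/*
 * Convert an ITR interval in microseconds to the hardware register count,
 * where each unit written represents 2 us: e.g.
 * iavf_calc_itr_interval(8160) == 4080, and any out-of-range request falls
 * back to the 32 us default, i.e. a count of 16.
 */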
static inline uint16_t
iavf_calc_itr_interval(int16_t interval)
{
	if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX)
		interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;

	/* Convert to a hardware count; each unit written represents 2 us */
	return interval / 2;
}

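/*
 * Map Rx queues to MSI-X vectors. Three cases are handled below: no
 * per-queue Rx interrupts (a single vector, using WB_ON_ITR when the PF
 * offers it, otherwise a slow ITR-based write-back interrupt); Rx
 * interrupts without multi-vector support (all queues share the misc
 * vector); and full multi-vector mode (queues spread round-robin across
 * the available vectors).
 */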
static int
ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
			      struct rte_intr_handle *intr_handle)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	uint16_t interval, i;
	int vec;

	if (rte_intr_cap_multiple(intr_handle) &&
	    dev->data->dev_conf.intr_conf.rxq) {
		if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
					    dev->data->nb_rx_queues)) {
			PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
				    dev->data->nb_rx_queues);
			return -1;
		}
	}

	if (!dev->data->dev_conf.intr_conf.rxq ||
	    !rte_intr_dp_is_en(intr_handle)) {
		/* Rx interrupt disabled, map the interrupt only for writeback */
		hw->nb_msix = 1;
		if (hw->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
			/* If WB_ON_ITR is supported, enable it */
			hw->msix_base = IAVF_RX_VEC_START;
			/* Set the ITR for index zero to 2 us, to make sure
			 * that we leave time for aggregation to occur, but
			 * don't increase latency dramatically.
			 */
			IAVF_WRITE_REG(&hw->avf,
				       IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
				       (0 << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
				       IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
				       (2UL << IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
		} else {
			/* Without the WB_ON_ITR offload flag, an interrupt
			 * must be set up for descriptor write-back.
			 */
			hw->msix_base = IAVF_MISC_VEC_ID;

			/* set ITR to max */
			interval =
			iavf_calc_itr_interval(IAVF_QUEUE_ITR_INTERVAL_MAX);
			IAVF_WRITE_REG(&hw->avf, IAVF_VFINT_DYN_CTL01,
				       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       (IAVF_ITR_INDEX_DEFAULT <<
					IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
				       (interval <<
					IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
		}
		IAVF_WRITE_FLUSH(&hw->avf);
		/* map all queues to the same interrupt */
		for (i = 0; i < dev->data->nb_rx_queues; i++)
			hw->rxq_map[hw->msix_base] |= 1 << i;
	} else {
		if (!rte_intr_allow_others(intr_handle)) {
			hw->nb_msix = 1;
			hw->msix_base = IAVF_MISC_VEC_ID;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				hw->rxq_map[hw->msix_base] |= 1 << i;
				rte_intr_vec_list_index_set(intr_handle,
							    i, IAVF_MISC_VEC_ID);
			}
			PMD_DRV_LOG(DEBUG,
				    "vector %u is mapped to all Rx queues",
				    hw->msix_base);
		} else {
			/* If Rx interrupts are required and multiple
			 * interrupts are available, the vectors start from 1.
			 */
			hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
					      rte_intr_nb_efd_get(intr_handle));
			hw->msix_base = IAVF_MISC_VEC_ID;
			vec = IAVF_MISC_VEC_ID;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				hw->rxq_map[vec] |= 1 << i;
				rte_intr_vec_list_index_set(intr_handle,
							    i, vec++);
				if (vec >= hw->nb_msix)
					vec = IAVF_RX_VEC_START;
			}
			PMD_DRV_LOG(DEBUG,
				    "%u vectors are mapped to %u Rx queues",
				    hw->nb_msix, dev->data->nb_rx_queues);
		}
	}

	if (ice_dcf_config_irq_map(hw)) {
		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
		return -1;
	}
	return 0;
}

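/*
 * Populate an Rx ring: allocate one mbuf per descriptor and program its
 * IOVA into the descriptor. The hardware only consumes these entries once
 * the queue-start path below writes the tail register.
 */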
static int
alloc_rxq_mbufs(struct ice_rx_queue *rxq)
{
	volatile union ice_rx_flex_desc *rxd;
	struct rte_mbuf *mbuf = NULL;
	uint64_t dma_addr;
	uint16_t i;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		mbuf = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(!mbuf)) {
			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
			return -ENOMEM;
		}

		rte_mbuf_refcnt_set(mbuf, 1);
		mbuf->next = NULL;
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;

		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

		rxd = &rxq->rx_ring[i];
		rxd->read.pkt_addr = dma_addr;
		rxd->read.hdr_addr = 0;
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
		rxd->read.rsvd1 = 0;
		rxd->read.rsvd2 = 0;
#endif

		rxq->sw_ring[i].mbuf = (void *)mbuf;
	}

	return 0;
}

static int
ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct iavf_hw *hw = &ad->real_hw.avf;
	struct ice_rx_queue *rxq;
	int err = 0;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	rxq = dev->data->rx_queues[rx_queue_id];

	err = alloc_rxq_mbufs(rxq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
		return err;
	}

	rte_wmb();

	/* Init the RX tail register. */
	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	IAVF_WRITE_FLUSH(hw);

	/* Ready to switch the queue on */
	err = ice_dcf_switch_queue(&ad->real_hw, rx_queue_id, true, true);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
			    rx_queue_id);
		return err;
	}

	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static inline void
reset_rx_queue(struct ice_rx_queue *rxq)
{
	uint16_t len;
	uint32_t i;

	if (!rxq)
		return;

	len = rxq->nb_rx_desc + ICE_RX_MAX_BURST;

	for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
		((volatile char *)rxq->rx_ring)[i] = 0;

	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));

	for (i = 0; i < ICE_RX_MAX_BURST; i++)
		rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;

	/* for rx bulk */
	rxq->rx_nb_avail = 0;
	rxq->rx_next_avail = 0;
	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);

	rxq->rx_tail = 0;
	rxq->nb_rx_hold = 0;
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
}

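/*
 * Reset a Tx queue to its post-configuration state: zero the ring, mark
 * every descriptor as done, and relink the software ring into a circular
 * list in which each entry's next_id points to its successor and the last
 * entry wraps back to index 0.
 */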
static inline void
reset_tx_queue(struct ice_tx_queue *txq)
{
	struct ice_tx_entry *txe;
	uint32_t i, size;
	uint16_t prev;

	if (!txq) {
		PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
		return;
	}

	txe = txq->sw_ring;
	size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
	for (i = 0; i < size; i++)
		((volatile char *)txq->tx_ring)[i] = 0;

	prev = (uint16_t)(txq->nb_tx_desc - 1);
	for (i = 0; i < txq->nb_tx_desc; i++) {
		txq->tx_ring[i].cmd_type_offset_bsz =
			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
		txe[i].mbuf = NULL;
		txe[i].last_id = i;
		txe[prev].next_id = i;
		prev = i;
	}

	txq->tx_tail = 0;
	txq->nb_tx_used = 0;

	txq->last_desc_cleaned = txq->nb_tx_desc - 1;
	txq->nb_tx_free = txq->nb_tx_desc - 1;

	txq->tx_next_dd = txq->tx_rs_thresh - 1;
	txq->tx_next_rs = txq->tx_rs_thresh - 1;
}

static int
ice_dcf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct ice_rx_queue *rxq;
	int err;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	err = ice_dcf_switch_queue(hw, rx_queue_id, true, false);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
			    rx_queue_id);
		return err;
	}

	rxq = dev->data->rx_queues[rx_queue_id];
	rxq->rx_rel_mbufs(rxq);
	reset_rx_queue(rxq);
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static int
ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct iavf_hw *hw = &ad->real_hw.avf;
	struct ice_tx_queue *txq;
	int err = 0;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -EINVAL;

	txq = dev->data->tx_queues[tx_queue_id];

	/* Init the TX tail register. */
	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(tx_queue_id);
	IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
	IAVF_WRITE_FLUSH(hw);

	/* Ready to switch the queue on */
	err = ice_dcf_switch_queue(&ad->real_hw, tx_queue_id, false, true);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
			    tx_queue_id);
		return err;
	}

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int
ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct ice_tx_queue *txq;
	int err;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -EINVAL;

	err = ice_dcf_switch_queue(hw, tx_queue_id, false, false);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
			    tx_queue_id);
		return err;
	}

	txq = dev->data->tx_queues[tx_queue_id];
	txq->tx_rel_mbufs(txq);
	reset_tx_queue(txq);
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

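/*
 * Start all Tx queues and then all Rx queues, skipping any queue marked
 * for deferred start. On failure, the queues already started are switched
 * back off so the port is left in a consistent stopped state.
 */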
static int
ice_dcf_start_queues(struct rte_eth_dev *dev)
{
	struct ice_rx_queue *rxq;
	struct ice_tx_queue *txq;
	int nb_rxq = 0;
	int nb_txq, i;

	for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
		txq = dev->data->tx_queues[nb_txq];
		if (txq->tx_deferred_start)
			continue;
		if (ice_dcf_tx_queue_start(dev, nb_txq) != 0) {
			PMD_DRV_LOG(ERR, "Failed to start Tx queue %u", nb_txq);
			goto tx_err;
		}
	}

	for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
		rxq = dev->data->rx_queues[nb_rxq];
		if (rxq->rx_deferred_start)
			continue;
		if (ice_dcf_rx_queue_start(dev, nb_rxq) != 0) {
			PMD_DRV_LOG(ERR, "Failed to start Rx queue %u", nb_rxq);
			goto rx_err;
		}
	}

	return 0;

	/* stop the already-started queues if we failed to start all queues */
rx_err:
	for (i = 0; i < nb_rxq; i++)
		ice_dcf_rx_queue_stop(dev, i);
tx_err:
	for (i = 0; i < nb_txq; i++)
		ice_dcf_tx_queue_stop(dev, i);

	return -1;
}

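/*
 * Bring the port up: initialize the Rx queues, set up RSS when the PF
 * advertises VIRTCHNL_VF_OFFLOAD_RSS_PF, push the queue and IRQ
 * configuration to the PF over virtchnl, start the queues, and finally
 * (re)program the primary unicast and cached multicast MAC filters.
 */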
static int
ice_dcf_dev_start(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	struct ice_adapter *ad = &dcf_ad->parent;
	struct ice_dcf_hw *hw = &dcf_ad->real_hw;
	int ret;

	if (hw->resetting) {
		PMD_DRV_LOG(ERR,
			    "The DCF has been reset by the PF, please reinitialize it first");
		return -EIO;
	}

	if (hw->tm_conf.root && !hw->tm_conf.committed) {
		PMD_DRV_LOG(ERR,
			    "please call hierarchy_commit() before starting the port");
		return -EIO;
	}

	ad->pf.adapter_stopped = 0;

	hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
				      dev->data->nb_tx_queues);

	ret = ice_dcf_init_rx_queues(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to initialize Rx queues");
		return ret;
	}

	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		ret = ice_dcf_init_rss(hw);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to configure RSS");
			return ret;
		}
	}

	ret = ice_dcf_configure_queues(hw);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to configure queues");
		return ret;
	}

	ret = ice_dcf_config_rx_queues_irqs(dev, intr_handle);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to configure Rx queue IRQs");
		return ret;
	}

	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		rte_intr_disable(intr_handle);
		rte_intr_enable(intr_handle);
	}

	ret = ice_dcf_start_queues(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to enable queues");
		return ret;
	}

	ret = ice_dcf_add_del_all_mac_addr(hw, hw->eth_dev->data->mac_addrs,
					   true, VIRTCHNL_ETHER_ADDR_PRIMARY);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add MAC address");
		return ret;
	}

	if (dcf_ad->mc_addrs_num) {
		/* re-add the previously configured multicast addresses */
		ret = dcf_add_del_mc_addr_list(hw, dcf_ad->mc_addrs,
					       dcf_ad->mc_addrs_num, true);
		if (ret)
			return ret;
	}

	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;

	return 0;
}

static void
ice_dcf_stop_queues(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct ice_rx_queue *rxq;
	struct ice_tx_queue *txq;
	int ret, i;

	/* Stop all queues */
	ret = ice_dcf_disable_queues(hw);
	if (ret)
		PMD_DRV_LOG(WARNING, "Failed to stop queues");

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (!txq)
			continue;
		txq->tx_rel_mbufs(txq);
		reset_tx_queue(txq);
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	}
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (!rxq)
			continue;
		rxq->rx_rel_mbufs(rxq);
		reset_rx_queue(rxq);
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	}
}

static int
ice_dcf_dev_stop(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	struct ice_adapter *ad = &dcf_ad->parent;

	if (ad->pf.adapter_stopped == 1) {
		PMD_DRV_LOG(DEBUG, "Port is already stopped");
		return 0;
	}

	/* Stop the VF representors for this device */
	ice_dcf_vf_repr_stop_all(dcf_ad);

	ice_dcf_stop_queues(dev);

	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw,
				     dcf_ad->real_hw.eth_dev->data->mac_addrs,
				     false, VIRTCHNL_ETHER_ADDR_PRIMARY);

	if (dcf_ad->mc_addrs_num)
		/* flush the previously configured multicast addresses */
		(void)dcf_add_del_mc_addr_list(&dcf_ad->real_hw,
					       dcf_ad->mc_addrs,
					       dcf_ad->mc_addrs_num, false);

	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
	ad->pf.adapter_stopped = 1;

	return 0;
}

static int
ice_dcf_dev_configure(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct ice_adapter *ad = &dcf_ad->parent;

	ad->rx_bulk_alloc_allowed = true;
	ad->tx_simple_allowed = true;

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

	return 0;
}

static int
ice_dcf_dev_info_get(struct rte_eth_dev *dev,
		     struct rte_eth_dev_info *dev_info)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;

	dev_info->max_mac_addrs = DCF_NUM_MACADDR_MAX;
	dev_info->max_rx_queues = hw->vsi_res->num_queue_pairs;
	dev_info->max_tx_queues = hw->vsi_res->num_queue_pairs;
	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
	dev_info->hash_key_size = hw->vf_res->rss_key_size;
	dev_info->reta_size = hw->vf_res->rss_lut_size;
	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
	dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;

	dev_info->rx_offload_capa =
		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_SCATTER |
		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
		RTE_ETH_RX_OFFLOAD_RSS_HASH;
	dev_info->tx_offload_capa =
		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_TSO |
		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = ICE_DEFAULT_RX_PTHRESH,
			.hthresh = ICE_DEFAULT_RX_HTHRESH,
			.wthresh = ICE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = ICE_DEFAULT_TX_PTHRESH,
			.hthresh = ICE_DEFAULT_TX_HTHRESH,
			.wthresh = ICE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	return 0;
}

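/*
 * Program promiscuous/allmulticast state through
 * VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE. Both flags are sent in a single
 * message, so each enable/disable wrapper below passes the cached state of
 * the other mode to leave it unchanged.
 */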
static int
dcf_config_promisc(struct ice_dcf_adapter *adapter,
		   bool enable_unicast,
		   bool enable_multicast)
{
	struct ice_dcf_hw *hw = &adapter->real_hw;
	struct virtchnl_promisc_info promisc;
	struct dcf_virtchnl_cmd args;
	int err;

	promisc.flags = 0;
	promisc.vsi_id = hw->vsi_res->vsi_id;

	if (enable_unicast)
		promisc.flags |= FLAG_VF_UNICAST_PROMISC;

	if (enable_multicast)
		promisc.flags |= FLAG_VF_MULTICAST_PROMISC;

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
	args.req_msg = (uint8_t *)&promisc;
	args.req_msglen = sizeof(promisc);

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err) {
		PMD_DRV_LOG(ERR,
			    "Failed to execute command VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE");
		return err;
	}

	adapter->promisc_unicast_enabled = enable_unicast;
	adapter->promisc_multicast_enabled = enable_multicast;
	return 0;
}

static int
ice_dcf_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;

	if (adapter->promisc_unicast_enabled) {
		PMD_DRV_LOG(INFO, "promiscuous mode is already enabled");
		return 0;
	}

	return dcf_config_promisc(adapter, true,
				  adapter->promisc_multicast_enabled);
}

static int
ice_dcf_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;

	if (!adapter->promisc_unicast_enabled) {
		PMD_DRV_LOG(INFO, "promiscuous mode is already disabled");
		return 0;
	}

	return dcf_config_promisc(adapter, false,
				  adapter->promisc_multicast_enabled);
}

static int
ice_dcf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;

	if (adapter->promisc_multicast_enabled) {
		PMD_DRV_LOG(INFO, "allmulticast mode is already enabled");
		return 0;
	}

	return dcf_config_promisc(adapter, adapter->promisc_unicast_enabled,
				  true);
}

static int
ice_dcf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;

	if (!adapter->promisc_multicast_enabled) {
		PMD_DRV_LOG(INFO, "allmulticast mode is already disabled");
		return 0;
	}

	return dcf_config_promisc(adapter, adapter->promisc_unicast_enabled,
				  false);
}

static int
dcf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr,
		     __rte_unused uint32_t index,
		     __rte_unused uint32_t pool)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	int err;

	if (rte_is_zero_ether_addr(addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet address");
		return -EINVAL;
	}

	err = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, addr, true,
					   VIRTCHNL_ETHER_ADDR_EXTRA);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to add MAC address");
		return err;
	}

	return 0;
}

static void
dcf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct rte_ether_addr *addr = &dev->data->mac_addrs[index];
	int err;

	err = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, addr, false,
					   VIRTCHNL_ETHER_ADDR_EXTRA);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to remove MAC address");
}

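/*
 * Build the variable-length virtchnl_ether_addr_list message used for
 * multicast filter updates: the allocation is sized as the list header
 * plus one virtchnl_ether_addr element per address, matching the flexible
 * array member at the end of the structure.
 */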
static int
dcf_add_del_mc_addr_list(struct ice_dcf_hw *hw,
			 struct rte_ether_addr *mc_addrs,
			 uint32_t mc_addrs_num, bool add)
{
	struct virtchnl_ether_addr_list *list;
	struct dcf_virtchnl_cmd args;
	uint32_t i;
	int len, err = 0;

	len = sizeof(struct virtchnl_ether_addr_list);
	len += sizeof(struct virtchnl_ether_addr) * mc_addrs_num;

	list = rte_zmalloc(NULL, len, 0);
	if (!list) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < mc_addrs_num; i++) {
		memcpy(list->list[i].addr, mc_addrs[i].addr_bytes,
		       sizeof(list->list[i].addr));
		list->list[i].type = VIRTCHNL_ETHER_ADDR_EXTRA;
	}

	list->vsi_id = hw->vsi_res->vsi_id;
	list->num_elements = mc_addrs_num;

	memset(&args, 0, sizeof(args));
	args.v_op = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
			VIRTCHNL_OP_DEL_ETH_ADDR;
	args.req_msg = (uint8_t *)list;
	args.req_msglen = len;
	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to execute command %s",
			    add ? "OP_ADD_ETHER_ADDRESS" :
			    "OP_DEL_ETHER_ADDRESS");
	rte_free(list);
	return err;
}

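/*
 * Replace the whole multicast address list: flush the previously
 * programmed addresses, then add the new ones. If the add fails, the old
 * list is restored so the hardware filters stay consistent with the
 * cached adapter->mc_addrs.
 */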
968*c1d14583SBruce Richardson static int
969*c1d14583SBruce Richardson dcf_set_mc_addr_list(struct rte_eth_dev *dev,
970*c1d14583SBruce Richardson 		     struct rte_ether_addr *mc_addrs,
971*c1d14583SBruce Richardson 		     uint32_t mc_addrs_num)
972*c1d14583SBruce Richardson {
973*c1d14583SBruce Richardson 	struct ice_dcf_adapter *adapter = dev->data->dev_private;
974*c1d14583SBruce Richardson 	struct ice_dcf_hw *hw = &adapter->real_hw;
975*c1d14583SBruce Richardson 	uint32_t i;
976*c1d14583SBruce Richardson 	int ret;
977*c1d14583SBruce Richardson 
978*c1d14583SBruce Richardson 
979*c1d14583SBruce Richardson 	if (mc_addrs_num > DCF_NUM_MACADDR_MAX) {
980*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR,
981*c1d14583SBruce Richardson 			    "can't add more than a limited number (%u) of addresses.",
982*c1d14583SBruce Richardson 			    (uint32_t)DCF_NUM_MACADDR_MAX);
983*c1d14583SBruce Richardson 		return -EINVAL;
984*c1d14583SBruce Richardson 	}
985*c1d14583SBruce Richardson 
986*c1d14583SBruce Richardson 	for (i = 0; i < mc_addrs_num; i++) {
987*c1d14583SBruce Richardson 		if (!rte_is_multicast_ether_addr(&mc_addrs[i])) {
988*c1d14583SBruce Richardson 			const uint8_t *mac = mc_addrs[i].addr_bytes;
989*c1d14583SBruce Richardson 
990*c1d14583SBruce Richardson 			PMD_DRV_LOG(ERR,
991*c1d14583SBruce Richardson 				    "Invalid mac: %02x:%02x:%02x:%02x:%02x:%02x",
992*c1d14583SBruce Richardson 				    mac[0], mac[1], mac[2], mac[3], mac[4],
993*c1d14583SBruce Richardson 				    mac[5]);
994*c1d14583SBruce Richardson 			return -EINVAL;
995*c1d14583SBruce Richardson 		}
996*c1d14583SBruce Richardson 	}
997*c1d14583SBruce Richardson 
998*c1d14583SBruce Richardson 	if (adapter->mc_addrs_num) {
999*c1d14583SBruce Richardson 		/* flush previous addresses */
1000*c1d14583SBruce Richardson 		ret = dcf_add_del_mc_addr_list(hw, adapter->mc_addrs,
1001*c1d14583SBruce Richardson 							adapter->mc_addrs_num, false);
1002*c1d14583SBruce Richardson 		if (ret)
1003*c1d14583SBruce Richardson 			return ret;
1004*c1d14583SBruce Richardson 	}
1005*c1d14583SBruce Richardson 	if (!mc_addrs_num) {
1006*c1d14583SBruce Richardson 		adapter->mc_addrs_num = 0;
1007*c1d14583SBruce Richardson 		return 0;
1008*c1d14583SBruce Richardson 	}
1009*c1d14583SBruce Richardson 
1010*c1d14583SBruce Richardson     /* add new ones */
1011*c1d14583SBruce Richardson 	ret = dcf_add_del_mc_addr_list(hw, mc_addrs, mc_addrs_num, true);
1012*c1d14583SBruce Richardson 	if (ret) {
1013*c1d14583SBruce Richardson 		/* if adding the new MAC address list fails, restore the
1014*c1d14583SBruce Richardson 		 * previous addresses.
1015*c1d14583SBruce Richardson 		 */
1016*c1d14583SBruce Richardson 		if (adapter->mc_addrs_num)
1017*c1d14583SBruce Richardson 			(void)dcf_add_del_mc_addr_list(hw, adapter->mc_addrs,
1018*c1d14583SBruce Richardson 						       adapter->mc_addrs_num,
1019*c1d14583SBruce Richardson 						       true);
1020*c1d14583SBruce Richardson 		return ret;
1021*c1d14583SBruce Richardson 	}
1022*c1d14583SBruce Richardson 	adapter->mc_addrs_num = mc_addrs_num;
1023*c1d14583SBruce Richardson 	memcpy(adapter->mc_addrs,
1024*c1d14583SBruce Richardson 		    mc_addrs, mc_addrs_num * sizeof(*mc_addrs));
1025*c1d14583SBruce Richardson 
1026*c1d14583SBruce Richardson 	return 0;
1027*c1d14583SBruce Richardson }
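
/*
 * Usage from an application (illustrative sketch): the handler above is
 * reached through rte_eth_dev_set_mc_addr_list(). port_id is assumed to
 * identify this DCF port:
 *
 *	struct rte_ether_addr mc[2];
 *
 *	rte_ether_unformat_addr("01:00:5e:00:00:01", &mc[0]);
 *	rte_ether_unformat_addr("33:33:00:00:00:01", &mc[1]);
 *	if (rte_eth_dev_set_mc_addr_list(port_id, mc, 2) != 0)
 *		printf("set_mc_addr_list failed\n");
 *
 * Passing nb_mc_addr == 0 flushes the list, matching the early-return
 * path above.
 */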
1028*c1d14583SBruce Richardson 
1029*c1d14583SBruce Richardson static int
1030*c1d14583SBruce Richardson dcf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
1031*c1d14583SBruce Richardson 			     struct rte_ether_addr *mac_addr)
1032*c1d14583SBruce Richardson {
1033*c1d14583SBruce Richardson 	struct ice_dcf_adapter *adapter = dev->data->dev_private;
1034*c1d14583SBruce Richardson 	struct ice_dcf_hw *hw = &adapter->real_hw;
1035*c1d14583SBruce Richardson 	struct rte_ether_addr *old_addr;
1036*c1d14583SBruce Richardson 	int ret;
1037*c1d14583SBruce Richardson 
1038*c1d14583SBruce Richardson 	old_addr = hw->eth_dev->data->mac_addrs;
1039*c1d14583SBruce Richardson 	if (rte_is_same_ether_addr(old_addr, mac_addr))
1040*c1d14583SBruce Richardson 		return 0;
1041*c1d14583SBruce Richardson 
1042*c1d14583SBruce Richardson 	ret = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, old_addr, false,
1043*c1d14583SBruce Richardson 					   VIRTCHNL_ETHER_ADDR_PRIMARY);
1044*c1d14583SBruce Richardson 	if (ret)
1045*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "Failed to delete old MAC:"
1046*c1d14583SBruce Richardson 			    " %02X:%02X:%02X:%02X:%02X:%02X",
1047*c1d14583SBruce Richardson 			    old_addr->addr_bytes[0],
1048*c1d14583SBruce Richardson 			    old_addr->addr_bytes[1],
1049*c1d14583SBruce Richardson 			    old_addr->addr_bytes[2],
1050*c1d14583SBruce Richardson 			    old_addr->addr_bytes[3],
1051*c1d14583SBruce Richardson 			    old_addr->addr_bytes[4],
1052*c1d14583SBruce Richardson 			    old_addr->addr_bytes[5]);
1053*c1d14583SBruce Richardson 
1054*c1d14583SBruce Richardson 	ret = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, mac_addr, true,
1055*c1d14583SBruce Richardson 					   VIRTCHNL_ETHER_ADDR_PRIMARY);
1056*c1d14583SBruce Richardson 	if (ret)
1057*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "Failed to add new MAC:"
1058*c1d14583SBruce Richardson 			    " %02X:%02X:%02X:%02X:%02X:%02X",
1059*c1d14583SBruce Richardson 			    mac_addr->addr_bytes[0],
1060*c1d14583SBruce Richardson 			    mac_addr->addr_bytes[1],
1061*c1d14583SBruce Richardson 			    mac_addr->addr_bytes[2],
1062*c1d14583SBruce Richardson 			    mac_addr->addr_bytes[3],
1063*c1d14583SBruce Richardson 			    mac_addr->addr_bytes[4],
1064*c1d14583SBruce Richardson 			    mac_addr->addr_bytes[5]);
1065*c1d14583SBruce Richardson 
1066*c1d14583SBruce Richardson 	if (ret)
1067*c1d14583SBruce Richardson 		return -EIO;
1068*c1d14583SBruce Richardson 
1069*c1d14583SBruce Richardson 	rte_ether_addr_copy(mac_addr, hw->eth_dev->data->mac_addrs);
1070*c1d14583SBruce Richardson 	return 0;
1071*c1d14583SBruce Richardson }
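
/*
 * Usage from an application (illustrative sketch): the delete-old/add-new
 * sequence above is driven by rte_eth_dev_default_mac_addr_set(). The
 * address below is an arbitrary locally administered example:
 *
 *	struct rte_ether_addr addr;
 *
 *	rte_ether_unformat_addr("02:00:00:00:00:01", &addr);
 *	if (rte_eth_dev_default_mac_addr_set(port_id, &addr) != 0)
 *		printf("failed to set default MAC\n");
 */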
1072*c1d14583SBruce Richardson 
1073*c1d14583SBruce Richardson static int
1074*c1d14583SBruce Richardson dcf_add_del_vlan_v2(struct ice_dcf_hw *hw, uint16_t vlanid, bool add)
1075*c1d14583SBruce Richardson {
1076*c1d14583SBruce Richardson 	struct virtchnl_vlan_supported_caps *supported_caps =
1077*c1d14583SBruce Richardson 			&hw->vlan_v2_caps.filtering.filtering_support;
1078*c1d14583SBruce Richardson 	struct virtchnl_vlan *vlan_setting;
1079*c1d14583SBruce Richardson 	struct virtchnl_vlan_filter_list_v2 vlan_filter;
1080*c1d14583SBruce Richardson 	struct dcf_virtchnl_cmd args;
1081*c1d14583SBruce Richardson 	uint32_t filtering_caps;
1082*c1d14583SBruce Richardson 	int err;
1083*c1d14583SBruce Richardson 
1084*c1d14583SBruce Richardson 	if (supported_caps->outer) {
1085*c1d14583SBruce Richardson 		filtering_caps = supported_caps->outer;
1086*c1d14583SBruce Richardson 		vlan_setting = &vlan_filter.filters[0].outer;
1087*c1d14583SBruce Richardson 	} else {
1088*c1d14583SBruce Richardson 		filtering_caps = supported_caps->inner;
1089*c1d14583SBruce Richardson 		vlan_setting = &vlan_filter.filters[0].inner;
1090*c1d14583SBruce Richardson 	}
1091*c1d14583SBruce Richardson 
1092*c1d14583SBruce Richardson 	if (!(filtering_caps & VIRTCHNL_VLAN_ETHERTYPE_8100))
1093*c1d14583SBruce Richardson 		return -ENOTSUP;
1094*c1d14583SBruce Richardson 
1095*c1d14583SBruce Richardson 	memset(&vlan_filter, 0, sizeof(vlan_filter));
1096*c1d14583SBruce Richardson 	vlan_filter.vport_id = hw->vsi_res->vsi_id;
1097*c1d14583SBruce Richardson 	vlan_filter.num_elements = 1;
1098*c1d14583SBruce Richardson 	vlan_setting->tpid = RTE_ETHER_TYPE_VLAN;
1099*c1d14583SBruce Richardson 	vlan_setting->tci = vlanid;
1100*c1d14583SBruce Richardson 
1101*c1d14583SBruce Richardson 	memset(&args, 0, sizeof(args));
1102*c1d14583SBruce Richardson 	args.v_op = add ? VIRTCHNL_OP_ADD_VLAN_V2 : VIRTCHNL_OP_DEL_VLAN_V2;
1103*c1d14583SBruce Richardson 	args.req_msg = (uint8_t *)&vlan_filter;
1104*c1d14583SBruce Richardson 	args.req_msglen = sizeof(vlan_filter);
1105*c1d14583SBruce Richardson 	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
1106*c1d14583SBruce Richardson 	if (err)
1107*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "failed to execute command %s",
1108*c1d14583SBruce Richardson 			    add ? "OP_ADD_VLAN_V2" : "OP_DEL_VLAN_V2");
1109*c1d14583SBruce Richardson 
1110*c1d14583SBruce Richardson 	return err;
1111*c1d14583SBruce Richardson }
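
/*
 * The message above carries a single VLAN filter. If several IDs had to
 * be batched into one VIRTCHNL_OP_ADD_VLAN_V2 request, the list would
 * need to be sized for the trailing flexible array, e.g. (illustrative
 * sketch; n is an assumed element count):
 *
 *	size_t len = sizeof(struct virtchnl_vlan_filter_list_v2) +
 *		     (n - 1) * sizeof(struct virtchnl_vlan_filter);
 *	struct virtchnl_vlan_filter_list_v2 *list = rte_zmalloc(NULL, len, 0);
 */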
1112*c1d14583SBruce Richardson 
1113*c1d14583SBruce Richardson static int
1114*c1d14583SBruce Richardson dcf_add_del_vlan(struct ice_dcf_hw *hw, uint16_t vlanid, bool add)
1115*c1d14583SBruce Richardson {
1116*c1d14583SBruce Richardson 	struct virtchnl_vlan_filter_list *vlan_list;
1117*c1d14583SBruce Richardson 	uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
1118*c1d14583SBruce Richardson 							sizeof(uint16_t)];
1119*c1d14583SBruce Richardson 	struct dcf_virtchnl_cmd args;
1120*c1d14583SBruce Richardson 	int err;
1121*c1d14583SBruce Richardson 
1122*c1d14583SBruce Richardson 	vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
1123*c1d14583SBruce Richardson 	vlan_list->vsi_id = hw->vsi_res->vsi_id;
1124*c1d14583SBruce Richardson 	vlan_list->num_elements = 1;
1125*c1d14583SBruce Richardson 	vlan_list->vlan_id[0] = vlanid;
1126*c1d14583SBruce Richardson 
1127*c1d14583SBruce Richardson 	memset(&args, 0, sizeof(args));
1128*c1d14583SBruce Richardson 	args.v_op = add ? VIRTCHNL_OP_ADD_VLAN : VIRTCHNL_OP_DEL_VLAN;
1129*c1d14583SBruce Richardson 	args.req_msg = cmd_buffer;
1130*c1d14583SBruce Richardson 	args.req_msglen = sizeof(cmd_buffer);
1131*c1d14583SBruce Richardson 	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
1132*c1d14583SBruce Richardson 	if (err)
1133*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "failed to execute command %s",
1134*c1d14583SBruce Richardson 			    add ? "OP_ADD_VLAN" : "OP_DEL_VLAN");
1135*c1d14583SBruce Richardson 
1136*c1d14583SBruce Richardson 	return err;
1137*c1d14583SBruce Richardson }
1138*c1d14583SBruce Richardson 
1139*c1d14583SBruce Richardson static int
1140*c1d14583SBruce Richardson dcf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1141*c1d14583SBruce Richardson {
1142*c1d14583SBruce Richardson 	struct ice_dcf_adapter *adapter = dev->data->dev_private;
1143*c1d14583SBruce Richardson 	struct ice_dcf_hw *hw = &adapter->real_hw;
1144*c1d14583SBruce Richardson 	int err;
1145*c1d14583SBruce Richardson 
1146*c1d14583SBruce Richardson 	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
1147*c1d14583SBruce Richardson 		err = dcf_add_del_vlan_v2(hw, vlan_id, on);
1148*c1d14583SBruce Richardson 		if (err)
1149*c1d14583SBruce Richardson 			return -EIO;
1150*c1d14583SBruce Richardson 		return 0;
1151*c1d14583SBruce Richardson 	}
1152*c1d14583SBruce Richardson 
1153*c1d14583SBruce Richardson 	if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
1154*c1d14583SBruce Richardson 		return -ENOTSUP;
1155*c1d14583SBruce Richardson 
1156*c1d14583SBruce Richardson 	err = dcf_add_del_vlan(hw, vlan_id, on);
1157*c1d14583SBruce Richardson 	if (err)
1158*c1d14583SBruce Richardson 		return -EIO;
1159*c1d14583SBruce Richardson 	return 0;
1160*c1d14583SBruce Richardson }
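
/*
 * Usage from an application (illustrative sketch): this handler backs
 * rte_eth_dev_vlan_filter(), which requires the port to have been
 * configured with RTE_ETH_RX_OFFLOAD_VLAN_FILTER. port_id is assumed to
 * identify this DCF port:
 *
 *	rte_eth_dev_vlan_filter(port_id, 100, 1);
 *	rte_eth_dev_vlan_filter(port_id, 100, 0);
 *
 * The first call admits VLAN 100, the second removes the filter again.
 */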
1161*c1d14583SBruce Richardson 
1162*c1d14583SBruce Richardson static void
1163*c1d14583SBruce Richardson dcf_iterate_vlan_filters_v2(struct rte_eth_dev *dev, bool enable)
1164*c1d14583SBruce Richardson {
1165*c1d14583SBruce Richardson 	struct rte_vlan_filter_conf *vfc = &dev->data->vlan_filter_conf;
1166*c1d14583SBruce Richardson 	struct ice_dcf_adapter *adapter = dev->data->dev_private;
1167*c1d14583SBruce Richardson 	struct ice_dcf_hw *hw = &adapter->real_hw;
1168*c1d14583SBruce Richardson 	uint32_t i, j;
1169*c1d14583SBruce Richardson 	uint64_t ids;
1170*c1d14583SBruce Richardson 
1171*c1d14583SBruce Richardson 	for (i = 0; i < RTE_DIM(vfc->ids); i++) {
1172*c1d14583SBruce Richardson 		if (vfc->ids[i] == 0)
1173*c1d14583SBruce Richardson 			continue;
1174*c1d14583SBruce Richardson 
1175*c1d14583SBruce Richardson 		ids = vfc->ids[i];
1176*c1d14583SBruce Richardson 		for (j = 0; ids != 0 && j < 64; j++, ids >>= 1) {
1177*c1d14583SBruce Richardson 			if (ids & 1)
1178*c1d14583SBruce Richardson 				dcf_add_del_vlan_v2(hw, 64 * i + j, enable);
1179*c1d14583SBruce Richardson 		}
1180*c1d14583SBruce Richardson 	}
1181*c1d14583SBruce Richardson }
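
/*
 * The walk above decodes dev->data->vlan_filter_conf, which keeps one
 * bit per VLAN ID across an array of 64-bit words. Worked example: with
 * vfc->ids[1] == 0x5, bits j == 0 and j == 2 are set, so VLAN IDs
 * 64 * 1 + 0 == 64 and 64 * 1 + 2 == 66 are replayed.
 */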
1182*c1d14583SBruce Richardson 
1183*c1d14583SBruce Richardson static int
1184*c1d14583SBruce Richardson dcf_config_vlan_strip_v2(struct ice_dcf_hw *hw, bool enable)
1185*c1d14583SBruce Richardson {
1186*c1d14583SBruce Richardson 	struct virtchnl_vlan_supported_caps *stripping_caps =
1187*c1d14583SBruce Richardson 			&hw->vlan_v2_caps.offloads.stripping_support;
1188*c1d14583SBruce Richardson 	struct virtchnl_vlan_setting vlan_strip;
1189*c1d14583SBruce Richardson 	struct dcf_virtchnl_cmd args;
1190*c1d14583SBruce Richardson 	uint32_t *ethertype;
1191*c1d14583SBruce Richardson 	int ret;
1192*c1d14583SBruce Richardson 
1193*c1d14583SBruce Richardson 	if ((stripping_caps->outer & VIRTCHNL_VLAN_ETHERTYPE_8100) &&
1194*c1d14583SBruce Richardson 	    (stripping_caps->outer & VIRTCHNL_VLAN_TOGGLE))
1195*c1d14583SBruce Richardson 		ethertype = &vlan_strip.outer_ethertype_setting;
1196*c1d14583SBruce Richardson 	else if ((stripping_caps->inner & VIRTCHNL_VLAN_ETHERTYPE_8100) &&
1197*c1d14583SBruce Richardson 		 (stripping_caps->inner & VIRTCHNL_VLAN_TOGGLE))
1198*c1d14583SBruce Richardson 		ethertype = &vlan_strip.inner_ethertype_setting;
1199*c1d14583SBruce Richardson 	else
1200*c1d14583SBruce Richardson 		return -ENOTSUP;
1201*c1d14583SBruce Richardson 
1202*c1d14583SBruce Richardson 	memset(&vlan_strip, 0, sizeof(vlan_strip));
1203*c1d14583SBruce Richardson 	vlan_strip.vport_id = hw->vsi_res->vsi_id;
1204*c1d14583SBruce Richardson 	*ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100;
1205*c1d14583SBruce Richardson 
1206*c1d14583SBruce Richardson 	memset(&args, 0, sizeof(args));
1207*c1d14583SBruce Richardson 	args.v_op = enable ? VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 :
1208*c1d14583SBruce Richardson 			    VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2;
1209*c1d14583SBruce Richardson 	args.req_msg = (uint8_t *)&vlan_strip;
1210*c1d14583SBruce Richardson 	args.req_msglen = sizeof(vlan_strip);
1211*c1d14583SBruce Richardson 	ret = ice_dcf_execute_virtchnl_cmd(hw, &args);
1212*c1d14583SBruce Richardson 	if (ret)
1213*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "failed to execute command %s",
1214*c1d14583SBruce Richardson 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2" :
1215*c1d14583SBruce Richardson 				     "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2");
1216*c1d14583SBruce Richardson 
1217*c1d14583SBruce Richardson 	return ret;
1218*c1d14583SBruce Richardson }
1219*c1d14583SBruce Richardson 
1220*c1d14583SBruce Richardson static int
1221*c1d14583SBruce Richardson dcf_dev_vlan_offload_set_v2(struct rte_eth_dev *dev, int mask)
1222*c1d14583SBruce Richardson {
1223*c1d14583SBruce Richardson 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
1224*c1d14583SBruce Richardson 	struct ice_dcf_adapter *adapter = dev->data->dev_private;
1225*c1d14583SBruce Richardson 	struct ice_dcf_hw *hw = &adapter->real_hw;
1226*c1d14583SBruce Richardson 	bool enable;
1227*c1d14583SBruce Richardson 	int err;
1228*c1d14583SBruce Richardson 
1229*c1d14583SBruce Richardson 	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
1230*c1d14583SBruce Richardson 		enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
1231*c1d14583SBruce Richardson 
1232*c1d14583SBruce Richardson 		dcf_iterate_vlan_filters_v2(dev, enable);
1233*c1d14583SBruce Richardson 	}
1234*c1d14583SBruce Richardson 
1235*c1d14583SBruce Richardson 	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1236*c1d14583SBruce Richardson 		enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
1237*c1d14583SBruce Richardson 
1238*c1d14583SBruce Richardson 		err = dcf_config_vlan_strip_v2(hw, enable);
1239*c1d14583SBruce Richardson 		/* If not supported, stripping is already disabled by the PF */
1240*c1d14583SBruce Richardson 		if (err == -ENOTSUP && !enable)
1241*c1d14583SBruce Richardson 			err = 0;
1242*c1d14583SBruce Richardson 		if (err)
1243*c1d14583SBruce Richardson 			return -EIO;
1244*c1d14583SBruce Richardson 	}
1245*c1d14583SBruce Richardson 
1246*c1d14583SBruce Richardson 	return 0;
1247*c1d14583SBruce Richardson }
1248*c1d14583SBruce Richardson 
1249*c1d14583SBruce Richardson static int
1250*c1d14583SBruce Richardson dcf_enable_vlan_strip(struct ice_dcf_hw *hw)
1251*c1d14583SBruce Richardson {
1252*c1d14583SBruce Richardson 	struct dcf_virtchnl_cmd args;
1253*c1d14583SBruce Richardson 	int ret;
1254*c1d14583SBruce Richardson 
1255*c1d14583SBruce Richardson 	memset(&args, 0, sizeof(args));
1256*c1d14583SBruce Richardson 	args.v_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
1257*c1d14583SBruce Richardson 	ret = ice_dcf_execute_virtchnl_cmd(hw, &args);
1258*c1d14583SBruce Richardson 	if (ret)
1259*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR,
1260*c1d14583SBruce Richardson 			    "Failed to execute command of OP_ENABLE_VLAN_STRIPPING");
1261*c1d14583SBruce Richardson 
1262*c1d14583SBruce Richardson 	return ret;
1263*c1d14583SBruce Richardson }
1264*c1d14583SBruce Richardson 
1265*c1d14583SBruce Richardson static int
1266*c1d14583SBruce Richardson dcf_disable_vlan_strip(struct ice_dcf_hw *hw)
1267*c1d14583SBruce Richardson {
1268*c1d14583SBruce Richardson 	struct dcf_virtchnl_cmd args;
1269*c1d14583SBruce Richardson 	int ret;
1270*c1d14583SBruce Richardson 
1271*c1d14583SBruce Richardson 	memset(&args, 0, sizeof(args));
1272*c1d14583SBruce Richardson 	args.v_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
1273*c1d14583SBruce Richardson 	ret = ice_dcf_execute_virtchnl_cmd(hw, &args);
1274*c1d14583SBruce Richardson 	if (ret)
1275*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR,
1276*c1d14583SBruce Richardson 			    "Failed to execute command of OP_DISABLE_VLAN_STRIPPING");
1277*c1d14583SBruce Richardson 
1278*c1d14583SBruce Richardson 	return ret;
1279*c1d14583SBruce Richardson }
1280*c1d14583SBruce Richardson 
1281*c1d14583SBruce Richardson static int
1282*c1d14583SBruce Richardson dcf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1283*c1d14583SBruce Richardson {
1284*c1d14583SBruce Richardson 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1285*c1d14583SBruce Richardson 	struct ice_dcf_adapter *adapter = dev->data->dev_private;
1286*c1d14583SBruce Richardson 	struct ice_dcf_hw *hw = &adapter->real_hw;
1287*c1d14583SBruce Richardson 	int err;
1288*c1d14583SBruce Richardson 
1289*c1d14583SBruce Richardson 	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
1290*c1d14583SBruce Richardson 		return dcf_dev_vlan_offload_set_v2(dev, mask);
1291*c1d14583SBruce Richardson 
1292*c1d14583SBruce Richardson 	if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
1293*c1d14583SBruce Richardson 		return -ENOTSUP;
1294*c1d14583SBruce Richardson 
1295*c1d14583SBruce Richardson 	/* VLAN stripping setting */
1296*c1d14583SBruce Richardson 	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1297*c1d14583SBruce Richardson 		/* Enable or disable VLAN stripping */
1298*c1d14583SBruce Richardson 		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1299*c1d14583SBruce Richardson 			err = dcf_enable_vlan_strip(hw);
1300*c1d14583SBruce Richardson 		else
1301*c1d14583SBruce Richardson 			err = dcf_disable_vlan_strip(hw);
1302*c1d14583SBruce Richardson 
1303*c1d14583SBruce Richardson 		if (err)
1304*c1d14583SBruce Richardson 			return -EIO;
1305*c1d14583SBruce Richardson 	}
1306*c1d14583SBruce Richardson 	return 0;
1307*c1d14583SBruce Richardson }
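
/*
 * Usage from an application (illustrative sketch): both dispatch targets
 * above are reached through rte_eth_dev_set_vlan_offload(). A minimal
 * read-modify-write enabling stripping, with port_id assumed valid:
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	mask |= RTE_ETH_VLAN_STRIP_OFFLOAD;
 *	if (rte_eth_dev_set_vlan_offload(port_id, mask) != 0)
 *		printf("failed to enable VLAN stripping\n");
 */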
1308*c1d14583SBruce Richardson 
1309*c1d14583SBruce Richardson static int
1310*c1d14583SBruce Richardson ice_dcf_dev_flow_ops_get(struct rte_eth_dev *dev,
1311*c1d14583SBruce Richardson 			 const struct rte_flow_ops **ops)
1312*c1d14583SBruce Richardson {
1313*c1d14583SBruce Richardson 	if (!dev)
1314*c1d14583SBruce Richardson 		return -EINVAL;
1315*c1d14583SBruce Richardson 
1316*c1d14583SBruce Richardson 	*ops = &ice_flow_ops;
1317*c1d14583SBruce Richardson 	return 0;
1318*c1d14583SBruce Richardson }
1319*c1d14583SBruce Richardson 
1320*c1d14583SBruce Richardson static int
1321*c1d14583SBruce Richardson ice_dcf_dev_rss_reta_update(struct rte_eth_dev *dev,
1322*c1d14583SBruce Richardson 			struct rte_eth_rss_reta_entry64 *reta_conf,
1323*c1d14583SBruce Richardson 			uint16_t reta_size)
1324*c1d14583SBruce Richardson {
1325*c1d14583SBruce Richardson 	struct ice_dcf_adapter *adapter = dev->data->dev_private;
1326*c1d14583SBruce Richardson 	struct ice_dcf_hw *hw = &adapter->real_hw;
1327*c1d14583SBruce Richardson 	uint8_t *lut;
1328*c1d14583SBruce Richardson 	uint16_t i, idx, shift;
1329*c1d14583SBruce Richardson 	int ret;
1330*c1d14583SBruce Richardson 
1331*c1d14583SBruce Richardson 	if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1332*c1d14583SBruce Richardson 		return -ENOTSUP;
1333*c1d14583SBruce Richardson 
1334*c1d14583SBruce Richardson 	if (reta_size != hw->vf_res->rss_lut_size) {
1335*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
1336*c1d14583SBruce Richardson 			"(%d) doesn't match the size the hardware can "
1337*c1d14583SBruce Richardson 			"support (%d)", reta_size, hw->vf_res->rss_lut_size);
1338*c1d14583SBruce Richardson 		return -EINVAL;
1339*c1d14583SBruce Richardson 	}
1340*c1d14583SBruce Richardson 
1341*c1d14583SBruce Richardson 	lut = rte_zmalloc("rss_lut", reta_size, 0);
1342*c1d14583SBruce Richardson 	if (!lut) {
1343*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "Failed to allocate memory for the RSS LUT");
1344*c1d14583SBruce Richardson 		return -ENOMEM;
1345*c1d14583SBruce Richardson 	}
1346*c1d14583SBruce Richardson 	/* save the old LUT so it can be restored on failure */
1347*c1d14583SBruce Richardson 	rte_memcpy(lut, hw->rss_lut, reta_size);
1348*c1d14583SBruce Richardson 
1349*c1d14583SBruce Richardson 	for (i = 0; i < reta_size; i++) {
1350*c1d14583SBruce Richardson 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1351*c1d14583SBruce Richardson 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
1352*c1d14583SBruce Richardson 		if (reta_conf[idx].mask & (1ULL << shift))
1353*c1d14583SBruce Richardson 			hw->rss_lut[i] = reta_conf[idx].reta[shift];
1354*c1d14583SBruce Richardson 	}
1355*c1d14583SBruce Richardson 
1356*c1d14583SBruce Richardson 	/* send virtchnl ops to configure RSS */
1357*c1d14583SBruce Richardson 	ret = ice_dcf_configure_rss_lut(hw);
1358*c1d14583SBruce Richardson 	if (ret) /* revert to the saved old LUT */
1359*c1d14583SBruce Richardson 		rte_memcpy(hw->rss_lut, lut, reta_size);
1361*c1d14583SBruce Richardson 	rte_free(lut);
1362*c1d14583SBruce Richardson 
1363*c1d14583SBruce Richardson 	return ret;
1364*c1d14583SBruce Richardson }
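
/*
 * Usage from an application (illustrative sketch): reta_conf is an array
 * of 64-entry groups, so entry i lives in group i / RTE_ETH_RETA_GROUP_SIZE
 * at offset i % RTE_ETH_RETA_GROUP_SIZE, mirroring the loop above. Spreading
 * a table over 4 queues, assuming dev_info.reta_size == 512:
 *
 *	struct rte_eth_rss_reta_entry64 conf[512 / RTE_ETH_RETA_GROUP_SIZE];
 *	uint16_t i, idx, shift;
 *
 *	memset(conf, 0, sizeof(conf));
 *	for (i = 0; i < 512; i++) {
 *		idx = i / RTE_ETH_RETA_GROUP_SIZE;
 *		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 *		conf[idx].mask |= 1ULL << shift;
 *		conf[idx].reta[shift] = i % 4;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, conf, 512);
 */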
1365*c1d14583SBruce Richardson 
1366*c1d14583SBruce Richardson static int
1367*c1d14583SBruce Richardson ice_dcf_dev_rss_reta_query(struct rte_eth_dev *dev,
1368*c1d14583SBruce Richardson 		       struct rte_eth_rss_reta_entry64 *reta_conf,
1369*c1d14583SBruce Richardson 		       uint16_t reta_size)
1370*c1d14583SBruce Richardson {
1371*c1d14583SBruce Richardson 	struct ice_dcf_adapter *adapter = dev->data->dev_private;
1372*c1d14583SBruce Richardson 	struct ice_dcf_hw *hw = &adapter->real_hw;
1373*c1d14583SBruce Richardson 	uint16_t i, idx, shift;
1374*c1d14583SBruce Richardson 
1375*c1d14583SBruce Richardson 	if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1376*c1d14583SBruce Richardson 		return -ENOTSUP;
1377*c1d14583SBruce Richardson 
1378*c1d14583SBruce Richardson 	if (reta_size != hw->vf_res->rss_lut_size) {
1379*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
1380*c1d14583SBruce Richardson 			"(%d) doesn't match the size the hardware can "
1381*c1d14583SBruce Richardson 			"support (%d)", reta_size, hw->vf_res->rss_lut_size);
1382*c1d14583SBruce Richardson 		return -EINVAL;
1383*c1d14583SBruce Richardson 	}
1384*c1d14583SBruce Richardson 
1385*c1d14583SBruce Richardson 	for (i = 0; i < reta_size; i++) {
1386*c1d14583SBruce Richardson 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1387*c1d14583SBruce Richardson 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
1388*c1d14583SBruce Richardson 		if (reta_conf[idx].mask & (1ULL << shift))
1389*c1d14583SBruce Richardson 			reta_conf[idx].reta[shift] = hw->rss_lut[i];
1390*c1d14583SBruce Richardson 	}
1391*c1d14583SBruce Richardson 
1392*c1d14583SBruce Richardson 	return 0;
1393*c1d14583SBruce Richardson }
1394*c1d14583SBruce Richardson 
1395*c1d14583SBruce Richardson static int
1396*c1d14583SBruce Richardson ice_dcf_dev_rss_hash_update(struct rte_eth_dev *dev,
1397*c1d14583SBruce Richardson 			struct rte_eth_rss_conf *rss_conf)
1398*c1d14583SBruce Richardson {
1399*c1d14583SBruce Richardson 	struct ice_dcf_adapter *adapter = dev->data->dev_private;
1400*c1d14583SBruce Richardson 	struct ice_dcf_hw *hw = &adapter->real_hw;
1401*c1d14583SBruce Richardson 	int ret;
1402*c1d14583SBruce Richardson 
1403*c1d14583SBruce Richardson 	if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1404*c1d14583SBruce Richardson 		return -ENOTSUP;
1405*c1d14583SBruce Richardson 
1406*c1d14583SBruce Richardson 	/* HENA is enabled by default; no change needed */
1407*c1d14583SBruce Richardson 	if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) {
1408*c1d14583SBruce Richardson 		PMD_DRV_LOG(DEBUG, "No key to be configured");
1409*c1d14583SBruce Richardson 		return 0;
1410*c1d14583SBruce Richardson 	} else if (rss_conf->rss_key_len != hw->vf_res->rss_key_size) {
1411*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "The size of the configured hash key "
1412*c1d14583SBruce Richardson 			"(%d) doesn't match the size the hardware can "
1413*c1d14583SBruce Richardson 			"support (%d)", rss_conf->rss_key_len,
1414*c1d14583SBruce Richardson 			hw->vf_res->rss_key_size);
1415*c1d14583SBruce Richardson 		return -EINVAL;
1416*c1d14583SBruce Richardson 	}
1417*c1d14583SBruce Richardson 
1418*c1d14583SBruce Richardson 	rte_memcpy(hw->rss_key, rss_conf->rss_key, rss_conf->rss_key_len);
1419*c1d14583SBruce Richardson 
1420*c1d14583SBruce Richardson 	ret = ice_dcf_configure_rss_key(hw);
1421*c1d14583SBruce Richardson 	if (ret)
1422*c1d14583SBruce Richardson 		return ret;
1423*c1d14583SBruce Richardson 
1424*c1d14583SBruce Richardson 	/* Clear existing RSS. */
1425*c1d14583SBruce Richardson 	ret = ice_dcf_set_hena(hw, 0);
1426*c1d14583SBruce Richardson 
1427*c1d14583SBruce Richardson 	/* As a workaround, temporarily allow an error to be returned,
1428*c1d14583SBruce Richardson 	 * since the PF may lack handling for hena = 0.
1429*c1d14583SBruce Richardson 	 */
1430*c1d14583SBruce Richardson 	if (ret)
1431*c1d14583SBruce Richardson 		PMD_DRV_LOG(WARNING, "failed to clear existing RSS, "
1432*c1d14583SBruce Richardson 				"possibly due to lack of PF support");
1433*c1d14583SBruce Richardson 
1434*c1d14583SBruce Richardson 	/* Set new RSS configuration. */
1435*c1d14583SBruce Richardson 	ret = ice_dcf_rss_hash_set(hw, rss_conf->rss_hf, true);
1436*c1d14583SBruce Richardson 	if (ret) {
1437*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "failed to set new RSS");
1438*c1d14583SBruce Richardson 		return ret;
1439*c1d14583SBruce Richardson 	}
1440*c1d14583SBruce Richardson 
1441*c1d14583SBruce Richardson 	return 0;
1442*c1d14583SBruce Richardson }
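
/*
 * Usage from an application (illustrative sketch): the key length must
 * equal the negotiated rss_key_size checked above. The 52-byte length
 * and key bytes below are assumptions for illustration only:
 *
 *	uint8_t key[52] = { 0x6d, 0x5a };
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = key,
 *		.rss_key_len = sizeof(key),
 *		.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
 *	};
 *
 *	rte_eth_dev_rss_hash_update(port_id, &conf);
 */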
1443*c1d14583SBruce Richardson 
1444*c1d14583SBruce Richardson static int
1445*c1d14583SBruce Richardson ice_dcf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1446*c1d14583SBruce Richardson 			  struct rte_eth_rss_conf *rss_conf)
1447*c1d14583SBruce Richardson {
1448*c1d14583SBruce Richardson 	struct ice_dcf_adapter *adapter = dev->data->dev_private;
1449*c1d14583SBruce Richardson 	struct ice_dcf_hw *hw = &adapter->real_hw;
1450*c1d14583SBruce Richardson 
1451*c1d14583SBruce Richardson 	if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1452*c1d14583SBruce Richardson 		return -ENOTSUP;
1453*c1d14583SBruce Richardson 
1454*c1d14583SBruce Richardson 	/* Just set it to default value now. */
1455*c1d14583SBruce Richardson 	rss_conf->rss_hf = ICE_RSS_OFFLOAD_ALL;
1456*c1d14583SBruce Richardson 
1457*c1d14583SBruce Richardson 	if (!rss_conf->rss_key)
1458*c1d14583SBruce Richardson 		return 0;
1459*c1d14583SBruce Richardson 
1460*c1d14583SBruce Richardson 	rss_conf->rss_key_len = hw->vf_res->rss_key_size;
1461*c1d14583SBruce Richardson 	rte_memcpy(rss_conf->rss_key, hw->rss_key, rss_conf->rss_key_len);
1462*c1d14583SBruce Richardson 
1463*c1d14583SBruce Richardson 	return 0;
1464*c1d14583SBruce Richardson }
1465*c1d14583SBruce Richardson 
1466*c1d14583SBruce Richardson #define ICE_DCF_32_BIT_WIDTH (CHAR_BIT * 4)
1467*c1d14583SBruce Richardson #define ICE_DCF_48_BIT_WIDTH (CHAR_BIT * 6)
1468*c1d14583SBruce Richardson #define ICE_DCF_48_BIT_MASK  RTE_LEN2MASK(ICE_DCF_48_BIT_WIDTH, uint64_t)
1469*c1d14583SBruce Richardson 
1470*c1d14583SBruce Richardson static void
1471*c1d14583SBruce Richardson ice_dcf_stat_update_48(uint64_t *offset, uint64_t *stat)
1472*c1d14583SBruce Richardson {
1473*c1d14583SBruce Richardson 	if (*stat >= *offset)
1474*c1d14583SBruce Richardson 		*stat = *stat - *offset;
1475*c1d14583SBruce Richardson 	else
1476*c1d14583SBruce Richardson 		*stat = (uint64_t)((*stat +
1477*c1d14583SBruce Richardson 			((uint64_t)1 << ICE_DCF_48_BIT_WIDTH)) - *offset);
1478*c1d14583SBruce Richardson 
1479*c1d14583SBruce Richardson 	*stat &= ICE_DCF_48_BIT_MASK;
1480*c1d14583SBruce Richardson }
1481*c1d14583SBruce Richardson 
1482*c1d14583SBruce Richardson static void
1483*c1d14583SBruce Richardson ice_dcf_stat_update_32(uint64_t *offset, uint64_t *stat)
1484*c1d14583SBruce Richardson {
1485*c1d14583SBruce Richardson 	if (*stat >= *offset)
1486*c1d14583SBruce Richardson 		*stat = (uint64_t)(*stat - *offset);
1487*c1d14583SBruce Richardson 	else
1488*c1d14583SBruce Richardson 		*stat = (uint64_t)((*stat +
1489*c1d14583SBruce Richardson 			((uint64_t)1 << ICE_DCF_32_BIT_WIDTH)) - *offset);
1490*c1d14583SBruce Richardson }
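
/*
 * Worked example of the 48-bit rollover handling above: with
 * *offset == 0xFFFFFFFFFFF0 and a raw *stat of 0x10 read after the
 * hardware counter wrapped, *stat < *offset, so the result is
 * (0x10 + 2^48) - 0xFFFFFFFFFFF0 == 0x20, i.e. 32 units accumulated
 * since the offset was recorded.
 */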
1491*c1d14583SBruce Richardson 
1492*c1d14583SBruce Richardson static void
1493*c1d14583SBruce Richardson ice_dcf_update_stats(struct virtchnl_eth_stats *oes,
1494*c1d14583SBruce Richardson 		     struct virtchnl_eth_stats *nes)
1495*c1d14583SBruce Richardson {
1496*c1d14583SBruce Richardson 	ice_dcf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
1497*c1d14583SBruce Richardson 	ice_dcf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
1498*c1d14583SBruce Richardson 	ice_dcf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
1499*c1d14583SBruce Richardson 	ice_dcf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
1500*c1d14583SBruce Richardson 	ice_dcf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
1501*c1d14583SBruce Richardson 	ice_dcf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
1502*c1d14583SBruce Richardson 	ice_dcf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
1503*c1d14583SBruce Richardson 	ice_dcf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
1504*c1d14583SBruce Richardson 	ice_dcf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
1505*c1d14583SBruce Richardson 	ice_dcf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
1506*c1d14583SBruce Richardson 	ice_dcf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
1507*c1d14583SBruce Richardson }
1508*c1d14583SBruce Richardson 
1509*c1d14583SBruce Richardson 
1510*c1d14583SBruce Richardson static int
1511*c1d14583SBruce Richardson ice_dcf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1512*c1d14583SBruce Richardson {
1513*c1d14583SBruce Richardson 	struct ice_dcf_adapter *ad = dev->data->dev_private;
1514*c1d14583SBruce Richardson 	struct ice_dcf_hw *hw = &ad->real_hw;
1515*c1d14583SBruce Richardson 	struct virtchnl_eth_stats pstats;
1516*c1d14583SBruce Richardson 	int ret;
1517*c1d14583SBruce Richardson 
1518*c1d14583SBruce Richardson 	if (hw->resetting) {
1519*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR,
1520*c1d14583SBruce Richardson 			    "The DCF has been reset by PF, please reinit first");
1521*c1d14583SBruce Richardson 		return -EIO;
1522*c1d14583SBruce Richardson 	}
1523*c1d14583SBruce Richardson 
1524*c1d14583SBruce Richardson 	ret = ice_dcf_query_stats(hw, &pstats);
1525*c1d14583SBruce Richardson 	if (ret == 0) {
1526*c1d14583SBruce Richardson 		ice_dcf_update_stats(&hw->eth_stats_offset, &pstats);
1527*c1d14583SBruce Richardson 		stats->ipackets = pstats.rx_unicast + pstats.rx_multicast +
1528*c1d14583SBruce Richardson 				pstats.rx_broadcast - pstats.rx_discards;
1529*c1d14583SBruce Richardson 		stats->opackets = pstats.tx_broadcast + pstats.tx_multicast +
1530*c1d14583SBruce Richardson 						pstats.tx_unicast;
1531*c1d14583SBruce Richardson 		stats->imissed = pstats.rx_discards;
1532*c1d14583SBruce Richardson 		stats->oerrors = pstats.tx_errors + pstats.tx_discards;
1533*c1d14583SBruce Richardson 		stats->ibytes = pstats.rx_bytes;
1534*c1d14583SBruce Richardson 		stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
1535*c1d14583SBruce Richardson 		stats->obytes = pstats.tx_bytes;
1536*c1d14583SBruce Richardson 	} else {
1537*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "Get statistics failed");
1538*c1d14583SBruce Richardson 	}
1539*c1d14583SBruce Richardson 	return ret;
1540*c1d14583SBruce Richardson }
1541*c1d14583SBruce Richardson 
1542*c1d14583SBruce Richardson static int
1543*c1d14583SBruce Richardson ice_dcf_stats_reset(struct rte_eth_dev *dev)
1544*c1d14583SBruce Richardson {
1545*c1d14583SBruce Richardson 	struct ice_dcf_adapter *ad = dev->data->dev_private;
1546*c1d14583SBruce Richardson 	struct ice_dcf_hw *hw = &ad->real_hw;
1547*c1d14583SBruce Richardson 	struct virtchnl_eth_stats pstats;
1548*c1d14583SBruce Richardson 	int ret;
1549*c1d14583SBruce Richardson 
1550*c1d14583SBruce Richardson 	if (hw->resetting)
1551*c1d14583SBruce Richardson 		return 0;
1552*c1d14583SBruce Richardson 
1553*c1d14583SBruce Richardson 	/* read current stat values to use as the reset baseline */
1554*c1d14583SBruce Richardson 	ret = ice_dcf_query_stats(hw, &pstats);
1555*c1d14583SBruce Richardson 	if (ret != 0)
1556*c1d14583SBruce Richardson 		return ret;
1557*c1d14583SBruce Richardson 
1558*c1d14583SBruce Richardson 	/* set stats offset based on current values */
1559*c1d14583SBruce Richardson 	hw->eth_stats_offset = pstats;
1560*c1d14583SBruce Richardson 
1561*c1d14583SBruce Richardson 	return 0;
1562*c1d14583SBruce Richardson }
1563*c1d14583SBruce Richardson 
1564*c1d14583SBruce Richardson static int ice_dcf_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1565*c1d14583SBruce Richardson 				      struct rte_eth_xstat_name *xstats_names,
1566*c1d14583SBruce Richardson 				      __rte_unused unsigned int limit)
1567*c1d14583SBruce Richardson {
1568*c1d14583SBruce Richardson 	unsigned int i;
1569*c1d14583SBruce Richardson 
1570*c1d14583SBruce Richardson 	if (xstats_names != NULL)
1571*c1d14583SBruce Richardson 		for (i = 0; i < ICE_DCF_NB_XSTATS; i++) {
1572*c1d14583SBruce Richardson 			snprintf(xstats_names[i].name,
1573*c1d14583SBruce Richardson 				sizeof(xstats_names[i].name),
1574*c1d14583SBruce Richardson 				"%s", rte_ice_dcf_stats_strings[i].name);
1575*c1d14583SBruce Richardson 		}
1576*c1d14583SBruce Richardson 	return ICE_DCF_NB_XSTATS;
1577*c1d14583SBruce Richardson }
1578*c1d14583SBruce Richardson 
1579*c1d14583SBruce Richardson static int ice_dcf_xstats_get(struct rte_eth_dev *dev,
1580*c1d14583SBruce Richardson 				 struct rte_eth_xstat *xstats, unsigned int n)
1581*c1d14583SBruce Richardson {
1582*c1d14583SBruce Richardson 	int ret;
1583*c1d14583SBruce Richardson 	unsigned int i;
1584*c1d14583SBruce Richardson 	struct ice_dcf_adapter *adapter =
1585*c1d14583SBruce Richardson 		ICE_DCF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1586*c1d14583SBruce Richardson 	struct ice_dcf_hw *hw = &adapter->real_hw;
1587*c1d14583SBruce Richardson 	struct virtchnl_eth_stats *postats = &hw->eth_stats_offset;
1588*c1d14583SBruce Richardson 	struct virtchnl_eth_stats pnstats;
1589*c1d14583SBruce Richardson 
1590*c1d14583SBruce Richardson 	if (n < ICE_DCF_NB_XSTATS)
1591*c1d14583SBruce Richardson 		return ICE_DCF_NB_XSTATS;
1592*c1d14583SBruce Richardson 
1593*c1d14583SBruce Richardson 	ret = ice_dcf_query_stats(hw, &pnstats);
1594*c1d14583SBruce Richardson 	if (ret != 0)
1595*c1d14583SBruce Richardson 		return 0;
1596*c1d14583SBruce Richardson 
1597*c1d14583SBruce Richardson 	if (!xstats)
1598*c1d14583SBruce Richardson 		return 0;
1599*c1d14583SBruce Richardson 
1600*c1d14583SBruce Richardson 	ice_dcf_update_stats(postats, &pnstats);
1601*c1d14583SBruce Richardson 
1602*c1d14583SBruce Richardson 	/* loop over the xstats array, filling values from pnstats */
1603*c1d14583SBruce Richardson 	for (i = 0; i < ICE_DCF_NB_XSTATS; i++) {
1604*c1d14583SBruce Richardson 		xstats[i].id = i;
1605*c1d14583SBruce Richardson 		xstats[i].value = *(uint64_t *)(((char *)&pnstats) +
1606*c1d14583SBruce Richardson 			rte_ice_dcf_stats_strings[i].offset);
1607*c1d14583SBruce Richardson 	}
1608*c1d14583SBruce Richardson 
1609*c1d14583SBruce Richardson 	return ICE_DCF_NB_XSTATS;
1610*c1d14583SBruce Richardson }
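
/*
 * Usage from an application (illustrative sketch): since both handlers
 * above return ICE_DCF_NB_XSTATS when given too little room, the usual
 * two-call pattern sizes the arrays first, with port_id assumed valid:
 *
 *	int n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *	struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *
 *	rte_eth_xstats_get_names(port_id, names, n);
 *	rte_eth_xstats_get(port_id, xs, n);
 */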
1611*c1d14583SBruce Richardson 
1612*c1d14583SBruce Richardson static void
1613*c1d14583SBruce Richardson ice_dcf_free_repr_info(struct ice_dcf_adapter *dcf_adapter)
1614*c1d14583SBruce Richardson {
1615*c1d14583SBruce Richardson 	if (dcf_adapter->repr_infos) {
1616*c1d14583SBruce Richardson 		rte_free(dcf_adapter->repr_infos);
1617*c1d14583SBruce Richardson 		dcf_adapter->repr_infos = NULL;
1618*c1d14583SBruce Richardson 	}
1619*c1d14583SBruce Richardson }
1620*c1d14583SBruce Richardson 
1621*c1d14583SBruce Richardson int
1622*c1d14583SBruce Richardson ice_dcf_handle_vf_repr_close(struct ice_dcf_adapter *dcf_adapter,
1623*c1d14583SBruce Richardson 				uint16_t vf_id)
1624*c1d14583SBruce Richardson {
1625*c1d14583SBruce Richardson 	struct ice_dcf_repr_info *vf_rep_info;
1626*c1d14583SBruce Richardson 
1627*c1d14583SBruce Richardson 	if (vf_id >= dcf_adapter->num_reprs) {
1628*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "Invalid VF id: %d", vf_id);
1629*c1d14583SBruce Richardson 		return -1;
1630*c1d14583SBruce Richardson 	}
1631*c1d14583SBruce Richardson 
1632*c1d14583SBruce Richardson 	if (!dcf_adapter->repr_infos)
1633*c1d14583SBruce Richardson 		return 0;
1634*c1d14583SBruce Richardson 
1635*c1d14583SBruce Richardson 	vf_rep_info = &dcf_adapter->repr_infos[vf_id];
1636*c1d14583SBruce Richardson 	vf_rep_info->vf_rep_eth_dev = NULL;
1637*c1d14583SBruce Richardson 
1638*c1d14583SBruce Richardson 	return 0;
1639*c1d14583SBruce Richardson }
1640*c1d14583SBruce Richardson 
1641*c1d14583SBruce Richardson static int
1642*c1d14583SBruce Richardson ice_dcf_init_repr_info(struct ice_dcf_adapter *dcf_adapter)
1643*c1d14583SBruce Richardson {
1644*c1d14583SBruce Richardson 	dcf_adapter->repr_infos =
1645*c1d14583SBruce Richardson 			rte_calloc("ice_dcf_rep_info",
1646*c1d14583SBruce Richardson 				   dcf_adapter->real_hw.num_vfs,
1647*c1d14583SBruce Richardson 				   sizeof(dcf_adapter->repr_infos[0]), 0);
1648*c1d14583SBruce Richardson 	if (!dcf_adapter->repr_infos) {
1649*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "Failed to alloc memory for VF representors");
1650*c1d14583SBruce Richardson 		return -ENOMEM;
1651*c1d14583SBruce Richardson 	}
1652*c1d14583SBruce Richardson 
1653*c1d14583SBruce Richardson 	return 0;
1654*c1d14583SBruce Richardson }
1655*c1d14583SBruce Richardson 
1656*c1d14583SBruce Richardson static int
1657*c1d14583SBruce Richardson ice_dcf_dev_close(struct rte_eth_dev *dev)
1658*c1d14583SBruce Richardson {
1659*c1d14583SBruce Richardson 	struct ice_dcf_adapter *adapter = dev->data->dev_private;
1660*c1d14583SBruce Richardson 
1661*c1d14583SBruce Richardson 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1662*c1d14583SBruce Richardson 		return 0;
1663*c1d14583SBruce Richardson 
1664*c1d14583SBruce Richardson 	ice_dcf_vf_repr_notify_all(adapter, false);
1665*c1d14583SBruce Richardson 	(void)ice_dcf_dev_stop(dev);
1666*c1d14583SBruce Richardson 
1667*c1d14583SBruce Richardson 	ice_free_queues(dev);
1668*c1d14583SBruce Richardson 	ice_dcf_uninit_parent_adapter(dev);
1669*c1d14583SBruce Richardson 	ice_dcf_uninit_hw(dev, &adapter->real_hw);
1670*c1d14583SBruce Richardson 
1671*c1d14583SBruce Richardson 	return 0;
1672*c1d14583SBruce Richardson }
1673*c1d14583SBruce Richardson 
1674*c1d14583SBruce Richardson int
1675*c1d14583SBruce Richardson ice_dcf_link_update(struct rte_eth_dev *dev,
1676*c1d14583SBruce Richardson 		    __rte_unused int wait_to_complete)
1677*c1d14583SBruce Richardson {
1678*c1d14583SBruce Richardson 	struct ice_dcf_adapter *ad = dev->data->dev_private;
1679*c1d14583SBruce Richardson 	struct ice_dcf_hw *hw = &ad->real_hw;
1680*c1d14583SBruce Richardson 	struct rte_eth_link new_link;
1681*c1d14583SBruce Richardson 
1682*c1d14583SBruce Richardson 	memset(&new_link, 0, sizeof(new_link));
1683*c1d14583SBruce Richardson 
1684*c1d14583SBruce Richardson 	/* Only read the status info stored in the VF; the info is updated
1685*c1d14583SBruce Richardson 	 * when a LINK_CHANGE event is received from the PF via virtchnl.
1686*c1d14583SBruce Richardson 	 */
1687*c1d14583SBruce Richardson 	switch (hw->link_speed) {
1688*c1d14583SBruce Richardson 	case 10:
1689*c1d14583SBruce Richardson 		new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
1690*c1d14583SBruce Richardson 		break;
1691*c1d14583SBruce Richardson 	case 100:
1692*c1d14583SBruce Richardson 		new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
1693*c1d14583SBruce Richardson 		break;
1694*c1d14583SBruce Richardson 	case 1000:
1695*c1d14583SBruce Richardson 		new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
1696*c1d14583SBruce Richardson 		break;
1697*c1d14583SBruce Richardson 	case 10000:
1698*c1d14583SBruce Richardson 		new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
1699*c1d14583SBruce Richardson 		break;
1700*c1d14583SBruce Richardson 	case 20000:
1701*c1d14583SBruce Richardson 		new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
1702*c1d14583SBruce Richardson 		break;
1703*c1d14583SBruce Richardson 	case 25000:
1704*c1d14583SBruce Richardson 		new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
1705*c1d14583SBruce Richardson 		break;
1706*c1d14583SBruce Richardson 	case 40000:
1707*c1d14583SBruce Richardson 		new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
1708*c1d14583SBruce Richardson 		break;
1709*c1d14583SBruce Richardson 	case 50000:
1710*c1d14583SBruce Richardson 		new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
1711*c1d14583SBruce Richardson 		break;
1712*c1d14583SBruce Richardson 	case 100000:
1713*c1d14583SBruce Richardson 		new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
1714*c1d14583SBruce Richardson 		break;
1715*c1d14583SBruce Richardson 	default:
1716*c1d14583SBruce Richardson 		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1717*c1d14583SBruce Richardson 		break;
1718*c1d14583SBruce Richardson 	}
1719*c1d14583SBruce Richardson 
1720*c1d14583SBruce Richardson 	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1721*c1d14583SBruce Richardson 	new_link.link_status = hw->link_up ? RTE_ETH_LINK_UP :
1722*c1d14583SBruce Richardson 					     RTE_ETH_LINK_DOWN;
1723*c1d14583SBruce Richardson 	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1724*c1d14583SBruce Richardson 				RTE_ETH_LINK_SPEED_FIXED);
1725*c1d14583SBruce Richardson 
1726*c1d14583SBruce Richardson 	return rte_eth_linkstatus_set(dev, &new_link);
1727*c1d14583SBruce Richardson }
1728*c1d14583SBruce Richardson 
1729*c1d14583SBruce Richardson static int
1730*c1d14583SBruce Richardson ice_dcf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
1731*c1d14583SBruce Richardson {
1732*c1d14583SBruce Richardson 	/* MTU setting is forbidden while the port is started */
1733*c1d14583SBruce Richardson 	if (dev->data->dev_started != 0) {
1734*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
1735*c1d14583SBruce Richardson 			    dev->data->port_id);
1736*c1d14583SBruce Richardson 		return -EBUSY;
1737*c1d14583SBruce Richardson 	}
1738*c1d14583SBruce Richardson 
1739*c1d14583SBruce Richardson 	return 0;
1740*c1d14583SBruce Richardson }
1741*c1d14583SBruce Richardson 
1742*c1d14583SBruce Richardson bool
1743*c1d14583SBruce Richardson ice_dcf_adminq_need_retry(struct ice_adapter *ad)
1744*c1d14583SBruce Richardson {
1745*c1d14583SBruce Richardson 	return ad->hw.dcf_enabled &&
1746*c1d14583SBruce Richardson 	       !rte_atomic_load_explicit(&ad->dcf_state_on, rte_memory_order_relaxed);
1747*c1d14583SBruce Richardson }
1748*c1d14583SBruce Richardson 
1749*c1d14583SBruce Richardson /* Add UDP tunneling port */
1750*c1d14583SBruce Richardson static int
1751*c1d14583SBruce Richardson ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
1752*c1d14583SBruce Richardson 				struct rte_eth_udp_tunnel *udp_tunnel)
1753*c1d14583SBruce Richardson {
1754*c1d14583SBruce Richardson 	struct ice_dcf_adapter *adapter = dev->data->dev_private;
1755*c1d14583SBruce Richardson 	struct ice_adapter *parent_adapter = &adapter->parent;
1756*c1d14583SBruce Richardson 	struct ice_hw *parent_hw = &parent_adapter->hw;
1757*c1d14583SBruce Richardson 	int ret = 0;
1758*c1d14583SBruce Richardson 
1759*c1d14583SBruce Richardson 	if (!udp_tunnel)
1760*c1d14583SBruce Richardson 		return -EINVAL;
1761*c1d14583SBruce Richardson 
1762*c1d14583SBruce Richardson 	switch (udp_tunnel->prot_type) {
1763*c1d14583SBruce Richardson 	case RTE_ETH_TUNNEL_TYPE_VXLAN:
1764*c1d14583SBruce Richardson 		ret = ice_create_tunnel(parent_hw, TNL_VXLAN,
1765*c1d14583SBruce Richardson 					udp_tunnel->udp_port);
1766*c1d14583SBruce Richardson 		break;
1767*c1d14583SBruce Richardson 	case RTE_ETH_TUNNEL_TYPE_ECPRI:
1768*c1d14583SBruce Richardson 		ret = ice_create_tunnel(parent_hw, TNL_ECPRI,
1769*c1d14583SBruce Richardson 					udp_tunnel->udp_port);
1770*c1d14583SBruce Richardson 		break;
1771*c1d14583SBruce Richardson 	default:
1772*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
1773*c1d14583SBruce Richardson 		ret = -EINVAL;
1774*c1d14583SBruce Richardson 		break;
1775*c1d14583SBruce Richardson 	}
1776*c1d14583SBruce Richardson 
1777*c1d14583SBruce Richardson 	return ret;
1778*c1d14583SBruce Richardson }
1779*c1d14583SBruce Richardson 
1780*c1d14583SBruce Richardson /* Delete UDP tunneling port */
1781*c1d14583SBruce Richardson static int
1782*c1d14583SBruce Richardson ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
1783*c1d14583SBruce Richardson 				struct rte_eth_udp_tunnel *udp_tunnel)
1784*c1d14583SBruce Richardson {
1785*c1d14583SBruce Richardson 	struct ice_dcf_adapter *adapter = dev->data->dev_private;
1786*c1d14583SBruce Richardson 	struct ice_adapter *parent_adapter = &adapter->parent;
1787*c1d14583SBruce Richardson 	struct ice_hw *parent_hw = &parent_adapter->hw;
1788*c1d14583SBruce Richardson 	int ret = 0;
1789*c1d14583SBruce Richardson 
1790*c1d14583SBruce Richardson 	if (!udp_tunnel)
1791*c1d14583SBruce Richardson 		return -EINVAL;
1792*c1d14583SBruce Richardson 
1793*c1d14583SBruce Richardson 	switch (udp_tunnel->prot_type) {
1794*c1d14583SBruce Richardson 	case RTE_ETH_TUNNEL_TYPE_VXLAN:
1795*c1d14583SBruce Richardson 	case RTE_ETH_TUNNEL_TYPE_ECPRI:
1796*c1d14583SBruce Richardson 		ret = ice_destroy_tunnel(parent_hw, udp_tunnel->udp_port, 0);
1797*c1d14583SBruce Richardson 		break;
1798*c1d14583SBruce Richardson 	default:
1799*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
1800*c1d14583SBruce Richardson 		ret = -EINVAL;
1801*c1d14583SBruce Richardson 		break;
1802*c1d14583SBruce Richardson 	}
1803*c1d14583SBruce Richardson 
1804*c1d14583SBruce Richardson 	return ret;
1805*c1d14583SBruce Richardson }
1806*c1d14583SBruce Richardson 
1807*c1d14583SBruce Richardson static int
1808*c1d14583SBruce Richardson ice_dcf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
1809*c1d14583SBruce Richardson 		void *arg)
1810*c1d14583SBruce Richardson {
1811*c1d14583SBruce Richardson 	if (!arg)
1812*c1d14583SBruce Richardson 		return -EINVAL;
1813*c1d14583SBruce Richardson 
1814*c1d14583SBruce Richardson 	*(const void **)arg = &ice_dcf_tm_ops;
1815*c1d14583SBruce Richardson 
1816*c1d14583SBruce Richardson 	return 0;
1817*c1d14583SBruce Richardson }
1818*c1d14583SBruce Richardson 
1819*c1d14583SBruce Richardson static inline void
1820*c1d14583SBruce Richardson ice_dcf_reset_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
1821*c1d14583SBruce Richardson {
1822*c1d14583SBruce Richardson 	ice_dcf_uninit_hw(eth_dev, hw);
1823*c1d14583SBruce Richardson 	ice_dcf_init_hw(eth_dev, hw);
1824*c1d14583SBruce Richardson }
1825*c1d14583SBruce Richardson 
1826*c1d14583SBruce Richardson /* Check if reset has been triggered by PF */
1827*c1d14583SBruce Richardson static inline bool
1828*c1d14583SBruce Richardson ice_dcf_is_reset(struct rte_eth_dev *dev)
1829*c1d14583SBruce Richardson {
1830*c1d14583SBruce Richardson 	struct ice_dcf_adapter *ad = dev->data->dev_private;
1831*c1d14583SBruce Richardson 	struct iavf_hw *hw = &ad->real_hw.avf;
1832*c1d14583SBruce Richardson 
1833*c1d14583SBruce Richardson 	return !(IAVF_READ_REG(hw, IAVF_VF_ARQLEN1) &
1834*c1d14583SBruce Richardson 		 IAVF_VF_ARQLEN1_ARQENABLE_MASK);
1835*c1d14583SBruce Richardson }
1836*c1d14583SBruce Richardson 
1837*c1d14583SBruce Richardson static int
1838*c1d14583SBruce Richardson ice_dcf_dev_reset(struct rte_eth_dev *dev)
1839*c1d14583SBruce Richardson {
1840*c1d14583SBruce Richardson 	struct ice_dcf_adapter *ad = dev->data->dev_private;
1841*c1d14583SBruce Richardson 	struct ice_dcf_hw *hw = &ad->real_hw;
1842*c1d14583SBruce Richardson 	int ret;
1843*c1d14583SBruce Richardson 
1844*c1d14583SBruce Richardson 	if (ice_dcf_is_reset(dev)) {
1845*c1d14583SBruce Richardson 		if (!ad->real_hw.resetting)
1846*c1d14583SBruce Richardson 			ad->real_hw.resetting = true;
1847*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "The DCF has been reset by PF");
1848*c1d14583SBruce Richardson 
1849*c1d14583SBruce Richardson 		/*
1850*c1d14583SBruce Richardson 		 * Simply reset hw to trigger an additional DCF enable/disable
1851*c1d14583SBruce Richardson 		 * cycle, which helps to work around the issue that the kernel
1852*c1d14583SBruce Richardson 		 * driver may not clean up resources during the previous reset.
1853*c1d14583SBruce Richardson 		 */
1854*c1d14583SBruce Richardson 		ice_dcf_reset_hw(dev, hw);
1855*c1d14583SBruce Richardson 	}
1856*c1d14583SBruce Richardson 
1857*c1d14583SBruce Richardson 	ret = ice_dcf_dev_close(dev);
1858*c1d14583SBruce Richardson 	if (ret)
1859*c1d14583SBruce Richardson 		return ret;
1860*c1d14583SBruce Richardson 
1861*c1d14583SBruce Richardson 	ret = ice_dcf_dev_init(dev);
1862*c1d14583SBruce Richardson 
1863*c1d14583SBruce Richardson 	return ret;
1864*c1d14583SBruce Richardson }
1865*c1d14583SBruce Richardson 
1866*c1d14583SBruce Richardson static const uint32_t *
1867*c1d14583SBruce Richardson ice_dcf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused,
1868*c1d14583SBruce Richardson 				 size_t *no_of_elements)
1869*c1d14583SBruce Richardson {
1870*c1d14583SBruce Richardson 	static const uint32_t ptypes[] = {
1871*c1d14583SBruce Richardson 		RTE_PTYPE_L2_ETHER,
1872*c1d14583SBruce Richardson 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1873*c1d14583SBruce Richardson 		RTE_PTYPE_L4_FRAG,
1874*c1d14583SBruce Richardson 		RTE_PTYPE_L4_ICMP,
1875*c1d14583SBruce Richardson 		RTE_PTYPE_L4_NONFRAG,
1876*c1d14583SBruce Richardson 		RTE_PTYPE_L4_SCTP,
1877*c1d14583SBruce Richardson 		RTE_PTYPE_L4_TCP,
1878*c1d14583SBruce Richardson 		RTE_PTYPE_L4_UDP,
1879*c1d14583SBruce Richardson 	};
1880*c1d14583SBruce Richardson 	*no_of_elements = RTE_DIM(ptypes);
1881*c1d14583SBruce Richardson 	return ptypes;
1882*c1d14583SBruce Richardson }
1883*c1d14583SBruce Richardson 
1884*c1d14583SBruce Richardson static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
1885*c1d14583SBruce Richardson 	.dev_start                = ice_dcf_dev_start,
1886*c1d14583SBruce Richardson 	.dev_stop                 = ice_dcf_dev_stop,
1887*c1d14583SBruce Richardson 	.dev_close                = ice_dcf_dev_close,
1888*c1d14583SBruce Richardson 	.dev_reset                = ice_dcf_dev_reset,
1889*c1d14583SBruce Richardson 	.dev_configure            = ice_dcf_dev_configure,
1890*c1d14583SBruce Richardson 	.dev_infos_get            = ice_dcf_dev_info_get,
1891*c1d14583SBruce Richardson 	.dev_supported_ptypes_get = ice_dcf_dev_supported_ptypes_get,
1892*c1d14583SBruce Richardson 	.rx_queue_setup           = ice_rx_queue_setup,
1893*c1d14583SBruce Richardson 	.tx_queue_setup           = ice_tx_queue_setup,
1894*c1d14583SBruce Richardson 	.rx_queue_release         = ice_dev_rx_queue_release,
1895*c1d14583SBruce Richardson 	.tx_queue_release         = ice_dev_tx_queue_release,
1896*c1d14583SBruce Richardson 	.rx_queue_start           = ice_dcf_rx_queue_start,
1897*c1d14583SBruce Richardson 	.tx_queue_start           = ice_dcf_tx_queue_start,
1898*c1d14583SBruce Richardson 	.rx_queue_stop            = ice_dcf_rx_queue_stop,
1899*c1d14583SBruce Richardson 	.tx_queue_stop            = ice_dcf_tx_queue_stop,
1900*c1d14583SBruce Richardson 	.rxq_info_get             = ice_rxq_info_get,
1901*c1d14583SBruce Richardson 	.txq_info_get             = ice_txq_info_get,
1902*c1d14583SBruce Richardson 	.get_monitor_addr         = ice_get_monitor_addr,
1903*c1d14583SBruce Richardson 	.link_update              = ice_dcf_link_update,
1904*c1d14583SBruce Richardson 	.stats_get                = ice_dcf_stats_get,
1905*c1d14583SBruce Richardson 	.stats_reset              = ice_dcf_stats_reset,
1906*c1d14583SBruce Richardson 	.xstats_get               = ice_dcf_xstats_get,
1907*c1d14583SBruce Richardson 	.xstats_get_names         = ice_dcf_xstats_get_names,
1908*c1d14583SBruce Richardson 	.xstats_reset             = ice_dcf_stats_reset,
1909*c1d14583SBruce Richardson 	.promiscuous_enable       = ice_dcf_dev_promiscuous_enable,
1910*c1d14583SBruce Richardson 	.promiscuous_disable      = ice_dcf_dev_promiscuous_disable,
1911*c1d14583SBruce Richardson 	.allmulticast_enable      = ice_dcf_dev_allmulticast_enable,
1912*c1d14583SBruce Richardson 	.allmulticast_disable     = ice_dcf_dev_allmulticast_disable,
1913*c1d14583SBruce Richardson 	.mac_addr_add             = dcf_dev_add_mac_addr,
1914*c1d14583SBruce Richardson 	.mac_addr_remove          = dcf_dev_del_mac_addr,
1915*c1d14583SBruce Richardson 	.set_mc_addr_list         = dcf_set_mc_addr_list,
1916*c1d14583SBruce Richardson 	.mac_addr_set             = dcf_dev_set_default_mac_addr,
1917*c1d14583SBruce Richardson 	.vlan_filter_set          = dcf_dev_vlan_filter_set,
1918*c1d14583SBruce Richardson 	.vlan_offload_set         = dcf_dev_vlan_offload_set,
1919*c1d14583SBruce Richardson 	.flow_ops_get             = ice_dcf_dev_flow_ops_get,
1920*c1d14583SBruce Richardson 	.udp_tunnel_port_add	  = ice_dcf_dev_udp_tunnel_port_add,
1921*c1d14583SBruce Richardson 	.udp_tunnel_port_del	  = ice_dcf_dev_udp_tunnel_port_del,
1922*c1d14583SBruce Richardson 	.tm_ops_get               = ice_dcf_tm_ops_get,
1923*c1d14583SBruce Richardson 	.reta_update              = ice_dcf_dev_rss_reta_update,
1924*c1d14583SBruce Richardson 	.reta_query               = ice_dcf_dev_rss_reta_query,
1925*c1d14583SBruce Richardson 	.rss_hash_update          = ice_dcf_dev_rss_hash_update,
1926*c1d14583SBruce Richardson 	.rss_hash_conf_get        = ice_dcf_dev_rss_hash_conf_get,
1927*c1d14583SBruce Richardson 	.tx_done_cleanup          = ice_tx_done_cleanup,
1928*c1d14583SBruce Richardson 	.mtu_set                  = ice_dcf_dev_mtu_set,
1929*c1d14583SBruce Richardson };
1930*c1d14583SBruce Richardson 
1931*c1d14583SBruce Richardson static int
1932*c1d14583SBruce Richardson ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
1933*c1d14583SBruce Richardson {
1934*c1d14583SBruce Richardson 	struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
1935*c1d14583SBruce Richardson 	struct ice_adapter *parent_adapter = &adapter->parent;
1936*c1d14583SBruce Richardson 
1937*c1d14583SBruce Richardson 	eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
1938*c1d14583SBruce Richardson 	eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
1939*c1d14583SBruce Richardson 	eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;
1940*c1d14583SBruce Richardson 
1941*c1d14583SBruce Richardson 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1942*c1d14583SBruce Richardson 		return 0;
1943*c1d14583SBruce Richardson 
1944*c1d14583SBruce Richardson 	adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
1945*c1d14583SBruce Richardson 	if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
1946*c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
1947*c1d14583SBruce Richardson 		rte_atomic_store_explicit(&parent_adapter->dcf_state_on, false,
1948*c1d14583SBruce Richardson 				 rte_memory_order_relaxed);
1949*c1d14583SBruce Richardson 		return -1;
1950*c1d14583SBruce Richardson 	}
1951*c1d14583SBruce Richardson 
1952*c1d14583SBruce Richardson 	rte_atomic_store_explicit(&parent_adapter->dcf_state_on, true, rte_memory_order_relaxed);
1953*c1d14583SBruce Richardson 
1954*c1d14583SBruce Richardson 	if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
1955*c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
1956*c1d14583SBruce Richardson 		ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);
1957*c1d14583SBruce Richardson 		return -1;
1958*c1d14583SBruce Richardson 	}
1959*c1d14583SBruce Richardson 
1960*c1d14583SBruce Richardson 	ice_dcf_stats_reset(eth_dev);
1961*c1d14583SBruce Richardson 
1962*c1d14583SBruce Richardson 	dcf_config_promisc(adapter, false, false);
1963*c1d14583SBruce Richardson 	ice_dcf_vf_repr_notify_all(adapter, true);
1964*c1d14583SBruce Richardson 
1965*c1d14583SBruce Richardson 	return 0;
1966*c1d14583SBruce Richardson }
1967*c1d14583SBruce Richardson 
1968*c1d14583SBruce Richardson static int
1969*c1d14583SBruce Richardson ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
1970*c1d14583SBruce Richardson {
1971*c1d14583SBruce Richardson 	struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
1972*c1d14583SBruce Richardson 
1973*c1d14583SBruce Richardson 	ice_dcf_free_repr_info(adapter);
1974*c1d14583SBruce Richardson 	ice_dcf_dev_close(eth_dev);
1975*c1d14583SBruce Richardson 
1976*c1d14583SBruce Richardson 	return 0;
1977*c1d14583SBruce Richardson }
1978*c1d14583SBruce Richardson 
1979*c1d14583SBruce Richardson static int
1980*c1d14583SBruce Richardson ice_dcf_engine_disabled_handler(__rte_unused const char *key,
1981*c1d14583SBruce Richardson 			  const char *value, __rte_unused void *opaque)
1982*c1d14583SBruce Richardson {
1983*c1d14583SBruce Richardson 	if (strcmp(value, "off"))
1984*c1d14583SBruce Richardson 		return -1;
1985*c1d14583SBruce Richardson 
1986*c1d14583SBruce Richardson 	return 0;
1987*c1d14583SBruce Richardson }
1988*c1d14583SBruce Richardson 
1989*c1d14583SBruce Richardson static int
1990*c1d14583SBruce Richardson ice_dcf_cap_check_handler(__rte_unused const char *key,
1991*c1d14583SBruce Richardson 			  const char *value, __rte_unused void *opaque)
1992*c1d14583SBruce Richardson {
1993*c1d14583SBruce Richardson 	if (strcmp(value, "dcf"))
1994*c1d14583SBruce Richardson 		return -1;
1995*c1d14583SBruce Richardson 
1996*c1d14583SBruce Richardson 	return 0;
1997*c1d14583SBruce Richardson }
1998*c1d14583SBruce Richardson 
1999*c1d14583SBruce Richardson int
2000*c1d14583SBruce Richardson ice_devargs_check(struct rte_devargs *devargs, enum ice_dcf_devrarg devarg_type)
2001*c1d14583SBruce Richardson {
2002*c1d14583SBruce Richardson 	struct rte_kvargs *kvlist;
2003*c1d14583SBruce Richardson 	unsigned int i = 0;
2004*c1d14583SBruce Richardson 	int ret = 0;
2005*c1d14583SBruce Richardson 
2006*c1d14583SBruce Richardson 	if (devargs == NULL)
2007*c1d14583SBruce Richardson 		return 0;
2008*c1d14583SBruce Richardson 
2009*c1d14583SBruce Richardson 	kvlist = rte_kvargs_parse(devargs->args, NULL);
2010*c1d14583SBruce Richardson 	if (kvlist == NULL)
2011*c1d14583SBruce Richardson 		return 0;
2012*c1d14583SBruce Richardson 
2013*c1d14583SBruce Richardson 	for (i = 0; i < ARRAY_SIZE(ice_devargs_table); i++) {
2014*c1d14583SBruce Richardson 		if (devarg_type == ice_devargs_table[i].type) {
2015*c1d14583SBruce Richardson 			if (!rte_kvargs_count(kvlist, ice_devargs_table[i].key))
2016*c1d14583SBruce Richardson 				goto exit;
2017*c1d14583SBruce Richardson 
2018*c1d14583SBruce Richardson 			if (rte_kvargs_process(kvlist, ice_devargs_table[i].key,
2019*c1d14583SBruce Richardson 					ice_devargs_table[i].handler, NULL) < 0)
2020*c1d14583SBruce Richardson 				goto exit;
2021*c1d14583SBruce Richardson 			ret = 1;
2022*c1d14583SBruce Richardson 			break;
2023*c1d14583SBruce Richardson 		}
2024*c1d14583SBruce Richardson 	}
2025*c1d14583SBruce Richardson exit:
2026*c1d14583SBruce Richardson 	rte_kvargs_free(kvlist);
2027*c1d14583SBruce Richardson 	return ret;
2028*c1d14583SBruce Richardson }
2029*c1d14583SBruce Richardson 
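/*
 * PCI probe entry point. Without a valid "cap=dcf" devarg, return a
 * positive value so the bus leaves the VF for another driver (e.g. iavf)
 * to claim. Otherwise create the DCF ethdev and, if "representor" ports
 * were requested, one VF representor ethdev per requested VF ID.
 */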
2030*c1d14583SBruce Richardson static int
2031*c1d14583SBruce Richardson eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
2032*c1d14583SBruce Richardson 		      struct rte_pci_device *pci_dev)
2033*c1d14583SBruce Richardson {
2034*c1d14583SBruce Richardson 	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
2035*c1d14583SBruce Richardson 	struct ice_dcf_vf_repr_param repr_param;
2036*c1d14583SBruce Richardson 	char repr_name[RTE_ETH_NAME_MAX_LEN];
2037*c1d14583SBruce Richardson 	struct ice_dcf_adapter *dcf_adapter;
2038*c1d14583SBruce Richardson 	struct rte_eth_dev *dcf_ethdev;
2039*c1d14583SBruce Richardson 	uint16_t dcf_vsi_id;
2040*c1d14583SBruce Richardson 	int i, ret;
2041*c1d14583SBruce Richardson 
2042*c1d14583SBruce Richardson 	if (!ice_devargs_check(pci_dev->device.devargs, ICE_DCF_DEVARG_CAP))
2043*c1d14583SBruce Richardson 		return 1;
2044*c1d14583SBruce Richardson 
2045*c1d14583SBruce Richardson 	ret = rte_eth_devargs_parse(pci_dev->device.devargs->args, &eth_da, 1);
2046*c1d14583SBruce Richardson 	if (ret < 0)
2047*c1d14583SBruce Richardson 		return ret;
2048*c1d14583SBruce Richardson 
2049*c1d14583SBruce Richardson 	ret = rte_eth_dev_pci_generic_probe(pci_dev,
2050*c1d14583SBruce Richardson 					    sizeof(struct ice_dcf_adapter),
2051*c1d14583SBruce Richardson 					    ice_dcf_dev_init);
2052*c1d14583SBruce Richardson 	if (ret || !eth_da.nb_representor_ports)
2053*c1d14583SBruce Richardson 		return ret;
2054*c1d14583SBruce Richardson 	if (eth_da.type != RTE_ETH_REPRESENTOR_VF)
2055*c1d14583SBruce Richardson 		return -ENOTSUP;
2056*c1d14583SBruce Richardson 
2057*c1d14583SBruce Richardson 	dcf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
2058*c1d14583SBruce Richardson 	if (dcf_ethdev == NULL)
2059*c1d14583SBruce Richardson 		return -ENODEV;
2060*c1d14583SBruce Richardson 
2061*c1d14583SBruce Richardson 	dcf_adapter = dcf_ethdev->data->dev_private;
2062*c1d14583SBruce Richardson 	ret = ice_dcf_init_repr_info(dcf_adapter);
2063*c1d14583SBruce Richardson 	if (ret)
2064*c1d14583SBruce Richardson 		return ret;
2065*c1d14583SBruce Richardson 
2066*c1d14583SBruce Richardson 	if (eth_da.nb_representor_ports > dcf_adapter->real_hw.num_vfs ||
2067*c1d14583SBruce Richardson 	    eth_da.nb_representor_ports >= RTE_MAX_ETHPORTS) {
2068*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "The number of port representors is too large: %u",
2069*c1d14583SBruce Richardson 			    eth_da.nb_representor_ports);
2070*c1d14583SBruce Richardson 		ice_dcf_free_repr_info(dcf_adapter);
2071*c1d14583SBruce Richardson 		return -EINVAL;
2072*c1d14583SBruce Richardson 	}
2073*c1d14583SBruce Richardson 
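	/*
	 * The DCF's own entry in the VF-to-VSI map carries the VALID bit;
	 * precompute it so the loop below can refuse a representor for the
	 * DCF itself.
	 */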
2074*c1d14583SBruce Richardson 	dcf_vsi_id = dcf_adapter->real_hw.vsi_id | VIRTCHNL_DCF_VF_VSI_VALID;
2075*c1d14583SBruce Richardson 
2076*c1d14583SBruce Richardson 	repr_param.dcf_eth_dev = dcf_ethdev;
2077*c1d14583SBruce Richardson 	repr_param.switch_domain_id = 0;
2078*c1d14583SBruce Richardson 
2079*c1d14583SBruce Richardson 	for (i = 0; i < eth_da.nb_representor_ports; i++) {
2080*c1d14583SBruce Richardson 		uint16_t vf_id = eth_da.representor_ports[i];
2081*c1d14583SBruce Richardson 		struct rte_eth_dev *vf_rep_eth_dev;
2082*c1d14583SBruce Richardson 
2083*c1d14583SBruce Richardson 		if (vf_id >= dcf_adapter->real_hw.num_vfs) {
2084*c1d14583SBruce Richardson 			PMD_DRV_LOG(ERR, "VF ID %u is out of range (0 to %u)",
2085*c1d14583SBruce Richardson 				    vf_id, dcf_adapter->real_hw.num_vfs - 1);
2086*c1d14583SBruce Richardson 			ret = -EINVAL;
2087*c1d14583SBruce Richardson 			break;
2088*c1d14583SBruce Richardson 		}
2089*c1d14583SBruce Richardson 
2090*c1d14583SBruce Richardson 		if (dcf_adapter->real_hw.vf_vsi_map[vf_id] == dcf_vsi_id) {
2091*c1d14583SBruce Richardson 			PMD_DRV_LOG(ERR, "VF ID %u is the DCF's own ID", vf_id);
2092*c1d14583SBruce Richardson 			ret = -EINVAL;
2093*c1d14583SBruce Richardson 			break;
2094*c1d14583SBruce Richardson 		}
2095*c1d14583SBruce Richardson 
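		/* One representor ethdev per VF, named after the DCF's PCI device. */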
2096*c1d14583SBruce Richardson 		repr_param.vf_id = vf_id;
2097*c1d14583SBruce Richardson 		snprintf(repr_name, sizeof(repr_name), "net_%s_representor_%u",
2098*c1d14583SBruce Richardson 			 pci_dev->device.name, vf_id);
2099*c1d14583SBruce Richardson 		ret = rte_eth_dev_create(&pci_dev->device, repr_name,
2100*c1d14583SBruce Richardson 					 sizeof(struct ice_dcf_vf_repr),
2101*c1d14583SBruce Richardson 					 NULL, NULL, ice_dcf_vf_repr_init,
2102*c1d14583SBruce Richardson 					 &repr_param);
2103*c1d14583SBruce Richardson 		if (ret) {
2104*c1d14583SBruce Richardson 			PMD_DRV_LOG(ERR, "Failed to create DCF VF representor %s",
2105*c1d14583SBruce Richardson 				    repr_name);
2106*c1d14583SBruce Richardson 			break;
2107*c1d14583SBruce Richardson 		}
2108*c1d14583SBruce Richardson 
2109*c1d14583SBruce Richardson 		vf_rep_eth_dev = rte_eth_dev_allocated(repr_name);
2110*c1d14583SBruce Richardson 		if (!vf_rep_eth_dev) {
2111*c1d14583SBruce Richardson 			PMD_DRV_LOG(ERR,
2112*c1d14583SBruce Richardson 				    "Failed to find the ethdev for DCF VF representor: %s",
2113*c1d14583SBruce Richardson 				    repr_name);
2114*c1d14583SBruce Richardson 			ret = -ENODEV;
2115*c1d14583SBruce Richardson 			break;
2116*c1d14583SBruce Richardson 		}
2117*c1d14583SBruce Richardson 
2118*c1d14583SBruce Richardson 		dcf_adapter->repr_infos[vf_id].vf_rep_eth_dev = vf_rep_eth_dev;
2119*c1d14583SBruce Richardson 		dcf_adapter->num_reprs++;
2120*c1d14583SBruce Richardson 	}
2121*c1d14583SBruce Richardson 
2122*c1d14583SBruce Richardson 	return ret;
2123*c1d14583SBruce Richardson }
2124*c1d14583SBruce Richardson 
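/*
 * PCI remove entry point: representor ports take the representor uninit
 * path, while the DCF port proper takes the full device uninit.
 */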
2125*c1d14583SBruce Richardson static int
2126*c1d14583SBruce Richardson eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
2127*c1d14583SBruce Richardson {
2128*c1d14583SBruce Richardson 	struct rte_eth_dev *eth_dev;
2129*c1d14583SBruce Richardson 
2130*c1d14583SBruce Richardson 	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
2131*c1d14583SBruce Richardson 	if (!eth_dev)
2132*c1d14583SBruce Richardson 		return 0;
2133*c1d14583SBruce Richardson 
2134*c1d14583SBruce Richardson 	if (rte_eth_dev_is_repr(eth_dev))
2135*c1d14583SBruce Richardson 		return rte_eth_dev_pci_generic_remove(pci_dev,
2136*c1d14583SBruce Richardson 						      ice_dcf_vf_repr_uninit);
2137*c1d14583SBruce Richardson 	else
2138*c1d14583SBruce Richardson 		return rte_eth_dev_pci_generic_remove(pci_dev,
2139*c1d14583SBruce Richardson 						      ice_dcf_dev_uninit);
2140*c1d14583SBruce Richardson }
2141*c1d14583SBruce Richardson 
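/*
 * The DCF binds to the same adaptive VF device ID as the iavf PMD; the
 * "cap=dcf" check in probe is what steers a VF to this driver.
 */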
2142*c1d14583SBruce Richardson static const struct rte_pci_id pci_id_ice_dcf_map[] = {
2143*c1d14583SBruce Richardson 	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
2144*c1d14583SBruce Richardson 	{ .vendor_id = 0, /* sentinel */ },
2145*c1d14583SBruce Richardson };
2146*c1d14583SBruce Richardson 
2147*c1d14583SBruce Richardson static struct rte_pci_driver rte_ice_dcf_pmd = {
2148*c1d14583SBruce Richardson 	.id_table = pci_id_ice_dcf_map,
2149*c1d14583SBruce Richardson 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
2150*c1d14583SBruce Richardson 	.probe = eth_ice_dcf_pci_probe,
2151*c1d14583SBruce Richardson 	.remove = eth_ice_dcf_pci_remove,
2152*c1d14583SBruce Richardson };
2153*c1d14583SBruce Richardson 
2154*c1d14583SBruce Richardson RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
2155*c1d14583SBruce Richardson RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
2156*c1d14583SBruce Richardson RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
2157*c1d14583SBruce Richardson RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf");
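/*
 * Usage sketch (illustrative only; the PCI address and VF range are
 * placeholders, and the VF must first be granted trust/DCF mode on the PF):
 *
 *   dpdk-testpmd -a 18:01.0,cap=dcf,representor=vf[1-2] -- -i
 *
 * VF 0 is typically the DCF itself, so it is excluded from the representor
 * list; the probe above rejects a representor whose VSI matches the DCF's.
 */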