xref: /dpdk/drivers/net/intel/ice/ice_dcf.c (revision c038157a2e4416338bb5c7171ae7d611c454045d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation
3  */
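
/*
 * Control-path helpers for the ice Device Config Function (DCF).
 * The DCF is a privileged VF that configures device-level resources on
 * behalf of DPDK; it talks to the PF over the iavf admin queue using
 * virtchnl messages. This file implements command send/receive (polled
 * and interrupt driven), VF resource and VF-to-VSI map retrieval, and
 * RSS, queue, IRQ-map and MAC configuration for the DCF.
 */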
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <inttypes.h>
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 
16 #include <rte_pci.h>
17 #include <rte_eal.h>
18 #include <rte_ether.h>
19 #include <ethdev_driver.h>
20 #include <ethdev_pci.h>
21 #include <rte_malloc.h>
22 #include <rte_memzone.h>
23 #include <dev_driver.h>
24 
25 #include "ice_dcf.h"
26 #include "ice_rxtx.h"
27 
28 #define ICE_DCF_AQ_LEN     32
29 #define ICE_DCF_AQ_BUF_SZ  4096
30 
31 #define ICE_DCF_ARQ_MAX_RETRIES 200
32 #define ICE_DCF_ARQ_CHECK_TIME  2   /* msecs */
33 
34 #define ICE_DCF_CHECK_INTERVAL  100   /* 100ms */
35 
36 #define ICE_DCF_VF_RES_BUF_SZ	\
37 	(sizeof(struct virtchnl_vf_resource) +	\
38 		IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource))
39 
40 #define FIELD_SELECTOR(proto_hdr_field) \
41 		(1UL << ((proto_hdr_field) & PROTO_HDR_FIELD_MASK))
42 #define BUFF_NOUSED			0
43 
44 #define proto_hdr_eth { \
45 	VIRTCHNL_PROTO_HDR_ETH, \
46 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) | \
47 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST), {BUFF_NOUSED} }
48 
49 #define proto_hdr_svlan { \
50 	VIRTCHNL_PROTO_HDR_S_VLAN, \
51 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID), {BUFF_NOUSED} }
52 
53 #define proto_hdr_cvlan { \
54 	VIRTCHNL_PROTO_HDR_C_VLAN, \
55 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID), {BUFF_NOUSED} }
56 
57 #define proto_hdr_ipv4 { \
58 	VIRTCHNL_PROTO_HDR_IPV4, \
59 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | \
60 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST), {BUFF_NOUSED} }
61 
62 #define proto_hdr_ipv4_with_prot { \
63 	VIRTCHNL_PROTO_HDR_IPV4, \
64 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | \
65 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | \
66 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), {BUFF_NOUSED} }
67 
68 #define proto_hdr_ipv6 { \
69 	VIRTCHNL_PROTO_HDR_IPV6, \
70 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | \
71 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST), {BUFF_NOUSED} }
72 
73 #define proto_hdr_ipv6_frag { \
74 	VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG, \
75 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID), {BUFF_NOUSED} }
76 
77 #define proto_hdr_ipv6_with_prot { \
78 	VIRTCHNL_PROTO_HDR_IPV6, \
79 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | \
80 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) | \
81 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), {BUFF_NOUSED} }
82 
83 #define proto_hdr_udp { \
84 	VIRTCHNL_PROTO_HDR_UDP, \
85 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) | \
86 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT), {BUFF_NOUSED} }
87 
88 #define proto_hdr_tcp { \
89 	VIRTCHNL_PROTO_HDR_TCP, \
90 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) | \
91 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT), {BUFF_NOUSED} }
92 
93 #define proto_hdr_sctp { \
94 	VIRTCHNL_PROTO_HDR_SCTP, \
95 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) | \
96 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT), {BUFF_NOUSED} }
97 
98 #define proto_hdr_esp { \
99 	VIRTCHNL_PROTO_HDR_ESP, \
100 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI), {BUFF_NOUSED} }
101 
102 #define proto_hdr_ah { \
103 	VIRTCHNL_PROTO_HDR_AH, \
104 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI), {BUFF_NOUSED} }
105 
106 #define proto_hdr_l2tpv3 { \
107 	VIRTCHNL_PROTO_HDR_L2TPV3, \
108 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID), {BUFF_NOUSED} }
109 
110 #define proto_hdr_pfcp { \
111 	VIRTCHNL_PROTO_HDR_PFCP, \
112 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID), {BUFF_NOUSED} }
113 
114 #define proto_hdr_gtpc { \
115 	VIRTCHNL_PROTO_HDR_GTPC, 0, {BUFF_NOUSED} }
116 
117 #define proto_hdr_ecpri { \
118 	VIRTCHNL_PROTO_HDR_ECPRI, \
119 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ECPRI_PC_RTC_ID), {BUFF_NOUSED} }
120 
121 #define proto_hdr_l2tpv2 { \
122 	VIRTCHNL_PROTO_HDR_L2TPV2, \
123 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID) | \
124 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID), {BUFF_NOUSED} }
125 
126 #define proto_hdr_ppp { \
127 	VIRTCHNL_PROTO_HDR_PPP, 0, {BUFF_NOUSED} }
128 
129 #define TUNNEL_LEVEL_OUTER		0
130 #define TUNNEL_LEVEL_INNER		1
131 
132 struct virtchnl_proto_hdrs ice_dcf_inner_ipv4_tmplt = {
133 	TUNNEL_LEVEL_INNER, 1, {{proto_hdr_ipv4}}
134 };
135 
136 struct virtchnl_proto_hdrs ice_dcf_inner_ipv4_udp_tmplt = {
137 	TUNNEL_LEVEL_INNER, 2, {{proto_hdr_ipv4_with_prot, proto_hdr_udp}}
138 };
139 
140 struct virtchnl_proto_hdrs ice_dcf_inner_ipv4_tcp_tmplt = {
141 	TUNNEL_LEVEL_INNER, 2, {{proto_hdr_ipv4_with_prot, proto_hdr_tcp}}
142 };
143 
144 struct virtchnl_proto_hdrs ice_dcf_inner_ipv4_sctp_tmplt = {
145 	TUNNEL_LEVEL_INNER, 2, {{proto_hdr_ipv4, proto_hdr_sctp}}
146 };
147 
148 struct virtchnl_proto_hdrs ice_dcf_inner_ipv6_tmplt = {
149 	TUNNEL_LEVEL_INNER, 1, {{proto_hdr_ipv6}}
150 };
151 
152 struct virtchnl_proto_hdrs ice_dcf_inner_ipv6_udp_tmplt = {
153 	TUNNEL_LEVEL_INNER, 2, {{proto_hdr_ipv6_with_prot, proto_hdr_udp}}
154 };
155 
156 struct virtchnl_proto_hdrs ice_dcf_inner_ipv6_tcp_tmplt = {
157 	TUNNEL_LEVEL_INNER, 2, {{proto_hdr_ipv6_with_prot, proto_hdr_tcp}}
158 };
159 
160 struct virtchnl_proto_hdrs ice_dcf_inner_ipv6_sctp_tmplt = {
161 	TUNNEL_LEVEL_INNER, 2, {{proto_hdr_ipv6, proto_hdr_sctp}}
162 };
163 
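/* Send a virtchnl request to the PF over the admin queue without using the
 * interrupt-driven command queue; the caller polls for the reply with
 * ice_dcf_recv_cmd_rsp_no_irq().
 */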
164 static __rte_always_inline int
165 ice_dcf_send_cmd_req_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op,
166 			    uint8_t *req_msg, uint16_t req_msglen)
167 {
168 	return iavf_aq_send_msg_to_pf(&hw->avf, op, IAVF_SUCCESS,
169 				      req_msg, req_msglen, NULL);
170 }
171 
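/* Poll the admin receive queue for the response to @op and copy it into
 * rsp_msgbuf, retrying up to ICE_DCF_ARQ_MAX_RETRIES times with
 * ICE_DCF_ARQ_CHECK_TIME ms between attempts. Returns the virtchnl status
 * carried in the response descriptor, or -EIO on timeout.
 */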
172 static int
173 ice_dcf_recv_cmd_rsp_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op,
174 			    uint8_t *rsp_msgbuf, uint16_t rsp_buflen,
175 			    uint16_t *rsp_msglen)
176 {
177 	struct iavf_arq_event_info event;
178 	enum virtchnl_ops v_op;
179 	int i = 0;
180 	int err;
181 
182 	event.buf_len = rsp_buflen;
183 	event.msg_buf = rsp_msgbuf;
184 
185 	do {
186 		err = iavf_clean_arq_element(&hw->avf, &event, NULL);
187 		if (err != IAVF_SUCCESS)
188 			goto again;
189 
190 		v_op = rte_le_to_cpu_32(event.desc.cookie_high);
191 		if (v_op != op)
192 			goto again;
193 
194 		if (rsp_msglen != NULL)
195 			*rsp_msglen = event.msg_len;
196 		return rte_le_to_cpu_32(event.desc.cookie_low);
197 
198 again:
199 		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
200 	} while (i++ < ICE_DCF_ARQ_MAX_RETRIES);
201 
202 	return -EIO;
203 }
204 
205 static __rte_always_inline void
206 ice_dcf_aq_cmd_clear(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
207 {
208 	rte_spinlock_lock(&hw->vc_cmd_queue_lock);
209 
210 	TAILQ_REMOVE(&hw->vc_cmd_queue, cmd, next);
211 
212 	rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
213 }
214 
215 static __rte_always_inline void
216 ice_dcf_vc_cmd_set(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
217 {
218 	cmd->v_ret = IAVF_ERR_NOT_READY;
219 	cmd->rsp_msglen = 0;
220 	cmd->pending = 1;
221 
222 	rte_spinlock_lock(&hw->vc_cmd_queue_lock);
223 
224 	TAILQ_INSERT_TAIL(&hw->vc_cmd_queue, cmd, next);
225 
226 	rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
227 }
228 
229 static __rte_always_inline int
230 ice_dcf_vc_cmd_send(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
231 {
232 	return iavf_aq_send_msg_to_pf(&hw->avf,
233 				      cmd->v_op, IAVF_SUCCESS,
234 				      cmd->req_msg, cmd->req_msglen, NULL);
235 }
236 
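/* Dispatch one admin queue event: forward VIRTCHNL_OP_EVENT messages to the
 * registered event callback, otherwise match the response against a pending
 * command on vc_cmd_queue, copy the reply and clear its pending flag.
 */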
237 static __rte_always_inline void
238 ice_dcf_aq_cmd_handle(struct ice_dcf_hw *hw, struct iavf_arq_event_info *info)
239 {
240 	struct dcf_virtchnl_cmd *cmd;
241 	enum virtchnl_ops v_op;
242 	enum iavf_status v_ret;
243 	uint16_t aq_op;
244 
245 	aq_op = rte_le_to_cpu_16(info->desc.opcode);
246 	if (unlikely(aq_op != iavf_aqc_opc_send_msg_to_vf)) {
247 		PMD_DRV_LOG(ERR,
248 			    "Request %u is not supported yet", aq_op);
249 		return;
250 	}
251 
252 	v_op = rte_le_to_cpu_32(info->desc.cookie_high);
253 	if (v_op == VIRTCHNL_OP_EVENT) {
254 		if (hw->vc_event_msg_cb != NULL)
255 			hw->vc_event_msg_cb(hw,
256 					    info->msg_buf,
257 					    info->msg_len);
258 		return;
259 	}
260 
261 	v_ret = rte_le_to_cpu_32(info->desc.cookie_low);
262 
263 	rte_spinlock_lock(&hw->vc_cmd_queue_lock);
264 
265 	TAILQ_FOREACH(cmd, &hw->vc_cmd_queue, next) {
266 		if (cmd->v_op == v_op && cmd->pending) {
267 			cmd->v_ret = v_ret;
268 			cmd->rsp_msglen = RTE_MIN(info->msg_len,
269 						  cmd->rsp_buflen);
270 			if (likely(cmd->rsp_msglen != 0))
271 				rte_memcpy(cmd->rsp_msgbuf, info->msg_buf,
272 					   cmd->rsp_msglen);
273 
274 			/* prevent compiler reordering */
275 			rte_compiler_barrier();
276 			cmd->pending = 0;
277 			break;
278 		}
279 	}
280 
281 	rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
282 }
283 
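/* Drain all pending admin queue events and handle each of them. Called from
 * the interrupt handler with IRQ0 masked.
 */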
284 static void
285 ice_dcf_handle_virtchnl_msg(struct ice_dcf_hw *hw)
286 {
287 	struct iavf_arq_event_info info;
288 	uint16_t pending = 1;
289 	int ret;
290 
291 	info.buf_len = ICE_DCF_AQ_BUF_SZ;
292 	info.msg_buf = hw->arq_buf;
293 
294 	while (pending && !hw->resetting) {
295 		ret = iavf_clean_arq_element(&hw->avf, &info, &pending);
296 		if (ret != IAVF_SUCCESS)
297 			break;
298 
299 		ice_dcf_aq_cmd_handle(hw, &info);
300 	}
301 }
302 
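/* Negotiate the virtchnl API version with the PF and verify that it falls
 * within the range supported by this driver.
 */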
303 static int
304 ice_dcf_init_check_api_version(struct ice_dcf_hw *hw)
305 {
306 #define ICE_CPF_VIRTCHNL_VERSION_MAJOR_START	1
307 #define ICE_CPF_VIRTCHNL_VERSION_MINOR_START	1
308 	struct virtchnl_version_info version, *pver;
309 	int err;
310 
311 	version.major = VIRTCHNL_VERSION_MAJOR;
312 	version.minor = VIRTCHNL_VERSION_MINOR;
313 	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_VERSION,
314 					  (uint8_t *)&version, sizeof(version));
315 	if (err) {
316 		PMD_INIT_LOG(ERR, "Failed to send OP_VERSION");
317 		return err;
318 	}
319 
320 	pver = &hw->virtchnl_version;
321 	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_VERSION,
322 					  (uint8_t *)pver, sizeof(*pver), NULL);
323 	if (err) {
324 		PMD_INIT_LOG(ERR, "Failed to get response of OP_VERSION");
325 		return -1;
326 	}
327 
328 	PMD_INIT_LOG(DEBUG,
329 		     "Peer PF API version: %u.%u", pver->major, pver->minor);
330 
331 	if (pver->major < ICE_CPF_VIRTCHNL_VERSION_MAJOR_START ||
332 	    (pver->major == ICE_CPF_VIRTCHNL_VERSION_MAJOR_START &&
333 	     pver->minor < ICE_CPF_VIRTCHNL_VERSION_MINOR_START)) {
334 		PMD_INIT_LOG(ERR,
335 			     "VIRTCHNL API version should not be lower than (%u.%u)",
336 			     ICE_CPF_VIRTCHNL_VERSION_MAJOR_START,
337 			     ICE_CPF_VIRTCHNL_VERSION_MINOR_START);
338 		return -1;
339 	} else if (pver->major > VIRTCHNL_VERSION_MAJOR ||
340 		   (pver->major == VIRTCHNL_VERSION_MAJOR &&
341 		    pver->minor > VIRTCHNL_VERSION_MINOR)) {
342 		PMD_INIT_LOG(ERR,
343 			     "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
344 			     pver->major, pver->minor,
345 			     VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);
346 		return -1;
347 	}
348 
349 	PMD_INIT_LOG(DEBUG, "Peer is supported PF host");
350 
351 	return 0;
352 }
353 
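/* Request the VF resources (capabilities and VSIs) from the PF and locate
 * the SR-IOV LAN VSI used by this DCF instance.
 */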
354 static int
355 ice_dcf_get_vf_resource(struct ice_dcf_hw *hw)
356 {
357 	uint32_t caps;
358 	int err, i;
359 
360 	caps = VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RX_POLLING |
361 	       VIRTCHNL_VF_CAP_ADV_LINK_SPEED | VIRTCHNL_VF_CAP_DCF |
362 	       VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
363 	       VF_BASE_MODE_OFFLOADS | VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
364 	       VIRTCHNL_VF_OFFLOAD_QOS | VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
365 
366 	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
367 					  (uint8_t *)&caps, sizeof(caps));
368 	if (err) {
369 		PMD_DRV_LOG(ERR, "Failed to send msg OP_GET_VF_RESOURCE");
370 		return err;
371 	}
372 
373 	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
374 					  (uint8_t *)hw->vf_res,
375 					  ICE_DCF_VF_RES_BUF_SZ, NULL);
376 	if (err) {
377 		PMD_DRV_LOG(ERR, "Failed to get response of OP_GET_VF_RESOURCE");
378 		return -1;
379 	}
380 
381 	iavf_vf_parse_hw_config(&hw->avf, hw->vf_res);
382 
383 	hw->vsi_res = NULL;
384 	for (i = 0; i < hw->vf_res->num_vsis; i++) {
385 		if (hw->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
386 			hw->vsi_res = &hw->vf_res->vsi_res[i];
387 	}
388 
389 	if (!hw->vsi_res) {
390 		PMD_DRV_LOG(ERR, "no LAN VSI found");
391 		return -1;
392 	}
393 
394 	hw->vsi_id = hw->vsi_res->vsi_id;
395 	PMD_DRV_LOG(DEBUG, "VSI ID is %u", hw->vsi_id);
396 
397 	return 0;
398 }
399 
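/* Fetch the VF-to-VSI map from the PF. Returns 0 when the local copy was
 * updated, 1 when the map is unchanged, and a negative value on failure.
 */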
400 static int
401 ice_dcf_get_vf_vsi_map(struct ice_dcf_hw *hw)
402 {
403 	struct virtchnl_dcf_vsi_map *vsi_map;
404 	uint32_t valid_msg_len;
405 	uint16_t len;
406 	int err;
407 
408 	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_GET_VSI_MAP,
409 					  NULL, 0);
410 	if (err) {
411 		PMD_DRV_LOG(ERR, "Failed to send msg OP_DCF_GET_VSI_MAP");
412 		return err;
413 	}
414 
415 	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_DCF_GET_VSI_MAP,
416 					  hw->arq_buf, ICE_DCF_AQ_BUF_SZ,
417 					  &len);
418 	if (err) {
419 		PMD_DRV_LOG(ERR, "Failed to get response of OP_DCF_GET_VSI_MAP");
420 		return err;
421 	}
422 
423 	vsi_map = (struct virtchnl_dcf_vsi_map *)hw->arq_buf;
424 	valid_msg_len = (vsi_map->num_vfs - 1) * sizeof(vsi_map->vf_vsi[0]) +
425 			sizeof(*vsi_map);
426 	if (len != valid_msg_len) {
427 		PMD_DRV_LOG(ERR, "invalid vf vsi map response with length %u",
428 			    len);
429 		return -EINVAL;
430 	}
431 
432 	if (hw->num_vfs != 0 && hw->num_vfs != vsi_map->num_vfs) {
433 		PMD_DRV_LOG(ERR, "The number VSI map (%u) doesn't match the number of VFs (%u)",
434 			    vsi_map->num_vfs, hw->num_vfs);
435 		return -EINVAL;
436 	}
437 
438 	len = vsi_map->num_vfs * sizeof(vsi_map->vf_vsi[0]);
439 
440 	if (!hw->vf_vsi_map) {
441 		hw->vf_vsi_map = rte_zmalloc("vf_vsi_ctx", len, 0);
442 		if (!hw->vf_vsi_map) {
443 			PMD_DRV_LOG(ERR, "Failed to alloc memory for VSI context");
444 			return -ENOMEM;
445 		}
446 
447 		hw->num_vfs = vsi_map->num_vfs;
448 		hw->pf_vsi_id = vsi_map->pf_vsi;
449 	}
450 
451 	if (!memcmp(hw->vf_vsi_map, vsi_map->vf_vsi, len)) {
452 		PMD_DRV_LOG(DEBUG, "VF VSI map doesn't change");
453 		return 1;
454 	}
455 
456 	rte_memcpy(hw->vf_vsi_map, vsi_map->vf_vsi, len);
457 	return 0;
458 }
459 
460 static int
461 ice_dcf_mode_disable(struct ice_dcf_hw *hw)
462 {
463 	int err;
464 
465 	if (hw->resetting)
466 		return 0;
467 
468 	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_DISABLE,
469 					  NULL, 0);
470 	if (err) {
471 		PMD_DRV_LOG(ERR, "Failed to send msg OP_DCF_DISABLE");
472 		return err;
473 	}
474 
475 	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_DCF_DISABLE,
476 					  hw->arq_buf, ICE_DCF_AQ_BUF_SZ, NULL);
477 	if (err) {
478 		PMD_DRV_LOG(ERR,
479 			    "Failed to get response of OP_DCF_DISABLE %d",
480 			    err);
481 		return -1;
482 	}
483 
484 	return 0;
485 }
486 
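/* Poll VFGEN_RSTAT until the VF reports it is active or the reset has
 * completed; give up after ICE_DCF_RESET_WAIT_CNT attempts.
 */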
487 static int
488 ice_dcf_check_reset_done(struct ice_dcf_hw *hw)
489 {
490 #define ICE_DCF_RESET_WAIT_CNT       50
491 	struct iavf_hw *avf = &hw->avf;
492 	int i, reset;
493 
494 	for (i = 0; i < ICE_DCF_RESET_WAIT_CNT; i++) {
495 		reset = IAVF_READ_REG(avf, IAVF_VFGEN_RSTAT) &
496 					IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
497 		reset = reset >> IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT;
498 
499 		if (reset == VIRTCHNL_VFR_VFACTIVE ||
500 		    reset == VIRTCHNL_VFR_COMPLETED)
501 			break;
502 
503 		rte_delay_ms(20);
504 	}
505 
506 	if (i >= ICE_DCF_RESET_WAIT_CNT)
507 		return -1;
508 
509 	return 0;
510 }
511 
512 static inline void
513 ice_dcf_enable_irq0(struct ice_dcf_hw *hw)
514 {
515 	struct iavf_hw *avf = &hw->avf;
516 
517 	/* Enable admin queue interrupt trigger */
518 	IAVF_WRITE_REG(avf, IAVF_VFINT_ICR0_ENA1,
519 		       IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
520 	IAVF_WRITE_REG(avf, IAVF_VFINT_DYN_CTL01,
521 		       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
522 		       IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
523 		       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
524 
525 	IAVF_WRITE_FLUSH(avf);
526 }
527 
528 static inline void
529 ice_dcf_disable_irq0(struct ice_dcf_hw *hw)
530 {
531 	struct iavf_hw *avf = &hw->avf;
532 
533 	/* Disable all interrupt types */
534 	IAVF_WRITE_REG(avf, IAVF_VFINT_ICR0_ENA1, 0);
535 	IAVF_WRITE_REG(avf, IAVF_VFINT_DYN_CTL01,
536 		       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
537 
538 	IAVF_WRITE_FLUSH(avf);
539 }
540 
541 static void
542 ice_dcf_dev_interrupt_handler(void *param)
543 {
544 	struct ice_dcf_hw *hw = param;
545 
546 	ice_dcf_disable_irq0(hw);
547 
548 	ice_dcf_handle_virtchnl_msg(hw);
549 
550 	ice_dcf_enable_irq0(hw);
551 }
552 
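/* Send a virtchnl command and busy-wait for its completion, which is
 * signalled by the interrupt handler clearing cmd->pending once the matching
 * response arrives on the admin queue.
 */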
553 int
554 ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw,
555 			     struct dcf_virtchnl_cmd *cmd)
556 {
557 	int i = 0;
558 	int err;
559 
560 	if ((cmd->req_msg && !cmd->req_msglen) ||
561 	    (!cmd->req_msg && cmd->req_msglen) ||
562 	    (cmd->rsp_msgbuf && !cmd->rsp_buflen) ||
563 	    (!cmd->rsp_msgbuf && cmd->rsp_buflen))
564 		return -EINVAL;
565 
566 	rte_spinlock_lock(&hw->vc_cmd_send_lock);
567 	ice_dcf_vc_cmd_set(hw, cmd);
568 
569 	err = ice_dcf_vc_cmd_send(hw, cmd);
570 	if (err) {
571 		PMD_DRV_LOG(ERR, "fail to send cmd %d", cmd->v_op);
572 		goto ret;
573 	}
574 
575 	do {
576 		if (!cmd->pending)
577 			break;
578 
579 		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
580 	} while (i++ < ICE_DCF_ARQ_MAX_RETRIES);
581 
582 	if (cmd->v_ret != IAVF_SUCCESS) {
583 		err = -1;
584 		PMD_DRV_LOG(ERR,
585 			    "No response (%d times) or return failure (%d) for cmd %d",
586 			    i, cmd->v_ret, cmd->v_op);
587 	}
588 
589 ret:
590 	ice_dcf_aq_cmd_clear(hw, cmd);
591 	rte_spinlock_unlock(&hw->vc_cmd_send_lock);
592 	return err;
593 }
594 
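/* Forward a raw AdminQ descriptor (and optional data buffer) to the PF as a
 * pair of VIRTCHNL_OP_DCF_CMD_DESC / VIRTCHNL_OP_DCF_CMD_BUFF messages and
 * wait for both responses, which are written back into the caller's buffers.
 */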
595 int
596 ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc,
597 		    void *buf, uint16_t buf_size)
598 {
599 	struct dcf_virtchnl_cmd desc_cmd, buff_cmd;
600 	struct ice_dcf_hw *hw = dcf_hw;
601 	int err = 0;
602 	int i = 0;
603 
604 	if ((buf && !buf_size) || (!buf && buf_size) ||
605 	    buf_size > ICE_DCF_AQ_BUF_SZ)
606 		return -EINVAL;
607 
608 	desc_cmd.v_op = VIRTCHNL_OP_DCF_CMD_DESC;
609 	desc_cmd.req_msglen = sizeof(*desc);
610 	desc_cmd.req_msg = (uint8_t *)desc;
611 	desc_cmd.rsp_buflen = sizeof(*desc);
612 	desc_cmd.rsp_msgbuf = (uint8_t *)desc;
613 
614 	if (buf == NULL)
615 		return ice_dcf_execute_virtchnl_cmd(hw, &desc_cmd);
616 
617 	desc->flags |= rte_cpu_to_le_16(ICE_AQ_FLAG_BUF);
618 
619 	buff_cmd.v_op = VIRTCHNL_OP_DCF_CMD_BUFF;
620 	buff_cmd.req_msglen = buf_size;
621 	buff_cmd.req_msg = buf;
622 	buff_cmd.rsp_buflen = buf_size;
623 	buff_cmd.rsp_msgbuf = buf;
624 
625 	rte_spinlock_lock(&hw->vc_cmd_send_lock);
626 	ice_dcf_vc_cmd_set(hw, &desc_cmd);
627 	ice_dcf_vc_cmd_set(hw, &buff_cmd);
628 
629 	if (ice_dcf_vc_cmd_send(hw, &desc_cmd) ||
630 	    ice_dcf_vc_cmd_send(hw, &buff_cmd)) {
631 		err = -1;
632 		PMD_DRV_LOG(ERR, "fail to send OP_DCF_CMD_DESC/BUFF");
633 		goto ret;
634 	}
635 
636 	do {
637 		if (!desc_cmd.pending && !buff_cmd.pending)
638 			break;
639 
640 		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
641 	} while (i++ < ICE_DCF_ARQ_MAX_RETRIES);
642 
643 	if (desc_cmd.v_ret != IAVF_SUCCESS || buff_cmd.v_ret != IAVF_SUCCESS) {
644 		err = -1;
645 		PMD_DRV_LOG(ERR,
646 			    "No response (%d times) or return failure (desc: %d / buff: %d)",
647 			    i, desc_cmd.v_ret, buff_cmd.v_ret);
648 	}
649 
650 ret:
651 	ice_dcf_aq_cmd_clear(hw, &desc_cmd);
652 	ice_dcf_aq_cmd_clear(hw, &buff_cmd);
653 	rte_spinlock_unlock(&hw->vc_cmd_send_lock);
654 
655 	return err;
656 }
657 
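/* Refresh the VF resources and the VF-to-VSI map after a PF VSI update
 * event, retrying for a while with interrupts masked so the responses are
 * polled directly from the admin queue.
 */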
658 int
659 ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw)
660 {
661 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(hw->eth_dev);
662 	int i = 0;
663 	int err = -1;
664 
665 	rte_spinlock_lock(&hw->vc_cmd_send_lock);
666 
667 	rte_intr_disable(pci_dev->intr_handle);
668 	ice_dcf_disable_irq0(hw);
669 
670 	for (;;) {
671 		if (ice_dcf_get_vf_resource(hw) == 0 &&
672 		    ice_dcf_get_vf_vsi_map(hw) >= 0) {
673 			err = 0;
674 			break;
675 		}
676 
677 		if (++i >= ICE_DCF_ARQ_MAX_RETRIES)
678 			break;
679 
680 		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
681 	}
682 
683 	rte_intr_enable(pci_dev->intr_handle);
684 	ice_dcf_enable_irq0(hw);
685 
686 	rte_spinlock_unlock(&hw->vc_cmd_send_lock);
687 
688 	return err;
689 }
690 
691 static int
692 ice_dcf_get_supported_rxdid(struct ice_dcf_hw *hw)
693 {
694 	int err;
695 
696 	err = ice_dcf_send_cmd_req_no_irq(hw,
697 					  VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
698 					  NULL, 0);
699 	if (err) {
700 		PMD_INIT_LOG(ERR, "Failed to send OP_GET_SUPPORTED_RXDIDS");
701 		return -1;
702 	}
703 
704 	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
705 					  (uint8_t *)&hw->supported_rxdid,
706 					  sizeof(uint64_t), NULL);
707 	if (err) {
708 		PMD_INIT_LOG(ERR, "Failed to get response of OP_GET_SUPPORTED_RXDIDS");
709 		return -1;
710 	}
711 
712 	return 0;
713 }
714 
715 static int
716 dcf_get_vlan_offload_caps_v2(struct ice_dcf_hw *hw)
717 {
718 	struct virtchnl_vlan_caps vlan_v2_caps;
719 	struct dcf_virtchnl_cmd args;
720 	int ret;
721 
722 	memset(&args, 0, sizeof(args));
723 	args.v_op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS;
724 	args.rsp_msgbuf = (uint8_t *)&vlan_v2_caps;
725 	args.rsp_buflen = sizeof(vlan_v2_caps);
726 
727 	ret = ice_dcf_execute_virtchnl_cmd(hw, &args);
728 	if (ret) {
729 		PMD_DRV_LOG(ERR,
730 			    "Failed to execute command of VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS");
731 		return ret;
732 	}
733 
734 	rte_memcpy(&hw->vlan_v2_caps, &vlan_v2_caps, sizeof(vlan_v2_caps));
735 	return 0;
736 }
737 
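/* One-time DCF setup: initialize the iavf admin queue, negotiate the
 * virtchnl API version, fetch VF resources and the VSI map, allocate
 * RSS/QoS state according to the advertised capability flags, and register
 * the IRQ0 interrupt handler.
 */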
738 int
739 ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
740 {
741 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
742 	int ret, size;
743 
744 	hw->resetting = false;
745 
746 	hw->avf.hw_addr = pci_dev->mem_resource[0].addr;
747 	hw->avf.back = hw;
748 
749 	hw->avf.bus.bus_id = pci_dev->addr.bus;
750 	hw->avf.bus.device = pci_dev->addr.devid;
751 	hw->avf.bus.func = pci_dev->addr.function;
752 
753 	hw->avf.device_id = pci_dev->id.device_id;
754 	hw->avf.vendor_id = pci_dev->id.vendor_id;
755 	hw->avf.subsystem_device_id = pci_dev->id.subsystem_device_id;
756 	hw->avf.subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
757 
758 	hw->avf.aq.num_arq_entries = ICE_DCF_AQ_LEN;
759 	hw->avf.aq.num_asq_entries = ICE_DCF_AQ_LEN;
760 	hw->avf.aq.arq_buf_size = ICE_DCF_AQ_BUF_SZ;
761 	hw->avf.aq.asq_buf_size = ICE_DCF_AQ_BUF_SZ;
762 
763 	rte_spinlock_init(&hw->vc_cmd_send_lock);
764 	rte_spinlock_init(&hw->vc_cmd_queue_lock);
765 	TAILQ_INIT(&hw->vc_cmd_queue);
766 
767 	rte_atomic_store_explicit(&hw->vsi_update_thread_num, 0, rte_memory_order_relaxed);
768 
769 	hw->arq_buf = rte_zmalloc("arq_buf", ICE_DCF_AQ_BUF_SZ, 0);
770 	if (hw->arq_buf == NULL) {
771 		PMD_INIT_LOG(ERR, "unable to allocate AdminQ buffer memory");
772 		goto err;
773 	}
774 
775 	ret = iavf_set_mac_type(&hw->avf);
776 	if (ret) {
777 		PMD_INIT_LOG(ERR, "set_mac_type failed: %d", ret);
778 		goto err;
779 	}
780 
781 	ret = ice_dcf_check_reset_done(hw);
782 	if (ret) {
783 		PMD_INIT_LOG(ERR, "VF is still resetting");
784 		goto err;
785 	}
786 
787 	ret = iavf_init_adminq(&hw->avf);
788 	if (ret) {
789 		PMD_INIT_LOG(ERR, "init_adminq failed: %d", ret);
790 		goto err;
791 	}
792 
793 	if (ice_dcf_init_check_api_version(hw)) {
794 		PMD_INIT_LOG(ERR, "check_api version failed");
795 		goto err_api;
796 	}
797 
798 	hw->vf_res = rte_zmalloc("vf_res", ICE_DCF_VF_RES_BUF_SZ, 0);
799 	if (hw->vf_res == NULL) {
800 		PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
801 		goto err_api;
802 	}
803 
804 	if (ice_dcf_get_vf_resource(hw)) {
805 		PMD_INIT_LOG(ERR, "Failed to get VF resource");
806 		goto err_alloc;
807 	}
808 
809 	if (ice_dcf_get_vf_vsi_map(hw) < 0) {
810 		PMD_INIT_LOG(ERR, "Failed to get VF VSI map");
811 		ice_dcf_mode_disable(hw);
812 		goto err_alloc;
813 	}
814 
815 	/* Allocate memory for RSS info */
816 	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
817 		hw->rss_key = rte_zmalloc(NULL,
818 					  hw->vf_res->rss_key_size, 0);
819 		if (!hw->rss_key) {
820 			PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
821 			goto err_alloc;
822 		}
823 		hw->rss_lut = rte_zmalloc("rss_lut",
824 					  hw->vf_res->rss_lut_size, 0);
825 		if (!hw->rss_lut) {
826 			PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
827 			goto err_rss;
828 		}
829 	}
830 
831 	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
832 		if (ice_dcf_get_supported_rxdid(hw) != 0) {
833 			PMD_INIT_LOG(ERR, "failed to do get supported rxdid");
834 			goto err_rss;
835 		}
836 	}
837 
838 	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) {
839 		ice_dcf_tm_conf_init(eth_dev);
840 		size = sizeof(struct virtchnl_dcf_bw_cfg_list *) * hw->num_vfs;
841 		hw->qos_bw_cfg = rte_zmalloc("qos_bw_cfg", size, 0);
842 		if (!hw->qos_bw_cfg) {
843 			PMD_INIT_LOG(ERR, "no memory for qos_bw_cfg");
844 			goto err_rss;
845 		}
846 	}
847 
848 	hw->eth_dev = eth_dev;
849 	rte_intr_callback_register(pci_dev->intr_handle,
850 				   ice_dcf_dev_interrupt_handler, hw);
851 	rte_intr_enable(pci_dev->intr_handle);
852 	ice_dcf_enable_irq0(hw);
853 
854 	if ((hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) &&
855 	    dcf_get_vlan_offload_caps_v2(hw))
856 		goto err_rss;
857 
858 	return 0;
859 
860 err_rss:
861 	rte_free(hw->rss_key);
862 	rte_free(hw->rss_lut);
863 err_alloc:
864 	rte_free(hw->vf_res);
865 err_api:
866 	iavf_shutdown_adminq(&hw->avf);
867 err:
868 	rte_free(hw->arq_buf);
869 
870 	return -1;
871 }
872 
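/* Tear down everything set up by ice_dcf_init_hw: disable interrupts, wait
 * for in-flight VSI-update threads, leave DCF mode, shut down the admin
 * queue and release all allocated buffers.
 */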
873 void
874 ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
875 {
876 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
877 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
878 
879 	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
880 		if (hw->tm_conf.committed) {
881 			ice_dcf_clear_bw(hw);
882 			ice_dcf_tm_conf_uninit(eth_dev);
883 		}
884 
885 	ice_dcf_disable_irq0(hw);
886 	rte_intr_disable(intr_handle);
887 	rte_intr_callback_unregister(intr_handle,
888 				     ice_dcf_dev_interrupt_handler, hw);
889 
890 	/* Wait for all `ice-thread` threads to exit. */
891 	while (rte_atomic_load_explicit(&hw->vsi_update_thread_num,
892 		rte_memory_order_acquire) != 0)
893 		rte_delay_ms(ICE_DCF_CHECK_INTERVAL);
894 
895 	ice_dcf_mode_disable(hw);
896 	iavf_shutdown_adminq(&hw->avf);
897 
898 	rte_free(hw->arq_buf);
899 	hw->arq_buf = NULL;
900 
901 	rte_free(hw->vf_vsi_map);
902 	hw->vf_vsi_map = NULL;
903 
904 	rte_free(hw->vf_res);
905 	hw->vf_res = NULL;
906 
907 	rte_free(hw->rss_lut);
908 	hw->rss_lut = NULL;
909 
910 	rte_free(hw->rss_key);
911 	hw->rss_key = NULL;
912 
913 	rte_free(hw->qos_bw_cfg);
914 	hw->qos_bw_cfg = NULL;
915 
916 	rte_free(hw->ets_config);
917 	hw->ets_config = NULL;
918 }
919 
920 int
921 ice_dcf_configure_rss_key(struct ice_dcf_hw *hw)
922 {
923 	struct virtchnl_rss_key *rss_key;
924 	struct dcf_virtchnl_cmd args;
925 	int len, err;
926 
927 	len = sizeof(*rss_key) + hw->vf_res->rss_key_size - 1;
928 	rss_key = rte_zmalloc("rss_key", len, 0);
929 	if (!rss_key)
930 		return -ENOMEM;
931 
932 	rss_key->vsi_id = hw->vsi_res->vsi_id;
933 	rss_key->key_len = hw->vf_res->rss_key_size;
934 	rte_memcpy(rss_key->key, hw->rss_key, hw->vf_res->rss_key_size);
935 
936 	args.v_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
937 	args.req_msglen = len;
938 	args.req_msg = (uint8_t *)rss_key;
939 	args.rsp_msglen = 0;
940 	args.rsp_buflen = 0;
941 	args.rsp_msgbuf = NULL;
942 	args.pending = 0;
943 
944 	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
945 	if (err)
946 		PMD_INIT_LOG(ERR, "Failed to execute OP_CONFIG_RSS_KEY");
947 
948 	rte_free(rss_key);
949 	return err;
950 }
951 
952 int
953 ice_dcf_configure_rss_lut(struct ice_dcf_hw *hw)
954 {
955 	struct virtchnl_rss_lut *rss_lut;
956 	struct dcf_virtchnl_cmd args;
957 	int len, err;
958 
959 	len = sizeof(*rss_lut) + hw->vf_res->rss_lut_size - 1;
960 	rss_lut = rte_zmalloc("rss_lut", len, 0);
961 	if (!rss_lut)
962 		return -ENOMEM;
963 
964 	rss_lut->vsi_id = hw->vsi_res->vsi_id;
965 	rss_lut->lut_entries = hw->vf_res->rss_lut_size;
966 	rte_memcpy(rss_lut->lut, hw->rss_lut, hw->vf_res->rss_lut_size);
967 
968 	args.v_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
969 	args.req_msglen = len;
970 	args.req_msg = (uint8_t *)rss_lut;
971 	args.rsp_msglen = 0;
972 	args.rsp_buflen = 0;
973 	args.rsp_msgbuf = NULL;
974 	args.pending = 0;
975 
976 	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
977 	if (err)
978 		PMD_INIT_LOG(ERR, "Failed to execute OP_CONFIG_RSS_LUT");
979 
980 	rte_free(rss_lut);
981 	return err;
982 }
983 
984 int
985 ice_dcf_add_del_rss_cfg(struct ice_dcf_hw *hw,
986 		     struct virtchnl_rss_cfg *rss_cfg, bool add)
987 {
988 	struct dcf_virtchnl_cmd args;
989 	int err;
990 
991 	memset(&args, 0, sizeof(args));
992 
993 	args.v_op = add ? VIRTCHNL_OP_ADD_RSS_CFG :
994 		VIRTCHNL_OP_DEL_RSS_CFG;
995 	args.req_msglen = sizeof(*rss_cfg);
996 	args.req_msg = (uint8_t *)rss_cfg;
997 	args.rsp_msglen = 0;
998 	args.rsp_buflen = 0;
999 	args.rsp_msgbuf = NULL;
1000 	args.pending = 0;
1001 
1002 	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
1003 	if (err)
1004 		PMD_DRV_LOG(ERR,
1005 			    "Failed to execute command of %s",
1006 			    add ? "OP_ADD_RSS_CFG" :
1007 			    "OP_DEL_RSS_INPUT_CFG");
1008 
1009 	return err;
1010 }
1011 
1012 int
1013 ice_dcf_set_hena(struct ice_dcf_hw *hw, uint64_t hena)
1014 {
1015 	struct virtchnl_rss_hena vrh;
1016 	struct dcf_virtchnl_cmd args;
1017 	int err;
1018 
1019 	memset(&args, 0, sizeof(args));
1020 
1021 	vrh.hena = hena;
1022 	args.v_op = VIRTCHNL_OP_SET_RSS_HENA;
1023 	args.req_msglen = sizeof(vrh);
1024 	args.req_msg = (uint8_t *)&vrh;
1025 	args.rsp_msglen = 0;
1026 	args.rsp_buflen = 0;
1027 	args.rsp_msgbuf = NULL;
1028 	args.pending = 0;
1029 
1030 	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
1031 	if (err)
1032 		PMD_INIT_LOG(ERR, "Failed to execute OP_SET_RSS_HENA");
1033 
1034 	return err;
1035 }
1036 
1037 int
1038 ice_dcf_rss_hash_set(struct ice_dcf_hw *hw, uint64_t rss_hf, bool add)
1039 {
1040 	struct rte_eth_dev *dev = hw->eth_dev;
1041 	struct rte_eth_rss_conf *rss_conf;
1042 	struct virtchnl_rss_cfg rss_cfg;
1043 
1044 	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
1045 #define ICE_DCF_RSS_HF_ALL ( \
1046 	RTE_ETH_RSS_IPV4 | \
1047 	RTE_ETH_RSS_IPV6 | \
1048 	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
1049 	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
1050 	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
1051 	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
1052 	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
1053 	RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
1054 
1055 	rss_cfg.rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
1056 	if (rss_hf & RTE_ETH_RSS_IPV4) {
1057 		rss_cfg.proto_hdrs = ice_dcf_inner_ipv4_tmplt;
1058 		ice_dcf_add_del_rss_cfg(hw, &rss_cfg, add);
1059 	}
1060 
1061 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
1062 		rss_cfg.proto_hdrs = ice_dcf_inner_ipv4_udp_tmplt;
1063 		ice_dcf_add_del_rss_cfg(hw, &rss_cfg, add);
1064 	}
1065 
1066 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
1067 		rss_cfg.proto_hdrs = ice_dcf_inner_ipv4_tcp_tmplt;
1068 		ice_dcf_add_del_rss_cfg(hw, &rss_cfg, add);
1069 	}
1070 
1071 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) {
1072 		rss_cfg.proto_hdrs = ice_dcf_inner_ipv4_sctp_tmplt;
1073 		ice_dcf_add_del_rss_cfg(hw, &rss_cfg, add);
1074 	}
1075 
1076 	if (rss_hf & RTE_ETH_RSS_IPV6) {
1077 		rss_cfg.proto_hdrs = ice_dcf_inner_ipv6_tmplt;
1078 		ice_dcf_add_del_rss_cfg(hw, &rss_cfg, add);
1079 	}
1080 
1081 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
1082 		rss_cfg.proto_hdrs = ice_dcf_inner_ipv6_udp_tmplt;
1083 		ice_dcf_add_del_rss_cfg(hw, &rss_cfg, add);
1084 	}
1085 
1086 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
1087 		rss_cfg.proto_hdrs = ice_dcf_inner_ipv6_tcp_tmplt;
1088 		ice_dcf_add_del_rss_cfg(hw, &rss_cfg, add);
1089 	}
1090 
1091 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) {
1092 		rss_cfg.proto_hdrs = ice_dcf_inner_ipv6_sctp_tmplt;
1093 		ice_dcf_add_del_rss_cfg(hw, &rss_cfg, add);
1094 	}
1095 
1096 	rss_conf->rss_hf = rss_hf & ICE_DCF_RSS_HF_ALL;
1097 	return 0;
1098 }
1099 
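/* Program the initial RSS configuration: build the key (random if none is
 * supplied) and a round-robin LUT over the Rx queues, push both to the PF,
 * then set the hash fields according to rss_conf->rss_hf.
 */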
1100 int
1101 ice_dcf_init_rss(struct ice_dcf_hw *hw)
1102 {
1103 	struct rte_eth_dev *dev = hw->eth_dev;
1104 	struct rte_eth_rss_conf *rss_conf;
1105 	uint8_t j, nb_q;
1106 	size_t i;
1107 	int ret;
1108 
1109 	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
1110 	nb_q = dev->data->nb_rx_queues;
1111 
1112 	if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
1113 		PMD_DRV_LOG(DEBUG, "RSS is not supported");
1114 		return -ENOTSUP;
1115 	}
1116 	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
1117 		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
1118 		/* set all lut items to default queue */
1119 		memset(hw->rss_lut, 0, hw->vf_res->rss_lut_size);
1120 		return ice_dcf_configure_rss_lut(hw);
1121 	}
1122 
1123 	/* In IAVF, RSS enablement is controlled by the PF driver; it cannot
1124 	 * be changed here based on rss_conf->rss_hf.
1125 	 */
1126 
1127 	/* configure RSS key */
1128 	if (!rss_conf->rss_key)
1129 		/* Calculate the default hash key */
1130 		for (i = 0; i < hw->vf_res->rss_key_size; i++)
1131 			hw->rss_key[i] = (uint8_t)rte_rand();
1132 	else
1133 		rte_memcpy(hw->rss_key, rss_conf->rss_key,
1134 			   RTE_MIN(rss_conf->rss_key_len,
1135 				   hw->vf_res->rss_key_size));
1136 
1137 	/* init RSS LUT table */
1138 	for (i = 0, j = 0; i < hw->vf_res->rss_lut_size; i++, j++) {
1139 		if (j >= nb_q)
1140 			j = 0;
1141 		hw->rss_lut[i] = j;
1142 	}
1143 	/* send virtchnl ops to configure RSS */
1144 	ret = ice_dcf_configure_rss_lut(hw);
1145 	if (ret)
1146 		return ret;
1147 	ret = ice_dcf_configure_rss_key(hw);
1148 	if (ret)
1149 		return ret;
1150 
1151 	/* Clear existing RSS. */
1152 	ret = ice_dcf_set_hena(hw, 0);
1153 
1154 	/* It is a workaround, temporarily allow error to be returned
1155 	 * due to possible lack of PF handling for hena = 0.
1156 	 */
1157 	if (ret)
1158 		PMD_DRV_LOG(WARNING, "failed to clean existing RSS, "
1159 				"possibly due to lack of PF support");
1160 
1161 	/* Set RSS hash configuration based on rss_conf->rss_hf. */
1162 	ret = ice_dcf_rss_hash_set(hw, rss_conf->rss_hf, true);
1163 	if (ret) {
1164 		PMD_DRV_LOG(ERR, "fail to set default RSS");
1165 		return ret;
1166 	}
1167 
1168 	return 0;
1169 }
1170 
1171 #define IAVF_RXDID_LEGACY_0 0
1172 #define IAVF_RXDID_LEGACY_1 1
1173 #define IAVF_RXDID_COMMS_OVS_1 22
1174 
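/* Describe every Rx/Tx queue pair (ring addresses, sizes, buffer length and
 * requested RXDID) to the PF with VIRTCHNL_OP_CONFIG_VSI_QUEUES.
 */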
1175 int
1176 ice_dcf_configure_queues(struct ice_dcf_hw *hw)
1177 {
1178 	struct ice_rx_queue **rxq =
1179 		(struct ice_rx_queue **)hw->eth_dev->data->rx_queues;
1180 	struct ci_tx_queue **txq =
1181 		(struct ci_tx_queue **)hw->eth_dev->data->tx_queues;
1182 	struct virtchnl_vsi_queue_config_info *vc_config;
1183 	struct virtchnl_queue_pair_info *vc_qp;
1184 	struct dcf_virtchnl_cmd args;
1185 	uint16_t i, size;
1186 	int err;
1187 
1188 	size = sizeof(*vc_config) +
1189 	       sizeof(vc_config->qpair[0]) * hw->num_queue_pairs;
1190 	vc_config = rte_zmalloc("cfg_queue", size, 0);
1191 	if (!vc_config)
1192 		return -ENOMEM;
1193 
1194 	vc_config->vsi_id = hw->vsi_res->vsi_id;
1195 	vc_config->num_queue_pairs = hw->num_queue_pairs;
1196 
1197 	for (i = 0, vc_qp = vc_config->qpair;
1198 	     i < hw->num_queue_pairs;
1199 	     i++, vc_qp++) {
1200 		vc_qp->txq.vsi_id = hw->vsi_res->vsi_id;
1201 		vc_qp->txq.queue_id = i;
1202 		if (i < hw->eth_dev->data->nb_tx_queues) {
1203 			vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
1204 			vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_dma;
1205 		}
1206 		vc_qp->rxq.vsi_id = hw->vsi_res->vsi_id;
1207 		vc_qp->rxq.queue_id = i;
1208 
1209 		if (i >= hw->eth_dev->data->nb_rx_queues)
1210 			continue;
1211 
1212 		vc_qp->rxq.max_pkt_size = rxq[i]->max_pkt_len;
1213 		vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
1214 		vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_dma;
1215 		vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
1216 
1217 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1218 		if (hw->vf_res->vf_cap_flags &
1219 		    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
1220 		    hw->supported_rxdid &
1221 		    BIT(IAVF_RXDID_COMMS_OVS_1)) {
1222 			vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_OVS_1;
1223 			PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
1224 				    "Queue[%d]", vc_qp->rxq.rxdid, i);
1225 		} else {
1226 			PMD_DRV_LOG(ERR, "RXDID 16 is not supported");
1227 			return -EINVAL;
1228 		}
1229 #else
1230 		if (hw->vf_res->vf_cap_flags &
1231 			VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
1232 			hw->supported_rxdid &
1233 			BIT(IAVF_RXDID_LEGACY_0)) {
1234 			vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_0;
1235 			PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
1236 					"Queue[%d]", vc_qp->rxq.rxdid, i);
1237 		} else {
1238 			PMD_DRV_LOG(ERR, "RXDID == 0 is not supported");
1239 			return -EINVAL;
1240 		}
1241 #endif
1242 		ice_select_rxd_to_pkt_fields_handler(rxq[i], vc_qp->rxq.rxdid);
1243 	}
1244 
1245 	memset(&args, 0, sizeof(args));
1246 	args.v_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
1247 	args.req_msg = (uint8_t *)vc_config;
1248 	args.req_msglen = size;
1249 
1250 	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
1251 	if (err)
1252 		PMD_DRV_LOG(ERR, "Failed to execute command of"
1253 			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
1254 
1255 	rte_free(vc_config);
1256 	return err;
1257 }
1258 
1259 int
1260 ice_dcf_config_irq_map(struct ice_dcf_hw *hw)
1261 {
1262 	struct virtchnl_irq_map_info *map_info;
1263 	struct virtchnl_vector_map *vecmap;
1264 	struct dcf_virtchnl_cmd args;
1265 	int len, i, err;
1266 
1267 	len = sizeof(struct virtchnl_irq_map_info) +
1268 	      sizeof(struct virtchnl_vector_map) * hw->nb_msix;
1269 
1270 	map_info = rte_zmalloc("map_info", len, 0);
1271 	if (!map_info)
1272 		return -ENOMEM;
1273 
1274 	map_info->num_vectors = hw->nb_msix;
1275 	for (i = 0; i < hw->nb_msix; i++) {
1276 		vecmap = &map_info->vecmap[i];
1277 		vecmap->vsi_id = hw->vsi_res->vsi_id;
1278 		vecmap->rxitr_idx = 0;
1279 		vecmap->vector_id = hw->msix_base + i;
1280 		vecmap->txq_map = 0;
1281 		vecmap->rxq_map = hw->rxq_map[hw->msix_base + i];
1282 	}
1283 
1284 	memset(&args, 0, sizeof(args));
1285 	args.v_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
1286 	args.req_msg = (u8 *)map_info;
1287 	args.req_msglen = len;
1288 
1289 	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
1290 	if (err)
1291 		PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
1292 
1293 	rte_free(map_info);
1294 	return err;
1295 }
1296 
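/* Enable or disable a single Rx or Tx queue through
 * VIRTCHNL_OP_ENABLE_QUEUES / VIRTCHNL_OP_DISABLE_QUEUES.
 */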
1297 int
1298 ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on)
1299 {
1300 	struct virtchnl_queue_select queue_select;
1301 	struct dcf_virtchnl_cmd args;
1302 	int err;
1303 
1304 	memset(&queue_select, 0, sizeof(queue_select));
1305 	queue_select.vsi_id = hw->vsi_res->vsi_id;
1306 	if (rx)
1307 		queue_select.rx_queues |= 1 << qid;
1308 	else
1309 		queue_select.tx_queues |= 1 << qid;
1310 
1311 	memset(&args, 0, sizeof(args));
1312 	if (on)
1313 		args.v_op = VIRTCHNL_OP_ENABLE_QUEUES;
1314 	else
1315 		args.v_op = VIRTCHNL_OP_DISABLE_QUEUES;
1316 
1317 	args.req_msg = (u8 *)&queue_select;
1318 	args.req_msglen = sizeof(queue_select);
1319 
1320 	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
1321 	if (err)
1322 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
1323 			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
1324 
1325 	return err;
1326 }
1327 
1328 int
1329 ice_dcf_disable_queues(struct ice_dcf_hw *hw)
1330 {
1331 	struct virtchnl_queue_select queue_select;
1332 	struct dcf_virtchnl_cmd args;
1333 	int err;
1334 
1335 	if (hw->resetting)
1336 		return 0;
1337 
1338 	memset(&queue_select, 0, sizeof(queue_select));
1339 	queue_select.vsi_id = hw->vsi_res->vsi_id;
1340 
1341 	queue_select.rx_queues = BIT(hw->eth_dev->data->nb_rx_queues) - 1;
1342 	queue_select.tx_queues = BIT(hw->eth_dev->data->nb_tx_queues) - 1;
1343 
1344 	memset(&args, 0, sizeof(args));
1345 	args.v_op = VIRTCHNL_OP_DISABLE_QUEUES;
1346 	args.req_msg = (u8 *)&queue_select;
1347 	args.req_msglen = sizeof(queue_select);
1348 
1349 	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
1350 	if (err)
1351 		PMD_DRV_LOG(ERR,
1352 			    "Failed to execute command of OP_DISABLE_QUEUES");
1353 
1354 	return err;
1355 }
1356 
1357 int
1358 ice_dcf_query_stats(struct ice_dcf_hw *hw,
1359 				   struct virtchnl_eth_stats *pstats)
1360 {
1361 	struct virtchnl_queue_select q_stats;
1362 	struct dcf_virtchnl_cmd args;
1363 	int err;
1364 
1365 	memset(&q_stats, 0, sizeof(q_stats));
1366 	q_stats.vsi_id = hw->vsi_res->vsi_id;
1367 
1368 	args.v_op = VIRTCHNL_OP_GET_STATS;
1369 	args.req_msg = (uint8_t *)&q_stats;
1370 	args.req_msglen = sizeof(q_stats);
1371 	args.rsp_msglen = sizeof(*pstats);
1372 	args.rsp_msgbuf = (uint8_t *)pstats;
1373 	args.rsp_buflen = sizeof(*pstats);
1374 
1375 	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
1376 	if (err) {
1377 		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
1378 		return err;
1379 	}
1380 
1381 	return 0;
1382 }
1383 
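/* Add or remove one MAC address of the given virtchnl address type on the
 * DCF VSI. Removal is silently skipped while the VF is resetting.
 */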
1384 int
1385 ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw,
1386 			     struct rte_ether_addr *addr,
1387 			     bool add, uint8_t type)
1388 {
1389 	struct virtchnl_ether_addr_list *list;
1390 	struct dcf_virtchnl_cmd args;
1391 	int len, err = 0;
1392 
1393 	if (hw->resetting) {
1394 		if (!add)
1395 			return 0;
1396 
1397 		PMD_DRV_LOG(ERR, "fail to add all MACs for VF resetting");
1398 		return -EIO;
1399 	}
1400 
1401 	len = sizeof(struct virtchnl_ether_addr_list);
1402 	len += sizeof(struct virtchnl_ether_addr);
1403 
1404 	list = rte_zmalloc(NULL, len, 0);
1405 	if (!list) {
1406 		PMD_DRV_LOG(ERR, "fail to allocate memory");
1407 		return -ENOMEM;
1408 	}
1409 
1410 	rte_memcpy(list->list[0].addr, addr->addr_bytes,
1411 			sizeof(addr->addr_bytes));
1412 
1413 	PMD_DRV_LOG(DEBUG, "add/rm mac:" RTE_ETHER_ADDR_PRT_FMT,
1414 			    RTE_ETHER_ADDR_BYTES(addr));
1415 	list->list[0].type = type;
1416 	list->vsi_id = hw->vsi_res->vsi_id;
1417 	list->num_elements = 1;
1418 
1419 	memset(&args, 0, sizeof(args));
1420 	args.v_op = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
1421 			VIRTCHNL_OP_DEL_ETH_ADDR;
1422 	args.req_msg = (uint8_t *)list;
1423 	args.req_msglen  = len;
1424 	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
1425 	if (err)
1426 		PMD_DRV_LOG(ERR, "fail to execute command %s",
1427 			    add ? "OP_ADD_ETHER_ADDRESS" :
1428 			    "OP_DEL_ETHER_ADDRESS");
1429 	rte_free(list);
1430 	return err;
1431 }
1432