xref: /dpdk/drivers/common/idpf/idpf_common_virtchnl.c (revision 665b49c51639a10c553433bc2bcd85c7331c631e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2023 Intel Corporation
3  */
4 
5 #include <idpf_common_virtchnl.h>
6 #include <idpf_common_logs.h>
7 
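/* Drain the mailbox send queue: poll idpf_ctlq_clean_sq() up to 10 times,
 * 20 ms apart, then free the DMA payload and message wrapper of every
 * completed descriptor. An empty send queue is not treated as an error.
 */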
8 static int
9 idpf_vc_clean(struct idpf_adapter *adapter)
10 {
11 	struct idpf_ctlq_msg *q_msg[IDPF_CTLQ_LEN];
12 	uint16_t num_q_msg = IDPF_CTLQ_LEN;
13 	struct idpf_dma_mem *dma_mem;
14 	int err;
15 	uint32_t i;
16 
17 	for (i = 0; i < 10; i++) {
18 		err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);
19 		msleep(20);
20 		if (num_q_msg > 0)
21 			break;
22 	}
23 	if (err != 0)
24 		return err;
25 
26 	/* Empty queue is not an error */
27 	for (i = 0; i < num_q_msg; i++) {
28 		dma_mem = q_msg[i]->ctx.indirect.payload;
29 		if (dma_mem != NULL) {
30 			idpf_free_dma_mem(&adapter->hw, dma_mem);
31 			rte_free(dma_mem);
32 		}
33 		rte_free(q_msg[i]);
34 	}
35 
36 	return 0;
37 }
38 
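/* Post one virtchnl message of opcode 'op' to the CP through the mailbox
 * send queue: allocate a control-queue descriptor plus an
 * IDPF_DFLT_MBX_BUF_SIZE DMA buffer, copy the message into it and hand both
 * to idpf_ctlq_send(). All allocations are unwound on failure.
 */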
39 static int
40 idpf_send_vc_msg(struct idpf_adapter *adapter, uint32_t op,
41 		 uint16_t msg_size, uint8_t *msg)
42 {
43 	struct idpf_ctlq_msg *ctlq_msg;
44 	struct idpf_dma_mem *dma_mem;
45 	int err;
46 
47 	err = idpf_vc_clean(adapter);
48 	if (err != 0)
49 		goto err;
50 
51 	ctlq_msg = rte_zmalloc(NULL, sizeof(struct idpf_ctlq_msg), 0);
52 	if (ctlq_msg == NULL) {
53 		err = -ENOMEM;
54 		goto err;
55 	}
56 
57 	dma_mem = rte_zmalloc(NULL, sizeof(struct idpf_dma_mem), 0);
58 	if (dma_mem == NULL) {
59 		err = -ENOMEM;
60 		goto dma_mem_error;
61 	}
62 
63 	dma_mem->size = IDPF_DFLT_MBX_BUF_SIZE;
64 	idpf_alloc_dma_mem(&adapter->hw, dma_mem, dma_mem->size);
65 	if (dma_mem->va == NULL) {
66 		err = -ENOMEM;
67 		goto dma_alloc_error;
68 	}
69 
70 	memcpy(dma_mem->va, msg, msg_size);
71 
72 	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_pf;
73 	ctlq_msg->func_id = 0;
74 	ctlq_msg->data_len = msg_size;
75 	ctlq_msg->cookie.mbx.chnl_opcode = op;
76 	ctlq_msg->cookie.mbx.chnl_retval = VIRTCHNL_STATUS_SUCCESS;
77 	ctlq_msg->ctx.indirect.payload = dma_mem;
78 
79 	err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
80 	if (err != 0)
81 		goto send_error;
82 
83 	return 0;
84 
85 send_error:
86 	idpf_free_dma_mem(&adapter->hw, dma_mem);
87 dma_alloc_error:
88 	rte_free(dma_mem);
89 dma_mem_error:
90 	rte_free(ctlq_msg);
91 err:
92 	return err;
93 }
94 
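/* Read one message from the mailbox receive queue and classify it:
 * IDPF_MSG_SYS for a VIRTCHNL2_OP_EVENT, IDPF_MSG_CMD for a reply matching
 * the pending command, IDPF_MSG_ERR on a mismatch or receive failure, and
 * IDPF_MSG_NON when nothing is pending. The Rx DMA buffer is re-posted to
 * the queue, or freed if re-posting fails.
 */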
95 static enum idpf_vc_result
96 idpf_read_msg_from_cp(struct idpf_adapter *adapter, uint16_t buf_len,
97 		      uint8_t *buf)
98 {
99 	struct idpf_hw *hw = &adapter->hw;
100 	struct idpf_ctlq_msg ctlq_msg;
101 	struct idpf_dma_mem *dma_mem = NULL;
102 	enum idpf_vc_result result = IDPF_MSG_NON;
103 	uint32_t opcode;
104 	uint16_t pending = 1;
105 	int ret;
106 
107 	ret = idpf_ctlq_recv(hw->arq, &pending, &ctlq_msg);
108 	if (ret != 0) {
109 		DRV_LOG(DEBUG, "Can't read msg from AQ");
110 		if (ret != -ENOMSG)
111 			result = IDPF_MSG_ERR;
112 		return result;
113 	}
114 
115 	rte_memcpy(buf, ctlq_msg.ctx.indirect.payload->va, buf_len);
116 
117 	opcode = rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_opcode);
118 	adapter->cmd_retval = rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_retval);
119 
120 	DRV_LOG(DEBUG, "CQ from CP carries opcode %u, retval %d",
121 		opcode, adapter->cmd_retval);
122 
123 	if (opcode == VIRTCHNL2_OP_EVENT) {
124 		struct virtchnl2_event *ve = ctlq_msg.ctx.indirect.payload->va;
125 
126 		result = IDPF_MSG_SYS;
127 		switch (ve->event) {
128 		case VIRTCHNL2_EVENT_LINK_CHANGE:
129 			/* TBD */
130 			break;
131 		default:
132 			DRV_LOG(ERR, "%s: Unknown event %d from CP",
133 				__func__, ve->event);
134 			break;
135 		}
136 	} else {
137 		/* Asynchronous reply to a command previously issued by the PF */
138 		result = IDPF_MSG_CMD;
139 		if (opcode != adapter->pend_cmd) {
140 			DRV_LOG(WARNING, "command mismatch, expect %u, get %u",
141 				adapter->pend_cmd, opcode);
142 			result = IDPF_MSG_ERR;
143 		}
144 	}
145 
146 	if (ctlq_msg.data_len != 0)
147 		dma_mem = ctlq_msg.ctx.indirect.payload;
148 	else
149 		pending = 0;
150 
151 	ret = idpf_ctlq_post_rx_buffs(hw, hw->arq, &pending, &dma_mem);
152 	if (ret != 0 && dma_mem != NULL)
153 		idpf_free_dma_mem(hw, dma_mem);
154 
155 	return result;
156 }
157 
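/* The polling loops below retry up to MAX_TRY_TIMES times with ASQ_DELAY_MS
 * milliseconds between attempts, i.e. a worst-case wait of about
 * 200 * 10 ms = 2 s per command.
 */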
158 #define MAX_TRY_TIMES 200
159 #define ASQ_DELAY_MS  10
160 
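/* Poll the mailbox until the reply for 'ops' arrives and copy up to buf_len
 * bytes of its payload into buf. Returns -EBUSY if no reply is seen within
 * the retry budget or if the CP reports a non-success retval.
 */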
161 int
162 idpf_vc_one_msg_read(struct idpf_adapter *adapter, uint32_t ops, uint16_t buf_len,
163 		     uint8_t *buf)
164 {
165 	int err = 0;
166 	int i = 0;
167 	int ret;
168 
169 	do {
170 		ret = idpf_read_msg_from_cp(adapter, buf_len, buf);
171 		if (ret == IDPF_MSG_CMD)
172 			break;
173 		rte_delay_ms(ASQ_DELAY_MS);
174 	} while (i++ < MAX_TRY_TIMES);
175 	if (i >= MAX_TRY_TIMES ||
176 	    adapter->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
177 		err = -EBUSY;
178 		DRV_LOG(ERR, "No response or return failure (%d) for cmd %d",
179 			adapter->cmd_retval, ops);
180 	}
181 
182 	return err;
183 }
184 
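/* Send a virtchnl2 command and wait for it to complete.
 *
 * The op in args->ops is recorded as the pending command (-EINVAL if another
 * command is still outstanding) and sent to the CP. Depending on the op, the
 * reply is then either polled directly (VERSION, GET_CAPS), left to the
 * multi-message handler (GET_PTYPE_INFO), or awaited by watching for the
 * pending command to be cleared by the receive path.
 *
 * Typical caller pattern, as used by the wrappers in this file
 * (see idpf_vc_api_version_check() below):
 *
 *	struct idpf_cmd_info args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.ops = VIRTCHNL2_OP_GET_CAPS;
 *	args.in_args = (uint8_t *)&caps_msg;
 *	args.in_args_size = sizeof(caps_msg);
 *	args.out_buffer = adapter->mbx_resp;
 *	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
 *	err = idpf_vc_cmd_execute(adapter, &args);
 */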
185 int
186 idpf_vc_cmd_execute(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
187 {
188 	int err = 0;
189 	int i = 0;
190 	int ret;
191 
192 	if (atomic_set_cmd(adapter, args->ops))
193 		return -EINVAL;
194 
195 	ret = idpf_send_vc_msg(adapter, args->ops, args->in_args_size, args->in_args);
196 	if (ret != 0) {
197 		DRV_LOG(ERR, "fail to send cmd %d", args->ops);
198 		clear_cmd(adapter);
199 		return ret;
200 	}
201 
202 	switch (args->ops) {
203 	case VIRTCHNL_OP_VERSION:
204 	case VIRTCHNL2_OP_GET_CAPS:
205 		/* For init virtchnl ops, poll the mailbox for the response here */
206 		err = idpf_vc_one_msg_read(adapter, args->ops, args->out_size, args->out_buffer);
207 		clear_cmd(adapter);
208 		break;
209 	case VIRTCHNL2_OP_GET_PTYPE_INFO:
210 		/* This op returns multiple response messages,
211 		 * so the response is not handled here.
212 		 */
213 		break;
214 	default:
215 		/* For other virtchnl ops issued at runtime,
216 		 * wait until the pending command is cleared.
217 		 */
218 		do {
219 			if (adapter->pend_cmd == VIRTCHNL_OP_UNKNOWN)
220 				break;
221 			rte_delay_ms(ASQ_DELAY_MS);
222 			/* Keep polling if no reply was read or only a system event arrived */
223 		} while (i++ < MAX_TRY_TIMES);
224 		/* If no response was received, clear the pending command */
225 		if (i >= MAX_TRY_TIMES ||
226 		    adapter->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
227 			err = -EBUSY;
228 			DRV_LOG(ERR, "No response or return failure (%d) for cmd %d",
229 				adapter->cmd_retval, args->ops);
230 			clear_cmd(adapter);
231 		}
232 		break;
233 	}
234 
235 	return err;
236 }
237 
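/* Negotiate the virtchnl2 API version with the CP. The driver requires
 * exactly version 2.0 and returns -EINVAL for anything else.
 */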
238 int
239 idpf_vc_api_version_check(struct idpf_adapter *adapter)
240 {
241 	struct virtchnl2_version_info version, *pver;
242 	struct idpf_cmd_info args;
243 	int err;
244 
245 	memset(&version, 0, sizeof(struct virtchnl2_version_info));
246 	version.major = VIRTCHNL2_VERSION_MAJOR_2;
247 	version.minor = VIRTCHNL2_VERSION_MINOR_0;
248 
249 	args.ops = VIRTCHNL_OP_VERSION;
250 	args.in_args = (uint8_t *)&version;
251 	args.in_args_size = sizeof(version);
252 	args.out_buffer = adapter->mbx_resp;
253 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
254 
255 	err = idpf_vc_cmd_execute(adapter, &args);
256 	if (err != 0) {
257 		DRV_LOG(ERR,
258 			"Failed to execute command of VIRTCHNL_OP_VERSION");
259 		return err;
260 	}
261 
262 	pver = (struct virtchnl2_version_info *)args.out_buffer;
263 	adapter->virtchnl_version = *pver;
264 
265 	if (adapter->virtchnl_version.major != VIRTCHNL2_VERSION_MAJOR_2 ||
266 	    adapter->virtchnl_version.minor != VIRTCHNL2_VERSION_MINOR_0) {
267 		DRV_LOG(ERR, "VIRTCHNL API version mismatch:(%u.%u)-(%u.%u)",
268 			adapter->virtchnl_version.major,
269 			adapter->virtchnl_version.minor,
270 			VIRTCHNL2_VERSION_MAJOR_2,
271 			VIRTCHNL2_VERSION_MINOR_0);
272 		return -EINVAL;
273 	}
274 
275 	return 0;
276 }
277 
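/* Request the checksum, RSS and miscellaneous capabilities listed below and
 * cache the CP's reply in adapter->caps.
 */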
278 int
279 idpf_vc_caps_get(struct idpf_adapter *adapter)
280 {
281 	struct virtchnl2_get_capabilities caps_msg;
282 	struct idpf_cmd_info args;
283 	int err;
284 
285 	memset(&caps_msg, 0, sizeof(struct virtchnl2_get_capabilities));
286 
287 	caps_msg.csum_caps =
288 		VIRTCHNL2_CAP_TX_CSUM_L3_IPV4          |
289 		VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP      |
290 		VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP      |
291 		VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP     |
292 		VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP      |
293 		VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP      |
294 		VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP     |
295 		VIRTCHNL2_CAP_TX_CSUM_GENERIC          |
296 		VIRTCHNL2_CAP_RX_CSUM_L3_IPV4          |
297 		VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP      |
298 		VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP      |
299 		VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP     |
300 		VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP      |
301 		VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP      |
302 		VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP     |
303 		VIRTCHNL2_CAP_RX_CSUM_GENERIC;
304 
305 	caps_msg.rss_caps =
306 		VIRTCHNL2_CAP_RSS_IPV4_TCP             |
307 		VIRTCHNL2_CAP_RSS_IPV4_UDP             |
308 		VIRTCHNL2_CAP_RSS_IPV4_SCTP            |
309 		VIRTCHNL2_CAP_RSS_IPV4_OTHER           |
310 		VIRTCHNL2_CAP_RSS_IPV6_TCP             |
311 		VIRTCHNL2_CAP_RSS_IPV6_UDP             |
312 		VIRTCHNL2_CAP_RSS_IPV6_SCTP            |
313 		VIRTCHNL2_CAP_RSS_IPV6_OTHER           |
314 		VIRTCHNL2_CAP_RSS_IPV4_AH              |
315 		VIRTCHNL2_CAP_RSS_IPV4_ESP             |
316 		VIRTCHNL2_CAP_RSS_IPV4_AH_ESP          |
317 		VIRTCHNL2_CAP_RSS_IPV6_AH              |
318 		VIRTCHNL2_CAP_RSS_IPV6_ESP             |
319 		VIRTCHNL2_CAP_RSS_IPV6_AH_ESP;
320 
321 	caps_msg.other_caps = VIRTCHNL2_CAP_WB_ON_ITR;
322 
323 	args.ops = VIRTCHNL2_OP_GET_CAPS;
324 	args.in_args = (uint8_t *)&caps_msg;
325 	args.in_args_size = sizeof(caps_msg);
326 	args.out_buffer = adapter->mbx_resp;
327 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
328 
329 	err = idpf_vc_cmd_execute(adapter, &args);
330 	if (err != 0) {
331 		DRV_LOG(ERR,
332 			"Failed to execute command of VIRTCHNL2_OP_GET_CAPS");
333 		return err;
334 	}
335 
336 	rte_memcpy(&adapter->caps, args.out_buffer, sizeof(caps_msg));
337 
338 	return 0;
339 }
340 
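/* Ask the CP to create a vport with the requested queue model and queue
 * counts. The full virtchnl2_create_vport reply (vport ID, queue chunks and
 * so on) is copied into vport->vport_info.
 */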
341 int
342 idpf_vc_vport_create(struct idpf_vport *vport,
343 		     struct virtchnl2_create_vport *create_vport_info)
344 {
345 	struct idpf_adapter *adapter = vport->adapter;
346 	struct virtchnl2_create_vport vport_msg;
347 	struct idpf_cmd_info args;
348 	int err = -1;
349 
350 	memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));
351 	vport_msg.vport_type = create_vport_info->vport_type;
352 	vport_msg.txq_model = create_vport_info->txq_model;
353 	vport_msg.rxq_model = create_vport_info->rxq_model;
354 	vport_msg.num_tx_q = create_vport_info->num_tx_q;
355 	vport_msg.num_tx_complq = create_vport_info->num_tx_complq;
356 	vport_msg.num_rx_q = create_vport_info->num_rx_q;
357 	vport_msg.num_rx_bufq = create_vport_info->num_rx_bufq;
358 
359 	memset(&args, 0, sizeof(args));
360 	args.ops = VIRTCHNL2_OP_CREATE_VPORT;
361 	args.in_args = (uint8_t *)&vport_msg;
362 	args.in_args_size = sizeof(vport_msg);
363 	args.out_buffer = adapter->mbx_resp;
364 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
365 
366 	err = idpf_vc_cmd_execute(adapter, &args);
367 	if (err != 0) {
368 		DRV_LOG(ERR,
369 			"Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
370 		return err;
371 	}
372 
373 	rte_memcpy(&(vport->vport_info.info), args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
374 	return 0;
375 }
376 
377 int
378 idpf_vc_vport_destroy(struct idpf_vport *vport)
379 {
380 	struct idpf_adapter *adapter = vport->adapter;
381 	struct virtchnl2_vport vc_vport;
382 	struct idpf_cmd_info args;
383 	int err;
384 
385 	vc_vport.vport_id = vport->vport_id;
386 
387 	memset(&args, 0, sizeof(args));
388 	args.ops = VIRTCHNL2_OP_DESTROY_VPORT;
389 	args.in_args = (uint8_t *)&vc_vport;
390 	args.in_args_size = sizeof(vc_vport);
391 	args.out_buffer = adapter->mbx_resp;
392 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
393 
394 	err = idpf_vc_cmd_execute(adapter, &args);
395 	if (err != 0)
396 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_DESTROY_VPORT");
397 
398 	return err;
399 }
400 
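/* The RSS and vector messages below use virtchnl2 structures that end in a
 * one-element array, so each buffer is sized as sizeof(struct) plus
 * (n - 1) additional trailing elements.
 */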
401 int
402 idpf_vc_rss_key_set(struct idpf_vport *vport)
403 {
404 	struct idpf_adapter *adapter = vport->adapter;
405 	struct virtchnl2_rss_key *rss_key;
406 	struct idpf_cmd_info args;
407 	int len, err;
408 
409 	len = sizeof(*rss_key) + sizeof(rss_key->key[0]) *
410 		(vport->rss_key_size - 1);
411 	rss_key = rte_zmalloc("rss_key", len, 0);
412 	if (rss_key == NULL)
413 		return -ENOMEM;
414 
415 	rss_key->vport_id = vport->vport_id;
416 	rss_key->key_len = vport->rss_key_size;
417 	rte_memcpy(rss_key->key, vport->rss_key,
418 		   sizeof(rss_key->key[0]) * vport->rss_key_size);
419 
420 	memset(&args, 0, sizeof(args));
421 	args.ops = VIRTCHNL2_OP_SET_RSS_KEY;
422 	args.in_args = (uint8_t *)rss_key;
423 	args.in_args_size = len;
424 	args.out_buffer = adapter->mbx_resp;
425 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
426 
427 	err = idpf_vc_cmd_execute(adapter, &args);
428 	if (err != 0)
429 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_SET_RSS_KEY");
430 
431 	rte_free(rss_key);
432 	return err;
433 }
434 
435 int idpf_vc_rss_key_get(struct idpf_vport *vport)
436 {
437 	struct idpf_adapter *adapter = vport->adapter;
438 	struct virtchnl2_rss_key *rss_key_ret;
439 	struct virtchnl2_rss_key rss_key;
440 	struct idpf_cmd_info args;
441 	int err;
442 
443 	memset(&rss_key, 0, sizeof(rss_key));
444 	rss_key.vport_id = vport->vport_id;
445 
446 	memset(&args, 0, sizeof(args));
447 	args.ops = VIRTCHNL2_OP_GET_RSS_KEY;
448 	args.in_args = (uint8_t *)&rss_key;
449 	args.in_args_size = sizeof(rss_key);
450 	args.out_buffer = adapter->mbx_resp;
451 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
452 
453 	err = idpf_vc_cmd_execute(adapter, &args);
454 
455 	if (!err) {
456 		rss_key_ret = (struct virtchnl2_rss_key *)args.out_buffer;
457 		if (rss_key_ret->key_len != vport->rss_key_size) {
458 			rte_free(vport->rss_key);
459 			vport->rss_key = NULL;
460 			vport->rss_key_size = RTE_MIN(IDPF_RSS_KEY_LEN,
461 						      rss_key_ret->key_len);
462 			vport->rss_key = rte_zmalloc("rss_key", vport->rss_key_size, 0);
463 			if (!vport->rss_key) {
464 				vport->rss_key_size = 0;
465 				DRV_LOG(ERR, "Failed to allocate RSS key");
466 				return -ENOMEM;
467 			}
468 		}
469 		rte_memcpy(vport->rss_key, rss_key_ret->key, vport->rss_key_size);
470 	} else {
471 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_RSS_KEY");
472 	}
473 
474 	return err;
475 }
476 
477 int
478 idpf_vc_rss_lut_set(struct idpf_vport *vport)
479 {
480 	struct idpf_adapter *adapter = vport->adapter;
481 	struct virtchnl2_rss_lut *rss_lut;
482 	struct idpf_cmd_info args;
483 	int len, err;
484 
485 	len = sizeof(*rss_lut) + sizeof(rss_lut->lut[0]) *
486 		(vport->rss_lut_size - 1);
487 	rss_lut = rte_zmalloc("rss_lut", len, 0);
488 	if (rss_lut == NULL)
489 		return -ENOMEM;
490 
491 	rss_lut->vport_id = vport->vport_id;
492 	rss_lut->lut_entries = vport->rss_lut_size;
493 	rte_memcpy(rss_lut->lut, vport->rss_lut,
494 		   sizeof(rss_lut->lut[0]) * vport->rss_lut_size);
495 
496 	memset(&args, 0, sizeof(args));
497 	args.ops = VIRTCHNL2_OP_SET_RSS_LUT;
498 	args.in_args = (uint8_t *)rss_lut;
499 	args.in_args_size = len;
500 	args.out_buffer = adapter->mbx_resp;
501 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
502 
503 	err = idpf_vc_cmd_execute(adapter, &args);
504 	if (err != 0)
505 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_SET_RSS_LUT");
506 
507 	rte_free(rss_lut);
508 	return err;
509 }
510 
511 int
512 idpf_vc_rss_lut_get(struct idpf_vport *vport)
513 {
514 	struct idpf_adapter *adapter = vport->adapter;
515 	struct virtchnl2_rss_lut *rss_lut_ret;
516 	struct virtchnl2_rss_lut rss_lut;
517 	struct idpf_cmd_info args;
518 	int err;
519 
520 	memset(&rss_lut, 0, sizeof(rss_lut));
521 	rss_lut.vport_id = vport->vport_id;
522 
523 	memset(&args, 0, sizeof(args));
524 	args.ops = VIRTCHNL2_OP_GET_RSS_LUT;
525 	args.in_args = (uint8_t *)&rss_lut;
526 	args.in_args_size = sizeof(rss_lut);
527 	args.out_buffer = adapter->mbx_resp;
528 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
529 
530 	err = idpf_vc_cmd_execute(adapter, &args);
531 
532 	if (!err) {
533 		rss_lut_ret = (struct virtchnl2_rss_lut *)args.out_buffer;
534 		if (rss_lut_ret->lut_entries != vport->rss_lut_size) {
535 			rte_free(vport->rss_lut);
536 			vport->rss_lut = NULL;
537 			vport->rss_lut = rte_zmalloc("rss_lut",
538 				     sizeof(uint32_t) * rss_lut_ret->lut_entries, 0);
539 			if (vport->rss_lut == NULL) {
540 				DRV_LOG(ERR, "Failed to allocate RSS lut");
541 				return -ENOMEM;
542 			}
543 		}
544 		rte_memcpy(vport->rss_lut, rss_lut_ret->lut, rss_lut_ret->lut_entries * sizeof(rss_lut_ret->lut[0]));
545 		vport->rss_lut_size = rss_lut_ret->lut_entries;
546 	} else {
547 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_RSS_LUT");
548 	}
549 
550 	return err;
551 }
552 
553 int
554 idpf_vc_rss_hash_get(struct idpf_vport *vport)
555 {
556 	struct idpf_adapter *adapter = vport->adapter;
557 	struct virtchnl2_rss_hash *rss_hash_ret;
558 	struct virtchnl2_rss_hash rss_hash;
559 	struct idpf_cmd_info args;
560 	int err;
561 
562 	memset(&rss_hash, 0, sizeof(rss_hash));
563 	rss_hash.ptype_groups = vport->rss_hf;
564 	rss_hash.vport_id = vport->vport_id;
565 
566 	memset(&args, 0, sizeof(args));
567 	args.ops = VIRTCHNL2_OP_GET_RSS_HASH;
568 	args.in_args = (uint8_t *)&rss_hash;
569 	args.in_args_size = sizeof(rss_hash);
570 	args.out_buffer = adapter->mbx_resp;
571 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
572 
573 	err = idpf_vc_cmd_execute(adapter, &args);
574 
575 	if (!err) {
576 		rss_hash_ret = (struct virtchnl2_rss_hash *)args.out_buffer;
577 		vport->rss_hf = rss_hash_ret->ptype_groups;
578 	} else {
579 		DRV_LOG(ERR, "Failed to execute command of OP_GET_RSS_HASH");
580 	}
581 
582 	return err;
583 }
584 
585 int
586 idpf_vc_rss_hash_set(struct idpf_vport *vport)
587 {
588 	struct idpf_adapter *adapter = vport->adapter;
589 	struct virtchnl2_rss_hash rss_hash;
590 	struct idpf_cmd_info args;
591 	int err;
592 
593 	memset(&rss_hash, 0, sizeof(rss_hash));
594 	rss_hash.ptype_groups = vport->rss_hf;
595 	rss_hash.vport_id = vport->vport_id;
596 
597 	memset(&args, 0, sizeof(args));
598 	args.ops = VIRTCHNL2_OP_SET_RSS_HASH;
599 	args.in_args = (uint8_t *)&rss_hash;
600 	args.in_args_size = sizeof(rss_hash);
601 	args.out_buffer = adapter->mbx_resp;
602 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
603 
604 	err = idpf_vc_cmd_execute(adapter, &args);
605 	if (err != 0)
606 		DRV_LOG(ERR, "Failed to execute command of OP_SET_RSS_HASH");
607 
608 	return err;
609 }
610 
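/* Build one queue-to-vector map entry per Rx queue from vport->qv_map
 * (ITR index 0) and issue either the MAP or the UNMAP request depending
 * on 'map'.
 */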
611 int
612 idpf_vc_irq_map_unmap_config(struct idpf_vport *vport, uint16_t nb_rxq, bool map)
613 {
614 	struct idpf_adapter *adapter = vport->adapter;
615 	struct virtchnl2_queue_vector_maps *map_info;
616 	struct virtchnl2_queue_vector *vecmap;
617 	struct idpf_cmd_info args;
618 	int len, i, err = 0;
619 
620 	len = sizeof(struct virtchnl2_queue_vector_maps) +
621 		(nb_rxq - 1) * sizeof(struct virtchnl2_queue_vector);
622 
623 	map_info = rte_zmalloc("map_info", len, 0);
624 	if (map_info == NULL)
625 		return -ENOMEM;
626 
627 	map_info->vport_id = vport->vport_id;
628 	map_info->num_qv_maps = nb_rxq;
629 	for (i = 0; i < nb_rxq; i++) {
630 		vecmap = &map_info->qv_maps[i];
631 		vecmap->queue_id = vport->qv_map[i].queue_id;
632 		vecmap->vector_id = vport->qv_map[i].vector_id;
633 		vecmap->itr_idx = VIRTCHNL2_ITR_IDX_0;
634 		vecmap->queue_type = VIRTCHNL2_QUEUE_TYPE_RX;
635 	}
636 
637 	args.ops = map ? VIRTCHNL2_OP_MAP_QUEUE_VECTOR :
638 		VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
639 	args.in_args = (uint8_t *)map_info;
640 	args.in_args_size = len;
641 	args.out_buffer = adapter->mbx_resp;
642 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
643 	err = idpf_vc_cmd_execute(adapter, &args);
644 	if (err != 0)
645 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUE_VECTOR",
646 			map ? "MAP" : "UNMAP");
647 
648 	rte_free(map_info);
649 	return err;
650 }
651 
652 int
653 idpf_vc_vectors_alloc(struct idpf_vport *vport, uint16_t num_vectors)
654 {
655 	struct idpf_adapter *adapter = vport->adapter;
656 	struct virtchnl2_alloc_vectors *alloc_vec;
657 	struct idpf_cmd_info args;
658 	int err, len;
659 
660 	len = sizeof(struct virtchnl2_alloc_vectors) +
661 		(num_vectors - 1) * sizeof(struct virtchnl2_vector_chunk);
662 	alloc_vec = rte_zmalloc("alloc_vec", len, 0);
663 	if (alloc_vec == NULL)
664 		return -ENOMEM;
665 
666 	alloc_vec->num_vectors = num_vectors;
667 
668 	args.ops = VIRTCHNL2_OP_ALLOC_VECTORS;
669 	args.in_args = (uint8_t *)alloc_vec;
670 	args.in_args_size = len;
671 	args.out_buffer = adapter->mbx_resp;
672 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
673 	err = idpf_vc_cmd_execute(adapter, &args);
674 	if (err != 0)
675 		DRV_LOG(ERR, "Failed to execute command VIRTCHNL2_OP_ALLOC_VECTORS");
676 
677 	rte_memcpy(vport->recv_vectors, args.out_buffer, len);
678 	rte_free(alloc_vec);
679 	return err;
680 }
681 
682 int
683 idpf_vc_vectors_dealloc(struct idpf_vport *vport)
684 {
685 	struct idpf_adapter *adapter = vport->adapter;
686 	struct virtchnl2_alloc_vectors *alloc_vec;
687 	struct virtchnl2_vector_chunks *vcs;
688 	struct idpf_cmd_info args;
689 	int err, len;
690 
691 	alloc_vec = vport->recv_vectors;
692 	vcs = &alloc_vec->vchunks;
693 
694 	len = sizeof(struct virtchnl2_vector_chunks) +
695 		(vcs->num_vchunks - 1) * sizeof(struct virtchnl2_vector_chunk);
696 
697 	args.ops = VIRTCHNL2_OP_DEALLOC_VECTORS;
698 	args.in_args = (uint8_t *)vcs;
699 	args.in_args_size = len;
700 	args.out_buffer = adapter->mbx_resp;
701 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
702 	err = idpf_vc_cmd_execute(adapter, &args);
703 	if (err != 0)
704 		DRV_LOG(ERR, "Failed to execute command VIRTCHNL2_OP_DEALLOC_VECTORS");
705 
706 	return err;
707 }
708 
709 static int
710 idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
711 			  uint32_t type, bool on)
712 {
713 	struct idpf_adapter *adapter = vport->adapter;
714 	struct virtchnl2_del_ena_dis_queues *queue_select;
715 	struct virtchnl2_queue_chunk *queue_chunk;
716 	struct idpf_cmd_info args;
717 	int err, len;
718 
719 	len = sizeof(struct virtchnl2_del_ena_dis_queues);
720 	queue_select = rte_zmalloc("queue_select", len, 0);
721 	if (queue_select == NULL)
722 		return -ENOMEM;
723 
724 	queue_chunk = queue_select->chunks.chunks;
725 	queue_select->chunks.num_chunks = 1;
726 	queue_select->vport_id = vport->vport_id;
727 
728 	queue_chunk->type = type;
729 	queue_chunk->start_queue_id = qid;
730 	queue_chunk->num_queues = 1;
731 
732 	args.ops = on ? VIRTCHNL2_OP_ENABLE_QUEUES :
733 		VIRTCHNL2_OP_DISABLE_QUEUES;
734 	args.in_args = (uint8_t *)queue_select;
735 	args.in_args_size = len;
736 	args.out_buffer = adapter->mbx_resp;
737 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
738 	err = idpf_vc_cmd_execute(adapter, &args);
739 	if (err != 0)
740 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES",
741 			on ? "ENABLE" : "DISABLE");
742 
743 	rte_free(queue_select);
744 	return err;
745 }
746 
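/* Enable or disable the qid-th Rx or Tx queue. In the split queue model the
 * companion queues are toggled as well: the Tx completion queue for a Tx
 * queue, or the two Rx buffer queues (at rx_buf_start_qid + 2 * qid and
 * 2 * qid + 1) for an Rx queue.
 */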
747 int
748 idpf_vc_queue_switch(struct idpf_vport *vport, uint16_t qid,
749 		     bool rx, bool on)
750 {
751 	uint32_t type;
752 	int err, queue_id;
753 
754 	/* switch txq/rxq */
755 	type = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;
756 
757 	if (type == VIRTCHNL2_QUEUE_TYPE_RX)
758 		queue_id = vport->chunks_info.rx_start_qid + qid;
759 	else
760 		queue_id = vport->chunks_info.tx_start_qid + qid;
761 	err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
762 	if (err != 0)
763 		return err;
764 
765 	/* switch tx completion queue */
766 	if (!rx && vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
767 		type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
768 		queue_id = vport->chunks_info.tx_compl_start_qid + qid;
769 		err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
770 		if (err != 0)
771 			return err;
772 	}
773 
774 	/* switch rx buffer queue */
775 	if (rx && vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
776 		type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
777 		queue_id = vport->chunks_info.rx_buf_start_qid + 2 * qid;
778 		err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
779 		if (err != 0)
780 			return err;
781 		queue_id++;
782 		err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
783 		if (err != 0)
784 			return err;
785 	}
786 
787 	return err;
788 }
789 
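/* Enable or disable all queues of the vport in a single request. One chunk
 * is filled per queue type, and the chunk array is indexed by the queue-type
 * value itself; this relies on the VIRTCHNL2 queue-type constants being
 * small enough to act as indices into the allocated chunk array.
 */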
790 #define IDPF_RXTX_QUEUE_CHUNKS_NUM	2
791 int
792 idpf_vc_queues_ena_dis(struct idpf_vport *vport, bool enable)
793 {
794 	struct idpf_adapter *adapter = vport->adapter;
795 	struct virtchnl2_del_ena_dis_queues *queue_select;
796 	struct virtchnl2_queue_chunk *queue_chunk;
797 	uint32_t type;
798 	struct idpf_cmd_info args;
799 	uint16_t num_chunks;
800 	int err, len;
801 
802 	num_chunks = IDPF_RXTX_QUEUE_CHUNKS_NUM;
803 	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
804 		num_chunks++;
805 	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
806 		num_chunks++;
807 
808 	len = sizeof(struct virtchnl2_del_ena_dis_queues) +
809 		sizeof(struct virtchnl2_queue_chunk) * (num_chunks - 1);
810 	queue_select = rte_zmalloc("queue_select", len, 0);
811 	if (queue_select == NULL)
812 		return -ENOMEM;
813 
814 	queue_chunk = queue_select->chunks.chunks;
815 	queue_select->chunks.num_chunks = num_chunks;
816 	queue_select->vport_id = vport->vport_id;
817 
818 	type = VIRTCHNL2_QUEUE_TYPE_RX;
819 	queue_chunk[type].type = type;
820 	queue_chunk[type].start_queue_id = vport->chunks_info.rx_start_qid;
821 	queue_chunk[type].num_queues = vport->num_rx_q;
822 
823 	type = VIRTCHNL2_QUEUE_TYPE_TX;
824 	queue_chunk[type].type = type;
825 	queue_chunk[type].start_queue_id = vport->chunks_info.tx_start_qid;
826 	queue_chunk[type].num_queues = vport->num_tx_q;
827 
828 	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
829 		type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
830 		queue_chunk[type].type = type;
831 		queue_chunk[type].start_queue_id =
832 			vport->chunks_info.rx_buf_start_qid;
833 		queue_chunk[type].num_queues = vport->num_rx_bufq;
834 	}
835 
836 	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
837 		type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
838 		queue_chunk[type].type = type;
839 		queue_chunk[type].start_queue_id =
840 			vport->chunks_info.tx_compl_start_qid;
841 		queue_chunk[type].num_queues = vport->num_tx_complq;
842 	}
843 
844 	args.ops = enable ? VIRTCHNL2_OP_ENABLE_QUEUES :
845 		VIRTCHNL2_OP_DISABLE_QUEUES;
846 	args.in_args = (uint8_t *)queue_select;
847 	args.in_args_size = len;
848 	args.out_buffer = adapter->mbx_resp;
849 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
850 	err = idpf_vc_cmd_execute(adapter, &args);
851 	if (err != 0)
852 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES",
853 			enable ? "ENABLE" : "DISABLE");
854 
855 	rte_free(queue_select);
856 	return err;
857 }
858 
859 int
860 idpf_vc_vport_ena_dis(struct idpf_vport *vport, bool enable)
861 {
862 	struct idpf_adapter *adapter = vport->adapter;
863 	struct virtchnl2_vport vc_vport;
864 	struct idpf_cmd_info args;
865 	int err;
866 
867 	vc_vport.vport_id = vport->vport_id;
868 	args.ops = enable ? VIRTCHNL2_OP_ENABLE_VPORT :
869 		VIRTCHNL2_OP_DISABLE_VPORT;
870 	args.in_args = (uint8_t *)&vc_vport;
871 	args.in_args_size = sizeof(vc_vport);
872 	args.out_buffer = adapter->mbx_resp;
873 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
874 
875 	err = idpf_vc_cmd_execute(adapter, &args);
876 	if (err != 0) {
877 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_VPORT",
878 			enable ? "ENABLE" : "DISABLE");
879 	}
880 
881 	return err;
882 }
883 
884 int
885 idpf_vc_ptype_info_query(struct idpf_adapter *adapter)
886 {
887 	struct virtchnl2_get_ptype_info *ptype_info;
888 	struct idpf_cmd_info args;
889 	int len, err;
890 
891 	len = sizeof(struct virtchnl2_get_ptype_info);
892 	ptype_info = rte_zmalloc("ptype_info", len, 0);
893 	if (ptype_info == NULL)
894 		return -ENOMEM;
895 
896 	ptype_info->start_ptype_id = 0;
897 	ptype_info->num_ptypes = IDPF_MAX_PKT_TYPE;
898 	args.ops = VIRTCHNL2_OP_GET_PTYPE_INFO;
899 	args.in_args = (uint8_t *)ptype_info;
900 	args.in_args_size = len;
901 
902 	err = idpf_vc_cmd_execute(adapter, &args);
903 	if (err != 0)
904 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_PTYPE_INFO");
905 
906 	rte_free(ptype_info);
907 	return err;
908 }
909 
910 int
911 idpf_vc_stats_query(struct idpf_vport *vport,
912 		struct virtchnl2_vport_stats **pstats)
913 {
914 	struct idpf_adapter *adapter = vport->adapter;
915 	struct virtchnl2_vport_stats vport_stats;
916 	struct idpf_cmd_info args;
917 	int err;
918 
919 	vport_stats.vport_id = vport->vport_id;
920 	args.ops = VIRTCHNL2_OP_GET_STATS;
921 	args.in_args = (u8 *)&vport_stats;
922 	args.in_args_size = sizeof(vport_stats);
923 	args.out_buffer = adapter->mbx_resp;
924 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
925 
926 	err = idpf_vc_cmd_execute(adapter, &args);
927 	if (err) {
928 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_STATS");
929 		*pstats = NULL;
930 		return err;
931 	}
932 	*pstats = (struct virtchnl2_vport_stats *)args.out_buffer;
933 	return 0;
934 }
935 
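/* Configure one Rx queue group. In the single queue model only the Rx queue
 * itself is described; in the split model the Rx queue and its two buffer
 * queues are configured together, using 32-byte flex descriptors, a buffer
 * notification stride of IDPF_RX_BUF_STRIDE and a low watermark of 64.
 */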
936 #define IDPF_RX_BUF_STRIDE		64
937 int
938 idpf_vc_rxq_config(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
939 {
940 	struct idpf_adapter *adapter = vport->adapter;
941 	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
942 	struct virtchnl2_rxq_info *rxq_info;
943 	struct idpf_cmd_info args;
944 	uint16_t num_qs;
945 	int size, err, i;
946 
947 	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
948 		num_qs = IDPF_RXQ_PER_GRP;
949 	else
950 		num_qs = IDPF_RXQ_PER_GRP + IDPF_RX_BUFQ_PER_GRP;
951 
952 	size = sizeof(*vc_rxqs) + (num_qs - 1) *
953 		sizeof(struct virtchnl2_rxq_info);
954 	vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
955 	if (vc_rxqs == NULL) {
956 		DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
957 		err = -ENOMEM;
958 		return err;
959 	}
960 	vc_rxqs->vport_id = vport->vport_id;
961 	vc_rxqs->num_qinfo = num_qs;
962 	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
963 		rxq_info = &vc_rxqs->qinfo[0];
964 		rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
965 		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
966 		rxq_info->queue_id = rxq->queue_id;
967 		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
968 		rxq_info->data_buffer_size = rxq->rx_buf_len;
969 		rxq_info->max_pkt_size = vport->max_pkt_len;
970 
971 		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
972 		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
973 
974 		rxq_info->ring_len = rxq->nb_rx_desc;
975 	} else {
976 		/* Rx queue */
977 		rxq_info = &vc_rxqs->qinfo[0];
978 		rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
979 		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
980 		rxq_info->queue_id = rxq->queue_id;
981 		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
982 		rxq_info->data_buffer_size = rxq->rx_buf_len;
983 		rxq_info->max_pkt_size = vport->max_pkt_len;
984 
985 		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
986 		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
987 
988 		rxq_info->ring_len = rxq->nb_rx_desc;
989 		rxq_info->rx_bufq1_id = rxq->bufq1->queue_id;
990 		rxq_info->rx_bufq2_id = rxq->bufq2->queue_id;
991 		rxq_info->rx_buffer_low_watermark = 64;
992 
993 		/* Buffer queue */
994 		for (i = 1; i <= IDPF_RX_BUFQ_PER_GRP; i++) {
995 			struct idpf_rx_queue *bufq = i == 1 ? rxq->bufq1 : rxq->bufq2;
996 			rxq_info = &vc_rxqs->qinfo[i];
997 			rxq_info->dma_ring_addr = bufq->rx_ring_phys_addr;
998 			rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
999 			rxq_info->queue_id = bufq->queue_id;
1000 			rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
1001 			rxq_info->data_buffer_size = bufq->rx_buf_len;
1002 			rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
1003 			rxq_info->ring_len = bufq->nb_rx_desc;
1004 
1005 			rxq_info->buffer_notif_stride = IDPF_RX_BUF_STRIDE;
1006 			rxq_info->rx_buffer_low_watermark = 64;
1007 		}
1008 	}
1009 
1010 	memset(&args, 0, sizeof(args));
1011 	args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
1012 	args.in_args = (uint8_t *)vc_rxqs;
1013 	args.in_args_size = size;
1014 	args.out_buffer = adapter->mbx_resp;
1015 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
1016 
1017 	err = idpf_vc_cmd_execute(adapter, &args);
1018 	rte_free(vc_rxqs);
1019 	if (err != 0)
1020 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
1021 
1022 	return err;
1023 }
1024 
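/* Configure one Tx queue group. In the single queue model a queue-scheduled
 * Tx queue is described; in the split model a flow-scheduled Tx queue and
 * its completion queue are configured together.
 */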
1025 int
1026 idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
1027 {
1028 	struct idpf_adapter *adapter = vport->adapter;
1029 	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
1030 	struct virtchnl2_txq_info *txq_info;
1031 	struct idpf_cmd_info args;
1032 	uint16_t num_qs;
1033 	int size, err;
1034 
1035 	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
1036 		num_qs = IDPF_TXQ_PER_GRP;
1037 	else
1038 		num_qs = IDPF_TXQ_PER_GRP + IDPF_TX_COMPLQ_PER_GRP;
1039 
1040 	size = sizeof(*vc_txqs) + (num_qs - 1) *
1041 		sizeof(struct virtchnl2_txq_info);
1042 	vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
1043 	if (vc_txqs == NULL) {
1044 		DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
1045 		err = -ENOMEM;
1046 		return err;
1047 	}
1048 	vc_txqs->vport_id = vport->vport_id;
1049 	vc_txqs->num_qinfo = num_qs;
1050 
1051 	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
1052 		txq_info = &vc_txqs->qinfo[0];
1053 		txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
1054 		txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
1055 		txq_info->queue_id = txq->queue_id;
1056 		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
1057 		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
1058 		txq_info->ring_len = txq->nb_tx_desc;
1059 	} else {
1060 		/* txq info */
1061 		txq_info = &vc_txqs->qinfo[0];
1062 		txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
1063 		txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
1064 		txq_info->queue_id = txq->queue_id;
1065 		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
1066 		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
1067 		txq_info->ring_len = txq->nb_tx_desc;
1068 		txq_info->tx_compl_queue_id = txq->complq->queue_id;
1069 		txq_info->relative_queue_id = txq_info->queue_id;
1070 
1071 		/* tx completion queue info */
1072 		txq_info = &vc_txqs->qinfo[1];
1073 		txq_info->dma_ring_addr = txq->complq->tx_ring_phys_addr;
1074 		txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
1075 		txq_info->queue_id = txq->complq->queue_id;
1076 		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
1077 		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
1078 		txq_info->ring_len = txq->complq->nb_tx_desc;
1079 	}
1080 
1081 	memset(&args, 0, sizeof(args));
1082 	args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
1083 	args.in_args = (uint8_t *)vc_txqs;
1084 	args.in_args_size = size;
1085 	args.out_buffer = adapter->mbx_resp;
1086 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
1087 
1088 	err = idpf_vc_cmd_execute(adapter, &args);
1089 	rte_free(vc_txqs);
1090 	if (err != 0)
1091 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
1092 
1093 	return err;
1094 }
1095 
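/* Thin exported wrappers around the control-queue receive and buffer
 * re-post primitives, presumably so that drivers built on this common
 * module can service the mailbox without calling the idpf_ctlq_* API
 * directly.
 */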
1096 int
1097 idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
1098 		  struct idpf_ctlq_msg *q_msg)
1099 {
1100 	return idpf_ctlq_recv(cq, num_q_msg, q_msg);
1101 }
1102 
1103 int
1104 idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
1105 			   u16 *buff_count, struct idpf_dma_mem **buffs)
1106 {
1107 	return idpf_ctlq_post_rx_buffs(hw, cq, buff_count, buffs);
1108 }
1109