xref: /dpdk/drivers/common/idpf/idpf_common_virtchnl.c (revision 2df20a1d345a5fc0a1b6dc0317d11fc7b1fda7e7)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2023 Intel Corporation
3  */
4 
5 #include "idpf_common_virtchnl.h"
6 #include "idpf_common_logs.h"
7 
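/* Drain completed descriptors from the mailbox send queue, retrying up to ten
 * times while nothing has completed yet, and free the DMA buffers and control
 * queue messages attached to the cleaned descriptors.
 */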
8 static int
9 idpf_vc_clean(struct idpf_adapter *adapter)
10 {
11 	struct idpf_ctlq_msg *q_msg[IDPF_CTLQ_LEN];
12 	uint16_t num_q_msg = IDPF_CTLQ_LEN;
13 	struct idpf_dma_mem *dma_mem;
14 	int err;
15 	uint32_t i;
16 
17 	for (i = 0; i < 10; i++) {
18 		err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);
19 		msleep(20);
20 		if (num_q_msg > 0)
21 			break;
22 	}
23 	if (err != 0)
24 		return err;
25 
26 	/* Empty queue is not an error */
27 	for (i = 0; i < num_q_msg; i++) {
28 		dma_mem = q_msg[i]->ctx.indirect.payload;
29 		if (dma_mem != NULL) {
30 			idpf_free_dma_mem(&adapter->hw, dma_mem);
31 			rte_free(dma_mem);
32 		}
33 		rte_free(q_msg[i]);
34 	}
35 
36 	return 0;
37 }
38 
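/* Send one virtchnl message to the CP: clean the send queue first, copy the
 * message into a freshly allocated DMA buffer, and post it as an indirect
 * idpf_mbq_opc_send_msg_to_pf control queue message carrying 'op' in its
 * cookie.
 */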
39 static int
40 idpf_send_vc_msg(struct idpf_adapter *adapter, uint32_t op,
41 		 uint16_t msg_size, uint8_t *msg)
42 {
43 	struct idpf_ctlq_msg *ctlq_msg;
44 	struct idpf_dma_mem *dma_mem;
45 	int err;
46 
47 	err = idpf_vc_clean(adapter);
48 	if (err != 0)
49 		goto err;
50 
51 	ctlq_msg = rte_zmalloc(NULL, sizeof(struct idpf_ctlq_msg), 0);
52 	if (ctlq_msg == NULL) {
53 		err = -ENOMEM;
54 		goto err;
55 	}
56 
57 	dma_mem = rte_zmalloc(NULL, sizeof(struct idpf_dma_mem), 0);
58 	if (dma_mem == NULL) {
59 		err = -ENOMEM;
60 		goto dma_mem_error;
61 	}
62 
63 	dma_mem->size = IDPF_DFLT_MBX_BUF_SIZE;
64 	idpf_alloc_dma_mem(&adapter->hw, dma_mem, dma_mem->size);
65 	if (dma_mem->va == NULL) {
66 		err = -ENOMEM;
67 		goto dma_alloc_error;
68 	}
69 
70 	memcpy(dma_mem->va, msg, msg_size);
71 
72 	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_pf;
73 	ctlq_msg->func_id = 0;
74 	ctlq_msg->data_len = msg_size;
75 	ctlq_msg->cookie.mbx.chnl_opcode = op;
76 	ctlq_msg->cookie.mbx.chnl_retval = VIRTCHNL_STATUS_SUCCESS;
77 	ctlq_msg->ctx.indirect.payload = dma_mem;
78 
79 	err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
80 	if (err != 0)
81 		goto send_error;
82 
83 	return 0;
84 
85 send_error:
86 	idpf_free_dma_mem(&adapter->hw, dma_mem);
87 dma_alloc_error:
88 	rte_free(dma_mem);
89 dma_mem_error:
90 	rte_free(ctlq_msg);
91 err:
92 	return err;
93 }
94 
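/* Read one message from the mailbox receive queue and copy up to 'buf_len'
 * bytes of its payload into 'buf'. Returns IDPF_MSG_SYS for events,
 * IDPF_MSG_CMD for a reply matching the pending command, IDPF_MSG_ERR on
 * receive failure or opcode mismatch, and IDPF_MSG_NON when the queue is
 * empty.
 */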
95 static enum idpf_vc_result
96 idpf_read_msg_from_cp(struct idpf_adapter *adapter, uint16_t buf_len,
97 		      uint8_t *buf)
98 {
99 	struct idpf_hw *hw = &adapter->hw;
100 	struct idpf_ctlq_msg ctlq_msg;
101 	struct idpf_dma_mem *dma_mem = NULL;
102 	enum idpf_vc_result result = IDPF_MSG_NON;
103 	uint32_t opcode;
104 	uint16_t pending = 1;
105 	int ret;
106 
107 	ret = idpf_ctlq_recv(hw->arq, &pending, &ctlq_msg);
108 	if (ret != 0) {
109 		DRV_LOG(DEBUG, "Can't read msg from AQ");
110 		if (ret != -ENOMSG)
111 			result = IDPF_MSG_ERR;
112 		return result;
113 	}
114 
115 	rte_memcpy(buf, ctlq_msg.ctx.indirect.payload->va, buf_len);
116 
117 	opcode = rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_opcode);
118 	adapter->cmd_retval = rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_retval);
119 
120 	DRV_LOG(DEBUG, "CQ from CP carries opcode %u, retval %d",
121 		opcode, adapter->cmd_retval);
122 
123 	if (opcode == VIRTCHNL2_OP_EVENT) {
124 		struct virtchnl2_event *ve = ctlq_msg.ctx.indirect.payload->va;
125 
126 		result = IDPF_MSG_SYS;
127 		switch (ve->event) {
128 		case VIRTCHNL2_EVENT_LINK_CHANGE:
129 			/* TBD */
130 			break;
131 		default:
132 			DRV_LOG(ERR, "%s: Unknown event %d from CP",
133 				__func__, ve->event);
134 			break;
135 		}
136 	} else {
137 		/* async reply msg for a command previously issued by the PF */
138 		result = IDPF_MSG_CMD;
139 		if (opcode != adapter->pend_cmd) {
140 			DRV_LOG(WARNING, "command mismatch, expected %u, got %u",
141 				adapter->pend_cmd, opcode);
142 			result = IDPF_MSG_ERR;
143 		}
144 	}
145 
146 	if (ctlq_msg.data_len != 0)
147 		dma_mem = ctlq_msg.ctx.indirect.payload;
148 	else
149 		pending = 0;
150 
151 	ret = idpf_ctlq_post_rx_buffs(hw, hw->arq, &pending, &dma_mem);
152 	if (ret != 0 && dma_mem != NULL)
153 		idpf_free_dma_mem(hw, dma_mem);
154 
155 	return result;
156 }
157 
158 #define MAX_TRY_TIMES 200
159 #define ASQ_DELAY_MS  10
160 
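/* Poll the mailbox until the reply to 'ops' is read, copying up to 'buf_len'
 * bytes of the reply into 'buf'. Gives up after MAX_TRY_TIMES polls of
 * ASQ_DELAY_MS each and returns -EBUSY on timeout or command failure.
 */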
161 int
162 idpf_vc_one_msg_read(struct idpf_adapter *adapter, uint32_t ops, uint16_t buf_len,
163 		     uint8_t *buf)
164 {
165 	int err = 0;
166 	int i = 0;
167 	int ret;
168 
169 	do {
170 		ret = idpf_read_msg_from_cp(adapter, buf_len, buf);
171 		if (ret == IDPF_MSG_CMD)
172 			break;
173 		rte_delay_ms(ASQ_DELAY_MS);
174 	} while (i++ < MAX_TRY_TIMES);
175 	if (i >= MAX_TRY_TIMES ||
176 	    adapter->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
177 		err = -EBUSY;
178 		DRV_LOG(ERR, "No response or return failure (%d) for cmd %d",
179 			adapter->cmd_retval, ops);
180 	}
181 
182 	return err;
183 }
184 
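/* Execute one virtchnl command described by 'args': mark it pending, send it,
 * and then either poll the mailbox for the reply (init-time ops), skip reply
 * handling (multi-message ops such as GET_PTYPE_INFO), or wait for the
 * receive path to clear the pending-command flag.
 */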
185 int
186 idpf_vc_cmd_execute(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
187 {
188 	int err = 0;
189 	int i = 0;
190 	int ret;
191 
192 	if (atomic_set_cmd(adapter, args->ops))
193 		return -EINVAL;
194 
195 	ret = idpf_send_vc_msg(adapter, args->ops, args->in_args_size, args->in_args);
196 	if (ret != 0) {
197 		DRV_LOG(ERR, "Failed to send cmd %d", args->ops);
198 		clear_cmd(adapter);
199 		return ret;
200 	}
201 
202 	switch (args->ops) {
203 	case VIRTCHNL_OP_VERSION:
204 	case VIRTCHNL2_OP_GET_CAPS:
205 		/* for init-time virtchnl ops, poll the mailbox for the response */
206 		err = idpf_vc_one_msg_read(adapter, args->ops, args->out_size, args->out_buffer);
207 		clear_cmd(adapter);
208 		break;
209 	case VIRTCHNL2_OP_GET_PTYPE_INFO:
210 	/* for multiple response messages,
211 		 * do not handle the response here.
212 		 */
213 		break;
214 	default:
215 	/* For other virtchnl ops at runtime,
216 		 * wait for the cmd done flag.
217 		 */
218 		do {
219 			if (adapter->pend_cmd == VIRTCHNL_OP_UNKNOWN)
220 				break;
221 			rte_delay_ms(ASQ_DELAY_MS);
222 			/* If no msg was read, or a sys event was read, keep polling */
223 		} while (i++ < MAX_TRY_TIMES);
224 		/* If no response is received, clear the pending command */
225 		if (i >= MAX_TRY_TIMES ||
226 		    adapter->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
227 			err = -EBUSY;
228 			DRV_LOG(ERR, "No response or return failure (%d) for cmd %d",
229 				adapter->cmd_retval, args->ops);
230 			clear_cmd(adapter);
231 		}
232 		break;
233 	}
234 
235 	return err;
236 }
237 
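/* Negotiate the virtchnl2 API version with the CP and fail with -EINVAL if the
 * version reported back does not match the one the driver requested.
 */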
238 int
239 idpf_vc_api_version_check(struct idpf_adapter *adapter)
240 {
241 	struct virtchnl2_version_info version, *pver;
242 	struct idpf_cmd_info args;
243 	int err;
244 
245 	memset(&version, 0, sizeof(version));
246 	version.major = VIRTCHNL2_VERSION_MAJOR_2;
247 	version.minor = VIRTCHNL2_VERSION_MINOR_0;
248 
249 	args.ops = VIRTCHNL_OP_VERSION;
250 	args.in_args = (uint8_t *)&version;
251 	args.in_args_size = sizeof(version);
252 	args.out_buffer = adapter->mbx_resp;
253 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
254 
255 	err = idpf_vc_cmd_execute(adapter, &args);
256 	if (err != 0) {
257 		DRV_LOG(ERR,
258 			"Failed to execute command of VIRTCHNL_OP_VERSION");
259 		return err;
260 	}
261 
262 	pver = (struct virtchnl2_version_info *)args.out_buffer;
263 	adapter->virtchnl_version = *pver;
264 
265 	if (adapter->virtchnl_version.major != VIRTCHNL2_VERSION_MAJOR_2 ||
266 	    adapter->virtchnl_version.minor != VIRTCHNL2_VERSION_MINOR_0) {
267 		DRV_LOG(ERR, "VIRTCHNL API version mismatch:(%u.%u)-(%u.%u)",
268 			adapter->virtchnl_version.major,
269 			adapter->virtchnl_version.minor,
270 			VIRTCHNL2_VERSION_MAJOR_2,
271 			VIRTCHNL2_VERSION_MINOR_0);
272 		return -EINVAL;
273 	}
274 
275 	return 0;
276 }
277 
278 int
279 idpf_vc_caps_get(struct idpf_adapter *adapter)
280 {
281 	struct idpf_cmd_info args;
282 	int err;
283 
284 	args.ops = VIRTCHNL2_OP_GET_CAPS;
285 	args.in_args = (uint8_t *)&adapter->caps;
286 	args.in_args_size = sizeof(struct virtchnl2_get_capabilities);
287 	args.out_buffer = adapter->mbx_resp;
288 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
289 
290 	err = idpf_vc_cmd_execute(adapter, &args);
291 	if (err != 0) {
292 		DRV_LOG(ERR,
293 			"Failed to execute command of VIRTCHNL2_OP_GET_CAPS");
294 		return err;
295 	}
296 
297 	rte_memcpy(&adapter->caps, args.out_buffer, sizeof(struct virtchnl2_get_capabilities));
298 
299 	return 0;
300 }
301 
302 int
303 idpf_vc_vport_create(struct idpf_vport *vport,
304 		     struct virtchnl2_create_vport *create_vport_info)
305 {
306 	struct idpf_adapter *adapter = vport->adapter;
307 	struct virtchnl2_create_vport vport_msg;
308 	struct idpf_cmd_info args;
309 	int err = -1;
310 
311 	memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));
312 	vport_msg.vport_type = create_vport_info->vport_type;
313 	vport_msg.txq_model = create_vport_info->txq_model;
314 	vport_msg.rxq_model = create_vport_info->rxq_model;
315 	vport_msg.num_tx_q = create_vport_info->num_tx_q;
316 	vport_msg.num_tx_complq = create_vport_info->num_tx_complq;
317 	vport_msg.num_rx_q = create_vport_info->num_rx_q;
318 	vport_msg.num_rx_bufq = create_vport_info->num_rx_bufq;
319 
320 	memset(&args, 0, sizeof(args));
321 	args.ops = VIRTCHNL2_OP_CREATE_VPORT;
322 	args.in_args = (uint8_t *)&vport_msg;
323 	args.in_args_size = sizeof(vport_msg);
324 	args.out_buffer = adapter->mbx_resp;
325 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
326 
327 	err = idpf_vc_cmd_execute(adapter, &args);
328 	if (err != 0) {
329 		DRV_LOG(ERR,
330 			"Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
331 		return err;
332 	}
333 
334 	rte_memcpy(&(vport->vport_info.info), args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
335 	return 0;
336 }
337 
338 int
339 idpf_vc_vport_destroy(struct idpf_vport *vport)
340 {
341 	struct idpf_adapter *adapter = vport->adapter;
342 	struct virtchnl2_vport vc_vport;
343 	struct idpf_cmd_info args;
344 	int err;
345 
346 	vc_vport.vport_id = vport->vport_id;
347 
348 	memset(&args, 0, sizeof(args));
349 	args.ops = VIRTCHNL2_OP_DESTROY_VPORT;
350 	args.in_args = (uint8_t *)&vc_vport;
351 	args.in_args_size = sizeof(vc_vport);
352 	args.out_buffer = adapter->mbx_resp;
353 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
354 
355 	err = idpf_vc_cmd_execute(adapter, &args);
356 	if (err != 0)
357 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_DESTROY_VPORT");
358 
359 	return err;
360 }
361 
362 int
363 idpf_vc_queue_grps_add(struct idpf_vport *vport,
364 		       struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
365 		       uint8_t *p2p_queue_grps_out)
366 {
367 	struct idpf_adapter *adapter = vport->adapter;
368 	struct idpf_cmd_info args;
369 	int size;
370 	int err = -1;
371 
372 	size = sizeof(*p2p_queue_grps_info) +
373 	       (p2p_queue_grps_info->qg_info.num_queue_groups - 1) *
374 		   sizeof(struct virtchnl2_queue_group_info);
375 
376 	memset(&args, 0, sizeof(args));
377 	args.ops = VIRTCHNL2_OP_ADD_QUEUE_GROUPS;
378 	args.in_args = (uint8_t *)p2p_queue_grps_info;
379 	args.in_args_size = size;
380 	args.out_buffer = adapter->mbx_resp;
381 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
382 
383 	err = idpf_vc_cmd_execute(adapter, &args);
384 	if (err != 0) {
385 		DRV_LOG(ERR,
386 			"Failed to execute command of VIRTCHNL2_OP_ADD_QUEUE_GROUPS");
387 		return err;
388 	}
389 
390 	rte_memcpy(p2p_queue_grps_out, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
391 	return 0;
392 }
393 
394 int idpf_vc_queue_grps_del(struct idpf_vport *vport,
395 			  uint16_t num_q_grps,
396 			  struct virtchnl2_queue_group_id *qg_ids)
397 {
398 	struct idpf_adapter *adapter = vport->adapter;
399 	struct virtchnl2_delete_queue_groups *vc_del_q_grps;
400 	struct idpf_cmd_info args;
401 	int size;
402 	int err;
403 
404 	size = sizeof(*vc_del_q_grps) +
405 	       (num_q_grps - 1) * sizeof(struct virtchnl2_queue_group_id);
406 	vc_del_q_grps = rte_zmalloc("vc_del_q_grps", size, 0);
	if (vc_del_q_grps == NULL)
		return -ENOMEM;
407 
408 	vc_del_q_grps->vport_id = vport->vport_id;
409 	vc_del_q_grps->num_queue_groups = num_q_grps;
410 	memcpy(vc_del_q_grps->qg_ids, qg_ids,
411 	       num_q_grps * sizeof(struct virtchnl2_queue_group_id));
412 
413 	memset(&args, 0, sizeof(args));
414 	args.ops = VIRTCHNL2_OP_DEL_QUEUE_GROUPS;
415 	args.in_args = (uint8_t *)vc_del_q_grps;
416 	args.in_args_size = size;
417 	args.out_buffer = adapter->mbx_resp;
418 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
419 
420 	err = idpf_vc_cmd_execute(adapter, &args);
421 	if (err != 0)
422 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_DEL_QUEUE_GROUPS");
423 
424 	rte_free(vc_del_q_grps);
425 	return err;
426 }
427 
428 int
429 idpf_vc_rss_key_set(struct idpf_vport *vport)
430 {
431 	struct idpf_adapter *adapter = vport->adapter;
432 	struct virtchnl2_rss_key *rss_key;
433 	struct idpf_cmd_info args;
434 	int len, err;
435 
436 	len = sizeof(*rss_key) + sizeof(rss_key->key[0]) *
437 		(vport->rss_key_size - 1);
438 	rss_key = rte_zmalloc("rss_key", len, 0);
439 	if (rss_key == NULL)
440 		return -ENOMEM;
441 
442 	rss_key->vport_id = vport->vport_id;
443 	rss_key->key_len = vport->rss_key_size;
444 	rte_memcpy(rss_key->key, vport->rss_key,
445 		   sizeof(rss_key->key[0]) * vport->rss_key_size);
446 
447 	memset(&args, 0, sizeof(args));
448 	args.ops = VIRTCHNL2_OP_SET_RSS_KEY;
449 	args.in_args = (uint8_t *)rss_key;
450 	args.in_args_size = len;
451 	args.out_buffer = adapter->mbx_resp;
452 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
453 
454 	err = idpf_vc_cmd_execute(adapter, &args);
455 	if (err != 0)
456 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_SET_RSS_KEY");
457 
458 	rte_free(rss_key);
459 	return err;
460 }
461 
462 int idpf_vc_rss_key_get(struct idpf_vport *vport)
463 {
464 	struct idpf_adapter *adapter = vport->adapter;
465 	struct virtchnl2_rss_key *rss_key_ret;
466 	struct virtchnl2_rss_key rss_key;
467 	struct idpf_cmd_info args;
468 	int err;
469 
470 	memset(&rss_key, 0, sizeof(rss_key));
471 	rss_key.vport_id = vport->vport_id;
472 
473 	memset(&args, 0, sizeof(args));
474 	args.ops = VIRTCHNL2_OP_GET_RSS_KEY;
475 	args.in_args = (uint8_t *)&rss_key;
476 	args.in_args_size = sizeof(rss_key);
477 	args.out_buffer = adapter->mbx_resp;
478 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
479 
480 	err = idpf_vc_cmd_execute(adapter, &args);
481 
482 	if (!err) {
483 		rss_key_ret = (struct virtchnl2_rss_key *)args.out_buffer;
484 		if (rss_key_ret->key_len != vport->rss_key_size) {
485 			rte_free(vport->rss_key);
486 			vport->rss_key = NULL;
487 			vport->rss_key_size = RTE_MIN(IDPF_RSS_KEY_LEN,
488 						      rss_key_ret->key_len);
489 			vport->rss_key = rte_zmalloc("rss_key", vport->rss_key_size, 0);
490 			if (!vport->rss_key) {
491 				vport->rss_key_size = 0;
492 				DRV_LOG(ERR, "Failed to allocate RSS key");
493 				return -ENOMEM;
494 			}
495 		}
496 		rte_memcpy(vport->rss_key, rss_key_ret->key, vport->rss_key_size);
497 	} else {
498 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_RSS_KEY");
499 	}
500 
501 	return err;
502 }
503 
504 int
505 idpf_vc_rss_lut_set(struct idpf_vport *vport)
506 {
507 	struct idpf_adapter *adapter = vport->adapter;
508 	struct virtchnl2_rss_lut *rss_lut;
509 	struct idpf_cmd_info args;
510 	int len, err;
511 
512 	len = sizeof(*rss_lut) + sizeof(rss_lut->lut[0]) *
513 		(vport->rss_lut_size - 1);
514 	rss_lut = rte_zmalloc("rss_lut", len, 0);
515 	if (rss_lut == NULL)
516 		return -ENOMEM;
517 
518 	rss_lut->vport_id = vport->vport_id;
519 	rss_lut->lut_entries = vport->rss_lut_size;
520 	rte_memcpy(rss_lut->lut, vport->rss_lut,
521 		   sizeof(rss_lut->lut[0]) * vport->rss_lut_size);
522 
523 	memset(&args, 0, sizeof(args));
524 	args.ops = VIRTCHNL2_OP_SET_RSS_LUT;
525 	args.in_args = (uint8_t *)rss_lut;
526 	args.in_args_size = len;
527 	args.out_buffer = adapter->mbx_resp;
528 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
529 
530 	err = idpf_vc_cmd_execute(adapter, &args);
531 	if (err != 0)
532 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_SET_RSS_LUT");
533 
534 	rte_free(rss_lut);
535 	return err;
536 }
537 
538 int
539 idpf_vc_rss_lut_get(struct idpf_vport *vport)
540 {
541 	struct idpf_adapter *adapter = vport->adapter;
542 	struct virtchnl2_rss_lut *rss_lut_ret;
543 	struct virtchnl2_rss_lut rss_lut;
544 	struct idpf_cmd_info args;
545 	int err;
546 
547 	memset(&rss_lut, 0, sizeof(rss_lut));
548 	rss_lut.vport_id = vport->vport_id;
549 
550 	memset(&args, 0, sizeof(args));
551 	args.ops = VIRTCHNL2_OP_GET_RSS_LUT;
552 	args.in_args = (uint8_t *)&rss_lut;
553 	args.in_args_size = sizeof(rss_lut);
554 	args.out_buffer = adapter->mbx_resp;
555 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
556 
557 	err = idpf_vc_cmd_execute(adapter, &args);
558 
559 	if (!err) {
560 		rss_lut_ret = (struct virtchnl2_rss_lut *)args.out_buffer;
561 		if (rss_lut_ret->lut_entries != vport->rss_lut_size) {
562 			rte_free(vport->rss_lut);
563 			vport->rss_lut = NULL;
564 			vport->rss_lut = rte_zmalloc("rss_lut",
565 				     sizeof(uint32_t) * rss_lut_ret->lut_entries, 0);
566 			if (vport->rss_lut == NULL) {
567 				DRV_LOG(ERR, "Failed to allocate RSS lut");
568 				return -ENOMEM;
569 			}
570 		}
571 		rte_memcpy(vport->rss_lut, rss_lut_ret->lut,
			   rss_lut_ret->lut_entries * sizeof(rss_lut_ret->lut[0]));
572 		vport->rss_lut_size = rss_lut_ret->lut_entries;
573 	} else {
574 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_RSS_LUT");
575 	}
576 
577 	return err;
578 }
579 
580 int
581 idpf_vc_rss_hash_get(struct idpf_vport *vport)
582 {
583 	struct idpf_adapter *adapter = vport->adapter;
584 	struct virtchnl2_rss_hash *rss_hash_ret;
585 	struct virtchnl2_rss_hash rss_hash;
586 	struct idpf_cmd_info args;
587 	int err;
588 
589 	memset(&rss_hash, 0, sizeof(rss_hash));
590 	rss_hash.ptype_groups = vport->rss_hf;
591 	rss_hash.vport_id = vport->vport_id;
592 
593 	memset(&args, 0, sizeof(args));
594 	args.ops = VIRTCHNL2_OP_GET_RSS_HASH;
595 	args.in_args = (uint8_t *)&rss_hash;
596 	args.in_args_size = sizeof(rss_hash);
597 	args.out_buffer = adapter->mbx_resp;
598 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
599 
600 	err = idpf_vc_cmd_execute(adapter, &args);
601 
602 	if (!err) {
603 		rss_hash_ret = (struct virtchnl2_rss_hash *)args.out_buffer;
604 		vport->rss_hf = rss_hash_ret->ptype_groups;
605 	} else {
606 		DRV_LOG(ERR, "Failed to execute command of OP_GET_RSS_HASH");
607 	}
608 
609 	return err;
610 }
611 
612 int
613 idpf_vc_rss_hash_set(struct idpf_vport *vport)
614 {
615 	struct idpf_adapter *adapter = vport->adapter;
616 	struct virtchnl2_rss_hash rss_hash;
617 	struct idpf_cmd_info args;
618 	int err;
619 
620 	memset(&rss_hash, 0, sizeof(rss_hash));
621 	rss_hash.ptype_groups = vport->rss_hf;
622 	rss_hash.vport_id = vport->vport_id;
623 
624 	memset(&args, 0, sizeof(args));
625 	args.ops = VIRTCHNL2_OP_SET_RSS_HASH;
626 	args.in_args = (uint8_t *)&rss_hash;
627 	args.in_args_size = sizeof(rss_hash);
628 	args.out_buffer = adapter->mbx_resp;
629 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
630 
631 	err = idpf_vc_cmd_execute(adapter, &args);
632 	if (err != 0)
633 		DRV_LOG(ERR, "Failed to execute command of OP_SET_RSS_HASH");
634 
635 	return err;
636 }
637 
638 int
639 idpf_vc_irq_map_unmap_config(struct idpf_vport *vport, uint16_t nb_rxq, bool map)
640 {
641 	struct idpf_adapter *adapter = vport->adapter;
642 	struct virtchnl2_queue_vector_maps *map_info;
643 	struct virtchnl2_queue_vector *vecmap;
644 	struct idpf_cmd_info args;
645 	int len, i, err = 0;
646 
647 	len = sizeof(struct virtchnl2_queue_vector_maps) +
648 		(nb_rxq - 1) * sizeof(struct virtchnl2_queue_vector);
649 
650 	map_info = rte_zmalloc("map_info", len, 0);
651 	if (map_info == NULL)
652 		return -ENOMEM;
653 
654 	map_info->vport_id = vport->vport_id;
655 	map_info->num_qv_maps = nb_rxq;
656 	for (i = 0; i < nb_rxq; i++) {
657 		vecmap = &map_info->qv_maps[i];
658 		vecmap->queue_id = vport->qv_map[i].queue_id;
659 		vecmap->vector_id = vport->qv_map[i].vector_id;
660 		vecmap->itr_idx = VIRTCHNL2_ITR_IDX_0;
661 		vecmap->queue_type = VIRTCHNL2_QUEUE_TYPE_RX;
662 	}
663 
664 	args.ops = map ? VIRTCHNL2_OP_MAP_QUEUE_VECTOR :
665 		VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
666 	args.in_args = (uint8_t *)map_info;
667 	args.in_args_size = len;
668 	args.out_buffer = adapter->mbx_resp;
669 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
670 	err = idpf_vc_cmd_execute(adapter, &args);
671 	if (err != 0)
672 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUE_VECTOR",
673 			map ? "MAP" : "UNMAP");
674 
675 	rte_free(map_info);
676 	return err;
677 }
678 
679 int
680 idpf_vc_vectors_alloc(struct idpf_vport *vport, uint16_t num_vectors)
681 {
682 	struct idpf_adapter *adapter = vport->adapter;
683 	struct virtchnl2_alloc_vectors *alloc_vec;
684 	struct idpf_cmd_info args;
685 	int err, len;
686 
687 	len = sizeof(struct virtchnl2_alloc_vectors) +
688 		(num_vectors - 1) * sizeof(struct virtchnl2_vector_chunk);
689 	alloc_vec = rte_zmalloc("alloc_vec", len, 0);
690 	if (alloc_vec == NULL)
691 		return -ENOMEM;
692 
693 	alloc_vec->num_vectors = num_vectors;
694 
695 	args.ops = VIRTCHNL2_OP_ALLOC_VECTORS;
696 	args.in_args = (uint8_t *)alloc_vec;
697 	args.in_args_size = len;
698 	args.out_buffer = adapter->mbx_resp;
699 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
700 	err = idpf_vc_cmd_execute(adapter, &args);
701 	if (err != 0)
702 		DRV_LOG(ERR, "Failed to execute command VIRTCHNL2_OP_ALLOC_VECTORS");
703 
704 	rte_memcpy(vport->recv_vectors, args.out_buffer, len);
705 	rte_free(alloc_vec);
706 	return err;
707 }
708 
709 int
710 idpf_vc_vectors_dealloc(struct idpf_vport *vport)
711 {
712 	struct idpf_adapter *adapter = vport->adapter;
713 	struct virtchnl2_alloc_vectors *alloc_vec;
714 	struct virtchnl2_vector_chunks *vcs;
715 	struct idpf_cmd_info args;
716 	int err, len;
717 
718 	alloc_vec = vport->recv_vectors;
719 	vcs = &alloc_vec->vchunks;
720 
721 	len = sizeof(struct virtchnl2_vector_chunks) +
722 		(vcs->num_vchunks - 1) * sizeof(struct virtchnl2_vector_chunk);
723 
724 	args.ops = VIRTCHNL2_OP_DEALLOC_VECTORS;
725 	args.in_args = (uint8_t *)vcs;
726 	args.in_args_size = len;
727 	args.out_buffer = adapter->mbx_resp;
728 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
729 	err = idpf_vc_cmd_execute(adapter, &args);
730 	if (err != 0)
731 		DRV_LOG(ERR, "Failed to execute command VIRTCHNL2_OP_DEALLOC_VECTORS");
732 
733 	return err;
734 }
735 
736 int
737 idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
738 			  uint32_t type, bool on)
739 {
740 	struct idpf_adapter *adapter = vport->adapter;
741 	struct virtchnl2_del_ena_dis_queues *queue_select;
742 	struct virtchnl2_queue_chunk *queue_chunk;
743 	struct idpf_cmd_info args;
744 	int err, len;
745 
746 	len = sizeof(struct virtchnl2_del_ena_dis_queues);
747 	queue_select = rte_zmalloc("queue_select", len, 0);
748 	if (queue_select == NULL)
749 		return -ENOMEM;
750 
751 	queue_chunk = queue_select->chunks.chunks;
752 	queue_select->chunks.num_chunks = 1;
753 	queue_select->vport_id = vport->vport_id;
754 
755 	queue_chunk->type = type;
756 	queue_chunk->start_queue_id = qid;
757 	queue_chunk->num_queues = 1;
758 
759 	args.ops = on ? VIRTCHNL2_OP_ENABLE_QUEUES :
760 		VIRTCHNL2_OP_DISABLE_QUEUES;
761 	args.in_args = (uint8_t *)queue_select;
762 	args.in_args_size = len;
763 	args.out_buffer = adapter->mbx_resp;
764 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
765 	err = idpf_vc_cmd_execute(adapter, &args);
766 	if (err != 0)
767 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES",
768 			on ? "ENABLE" : "DISABLE");
769 
770 	rte_free(queue_select);
771 	return err;
772 }
773 
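/* Enable or disable one Rx or Tx queue identified by its relative queue id,
 * also switching the paired Tx completion queue or the two Rx buffer queues
 * when the vport uses the split queue model.
 */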
774 int
775 idpf_vc_queue_switch(struct idpf_vport *vport, uint16_t qid,
776 		     bool rx, bool on)
777 {
778 	uint32_t type;
779 	int err, queue_id;
780 
781 	/* switch txq/rxq */
782 	type = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;
783 
784 	if (type == VIRTCHNL2_QUEUE_TYPE_RX)
785 		queue_id = vport->chunks_info.rx_start_qid + qid;
786 	else
787 		queue_id = vport->chunks_info.tx_start_qid + qid;
788 	err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
789 	if (err != 0)
790 		return err;
791 
792 	/* switch tx completion queue */
793 	if (!rx && vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
794 		type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
795 		queue_id = vport->chunks_info.tx_compl_start_qid + qid;
796 		err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
797 		if (err != 0)
798 			return err;
799 	}
800 
801 	/* switch rx buffer queue */
802 	if (rx && vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
803 		type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
804 		queue_id = vport->chunks_info.rx_buf_start_qid + 2 * qid;
805 		err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
806 		if (err != 0)
807 			return err;
808 		queue_id++;
809 		err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
810 		if (err != 0)
811 			return err;
812 	}
813 
814 	return err;
815 }
816 
817 #define IDPF_RXTX_QUEUE_CHUNKS_NUM	2
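/* Enable or disable all queues of the vport in one request, covering the Rx
 * and Tx queues plus the buffer and completion queues used in the split queue
 * model.
 */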
818 int
819 idpf_vc_queues_ena_dis(struct idpf_vport *vport, bool enable)
820 {
821 	struct idpf_adapter *adapter = vport->adapter;
822 	struct virtchnl2_del_ena_dis_queues *queue_select;
823 	struct virtchnl2_queue_chunk *queue_chunk;
824 	uint32_t type;
825 	struct idpf_cmd_info args;
826 	uint16_t num_chunks;
827 	int err, len;
828 
829 	num_chunks = IDPF_RXTX_QUEUE_CHUNKS_NUM;
830 	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
831 		num_chunks++;
832 	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
833 		num_chunks++;
834 
835 	len = sizeof(struct virtchnl2_del_ena_dis_queues) +
836 		sizeof(struct virtchnl2_queue_chunk) * (num_chunks - 1);
837 	queue_select = rte_zmalloc("queue_select", len, 0);
838 	if (queue_select == NULL)
839 		return -ENOMEM;
840 
841 	queue_chunk = queue_select->chunks.chunks;
842 	queue_select->chunks.num_chunks = num_chunks;
843 	queue_select->vport_id = vport->vport_id;
844 
845 	type = VIRTCHNL2_QUEUE_TYPE_RX;
846 	queue_chunk[type].type = type;
847 	queue_chunk[type].start_queue_id = vport->chunks_info.rx_start_qid;
848 	queue_chunk[type].num_queues = vport->num_rx_q;
849 
850 	type = VIRTCHNL2_QUEUE_TYPE_TX;
851 	queue_chunk[type].type = type;
852 	queue_chunk[type].start_queue_id = vport->chunks_info.tx_start_qid;
853 	queue_chunk[type].num_queues = vport->num_tx_q;
854 
855 	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
856 		type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
857 		queue_chunk[type].type = type;
858 		queue_chunk[type].start_queue_id =
859 			vport->chunks_info.rx_buf_start_qid;
860 		queue_chunk[type].num_queues = vport->num_rx_bufq;
861 	}
862 
863 	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
864 		type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
865 		queue_chunk[type].type = type;
866 		queue_chunk[type].start_queue_id =
867 			vport->chunks_info.tx_compl_start_qid;
868 		queue_chunk[type].num_queues = vport->num_tx_complq;
869 	}
870 
871 	args.ops = enable ? VIRTCHNL2_OP_ENABLE_QUEUES :
872 		VIRTCHNL2_OP_DISABLE_QUEUES;
873 	args.in_args = (uint8_t *)queue_select;
874 	args.in_args_size = len;
875 	args.out_buffer = adapter->mbx_resp;
876 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
877 	err = idpf_vc_cmd_execute(adapter, &args);
878 	if (err != 0)
879 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES",
880 			enable ? "ENABLE" : "DISABLE");
881 
882 	rte_free(queue_select);
883 	return err;
884 }
885 
886 int
887 idpf_vc_vport_ena_dis(struct idpf_vport *vport, bool enable)
888 {
889 	struct idpf_adapter *adapter = vport->adapter;
890 	struct virtchnl2_vport vc_vport;
891 	struct idpf_cmd_info args;
892 	int err;
893 
894 	vc_vport.vport_id = vport->vport_id;
895 	args.ops = enable ? VIRTCHNL2_OP_ENABLE_VPORT :
896 		VIRTCHNL2_OP_DISABLE_VPORT;
897 	args.in_args = (uint8_t *)&vc_vport;
898 	args.in_args_size = sizeof(vc_vport);
899 	args.out_buffer = adapter->mbx_resp;
900 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
901 
902 	err = idpf_vc_cmd_execute(adapter, &args);
903 	if (err != 0) {
904 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_VPORT",
905 			enable ? "ENABLE" : "DISABLE");
906 	}
907 
908 	return err;
909 }
910 
911 int
912 idpf_vc_ptype_info_query(struct idpf_adapter *adapter)
913 {
914 	struct virtchnl2_get_ptype_info *ptype_info;
915 	struct idpf_cmd_info args;
916 	int len, err;
917 
918 	len = sizeof(struct virtchnl2_get_ptype_info);
919 	ptype_info = rte_zmalloc("ptype_info", len, 0);
920 	if (ptype_info == NULL)
921 		return -ENOMEM;
922 
923 	ptype_info->start_ptype_id = 0;
924 	ptype_info->num_ptypes = IDPF_MAX_PKT_TYPE;
925 	args.ops = VIRTCHNL2_OP_GET_PTYPE_INFO;
926 	args.in_args = (uint8_t *)ptype_info;
927 	args.in_args_size = len;
928 
929 	err = idpf_vc_cmd_execute(adapter, &args);
930 	if (err != 0)
931 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_PTYPE_INFO");
932 
933 	rte_free(ptype_info);
934 	return err;
935 }
936 
937 int
938 idpf_vc_stats_query(struct idpf_vport *vport,
939 		struct virtchnl2_vport_stats **pstats)
940 {
941 	struct idpf_adapter *adapter = vport->adapter;
942 	struct virtchnl2_vport_stats vport_stats;
943 	struct idpf_cmd_info args;
944 	int err;
945 
946 	vport_stats.vport_id = vport->vport_id;
947 	args.ops = VIRTCHNL2_OP_GET_STATS;
948 	args.in_args = (u8 *)&vport_stats;
949 	args.in_args_size = sizeof(vport_stats);
950 	args.out_buffer = adapter->mbx_resp;
951 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
952 
953 	err = idpf_vc_cmd_execute(adapter, &args);
954 	if (err) {
955 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_STATS");
956 		*pstats = NULL;
957 		return err;
958 	}
959 	*pstats = (struct virtchnl2_vport_stats *)args.out_buffer;
960 	return 0;
961 }
962 
963 #define IDPF_RX_BUF_STRIDE		64
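/* Configure one Rx queue group over virtchnl: a single Rx queue in the single
 * queue model, or the Rx queue plus its two buffer queues in the split queue
 * model.
 */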
964 int
965 idpf_vc_rxq_config(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
966 {
967 	struct idpf_adapter *adapter = vport->adapter;
968 	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
969 	struct virtchnl2_rxq_info *rxq_info;
970 	struct idpf_cmd_info args;
971 	uint16_t num_qs;
972 	int size, err, i;
973 
974 	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
975 		num_qs = IDPF_RXQ_PER_GRP;
976 	else
977 		num_qs = IDPF_RXQ_PER_GRP + IDPF_RX_BUFQ_PER_GRP;
978 
979 	size = sizeof(*vc_rxqs) + (num_qs - 1) *
980 		sizeof(struct virtchnl2_rxq_info);
981 	vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
982 	if (vc_rxqs == NULL) {
983 		DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
984 		err = -ENOMEM;
985 		return err;
986 	}
987 	vc_rxqs->vport_id = vport->vport_id;
988 	vc_rxqs->num_qinfo = num_qs;
989 	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
990 		rxq_info = &vc_rxqs->qinfo[0];
991 		rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
992 		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
993 		rxq_info->queue_id = rxq->queue_id;
994 		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
995 		rxq_info->data_buffer_size = rxq->rx_buf_len;
996 		rxq_info->max_pkt_size = vport->max_pkt_len;
997 
998 		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
999 		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
1000 
1001 		rxq_info->ring_len = rxq->nb_rx_desc;
1002 	} else {
1003 		/* Rx queue */
1004 		rxq_info = &vc_rxqs->qinfo[0];
1005 		rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
1006 		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
1007 		rxq_info->queue_id = rxq->queue_id;
1008 		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
1009 		rxq_info->data_buffer_size = rxq->rx_buf_len;
1010 		rxq_info->max_pkt_size = vport->max_pkt_len;
1011 
1012 		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
1013 		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
1014 
1015 		rxq_info->ring_len = rxq->nb_rx_desc;
1016 		rxq_info->rx_bufq1_id = rxq->bufq1->queue_id;
1017 		rxq_info->bufq2_ena = 1;
1018 		rxq_info->rx_bufq2_id = rxq->bufq2->queue_id;
1019 		rxq_info->rx_buffer_low_watermark = 64;
1020 
1021 		/* Buffer queue */
1022 		for (i = 1; i <= IDPF_RX_BUFQ_PER_GRP; i++) {
1023 			struct idpf_rx_queue *bufq = i == 1 ? rxq->bufq1 : rxq->bufq2;
1024 			rxq_info = &vc_rxqs->qinfo[i];
1025 			rxq_info->dma_ring_addr = bufq->rx_ring_phys_addr;
1026 			rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
1027 			rxq_info->queue_id = bufq->queue_id;
1028 			rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
1029 			rxq_info->data_buffer_size = bufq->rx_buf_len;
1030 			rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
1031 			rxq_info->ring_len = bufq->nb_rx_desc;
1032 
1033 			rxq_info->buffer_notif_stride = IDPF_RX_BUF_STRIDE;
1034 			rxq_info->rx_buffer_low_watermark = 64;
1035 		}
1036 	}
1037 
1038 	memset(&args, 0, sizeof(args));
1039 	args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
1040 	args.in_args = (uint8_t *)vc_rxqs;
1041 	args.in_args_size = size;
1042 	args.out_buffer = adapter->mbx_resp;
1043 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
1044 
1045 	err = idpf_vc_cmd_execute(adapter, &args);
1046 	rte_free(vc_rxqs);
1047 	if (err != 0)
1048 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
1049 
1050 	return err;
1051 }
1052 
1053 int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info,
1054 			       uint16_t num_qs)
1055 {
1056 	struct idpf_adapter *adapter = vport->adapter;
1057 	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
1058 	struct idpf_cmd_info args;
1059 	int size, err;
1060 
1061 	size = sizeof(*vc_rxqs) + (num_qs - 1) *
1062 		sizeof(struct virtchnl2_rxq_info);
1063 	vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
1064 	if (vc_rxqs == NULL) {
1065 		DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
1066 		err = -ENOMEM;
1067 		return err;
1068 	}
1069 	vc_rxqs->vport_id = vport->vport_id;
1070 	vc_rxqs->num_qinfo = num_qs;
1071 	memcpy(vc_rxqs->qinfo, rxq_info, num_qs * sizeof(struct virtchnl2_rxq_info));
1072 
1073 	memset(&args, 0, sizeof(args));
1074 	args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
1075 	args.in_args = (uint8_t *)vc_rxqs;
1076 	args.in_args_size = size;
1077 	args.out_buffer = adapter->mbx_resp;
1078 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
1079 
1080 	err = idpf_vc_cmd_execute(adapter, &args);
1081 	rte_free(vc_rxqs);
1082 	if (err != 0)
1083 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
1084 
1085 	return err;
1086 }
1087 
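/* Configure one Tx queue group over virtchnl: a single Tx queue in the single
 * queue model, or the Tx queue plus its completion queue in the split queue
 * model.
 */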
1088 int
1089 idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
1090 {
1091 	struct idpf_adapter *adapter = vport->adapter;
1092 	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
1093 	struct virtchnl2_txq_info *txq_info;
1094 	struct idpf_cmd_info args;
1095 	uint16_t num_qs;
1096 	int size, err;
1097 
1098 	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
1099 		num_qs = IDPF_TXQ_PER_GRP;
1100 	else
1101 		num_qs = IDPF_TXQ_PER_GRP + IDPF_TX_COMPLQ_PER_GRP;
1102 
1103 	size = sizeof(*vc_txqs) + (num_qs - 1) *
1104 		sizeof(struct virtchnl2_txq_info);
1105 	vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
1106 	if (vc_txqs == NULL) {
1107 		DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
1108 		err = -ENOMEM;
1109 		return err;
1110 	}
1111 	vc_txqs->vport_id = vport->vport_id;
1112 	vc_txqs->num_qinfo = num_qs;
1113 
1114 	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
1115 		txq_info = &vc_txqs->qinfo[0];
1116 		txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
1117 		txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
1118 		txq_info->queue_id = txq->queue_id;
1119 		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
1120 		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
1121 		txq_info->ring_len = txq->nb_tx_desc;
1122 	} else {
1123 		/* txq info */
1124 		txq_info = &vc_txqs->qinfo[0];
1125 		txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
1126 		txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
1127 		txq_info->queue_id = txq->queue_id;
1128 		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
1129 		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
1130 		txq_info->ring_len = txq->nb_tx_desc;
1131 		txq_info->tx_compl_queue_id = txq->complq->queue_id;
1132 		txq_info->relative_queue_id = txq_info->queue_id;
1133 
1134 		/* tx completion queue info */
1135 		txq_info = &vc_txqs->qinfo[1];
1136 		txq_info->dma_ring_addr = txq->complq->tx_ring_phys_addr;
1137 		txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
1138 		txq_info->queue_id = txq->complq->queue_id;
1139 		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
1140 		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
1141 		txq_info->ring_len = txq->complq->nb_tx_desc;
1142 	}
1143 
1144 	memset(&args, 0, sizeof(args));
1145 	args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
1146 	args.in_args = (uint8_t *)vc_txqs;
1147 	args.in_args_size = size;
1148 	args.out_buffer = adapter->mbx_resp;
1149 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
1150 
1151 	err = idpf_vc_cmd_execute(adapter, &args);
1152 	rte_free(vc_txqs);
1153 	if (err != 0)
1154 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
1155 
1156 	return err;
1157 }
1158 
1159 int
1160 idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
1161 		       uint16_t num_qs)
1162 {
1163 	struct idpf_adapter *adapter = vport->adapter;
1164 	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
1165 	struct idpf_cmd_info args;
1166 	int size, err;
1167 
1168 	size = sizeof(*vc_txqs) + (num_qs - 1) * sizeof(struct virtchnl2_txq_info);
1169 	vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
1170 	if (vc_txqs == NULL) {
1171 		DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
1172 		err = -ENOMEM;
1173 		return err;
1174 	}
1175 	vc_txqs->vport_id = vport->vport_id;
1176 	vc_txqs->num_qinfo = num_qs;
1177 	memcpy(vc_txqs->qinfo, txq_info, num_qs * sizeof(struct virtchnl2_txq_info));
1178 
1179 	memset(&args, 0, sizeof(args));
1180 	args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
1181 	args.in_args = (uint8_t *)vc_txqs;
1182 	args.in_args_size = size;
1183 	args.out_buffer = adapter->mbx_resp;
1184 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
1185 
1186 	err = idpf_vc_cmd_execute(adapter, &args);
1187 	rte_free(vc_txqs);
1188 	if (err != 0)
1189 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
1190 
1191 	return err;
1192 }
1193 
1194 int
1195 idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
1196 		  struct idpf_ctlq_msg *q_msg)
1197 {
1198 	return idpf_ctlq_recv(cq, num_q_msg, q_msg);
1199 }
1200 
1201 int
1202 idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
1203 			   u16 *buff_count, struct idpf_dma_mem **buffs)
1204 {
1205 	return idpf_ctlq_post_rx_buffs(hw, cq, buff_count, buffs);
1206 }
1207