xref: /dpdk/drivers/net/intel/iavf/iavf_vchnl.c (revision b92babc246830ede6c33a2dfa1d6291076b1a81d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4 
5 #include <fcntl.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <inttypes.h>
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_os_shim.h>
16 
17 #include <rte_debug.h>
18 #include <rte_alarm.h>
19 #include <rte_atomic.h>
20 #include <rte_eal.h>
21 #include <rte_ether.h>
22 #include <ethdev_driver.h>
23 #include <ethdev_pci.h>
24 #include <dev_driver.h>
25 
26 #include "iavf.h"
27 #include "iavf_rxtx.h"
28 
29 #define MAX_TRY_TIMES 2000
30 #define ASQ_DELAY_MS  1
31 
32 #define MAX_EVENT_PENDING 16
33 
34 struct iavf_event_element {
35 	TAILQ_ENTRY(iavf_event_element) next;
36 	struct rte_eth_dev *dev;
37 	enum rte_eth_event_type event;
38 	void *param;
39 	size_t param_alloc_size;
40 	uint8_t param_alloc_data[0];
41 };
42 
/* Process-wide state for the deferred-event dispatch thread. */
struct iavf_event_handler {
	RTE_ATOMIC(uint32_t) ndev;	/* refcount of ports using the handler */
	rte_thread_t tid;		/* dispatch thread id */
	int fd[2];			/* self-pipe: fd[0] read end, fd[1] write end */
	pthread_mutex_t lock;		/* protects the pending list */
	TAILQ_HEAD(event_list, iavf_event_element) pending;	/* queued events */
};
50 
/* Singleton; fds stay -1 (invalid) until iavf_dev_event_handler_init(). */
static struct iavf_event_handler event_handler = {
	.fd = {-1, -1},
};
54 
/* Fallback for libcs whose <sys/queue.h> lacks TAILQ_FOREACH_SAFE:
 * like TAILQ_FOREACH, but the successor is saved in 'tvar' before the
 * loop body runs, so the current element may be removed or freed.
 */
#ifndef TAILQ_FOREACH_SAFE
#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
	for ((var) = TAILQ_FIRST((head)); \
		(var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
	(var) = (tvar))
#endif
61 
/**
 * Body of the event thread: block on the self-pipe, then drain the
 * pending list and run the ethdev callbacks outside any driver lock.
 * Returns 0 when the pipe is closed (handler fini) or read fails.
 */
static uint32_t
iavf_dev_event_handle(void *param __rte_unused)
{
	struct iavf_event_handler *handler = &event_handler;
	TAILQ_HEAD(event_list, iavf_event_element) pending;

	while (true) {
		char unused[MAX_EVENT_PENDING];
		/* Sleep until iavf_dev_event_post() writes a wakeup byte;
		 * nr <= 0 means the write end was closed or cancelled.
		 */
		ssize_t nr = read(handler->fd[0], &unused, sizeof(unused));
		if (nr <= 0)
			break;

		/* Steal the whole pending list under the lock so callbacks
		 * run without holding it.
		 */
		TAILQ_INIT(&pending);
		pthread_mutex_lock(&handler->lock);
		TAILQ_CONCAT(&pending, &handler->pending, next);
		pthread_mutex_unlock(&handler->lock);

		struct iavf_event_element *pos, *save_next;
		TAILQ_FOREACH_SAFE(pos, &pending, next, save_next) {
			TAILQ_REMOVE(&pending, pos, next);

			struct iavf_adapter *adapter = pos->dev->data->dev_private;
			/* With the auto_reset devarg, reset events are handled
			 * in-driver instead of being forwarded to the app.
			 */
			if (pos->event == RTE_ETH_EVENT_INTR_RESET &&
			    adapter->devargs.auto_reset) {
				iavf_handle_hw_reset(pos->dev);
				rte_free(pos);
				continue;
			}

			rte_eth_dev_callback_process(pos->dev, pos->event, pos->param);
			rte_free(pos);
		}
	}

	return 0;
}
98 
99 void
100 iavf_dev_event_post(struct rte_eth_dev *dev,
101 		enum rte_eth_event_type event,
102 		void *param, size_t param_alloc_size)
103 {
104 	struct iavf_event_handler *handler = &event_handler;
105 	char notify_byte;
106 	struct iavf_event_element *elem = rte_malloc(NULL, sizeof(*elem) + param_alloc_size, 0);
107 	if (!elem)
108 		return;
109 
110 	elem->dev = dev;
111 	elem->event = event;
112 	elem->param = param;
113 	elem->param_alloc_size = param_alloc_size;
114 	if (param && param_alloc_size) {
115 		rte_memcpy(elem->param_alloc_data, param, param_alloc_size);
116 		elem->param = elem->param_alloc_data;
117 	}
118 
119 	pthread_mutex_lock(&handler->lock);
120 	TAILQ_INSERT_TAIL(&handler->pending, elem, next);
121 	pthread_mutex_unlock(&handler->lock);
122 
123 	ssize_t nw = write(handler->fd[1], &notify_byte, 1);
124 	RTE_SET_USED(nw);
125 }
126 
127 int
128 iavf_dev_event_handler_init(void)
129 {
130 	struct iavf_event_handler *handler = &event_handler;
131 
132 	if (rte_atomic_fetch_add_explicit(&handler->ndev, 1, rte_memory_order_relaxed) + 1 != 1)
133 		return 0;
134 #if defined(RTE_EXEC_ENV_IS_WINDOWS) && RTE_EXEC_ENV_IS_WINDOWS != 0
135 	int err = _pipe(handler->fd, MAX_EVENT_PENDING, O_BINARY);
136 #else
137 	int err = pipe(handler->fd);
138 #endif
139 	if (err != 0) {
140 		rte_atomic_fetch_sub_explicit(&handler->ndev, 1, rte_memory_order_relaxed);
141 		return -1;
142 	}
143 
144 	TAILQ_INIT(&handler->pending);
145 	pthread_mutex_init(&handler->lock, NULL);
146 
147 	if (rte_thread_create_internal_control(&handler->tid, "iavf-event",
148 				iavf_dev_event_handle, NULL)) {
149 		rte_atomic_fetch_sub_explicit(&handler->ndev, 1, rte_memory_order_relaxed);
150 		return -1;
151 	}
152 
153 	return 0;
154 }
155 
/**
 * Tear down the shared event thread when the last port releases it.
 * Cancels the thread and closes the pipe (either unblocks the read),
 * joins it, then frees any events that were still queued.
 */
void
iavf_dev_event_handler_fini(void)
{
	struct iavf_event_handler *handler = &event_handler;

	/* Only the last user tears the handler down. */
	if (rte_atomic_fetch_sub_explicit(&handler->ndev, 1, rte_memory_order_relaxed) - 1 != 0)
		return;

	int unused = pthread_cancel((pthread_t)handler->tid.opaque_id);
	RTE_SET_USED(unused);
	/* Closing the read end also makes a pending read() return. */
	close(handler->fd[0]);
	close(handler->fd[1]);
	handler->fd[0] = -1;
	handler->fd[1] = -1;

	rte_thread_join(handler->tid, NULL);
	pthread_mutex_destroy(&handler->lock);

	/* Drop events the thread never got to; no lock needed, it is gone. */
	struct iavf_event_element *pos, *save_next;
	TAILQ_FOREACH_SAFE(pos, &handler->pending, next, save_next) {
		TAILQ_REMOVE(&handler->pending, pos, next);
		rte_free(pos);
	}
}
180 
181 static uint32_t
182 iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
183 {
184 	uint32_t speed;
185 
186 	switch (virt_link_speed) {
187 	case VIRTCHNL_LINK_SPEED_100MB:
188 		speed = 100;
189 		break;
190 	case VIRTCHNL_LINK_SPEED_1GB:
191 		speed = 1000;
192 		break;
193 	case VIRTCHNL_LINK_SPEED_10GB:
194 		speed = 10000;
195 		break;
196 	case VIRTCHNL_LINK_SPEED_40GB:
197 		speed = 40000;
198 		break;
199 	case VIRTCHNL_LINK_SPEED_20GB:
200 		speed = 20000;
201 		break;
202 	case VIRTCHNL_LINK_SPEED_25GB:
203 		speed = 25000;
204 		break;
205 	case VIRTCHNL_LINK_SPEED_2_5GB:
206 		speed = 2500;
207 		break;
208 	case VIRTCHNL_LINK_SPEED_5GB:
209 		speed = 5000;
210 		break;
211 	default:
212 		speed = 0;
213 		break;
214 	}
215 
216 	return speed;
217 }
218 
/* Read one message from the AdminQ and classify it.
 *
 * Returns:
 *  IAVF_MSG_NON - queue empty, nothing to do
 *  IAVF_MSG_ERR - AQ read failure or reply for an unexpected opcode
 *  IAVF_MSG_SYS - unsolicited PF event (link/reset/close), state updated
 *  IAVF_MSG_CMD - reply to the command this VF is currently waiting on
 * Side effects: updates vf->cmd_retval and, for events, cached link/reset
 * state, the watchdog and the no-poll flag.
 */
static enum iavf_aq_result
iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
		     uint8_t *buf)
{
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	struct iavf_arq_event_info event;
	enum iavf_aq_result result = IAVF_MSG_NON;
	enum virtchnl_ops opcode;
	int ret;

	event.buf_len = buf_len;
	event.msg_buf = buf;
	ret = iavf_clean_arq_element(hw, &event, NULL);
	/* Can't read any msg from adminQ */
	if (ret) {
		PMD_DRV_LOG(DEBUG, "Can't read msg from AQ");
		/* "No work" is the normal empty-queue case, not an error. */
		if (ret != IAVF_ERR_ADMIN_QUEUE_NO_WORK)
			result = IAVF_MSG_ERR;
		return result;
	}

	/* PF puts the virtchnl opcode in cookie_high and the status in
	 * cookie_low of the descriptor.
	 */
	opcode = (enum virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high);
	vf->cmd_retval = (enum virtchnl_status_code)rte_le_to_cpu_32(
			event.desc.cookie_low);

	PMD_DRV_LOG(DEBUG, "AQ from pf carries opcode %u, retval %d",
		    opcode, vf->cmd_retval);

	if (opcode == VIRTCHNL_OP_EVENT) {
		struct virtchnl_pf_event *vpe =
			(struct virtchnl_pf_event *)event.msg_buf;

		result = IAVF_MSG_SYS;
		switch (vpe->event) {
		case VIRTCHNL_EVENT_LINK_CHANGE:
			vf->link_up =
				vpe->event_data.link_event.link_status;
			/* Advanced-speed PFs report Mbps directly; legacy
			 * ones use the virtchnl_link_speed enum.
			 */
			if (vf->vf_res != NULL &&
			    vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
				vf->link_speed =
				    vpe->event_data.link_event_adv.link_speed;
			} else {
				enum virtchnl_link_speed speed;
				speed = vpe->event_data.link_event.link_speed;
				vf->link_speed = iavf_convert_link_speed(speed);
			}
			iavf_dev_link_update(vf->eth_dev, 0);
			iavf_dev_event_post(vf->eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL, 0);
			/* Watchdog is armed while the link is down and
			 * disarmed when it is up (and no reset pending).
			 */
			if (vf->link_up && !vf->vf_reset) {
				iavf_dev_watchdog_disable(adapter);
			} else {
				if (!vf->link_up)
					iavf_dev_watchdog_enable(adapter);
			}
			if (adapter->devargs.no_poll_on_link_down) {
				iavf_set_no_poll(adapter, true);
				if (adapter->no_poll)
					PMD_DRV_LOG(DEBUG, "VF no poll turned on");
				else
					PMD_DRV_LOG(DEBUG, "VF no poll turned off");
			}
			PMD_DRV_LOG(INFO, "Link status update:%s",
					vf->link_up ? "up" : "down");
			break;
		case VIRTCHNL_EVENT_RESET_IMPENDING:
			vf->vf_reset = true;
			iavf_set_no_poll(adapter, false);
			PMD_DRV_LOG(INFO, "VF is resetting");
			break;
		case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
			vf->dev_closed = true;
			PMD_DRV_LOG(INFO, "PF driver closed");
			break;
		default:
			PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf",
					__func__, vpe->event);
		}
	}  else {
		/* async reply msg on command issued by vf previously */
		result = IAVF_MSG_CMD;
		if (opcode != vf->pend_cmd) {
			PMD_DRV_LOG(WARNING, "command mismatch, expect %u, get %u",
					vf->pend_cmd, opcode);
			result = IAVF_MSG_ERR;
		}
	}

	return result;
}
310 
311 static int
312 iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args,
313 	int async)
314 {
315 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
316 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
317 	enum iavf_aq_result result;
318 	enum iavf_status ret;
319 	int err = 0;
320 	int i = 0;
321 
322 	if (vf->vf_reset)
323 		return -EIO;
324 
325 
326 	if (async) {
327 		if (_atomic_set_async_response_cmd(vf, args->ops))
328 			return -1;
329 	} else {
330 		if (_atomic_set_cmd(vf, args->ops))
331 			return -1;
332 	}
333 
334 	ret = iavf_aq_send_msg_to_pf(hw, args->ops, IAVF_SUCCESS,
335 				    args->in_args, args->in_args_size, NULL);
336 	if (ret) {
337 		PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
338 		_clear_cmd(vf);
339 		return err;
340 	}
341 
342 	switch (args->ops) {
343 	case VIRTCHNL_OP_RESET_VF:
344 	case VIRTCHNL_OP_REQUEST_QUEUES:
345 		/*no need to wait for response */
346 		_clear_cmd(vf);
347 		break;
348 	case VIRTCHNL_OP_VERSION:
349 	case VIRTCHNL_OP_GET_VF_RESOURCES:
350 	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
351 	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
352 		/* for init virtchnl ops, need to poll the response */
353 		do {
354 			result = iavf_read_msg_from_pf(adapter, args->out_size,
355 						   args->out_buffer);
356 			if (result == IAVF_MSG_CMD)
357 				break;
358 			iavf_msec_delay(ASQ_DELAY_MS);
359 		} while (i++ < MAX_TRY_TIMES);
360 		if (i >= MAX_TRY_TIMES ||
361 		    vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
362 			err = -1;
363 			PMD_DRV_LOG(ERR, "No response or return failure (%d)"
364 				    " for cmd %d", vf->cmd_retval, args->ops);
365 		}
366 		_clear_cmd(vf);
367 		break;
368 	default:
369 		if (rte_thread_is_intr()) {
370 			/* For virtchnl ops were executed in eal_intr_thread,
371 			 * need to poll the response.
372 			 */
373 			do {
374 				result = iavf_read_msg_from_pf(adapter, args->out_size,
375 							args->out_buffer);
376 				if (result == IAVF_MSG_CMD)
377 					break;
378 				iavf_msec_delay(ASQ_DELAY_MS);
379 			} while (i++ < MAX_TRY_TIMES);
380 			if (i >= MAX_TRY_TIMES ||
381 				vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
382 				err = -1;
383 				PMD_DRV_LOG(ERR, "No response or return failure (%d)"
384 						" for cmd %d", vf->cmd_retval, args->ops);
385 			}
386 			_clear_cmd(vf);
387 		} else {
388 			/* For other virtchnl ops in running time,
389 			 * wait for the cmd done flag.
390 			 */
391 			do {
392 				if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN)
393 					break;
394 				iavf_msec_delay(ASQ_DELAY_MS);
395 				/* If don't read msg or read sys event, continue */
396 			} while (i++ < MAX_TRY_TIMES);
397 
398 			if (i >= MAX_TRY_TIMES) {
399 				PMD_DRV_LOG(ERR, "No response for cmd %d", args->ops);
400 				_clear_cmd(vf);
401 				err = -EIO;
402 			} else if (vf->cmd_retval ==
403 				VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) {
404 				PMD_DRV_LOG(ERR, "Cmd %d not supported", args->ops);
405 				err = -ENOTSUP;
406 			} else if (vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
407 				PMD_DRV_LOG(ERR, "Return failure %d for cmd %d",
408 						vf->cmd_retval, args->ops);
409 				err = -EINVAL;
410 			}
411 		}
412 		break;
413 	}
414 
415 	return err;
416 }
417 
418 static int
419 iavf_execute_vf_cmd_safe(struct iavf_adapter *adapter,
420 	struct iavf_cmd_info *args, int async)
421 {
422 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
423 	int ret;
424 	int is_intr_thread = rte_thread_is_intr();
425 
426 	if (is_intr_thread) {
427 		if (!rte_spinlock_trylock(&vf->aq_lock))
428 			return -EIO;
429 	} else {
430 		rte_spinlock_lock(&vf->aq_lock);
431 	}
432 	ret = iavf_execute_vf_cmd(adapter, args, async);
433 	rte_spinlock_unlock(&vf->aq_lock);
434 
435 	return ret;
436 }
437 
/**
 * Handle an unsolicited PF->VF event (reset impending, link change,
 * PF driver close) received via the AdminQ interrupt path, updating
 * cached VF state and posting the matching ethdev event.
 *
 * @param msg     raw virtchnl_pf_event payload
 * @param msglen  payload length; shorter payloads are dropped
 */
static void
iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
			uint16_t msglen)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = &adapter->vf;
	struct virtchnl_pf_event *pf_msg =
			(struct virtchnl_pf_event *)msg;

	/* Ignore events that race with port close. */
	if (adapter->closed) {
		PMD_DRV_LOG(DEBUG, "Port closed");
		return;
	}

	/* A truncated payload cannot be interpreted safely. */
	if (msglen < sizeof(struct virtchnl_pf_event)) {
		PMD_DRV_LOG(DEBUG, "Error event");
		return;
	}
	switch (pf_msg->event) {
	case VIRTCHNL_EVENT_RESET_IMPENDING:
		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
		vf->link_up = false;
		/* Only the first notification posts the reset event. */
		if (!vf->vf_reset) {
			vf->vf_reset = true;
			iavf_set_no_poll(adapter, false);
			iavf_dev_event_post(dev, RTE_ETH_EVENT_INTR_RESET,
				NULL, 0);
		}
		break;
	case VIRTCHNL_EVENT_LINK_CHANGE:
		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
		vf->link_up = pf_msg->event_data.link_event.link_status;
		/* Advanced-speed PFs report Mbps directly; legacy ones
		 * use the virtchnl_link_speed enum.
		 */
		if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
			vf->link_speed =
				pf_msg->event_data.link_event_adv.link_speed;
		} else {
			enum virtchnl_link_speed speed;
			speed = pf_msg->event_data.link_event.link_speed;
			vf->link_speed = iavf_convert_link_speed(speed);
		}
		iavf_dev_link_update(dev, 0);
		/* Watchdog is armed on link down, disarmed on link up
		 * (unless a reset is pending).
		 */
		if (vf->link_up && !vf->vf_reset) {
			iavf_dev_watchdog_disable(adapter);
		} else {
			if (!vf->link_up)
				iavf_dev_watchdog_enable(adapter);
		}
		if (adapter->devargs.no_poll_on_link_down) {
			iavf_set_no_poll(adapter, true);
			if (adapter->no_poll)
				PMD_DRV_LOG(DEBUG, "VF no poll turned on");
			else
				PMD_DRV_LOG(DEBUG, "VF no poll turned off");
		}
		iavf_dev_event_post(dev, RTE_ETH_EVENT_INTR_LSC, NULL, 0);
		break;
	case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
		break;
	default:
		PMD_DRV_LOG(ERR, " unknown event received %u", pf_msg->event);
		break;
	}
}
503 
/**
 * AdminQ interrupt service routine: drain all pending descriptors.
 * PF events go to iavf_handle_pf_event_msg(); unsolicited inline-IPsec
 * events are re-posted as ethdev events; everything else is matched
 * against the command this VF is waiting on and completes it.
 */
void
iavf_handle_virtchnl_msg(struct rte_eth_dev *dev)
{
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct iavf_arq_event_info info;
	uint16_t pending, aq_opc;
	enum virtchnl_ops msg_opc;
	enum iavf_status msg_ret;
	int ret;

	info.buf_len = IAVF_AQ_BUF_SZ;
	if (!vf->aq_resp) {
		PMD_DRV_LOG(ERR, "Buffer for adminq resp should not be NULL");
		return;
	}
	info.msg_buf = vf->aq_resp;

	/* Loop until iavf_clean_arq_element reports no more pending work. */
	pending = 1;
	while (pending) {
		ret = iavf_clean_arq_element(hw, &info, &pending);

		if (ret != IAVF_SUCCESS) {
			PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ,"
				    "ret: %d", ret);
			break;
		}
		aq_opc = rte_le_to_cpu_16(info.desc.opcode);
		/* For the message sent from pf to vf, opcode is stored in
		 * cookie_high of struct iavf_aq_desc, while return error code
		 * are stored in cookie_low, Which is done by PF driver.
		 */
		msg_opc = (enum virtchnl_ops)rte_le_to_cpu_32(
						  info.desc.cookie_high);
		msg_ret = (enum iavf_status)rte_le_to_cpu_32(
						  info.desc.cookie_low);
		switch (aq_opc) {
		case iavf_aqc_opc_send_msg_to_vf:
			if (msg_opc == VIRTCHNL_OP_EVENT) {
				iavf_handle_pf_event_msg(dev, info.msg_buf,
						info.msg_len);
			} else {
				/* check for unsolicited messages i.e. events */
				if (info.msg_len > 0) {
					switch (msg_opc) {
					case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO: {
						struct inline_ipsec_msg *imsg =
							(struct inline_ipsec_msg *)info.msg_buf;
						if (imsg->ipsec_opcode
								== INLINE_IPSEC_OP_EVENT) {
							struct rte_eth_event_ipsec_desc desc;
							struct virtchnl_ipsec_event *ev =
									imsg->ipsec_data.event;
							desc.subtype =
									RTE_ETH_EVENT_IPSEC_UNKNOWN;
							desc.metadata =
									ev->ipsec_event_data;
							iavf_dev_event_post(dev,
								RTE_ETH_EVENT_IPSEC,
								&desc, sizeof(desc));
							/* Event consumed: skip the
							 * pend_cmd matching below.
							 */
							continue;
					}
				}
						break;
					default:
						break;
					}

				}

				/* read message and it's expected one */
				if (msg_opc == vf->pend_cmd) {
					/* Some commands expect several replies;
					 * only the last one wakes the waiter.
					 */
					uint32_t cmd_count =
					rte_atomic_fetch_sub_explicit(&vf->pend_cmd_count,
							1, rte_memory_order_relaxed) - 1;
					if (cmd_count == 0)
						_notify_cmd(vf, msg_ret);
				} else {
					PMD_DRV_LOG(ERR,
					"command mismatch, expect %u, get %u",
						vf->pend_cmd, msg_opc);
				}
				PMD_DRV_LOG(DEBUG,
				"adminq response is received, opcode = %d",
						msg_opc);
			}
			break;
		default:
			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
				    aq_opc);
			break;
		}
	}
}
598 
599 int
600 iavf_enable_vlan_strip(struct iavf_adapter *adapter)
601 {
602 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
603 	struct iavf_cmd_info args;
604 	int ret;
605 
606 	memset(&args, 0, sizeof(args));
607 	args.ops = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
608 	args.in_args = NULL;
609 	args.in_args_size = 0;
610 	args.out_buffer = vf->aq_resp;
611 	args.out_size = IAVF_AQ_BUF_SZ;
612 	ret = iavf_execute_vf_cmd_safe(adapter, &args, 0);
613 	if (ret)
614 		PMD_DRV_LOG(ERR, "Failed to execute command of"
615 			    " OP_ENABLE_VLAN_STRIPPING");
616 
617 	return ret;
618 }
619 
620 int
621 iavf_disable_vlan_strip(struct iavf_adapter *adapter)
622 {
623 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
624 	struct iavf_cmd_info args;
625 	int ret;
626 
627 	memset(&args, 0, sizeof(args));
628 	args.ops = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
629 	args.in_args = NULL;
630 	args.in_args_size = 0;
631 	args.out_buffer = vf->aq_resp;
632 	args.out_size = IAVF_AQ_BUF_SZ;
633 	ret = iavf_execute_vf_cmd_safe(adapter, &args, 0);
634 	if (ret)
635 		PMD_DRV_LOG(ERR, "Failed to execute command of"
636 			    " OP_DISABLE_VLAN_STRIPPING");
637 
638 	return ret;
639 }
640 
/* Lowest virtchnl API version (1.1) this adaptive VF driver accepts. */
#define VIRTCHNL_VERSION_MAJOR_START 1
#define VIRTCHNL_VERSION_MINOR_START 1
643 
644 /* Check API version with sync wait until version read from admin queue */
645 int
646 iavf_check_api_version(struct iavf_adapter *adapter)
647 {
648 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
649 	struct virtchnl_version_info version, *pver;
650 	struct iavf_cmd_info args;
651 	int err;
652 
653 	version.major = VIRTCHNL_VERSION_MAJOR;
654 	version.minor = VIRTCHNL_VERSION_MINOR;
655 
656 	args.ops = VIRTCHNL_OP_VERSION;
657 	args.in_args = (uint8_t *)&version;
658 	args.in_args_size = sizeof(version);
659 	args.out_buffer = vf->aq_resp;
660 	args.out_size = IAVF_AQ_BUF_SZ;
661 
662 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
663 	if (err) {
664 		PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION");
665 		return err;
666 	}
667 
668 	pver = (struct virtchnl_version_info *)args.out_buffer;
669 	vf->virtchnl_version = *pver;
670 
671 	if (vf->virtchnl_version.major < VIRTCHNL_VERSION_MAJOR_START ||
672 	    (vf->virtchnl_version.major == VIRTCHNL_VERSION_MAJOR_START &&
673 	     vf->virtchnl_version.minor < VIRTCHNL_VERSION_MINOR_START)) {
674 		PMD_INIT_LOG(ERR, "VIRTCHNL API version should not be lower"
675 			     " than (%u.%u) to support Adaptive VF",
676 			     VIRTCHNL_VERSION_MAJOR_START,
677 			     VIRTCHNL_VERSION_MAJOR_START);
678 		return -1;
679 	} else if (vf->virtchnl_version.major > VIRTCHNL_VERSION_MAJOR ||
680 		   (vf->virtchnl_version.major == VIRTCHNL_VERSION_MAJOR &&
681 		    vf->virtchnl_version.minor > VIRTCHNL_VERSION_MINOR)) {
682 		PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
683 			     vf->virtchnl_version.major,
684 			     vf->virtchnl_version.minor,
685 			     VIRTCHNL_VERSION_MAJOR,
686 			     VIRTCHNL_VERSION_MINOR);
687 		return -1;
688 	}
689 
690 	PMD_DRV_LOG(DEBUG, "Peer is supported PF host");
691 	return 0;
692 }
693 
/**
 * Negotiate capabilities with the PF via OP_GET_VF_RESOURCES, copy the
 * returned resource map into vf->vf_res, and locate the SR-IOV LAN VSI
 * this VF will use.  Returns 0 on success, -1 on failure.
 */
int
iavf_get_vf_resource(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	struct iavf_cmd_info args;
	uint32_t caps, len;
	int err, i;

	args.ops = VIRTCHNL_OP_GET_VF_RESOURCES;
	args.out_buffer = vf->aq_resp;
	args.out_size = IAVF_AQ_BUF_SZ;

	/* Every capability this driver can exploit; the PF answers with
	 * the subset it actually grants (in vf_res->vf_cap_flags).
	 */
	caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
		VIRTCHNL_VF_OFFLOAD_FDIR_PF |
		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
		VIRTCHNL_VF_OFFLOAD_FSUB_PF |
		VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
		VIRTCHNL_VF_OFFLOAD_USO |
		VIRTCHNL_VF_OFFLOAD_CRC |
		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
		VIRTCHNL_VF_LARGE_NUM_QPAIRS |
		VIRTCHNL_VF_OFFLOAD_QOS |
		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO |
		VIRTCHNL_VF_CAP_PTP;

	args.in_args = (uint8_t *)&caps;
	args.in_args_size = sizeof(caps);

	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);

	if (err) {
		PMD_DRV_LOG(ERR,
			    "Failed to execute command of OP_GET_VF_RESOURCE");
		return -1;
	}

	/* Bound the copy by the largest layout this driver understands. */
	len =  sizeof(struct virtchnl_vf_resource) +
		      IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);

	rte_memcpy(vf->vf_res, args.out_buffer,
		   RTE_MIN(args.out_size, len));
	/* parse  VF config message back from PF*/
	iavf_vf_parse_hw_config(hw, vf->vf_res);
	/* Pick the SR-IOV VSI out of the returned VSI list. */
	for (i = 0; i < vf->vf_res->num_vsis; i++) {
		if (vf->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
			vf->vsi_res = &vf->vf_res->vsi_res[i];
	}

	if (!vf->vsi_res) {
		PMD_INIT_LOG(ERR, "no LAN VSI found");
		return -1;
	}

	vf->vsi.vsi_id = vf->vsi_res->vsi_id;
	vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
	vf->vsi.adapter = adapter;

	return 0;
}
755 
756 int
757 iavf_get_supported_rxdid(struct iavf_adapter *adapter)
758 {
759 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
760 	struct iavf_cmd_info args;
761 	int ret;
762 
763 	args.ops = VIRTCHNL_OP_GET_SUPPORTED_RXDIDS;
764 	args.in_args = NULL;
765 	args.in_args_size = 0;
766 	args.out_buffer = vf->aq_resp;
767 	args.out_size = IAVF_AQ_BUF_SZ;
768 
769 	ret = iavf_execute_vf_cmd_safe(adapter, &args, 0);
770 	if (ret) {
771 		PMD_DRV_LOG(ERR,
772 			    "Failed to execute command of OP_GET_SUPPORTED_RXDIDS");
773 		return ret;
774 	}
775 
776 	vf->supported_rxdid =
777 		((struct virtchnl_supported_rxdids *)args.out_buffer)->supported_rxdids;
778 
779 	return 0;
780 }
781 
782 int
783 iavf_config_vlan_strip_v2(struct iavf_adapter *adapter, bool enable)
784 {
785 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
786 	struct virtchnl_vlan_supported_caps *stripping_caps;
787 	struct virtchnl_vlan_setting vlan_strip;
788 	struct iavf_cmd_info args;
789 	uint32_t *ethertype;
790 	int ret;
791 
792 	stripping_caps = &vf->vlan_v2_caps.offloads.stripping_support;
793 
794 	if ((stripping_caps->outer & VIRTCHNL_VLAN_ETHERTYPE_8100) &&
795 	    (stripping_caps->outer & VIRTCHNL_VLAN_TOGGLE))
796 		ethertype = &vlan_strip.outer_ethertype_setting;
797 	else if ((stripping_caps->inner & VIRTCHNL_VLAN_ETHERTYPE_8100) &&
798 		 (stripping_caps->inner & VIRTCHNL_VLAN_TOGGLE))
799 		ethertype = &vlan_strip.inner_ethertype_setting;
800 	else
801 		return -ENOTSUP;
802 
803 	memset(&vlan_strip, 0, sizeof(vlan_strip));
804 	vlan_strip.vport_id = vf->vsi_res->vsi_id;
805 	*ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100;
806 
807 	args.ops = enable ? VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 :
808 			    VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2;
809 	args.in_args = (uint8_t *)&vlan_strip;
810 	args.in_args_size = sizeof(vlan_strip);
811 	args.out_buffer = vf->aq_resp;
812 	args.out_size = IAVF_AQ_BUF_SZ;
813 	ret = iavf_execute_vf_cmd_safe(adapter, &args, 0);
814 	if (ret)
815 		PMD_DRV_LOG(ERR, "fail to execute command %s",
816 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2" :
817 				     "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2");
818 
819 	return ret;
820 }
821 
822 int
823 iavf_config_vlan_insert_v2(struct iavf_adapter *adapter, bool enable)
824 {
825 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
826 	struct virtchnl_vlan_supported_caps *insertion_caps;
827 	struct virtchnl_vlan_setting vlan_insert;
828 	struct iavf_cmd_info args;
829 	uint32_t *ethertype;
830 	int ret;
831 
832 	insertion_caps = &vf->vlan_v2_caps.offloads.insertion_support;
833 
834 	if ((insertion_caps->outer & VIRTCHNL_VLAN_ETHERTYPE_8100) &&
835 	    (insertion_caps->outer & VIRTCHNL_VLAN_TOGGLE))
836 		ethertype = &vlan_insert.outer_ethertype_setting;
837 	else if ((insertion_caps->inner & VIRTCHNL_VLAN_ETHERTYPE_8100) &&
838 		 (insertion_caps->inner & VIRTCHNL_VLAN_TOGGLE))
839 		ethertype = &vlan_insert.inner_ethertype_setting;
840 	else
841 		return -ENOTSUP;
842 
843 	memset(&vlan_insert, 0, sizeof(vlan_insert));
844 	vlan_insert.vport_id = vf->vsi_res->vsi_id;
845 	*ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100;
846 
847 	args.ops = enable ? VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 :
848 			    VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2;
849 	args.in_args = (uint8_t *)&vlan_insert;
850 	args.in_args_size = sizeof(vlan_insert);
851 	args.out_buffer = vf->aq_resp;
852 	args.out_size = IAVF_AQ_BUF_SZ;
853 	ret = iavf_execute_vf_cmd_safe(adapter, &args, 0);
854 	if (ret)
855 		PMD_DRV_LOG(ERR, "fail to execute command %s",
856 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2" :
857 				     "VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2");
858 
859 	return ret;
860 }
861 
862 int
863 iavf_add_del_vlan_v2(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
864 {
865 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
866 	struct virtchnl_vlan_supported_caps *supported_caps;
867 	struct virtchnl_vlan_filter_list_v2 vlan_filter;
868 	struct virtchnl_vlan *vlan_setting;
869 	struct iavf_cmd_info args;
870 	uint32_t filtering_caps;
871 	int err;
872 
873 	supported_caps = &vf->vlan_v2_caps.filtering.filtering_support;
874 	if (supported_caps->outer) {
875 		filtering_caps = supported_caps->outer;
876 		vlan_setting = &vlan_filter.filters[0].outer;
877 	} else {
878 		filtering_caps = supported_caps->inner;
879 		vlan_setting = &vlan_filter.filters[0].inner;
880 	}
881 
882 	if (!(filtering_caps & VIRTCHNL_VLAN_ETHERTYPE_8100))
883 		return -ENOTSUP;
884 
885 	memset(&vlan_filter, 0, sizeof(vlan_filter));
886 	vlan_filter.vport_id = vf->vsi_res->vsi_id;
887 	vlan_filter.num_elements = 1;
888 	vlan_setting->tpid = RTE_ETHER_TYPE_VLAN;
889 	vlan_setting->tci = vlanid;
890 
891 	args.ops = add ? VIRTCHNL_OP_ADD_VLAN_V2 : VIRTCHNL_OP_DEL_VLAN_V2;
892 	args.in_args = (uint8_t *)&vlan_filter;
893 	args.in_args_size = sizeof(vlan_filter);
894 	args.out_buffer = vf->aq_resp;
895 	args.out_size = IAVF_AQ_BUF_SZ;
896 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
897 	if (err)
898 		PMD_DRV_LOG(ERR, "fail to execute command %s",
899 			    add ? "OP_ADD_VLAN_V2" :  "OP_DEL_VLAN_V2");
900 
901 	return err;
902 }
903 
904 int
905 iavf_get_vlan_offload_caps_v2(struct iavf_adapter *adapter)
906 {
907 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
908 	struct iavf_cmd_info args;
909 	int ret;
910 
911 	args.ops = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS;
912 	args.in_args = NULL;
913 	args.in_args_size = 0;
914 	args.out_buffer = vf->aq_resp;
915 	args.out_size = IAVF_AQ_BUF_SZ;
916 
917 	ret = iavf_execute_vf_cmd_safe(adapter, &args, 0);
918 	if (ret) {
919 		PMD_DRV_LOG(ERR,
920 			    "Failed to execute command of VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS");
921 		return ret;
922 	}
923 
924 	rte_memcpy(&vf->vlan_v2_caps, vf->aq_resp, sizeof(vf->vlan_v2_caps));
925 
926 	return 0;
927 }
928 
929 int
930 iavf_enable_queues(struct iavf_adapter *adapter)
931 {
932 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
933 	struct virtchnl_queue_select queue_select;
934 	struct iavf_cmd_info args;
935 	int err;
936 
937 	memset(&queue_select, 0, sizeof(queue_select));
938 	queue_select.vsi_id = vf->vsi_res->vsi_id;
939 
940 	queue_select.rx_queues = BIT(adapter->dev_data->nb_rx_queues) - 1;
941 	queue_select.tx_queues = BIT(adapter->dev_data->nb_tx_queues) - 1;
942 
943 	args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
944 	args.in_args = (u8 *)&queue_select;
945 	args.in_args_size = sizeof(queue_select);
946 	args.out_buffer = vf->aq_resp;
947 	args.out_size = IAVF_AQ_BUF_SZ;
948 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
949 	if (err) {
950 		PMD_DRV_LOG(ERR,
951 			    "Failed to execute command of OP_ENABLE_QUEUES");
952 		return err;
953 	}
954 	return 0;
955 }
956 
957 int
958 iavf_disable_queues(struct iavf_adapter *adapter)
959 {
960 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
961 	struct virtchnl_queue_select queue_select;
962 	struct iavf_cmd_info args;
963 	int err;
964 
965 	memset(&queue_select, 0, sizeof(queue_select));
966 	queue_select.vsi_id = vf->vsi_res->vsi_id;
967 
968 	queue_select.rx_queues = BIT(adapter->dev_data->nb_rx_queues) - 1;
969 	queue_select.tx_queues = BIT(adapter->dev_data->nb_tx_queues) - 1;
970 
971 	args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
972 	args.in_args = (u8 *)&queue_select;
973 	args.in_args_size = sizeof(queue_select);
974 	args.out_buffer = vf->aq_resp;
975 	args.out_size = IAVF_AQ_BUF_SZ;
976 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
977 	if (err) {
978 		PMD_DRV_LOG(ERR,
979 			    "Failed to execute command of OP_DISABLE_QUEUES");
980 		return err;
981 	}
982 	return 0;
983 }
984 
985 int
986 iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
987 		 bool rx, bool on)
988 {
989 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
990 	struct virtchnl_queue_select queue_select;
991 	struct iavf_cmd_info args;
992 	int err;
993 
994 	if (adapter->closed)
995 		return -EIO;
996 
997 	memset(&queue_select, 0, sizeof(queue_select));
998 	queue_select.vsi_id = vf->vsi_res->vsi_id;
999 	if (rx)
1000 		queue_select.rx_queues |= 1 << qid;
1001 	else
1002 		queue_select.tx_queues |= 1 << qid;
1003 
1004 	if (on)
1005 		args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
1006 	else
1007 		args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
1008 	args.in_args = (u8 *)&queue_select;
1009 	args.in_args_size = sizeof(queue_select);
1010 	args.out_buffer = vf->aq_resp;
1011 	args.out_size = IAVF_AQ_BUF_SZ;
1012 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
1013 	if (err)
1014 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
1015 			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
1016 	return err;
1017 }
1018 
1019 int
1020 iavf_enable_queues_lv(struct iavf_adapter *adapter)
1021 {
1022 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1023 	struct virtchnl_del_ena_dis_queues *queue_select;
1024 	struct virtchnl_queue_chunk *queue_chunk;
1025 	struct iavf_cmd_info args;
1026 	int err, len;
1027 
1028 	len = sizeof(struct virtchnl_del_ena_dis_queues) +
1029 		  sizeof(struct virtchnl_queue_chunk) *
1030 		  (IAVF_RXTX_QUEUE_CHUNKS_NUM - 1);
1031 	queue_select = rte_zmalloc("queue_select", len, 0);
1032 	if (!queue_select)
1033 		return -ENOMEM;
1034 
1035 	queue_chunk = queue_select->chunks.chunks;
1036 	queue_select->chunks.num_chunks = IAVF_RXTX_QUEUE_CHUNKS_NUM;
1037 	queue_select->vport_id = vf->vsi_res->vsi_id;
1038 
1039 	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
1040 	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
1041 	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
1042 		adapter->dev_data->nb_tx_queues;
1043 
1044 	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
1045 	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
1046 	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
1047 		adapter->dev_data->nb_rx_queues;
1048 
1049 	args.ops = VIRTCHNL_OP_ENABLE_QUEUES_V2;
1050 	args.in_args = (u8 *)queue_select;
1051 	args.in_args_size = len;
1052 	args.out_buffer = vf->aq_resp;
1053 	args.out_size = IAVF_AQ_BUF_SZ;
1054 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
1055 	if (err)
1056 		PMD_DRV_LOG(ERR,
1057 			    "Failed to execute command of OP_ENABLE_QUEUES_V2");
1058 
1059 	rte_free(queue_select);
1060 	return err;
1061 }
1062 
1063 int
1064 iavf_disable_queues_lv(struct iavf_adapter *adapter)
1065 {
1066 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1067 	struct virtchnl_del_ena_dis_queues *queue_select;
1068 	struct virtchnl_queue_chunk *queue_chunk;
1069 	struct iavf_cmd_info args;
1070 	int err, len;
1071 
1072 	len = sizeof(struct virtchnl_del_ena_dis_queues) +
1073 		  sizeof(struct virtchnl_queue_chunk) *
1074 		  (IAVF_RXTX_QUEUE_CHUNKS_NUM - 1);
1075 	queue_select = rte_zmalloc("queue_select", len, 0);
1076 	if (!queue_select)
1077 		return -ENOMEM;
1078 
1079 	queue_chunk = queue_select->chunks.chunks;
1080 	queue_select->chunks.num_chunks = IAVF_RXTX_QUEUE_CHUNKS_NUM;
1081 	queue_select->vport_id = vf->vsi_res->vsi_id;
1082 
1083 	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
1084 	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
1085 	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
1086 		adapter->dev_data->nb_tx_queues;
1087 
1088 	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
1089 	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
1090 	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
1091 		adapter->dev_data->nb_rx_queues;
1092 
1093 	args.ops = VIRTCHNL_OP_DISABLE_QUEUES_V2;
1094 	args.in_args = (u8 *)queue_select;
1095 	args.in_args_size = len;
1096 	args.out_buffer = vf->aq_resp;
1097 	args.out_size = IAVF_AQ_BUF_SZ;
1098 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
1099 	if (err)
1100 		PMD_DRV_LOG(ERR,
1101 			    "Failed to execute command of OP_DISABLE_QUEUES_V2");
1102 
1103 	rte_free(queue_select);
1104 	return err;
1105 }
1106 
1107 int
1108 iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
1109 		 bool rx, bool on)
1110 {
1111 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1112 	struct virtchnl_del_ena_dis_queues *queue_select;
1113 	struct virtchnl_queue_chunk *queue_chunk;
1114 	struct iavf_cmd_info args;
1115 	int err, len;
1116 
1117 	len = sizeof(struct virtchnl_del_ena_dis_queues);
1118 	queue_select = rte_zmalloc("queue_select", len, 0);
1119 	if (!queue_select)
1120 		return -ENOMEM;
1121 
1122 	queue_chunk = queue_select->chunks.chunks;
1123 	queue_select->chunks.num_chunks = 1;
1124 	queue_select->vport_id = vf->vsi_res->vsi_id;
1125 
1126 	if (rx) {
1127 		queue_chunk->type = VIRTCHNL_QUEUE_TYPE_RX;
1128 		queue_chunk->start_queue_id = qid;
1129 		queue_chunk->num_queues = 1;
1130 	} else {
1131 		queue_chunk->type = VIRTCHNL_QUEUE_TYPE_TX;
1132 		queue_chunk->start_queue_id = qid;
1133 		queue_chunk->num_queues = 1;
1134 	}
1135 
1136 	if (on)
1137 		args.ops = VIRTCHNL_OP_ENABLE_QUEUES_V2;
1138 	else
1139 		args.ops = VIRTCHNL_OP_DISABLE_QUEUES_V2;
1140 	args.in_args = (u8 *)queue_select;
1141 	args.in_args_size = len;
1142 	args.out_buffer = vf->aq_resp;
1143 	args.out_size = IAVF_AQ_BUF_SZ;
1144 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
1145 	if (err)
1146 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
1147 			    on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2");
1148 
1149 	rte_free(queue_select);
1150 	return err;
1151 }
1152 
1153 int
1154 iavf_configure_rss_lut(struct iavf_adapter *adapter)
1155 {
1156 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1157 	struct virtchnl_rss_lut *rss_lut;
1158 	struct iavf_cmd_info args;
1159 	int len, err = 0;
1160 
1161 	len = sizeof(*rss_lut) + vf->vf_res->rss_lut_size - 1;
1162 	rss_lut = rte_zmalloc("rss_lut", len, 0);
1163 	if (!rss_lut)
1164 		return -ENOMEM;
1165 
1166 	rss_lut->vsi_id = vf->vsi_res->vsi_id;
1167 	rss_lut->lut_entries = vf->vf_res->rss_lut_size;
1168 	rte_memcpy(rss_lut->lut, vf->rss_lut, vf->vf_res->rss_lut_size);
1169 
1170 	args.ops = VIRTCHNL_OP_CONFIG_RSS_LUT;
1171 	args.in_args = (u8 *)rss_lut;
1172 	args.in_args_size = len;
1173 	args.out_buffer = vf->aq_resp;
1174 	args.out_size = IAVF_AQ_BUF_SZ;
1175 
1176 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
1177 	if (err)
1178 		PMD_DRV_LOG(ERR,
1179 			    "Failed to execute command of OP_CONFIG_RSS_LUT");
1180 
1181 	rte_free(rss_lut);
1182 	return err;
1183 }
1184 
1185 int
1186 iavf_configure_rss_key(struct iavf_adapter *adapter)
1187 {
1188 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1189 	struct virtchnl_rss_key *rss_key;
1190 	struct iavf_cmd_info args;
1191 	int len, err = 0;
1192 
1193 	len = sizeof(*rss_key) + vf->vf_res->rss_key_size - 1;
1194 	rss_key = rte_zmalloc("rss_key", len, 0);
1195 	if (!rss_key)
1196 		return -ENOMEM;
1197 
1198 	rss_key->vsi_id = vf->vsi_res->vsi_id;
1199 	rss_key->key_len = vf->vf_res->rss_key_size;
1200 	rte_memcpy(rss_key->key, vf->rss_key, vf->vf_res->rss_key_size);
1201 
1202 	args.ops = VIRTCHNL_OP_CONFIG_RSS_KEY;
1203 	args.in_args = (u8 *)rss_key;
1204 	args.in_args_size = len;
1205 	args.out_buffer = vf->aq_resp;
1206 	args.out_size = IAVF_AQ_BUF_SZ;
1207 
1208 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
1209 	if (err)
1210 		PMD_DRV_LOG(ERR,
1211 			    "Failed to execute command of OP_CONFIG_RSS_KEY");
1212 
1213 	rte_free(rss_key);
1214 	return err;
1215 }
1216 
1217 int
1218 iavf_configure_queues(struct iavf_adapter *adapter,
1219 		uint16_t num_queue_pairs, uint16_t index)
1220 {
1221 	struct iavf_rx_queue **rxq = (struct iavf_rx_queue **)adapter->dev_data->rx_queues;
1222 	struct ci_tx_queue **txq = (struct ci_tx_queue **)adapter->dev_data->tx_queues;
1223 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1224 	struct virtchnl_vsi_queue_config_info *vc_config;
1225 	struct virtchnl_queue_pair_info *vc_qp;
1226 	struct iavf_cmd_info args;
1227 	uint16_t i, size;
1228 	int err;
1229 
1230 	size = sizeof(*vc_config) +
1231 	       sizeof(vc_config->qpair[0]) * num_queue_pairs;
1232 	vc_config = rte_zmalloc("cfg_queue", size, 0);
1233 	if (!vc_config)
1234 		return -ENOMEM;
1235 
1236 	vc_config->vsi_id = vf->vsi_res->vsi_id;
1237 	vc_config->num_queue_pairs = num_queue_pairs;
1238 
1239 	for (i = index, vc_qp = vc_config->qpair;
1240 		 i < index + num_queue_pairs;
1241 	     i++, vc_qp++) {
1242 		vc_qp->txq.vsi_id = vf->vsi_res->vsi_id;
1243 		vc_qp->txq.queue_id = i;
1244 
1245 		/* Virtchnnl configure tx queues by pairs */
1246 		if (i < adapter->dev_data->nb_tx_queues) {
1247 			vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
1248 			vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_dma;
1249 		}
1250 
1251 		vc_qp->rxq.vsi_id = vf->vsi_res->vsi_id;
1252 		vc_qp->rxq.queue_id = i;
1253 		vc_qp->rxq.max_pkt_size = vf->max_pkt_len;
1254 
1255 		if (i >= adapter->dev_data->nb_rx_queues)
1256 			continue;
1257 
1258 		/* Virtchnnl configure rx queues by pairs */
1259 		vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
1260 		vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_phys_addr;
1261 		vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
1262 		vc_qp->rxq.crc_disable = rxq[i]->crc_len != 0 ? 1 : 0;
1263 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1264 		if (vf->vf_res->vf_cap_flags &
1265 		    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
1266 			if (vf->supported_rxdid & BIT(rxq[i]->rxdid)) {
1267 				vc_qp->rxq.rxdid = rxq[i]->rxdid;
1268 				PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d]",
1269 					    vc_qp->rxq.rxdid, i);
1270 			} else {
1271 				PMD_DRV_LOG(NOTICE, "RXDID[%d] is not supported, "
1272 					    "request default RXDID[%d] in Queue[%d]",
1273 					    rxq[i]->rxdid, IAVF_RXDID_LEGACY_1, i);
1274 				vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_1;
1275 			}
1276 
1277 			if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP &&
1278 			    vf->ptp_caps & VIRTCHNL_1588_PTP_CAP_RX_TSTAMP &&
1279 			    rxq[i]->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
1280 				vc_qp->rxq.flags |= VIRTCHNL_PTP_RX_TSTAMP;
1281 		}
1282 #else
1283 		if (vf->vf_res->vf_cap_flags &
1284 			VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
1285 			vf->supported_rxdid & BIT(IAVF_RXDID_LEGACY_0)) {
1286 			vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_0;
1287 			PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d]",
1288 				    vc_qp->rxq.rxdid, i);
1289 		} else {
1290 			PMD_DRV_LOG(ERR, "RXDID[%d] is not supported",
1291 				    IAVF_RXDID_LEGACY_0);
1292 			return -1;
1293 		}
1294 #endif
1295 	}
1296 
1297 	memset(&args, 0, sizeof(args));
1298 	args.ops = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
1299 	args.in_args = (uint8_t *)vc_config;
1300 	args.in_args_size = size;
1301 	args.out_buffer = vf->aq_resp;
1302 	args.out_size = IAVF_AQ_BUF_SZ;
1303 
1304 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
1305 	if (err)
1306 		PMD_DRV_LOG(ERR, "Failed to execute command of"
1307 			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
1308 
1309 	rte_free(vc_config);
1310 	return err;
1311 }
1312 
/* Build and send the legacy IRQ map (VIRTCHNL_OP_CONFIG_IRQ_MAP): one
 * virtchnl_vector_map per MSI-X vector, with each RX queue OR-ed into the
 * rxq_map bitmap of the vector it was assigned to in vf->qv_map.
 * Returns 0 on success, -ENOMEM on allocation failure, or the command error.
 */
int
iavf_config_irq_map(struct iavf_adapter *adapter)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	struct virtchnl_irq_map_info *map_info;
	struct virtchnl_vector_map *vecmap;
	struct iavf_cmd_info args;
	int len, i, err;

	len = sizeof(struct virtchnl_irq_map_info) +
	      sizeof(struct virtchnl_vector_map) * vf->nb_msix;

	map_info = rte_zmalloc("map_info", len, 0);
	if (!map_info)
		return -ENOMEM;

	map_info->num_vectors = vf->nb_msix;
	for (i = 0; i < adapter->dev_data->nb_rx_queues; i++) {
		/* Index the vecmap array by vector offset from msix_base so
		 * multiple queues sharing a vector land in the same entry;
		 * rte_zmalloc guarantees rxq_map starts at 0.
		 */
		vecmap =
		    &map_info->vecmap[vf->qv_map[i].vector_id - vf->msix_base];
		vecmap->vsi_id = vf->vsi_res->vsi_id;
		vecmap->rxitr_idx = IAVF_ITR_INDEX_DEFAULT;
		vecmap->vector_id = vf->qv_map[i].vector_id;
		vecmap->txq_map = 0;
		/* NOTE(review): signed shift; assumes queue_id < 31 in this
		 * legacy (non large-VF) path — confirm against caller.
		 */
		vecmap->rxq_map |= 1 << vf->qv_map[i].queue_id;
	}

	args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
	args.in_args = (u8 *)map_info;
	args.in_args_size = len;
	args.out_buffer = vf->aq_resp;
	args.out_size = IAVF_AQ_BUF_SZ;
	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
	if (err)
		PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");

	rte_free(map_info);
	return err;
}
1352 
1353 int
1354 iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
1355 		uint16_t index)
1356 {
1357 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1358 	struct virtchnl_queue_vector_maps *map_info;
1359 	struct virtchnl_queue_vector *qv_maps;
1360 	struct iavf_cmd_info args;
1361 	int len, i, err;
1362 	int count = 0;
1363 
1364 	len = sizeof(struct virtchnl_queue_vector_maps) +
1365 	      sizeof(struct virtchnl_queue_vector) * (num - 1);
1366 
1367 	map_info = rte_zmalloc("map_info", len, 0);
1368 	if (!map_info)
1369 		return -ENOMEM;
1370 
1371 	map_info->vport_id = vf->vsi_res->vsi_id;
1372 	map_info->num_qv_maps = num;
1373 	for (i = index; i < index + map_info->num_qv_maps; i++) {
1374 		qv_maps = &map_info->qv_maps[count++];
1375 		qv_maps->itr_idx = VIRTCHNL_ITR_IDX_0;
1376 		qv_maps->queue_type = VIRTCHNL_QUEUE_TYPE_RX;
1377 		qv_maps->queue_id = vf->qv_map[i].queue_id;
1378 		qv_maps->vector_id = vf->qv_map[i].vector_id;
1379 	}
1380 
1381 	args.ops = VIRTCHNL_OP_MAP_QUEUE_VECTOR;
1382 	args.in_args = (u8 *)map_info;
1383 	args.in_args_size = len;
1384 	args.out_buffer = vf->aq_resp;
1385 	args.out_size = IAVF_AQ_BUF_SZ;
1386 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
1387 	if (err)
1388 		PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
1389 
1390 	rte_free(map_info);
1391 	return err;
1392 }
1393 
/* Add or remove every non-zero MAC address in dev_data->mac_addrs on the PF.
 * Addresses are sent in batches: each iteration of the outer loop packs as
 * many entries as fit in one IAVF_AQ_BUF_SZ-sized message, then continues
 * from where it stopped (begin/next_begin). Errors are logged but not
 * propagated (void return).
 */
void
iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
{
	struct virtchnl_ether_addr_list *list;
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	struct rte_ether_addr *addr;
	struct iavf_cmd_info args;
	int len, err, i, j;
	int next_begin = 0;
	int begin = 0;

	do {
		j = 0;
		len = sizeof(struct virtchnl_ether_addr_list);
		/* First pass: size this batch. next_begin tracks how far we
		 * got; if the buffer limit is hit, resume from i + 1 next
		 * time round the outer loop.
		 */
		for (i = begin; i < IAVF_NUM_MACADDR_MAX; i++, next_begin++) {
			addr = &adapter->dev_data->mac_addrs[i];
			if (rte_is_zero_ether_addr(addr))
				continue;
			len += sizeof(struct virtchnl_ether_addr);
			if (len >= IAVF_AQ_BUF_SZ) {
				next_begin = i + 1;
				break;
			}
		}

		list = rte_zmalloc("iavf_del_mac_buffer", len, 0);
		if (!list) {
			PMD_DRV_LOG(ERR, "fail to allocate memory");
			return;
		}

		/* Second pass: copy the same slots into the message. */
		for (i = begin; i < next_begin; i++) {
			addr = &adapter->dev_data->mac_addrs[i];
			if (rte_is_zero_ether_addr(addr))
				continue;
			rte_memcpy(list->list[j].addr, addr->addr_bytes,
				   sizeof(addr->addr_bytes));
			/* Slot 0 of dev_data->mac_addrs is the primary MAC;
			 * tag it so the PF tracks it separately.
			 */
			list->list[j].type = (j == 0 ?
					      VIRTCHNL_ETHER_ADDR_PRIMARY :
					      VIRTCHNL_ETHER_ADDR_EXTRA);
			PMD_DRV_LOG(DEBUG, "add/rm mac:" RTE_ETHER_ADDR_PRT_FMT,
				    RTE_ETHER_ADDR_BYTES(addr));
			j++;
		}
		list->vsi_id = vf->vsi_res->vsi_id;
		list->num_elements = j;
		args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
			   VIRTCHNL_OP_DEL_ETH_ADDR;
		args.in_args = (uint8_t *)list;
		args.in_args_size = len;
		args.out_buffer = vf->aq_resp;
		args.out_size = IAVF_AQ_BUF_SZ;
		err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
		if (err)
			PMD_DRV_LOG(ERR, "fail to execute command %s",
				    add ? "OP_ADD_ETHER_ADDRESS" :
				    "OP_DEL_ETHER_ADDRESS");
		rte_free(list);
		begin = next_begin;
	} while (begin < IAVF_NUM_MACADDR_MAX);
}
1455 
1456 int
1457 iavf_query_stats(struct iavf_adapter *adapter,
1458 		struct virtchnl_eth_stats **pstats)
1459 {
1460 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1461 	struct virtchnl_queue_select q_stats;
1462 	struct iavf_cmd_info args;
1463 	int err;
1464 
1465 	if (adapter->closed)
1466 		return -EIO;
1467 
1468 	memset(&q_stats, 0, sizeof(q_stats));
1469 	q_stats.vsi_id = vf->vsi_res->vsi_id;
1470 	args.ops = VIRTCHNL_OP_GET_STATS;
1471 	args.in_args = (uint8_t *)&q_stats;
1472 	args.in_args_size = sizeof(q_stats);
1473 	args.out_buffer = vf->aq_resp;
1474 	args.out_size = IAVF_AQ_BUF_SZ;
1475 
1476 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
1477 	if (err) {
1478 		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
1479 		*pstats = NULL;
1480 		return err;
1481 	}
1482 	*pstats = (struct virtchnl_eth_stats *)args.out_buffer;
1483 	return 0;
1484 }
1485 
1486 int
1487 iavf_config_promisc(struct iavf_adapter *adapter,
1488 		   bool enable_unicast,
1489 		   bool enable_multicast)
1490 {
1491 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1492 	struct virtchnl_promisc_info promisc;
1493 	struct iavf_cmd_info args;
1494 	int err;
1495 
1496 	if (adapter->closed)
1497 		return -EIO;
1498 
1499 	promisc.flags = 0;
1500 	promisc.vsi_id = vf->vsi_res->vsi_id;
1501 
1502 	if (enable_unicast)
1503 		promisc.flags |= FLAG_VF_UNICAST_PROMISC;
1504 
1505 	if (enable_multicast)
1506 		promisc.flags |= FLAG_VF_MULTICAST_PROMISC;
1507 
1508 	args.ops = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
1509 	args.in_args = (uint8_t *)&promisc;
1510 	args.in_args_size = sizeof(promisc);
1511 	args.out_buffer = vf->aq_resp;
1512 	args.out_size = IAVF_AQ_BUF_SZ;
1513 
1514 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
1515 
1516 	if (err) {
1517 		PMD_DRV_LOG(ERR,
1518 			    "fail to execute command CONFIG_PROMISCUOUS_MODE");
1519 
1520 		if (err == -ENOTSUP)
1521 			return err;
1522 
1523 		return -EAGAIN;
1524 	}
1525 
1526 	vf->promisc_unicast_enabled = enable_unicast;
1527 	vf->promisc_multicast_enabled = enable_multicast;
1528 	return 0;
1529 }
1530 
1531 int
1532 iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr,
1533 		     bool add, uint8_t type)
1534 {
1535 	struct virtchnl_ether_addr_list *list;
1536 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1537 	uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) +
1538 			   sizeof(struct virtchnl_ether_addr)];
1539 	struct iavf_cmd_info args;
1540 	int err;
1541 
1542 	if (adapter->closed)
1543 		return -EIO;
1544 
1545 	list = (struct virtchnl_ether_addr_list *)cmd_buffer;
1546 	list->vsi_id = vf->vsi_res->vsi_id;
1547 	list->num_elements = 1;
1548 	list->list[0].type = type;
1549 	rte_memcpy(list->list[0].addr, addr->addr_bytes,
1550 		   sizeof(addr->addr_bytes));
1551 
1552 	args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR : VIRTCHNL_OP_DEL_ETH_ADDR;
1553 	args.in_args = cmd_buffer;
1554 	args.in_args_size = sizeof(cmd_buffer);
1555 	args.out_buffer = vf->aq_resp;
1556 	args.out_size = IAVF_AQ_BUF_SZ;
1557 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
1558 	if (err)
1559 		PMD_DRV_LOG(ERR, "fail to execute command %s",
1560 			    add ? "OP_ADD_ETH_ADDR" :  "OP_DEL_ETH_ADDR");
1561 	return err;
1562 }
1563 
1564 int
1565 iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
1566 {
1567 	struct virtchnl_vlan_filter_list *vlan_list;
1568 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1569 	uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
1570 							sizeof(uint16_t)];
1571 	struct iavf_cmd_info args;
1572 	int err;
1573 
1574 	vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
1575 	vlan_list->vsi_id = vf->vsi_res->vsi_id;
1576 	vlan_list->num_elements = 1;
1577 	vlan_list->vlan_id[0] = vlanid;
1578 
1579 	args.ops = add ? VIRTCHNL_OP_ADD_VLAN : VIRTCHNL_OP_DEL_VLAN;
1580 	args.in_args = cmd_buffer;
1581 	args.in_args_size = sizeof(cmd_buffer);
1582 	args.out_buffer = vf->aq_resp;
1583 	args.out_size = IAVF_AQ_BUF_SZ;
1584 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
1585 	if (err)
1586 		PMD_DRV_LOG(ERR, "fail to execute command %s",
1587 			    add ? "OP_ADD_VLAN" :  "OP_DEL_VLAN");
1588 
1589 	return err;
1590 }
1591 
1592 int
1593 iavf_fdir_add(struct iavf_adapter *adapter,
1594 	struct iavf_fdir_conf *filter)
1595 {
1596 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1597 	struct virtchnl_fdir_add *fdir_ret;
1598 
1599 	struct iavf_cmd_info args;
1600 	int err;
1601 
1602 	filter->add_fltr.vsi_id = vf->vsi_res->vsi_id;
1603 	filter->add_fltr.validate_only = 0;
1604 
1605 	args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
1606 	args.in_args = (uint8_t *)(&filter->add_fltr);
1607 	args.in_args_size = sizeof(*(&filter->add_fltr));
1608 	args.out_buffer = vf->aq_resp;
1609 	args.out_size = IAVF_AQ_BUF_SZ;
1610 
1611 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
1612 	if (err) {
1613 		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
1614 		return err;
1615 	}
1616 
1617 	fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer;
1618 	filter->flow_id = fdir_ret->flow_id;
1619 
1620 	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
1621 		PMD_DRV_LOG(INFO,
1622 			"Succeed in adding rule request by PF");
1623 	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE) {
1624 		PMD_DRV_LOG(ERR,
1625 			"Failed to add rule request due to no hw resource");
1626 		return -1;
1627 	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_EXIST) {
1628 		PMD_DRV_LOG(ERR,
1629 			"Failed to add rule request due to the rule is already existed");
1630 		return -1;
1631 	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT) {
1632 		PMD_DRV_LOG(ERR,
1633 			"Failed to add rule request due to the rule is conflict with existing rule");
1634 		return -1;
1635 	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
1636 		PMD_DRV_LOG(ERR,
1637 			"Failed to add rule request due to the hw doesn't support");
1638 		return -1;
1639 	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) {
1640 		PMD_DRV_LOG(ERR,
1641 			"Failed to add rule request due to time out for programming");
1642 		return -1;
1643 	} else {
1644 		PMD_DRV_LOG(ERR,
1645 			"Failed to add rule request due to other reasons");
1646 		return -1;
1647 	}
1648 
1649 	return 0;
1650 };
1651 
1652 int
1653 iavf_fdir_del(struct iavf_adapter *adapter,
1654 	struct iavf_fdir_conf *filter)
1655 {
1656 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1657 	struct virtchnl_fdir_del *fdir_ret;
1658 
1659 	struct iavf_cmd_info args;
1660 	int err;
1661 
1662 	filter->del_fltr.vsi_id = vf->vsi_res->vsi_id;
1663 	filter->del_fltr.flow_id = filter->flow_id;
1664 
1665 	args.ops = VIRTCHNL_OP_DEL_FDIR_FILTER;
1666 	args.in_args = (uint8_t *)(&filter->del_fltr);
1667 	args.in_args_size = sizeof(filter->del_fltr);
1668 	args.out_buffer = vf->aq_resp;
1669 	args.out_size = IAVF_AQ_BUF_SZ;
1670 
1671 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
1672 	if (err) {
1673 		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
1674 		return err;
1675 	}
1676 
1677 	fdir_ret = (struct virtchnl_fdir_del *)args.out_buffer;
1678 
1679 	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
1680 		PMD_DRV_LOG(INFO,
1681 			"Succeed in deleting rule request by PF");
1682 	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
1683 		PMD_DRV_LOG(ERR,
1684 			"Failed to delete rule request due to this rule doesn't exist");
1685 		return -1;
1686 	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) {
1687 		PMD_DRV_LOG(ERR,
1688 			"Failed to delete rule request due to time out for programming");
1689 		return -1;
1690 	} else {
1691 		PMD_DRV_LOG(ERR,
1692 			"Failed to delete rule request due to other reasons");
1693 		return -1;
1694 	}
1695 
1696 	return 0;
1697 };
1698 
1699 int
1700 iavf_fdir_check(struct iavf_adapter *adapter,
1701 		struct iavf_fdir_conf *filter)
1702 {
1703 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1704 	struct virtchnl_fdir_add *fdir_ret;
1705 
1706 	struct iavf_cmd_info args;
1707 	int err;
1708 
1709 	filter->add_fltr.vsi_id = vf->vsi_res->vsi_id;
1710 	filter->add_fltr.validate_only = 1;
1711 
1712 	args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
1713 	args.in_args = (uint8_t *)(&filter->add_fltr);
1714 	args.in_args_size = sizeof(*(&filter->add_fltr));
1715 	args.out_buffer = vf->aq_resp;
1716 	args.out_size = IAVF_AQ_BUF_SZ;
1717 
1718 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
1719 	if (err) {
1720 		PMD_DRV_LOG(ERR, "fail to check flow director rule");
1721 		return err;
1722 	}
1723 
1724 	fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer;
1725 
1726 	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
1727 		PMD_DRV_LOG(INFO,
1728 			"Succeed in checking rule request by PF");
1729 	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
1730 		PMD_DRV_LOG(ERR,
1731 			"Failed to check rule request due to parameters validation"
1732 			" or HW doesn't support");
1733 		err = -1;
1734 	} else {
1735 		PMD_DRV_LOG(ERR,
1736 			"Failed to check rule request due to other reasons");
1737 		err =  -1;
1738 	}
1739 
1740 	return err;
1741 }
1742 
1743 int
1744 iavf_flow_sub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter)
1745 {
1746 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1747 	struct virtchnl_flow_sub *fsub_cfg;
1748 	struct iavf_cmd_info args;
1749 	int err;
1750 
1751 	filter->sub_fltr.vsi_id = vf->vsi_res->vsi_id;
1752 	filter->sub_fltr.validate_only = 0;
1753 
1754 	memset(&args, 0, sizeof(args));
1755 	args.ops = VIRTCHNL_OP_FLOW_SUBSCRIBE;
1756 	args.in_args = (uint8_t *)(&filter->sub_fltr);
1757 	args.in_args_size = sizeof(*(&filter->sub_fltr));
1758 	args.out_buffer = vf->aq_resp;
1759 	args.out_size = IAVF_AQ_BUF_SZ;
1760 
1761 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
1762 	if (err) {
1763 		PMD_DRV_LOG(ERR, "Failed to execute command of "
1764 				 "OP_FLOW_SUBSCRIBE");
1765 		return err;
1766 	}
1767 
1768 	fsub_cfg = (struct virtchnl_flow_sub *)args.out_buffer;
1769 	filter->flow_id = fsub_cfg->flow_id;
1770 
1771 	if (fsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
1772 		PMD_DRV_LOG(INFO, "Succeed in adding rule request by PF");
1773 	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE) {
1774 		PMD_DRV_LOG(ERR, "Failed to add rule request due to no hw "
1775 				 "resource");
1776 		err = -1;
1777 	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_EXIST) {
1778 		PMD_DRV_LOG(ERR, "Failed to add rule request due to the rule "
1779 				 "is already existed");
1780 		err = -1;
1781 	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_INVALID) {
1782 		PMD_DRV_LOG(ERR, "Failed to add rule request due to the hw "
1783 				 "doesn't support");
1784 		err = -1;
1785 	} else {
1786 		PMD_DRV_LOG(ERR, "Failed to add rule request due to other "
1787 				 "reasons");
1788 		err = -1;
1789 	}
1790 
1791 	return err;
1792 }
1793 
1794 int
1795 iavf_flow_unsub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter)
1796 {
1797 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1798 	struct virtchnl_flow_unsub *unsub_cfg;
1799 	struct iavf_cmd_info args;
1800 	int err;
1801 
1802 	filter->unsub_fltr.vsi_id = vf->vsi_res->vsi_id;
1803 	filter->unsub_fltr.flow_id = filter->flow_id;
1804 
1805 	memset(&args, 0, sizeof(args));
1806 	args.ops = VIRTCHNL_OP_FLOW_UNSUBSCRIBE;
1807 	args.in_args = (uint8_t *)(&filter->unsub_fltr);
1808 	args.in_args_size = sizeof(filter->unsub_fltr);
1809 	args.out_buffer = vf->aq_resp;
1810 	args.out_size = IAVF_AQ_BUF_SZ;
1811 
1812 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
1813 	if (err) {
1814 		PMD_DRV_LOG(ERR, "Failed to execute command of "
1815 				 "OP_FLOW_UNSUBSCRIBE");
1816 		return err;
1817 	}
1818 
1819 	unsub_cfg = (struct virtchnl_flow_unsub *)args.out_buffer;
1820 
1821 	if (unsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
1822 		PMD_DRV_LOG(INFO, "Succeed in deleting rule request by PF");
1823 	} else if (unsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST) {
1824 		PMD_DRV_LOG(ERR, "Failed to delete rule request due to this "
1825 				 "rule doesn't exist");
1826 		err = -1;
1827 	} else {
1828 		PMD_DRV_LOG(ERR, "Failed to delete rule request due to other "
1829 				 "reasons");
1830 		err = -1;
1831 	}
1832 
1833 	return err;
1834 }
1835 
1836 int
1837 iavf_flow_sub_check(struct iavf_adapter *adapter,
1838 		    struct iavf_fsub_conf *filter)
1839 {
1840 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1841 	struct virtchnl_flow_sub *fsub_cfg;
1842 
1843 	struct iavf_cmd_info args;
1844 	int err;
1845 
1846 	filter->sub_fltr.vsi_id = vf->vsi_res->vsi_id;
1847 	filter->sub_fltr.validate_only = 1;
1848 
1849 	args.ops = VIRTCHNL_OP_FLOW_SUBSCRIBE;
1850 	args.in_args = (uint8_t *)(&filter->sub_fltr);
1851 	args.in_args_size = sizeof(*(&filter->sub_fltr));
1852 	args.out_buffer = vf->aq_resp;
1853 	args.out_size = IAVF_AQ_BUF_SZ;
1854 
1855 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
1856 	if (err) {
1857 		PMD_DRV_LOG(ERR, "Failed to check flow subscription rule");
1858 		return err;
1859 	}
1860 
1861 	fsub_cfg = (struct virtchnl_flow_sub *)args.out_buffer;
1862 
1863 	if (fsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
1864 		PMD_DRV_LOG(INFO, "Succeed in checking rule request by PF");
1865 	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_INVALID) {
1866 		PMD_DRV_LOG(ERR, "Failed to check rule request due to "
1867 				 "parameters validation or HW doesn't "
1868 				 "support");
1869 		err = -1;
1870 	} else {
1871 		PMD_DRV_LOG(ERR, "Failed to check rule request due to other "
1872 				 "reasons");
1873 		err = -1;
1874 	}
1875 
1876 	return err;
1877 }
1878 
1879 int
1880 iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
1881 		     struct virtchnl_rss_cfg *rss_cfg, bool add)
1882 {
1883 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1884 	struct iavf_cmd_info args;
1885 	int err;
1886 
1887 	memset(&args, 0, sizeof(args));
1888 	args.ops = add ? VIRTCHNL_OP_ADD_RSS_CFG :
1889 		VIRTCHNL_OP_DEL_RSS_CFG;
1890 	args.in_args = (u8 *)rss_cfg;
1891 	args.in_args_size = sizeof(*rss_cfg);
1892 	args.out_buffer = vf->aq_resp;
1893 	args.out_size = IAVF_AQ_BUF_SZ;
1894 
1895 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
1896 	if (err)
1897 		PMD_DRV_LOG(ERR,
1898 			    "Failed to execute command of %s",
1899 			    add ? "OP_ADD_RSS_CFG" :
1900 			    "OP_DEL_RSS_INPUT_CFG");
1901 
1902 	return err;
1903 }
1904 
1905 int
1906 iavf_get_hena_caps(struct iavf_adapter *adapter, uint64_t *caps)
1907 {
1908 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1909 	struct iavf_cmd_info args;
1910 	int err;
1911 
1912 	args.ops = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
1913 	args.in_args = NULL;
1914 	args.in_args_size = 0;
1915 	args.out_buffer = vf->aq_resp;
1916 	args.out_size = IAVF_AQ_BUF_SZ;
1917 
1918 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
1919 	if (err) {
1920 		PMD_DRV_LOG(ERR,
1921 			    "Failed to execute command of OP_GET_RSS_HENA_CAPS");
1922 		return err;
1923 	}
1924 
1925 	*caps = ((struct virtchnl_rss_hena *)args.out_buffer)->hena;
1926 	return 0;
1927 }
1928 
1929 int
1930 iavf_set_hena(struct iavf_adapter *adapter, uint64_t hena)
1931 {
1932 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1933 	struct virtchnl_rss_hena vrh;
1934 	struct iavf_cmd_info args;
1935 	int err;
1936 
1937 	vrh.hena = hena;
1938 	args.ops = VIRTCHNL_OP_SET_RSS_HENA;
1939 	args.in_args = (u8 *)&vrh;
1940 	args.in_args_size = sizeof(vrh);
1941 	args.out_buffer = vf->aq_resp;
1942 	args.out_size = IAVF_AQ_BUF_SZ;
1943 
1944 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
1945 	if (err)
1946 		PMD_DRV_LOG(ERR,
1947 			    "Failed to execute command of OP_SET_RSS_HENA");
1948 
1949 	return err;
1950 }
1951 
1952 int
1953 iavf_get_qos_cap(struct iavf_adapter *adapter)
1954 {
1955 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1956 	struct iavf_cmd_info args;
1957 	uint32_t len;
1958 	int err;
1959 
1960 	args.ops = VIRTCHNL_OP_GET_QOS_CAPS;
1961 	args.in_args = NULL;
1962 	args.in_args_size = 0;
1963 	args.out_buffer = vf->aq_resp;
1964 	args.out_size = IAVF_AQ_BUF_SZ;
1965 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
1966 
1967 	if (err) {
1968 		PMD_DRV_LOG(ERR,
1969 			    "Failed to execute command of OP_GET_VF_RESOURCE");
1970 		return -1;
1971 	}
1972 
1973 	len =  sizeof(struct virtchnl_qos_cap_list) +
1974 		IAVF_MAX_TRAFFIC_CLASS * sizeof(struct virtchnl_qos_cap_elem);
1975 
1976 	rte_memcpy(vf->qos_cap, args.out_buffer,
1977 		   RTE_MIN(args.out_size, len));
1978 
1979 	return 0;
1980 }
1981 
1982 int iavf_set_q_tc_map(struct rte_eth_dev *dev,
1983 		struct virtchnl_queue_tc_mapping *q_tc_mapping, uint16_t size)
1984 {
1985 	struct iavf_adapter *adapter =
1986 			IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1987 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1988 	struct iavf_cmd_info args;
1989 	int err;
1990 
1991 	memset(&args, 0, sizeof(args));
1992 	args.ops = VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP;
1993 	args.in_args = (uint8_t *)q_tc_mapping;
1994 	args.in_args_size = size;
1995 	args.out_buffer = vf->aq_resp;
1996 	args.out_size = IAVF_AQ_BUF_SZ;
1997 
1998 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
1999 	if (err)
2000 		PMD_DRV_LOG(ERR, "Failed to execute command of"
2001 			    " VIRTCHNL_OP_CONFIG_TC_MAP");
2002 	return err;
2003 }
2004 
2005 int iavf_set_q_bw(struct rte_eth_dev *dev,
2006 		struct virtchnl_queues_bw_cfg *q_bw, uint16_t size)
2007 {
2008 	struct iavf_adapter *adapter =
2009 			IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2010 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2011 	struct iavf_cmd_info args;
2012 	int err;
2013 
2014 	memset(&args, 0, sizeof(args));
2015 	args.ops = VIRTCHNL_OP_CONFIG_QUEUE_BW;
2016 	args.in_args = (uint8_t *)q_bw;
2017 	args.in_args_size = size;
2018 	args.out_buffer = vf->aq_resp;
2019 	args.out_size = IAVF_AQ_BUF_SZ;
2020 
2021 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
2022 	if (err)
2023 		PMD_DRV_LOG(ERR, "Failed to execute command of"
2024 			    " VIRTCHNL_OP_CONFIG_QUEUE_BW");
2025 	return err;
2026 }
2027 
2028 int
2029 iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
2030 			struct rte_ether_addr *mc_addrs,
2031 			uint32_t mc_addrs_num, bool add)
2032 {
2033 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
2034 	uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) +
2035 		(IAVF_NUM_MACADDR_MAX * sizeof(struct virtchnl_ether_addr))];
2036 	struct virtchnl_ether_addr_list *list;
2037 	struct iavf_cmd_info args;
2038 	uint32_t i;
2039 	int err;
2040 
2041 	if (mc_addrs == NULL || mc_addrs_num == 0)
2042 		return 0;
2043 
2044 	list = (struct virtchnl_ether_addr_list *)cmd_buffer;
2045 	list->vsi_id = vf->vsi_res->vsi_id;
2046 	list->num_elements = mc_addrs_num;
2047 
2048 	for (i = 0; i < mc_addrs_num; i++) {
2049 		if (!IAVF_IS_MULTICAST(mc_addrs[i].addr_bytes)) {
2050 			PMD_DRV_LOG(ERR, "Invalid mac:" RTE_ETHER_ADDR_PRT_FMT,
2051 				    RTE_ETHER_ADDR_BYTES(&mc_addrs[i]));
2052 			return -EINVAL;
2053 		}
2054 
2055 		memcpy(list->list[i].addr, mc_addrs[i].addr_bytes,
2056 			sizeof(list->list[i].addr));
2057 		list->list[i].type = VIRTCHNL_ETHER_ADDR_EXTRA;
2058 	}
2059 
2060 	args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR : VIRTCHNL_OP_DEL_ETH_ADDR;
2061 	args.in_args = cmd_buffer;
2062 	args.in_args_size = sizeof(struct virtchnl_ether_addr_list) +
2063 		i * sizeof(struct virtchnl_ether_addr);
2064 	args.out_buffer = vf->aq_resp;
2065 	args.out_size = IAVF_AQ_BUF_SZ;
2066 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
2067 
2068 	if (err) {
2069 		PMD_DRV_LOG(ERR, "fail to execute command %s",
2070 			add ? "OP_ADD_ETH_ADDR" : "OP_DEL_ETH_ADDR");
2071 		return err;
2072 	}
2073 
2074 	return 0;
2075 }
2076 
/* Ask the PF to change this VF's queue-pair allocation
 * (VIRTCHNL_OP_REQUEST_QUEUES).  When the PF grants the request it resets
 * the VF, so on the 0-return path the caller must expect a VF reset.
 *
 * Returns 0 when the request was accepted (vf_reset observed), a negative
 * command error, or -1 when the capability is missing, num is 0, or the PF
 * declined (the available count is logged).
 */
int
iavf_request_queues(struct rte_eth_dev *dev, uint16_t num)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf =  IAVF_DEV_PRIVATE_TO_VF(adapter);
	struct virtchnl_vf_res_request vfres;
	struct iavf_cmd_info args;
	uint16_t num_queue_pairs;
	int err;
	int i = 0;

	/* PF must have negotiated the REQ_QUEUES capability. */
	if (!(vf->vf_res->vf_cap_flags &
		VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) {
		PMD_DRV_LOG(ERR, "request queues not supported");
		return -1;
	}

	if (num == 0) {
		PMD_DRV_LOG(ERR, "queue number cannot be zero");
		return -1;
	}
	vfres.num_queue_pairs = num;

	args.ops = VIRTCHNL_OP_REQUEST_QUEUES;
	args.in_args = (u8 *)&vfres;
	args.in_args_size = sizeof(vfres);
	args.out_buffer = vf->aq_resp;
	args.out_size = IAVF_AQ_BUF_SZ;

	/* Without WB_ON_ITR the admin queue is polled by the periodic alarm;
	 * cancel it around the command so the alarm handler does not race
	 * with the reply, then re-arm it.
	 */
	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
		err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
	} else {
		rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev);
		err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
		rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
				  iavf_dev_alarm_handler, dev);
	}

	if (err) {
		PMD_DRV_LOG(ERR, "fail to execute command OP_REQUEST_QUEUES");
		return err;
	}

	/* wait for interrupt notification vf is resetting */
	/* Poll up to MAX_TRY_TIMES * ASQ_DELAY_MS ms for the reset flag. */
	while (i++ < MAX_TRY_TIMES) {
		if (vf->vf_reset)
			break;
		iavf_msec_delay(ASQ_DELAY_MS);
	}

	/* request queues succeeded, vf is resetting */
	if (vf->vf_reset) {
		PMD_DRV_LOG(INFO, "vf is resetting");
		return 0;
	}

	/* request additional queues failed, return available number */
	num_queue_pairs =
	  ((struct virtchnl_vf_res_request *)args.out_buffer)->num_queue_pairs;
	PMD_DRV_LOG(ERR, "request queues failed, only %u queues "
		"available", num_queue_pairs);

	return -1;
}
2142 
2143 int
2144 iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
2145 {
2146 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
2147 	struct iavf_cmd_info args;
2148 	uint16_t qregion_width;
2149 	int err;
2150 
2151 	args.ops = VIRTCHNL_OP_GET_MAX_RSS_QREGION;
2152 	args.in_args = NULL;
2153 	args.in_args_size = 0;
2154 	args.out_buffer = vf->aq_resp;
2155 	args.out_size = IAVF_AQ_BUF_SZ;
2156 
2157 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
2158 	if (err) {
2159 		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_MAX_RSS_QREGION");
2160 		return err;
2161 	}
2162 
2163 	qregion_width =
2164 	((struct virtchnl_max_rss_qregion *)args.out_buffer)->qregion_width;
2165 
2166 	vf->max_rss_qregion = (uint16_t)(1 << qregion_width);
2167 
2168 	return 0;
2169 }
2170 
2171 
2172 
2173 int
2174 iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
2175 		uint8_t *msg, size_t msg_len,
2176 		uint8_t *resp_msg, size_t resp_msg_len)
2177 {
2178 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
2179 	struct iavf_cmd_info args;
2180 	int err;
2181 
2182 	args.ops = VIRTCHNL_OP_INLINE_IPSEC_CRYPTO;
2183 	args.in_args = msg;
2184 	args.in_args_size = msg_len;
2185 	args.out_buffer = vf->aq_resp;
2186 	args.out_size = IAVF_AQ_BUF_SZ;
2187 
2188 	err = iavf_execute_vf_cmd_safe(adapter, &args, 1);
2189 	if (err) {
2190 		PMD_DRV_LOG(ERR, "fail to execute command %s",
2191 				"OP_INLINE_IPSEC_CRYPTO");
2192 		return err;
2193 	}
2194 
2195 	memcpy(resp_msg, args.out_buffer, resp_msg_len);
2196 
2197 	return 0;
2198 }
2199 
2200 int
2201 iavf_set_vf_quanta_size(struct iavf_adapter *adapter, u16 start_queue_id, u16 num_queues)
2202 {
2203 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
2204 	struct iavf_cmd_info args;
2205 	struct virtchnl_quanta_cfg q_quanta;
2206 	int err;
2207 
2208 	if (adapter->devargs.quanta_size == 0)
2209 		return 0;
2210 
2211 	q_quanta.quanta_size = adapter->devargs.quanta_size;
2212 	q_quanta.queue_select.type = VIRTCHNL_QUEUE_TYPE_TX;
2213 	q_quanta.queue_select.start_queue_id = start_queue_id;
2214 	q_quanta.queue_select.num_queues = num_queues;
2215 
2216 	args.ops = VIRTCHNL_OP_CONFIG_QUANTA;
2217 	args.in_args = (uint8_t *)&q_quanta;
2218 	args.in_args_size = sizeof(q_quanta);
2219 	args.out_buffer = vf->aq_resp;
2220 	args.out_size = IAVF_AQ_BUF_SZ;
2221 
2222 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
2223 	if (err) {
2224 		PMD_DRV_LOG(ERR, "Failed to execute command VIRTCHNL_OP_CONFIG_QUANTA");
2225 		return err;
2226 	}
2227 
2228 	return 0;
2229 }
2230 
2231 int
2232 iavf_get_ptp_cap(struct iavf_adapter *adapter)
2233 {
2234 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
2235 	struct virtchnl_ptp_caps ptp_caps;
2236 	struct iavf_cmd_info args;
2237 	int err;
2238 
2239 	ptp_caps.caps = VIRTCHNL_1588_PTP_CAP_RX_TSTAMP |
2240 			VIRTCHNL_1588_PTP_CAP_READ_PHC;
2241 
2242 	args.ops = VIRTCHNL_OP_1588_PTP_GET_CAPS;
2243 	args.in_args = (uint8_t *)&ptp_caps;
2244 	args.in_args_size = sizeof(ptp_caps);
2245 	args.out_buffer = vf->aq_resp;
2246 	args.out_size = IAVF_AQ_BUF_SZ;
2247 
2248 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
2249 	if (err) {
2250 		PMD_DRV_LOG(ERR,
2251 			    "Failed to execute command of OP_1588_PTP_GET_CAPS");
2252 		return err;
2253 	}
2254 
2255 	vf->ptp_caps = ((struct virtchnl_ptp_caps *)args.out_buffer)->caps;
2256 
2257 	return 0;
2258 }
2259 
2260 int
2261 iavf_get_phc_time(struct iavf_rx_queue *rxq)
2262 {
2263 	struct iavf_adapter *adapter = rxq->vsi->adapter;
2264 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
2265 	struct virtchnl_phc_time phc_time;
2266 	struct iavf_cmd_info args;
2267 	int err = 0;
2268 
2269 	args.ops = VIRTCHNL_OP_1588_PTP_GET_TIME;
2270 	args.in_args = (uint8_t *)&phc_time;
2271 	args.in_args_size = sizeof(phc_time);
2272 	args.out_buffer = vf->aq_resp;
2273 	args.out_size = IAVF_AQ_BUF_SZ;
2274 
2275 	rte_spinlock_lock(&vf->phc_time_aq_lock);
2276 	err = iavf_execute_vf_cmd_safe(adapter, &args, 0);
2277 	if (err) {
2278 		PMD_DRV_LOG(ERR,
2279 			    "Failed to execute command of VIRTCHNL_OP_1588_PTP_GET_TIME");
2280 		goto out;
2281 	}
2282 	rxq->phc_time = ((struct virtchnl_phc_time *)args.out_buffer)->time;
2283 
2284 out:
2285 	rte_spinlock_unlock(&vf->phc_time_aq_lock);
2286 	return err;
2287 }
2288