/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Intel Corporation
 */

#include <rte_log.h>
#include <idpf_common_device.h>
#include <idpf_common_virtchnl.h>

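/* Trigger a PF software reset by setting the PFSWR bit in PFGEN_CTRL. */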
static void
idpf_reset_pf(struct idpf_hw *hw)
{
	uint32_t reg;

	reg = IDPF_READ_REG(hw, PFGEN_CTRL);
	IDPF_WRITE_REG(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR));
}

#define IDPF_RESET_WAIT_CNT 100
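/* Poll PFGEN_RSTAT until the reset-done state is reported or the wait
 * count expires; returns 0 on completion and -EBUSY on timeout.
 */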
static int
idpf_check_pf_reset_done(struct idpf_hw *hw)
{
	uint32_t reg;
	int i;

	for (i = 0; i < IDPF_RESET_WAIT_CNT; i++) {
		reg = IDPF_READ_REG(hw, PFGEN_RSTAT);
		if (reg != 0xFFFFFFFF && (reg & PFGEN_RSTAT_PFR_STATE_M))
			return 0;
		rte_delay_ms(1000);
	}

	DRV_LOG(ERR, "IDPF reset timeout");
	return -EBUSY;
}

#define CTLQ_NUM 2
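/* Create the mailbox TX/RX control queue pair and cache the resulting
 * queue handles in hw->asq and hw->arq.
 */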
static int
idpf_init_mbx(struct idpf_hw *hw)
{
	struct idpf_ctlq_create_info ctlq_info[CTLQ_NUM] = {
		{
			.type = IDPF_CTLQ_TYPE_MAILBOX_TX,
			.id = IDPF_CTLQ_ID,
			.len = IDPF_CTLQ_LEN,
			.buf_size = IDPF_DFLT_MBX_BUF_SIZE,
			.reg = {
				.head = PF_FW_ATQH,
				.tail = PF_FW_ATQT,
				.len = PF_FW_ATQLEN,
				.bah = PF_FW_ATQBAH,
				.bal = PF_FW_ATQBAL,
				.len_mask = PF_FW_ATQLEN_ATQLEN_M,
				.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M,
				.head_mask = PF_FW_ATQH_ATQH_M,
			}
		},
		{
			.type = IDPF_CTLQ_TYPE_MAILBOX_RX,
			.id = IDPF_CTLQ_ID,
			.len = IDPF_CTLQ_LEN,
			.buf_size = IDPF_DFLT_MBX_BUF_SIZE,
			.reg = {
				.head = PF_FW_ARQH,
				.tail = PF_FW_ARQT,
				.len = PF_FW_ARQLEN,
				.bah = PF_FW_ARQBAH,
				.bal = PF_FW_ARQBAL,
				.len_mask = PF_FW_ARQLEN_ARQLEN_M,
				.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M,
				.head_mask = PF_FW_ARQH_ARQH_M,
			}
		}
	};
	struct idpf_ctlq_info *ctlq;
	int ret;

	ret = idpf_ctlq_init(hw, CTLQ_NUM, ctlq_info);
	if (ret != 0)
		return ret;

	LIST_FOR_EACH_ENTRY_SAFE(ctlq, NULL, &hw->cq_list_head,
				 struct idpf_ctlq_info, cq_list) {
		if (ctlq->q_id == IDPF_CTLQ_ID &&
		    ctlq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
			hw->asq = ctlq;
		if (ctlq->q_id == IDPF_CTLQ_ID &&
		    ctlq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX)
			hw->arq = ctlq;
	}

	if (hw->asq == NULL || hw->arq == NULL) {
		idpf_ctlq_deinit(hw);
		ret = -ENOENT;
	}

	return ret;
}

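/* Query the packet type table over virtchnl2 and translate each
 * VIRTCHNL2_PROTO_HDR_* chain into the matching RTE_PTYPE_* flags,
 * filling adapter->ptype_tbl indexed by the 10-bit hardware ptype id.
 */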
static int
idpf_get_pkt_type(struct idpf_adapter *adapter)
{
	struct virtchnl2_get_ptype_info *ptype_info;
	uint16_t ptype_offset, i, j;
	uint16_t ptype_recvd = 0;
	int ret;

	ret = idpf_vc_ptype_info_query(adapter);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to query packet type information");
		return ret;
	}

	ptype_info = rte_zmalloc("ptype_info", IDPF_DFLT_MBX_BUF_SIZE, 0);
	if (ptype_info == NULL)
		return -ENOMEM;

	while (ptype_recvd < IDPF_MAX_PKT_TYPE) {
		ret = idpf_vc_one_msg_read(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO,
					   IDPF_DFLT_MBX_BUF_SIZE, (uint8_t *)ptype_info);
		if (ret != 0) {
			DRV_LOG(ERR, "Failed to get packet type information");
			goto free_ptype_info;
		}

		ptype_recvd += ptype_info->num_ptypes;
		ptype_offset = sizeof(struct virtchnl2_get_ptype_info) -
						sizeof(struct virtchnl2_ptype);

		for (i = 0; i < rte_cpu_to_le_16(ptype_info->num_ptypes); i++) {
			bool is_inner = false, is_ip = false;
			struct virtchnl2_ptype *ptype;
			uint32_t proto_hdr = 0;

			ptype = (struct virtchnl2_ptype *)
					((uint8_t *)ptype_info + ptype_offset);
			ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
			if (ptype_offset > IDPF_DFLT_MBX_BUF_SIZE) {
				ret = -EINVAL;
				goto free_ptype_info;
			}

			/* A ptype id of all ones marks the end of the table. */
			if (rte_cpu_to_le_16(ptype->ptype_id_10) == 0xFFFF)
				goto free_ptype_info;

			for (j = 0; j < ptype->proto_id_count; j++) {
				switch (rte_cpu_to_le_16(ptype->proto_id[j])) {
				case VIRTCHNL2_PROTO_HDR_GRE:
				case VIRTCHNL2_PROTO_HDR_VXLAN:
					proto_hdr &= ~RTE_PTYPE_L4_MASK;
					proto_hdr |= RTE_PTYPE_TUNNEL_GRENAT;
					is_inner = true;
					break;
				case VIRTCHNL2_PROTO_HDR_MAC:
					if (is_inner) {
						proto_hdr &= ~RTE_PTYPE_INNER_L2_MASK;
						proto_hdr |= RTE_PTYPE_INNER_L2_ETHER;
					} else {
						proto_hdr &= ~RTE_PTYPE_L2_MASK;
						proto_hdr |= RTE_PTYPE_L2_ETHER;
					}
					break;
				case VIRTCHNL2_PROTO_HDR_VLAN:
					if (is_inner) {
						proto_hdr &= ~RTE_PTYPE_INNER_L2_MASK;
						proto_hdr |= RTE_PTYPE_INNER_L2_ETHER_VLAN;
					}
					break;
				case VIRTCHNL2_PROTO_HDR_PTP:
					proto_hdr &= ~RTE_PTYPE_L2_MASK;
					proto_hdr |= RTE_PTYPE_L2_ETHER_TIMESYNC;
					break;
				case VIRTCHNL2_PROTO_HDR_LLDP:
					proto_hdr &= ~RTE_PTYPE_L2_MASK;
					proto_hdr |= RTE_PTYPE_L2_ETHER_LLDP;
					break;
				case VIRTCHNL2_PROTO_HDR_ARP:
					proto_hdr &= ~RTE_PTYPE_L2_MASK;
					proto_hdr |= RTE_PTYPE_L2_ETHER_ARP;
					break;
				case VIRTCHNL2_PROTO_HDR_PPPOE:
					proto_hdr &= ~RTE_PTYPE_L2_MASK;
					proto_hdr |= RTE_PTYPE_L2_ETHER_PPPOE;
					break;
				case VIRTCHNL2_PROTO_HDR_IPV4:
					if (!is_ip) {
						proto_hdr |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
						is_ip = true;
					} else {
						proto_hdr |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
							     RTE_PTYPE_TUNNEL_IP;
						is_inner = true;
					}
					break;
				case VIRTCHNL2_PROTO_HDR_IPV6:
					if (!is_ip) {
						proto_hdr |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
						is_ip = true;
					} else {
						proto_hdr |= RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
							     RTE_PTYPE_TUNNEL_IP;
						is_inner = true;
					}
					break;
				case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
				case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
					if (is_inner)
						proto_hdr |= RTE_PTYPE_INNER_L4_FRAG;
					else
						proto_hdr |= RTE_PTYPE_L4_FRAG;
					break;
				case VIRTCHNL2_PROTO_HDR_UDP:
					if (is_inner)
						proto_hdr |= RTE_PTYPE_INNER_L4_UDP;
					else
						proto_hdr |= RTE_PTYPE_L4_UDP;
					break;
				case VIRTCHNL2_PROTO_HDR_TCP:
					if (is_inner)
						proto_hdr |= RTE_PTYPE_INNER_L4_TCP;
					else
						proto_hdr |= RTE_PTYPE_L4_TCP;
					break;
				case VIRTCHNL2_PROTO_HDR_SCTP:
					if (is_inner)
						proto_hdr |= RTE_PTYPE_INNER_L4_SCTP;
					else
						proto_hdr |= RTE_PTYPE_L4_SCTP;
					break;
				case VIRTCHNL2_PROTO_HDR_ICMP:
					if (is_inner)
						proto_hdr |= RTE_PTYPE_INNER_L4_ICMP;
					else
						proto_hdr |= RTE_PTYPE_L4_ICMP;
					break;
				case VIRTCHNL2_PROTO_HDR_ICMPV6:
					if (is_inner)
						proto_hdr |= RTE_PTYPE_INNER_L4_ICMP;
					else
						proto_hdr |= RTE_PTYPE_L4_ICMP;
					break;
				case VIRTCHNL2_PROTO_HDR_L2TPV2:
				case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
				case VIRTCHNL2_PROTO_HDR_L2TPV3:
					is_inner = true;
					proto_hdr |= RTE_PTYPE_TUNNEL_L2TP;
					break;
				case VIRTCHNL2_PROTO_HDR_NVGRE:
					is_inner = true;
					proto_hdr |= RTE_PTYPE_TUNNEL_NVGRE;
					break;
				case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
					is_inner = true;
					proto_hdr |= RTE_PTYPE_TUNNEL_GTPC;
					break;
				case VIRTCHNL2_PROTO_HDR_GTPU:
				case VIRTCHNL2_PROTO_HDR_GTPU_UL:
				case VIRTCHNL2_PROTO_HDR_GTPU_DL:
					is_inner = true;
					proto_hdr |= RTE_PTYPE_TUNNEL_GTPU;
					break;
				case VIRTCHNL2_PROTO_HDR_PAY:
				case VIRTCHNL2_PROTO_HDR_IPV6_EH:
				case VIRTCHNL2_PROTO_HDR_PRE_MAC:
				case VIRTCHNL2_PROTO_HDR_POST_MAC:
				case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
				case VIRTCHNL2_PROTO_HDR_SVLAN:
				case VIRTCHNL2_PROTO_HDR_CVLAN:
				case VIRTCHNL2_PROTO_HDR_MPLS:
				case VIRTCHNL2_PROTO_HDR_MMPLS:
				case VIRTCHNL2_PROTO_HDR_CTRL:
				case VIRTCHNL2_PROTO_HDR_ECP:
				case VIRTCHNL2_PROTO_HDR_EAPOL:
				case VIRTCHNL2_PROTO_HDR_PPPOD:
				case VIRTCHNL2_PROTO_HDR_IGMP:
				case VIRTCHNL2_PROTO_HDR_AH:
				case VIRTCHNL2_PROTO_HDR_ESP:
				case VIRTCHNL2_PROTO_HDR_IKE:
				case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
				case VIRTCHNL2_PROTO_HDR_GTP:
				case VIRTCHNL2_PROTO_HDR_GTP_EH:
				case VIRTCHNL2_PROTO_HDR_GTPCV2:
				case VIRTCHNL2_PROTO_HDR_ECPRI:
				case VIRTCHNL2_PROTO_HDR_VRRP:
				case VIRTCHNL2_PROTO_HDR_OSPF:
				case VIRTCHNL2_PROTO_HDR_TUN:
				case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
				case VIRTCHNL2_PROTO_HDR_GENEVE:
				case VIRTCHNL2_PROTO_HDR_NSH:
				case VIRTCHNL2_PROTO_HDR_QUIC:
				case VIRTCHNL2_PROTO_HDR_PFCP:
				case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
				case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
				case VIRTCHNL2_PROTO_HDR_RTP:
				case VIRTCHNL2_PROTO_HDR_NO_PROTO:
				default:
					continue;
				}
				adapter->ptype_tbl[ptype->ptype_id_10] = proto_hdr;
			}
		}
	}

free_ptype_info:
	rte_free(ptype_info);
	clear_cmd(adapter);
	return ret;
}

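/* Bring the PF out of reset, set up the mailbox control queues, negotiate
 * the virtchnl2 API version, then fetch device capabilities and build the
 * packet type table.
 */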
int
idpf_adapter_init(struct idpf_adapter *adapter)
{
	struct idpf_hw *hw = &adapter->hw;
	int ret;

	idpf_reset_pf(hw);
	ret = idpf_check_pf_reset_done(hw);
	if (ret != 0) {
		DRV_LOG(ERR, "IDPF is still resetting");
		goto err_check_reset;
	}

	ret = idpf_init_mbx(hw);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to init mailbox");
		goto err_check_reset;
	}

	adapter->mbx_resp = rte_zmalloc("idpf_adapter_mbx_resp",
					IDPF_DFLT_MBX_BUF_SIZE, 0);
	if (adapter->mbx_resp == NULL) {
		DRV_LOG(ERR, "Failed to allocate idpf_adapter_mbx_resp memory");
		ret = -ENOMEM;
		goto err_mbx_resp;
	}

	ret = idpf_vc_api_version_check(adapter);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to check API version");
		goto err_check_api;
	}

	ret = idpf_vc_caps_get(adapter);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to get capabilities");
		goto err_check_api;
	}

	ret = idpf_get_pkt_type(adapter);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to set ptype table");
		goto err_check_api;
	}

	return 0;

err_check_api:
	rte_free(adapter->mbx_resp);
	adapter->mbx_resp = NULL;
err_mbx_resp:
	idpf_ctlq_deinit(hw);
err_check_reset:
	return ret;
}

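/* Tear down the control queues and release the mailbox response buffer. */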
int
idpf_adapter_deinit(struct idpf_adapter *adapter)
{
	struct idpf_hw *hw = &adapter->hw;

	idpf_ctlq_deinit(hw);
	rte_free(adapter->mbx_resp);
	adapter->mbx_resp = NULL;

	return 0;
}

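/* Create a vport on the device and mirror the returned configuration
 * (queue model, queue counts, MAC address, RSS parameters and per-queue
 * register chunks) into the local vport structure, then allocate the RSS
 * key/LUT and vector-response buffers.
 */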
int
idpf_vport_init(struct idpf_vport *vport,
		struct virtchnl2_create_vport *create_vport_info,
		void *dev_data)
{
	struct virtchnl2_create_vport *vport_info;
	int i, type, ret;

	ret = idpf_vc_vport_create(vport, create_vport_info);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to create vport");
		goto err_create_vport;
	}

	vport_info = &(vport->vport_info.info);
	vport->vport_id = vport_info->vport_id;
	vport->txq_model = vport_info->txq_model;
	vport->rxq_model = vport_info->rxq_model;
	vport->num_tx_q = vport_info->num_tx_q;
	vport->num_tx_complq = vport_info->num_tx_complq;
	vport->num_rx_q = vport_info->num_rx_q;
	vport->num_rx_bufq = vport_info->num_rx_bufq;
	vport->max_mtu = vport_info->max_mtu;
	rte_memcpy(vport->default_mac_addr,
		   vport_info->default_mac_addr, ETH_ALEN);
	vport->rss_algorithm = vport_info->rss_algorithm;
	vport->rss_key_size = RTE_MIN(IDPF_RSS_KEY_LEN,
				      vport_info->rss_key_size);
	vport->rss_lut_size = vport_info->rss_lut_size;

	for (i = 0; i < vport_info->chunks.num_chunks; i++) {
		type = vport_info->chunks.chunks[i].type;
		switch (type) {
		case VIRTCHNL2_QUEUE_TYPE_TX:
			vport->chunks_info.tx_start_qid =
				vport_info->chunks.chunks[i].start_queue_id;
			vport->chunks_info.tx_qtail_start =
				vport_info->chunks.chunks[i].qtail_reg_start;
			vport->chunks_info.tx_qtail_spacing =
				vport_info->chunks.chunks[i].qtail_reg_spacing;
			break;
		case VIRTCHNL2_QUEUE_TYPE_RX:
			vport->chunks_info.rx_start_qid =
				vport_info->chunks.chunks[i].start_queue_id;
			vport->chunks_info.rx_qtail_start =
				vport_info->chunks.chunks[i].qtail_reg_start;
			vport->chunks_info.rx_qtail_spacing =
				vport_info->chunks.chunks[i].qtail_reg_spacing;
			break;
		case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
			vport->chunks_info.tx_compl_start_qid =
				vport_info->chunks.chunks[i].start_queue_id;
			vport->chunks_info.tx_compl_qtail_start =
				vport_info->chunks.chunks[i].qtail_reg_start;
			vport->chunks_info.tx_compl_qtail_spacing =
				vport_info->chunks.chunks[i].qtail_reg_spacing;
			break;
		case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
			vport->chunks_info.rx_buf_start_qid =
				vport_info->chunks.chunks[i].start_queue_id;
			vport->chunks_info.rx_buf_qtail_start =
				vport_info->chunks.chunks[i].qtail_reg_start;
			vport->chunks_info.rx_buf_qtail_spacing =
				vport_info->chunks.chunks[i].qtail_reg_spacing;
			break;
		default:
			DRV_LOG(ERR, "Unsupported queue type");
			break;
		}
	}

	vport->dev_data = dev_data;

	vport->rss_key = rte_zmalloc("rss_key",
				     vport->rss_key_size, 0);
	if (vport->rss_key == NULL) {
		DRV_LOG(ERR, "Failed to allocate RSS key");
		ret = -ENOMEM;
		goto err_rss_key;
	}

	vport->rss_lut = rte_zmalloc("rss_lut",
				     sizeof(uint32_t) * vport->rss_lut_size, 0);
	if (vport->rss_lut == NULL) {
		DRV_LOG(ERR, "Failed to allocate RSS LUT");
		ret = -ENOMEM;
		goto err_rss_lut;
	}

	/* recv_vectors holds the VIRTCHNL2_OP_ALLOC_VECTORS response;
	 * reserve the maximum size for it now, which may need optimization
	 * in the future.
	 */
	vport->recv_vectors = rte_zmalloc("recv_vectors", IDPF_DFLT_MBX_BUF_SIZE, 0);
	if (vport->recv_vectors == NULL) {
		DRV_LOG(ERR, "Failed to allocate recv_vectors");
		ret = -ENOMEM;
		goto err_recv_vec;
	}

	return 0;

err_recv_vec:
	rte_free(vport->rss_lut);
	vport->rss_lut = NULL;
err_rss_lut:
	vport->dev_data = NULL;
	rte_free(vport->rss_key);
	vport->rss_key = NULL;
err_rss_key:
	idpf_vc_vport_destroy(vport);
err_create_vport:
	return ret;
}

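/* Release the vport's buffers (vector response, RSS LUT and key) and
 * destroy the vport on the device.
 */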
int
idpf_vport_deinit(struct idpf_vport *vport)
{
	rte_free(vport->recv_vectors);
	vport->recv_vectors = NULL;

	rte_free(vport->rss_lut);
	vport->rss_lut = NULL;

	rte_free(vport->rss_key);
	vport->rss_key = NULL;

	vport->dev_data = NULL;

	idpf_vc_vport_destroy(vport);

	return 0;
}

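/* Program the RSS key, lookup table and hash configuration on the device. */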
int
idpf_vport_rss_config(struct idpf_vport *vport)
{
	int ret;

	ret = idpf_vc_rss_key_set(vport);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to configure RSS key");
		return ret;
	}

	ret = idpf_vc_rss_lut_set(vport);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to configure RSS LUT");
		return ret;
	}

	ret = idpf_vc_rss_hash_set(vport);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to configure RSS hash");
		return ret;
	}

	return ret;
}

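/* Map all RX queues to a single vector and enable write-back on ITR so
 * completed descriptors are flushed without enabling RX interrupts.
 */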
int
idpf_vport_irq_map_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_queue_vector *qv_map;
	struct idpf_hw *hw = &adapter->hw;
	uint32_t dynctl_val, itrn_val;
	uint32_t dynctl_reg_start;
	uint32_t itrn_reg_start;
	uint16_t i;
	int ret;

	qv_map = rte_zmalloc("qv_map",
			     nb_rx_queues *
			     sizeof(struct virtchnl2_queue_vector), 0);
	if (qv_map == NULL) {
		DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
			nb_rx_queues);
		ret = -ENOMEM;
		goto qv_map_alloc_err;
	}

	/* RX interrupt disabled, map the interrupt only for write-back */

	/* The capability flags adapter->caps.other_caps should be
	 * compared with bit VIRTCHNL2_CAP_WB_ON_ITR here. The if
	 * condition should be updated when the FW can return the
	 * correct flag bits.
	 */
	dynctl_reg_start =
		vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
	itrn_reg_start =
		vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
	dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
	DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x", dynctl_val);
	itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
	DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
	/* Force write-backs by setting the WB_ON_ITR bit in the DYN_CTL
	 * register. WB_ON_ITR and INTENA are mutually exclusive bits.
	 * Setting WB_ON_ITR means TX and RX descriptors are written back
	 * based on ITR expiration irrespective of the INTENA setting.
	 */
	/* TBD: need to tune the INTERVAL value for better performance. */
	itrn_val = (itrn_val == 0) ? IDPF_DFLT_INTERVAL : itrn_val;
	dynctl_val = VIRTCHNL2_ITR_IDX_0 <<
		     PF_GLINT_DYN_CTL_ITR_INDX_S |
		     PF_GLINT_DYN_CTL_WB_ON_ITR_M |
		     itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S;
	IDPF_WRITE_REG(hw, dynctl_reg_start, dynctl_val);

	for (i = 0; i < nb_rx_queues; i++) {
		/* map all queues to the same vector */
		qv_map[i].queue_id = vport->chunks_info.rx_start_qid + i;
		qv_map[i].vector_id =
			vport->recv_vectors->vchunks.vchunks->start_vector_id;
	}
	vport->qv_map = qv_map;

	ret = idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, true);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to configure interrupt mapping");
		goto config_irq_map_err;
	}

	return 0;

config_irq_map_err:
	rte_free(vport->qv_map);
	vport->qv_map = NULL;

qv_map_alloc_err:
	return ret;
}

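/* Undo the queue-to-vector mapping on the device and free the map table. */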
int
idpf_vport_irq_unmap_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
{
	idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, false);

	rte_free(vport->qv_map);
	vport->qv_map = NULL;

	return 0;
}

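/* Fill a virtchnl2_create_vport request with the default vport type and
 * queue layout: the split queue model unless adapter->txq_model or
 * adapter->rxq_model is non-zero, which selects the single queue model.
 */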
int
idpf_vport_info_init(struct idpf_vport *vport,
		     struct virtchnl2_create_vport *vport_info)
{
	struct idpf_adapter *adapter = vport->adapter;

	vport_info->vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
	if (adapter->txq_model == 0) {
		vport_info->txq_model =
			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
		vport_info->num_tx_q =
			rte_cpu_to_le_16(IDPF_DEFAULT_TXQ_NUM);
		vport_info->num_tx_complq =
			rte_cpu_to_le_16(IDPF_DEFAULT_TXQ_NUM * IDPF_TX_COMPLQ_PER_GRP);
	} else {
		vport_info->txq_model =
			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
		vport_info->num_tx_q = rte_cpu_to_le_16(IDPF_DEFAULT_TXQ_NUM);
		vport_info->num_tx_complq = 0;
	}
	if (adapter->rxq_model == 0) {
		vport_info->rxq_model =
			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
		vport_info->num_rx_q = rte_cpu_to_le_16(IDPF_DEFAULT_RXQ_NUM);
		vport_info->num_rx_bufq =
			rte_cpu_to_le_16(IDPF_DEFAULT_RXQ_NUM * IDPF_RX_BUFQ_PER_GRP);
	} else {
		vport_info->rxq_model =
			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
		vport_info->num_rx_q = rte_cpu_to_le_16(IDPF_DEFAULT_RXQ_NUM);
		vport_info->num_rx_bufq = 0;
	}

	return 0;
}

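/* Turn cumulative device counters into deltas since the last snapshot:
 * on entry nes holds the latest reading and oes the previous one; on
 * exit nes holds the difference.
 */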
void
idpf_vport_stats_update(struct virtchnl2_vport_stats *oes, struct virtchnl2_vport_stats *nes)
{
	nes->rx_bytes -= oes->rx_bytes;
	nes->rx_unicast -= oes->rx_unicast;
	nes->rx_multicast -= oes->rx_multicast;
	nes->rx_broadcast -= oes->rx_broadcast;
	nes->rx_errors -= oes->rx_errors;
	nes->rx_discards -= oes->rx_discards;
	nes->tx_bytes -= oes->tx_bytes;
	nes->tx_unicast -= oes->tx_unicast;
	nes->tx_multicast -= oes->tx_multicast;
	nes->tx_broadcast -= oes->tx_broadcast;
	nes->tx_errors -= oes->tx_errors;
	nes->tx_discards -= oes->tx_discards;
}

RTE_LOG_REGISTER_SUFFIX(idpf_common_logtype, common, NOTICE);