/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Intel Corporation
 */

#include <rte_log.h>
#include "idpf_common_device.h"
#include "idpf_common_virtchnl.h"
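
/* Trigger a PF software reset by setting the PFSWR bit in PFGEN_CTRL.
 * The reset is only requested here; completion is polled separately in
 * idpf_check_pf_reset_done().
 */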
static void
idpf_reset_pf(struct idpf_hw *hw)
{
	uint32_t reg;

	reg = IDPF_READ_REG(hw, PFGEN_CTRL);
	IDPF_WRITE_REG(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR));
}

#define IDPF_RESET_WAIT_CNT 100
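
/* Poll PFGEN_RSTAT until the PF reset state machine reports done. With
 * IDPF_RESET_WAIT_CNT iterations of a 1000 ms delay, this waits for up
 * to 100 seconds before giving up. A reading of 0xFFFFFFFF means the
 * register is not yet accessible (device still in reset).
 */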
static int
idpf_check_pf_reset_done(struct idpf_hw *hw)
{
	uint32_t reg;
	int i;

	for (i = 0; i < IDPF_RESET_WAIT_CNT; i++) {
		reg = IDPF_READ_REG(hw, PFGEN_RSTAT);
		if (reg != 0xFFFFFFFF && (reg & PFGEN_RSTAT_PFR_STATE_M))
			return 0;
		rte_delay_ms(1000);
	}

	DRV_LOG(ERR, "IDPF reset timeout");
	return -EBUSY;
}
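
/* VF counterpart of idpf_check_pf_reset_done(): poll VFGEN_RSTAT with the
 * same timeout policy.
 */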
static int
idpf_check_vf_reset_done(struct idpf_hw *hw)
{
	uint32_t reg;
	int i;

	for (i = 0; i < IDPF_RESET_WAIT_CNT; i++) {
		reg = IDPF_READ_REG(hw, VFGEN_RSTAT);
		if (reg != 0xFFFFFFFF && (reg & VFGEN_RSTAT_VFR_STATE_M))
			return 0;
		rte_delay_ms(1000);
	}

	DRV_LOG(ERR, "VF reset timeout");
	return -EBUSY;
}

#define IDPF_CTLQ_NUM 2
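
/* Register layout for the PF mailbox control queue pair: entry 0 is the
 * send queue (ATQ, driver to firmware) and entry 1 is the receive queue
 * (ARQ, firmware to driver). Both share the same queue ID and buffer size.
 */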
struct idpf_ctlq_create_info pf_ctlq_info[IDPF_CTLQ_NUM] = {
	{
		.type = IDPF_CTLQ_TYPE_MAILBOX_TX,
		.id = IDPF_CTLQ_ID,
		.len = IDPF_CTLQ_LEN,
		.buf_size = IDPF_DFLT_MBX_BUF_SIZE,
		.reg = {
			.head = PF_FW_ATQH,
			.tail = PF_FW_ATQT,
			.len = PF_FW_ATQLEN,
			.bah = PF_FW_ATQBAH,
			.bal = PF_FW_ATQBAL,
			.len_mask = PF_FW_ATQLEN_ATQLEN_M,
			.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M,
			.head_mask = PF_FW_ATQH_ATQH_M,
		}
	},
	{
		.type = IDPF_CTLQ_TYPE_MAILBOX_RX,
		.id = IDPF_CTLQ_ID,
		.len = IDPF_CTLQ_LEN,
		.buf_size = IDPF_DFLT_MBX_BUF_SIZE,
		.reg = {
			.head = PF_FW_ARQH,
			.tail = PF_FW_ARQT,
			.len = PF_FW_ARQLEN,
			.bah = PF_FW_ARQBAH,
			.bal = PF_FW_ARQBAL,
			.len_mask = PF_FW_ARQLEN_ARQLEN_M,
			.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M,
			.head_mask = PF_FW_ARQH_ARQH_M,
		}
	}
};
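
/* Same mailbox queue pair as above, but using the VF register set. */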
struct idpf_ctlq_create_info vf_ctlq_info[IDPF_CTLQ_NUM] = {
	{
		.type = IDPF_CTLQ_TYPE_MAILBOX_TX,
		.id = IDPF_CTLQ_ID,
		.len = IDPF_CTLQ_LEN,
		.buf_size = IDPF_DFLT_MBX_BUF_SIZE,
		.reg = {
			.head = VF_ATQH,
			.tail = VF_ATQT,
			.len = VF_ATQLEN,
			.bah = VF_ATQBAH,
			.bal = VF_ATQBAL,
			.len_mask = VF_ATQLEN_ATQLEN_M,
			.len_ena_mask = VF_ATQLEN_ATQENABLE_M,
			.head_mask = VF_ATQH_ATQH_M,
		}
	},
	{
		.type = IDPF_CTLQ_TYPE_MAILBOX_RX,
		.id = IDPF_CTLQ_ID,
		.len = IDPF_CTLQ_LEN,
		.buf_size = IDPF_DFLT_MBX_BUF_SIZE,
		.reg = {
			.head = VF_ARQH,
			.tail = VF_ARQT,
			.len = VF_ARQLEN,
			.bah = VF_ARQBAH,
			.bal = VF_ARQBAL,
			.len_mask = VF_ARQLEN_ARQLEN_M,
			.len_ena_mask = VF_ARQLEN_ARQENABLE_M,
			.head_mask = VF_ARQH_ARQH_M,
		}
	}
};
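
/* Initialize the mailbox control queues and cache pointers to the send
 * (asq) and receive (arq) queues in the hw struct. If either queue cannot
 * be found afterwards, everything is torn down again and -ENOENT is
 * returned.
 */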
static int
idpf_init_mbx(struct idpf_hw *hw)
{
	struct idpf_ctlq_info *ctlq;
	int ret = 0;

	if (hw->device_id == IDPF_DEV_ID_SRIOV)
		ret = idpf_ctlq_init(hw, IDPF_CTLQ_NUM, vf_ctlq_info);
	else
		ret = idpf_ctlq_init(hw, IDPF_CTLQ_NUM, pf_ctlq_info);
	if (ret != 0)
		return ret;

	LIST_FOR_EACH_ENTRY_SAFE(ctlq, NULL, &hw->cq_list_head,
				 struct idpf_ctlq_info, cq_list) {
		if (ctlq->q_id == IDPF_CTLQ_ID &&
		    ctlq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
			hw->asq = ctlq;
		if (ctlq->q_id == IDPF_CTLQ_ID &&
		    ctlq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX)
			hw->arq = ctlq;
	}

	if (hw->asq == NULL || hw->arq == NULL) {
		idpf_ctlq_deinit(hw);
		ret = -ENOENT;
	}

	return ret;
}
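
/* Query the device's packet-type table over virtchnl and translate each
 * 10-bit hardware ptype ID into the corresponding RTE_PTYPE_* mbuf flags,
 * filling adapter->ptype_tbl. Responses arrive in mailbox-buffer-sized
 * chunks until IDPF_MAX_PKT_TYPE entries have been received or a ptype ID
 * of 0xFFFF marks the end of the list.
 */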
static int
idpf_get_pkt_type(struct idpf_adapter *adapter)
{
	struct virtchnl2_get_ptype_info *ptype_info;
	uint16_t ptype_offset, i, j;
	uint16_t ptype_recvd = 0;
	int ret;

	ret = idpf_vc_ptype_info_query(adapter);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to query packet type information");
		return ret;
	}

	ptype_info = rte_zmalloc("ptype_info", IDPF_DFLT_MBX_BUF_SIZE, 0);
	if (ptype_info == NULL)
		return -ENOMEM;

	while (ptype_recvd < IDPF_MAX_PKT_TYPE) {
		ret = idpf_vc_one_msg_read(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO,
					   IDPF_DFLT_MBX_BUF_SIZE, (uint8_t *)ptype_info);
		if (ret != 0) {
			DRV_LOG(ERR, "Failed to get packet type information");
			goto free_ptype_info;
		}

		ptype_recvd += ptype_info->num_ptypes;
		ptype_offset = sizeof(struct virtchnl2_get_ptype_info) -
						sizeof(struct virtchnl2_ptype);

		for (i = 0; i < rte_cpu_to_le_16(ptype_info->num_ptypes); i++) {
			bool is_inner = false, is_ip = false;
			struct virtchnl2_ptype *ptype;
			uint32_t proto_hdr = 0;

			ptype = (struct virtchnl2_ptype *)
					((uint8_t *)ptype_info + ptype_offset);
			ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
			if (ptype_offset > IDPF_DFLT_MBX_BUF_SIZE) {
				ret = -EINVAL;
				goto free_ptype_info;
			}

			if (rte_cpu_to_le_16(ptype->ptype_id_10) == 0xFFFF)
				goto free_ptype_info;

			for (j = 0; j < ptype->proto_id_count; j++) {
				switch (rte_cpu_to_le_16(ptype->proto_id[j])) {
				case VIRTCHNL2_PROTO_HDR_GRE:
				case VIRTCHNL2_PROTO_HDR_VXLAN:
					proto_hdr &= ~RTE_PTYPE_L4_MASK;
					proto_hdr |= RTE_PTYPE_TUNNEL_GRENAT;
					is_inner = true;
					break;
				case VIRTCHNL2_PROTO_HDR_MAC:
					if (is_inner) {
						proto_hdr &= ~RTE_PTYPE_INNER_L2_MASK;
						proto_hdr |= RTE_PTYPE_INNER_L2_ETHER;
					} else {
						proto_hdr &= ~RTE_PTYPE_L2_MASK;
						proto_hdr |= RTE_PTYPE_L2_ETHER;
					}
					break;
				case VIRTCHNL2_PROTO_HDR_VLAN:
					if (is_inner) {
						proto_hdr &= ~RTE_PTYPE_INNER_L2_MASK;
						proto_hdr |= RTE_PTYPE_INNER_L2_ETHER_VLAN;
					}
					break;
				case VIRTCHNL2_PROTO_HDR_PTP:
					proto_hdr &= ~RTE_PTYPE_L2_MASK;
					proto_hdr |= RTE_PTYPE_L2_ETHER_TIMESYNC;
					break;
				case VIRTCHNL2_PROTO_HDR_LLDP:
					proto_hdr &= ~RTE_PTYPE_L2_MASK;
					proto_hdr |= RTE_PTYPE_L2_ETHER_LLDP;
					break;
				case VIRTCHNL2_PROTO_HDR_ARP:
					proto_hdr &= ~RTE_PTYPE_L2_MASK;
					proto_hdr |= RTE_PTYPE_L2_ETHER_ARP;
					break;
				case VIRTCHNL2_PROTO_HDR_PPPOE:
					proto_hdr &= ~RTE_PTYPE_L2_MASK;
					proto_hdr |= RTE_PTYPE_L2_ETHER_PPPOE;
					break;
				case VIRTCHNL2_PROTO_HDR_IPV4:
					if (!is_ip) {
						proto_hdr |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
						is_ip = true;
					} else {
						proto_hdr |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
							     RTE_PTYPE_TUNNEL_IP;
						is_inner = true;
					}
					break;
				case VIRTCHNL2_PROTO_HDR_IPV6:
					if (!is_ip) {
						proto_hdr |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
						is_ip = true;
					} else {
						proto_hdr |= RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
							     RTE_PTYPE_TUNNEL_IP;
						is_inner = true;
					}
					break;
				case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
				case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
					if (is_inner)
						proto_hdr |= RTE_PTYPE_INNER_L4_FRAG;
					else
						proto_hdr |= RTE_PTYPE_L4_FRAG;
					break;
				case VIRTCHNL2_PROTO_HDR_UDP:
					if (is_inner)
						proto_hdr |= RTE_PTYPE_INNER_L4_UDP;
					else
						proto_hdr |= RTE_PTYPE_L4_UDP;
					break;
				case VIRTCHNL2_PROTO_HDR_TCP:
					if (is_inner)
						proto_hdr |= RTE_PTYPE_INNER_L4_TCP;
					else
						proto_hdr |= RTE_PTYPE_L4_TCP;
					break;
				case VIRTCHNL2_PROTO_HDR_SCTP:
					if (is_inner)
						proto_hdr |= RTE_PTYPE_INNER_L4_SCTP;
					else
						proto_hdr |= RTE_PTYPE_L4_SCTP;
					break;
				case VIRTCHNL2_PROTO_HDR_ICMP:
					if (is_inner)
						proto_hdr |= RTE_PTYPE_INNER_L4_ICMP;
					else
						proto_hdr |= RTE_PTYPE_L4_ICMP;
					break;
				case VIRTCHNL2_PROTO_HDR_ICMPV6:
					if (is_inner)
						proto_hdr |= RTE_PTYPE_INNER_L4_ICMP;
					else
						proto_hdr |= RTE_PTYPE_L4_ICMP;
					break;
				case VIRTCHNL2_PROTO_HDR_L2TPV2:
				case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
				case VIRTCHNL2_PROTO_HDR_L2TPV3:
					is_inner = true;
					proto_hdr |= RTE_PTYPE_TUNNEL_L2TP;
					break;
				case VIRTCHNL2_PROTO_HDR_NVGRE:
					is_inner = true;
					proto_hdr |= RTE_PTYPE_TUNNEL_NVGRE;
					break;
				case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
					is_inner = true;
					proto_hdr |= RTE_PTYPE_TUNNEL_GTPC;
					break;
				case VIRTCHNL2_PROTO_HDR_GTPU:
				case VIRTCHNL2_PROTO_HDR_GTPU_UL:
				case VIRTCHNL2_PROTO_HDR_GTPU_DL:
					is_inner = true;
					proto_hdr |= RTE_PTYPE_TUNNEL_GTPU;
					break;
				case VIRTCHNL2_PROTO_HDR_PAY:
				case VIRTCHNL2_PROTO_HDR_IPV6_EH:
				case VIRTCHNL2_PROTO_HDR_PRE_MAC:
				case VIRTCHNL2_PROTO_HDR_POST_MAC:
				case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
				case VIRTCHNL2_PROTO_HDR_SVLAN:
				case VIRTCHNL2_PROTO_HDR_CVLAN:
				case VIRTCHNL2_PROTO_HDR_MPLS:
				case VIRTCHNL2_PROTO_HDR_MMPLS:
				case VIRTCHNL2_PROTO_HDR_CTRL:
				case VIRTCHNL2_PROTO_HDR_ECP:
				case VIRTCHNL2_PROTO_HDR_EAPOL:
				case VIRTCHNL2_PROTO_HDR_PPPOD:
				case VIRTCHNL2_PROTO_HDR_IGMP:
				case VIRTCHNL2_PROTO_HDR_AH:
				case VIRTCHNL2_PROTO_HDR_ESP:
				case VIRTCHNL2_PROTO_HDR_IKE:
				case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
				case VIRTCHNL2_PROTO_HDR_GTP:
				case VIRTCHNL2_PROTO_HDR_GTP_EH:
				case VIRTCHNL2_PROTO_HDR_GTPCV2:
				case VIRTCHNL2_PROTO_HDR_ECPRI:
				case VIRTCHNL2_PROTO_HDR_VRRP:
				case VIRTCHNL2_PROTO_HDR_OSPF:
				case VIRTCHNL2_PROTO_HDR_TUN:
				case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
				case VIRTCHNL2_PROTO_HDR_GENEVE:
				case VIRTCHNL2_PROTO_HDR_NSH:
				case VIRTCHNL2_PROTO_HDR_QUIC:
				case VIRTCHNL2_PROTO_HDR_PFCP:
				case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
				case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
				case VIRTCHNL2_PROTO_HDR_RTP:
				case VIRTCHNL2_PROTO_HDR_NO_PROTO:
				default:
					continue;
				}
				adapter->ptype_tbl[ptype->ptype_id_10] = proto_hdr;
			}
		}
	}

free_ptype_info:
	rte_free(ptype_info);
	clear_cmd(adapter);
	return ret;
}
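
/* Bring the adapter to an operational state: wait for reset completion,
 * set up the mailbox, negotiate the virtchnl API version, fetch device
 * capabilities and build the packet-type table. A minimal usage sketch,
 * assuming a caller that has already filled in hw->device_id and the
 * register mappings from PCI probe (error handling elided):
 *
 *	struct idpf_adapter ad = { 0 };
 *
 *	ad.hw.device_id = dev_id;	// hypothetical value from probe
 *	if (idpf_adapter_init(&ad) == 0) {
 *		// ... create vports, configure queues ...
 *		idpf_adapter_deinit(&ad);
 *	}
 */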
int
idpf_adapter_init(struct idpf_adapter *adapter)
{
	struct idpf_hw *hw = &adapter->hw;
	int ret;

	if (hw->device_id == IDPF_DEV_ID_SRIOV) {
		ret = idpf_check_vf_reset_done(hw);
	} else {
		idpf_reset_pf(hw);
		ret = idpf_check_pf_reset_done(hw);
	}
	if (ret != 0) {
		DRV_LOG(ERR, "IDPF is still resetting");
		goto err_check_reset;
	}

	ret = idpf_init_mbx(hw);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to init mailbox");
		goto err_check_reset;
	}

	adapter->mbx_resp = rte_zmalloc("idpf_adapter_mbx_resp",
					IDPF_DFLT_MBX_BUF_SIZE, 0);
	if (adapter->mbx_resp == NULL) {
		DRV_LOG(ERR, "Failed to allocate idpf_adapter_mbx_resp memory");
		ret = -ENOMEM;
		goto err_mbx_resp;
	}

	ret = idpf_vc_api_version_check(adapter);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to check API version");
		goto err_check_api;
	}

	ret = idpf_vc_caps_get(adapter);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to get capabilities");
		goto err_check_api;
	}

	ret = idpf_get_pkt_type(adapter);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to set ptype table");
		goto err_check_api;
	}

	return 0;

err_check_api:
	rte_free(adapter->mbx_resp);
	adapter->mbx_resp = NULL;
err_mbx_resp:
	idpf_ctlq_deinit(hw);
err_check_reset:
	return ret;
}
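
/* Release adapter-level resources: tear down the mailbox control queues
 * and free the mailbox response buffer.
 */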
int
idpf_adapter_deinit(struct idpf_adapter *adapter)
{
	struct idpf_hw *hw = &adapter->hw;

	idpf_ctlq_deinit(hw);
	rte_free(adapter->mbx_resp);
	adapter->mbx_resp = NULL;

	return 0;
}
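
/* Create a vport over virtchnl and cache the returned configuration:
 * queue model and counts, MAC address, RSS parameters and the queue/tail
 * register chunks. Also allocates the RSS key, RSS LUT and the buffer
 * that holds the VIRTCHNL2_OP_ALLOC_VECTORS response.
 */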
int
idpf_vport_init(struct idpf_vport *vport,
		struct virtchnl2_create_vport *create_vport_info,
		void *dev_data)
{
	struct virtchnl2_create_vport *vport_info;
	int i, type, ret;

	ret = idpf_vc_vport_create(vport, create_vport_info);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to create vport.");
		goto err_create_vport;
	}

	vport_info = &(vport->vport_info.info);
	vport->vport_id = vport_info->vport_id;
	vport->txq_model = vport_info->txq_model;
	vport->rxq_model = vport_info->rxq_model;
	vport->num_tx_q = vport_info->num_tx_q;
	vport->num_tx_complq = vport_info->num_tx_complq;
	vport->num_rx_q = vport_info->num_rx_q;
	vport->num_rx_bufq = vport_info->num_rx_bufq;
	vport->max_mtu = vport_info->max_mtu;
	rte_memcpy(vport->default_mac_addr,
		   vport_info->default_mac_addr, ETH_ALEN);
	vport->rss_algorithm = vport_info->rss_algorithm;
	vport->rss_key_size = RTE_MIN(IDPF_RSS_KEY_LEN,
				      vport_info->rss_key_size);
	vport->rss_lut_size = vport_info->rss_lut_size;

	for (i = 0; i < vport_info->chunks.num_chunks; i++) {
		type = vport_info->chunks.chunks[i].type;
		switch (type) {
		case VIRTCHNL2_QUEUE_TYPE_TX:
			vport->chunks_info.tx_start_qid =
				vport_info->chunks.chunks[i].start_queue_id;
			vport->chunks_info.tx_qtail_start =
				vport_info->chunks.chunks[i].qtail_reg_start;
			vport->chunks_info.tx_qtail_spacing =
				vport_info->chunks.chunks[i].qtail_reg_spacing;
			break;
		case VIRTCHNL2_QUEUE_TYPE_RX:
			vport->chunks_info.rx_start_qid =
				vport_info->chunks.chunks[i].start_queue_id;
			vport->chunks_info.rx_qtail_start =
				vport_info->chunks.chunks[i].qtail_reg_start;
			vport->chunks_info.rx_qtail_spacing =
				vport_info->chunks.chunks[i].qtail_reg_spacing;
			break;
		case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
			vport->chunks_info.tx_compl_start_qid =
				vport_info->chunks.chunks[i].start_queue_id;
			vport->chunks_info.tx_compl_qtail_start =
				vport_info->chunks.chunks[i].qtail_reg_start;
			vport->chunks_info.tx_compl_qtail_spacing =
				vport_info->chunks.chunks[i].qtail_reg_spacing;
			break;
		case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
			vport->chunks_info.rx_buf_start_qid =
				vport_info->chunks.chunks[i].start_queue_id;
			vport->chunks_info.rx_buf_qtail_start =
				vport_info->chunks.chunks[i].qtail_reg_start;
			vport->chunks_info.rx_buf_qtail_spacing =
				vport_info->chunks.chunks[i].qtail_reg_spacing;
			break;
		default:
			DRV_LOG(ERR, "Unsupported queue type");
			break;
		}
	}

	vport->dev_data = dev_data;

	vport->rss_key = rte_zmalloc("rss_key",
				     vport->rss_key_size, 0);
	if (vport->rss_key == NULL) {
		DRV_LOG(ERR, "Failed to allocate RSS key");
		ret = -ENOMEM;
		goto err_rss_key;
	}

	vport->rss_lut = rte_zmalloc("rss_lut",
				     sizeof(uint32_t) * vport->rss_lut_size, 0);
	if (vport->rss_lut == NULL) {
		DRV_LOG(ERR, "Failed to allocate RSS lut");
		ret = -ENOMEM;
		goto err_rss_lut;
	}

	/* recv_vectors holds the VIRTCHNL2_OP_ALLOC_VECTORS response;
	 * reserve the maximum size for it now, which may need optimization
	 * in the future.
	 */
	vport->recv_vectors = rte_zmalloc("recv_vectors", IDPF_DFLT_MBX_BUF_SIZE, 0);
	if (vport->recv_vectors == NULL) {
		DRV_LOG(ERR, "Failed to allocate recv_vectors");
		ret = -ENOMEM;
		goto err_recv_vec;
	}

	return 0;

err_recv_vec:
	rte_free(vport->rss_lut);
	vport->rss_lut = NULL;
err_rss_lut:
	vport->dev_data = NULL;
	rte_free(vport->rss_key);
	vport->rss_key = NULL;
err_rss_key:
	idpf_vc_vport_destroy(vport);
err_create_vport:
	return ret;
}
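
/* Free all vport resources allocated in idpf_vport_init() and destroy the
 * vport over virtchnl.
 */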
int
idpf_vport_deinit(struct idpf_vport *vport)
{
	rte_free(vport->recv_vectors);
	vport->recv_vectors = NULL;
	rte_free(vport->rss_lut);
	vport->rss_lut = NULL;

	rte_free(vport->rss_key);
	vport->rss_key = NULL;

	vport->dev_data = NULL;

	idpf_vc_vport_destroy(vport);

	return 0;
}
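
/* Push the vport's RSS configuration (hash key, lookup table and hash
 * settings) to the device over virtchnl.
 */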
int
idpf_vport_rss_config(struct idpf_vport *vport)
{
	int ret;

	ret = idpf_vc_rss_key_set(vport);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to configure RSS key");
		return ret;
	}

	ret = idpf_vc_rss_lut_set(vport);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to configure RSS lut");
		return ret;
	}

	ret = idpf_vc_rss_hash_set(vport);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to configure RSS hash");
		return ret;
	}

	return ret;
}
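
/* Map the first nb_rx_queues Rx queues of the vport to the single vector
 * returned by VIRTCHNL2_OP_ALLOC_VECTORS. Rx interrupts stay disabled;
 * the vector is programmed for descriptor write-back on ITR expiration
 * only.
 */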
int
idpf_vport_irq_map_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_queue_vector *qv_map;
	struct idpf_hw *hw = &adapter->hw;
	uint32_t dynctl_val, itrn_val;
	uint32_t dynctl_reg_start;
	uint32_t itrn_reg_start;
	uint16_t i;
	int ret;

	qv_map = rte_zmalloc("qv_map",
			     nb_rx_queues *
			     sizeof(struct virtchnl2_queue_vector), 0);
	if (qv_map == NULL) {
		DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
			nb_rx_queues);
		ret = -ENOMEM;
		goto qv_map_alloc_err;
	}

	/* Rx interrupt is disabled; map the interrupt only for write-back. */

	/* The capability flags in adapter->caps.other_caps should be
	 * compared with the VIRTCHNL2_CAP_WB_ON_ITR bit here. The if
	 * condition should be updated when the FW can return the
	 * correct flag bits.
	 */
	dynctl_reg_start =
		vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
	itrn_reg_start =
		vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
	dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
	DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x", dynctl_val);
	itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
	DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
	/* Force write-backs by setting the WB_ON_ITR bit in the DYN_CTL
	 * register. WB_ON_ITR and INTENA are mutually exclusive bits.
	 * Setting WB_ON_ITR means TX and RX descriptors are written back
	 * based on ITR expiration irrespective of the INTENA setting.
	 */
	/* TBD: need to tune the INTERVAL value for better performance. */
	itrn_val = (itrn_val == 0) ? IDPF_DFLT_INTERVAL : itrn_val;
	dynctl_val = VIRTCHNL2_ITR_IDX_0 <<
		     PF_GLINT_DYN_CTL_ITR_INDX_S |
		     PF_GLINT_DYN_CTL_WB_ON_ITR_M |
		     itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S;
	IDPF_WRITE_REG(hw, dynctl_reg_start, dynctl_val);

	for (i = 0; i < nb_rx_queues; i++) {
		/* map all queues to the same vector */
		qv_map[i].queue_id = vport->chunks_info.rx_start_qid + i;
		qv_map[i].vector_id =
			vport->recv_vectors->vchunks.vchunks->start_vector_id;
	}
	vport->qv_map = qv_map;

	ret = idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, true);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to configure interrupt mapping");
		goto config_irq_map_err;
	}

	return 0;

config_irq_map_err:
	rte_free(vport->qv_map);
	vport->qv_map = NULL;

qv_map_alloc_err:
	return ret;
}
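
/* Same as idpf_vport_irq_map_config(), but the caller supplies the exact
 * queue IDs to map instead of deriving them from the vport's starting
 * queue ID.
 */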
int
idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport, uint32_t *qids, uint16_t nb_rx_queues)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_queue_vector *qv_map;
	struct idpf_hw *hw = &adapter->hw;
	uint32_t dynctl_val, itrn_val;
	uint32_t dynctl_reg_start;
	uint32_t itrn_reg_start;
	uint16_t i;
	int ret;

	qv_map = rte_zmalloc("qv_map",
			     nb_rx_queues *
			     sizeof(struct virtchnl2_queue_vector), 0);
	if (qv_map == NULL) {
		DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
			nb_rx_queues);
		ret = -ENOMEM;
		goto qv_map_alloc_err;
	}

	/* Rx interrupt is disabled; map the interrupt only for write-back. */

	/* The capability flags in adapter->caps.other_caps should be
	 * compared with the VIRTCHNL2_CAP_WB_ON_ITR bit here. The if
	 * condition should be updated when the FW can return the
	 * correct flag bits.
	 */
	dynctl_reg_start =
		vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
	itrn_reg_start =
		vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
	dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
	DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x", dynctl_val);
	itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
	DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
	/* Force write-backs by setting the WB_ON_ITR bit in the DYN_CTL
	 * register. WB_ON_ITR and INTENA are mutually exclusive bits.
	 * Setting WB_ON_ITR means TX and RX descriptors are written back
	 * based on ITR expiration irrespective of the INTENA setting.
	 */
	/* TBD: need to tune the INTERVAL value for better performance. */
	itrn_val = (itrn_val == 0) ? IDPF_DFLT_INTERVAL : itrn_val;
	dynctl_val = VIRTCHNL2_ITR_IDX_0 <<
		     PF_GLINT_DYN_CTL_ITR_INDX_S |
		     PF_GLINT_DYN_CTL_WB_ON_ITR_M |
		     itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S;
	IDPF_WRITE_REG(hw, dynctl_reg_start, dynctl_val);

	for (i = 0; i < nb_rx_queues; i++) {
		/* map all queues to the same vector */
		qv_map[i].queue_id = qids[i];
		qv_map[i].vector_id =
			vport->recv_vectors->vchunks.vchunks->start_vector_id;
	}
	vport->qv_map = qv_map;

	ret = idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, true);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to configure interrupt mapping");
		goto config_irq_map_err;
	}

	return 0;

config_irq_map_err:
	rte_free(vport->qv_map);
	vport->qv_map = NULL;

qv_map_alloc_err:
	return ret;
}
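
/* Unmap the queue-vector association on the device and free the map. */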
int
idpf_vport_irq_unmap_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
{
	idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, false);

	rte_free(vport->qv_map);
	vport->qv_map = NULL;

	return 0;
}
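
/* Fill a create-vport request with default queue models and counts:
 * split queue model (with completion/buffer queues) unless the adapter
 * is forced to single-queue mode for Tx or Rx.
 */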
int
idpf_vport_info_init(struct idpf_vport *vport,
			    struct virtchnl2_create_vport *vport_info)
{
	struct idpf_adapter *adapter = vport->adapter;

	vport_info->vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
	if (!adapter->is_tx_singleq) {
		vport_info->txq_model =
			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
		vport_info->num_tx_q =
			rte_cpu_to_le_16(IDPF_DEFAULT_TXQ_NUM);
		vport_info->num_tx_complq =
			rte_cpu_to_le_16(IDPF_DEFAULT_TXQ_NUM * IDPF_TX_COMPLQ_PER_GRP);
	} else {
		vport_info->txq_model =
			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
		vport_info->num_tx_q = rte_cpu_to_le_16(IDPF_DEFAULT_TXQ_NUM);
		vport_info->num_tx_complq = 0;
	}
	if (!adapter->is_rx_singleq) {
		vport_info->rxq_model =
			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
		vport_info->num_rx_q = rte_cpu_to_le_16(IDPF_DEFAULT_RXQ_NUM);
		vport_info->num_rx_bufq =
			rte_cpu_to_le_16(IDPF_DEFAULT_RXQ_NUM * IDPF_RX_BUFQ_PER_GRP);
	} else {
		vport_info->rxq_model =
			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
		vport_info->num_rx_q = rte_cpu_to_le_16(IDPF_DEFAULT_RXQ_NUM);
		vport_info->num_rx_bufq = 0;
	}

	return 0;
}
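
/* Convert cumulative counters into deltas: on return, nes holds the
 * difference between the new snapshot (nes) and the old one (oes).
 */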
void
idpf_vport_stats_update(struct virtchnl2_vport_stats *oes, struct virtchnl2_vport_stats *nes)
{
	nes->rx_bytes -= oes->rx_bytes;
	nes->rx_unicast -= oes->rx_unicast;
	nes->rx_multicast -= oes->rx_multicast;
	nes->rx_broadcast -= oes->rx_broadcast;
	nes->rx_errors -= oes->rx_errors;
	nes->rx_discards -= oes->rx_discards;
	nes->tx_bytes -= oes->tx_bytes;
	nes->tx_unicast -= oes->tx_unicast;
	nes->tx_multicast -= oes->tx_multicast;
	nes->tx_broadcast -= oes->tx_broadcast;
	nes->tx_errors -= oes->tx_errors;
	nes->tx_discards -= oes->tx_discards;
}

RTE_LOG_REGISTER_SUFFIX(idpf_common_logtype, common, NOTICE);