xref: /dpdk/drivers/common/idpf/idpf_common_device.c (revision e9fd1ebf981f361844aea9ec94e17f4bda5e1479)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Intel Corporation
 */

#include <rte_log.h>
#include "idpf_common_device.h"
#include "idpf_common_virtchnl.h"

static void
idpf_reset_pf(struct idpf_hw *hw)
{
	uint32_t reg;

	reg = IDPF_READ_REG(hw, PFGEN_CTRL);
	IDPF_WRITE_REG(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR));
}

#define IDPF_RESET_WAIT_CNT 100

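/*
 * Poll the PF/VF reset status register until the hardware reports that
 * the reset has completed.  A read of all ones means the register is not
 * accessible yet.  With IDPF_RESET_WAIT_CNT iterations and a 1000 ms
 * delay per attempt, the wait is bounded at roughly 100 seconds.
 */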
static int
idpf_check_pf_reset_done(struct idpf_hw *hw)
{
	uint32_t reg;
	int i;

	for (i = 0; i < IDPF_RESET_WAIT_CNT; i++) {
		reg = IDPF_READ_REG(hw, PFGEN_RSTAT);
		if (reg != 0xFFFFFFFF && (reg & PFGEN_RSTAT_PFR_STATE_M))
			return 0;
		rte_delay_ms(1000);
	}

	DRV_LOG(ERR, "IDPF reset timeout");
	return -EBUSY;
}

static int
idpf_check_vf_reset_done(struct idpf_hw *hw)
{
	uint32_t reg;
	int i;

	for (i = 0; i < IDPF_RESET_WAIT_CNT; i++) {
		reg = IDPF_READ_REG(hw, VFGEN_RSTAT);
		if (reg != 0xFFFFFFFF && (reg & VFGEN_RSTAT_VFR_STATE_M))
			return 0;
		rte_delay_ms(1000);
	}

	DRV_LOG(ERR, "VF reset timeout");
	return -EBUSY;
}

#define IDPF_CTLQ_NUM 2

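/*
 * Register layouts for the default mailbox control queue pair.  Each
 * table describes one send queue (ATQ, IDPF_CTLQ_TYPE_MAILBOX_TX) and one
 * receive queue (ARQ, IDPF_CTLQ_TYPE_MAILBOX_RX); the PF and VF tables
 * differ only in the register offsets they reference.
 */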
struct idpf_ctlq_create_info pf_ctlq_info[IDPF_CTLQ_NUM] = {
	{
		.type = IDPF_CTLQ_TYPE_MAILBOX_TX,
		.id = IDPF_CTLQ_ID,
		.len = IDPF_CTLQ_LEN,
		.buf_size = IDPF_DFLT_MBX_BUF_SIZE,
		.reg = {
			.head = PF_FW_ATQH,
			.tail = PF_FW_ATQT,
			.len = PF_FW_ATQLEN,
			.bah = PF_FW_ATQBAH,
			.bal = PF_FW_ATQBAL,
			.len_mask = PF_FW_ATQLEN_ATQLEN_M,
			.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M,
			.head_mask = PF_FW_ATQH_ATQH_M,
		}
	},
	{
		.type = IDPF_CTLQ_TYPE_MAILBOX_RX,
		.id = IDPF_CTLQ_ID,
		.len = IDPF_CTLQ_LEN,
		.buf_size = IDPF_DFLT_MBX_BUF_SIZE,
		.reg = {
			.head = PF_FW_ARQH,
			.tail = PF_FW_ARQT,
			.len = PF_FW_ARQLEN,
			.bah = PF_FW_ARQBAH,
			.bal = PF_FW_ARQBAL,
			.len_mask = PF_FW_ARQLEN_ARQLEN_M,
			.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M,
			.head_mask = PF_FW_ARQH_ARQH_M,
		}
	}
};

struct idpf_ctlq_create_info vf_ctlq_info[IDPF_CTLQ_NUM] = {
	{
		.type = IDPF_CTLQ_TYPE_MAILBOX_TX,
		.id = IDPF_CTLQ_ID,
		.len = IDPF_CTLQ_LEN,
		.buf_size = IDPF_DFLT_MBX_BUF_SIZE,
		.reg = {
			.head = VF_ATQH,
			.tail = VF_ATQT,
			.len = VF_ATQLEN,
			.bah = VF_ATQBAH,
			.bal = VF_ATQBAL,
			.len_mask = VF_ATQLEN_ATQLEN_M,
			.len_ena_mask = VF_ATQLEN_ATQENABLE_M,
			.head_mask = VF_ATQH_ATQH_M,
		}
	},
	{
		.type = IDPF_CTLQ_TYPE_MAILBOX_RX,
		.id = IDPF_CTLQ_ID,
		.len = IDPF_CTLQ_LEN,
		.buf_size = IDPF_DFLT_MBX_BUF_SIZE,
		.reg = {
			.head = VF_ARQH,
			.tail = VF_ARQT,
			.len = VF_ARQLEN,
			.bah = VF_ARQBAH,
			.bal = VF_ARQBAL,
			.len_mask = VF_ARQLEN_ARQLEN_M,
			.len_ena_mask = VF_ARQLEN_ARQENABLE_M,
			.head_mask = VF_ARQH_ARQH_M,
		}
	}
};

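/*
 * Create the mailbox control queues from the table matching the device
 * (the VF table for IDPF_DEV_ID_SRIOV, the PF table otherwise) and cache
 * the send and receive queues in hw->asq and hw->arq.  If either queue
 * cannot be found, the control queues are torn down again and -ENOENT is
 * returned.
 */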
static int
idpf_init_mbx(struct idpf_hw *hw)
{
	struct idpf_ctlq_info *ctlq;
	int ret = 0;

	if (hw->device_id == IDPF_DEV_ID_SRIOV)
		ret = idpf_ctlq_init(hw, IDPF_CTLQ_NUM, vf_ctlq_info);
	else
		ret = idpf_ctlq_init(hw, IDPF_CTLQ_NUM, pf_ctlq_info);
	if (ret != 0)
		return ret;

	LIST_FOR_EACH_ENTRY_SAFE(ctlq, NULL, &hw->cq_list_head,
				 struct idpf_ctlq_info, cq_list) {
		if (ctlq->q_id == IDPF_CTLQ_ID &&
		    ctlq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
			hw->asq = ctlq;
		if (ctlq->q_id == IDPF_CTLQ_ID &&
		    ctlq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX)
			hw->arq = ctlq;
	}

	if (hw->asq == NULL || hw->arq == NULL) {
		idpf_ctlq_deinit(hw);
		ret = -ENOENT;
	}

	return ret;
}

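/*
 * Query the device's packet type table and translate it into the
 * RTE_PTYPE_* flags used on the Rx path.  Ptype info is requested in
 * chunks of at most IDPF_RX_MAX_PTYPES_PER_BUF entries; for each ptype
 * returned, the list of VIRTCHNL2 protocol headers is folded into a
 * single mbuf packet type stored in adapter->ptype_tbl, indexed by the
 * 10-bit ptype id.
 */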
static int
idpf_get_pkt_type(struct idpf_adapter *adapter)
{
	struct virtchnl2_get_ptype_info *req_ptype_info;
	struct virtchnl2_get_ptype_info *recv_ptype_info;
	uint16_t recv_num_ptypes = 0;
	uint16_t ptype_offset, i, j;
	uint16_t start_ptype_id = 0;
	int ret;

	req_ptype_info = rte_zmalloc("req_ptype_info", IDPF_DFLT_MBX_BUF_SIZE, 0);
	if (req_ptype_info == NULL)
		return -ENOMEM;

	recv_ptype_info = rte_zmalloc("recv_ptype_info", IDPF_DFLT_MBX_BUF_SIZE, 0);
	if (recv_ptype_info == NULL) {
		ret = -ENOMEM;
		goto free_req_ptype_info;
	}

	while (start_ptype_id < IDPF_MAX_PKT_TYPE) {
		memset(req_ptype_info, 0, sizeof(*req_ptype_info));
		memset(recv_ptype_info, 0, sizeof(*recv_ptype_info));

		if ((start_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > IDPF_MAX_PKT_TYPE)
			req_ptype_info->num_ptypes =
				rte_cpu_to_le_16(IDPF_MAX_PKT_TYPE - start_ptype_id);
		else
			req_ptype_info->num_ptypes = rte_cpu_to_le_16(IDPF_RX_MAX_PTYPES_PER_BUF);
		req_ptype_info->start_ptype_id = start_ptype_id;

		ret = idpf_vc_ptype_info_query(adapter, req_ptype_info, recv_ptype_info);
		if (ret != 0) {
			DRV_LOG(ERR, "Failed to query packet type information");
			goto free_recv_ptype_info;
		}

		recv_num_ptypes += rte_le_to_cpu_16(recv_ptype_info->num_ptypes);
		if (recv_num_ptypes > IDPF_MAX_PKT_TYPE) {
			ret = -EINVAL;
			goto free_recv_ptype_info;
		}

		start_ptype_id = rte_le_to_cpu_16(req_ptype_info->start_ptype_id) +
			rte_le_to_cpu_16(req_ptype_info->num_ptypes);

		ptype_offset = sizeof(struct virtchnl2_get_ptype_info) -
						sizeof(struct virtchnl2_ptype);

		for (i = 0; i < rte_le_to_cpu_16(recv_ptype_info->num_ptypes); i++) {
			bool is_inner = false, is_ip = false;
			struct virtchnl2_ptype *ptype;
			uint32_t proto_hdr = 0;

			ptype = (struct virtchnl2_ptype *)
					((uint8_t *)recv_ptype_info + ptype_offset);
			ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
			if (ptype_offset > IDPF_DFLT_MBX_BUF_SIZE) {
				ret = -EINVAL;
				goto free_recv_ptype_info;
			}

			for (j = 0; j < ptype->proto_id_count; j++) {
				switch (rte_le_to_cpu_16(ptype->proto_id[j])) {
				case VIRTCHNL2_PROTO_HDR_GRE:
				case VIRTCHNL2_PROTO_HDR_VXLAN:
					proto_hdr &= ~RTE_PTYPE_L4_MASK;
					proto_hdr |= RTE_PTYPE_TUNNEL_GRENAT;
					is_inner = true;
					break;
				case VIRTCHNL2_PROTO_HDR_MAC:
					if (is_inner) {
						proto_hdr &= ~RTE_PTYPE_INNER_L2_MASK;
						proto_hdr |= RTE_PTYPE_INNER_L2_ETHER;
					} else {
						proto_hdr &= ~RTE_PTYPE_L2_MASK;
						proto_hdr |= RTE_PTYPE_L2_ETHER;
					}
					break;
				case VIRTCHNL2_PROTO_HDR_VLAN:
					if (is_inner) {
						proto_hdr &= ~RTE_PTYPE_INNER_L2_MASK;
						proto_hdr |= RTE_PTYPE_INNER_L2_ETHER_VLAN;
					}
					break;
				case VIRTCHNL2_PROTO_HDR_PTP:
					proto_hdr &= ~RTE_PTYPE_L2_MASK;
					proto_hdr |= RTE_PTYPE_L2_ETHER_TIMESYNC;
					break;
				case VIRTCHNL2_PROTO_HDR_LLDP:
					proto_hdr &= ~RTE_PTYPE_L2_MASK;
					proto_hdr |= RTE_PTYPE_L2_ETHER_LLDP;
					break;
				case VIRTCHNL2_PROTO_HDR_ARP:
					proto_hdr &= ~RTE_PTYPE_L2_MASK;
					proto_hdr |= RTE_PTYPE_L2_ETHER_ARP;
					break;
				case VIRTCHNL2_PROTO_HDR_PPPOE:
					proto_hdr &= ~RTE_PTYPE_L2_MASK;
					proto_hdr |= RTE_PTYPE_L2_ETHER_PPPOE;
					break;
				case VIRTCHNL2_PROTO_HDR_IPV4:
					if (!is_ip) {
						proto_hdr |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
						is_ip = true;
					} else {
						proto_hdr |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
							     RTE_PTYPE_TUNNEL_IP;
						is_inner = true;
					}
					break;
				case VIRTCHNL2_PROTO_HDR_IPV6:
					if (!is_ip) {
						proto_hdr |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
						is_ip = true;
					} else {
						proto_hdr |= RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
							     RTE_PTYPE_TUNNEL_IP;
						is_inner = true;
					}
					break;
				case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
				case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
					if (is_inner)
						proto_hdr |= RTE_PTYPE_INNER_L4_FRAG;
					else
						proto_hdr |= RTE_PTYPE_L4_FRAG;
					break;
				case VIRTCHNL2_PROTO_HDR_UDP:
					if (is_inner)
						proto_hdr |= RTE_PTYPE_INNER_L4_UDP;
					else
						proto_hdr |= RTE_PTYPE_L4_UDP;
					break;
				case VIRTCHNL2_PROTO_HDR_TCP:
					if (is_inner)
						proto_hdr |= RTE_PTYPE_INNER_L4_TCP;
					else
						proto_hdr |= RTE_PTYPE_L4_TCP;
					break;
				case VIRTCHNL2_PROTO_HDR_SCTP:
					if (is_inner)
						proto_hdr |= RTE_PTYPE_INNER_L4_SCTP;
					else
						proto_hdr |= RTE_PTYPE_L4_SCTP;
					break;
				case VIRTCHNL2_PROTO_HDR_ICMP:
					if (is_inner)
						proto_hdr |= RTE_PTYPE_INNER_L4_ICMP;
					else
						proto_hdr |= RTE_PTYPE_L4_ICMP;
					break;
				case VIRTCHNL2_PROTO_HDR_ICMPV6:
					if (is_inner)
						proto_hdr |= RTE_PTYPE_INNER_L4_ICMP;
					else
						proto_hdr |= RTE_PTYPE_L4_ICMP;
					break;
				case VIRTCHNL2_PROTO_HDR_L2TPV2:
				case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
				case VIRTCHNL2_PROTO_HDR_L2TPV3:
					is_inner = true;
					proto_hdr |= RTE_PTYPE_TUNNEL_L2TP;
					break;
				case VIRTCHNL2_PROTO_HDR_NVGRE:
					is_inner = true;
					proto_hdr |= RTE_PTYPE_TUNNEL_NVGRE;
					break;
				case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
					is_inner = true;
					proto_hdr |= RTE_PTYPE_TUNNEL_GTPC;
					break;
				case VIRTCHNL2_PROTO_HDR_GTPU:
				case VIRTCHNL2_PROTO_HDR_GTPU_UL:
				case VIRTCHNL2_PROTO_HDR_GTPU_DL:
					is_inner = true;
					proto_hdr |= RTE_PTYPE_TUNNEL_GTPU;
					break;
				case VIRTCHNL2_PROTO_HDR_PAY:
				case VIRTCHNL2_PROTO_HDR_IPV6_EH:
				case VIRTCHNL2_PROTO_HDR_PRE_MAC:
				case VIRTCHNL2_PROTO_HDR_POST_MAC:
				case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
				case VIRTCHNL2_PROTO_HDR_SVLAN:
				case VIRTCHNL2_PROTO_HDR_CVLAN:
				case VIRTCHNL2_PROTO_HDR_MPLS:
				case VIRTCHNL2_PROTO_HDR_MMPLS:
				case VIRTCHNL2_PROTO_HDR_CTRL:
				case VIRTCHNL2_PROTO_HDR_ECP:
				case VIRTCHNL2_PROTO_HDR_EAPOL:
				case VIRTCHNL2_PROTO_HDR_PPPOD:
				case VIRTCHNL2_PROTO_HDR_IGMP:
				case VIRTCHNL2_PROTO_HDR_AH:
				case VIRTCHNL2_PROTO_HDR_ESP:
				case VIRTCHNL2_PROTO_HDR_IKE:
				case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
				case VIRTCHNL2_PROTO_HDR_GTP:
				case VIRTCHNL2_PROTO_HDR_GTP_EH:
				case VIRTCHNL2_PROTO_HDR_GTPCV2:
				case VIRTCHNL2_PROTO_HDR_ECPRI:
				case VIRTCHNL2_PROTO_HDR_VRRP:
				case VIRTCHNL2_PROTO_HDR_OSPF:
				case VIRTCHNL2_PROTO_HDR_TUN:
				case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
				case VIRTCHNL2_PROTO_HDR_GENEVE:
				case VIRTCHNL2_PROTO_HDR_NSH:
				case VIRTCHNL2_PROTO_HDR_QUIC:
				case VIRTCHNL2_PROTO_HDR_PFCP:
				case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
				case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
				case VIRTCHNL2_PROTO_HDR_RTP:
				case VIRTCHNL2_PROTO_HDR_NO_PROTO:
				default:
					continue;
				}
				adapter->ptype_tbl[ptype->ptype_id_10] = proto_hdr;
			}
		}
	}

free_recv_ptype_info:
	rte_free(recv_ptype_info);
free_req_ptype_info:
	rte_free(req_ptype_info);
	clear_cmd(adapter);
	return ret;
}

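/*
 * Bring up the common part of an adapter: wait for the device reset to
 * complete, create the mailbox control queues, allocate the mailbox
 * response buffer, then negotiate the virtchnl API version, query device
 * capabilities and fill the packet type table.  On failure, the steps
 * already performed are unwound in reverse order.
 */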
int
idpf_adapter_init(struct idpf_adapter *adapter)
{
	struct idpf_hw *hw = &adapter->hw;
	int ret;

	if (hw->device_id == IDPF_DEV_ID_SRIOV) {
		ret = idpf_check_vf_reset_done(hw);
	} else {
		idpf_reset_pf(hw);
		ret = idpf_check_pf_reset_done(hw);
	}
	if (ret != 0) {
		DRV_LOG(ERR, "IDPF is still resetting");
		goto err_check_reset;
	}

	ret = idpf_init_mbx(hw);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to init mailbox");
		goto err_check_reset;
	}

	adapter->mbx_resp = rte_zmalloc("idpf_adapter_mbx_resp",
					IDPF_DFLT_MBX_BUF_SIZE, 0);
	if (adapter->mbx_resp == NULL) {
		DRV_LOG(ERR, "Failed to allocate idpf_adapter_mbx_resp memory");
		ret = -ENOMEM;
		goto err_mbx_resp;
	}

	ret = idpf_vc_api_version_check(adapter);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to check API version");
		goto err_check_api;
	}

	ret = idpf_vc_caps_get(adapter);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to get capabilities");
		goto err_check_api;
	}

	ret = idpf_get_pkt_type(adapter);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to set ptype table");
		goto err_check_api;
	}

	return 0;

err_check_api:
	rte_free(adapter->mbx_resp);
	adapter->mbx_resp = NULL;
err_mbx_resp:
	idpf_ctlq_deinit(hw);
err_check_reset:
	return ret;
}

int
idpf_adapter_deinit(struct idpf_adapter *adapter)
{
	struct idpf_hw *hw = &adapter->hw;

	idpf_ctlq_deinit(hw);
	rte_free(adapter->mbx_resp);
	adapter->mbx_resp = NULL;

	return 0;
}

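/*
 * A minimal PMD-side call sequence, shown only as an illustration: the
 * surrounding PMD structures ('dev', the stack variables) and all error
 * handling are placeholders, only the idpf_* calls come from this file.
 *
 *	struct idpf_adapter adapter = { ... };
 *	struct idpf_vport vport = { .adapter = &adapter };
 *	struct virtchnl2_create_vport vport_req = { 0 };
 *
 *	idpf_adapter_init(&adapter);
 *	idpf_vport_info_init(&vport, &vport_req);
 *	idpf_vport_init(&vport, &vport_req, dev->data);
 *	idpf_vport_rss_config(&vport);
 *	...
 *	idpf_vport_deinit(&vport);
 *	idpf_adapter_deinit(&adapter);
 */

/*
 * Create a vport through virtchnl and cache the returned configuration:
 * queue model, queue counts, default MAC address, RSS parameters and the
 * queue/register chunks.  The RSS key/LUT buffers and the recv_vectors
 * buffer are allocated here and released in idpf_vport_deinit().
 */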
int
idpf_vport_init(struct idpf_vport *vport,
		struct virtchnl2_create_vport *create_vport_info,
		void *dev_data)
{
	struct virtchnl2_create_vport *vport_info;
	int i, type, ret;

	ret = idpf_vc_vport_create(vport, create_vport_info);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to create vport.");
		goto err_create_vport;
	}

	vport_info = &(vport->vport_info.info);
	vport->vport_id = vport_info->vport_id;
	vport->txq_model = vport_info->txq_model;
	vport->rxq_model = vport_info->rxq_model;
	vport->num_tx_q = vport_info->num_tx_q;
	vport->num_tx_complq = vport_info->num_tx_complq;
	vport->num_rx_q = vport_info->num_rx_q;
	vport->num_rx_bufq = vport_info->num_rx_bufq;
	vport->max_mtu = vport_info->max_mtu;
	rte_memcpy(vport->default_mac_addr,
		   vport_info->default_mac_addr, ETH_ALEN);
	vport->rss_algorithm = vport_info->rss_algorithm;
	vport->rss_key_size = RTE_MIN(IDPF_RSS_KEY_LEN,
				      vport_info->rss_key_size);
	vport->rss_lut_size = vport_info->rss_lut_size;

	for (i = 0; i < vport_info->chunks.num_chunks; i++) {
		type = vport_info->chunks.chunks[i].type;
		switch (type) {
		case VIRTCHNL2_QUEUE_TYPE_TX:
			vport->chunks_info.tx_start_qid =
				vport_info->chunks.chunks[i].start_queue_id;
			vport->chunks_info.tx_qtail_start =
				vport_info->chunks.chunks[i].qtail_reg_start;
			vport->chunks_info.tx_qtail_spacing =
				vport_info->chunks.chunks[i].qtail_reg_spacing;
			break;
		case VIRTCHNL2_QUEUE_TYPE_RX:
			vport->chunks_info.rx_start_qid =
				vport_info->chunks.chunks[i].start_queue_id;
			vport->chunks_info.rx_qtail_start =
				vport_info->chunks.chunks[i].qtail_reg_start;
			vport->chunks_info.rx_qtail_spacing =
				vport_info->chunks.chunks[i].qtail_reg_spacing;
			break;
		case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
			vport->chunks_info.tx_compl_start_qid =
				vport_info->chunks.chunks[i].start_queue_id;
			vport->chunks_info.tx_compl_qtail_start =
				vport_info->chunks.chunks[i].qtail_reg_start;
			vport->chunks_info.tx_compl_qtail_spacing =
				vport_info->chunks.chunks[i].qtail_reg_spacing;
			break;
		case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
			vport->chunks_info.rx_buf_start_qid =
				vport_info->chunks.chunks[i].start_queue_id;
			vport->chunks_info.rx_buf_qtail_start =
				vport_info->chunks.chunks[i].qtail_reg_start;
			vport->chunks_info.rx_buf_qtail_spacing =
				vport_info->chunks.chunks[i].qtail_reg_spacing;
			break;
		default:
			DRV_LOG(ERR, "Unsupported queue type");
			break;
		}
	}

	vport->dev_data = dev_data;

	vport->rss_key = rte_zmalloc("rss_key",
				     vport->rss_key_size, 0);
	if (vport->rss_key == NULL) {
		DRV_LOG(ERR, "Failed to allocate RSS key");
		ret = -ENOMEM;
		goto err_rss_key;
	}

	vport->rss_lut = rte_zmalloc("rss_lut",
				     sizeof(uint32_t) * vport->rss_lut_size, 0);
	if (vport->rss_lut == NULL) {
		DRV_LOG(ERR, "Failed to allocate RSS lut");
		ret = -ENOMEM;
		goto err_rss_lut;
	}

	/* recv_vectors holds the VIRTCHNL2_OP_ALLOC_VECTORS response;
	 * reserve the maximum size for now, this may need optimization
	 * in the future.
	 */
	vport->recv_vectors = rte_zmalloc("recv_vectors", IDPF_DFLT_MBX_BUF_SIZE, 0);
	if (vport->recv_vectors == NULL) {
		DRV_LOG(ERR, "Failed to allocate recv_vectors");
		ret = -ENOMEM;
		goto err_recv_vec;
	}

	return 0;

err_recv_vec:
	rte_free(vport->rss_lut);
	vport->rss_lut = NULL;
err_rss_lut:
	vport->dev_data = NULL;
	rte_free(vport->rss_key);
	vport->rss_key = NULL;
err_rss_key:
	idpf_vc_vport_destroy(vport);
err_create_vport:
	return ret;
}
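
/*
 * Release everything allocated by idpf_vport_init() and destroy the
 * vport through virtchnl.
 */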
int
idpf_vport_deinit(struct idpf_vport *vport)
{
	rte_free(vport->recv_vectors);
	vport->recv_vectors = NULL;
	rte_free(vport->rss_lut);
	vport->rss_lut = NULL;

	rte_free(vport->rss_key);
	vport->rss_key = NULL;

	vport->dev_data = NULL;

	idpf_vc_vport_destroy(vport);

	return 0;
}
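
/*
 * Push the vport's RSS key, LUT and hash configuration to the device
 * through the corresponding virtchnl messages.
 */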
int
idpf_vport_rss_config(struct idpf_vport *vport)
{
	int ret;

	ret = idpf_vc_rss_key_set(vport);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to configure RSS key");
		return ret;
	}

	ret = idpf_vc_rss_lut_set(vport);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to configure RSS lut");
		return ret;
	}

	ret = idpf_vc_rss_hash_set(vport);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to configure RSS hash");
		return ret;
	}

	return ret;
}

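/*
 * Map all Rx queues of the vport to the first vector returned by
 * VIRTCHNL2_OP_ALLOC_VECTORS and enable write-back on ITR, since Rx
 * interrupts themselves stay disabled.
 */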
int
idpf_vport_irq_map_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_queue_vector *qv_map;
	struct idpf_hw *hw = &adapter->hw;
	uint32_t dynctl_val, itrn_val;
	uint32_t dynctl_reg_start;
	uint32_t itrn_reg_start;
	uint16_t i;
	int ret;

	qv_map = rte_zmalloc("qv_map",
			     nb_rx_queues *
			     sizeof(struct virtchnl2_queue_vector), 0);
	if (qv_map == NULL) {
		DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
			nb_rx_queues);
		ret = -ENOMEM;
		goto qv_map_alloc_err;
	}

	/* Rx interrupt is disabled; map the interrupt only for write-back. */

	/* The capability flag adapter->caps.other_caps should be checked
	 * against the VIRTCHNL2_CAP_WB_ON_ITR bit here. Update this
	 * condition once the FW returns the correct flag bits.
	 */
	dynctl_reg_start =
		vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
	itrn_reg_start =
		vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
	dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
	DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x", dynctl_val);
	itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
	DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
	/* Force write-backs by setting the WB_ON_ITR bit in the DYN_CTL
	 * register. WB_ON_ITR and INTENA are mutually exclusive bits.
	 * With WB_ON_ITR set, Tx and Rx descriptors are written back on
	 * ITR expiration irrespective of the INTENA setting.
	 */
	/* TBD: need to tune INTERVAL value for better performance. */
	itrn_val = (itrn_val == 0) ? IDPF_DFLT_INTERVAL : itrn_val;
	dynctl_val = VIRTCHNL2_ITR_IDX_0 <<
		     PF_GLINT_DYN_CTL_ITR_INDX_S |
		     PF_GLINT_DYN_CTL_WB_ON_ITR_M |
		     itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S;
	IDPF_WRITE_REG(hw, dynctl_reg_start, dynctl_val);

	for (i = 0; i < nb_rx_queues; i++) {
		/* map all queues to the same vector */
		qv_map[i].queue_id = vport->chunks_info.rx_start_qid + i;
		qv_map[i].vector_id =
			vport->recv_vectors->vchunks.vchunks->start_vector_id;
	}
	vport->qv_map = qv_map;

	ret = idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, true);
	if (ret != 0) {
		DRV_LOG(ERR, "config interrupt mapping failed");
		goto config_irq_map_err;
	}

	return 0;

config_irq_map_err:
	rte_free(vport->qv_map);
	vport->qv_map = NULL;

qv_map_alloc_err:
	return ret;
}

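/*
 * Same as idpf_vport_irq_map_config(), except that the queue IDs to map
 * are taken from the caller-supplied qids array instead of being derived
 * from the vport's Rx queue chunk.
 */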
int
idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport, uint32_t *qids, uint16_t nb_rx_queues)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_queue_vector *qv_map;
	struct idpf_hw *hw = &adapter->hw;
	uint32_t dynctl_val, itrn_val;
	uint32_t dynctl_reg_start;
	uint32_t itrn_reg_start;
	uint16_t i;
	int ret;

	qv_map = rte_zmalloc("qv_map",
			     nb_rx_queues *
			     sizeof(struct virtchnl2_queue_vector), 0);
	if (qv_map == NULL) {
		DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
			nb_rx_queues);
		ret = -ENOMEM;
		goto qv_map_alloc_err;
	}

	/* Rx interrupt is disabled; map the interrupt only for write-back. */

	/* The capability flag adapter->caps.other_caps should be checked
	 * against the VIRTCHNL2_CAP_WB_ON_ITR bit here. Update this
	 * condition once the FW returns the correct flag bits.
	 */
	dynctl_reg_start =
		vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
	itrn_reg_start =
		vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
	dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
	DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x", dynctl_val);
	itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
	DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
	/* Force write-backs by setting the WB_ON_ITR bit in the DYN_CTL
	 * register. WB_ON_ITR and INTENA are mutually exclusive bits.
	 * With WB_ON_ITR set, Tx and Rx descriptors are written back on
	 * ITR expiration irrespective of the INTENA setting.
	 */
	/* TBD: need to tune INTERVAL value for better performance. */
	itrn_val = (itrn_val == 0) ? IDPF_DFLT_INTERVAL : itrn_val;
	dynctl_val = VIRTCHNL2_ITR_IDX_0 <<
		     PF_GLINT_DYN_CTL_ITR_INDX_S |
		     PF_GLINT_DYN_CTL_WB_ON_ITR_M |
		     itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S;
	IDPF_WRITE_REG(hw, dynctl_reg_start, dynctl_val);

	for (i = 0; i < nb_rx_queues; i++) {
		/* map all queues to the same vector */
		qv_map[i].queue_id = qids[i];
		qv_map[i].vector_id =
			vport->recv_vectors->vchunks.vchunks->start_vector_id;
	}
	vport->qv_map = qv_map;

	ret = idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, true);
	if (ret != 0) {
		DRV_LOG(ERR, "config interrupt mapping failed");
		goto config_irq_map_err;
	}

	return 0;

config_irq_map_err:
	rte_free(vport->qv_map);
	vport->qv_map = NULL;

qv_map_alloc_err:
	return ret;
}

int
idpf_vport_irq_unmap_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
{
	idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, false);

	rte_free(vport->qv_map);
	vport->qv_map = NULL;

	return 0;
}

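/*
 * Fill a create_vport request with the default configuration: split
 * queue model (with completion and buffer queues) when the adapter is
 * not in single-queue mode, single queue model otherwise, using the
 * default Tx/Rx queue counts.
 */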
int
idpf_vport_info_init(struct idpf_vport *vport,
			    struct virtchnl2_create_vport *vport_info)
{
	struct idpf_adapter *adapter = vport->adapter;

	vport_info->vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
	if (!adapter->is_tx_singleq) {
		vport_info->txq_model =
			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
		vport_info->num_tx_q =
			rte_cpu_to_le_16(IDPF_DEFAULT_TXQ_NUM);
		vport_info->num_tx_complq =
			rte_cpu_to_le_16(IDPF_DEFAULT_TXQ_NUM * IDPF_TX_COMPLQ_PER_GRP);
	} else {
		vport_info->txq_model =
			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
		vport_info->num_tx_q = rte_cpu_to_le_16(IDPF_DEFAULT_TXQ_NUM);
		vport_info->num_tx_complq = 0;
	}
	if (!adapter->is_rx_singleq) {
		vport_info->rxq_model =
			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
		vport_info->num_rx_q = rte_cpu_to_le_16(IDPF_DEFAULT_RXQ_NUM);
		vport_info->num_rx_bufq =
			rte_cpu_to_le_16(IDPF_DEFAULT_RXQ_NUM * IDPF_RX_BUFQ_PER_GRP);
	} else {
		vport_info->rxq_model =
			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
		vport_info->num_rx_q = rte_cpu_to_le_16(IDPF_DEFAULT_RXQ_NUM);
		vport_info->num_rx_bufq = 0;
	}

	return 0;
}

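/*
 * Turn the absolute counters in 'nes' into deltas relative to the
 * offsets previously captured in 'oes'; 'nes' is updated in place.
 */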
void
idpf_vport_stats_update(struct virtchnl2_vport_stats *oes, struct virtchnl2_vport_stats *nes)
{
	nes->rx_bytes = nes->rx_bytes - oes->rx_bytes;
	nes->rx_unicast = nes->rx_unicast - oes->rx_unicast;
	nes->rx_multicast = nes->rx_multicast - oes->rx_multicast;
	nes->rx_broadcast = nes->rx_broadcast - oes->rx_broadcast;
	nes->rx_errors = nes->rx_errors - oes->rx_errors;
	nes->rx_discards = nes->rx_discards - oes->rx_discards;
	nes->tx_bytes = nes->tx_bytes - oes->tx_bytes;
	nes->tx_unicast = nes->tx_unicast - oes->tx_unicast;
	nes->tx_multicast = nes->tx_multicast - oes->tx_multicast;
	nes->tx_broadcast = nes->tx_broadcast - oes->tx_broadcast;
	nes->tx_errors = nes->tx_errors - oes->tx_errors;
	nes->tx_discards = nes->tx_discards - oes->tx_discards;
}

RTE_LOG_REGISTER_SUFFIX(idpf_common_logtype, common, NOTICE);