/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Intel Corporation
 */

#include <rte_log.h>
#include "idpf_common_device.h"
#include "idpf_common_virtchnl.h"

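/* Trigger a PF software reset by setting the PFSWR bit in PFGEN_CTRL. */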
static void
idpf_reset_pf(struct idpf_hw *hw)
{
	uint32_t reg;

	reg = IDPF_READ_REG(hw, PFGEN_CTRL);
	IDPF_WRITE_REG(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR));
}

#define IDPF_RESET_WAIT_CNT 100

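/* Poll PFGEN_RSTAT until the PF reset completes, or fail with -EBUSY on timeout. */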
static int
idpf_check_pf_reset_done(struct idpf_hw *hw)
{
	uint32_t reg;
	int i;

	for (i = 0; i < IDPF_RESET_WAIT_CNT; i++) {
		reg = IDPF_READ_REG(hw, PFGEN_RSTAT);
		if (reg != 0xFFFFFFFF && (reg & PFGEN_RSTAT_PFR_STATE_M))
			return 0;
		rte_delay_ms(1000);
	}

	DRV_LOG(ERR, "IDPF reset timeout");
	return -EBUSY;
}

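/* Same as above for the VF: poll VFGEN_RSTAT until the VF reset completes. */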
static int
idpf_check_vf_reset_done(struct idpf_hw *hw)
{
	uint32_t reg;
	int i;

	for (i = 0; i < IDPF_RESET_WAIT_CNT; i++) {
		reg = IDPF_READ_REG(hw, VFGEN_RSTAT);
		if (reg != 0xFFFFFFFF && (reg & VFGEN_RSTAT_VFR_STATE_M))
			return 0;
		rte_delay_ms(1000);
	}

	DRV_LOG(ERR, "VF reset timeout");
	return -EBUSY;
}

#define IDPF_CTLQ_NUM 2

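/* Create info for the PF mailbox control queues: one TX and one RX queue
 * backed by the PF_FW_ATQ and PF_FW_ARQ register sets.
 */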
struct idpf_ctlq_create_info pf_ctlq_info[IDPF_CTLQ_NUM] = {
	{
		.type = IDPF_CTLQ_TYPE_MAILBOX_TX,
		.id = IDPF_CTLQ_ID,
		.len = IDPF_CTLQ_LEN,
		.buf_size = IDPF_DFLT_MBX_BUF_SIZE,
		.reg = {
			.head = PF_FW_ATQH,
			.tail = PF_FW_ATQT,
			.len = PF_FW_ATQLEN,
			.bah = PF_FW_ATQBAH,
			.bal = PF_FW_ATQBAL,
			.len_mask = PF_FW_ATQLEN_ATQLEN_M,
			.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M,
			.head_mask = PF_FW_ATQH_ATQH_M,
		}
	},
	{
		.type = IDPF_CTLQ_TYPE_MAILBOX_RX,
		.id = IDPF_CTLQ_ID,
		.len = IDPF_CTLQ_LEN,
		.buf_size = IDPF_DFLT_MBX_BUF_SIZE,
		.reg = {
			.head = PF_FW_ARQH,
			.tail = PF_FW_ARQT,
			.len = PF_FW_ARQLEN,
			.bah = PF_FW_ARQBAH,
			.bal = PF_FW_ARQBAL,
			.len_mask = PF_FW_ARQLEN_ARQLEN_M,
			.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M,
			.head_mask = PF_FW_ARQH_ARQH_M,
		}
	}
};

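/* Same mailbox layout for the VF, using the VF_ATQ and VF_ARQ register sets. */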
struct idpf_ctlq_create_info vf_ctlq_info[IDPF_CTLQ_NUM] = {
	{
		.type = IDPF_CTLQ_TYPE_MAILBOX_TX,
		.id = IDPF_CTLQ_ID,
		.len = IDPF_CTLQ_LEN,
		.buf_size = IDPF_DFLT_MBX_BUF_SIZE,
		.reg = {
			.head = VF_ATQH,
			.tail = VF_ATQT,
			.len = VF_ATQLEN,
			.bah = VF_ATQBAH,
			.bal = VF_ATQBAL,
			.len_mask = VF_ATQLEN_ATQLEN_M,
			.len_ena_mask = VF_ATQLEN_ATQENABLE_M,
			.head_mask = VF_ATQH_ATQH_M,
		}
	},
	{
		.type = IDPF_CTLQ_TYPE_MAILBOX_RX,
		.id = IDPF_CTLQ_ID,
		.len = IDPF_CTLQ_LEN,
		.buf_size = IDPF_DFLT_MBX_BUF_SIZE,
		.reg = {
			.head = VF_ARQH,
			.tail = VF_ARQT,
			.len = VF_ARQLEN,
			.bah = VF_ARQBAH,
			.bal = VF_ARQBAL,
			.len_mask = VF_ARQLEN_ARQLEN_M,
			.len_ena_mask = VF_ARQLEN_ARQENABLE_M,
			.head_mask = VF_ARQH_ARQH_M,
		}
	}
};

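/* Create the mailbox control queues and cache the send/receive queue
 * pointers in hw->asq and hw->arq.
 */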
static int
idpf_init_mbx(struct idpf_hw *hw)
{
	struct idpf_ctlq_info *ctlq;
	int ret = 0;

	if (hw->device_id == IDPF_DEV_ID_SRIOV)
		ret = idpf_ctlq_init(hw, IDPF_CTLQ_NUM, vf_ctlq_info);
	else
		ret = idpf_ctlq_init(hw, IDPF_CTLQ_NUM, pf_ctlq_info);
	if (ret != 0)
		return ret;

	LIST_FOR_EACH_ENTRY(ctlq, &hw->cq_list_head, struct idpf_ctlq_info, cq_list) {
		if (ctlq->q_id == IDPF_CTLQ_ID &&
		    ctlq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
			hw->asq = ctlq;
		if (ctlq->q_id == IDPF_CTLQ_ID &&
		    ctlq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX)
			hw->arq = ctlq;
	}

	if (hw->asq == NULL || hw->arq == NULL) {
		idpf_ctlq_deinit(hw);
		ret = -ENOENT;
	}

	return ret;
}

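/* Query the device packet type table over virtchnl and translate each
 * VIRTCHNL2_PROTO_HDR_* chain into an RTE_PTYPE_* value in adapter->ptype_tbl.
 */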
static int
idpf_get_pkt_type(struct idpf_adapter *adapter)
{
	struct virtchnl2_get_ptype_info *req_ptype_info;
	struct virtchnl2_get_ptype_info *recv_ptype_info;
	uint16_t recv_num_ptypes = 0;
	uint16_t ptype_offset, i, j;
	uint16_t start_ptype_id = 0;
	int ret;

	req_ptype_info = rte_zmalloc("req_ptype_info", IDPF_DFLT_MBX_BUF_SIZE, 0);
	if (req_ptype_info == NULL)
		return -ENOMEM;

	recv_ptype_info = rte_zmalloc("recv_ptype_info", IDPF_DFLT_MBX_BUF_SIZE, 0);
	if (recv_ptype_info == NULL) {
		ret = -ENOMEM;
		goto free_req_ptype_info;
	}

	while (start_ptype_id < IDPF_MAX_PKT_TYPE) {
		memset(req_ptype_info, 0, sizeof(*req_ptype_info));
		memset(recv_ptype_info, 0, sizeof(*recv_ptype_info));

		if ((start_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > IDPF_MAX_PKT_TYPE)
			req_ptype_info->num_ptypes =
				rte_cpu_to_le_16(IDPF_MAX_PKT_TYPE - start_ptype_id);
		else
			req_ptype_info->num_ptypes = rte_cpu_to_le_16(IDPF_RX_MAX_PTYPES_PER_BUF);
		req_ptype_info->start_ptype_id = start_ptype_id;

		ret = idpf_vc_ptype_info_query(adapter, req_ptype_info, recv_ptype_info);
		if (ret != 0) {
			DRV_LOG(ERR, "Fail to query packet type information");
			goto free_recv_ptype_info;
		}

		recv_num_ptypes += rte_le_to_cpu_16(recv_ptype_info->num_ptypes);
		if (recv_num_ptypes > IDPF_MAX_PKT_TYPE) {
			ret = -EINVAL;
			goto free_recv_ptype_info;
		}

		start_ptype_id = rte_le_to_cpu_16(req_ptype_info->start_ptype_id) +
			rte_le_to_cpu_16(req_ptype_info->num_ptypes);

		ptype_offset = sizeof(struct virtchnl2_get_ptype_info) -
						sizeof(struct virtchnl2_ptype);

		for (i = 0; i < rte_le_to_cpu_16(recv_ptype_info->num_ptypes); i++) {
			bool is_inner = false, is_ip = false;
			struct virtchnl2_ptype *ptype;
			uint32_t proto_hdr = 0;

			ptype = (struct virtchnl2_ptype *)
					((uint8_t *)recv_ptype_info + ptype_offset);
			ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
			if (ptype_offset > IDPF_DFLT_MBX_BUF_SIZE) {
				ret = -EINVAL;
				goto free_recv_ptype_info;
			}

			for (j = 0; j < ptype->proto_id_count; j++) {
				switch (rte_le_to_cpu_16(ptype->proto_id[j])) {
				case VIRTCHNL2_PROTO_HDR_GRE:
				case VIRTCHNL2_PROTO_HDR_VXLAN:
					proto_hdr &= ~RTE_PTYPE_L4_MASK;
					proto_hdr |= RTE_PTYPE_TUNNEL_GRENAT;
					is_inner = true;
					break;
				case VIRTCHNL2_PROTO_HDR_MAC:
					if (is_inner) {
						proto_hdr &= ~RTE_PTYPE_INNER_L2_MASK;
						proto_hdr |= RTE_PTYPE_INNER_L2_ETHER;
					} else {
						proto_hdr &= ~RTE_PTYPE_L2_MASK;
						proto_hdr |= RTE_PTYPE_L2_ETHER;
					}
					break;
				case VIRTCHNL2_PROTO_HDR_VLAN:
					if (is_inner) {
						proto_hdr &= ~RTE_PTYPE_INNER_L2_MASK;
						proto_hdr |= RTE_PTYPE_INNER_L2_ETHER_VLAN;
					}
					break;
				case VIRTCHNL2_PROTO_HDR_PTP:
					proto_hdr &= ~RTE_PTYPE_L2_MASK;
					proto_hdr |= RTE_PTYPE_L2_ETHER_TIMESYNC;
					break;
				case VIRTCHNL2_PROTO_HDR_LLDP:
					proto_hdr &= ~RTE_PTYPE_L2_MASK;
					proto_hdr |= RTE_PTYPE_L2_ETHER_LLDP;
					break;
				case VIRTCHNL2_PROTO_HDR_ARP:
					proto_hdr &= ~RTE_PTYPE_L2_MASK;
					proto_hdr |= RTE_PTYPE_L2_ETHER_ARP;
					break;
				case VIRTCHNL2_PROTO_HDR_PPPOE:
					proto_hdr &= ~RTE_PTYPE_L2_MASK;
					proto_hdr |= RTE_PTYPE_L2_ETHER_PPPOE;
					break;
				case VIRTCHNL2_PROTO_HDR_IPV4:
					if (!is_ip) {
						proto_hdr |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
						is_ip = true;
					} else {
						proto_hdr |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
							     RTE_PTYPE_TUNNEL_IP;
						is_inner = true;
					}
					break;
				case VIRTCHNL2_PROTO_HDR_IPV6:
					if (!is_ip) {
						proto_hdr |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
						is_ip = true;
					} else {
						proto_hdr |= RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
							     RTE_PTYPE_TUNNEL_IP;
						is_inner = true;
					}
					break;
				case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
				case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
					if (is_inner)
						proto_hdr |= RTE_PTYPE_INNER_L4_FRAG;
					else
						proto_hdr |= RTE_PTYPE_L4_FRAG;
					break;
				case VIRTCHNL2_PROTO_HDR_UDP:
					if (is_inner)
						proto_hdr |= RTE_PTYPE_INNER_L4_UDP;
					else
						proto_hdr |= RTE_PTYPE_L4_UDP;
					break;
				case VIRTCHNL2_PROTO_HDR_TCP:
					if (is_inner)
						proto_hdr |= RTE_PTYPE_INNER_L4_TCP;
					else
						proto_hdr |= RTE_PTYPE_L4_TCP;
					break;
				case VIRTCHNL2_PROTO_HDR_SCTP:
					if (is_inner)
						proto_hdr |= RTE_PTYPE_INNER_L4_SCTP;
					else
						proto_hdr |= RTE_PTYPE_L4_SCTP;
					break;
				case VIRTCHNL2_PROTO_HDR_ICMP:
					if (is_inner)
						proto_hdr |= RTE_PTYPE_INNER_L4_ICMP;
					else
						proto_hdr |= RTE_PTYPE_L4_ICMP;
					break;
				case VIRTCHNL2_PROTO_HDR_ICMPV6:
					if (is_inner)
						proto_hdr |= RTE_PTYPE_INNER_L4_ICMP;
					else
						proto_hdr |= RTE_PTYPE_L4_ICMP;
					break;
				case VIRTCHNL2_PROTO_HDR_L2TPV2:
				case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
				case VIRTCHNL2_PROTO_HDR_L2TPV3:
					is_inner = true;
					proto_hdr |= RTE_PTYPE_TUNNEL_L2TP;
					break;
				case VIRTCHNL2_PROTO_HDR_NVGRE:
					is_inner = true;
					proto_hdr |= RTE_PTYPE_TUNNEL_NVGRE;
					break;
				case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
					is_inner = true;
					proto_hdr |= RTE_PTYPE_TUNNEL_GTPC;
					break;
				case VIRTCHNL2_PROTO_HDR_GTPU:
				case VIRTCHNL2_PROTO_HDR_GTPU_UL:
				case VIRTCHNL2_PROTO_HDR_GTPU_DL:
					is_inner = true;
					proto_hdr |= RTE_PTYPE_TUNNEL_GTPU;
					break;
				case VIRTCHNL2_PROTO_HDR_PAY:
				case VIRTCHNL2_PROTO_HDR_IPV6_EH:
				case VIRTCHNL2_PROTO_HDR_PRE_MAC:
				case VIRTCHNL2_PROTO_HDR_POST_MAC:
				case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
				case VIRTCHNL2_PROTO_HDR_SVLAN:
				case VIRTCHNL2_PROTO_HDR_CVLAN:
				case VIRTCHNL2_PROTO_HDR_MPLS:
				case VIRTCHNL2_PROTO_HDR_MMPLS:
				case VIRTCHNL2_PROTO_HDR_CTRL:
				case VIRTCHNL2_PROTO_HDR_ECP:
				case VIRTCHNL2_PROTO_HDR_EAPOL:
				case VIRTCHNL2_PROTO_HDR_PPPOD:
				case VIRTCHNL2_PROTO_HDR_IGMP:
				case VIRTCHNL2_PROTO_HDR_AH:
				case VIRTCHNL2_PROTO_HDR_ESP:
				case VIRTCHNL2_PROTO_HDR_IKE:
				case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
				case VIRTCHNL2_PROTO_HDR_GTP:
				case VIRTCHNL2_PROTO_HDR_GTP_EH:
				case VIRTCHNL2_PROTO_HDR_GTPCV2:
				case VIRTCHNL2_PROTO_HDR_ECPRI:
				case VIRTCHNL2_PROTO_HDR_VRRP:
				case VIRTCHNL2_PROTO_HDR_OSPF:
				case VIRTCHNL2_PROTO_HDR_TUN:
				case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
				case VIRTCHNL2_PROTO_HDR_GENEVE:
				case VIRTCHNL2_PROTO_HDR_NSH:
				case VIRTCHNL2_PROTO_HDR_QUIC:
				case VIRTCHNL2_PROTO_HDR_PFCP:
				case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
				case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
				case VIRTCHNL2_PROTO_HDR_RTP:
				case VIRTCHNL2_PROTO_HDR_NO_PROTO:
				default:
					continue;
				}
				adapter->ptype_tbl[ptype->ptype_id_10] = proto_hdr;
			}
		}
	}

free_recv_ptype_info:
	rte_free(recv_ptype_info);
free_req_ptype_info:
	rte_free(req_ptype_info);
	clear_cmd(adapter);
	return ret;
}

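/* Bring up the adapter: reset the function, initialize the mailbox,
 * check the virtchnl API version, and fetch device capabilities and
 * the packet type table.
 */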
int
idpf_adapter_init(struct idpf_adapter *adapter)
{
	struct idpf_hw *hw = &adapter->hw;
	int ret;

	if (hw->device_id == IDPF_DEV_ID_SRIOV) {
		ret = idpf_check_vf_reset_done(hw);
	} else {
		idpf_reset_pf(hw);
		ret = idpf_check_pf_reset_done(hw);
	}
	if (ret != 0) {
		DRV_LOG(ERR, "IDPF is still resetting");
		goto err_check_reset;
	}

	ret = idpf_init_mbx(hw);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to init mailbox");
		goto err_check_reset;
	}

	adapter->mbx_resp = rte_zmalloc("idpf_adapter_mbx_resp",
					IDPF_DFLT_MBX_BUF_SIZE, 0);
	if (adapter->mbx_resp == NULL) {
		DRV_LOG(ERR, "Failed to allocate idpf_adapter_mbx_resp memory");
		ret = -ENOMEM;
		goto err_mbx_resp;
	}

	ret = idpf_vc_api_version_check(adapter);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to check api version");
		goto err_check_api;
	}

	ret = idpf_vc_caps_get(adapter);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to get capabilities");
		goto err_check_api;
	}

	ret = idpf_get_pkt_type(adapter);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to set ptype table");
		goto err_check_api;
	}

	return 0;

err_check_api:
	rte_free(adapter->mbx_resp);
	adapter->mbx_resp = NULL;
err_mbx_resp:
	idpf_ctlq_deinit(hw);
err_check_reset:
	return ret;
}

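/* Tear down the mailbox and release the adapter mailbox response buffer. */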
int
idpf_adapter_deinit(struct idpf_adapter *adapter)
{
	struct idpf_hw *hw = &adapter->hw;

	idpf_ctlq_deinit(hw);
	rte_free(adapter->mbx_resp);
	adapter->mbx_resp = NULL;

	return 0;
}

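/* Create a vport through virtchnl and cache its queue chunk layout,
 * RSS parameters and vector allocation response buffer.
 */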
int
idpf_vport_init(struct idpf_vport *vport,
		struct virtchnl2_create_vport *create_vport_info,
		void *dev_data)
{
	struct virtchnl2_create_vport *vport_info;
	int i, type, ret;

	ret = idpf_vc_vport_create(vport, create_vport_info);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to create vport.");
		goto err_create_vport;
	}

	vport_info = &(vport->vport_info.info);
	vport->vport_id = vport_info->vport_id;
	vport->txq_model = vport_info->txq_model;
	vport->rxq_model = vport_info->rxq_model;
	vport->num_tx_q = vport_info->num_tx_q;
	vport->num_tx_complq = vport_info->num_tx_complq;
	vport->num_rx_q = vport_info->num_rx_q;
	vport->num_rx_bufq = vport_info->num_rx_bufq;
	vport->max_mtu = vport_info->max_mtu;
	rte_memcpy(vport->default_mac_addr,
		   vport_info->default_mac_addr, ETH_ALEN);
	vport->rss_algorithm = vport_info->rss_algorithm;
	vport->rss_key_size = RTE_MIN(IDPF_RSS_KEY_LEN,
				      vport_info->rss_key_size);
	vport->rss_lut_size = vport_info->rss_lut_size;

	for (i = 0; i < vport_info->chunks.num_chunks; i++) {
		type = vport_info->chunks.chunks[i].type;
		switch (type) {
		case VIRTCHNL2_QUEUE_TYPE_TX:
			vport->chunks_info.tx_start_qid =
				vport_info->chunks.chunks[i].start_queue_id;
			vport->chunks_info.tx_qtail_start =
				vport_info->chunks.chunks[i].qtail_reg_start;
			vport->chunks_info.tx_qtail_spacing =
				vport_info->chunks.chunks[i].qtail_reg_spacing;
			break;
		case VIRTCHNL2_QUEUE_TYPE_RX:
			vport->chunks_info.rx_start_qid =
				vport_info->chunks.chunks[i].start_queue_id;
			vport->chunks_info.rx_qtail_start =
				vport_info->chunks.chunks[i].qtail_reg_start;
			vport->chunks_info.rx_qtail_spacing =
				vport_info->chunks.chunks[i].qtail_reg_spacing;
			break;
		case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
			vport->chunks_info.tx_compl_start_qid =
				vport_info->chunks.chunks[i].start_queue_id;
			vport->chunks_info.tx_compl_qtail_start =
				vport_info->chunks.chunks[i].qtail_reg_start;
			vport->chunks_info.tx_compl_qtail_spacing =
				vport_info->chunks.chunks[i].qtail_reg_spacing;
			break;
		case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
			vport->chunks_info.rx_buf_start_qid =
				vport_info->chunks.chunks[i].start_queue_id;
			vport->chunks_info.rx_buf_qtail_start =
				vport_info->chunks.chunks[i].qtail_reg_start;
			vport->chunks_info.rx_buf_qtail_spacing =
				vport_info->chunks.chunks[i].qtail_reg_spacing;
			break;
		default:
			DRV_LOG(ERR, "Unsupported queue type");
			break;
		}
	}

	vport->dev_data = dev_data;

	vport->rss_key = rte_zmalloc("rss_key",
				     vport->rss_key_size, 0);
	if (vport->rss_key == NULL) {
		DRV_LOG(ERR, "Failed to allocate RSS key");
		ret = -ENOMEM;
		goto err_rss_key;
	}

	vport->rss_lut = rte_zmalloc("rss_lut",
				     sizeof(uint32_t) * vport->rss_lut_size, 0);
	if (vport->rss_lut == NULL) {
		DRV_LOG(ERR, "Failed to allocate RSS lut");
		ret = -ENOMEM;
		goto err_rss_lut;
	}

	/* recv_vectors is used for VIRTCHNL2_OP_ALLOC_VECTORS response,
	 * reserve maximum size for it now, may need optimization in future.
	 */
	vport->recv_vectors = rte_zmalloc("recv_vectors", IDPF_DFLT_MBX_BUF_SIZE, 0);
	if (vport->recv_vectors == NULL) {
		DRV_LOG(ERR, "Failed to allocate recv_vectors");
		ret = -ENOMEM;
		goto err_recv_vec;
	}

	return 0;

err_recv_vec:
	rte_free(vport->rss_lut);
	vport->rss_lut = NULL;
err_rss_lut:
	vport->dev_data = NULL;
	rte_free(vport->rss_key);
	vport->rss_key = NULL;
err_rss_key:
	idpf_vc_vport_destroy(vport);
err_create_vport:
	return ret;
}

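/* Release vport resources and destroy the vport through virtchnl. */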
int
idpf_vport_deinit(struct idpf_vport *vport)
{
	rte_free(vport->recv_vectors);
	vport->recv_vectors = NULL;
	rte_free(vport->rss_lut);
	vport->rss_lut = NULL;

	rte_free(vport->rss_key);
	vport->rss_key = NULL;

	vport->dev_data = NULL;

	idpf_vc_vport_destroy(vport);

	return 0;
}

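/* Program the RSS key, LUT and hash configuration for the vport. */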
int
idpf_vport_rss_config(struct idpf_vport *vport)
{
	int ret;

	ret = idpf_vc_rss_key_set(vport);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to configure RSS key");
		return ret;
	}

	ret = idpf_vc_rss_lut_set(vport);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to configure RSS lut");
		return ret;
	}

	ret = idpf_vc_rss_hash_set(vport);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to configure RSS hash");
		return ret;
	}

	return ret;
}

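/* Map all Rx queues of the vport to one interrupt vector and enable
 * write-back on ITR expiry (Rx interrupts themselves stay disabled).
 */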
int
idpf_vport_irq_map_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_queue_vector *qv_map;
	struct idpf_hw *hw = &adapter->hw;
	uint32_t dynctl_val, itrn_val;
	uint32_t dynctl_reg_start;
	uint32_t itrn_reg_start;
	uint16_t i;
	int ret;

	qv_map = rte_zmalloc("qv_map",
			     nb_rx_queues *
			     sizeof(struct virtchnl2_queue_vector), 0);
	if (qv_map == NULL) {
		DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
			nb_rx_queues);
		ret = -ENOMEM;
		goto qv_map_alloc_err;
	}

	/* Rx interrupt disabled, Map interrupt only for writeback */

	/* The capability flags adapter->caps.other_caps should be
	 * compared with bit VIRTCHNL2_CAP_WB_ON_ITR here. The if
	 * condition should be updated when the FW can return the
	 * correct flag bits.
	 */
	dynctl_reg_start =
		vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
	itrn_reg_start =
		vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
	dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
	DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x", dynctl_val);
	itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
	DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
	/* Force write-backs by setting WB_ON_ITR bit in DYN_CTL
	 * register. WB_ON_ITR and INTENA are mutually exclusive
	 * bits. Setting WB_ON_ITR bits means TX and RX Descs
	 * are written back based on ITR expiration irrespective
	 * of INTENA setting.
	 */
	/* TBD: need to tune INTERVAL value for better performance. */
	itrn_val = (itrn_val == 0) ? IDPF_DFLT_INTERVAL : itrn_val;
	dynctl_val = VIRTCHNL2_ITR_IDX_0 <<
		     PF_GLINT_DYN_CTL_ITR_INDX_S |
		     PF_GLINT_DYN_CTL_WB_ON_ITR_M |
		     itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S;
	IDPF_WRITE_REG(hw, dynctl_reg_start, dynctl_val);

	for (i = 0; i < nb_rx_queues; i++) {
		/* map all queues to the same vector */
		qv_map[i].queue_id = vport->chunks_info.rx_start_qid + i;
		qv_map[i].vector_id =
			vport->recv_vectors->vchunks.vchunks->start_vector_id;
	}
	vport->qv_map = qv_map;

	ret = idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, true);
	if (ret != 0) {
		DRV_LOG(ERR, "config interrupt mapping failed");
		goto config_irq_map_err;
	}

	return 0;

config_irq_map_err:
	rte_free(vport->qv_map);
	vport->qv_map = NULL;

qv_map_alloc_err:
	return ret;
}

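/* Same as idpf_vport_irq_map_config(), but the caller supplies the queue ids
 * to map explicitly.
 */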
int
idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport, uint32_t *qids, uint16_t nb_rx_queues)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_queue_vector *qv_map;
	struct idpf_hw *hw = &adapter->hw;
	uint32_t dynctl_val, itrn_val;
	uint32_t dynctl_reg_start;
	uint32_t itrn_reg_start;
	uint16_t i;
	int ret;

	qv_map = rte_zmalloc("qv_map",
			     nb_rx_queues *
			     sizeof(struct virtchnl2_queue_vector), 0);
	if (qv_map == NULL) {
		DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
			nb_rx_queues);
		ret = -ENOMEM;
		goto qv_map_alloc_err;
	}

	/* Rx interrupt disabled, Map interrupt only for writeback */

	/* The capability flags adapter->caps.other_caps should be
	 * compared with bit VIRTCHNL2_CAP_WB_ON_ITR here. The if
	 * condition should be updated when the FW can return the
	 * correct flag bits.
	 */
	dynctl_reg_start =
		vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
	itrn_reg_start =
		vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
	dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
	DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x", dynctl_val);
	itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
	DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
	/* Force write-backs by setting WB_ON_ITR bit in DYN_CTL
	 * register. WB_ON_ITR and INTENA are mutually exclusive
	 * bits. Setting WB_ON_ITR bits means TX and RX Descs
	 * are written back based on ITR expiration irrespective
	 * of INTENA setting.
	 */
	/* TBD: need to tune INTERVAL value for better performance. */
	itrn_val = (itrn_val == 0) ? IDPF_DFLT_INTERVAL : itrn_val;
	dynctl_val = VIRTCHNL2_ITR_IDX_0 <<
		     PF_GLINT_DYN_CTL_ITR_INDX_S |
		     PF_GLINT_DYN_CTL_WB_ON_ITR_M |
		     itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S;
	IDPF_WRITE_REG(hw, dynctl_reg_start, dynctl_val);

	for (i = 0; i < nb_rx_queues; i++) {
		/* map all queues to the same vector */
		qv_map[i].queue_id = qids[i];
		qv_map[i].vector_id =
			vport->recv_vectors->vchunks.vchunks->start_vector_id;
	}
	vport->qv_map = qv_map;

	ret = idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, true);
	if (ret != 0) {
		DRV_LOG(ERR, "config interrupt mapping failed");
		goto config_irq_map_err;
	}

	return 0;

config_irq_map_err:
	rte_free(vport->qv_map);
	vport->qv_map = NULL;

qv_map_alloc_err:
	return ret;
}

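/* Unmap the queue/vector pairs and free the cached map. */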
int
idpf_vport_irq_unmap_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
{
	idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, false);

	rte_free(vport->qv_map);
	vport->qv_map = NULL;

	return 0;
}

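/* Fill a create-vport request with the default queue model and queue counts,
 * based on the adapter single-queue/split-queue flags.
 */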
int
idpf_vport_info_init(struct idpf_vport *vport,
			    struct virtchnl2_create_vport *vport_info)
{
	struct idpf_adapter *adapter = vport->adapter;

	vport_info->vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
	if (!adapter->is_tx_singleq) {
		vport_info->txq_model =
			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
		vport_info->num_tx_q =
			rte_cpu_to_le_16(IDPF_DEFAULT_TXQ_NUM);
		vport_info->num_tx_complq =
			rte_cpu_to_le_16(IDPF_DEFAULT_TXQ_NUM * IDPF_TX_COMPLQ_PER_GRP);
	} else {
		vport_info->txq_model =
			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
		vport_info->num_tx_q = rte_cpu_to_le_16(IDPF_DEFAULT_TXQ_NUM);
		vport_info->num_tx_complq = 0;
	}
	if (!adapter->is_rx_singleq) {
		vport_info->rxq_model =
			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
		vport_info->num_rx_q = rte_cpu_to_le_16(IDPF_DEFAULT_RXQ_NUM);
		vport_info->num_rx_bufq =
			rte_cpu_to_le_16(IDPF_DEFAULT_RXQ_NUM * IDPF_RX_BUFQ_PER_GRP);
	} else {
		vport_info->rxq_model =
			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
		vport_info->num_rx_q = rte_cpu_to_le_16(IDPF_DEFAULT_RXQ_NUM);
		vport_info->num_rx_bufq = 0;
	}

	return 0;
}

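/* Convert the cumulative counters in 'nes' into deltas against the previous
 * snapshot 'oes'.
 */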
void
idpf_vport_stats_update(struct virtchnl2_vport_stats *oes, struct virtchnl2_vport_stats *nes)
{
	nes->rx_bytes = nes->rx_bytes - oes->rx_bytes;
	nes->rx_unicast = nes->rx_unicast - oes->rx_unicast;
	nes->rx_multicast = nes->rx_multicast - oes->rx_multicast;
	nes->rx_broadcast = nes->rx_broadcast - oes->rx_broadcast;
	nes->rx_errors = nes->rx_errors - oes->rx_errors;
	nes->rx_discards = nes->rx_discards - oes->rx_discards;
	nes->tx_bytes = nes->tx_bytes - oes->tx_bytes;
	nes->tx_unicast = nes->tx_unicast - oes->tx_unicast;
	nes->tx_multicast = nes->tx_multicast - oes->tx_multicast;
	nes->tx_broadcast = nes->tx_broadcast - oes->tx_broadcast;
	nes->tx_errors = nes->tx_errors - oes->tx_errors;
	nes->tx_discards = nes->tx_discards - oes->tx_discards;
}

RTE_LOG_REGISTER_SUFFIX(idpf_common_logtype, common, NOTICE);