/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2017 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_errno.h>
#include <rte_flow_driver.h>

#include "qede_ethdev.h"

/* VXLAN tunnel classification mapping */
const struct _qede_udp_tunn_types {
	uint16_t rte_filter_type;
	enum ecore_filter_ucast_type qede_type;
	enum ecore_tunn_clss qede_tunn_clss;
	const char *string;
} qede_tunn_types[] = {
	{
		RTE_ETH_TUNNEL_FILTER_OMAC,
		ECORE_FILTER_MAC,
		ECORE_TUNN_CLSS_MAC_VLAN,
		"outer-mac"
	},
	{
		RTE_ETH_TUNNEL_FILTER_TENID,
		ECORE_FILTER_VNI,
		ECORE_TUNN_CLSS_MAC_VNI,
		"vni"
	},
	{
		RTE_ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_INNER_MAC,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-mac"
	},
	{
		RTE_ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_INNER_VLAN,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-vlan"
	},
	{
		RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_TENID,
		ECORE_FILTER_MAC_VNI_PAIR,
		ECORE_TUNN_CLSS_MAC_VNI,
		"outer-mac and vni"
	},
	{
		RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-mac and inner-mac"
	},
	{
		RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-mac and inner-vlan"
	},
	{
		RTE_ETH_TUNNEL_FILTER_TENID | RTE_ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_INNER_MAC_VNI_PAIR,
		ECORE_TUNN_CLSS_INNER_MAC_VNI,
		"vni and inner-mac",
	},
	{
		RTE_ETH_TUNNEL_FILTER_TENID | RTE_ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"vni and inner-vlan",
	},
	{
		RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_INNER_PAIR,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-mac and inner-vlan",
	},
	{
		RTE_ETH_TUNNEL_FILTER_OIP,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-IP"
	},
	{
		RTE_ETH_TUNNEL_FILTER_IIP,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"inner-IP"
	},
	{
		RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_IVLAN"
	},
	{
		RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN_TENID,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_IVLAN_TENID"
	},
	{
		RTE_ETH_TUNNEL_FILTER_IMAC_TENID,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_TENID"
	},
	{
		RTE_ETH_TUNNEL_FILTER_OMAC_TENID_IMAC,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"OMAC_TENID_IMAC"
	},
};

#define IP_VERSION				(0x40)
#define IP_HDRLEN				(0x5)
#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL	(IP_VERSION | IP_HDRLEN)
#define QEDE_FDIR_TCP_DEFAULT_DATAOFF		(0x50)
#define QEDE_FDIR_IPV4_DEF_TTL			(64)
#define QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW		(0x60000000)
/* Sum of the header lengths for L2, L3 and L4.
 * L2 : ether_hdr + vlan_hdr + vxlan_hdr
 * L3 : ipv6_hdr
 * L4 : tcp_hdr
 */
#define QEDE_MAX_FDIR_PKT_LEN			(86)
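/* Worked out with the standard header sizes:
 * ether_hdr (14) + vlan_hdr (4) + vxlan_hdr (8) + ipv6_hdr (40) +
 * tcp_hdr (20) = 86 bytes.
 */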

static uint16_t
qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
			struct qede_arfs_entry *arfs,
			void *buff,
			struct ecore_arfs_config_params *params);

/* Note: Flowdir support is only partial.
 * For example: drop_queue, FDIR masks and flex_conf are not supported.
 * Parameters like pballoc/status fields are irrelevant here.
 */
int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
	DP_INFO(edev, "flowdir is disabled\n");

	return 0;
}

void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct qede_arfs_entry *tmp;

	/* Pop entries one at a time; SLIST_FOREACH would dereference the
	 * node after it has been freed.
	 */
	while (!SLIST_EMPTY(&qdev->arfs_info.arfs_list_head)) {
		tmp = SLIST_FIRST(&qdev->arfs_info.arfs_list_head);
		SLIST_REMOVE_HEAD(&qdev->arfs_info.arfs_list_head, list);
		rte_memzone_free(tmp->mz);
		rte_free(tmp);
	}
}

static int
qede_config_arfs_filter(struct rte_eth_dev *eth_dev,
			struct qede_arfs_entry *arfs,
			bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_ntuple_filter_params params;
	char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
	struct qede_arfs_entry *tmp = NULL;
	const struct rte_memzone *mz;
	struct ecore_hwfn *p_hwfn;
	enum _ecore_status_t rc;
	uint16_t pkt_len;
	void *pkt;

	if (add) {
		if (qdev->arfs_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
			DP_ERR(edev, "Reached max flowdir filter limit\n");
			return -EINVAL;
		}
	}

	/* soft_id could have been used as memzone string, but soft_id is
	 * not currently used so it has no significance.
	 */
	snprintf(mz_name, sizeof(mz_name), "%lx",
		 (unsigned long)rte_get_timer_cycles());
	mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
					 SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
	if (!mz) {
		DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
		       rte_strerror(rte_errno));
		return -rte_errno;
	}

	pkt = mz->addr;
	memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
	pkt_len = qede_arfs_construct_pkt(eth_dev, arfs, pkt,
					  &qdev->arfs_info.arfs);
	if (pkt_len == 0) {
		rc = -EINVAL;
		goto err1;
	}

	DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
	if (add) {
		SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
			if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
				DP_INFO(edev, "flowdir filter exists\n");
				rc = -EEXIST;
				goto err1;
			}
		}
	} else {
		SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
			if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
				break;
		}
		if (!tmp) {
			DP_ERR(edev, "flowdir filter does not exist\n");
			rc = -EEXIST;
			goto err1;
		}
	}
	p_hwfn = ECORE_LEADING_HWFN(edev);
	if (add) {
		if (qdev->arfs_info.arfs.mode ==
			ECORE_FILTER_CONFIG_MODE_DISABLE) {
			qdev->arfs_info.arfs.mode =
					ECORE_FILTER_CONFIG_MODE_5_TUPLE;
			DP_INFO(edev, "Force enable flowdir in perfect mode\n");
		}
		/* Enable ARFS searcher with updated flow_types */
		ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
					  &qdev->arfs_info.arfs);
	}

	memset(&params, 0, sizeof(params));
	params.addr = (dma_addr_t)mz->iova;
	params.length = pkt_len;
	params.qid = arfs->rx_queue;
	params.vport_id = 0;
	params.b_is_add = add;
	params.b_is_drop = arfs->is_drop;

	/* configure filter with ECORE_SPQ_MODE_EBLOCK */
	rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
					       &params);
	if (rc == ECORE_SUCCESS) {
		if (add) {
			arfs->pkt_len = pkt_len;
			arfs->mz = mz;
			SLIST_INSERT_HEAD(&qdev->arfs_info.arfs_list_head,
					  arfs, list);
			qdev->arfs_info.filter_count++;
			DP_INFO(edev, "flowdir filter added, count = %d\n",
				qdev->arfs_info.filter_count);
		} else {
			rte_memzone_free(tmp->mz);
			SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp,
				     qede_arfs_entry, list);
			rte_free(tmp); /* free the deleted list node */
			rte_memzone_free(mz); /* free the temporary memzone */
			qdev->arfs_info.filter_count--;
			DP_INFO(edev, "Fdir filter deleted, count = %d\n",
				qdev->arfs_info.filter_count);
		}
	} else {
		DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
		       rc, qdev->arfs_info.filter_count);
	}

	/* Disable ARFS searcher if there are no more filters */
	if (qdev->arfs_info.filter_count == 0) {
		memset(&qdev->arfs_info.arfs, 0,
		       sizeof(struct ecore_arfs_config_params));
		DP_INFO(edev, "Disabling flowdir\n");
		qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
		ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
					  &qdev->arfs_info.arfs);
	}
	return 0;

err1:
	rte_memzone_free(mz);
	return rc;
}

/* Fills in the L3/L4 headers and returns the actual length of the flowdir packet. */
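/* For example, an IPv4/UDP 4-tuple yields 14 (L2) + 20 (rte_ipv4_hdr) +
 * 8 (rte_udp_hdr) = 42 bytes, while an IPv6/TCP 4-tuple yields
 * 14 + 40 (rte_ipv6_hdr) + 20 (rte_tcp_hdr) = 74 bytes.
 */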
static uint16_t
qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
			struct qede_arfs_entry *arfs,
			void *buff,
			struct ecore_arfs_config_params *params)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint16_t *ether_type;
	uint8_t *raw_pkt;
	struct rte_ipv4_hdr *ip;
	struct rte_ipv6_hdr *ip6;
	struct rte_udp_hdr *udp;
	struct rte_tcp_hdr *tcp;
	uint16_t len;

	raw_pkt = (uint8_t *)buff;

	len = 2 * sizeof(struct rte_ether_addr);
	raw_pkt += 2 * sizeof(struct rte_ether_addr);
	ether_type = (uint16_t *)raw_pkt;
	raw_pkt += sizeof(uint16_t);
	len += sizeof(uint16_t);

	*ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto);
	switch (arfs->tuple.eth_proto) {
	case RTE_ETHER_TYPE_IPV4:
		ip = (struct rte_ipv4_hdr *)raw_pkt;
		ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
		ip->total_length = sizeof(struct rte_ipv4_hdr);
		ip->next_proto_id = arfs->tuple.ip_proto;
		ip->time_to_live = QEDE_FDIR_IPV4_DEF_TTL;
		ip->dst_addr = arfs->tuple.dst_ipv4;
		ip->src_addr = arfs->tuple.src_ipv4;
		len += sizeof(struct rte_ipv4_hdr);
		params->ipv4 = true;

		raw_pkt = (uint8_t *)buff;
		/* UDP */
		if (arfs->tuple.ip_proto == IPPROTO_UDP) {
			udp = (struct rte_udp_hdr *)(raw_pkt + len);
			udp->dst_port = arfs->tuple.dst_port;
			udp->src_port = arfs->tuple.src_port;
			udp->dgram_len = sizeof(struct rte_udp_hdr);
			len += sizeof(struct rte_udp_hdr);
			/* adjust ip total_length */
			ip->total_length += sizeof(struct rte_udp_hdr);
			params->udp = true;
		} else { /* TCP */
			tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
			tcp->src_port = arfs->tuple.src_port;
			tcp->dst_port = arfs->tuple.dst_port;
			tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
			len += sizeof(struct rte_tcp_hdr);
			/* adjust ip total_length */
			ip->total_length += sizeof(struct rte_tcp_hdr);
			params->tcp = true;
		}
		break;
	case RTE_ETHER_TYPE_IPV6:
		ip6 = (struct rte_ipv6_hdr *)raw_pkt;
		ip6->proto = arfs->tuple.ip_proto;
		ip6->vtc_flow =
			rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);

		memcpy(&ip6->src_addr, arfs->tuple.src_ipv6, IPV6_ADDR_LEN);
		memcpy(&ip6->dst_addr, arfs->tuple.dst_ipv6, IPV6_ADDR_LEN);
		len += sizeof(struct rte_ipv6_hdr);
		params->ipv6 = true;

		raw_pkt = (uint8_t *)buff;
		/* UDP */
		if (arfs->tuple.ip_proto == IPPROTO_UDP) {
			udp = (struct rte_udp_hdr *)(raw_pkt + len);
			udp->src_port = arfs->tuple.src_port;
			udp->dst_port = arfs->tuple.dst_port;
			len += sizeof(struct rte_udp_hdr);
			params->udp = true;
		} else { /* TCP */
			tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
			tcp->src_port = arfs->tuple.src_port;
			tcp->dst_port = arfs->tuple.dst_port;
			tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
			len += sizeof(struct rte_tcp_hdr);
			params->tcp = true;
		}
		break;
	default:
		DP_ERR(edev, "Unsupported eth_proto %u\n",
		       arfs->tuple.eth_proto);
		return 0;
	}

	return len;
}

static int
qede_tunnel_update(struct qede_dev *qdev,
		   struct ecore_tunnel_info *tunn_info)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;
	int i;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		if (IS_PF(edev)) {
			p_ptt = ecore_ptt_acquire(p_hwfn);
			if (!p_ptt) {
				DP_ERR(p_hwfn, "Can't acquire PTT\n");
				return -EAGAIN;
			}
		} else {
			p_ptt = NULL;
		}

		rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
				tunn_info, ECORE_SPQ_MODE_CB, NULL);
		if (IS_PF(edev))
			ecore_ptt_release(p_hwfn, p_ptt);

		if (rc != ECORE_SUCCESS)
			break;
	}

	return rc;
}

static int
qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		  bool enable)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_tunnel_info tunn;

	if (qdev->vxlan.enable == enable)
		return ECORE_SUCCESS;

	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
	tunn.vxlan.b_update_mode = true;
	tunn.vxlan.b_mode_enabled = enable;
	tunn.b_update_rx_cls = true;
	tunn.b_update_tx_cls = true;
	tunn.vxlan.tun_cls = clss;

	tunn.vxlan_port.b_update_port = true;
	tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;

	rc = qede_tunnel_update(qdev, &tunn);
	if (rc == ECORE_SUCCESS) {
		qdev->vxlan.enable = enable;
		qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
		DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
			enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
	} else {
		DP_ERR(edev, "Failed to update tunn_clss %u\n",
		       tunn.vxlan.tun_cls);
	}

	return rc;
}

static int
qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		  bool enable)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_tunnel_info tunn;

	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
	tunn.l2_geneve.b_update_mode = true;
	tunn.l2_geneve.b_mode_enabled = enable;
	tunn.ip_geneve.b_update_mode = true;
	tunn.ip_geneve.b_mode_enabled = enable;
	tunn.l2_geneve.tun_cls = clss;
	tunn.ip_geneve.tun_cls = clss;
	tunn.b_update_rx_cls = true;
	tunn.b_update_tx_cls = true;

	tunn.geneve_port.b_update_port = true;
	tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;

	rc = qede_tunnel_update(qdev, &tunn);
	if (rc == ECORE_SUCCESS) {
		qdev->geneve.enable = enable;
		qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
		DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
			enable ? "enabled" : "disabled", qdev->geneve.udp_port);
	} else {
		DP_ERR(edev, "Failed to update tunn_clss %u\n",
		       clss);
	}

	return rc;
}

int
qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
		      struct rte_eth_udp_tunnel *tunnel_udp)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_tunnel_info tunn; /* @DPDK */
	uint16_t udp_port;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	memset(&tunn, 0, sizeof(tunn));

	switch (tunnel_udp->prot_type) {
	case RTE_ETH_TUNNEL_TYPE_VXLAN:
		if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
			DP_ERR(edev, "UDP port %u doesn't exist\n",
				tunnel_udp->udp_port);
			return ECORE_INVAL;
		}
		udp_port = 0;

		tunn.vxlan_port.b_update_port = true;
		tunn.vxlan_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u\n",
			       tunn.vxlan_port.port);
			return rc;
		}

		qdev->vxlan.udp_port = udp_port;
		/* If the request is to delete the UDP port and the number of
		 * VXLAN filters has reached 0, then VXLAN offload can be
		 * disabled.
		 */
		if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
			return qede_vxlan_enable(eth_dev,
					ECORE_TUNN_CLSS_MAC_VLAN, false);

		break;
	case RTE_ETH_TUNNEL_TYPE_GENEVE:
		if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
			DP_ERR(edev, "UDP port %u doesn't exist\n",
				tunnel_udp->udp_port);
			return ECORE_INVAL;
		}

		udp_port = 0;

		tunn.geneve_port.b_update_port = true;
		tunn.geneve_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u\n",
			       tunn.geneve_port.port);
			return rc;
		}

		qdev->geneve.udp_port = udp_port;
		/* If the request is to delete the UDP port and the number of
		 * GENEVE filters has reached 0, then GENEVE offload can be
		 * disabled.
		 */
		if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
			return qede_geneve_enable(eth_dev,
					ECORE_TUNN_CLSS_MAC_VLAN, false);

		break;

	default:
		return ECORE_INVAL;
	}

	return 0;
}

int
qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
		      struct rte_eth_udp_tunnel *tunnel_udp)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_tunnel_info tunn; /* @DPDK */
	uint16_t udp_port;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	memset(&tunn, 0, sizeof(tunn));

	switch (tunnel_udp->prot_type) {
	case RTE_ETH_TUNNEL_TYPE_VXLAN:
		if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
			DP_INFO(edev,
				"UDP port %u for VXLAN was already configured\n",
				tunnel_udp->udp_port);
			return ECORE_SUCCESS;
		}

		/* Enable the VXLAN tunnel with default MAC/VLAN classification
		 * if it was not already enabled (e.g. when a VXLAN filter was
		 * added before this UDP port update).
		 */
		if (!qdev->vxlan.enable) {
			rc = qede_vxlan_enable(eth_dev,
				ECORE_TUNN_CLSS_MAC_VLAN, true);
			if (rc != ECORE_SUCCESS) {
				DP_ERR(edev, "Failed to enable VXLAN "
					"prior to updating UDP port\n");
				return rc;
			}
		}
		udp_port = tunnel_udp->udp_port;

		tunn.vxlan_port.b_update_port = true;
		tunn.vxlan_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
			       udp_port);
			return rc;
		}

		DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);

		qdev->vxlan.udp_port = udp_port;
		break;
	case RTE_ETH_TUNNEL_TYPE_GENEVE:
		if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
			DP_INFO(edev,
				"UDP port %u for GENEVE was already configured\n",
				tunnel_udp->udp_port);
			return ECORE_SUCCESS;
		}

		/* Enable the GENEVE tunnel with default MAC/VLAN classification
		 * if it was not already enabled (e.g. when a GENEVE filter was
		 * added before this UDP port update).
		 */
		if (!qdev->geneve.enable) {
			rc = qede_geneve_enable(eth_dev,
				ECORE_TUNN_CLSS_MAC_VLAN, true);
			if (rc != ECORE_SUCCESS) {
				DP_ERR(edev, "Failed to enable GENEVE "
					"prior to updating UDP port\n");
				return rc;
			}
		}
		udp_port = tunnel_udp->udp_port;

		tunn.geneve_port.b_update_port = true;
		tunn.geneve_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
			       udp_port);
			return rc;
		}

		DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);

		qdev->geneve.udp_port = udp_port;
		break;
	default:
		return ECORE_INVAL;
	}

	return 0;
}

static int
qede_flow_validate_attr(__rte_unused struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			struct rte_flow_error *error)
{
	if (attr == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "NULL attribute");
		return -rte_errno;
	}

	if (attr->group != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
				   "Groups are not supported");
		return -rte_errno;
	}

	if (attr->priority != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
				   "Priorities are not supported");
		return -rte_errno;
	}

	if (attr->egress != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
				   "Egress is not supported");
		return -rte_errno;
	}

	if (attr->transfer != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
				   "Transfer is not supported");
		return -rte_errno;
	}

	if (attr->ingress == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
				   "Only ingress is supported");
		return -rte_errno;
	}

	return 0;
}

static int
qede_flow_parse_pattern(__rte_unused struct rte_eth_dev *dev,
			const struct rte_flow_item pattern[],
			struct rte_flow_error *error,
			struct rte_flow *flow)
{
	bool l3 = false, l4 = false;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		if (!pattern->spec) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   pattern,
					   "Item spec not defined");
			return -rte_errno;
		}

		if (pattern->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   pattern,
					   "Item last not supported");
			return -rte_errno;
		}

		if (pattern->mask) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   pattern,
					   "Item mask not supported");
			return -rte_errno;
		}

		/* The validation below is only for a 4-tuple flow
		 * (GFT_PROFILE_TYPE_4_TUPLE):
		 * - src and dst L3 address (IPv4 or IPv6)
		 * - src and dst L4 port (TCP or UDP)
		 */

		switch (pattern->type) {
		case RTE_FLOW_ITEM_TYPE_IPV4:
			l3 = true;

			if (flow) {
				const struct rte_flow_item_ipv4 *spec;

				spec = pattern->spec;
				flow->entry.tuple.src_ipv4 = spec->hdr.src_addr;
				flow->entry.tuple.dst_ipv4 = spec->hdr.dst_addr;
				flow->entry.tuple.eth_proto =
					RTE_ETHER_TYPE_IPV4;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_IPV6:
			l3 = true;

			if (flow) {
				const struct rte_flow_item_ipv6 *spec;

				spec = pattern->spec;
				memcpy(flow->entry.tuple.src_ipv6,
				       &spec->hdr.src_addr, IPV6_ADDR_LEN);
				memcpy(flow->entry.tuple.dst_ipv6,
				       &spec->hdr.dst_addr, IPV6_ADDR_LEN);
				flow->entry.tuple.eth_proto =
					RTE_ETHER_TYPE_IPV6;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_UDP:
			l4 = true;

			if (flow) {
				const struct rte_flow_item_udp *spec;

				spec = pattern->spec;
				flow->entry.tuple.src_port =
						spec->hdr.src_port;
				flow->entry.tuple.dst_port =
						spec->hdr.dst_port;
				flow->entry.tuple.ip_proto = IPPROTO_UDP;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_TCP:
			l4 = true;

			if (flow) {
				const struct rte_flow_item_tcp *spec;

				spec = pattern->spec;
				flow->entry.tuple.src_port =
						spec->hdr.src_port;
				flow->entry.tuple.dst_port =
						spec->hdr.dst_port;
				flow->entry.tuple.ip_proto = IPPROTO_TCP;
			}

			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   pattern,
					   "Only 4 tuple (IPV4, IPV6, UDP and TCP) item types supported");
			return -rte_errno;
		}
	}

	if (!(l3 && l4)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   pattern,
				   "Item types need to have both L3 and L4 protocols");
		return -rte_errno;
	}

	return 0;
}

static int
qede_flow_parse_actions(struct rte_eth_dev *dev,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error,
			struct rte_flow *flow)
{
	const struct rte_flow_action_queue *queue;

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			queue = actions->conf;

			if (queue->index >= QEDE_RSS_COUNT(dev)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "Bad QUEUE action");
				return -rte_errno;
			}

			if (flow)
				flow->entry.rx_queue = queue->index;

			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			if (flow)
				flow->entry.is_drop = true;
			break;
		default:
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   actions,
					   "Action is not supported - only ACTION_TYPE_QUEUE and ACTION_TYPE_DROP supported");
			return -rte_errno;
		}
	}

	return 0;
}

static int
qede_flow_parse(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item patterns[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error,
		struct rte_flow *flow)
{
	int rc = 0;

	rc = qede_flow_validate_attr(dev, attr, error);
	if (rc)
		return rc;

	/* Parse and validate the item pattern and actions.
	 * The given item list and actions will be translated to the qede PMD
	 * specific arfs structure.
	 */
	rc = qede_flow_parse_pattern(dev, patterns, error, flow);
	if (rc)
		return rc;

	rc = qede_flow_parse_actions(dev, actions, error, flow);

	return rc;
}

static int
qede_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item patterns[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	return qede_flow_parse(dev, attr, patterns, actions, error, NULL);
}

static struct rte_flow *
qede_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct rte_flow *flow = NULL;
	int rc;

	flow = rte_zmalloc("qede_rte_flow", sizeof(*flow), 0);
	if (flow == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to allocate memory");
		return NULL;
	}

	rc = qede_flow_parse(dev, attr, pattern, actions, error, flow);
	if (rc < 0) {
		rte_free(flow);
		return NULL;
	}

	rc = qede_config_arfs_filter(dev, &flow->entry, true);
	if (rc < 0) {
		rte_flow_error_set(error, rc,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to configure flow filter");
		rte_free(flow);
		return NULL;
	}

	return flow;
}

static int
qede_flow_destroy(struct rte_eth_dev *eth_dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	int rc = 0;

	rc = qede_config_arfs_filter(eth_dev, &flow->entry, false);
	if (rc < 0) {
		rte_flow_error_set(error, rc,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to delete flow filter");
		rte_free(flow);
	}

	return rc;
}

static int
qede_flow_flush(struct rte_eth_dev *eth_dev,
		struct rte_flow_error *error)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct qede_arfs_entry *tmp = NULL;
	int rc = 0;

	while (!SLIST_EMPTY(&qdev->arfs_info.arfs_list_head)) {
		tmp = SLIST_FIRST(&qdev->arfs_info.arfs_list_head);

		rc = qede_config_arfs_filter(eth_dev, tmp, false);
		if (rc < 0)
			rte_flow_error_set(error, rc,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Failed to flush flow filter");
	}

	return rc;
}

const struct rte_flow_ops qede_flow_ops = {
	.validate = qede_flow_validate,
	.create = qede_flow_create,
	.destroy = qede_flow_destroy,
	.flush = qede_flow_flush,
};
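
/* Illustrative sketch (not part of the driver): how an application might
 * install a 4-tuple filter through these rte_flow ops. "port_id", the
 * addresses, ports and queue index below are placeholders; the parser above
 * only accepts ingress attributes, spec-only IPV4/IPV6 + UDP/TCP items
 * (no mask/last) and QUEUE or DROP actions.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1)),
 *		.hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 2)),
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.src_port = rte_cpu_to_be_16(4000),
 *		.hdr.dst_port = rte_cpu_to_be_16(5000),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue q = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 */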

int
qede_dev_flow_ops_get(struct rte_eth_dev *eth_dev,
		      const struct rte_flow_ops **ops)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	if (ECORE_IS_CMT(edev)) {
		DP_ERR(edev, "flowdir is not supported in 100G mode\n");
		return -ENOTSUP;
	}

	*ops = &qede_flow_ops;
	return 0;
}