xref: /dpdk/drivers/net/bnxt/bnxt_flow.c (revision 9e991f217fc8719e38a812dc280dba5f84db9f59)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2018 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <sys/queue.h>
7 
8 #include <rte_log.h>
9 #include <rte_malloc.h>
10 #include <rte_flow.h>
11 #include <rte_flow_driver.h>
12 #include <rte_tailq.h>
13 
14 #include "bnxt.h"
15 #include "bnxt_filter.h"
16 #include "bnxt_hwrm.h"
17 #include "bnxt_ring.h"
18 #include "bnxt_rxq.h"
19 #include "bnxt_vnic.h"
20 #include "hsi_struct_def_dpdk.h"
21 
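/* Reject flow requests that are missing the attribute, pattern or
 * action arguments before any further parsing is attempted.
 */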
22 static int
23 bnxt_flow_args_validate(const struct rte_flow_attr *attr,
24 			const struct rte_flow_item pattern[],
25 			const struct rte_flow_action actions[],
26 			struct rte_flow_error *error)
27 {
28 	if (!pattern) {
29 		rte_flow_error_set(error,
30 				   EINVAL,
31 				   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
32 				   NULL,
33 				   "NULL pattern.");
34 		return -rte_errno;
35 	}
36 
37 	if (!actions) {
38 		rte_flow_error_set(error,
39 				   EINVAL,
40 				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
41 				   NULL,
42 				   "NULL action.");
43 		return -rte_errno;
44 	}
45 
46 	if (!attr) {
47 		rte_flow_error_set(error,
48 				   EINVAL,
49 				   RTE_FLOW_ERROR_TYPE_ATTR,
50 				   NULL,
51 				   "NULL attribute.");
52 		return -rte_errno;
53 	}
54 
55 	return 0;
56 }
57 
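/* Return the first pattern item that is not of type VOID.
 * The caller must ensure the item list is terminated by an END item.
 */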
58 static const struct rte_flow_item *
59 bnxt_flow_non_void_item(const struct rte_flow_item *cur)
60 {
61 	while (1) {
62 		if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
63 			return cur;
64 		cur++;
65 	}
66 }
67 
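/* Return the first action that is not of type VOID.
 * The caller must ensure the action list is terminated by an END action.
 */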
68 static const struct rte_flow_action *
69 bnxt_flow_non_void_action(const struct rte_flow_action *cur)
70 {
71 	while (1) {
72 		if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
73 			return cur;
74 		cur++;
75 	}
76 }
77 
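/* Inspect the pattern to choose the HW filter type: returns 1 when the
 * L3/L4 items require an ntuple filter, 0 when an L2/exact-match filter
 * is sufficient, or a negative errno when VLAN matching is combined
 * with ntuple matching.
 */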
78 static int
79 bnxt_filter_type_check(const struct rte_flow_item pattern[],
80 		       struct rte_flow_error *error)
81 {
82 	const struct rte_flow_item *item =
83 		bnxt_flow_non_void_item(pattern);
84 	int use_ntuple = 1;
85 	bool has_vlan = 0;
86 
87 	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
88 		switch (item->type) {
89 		case RTE_FLOW_ITEM_TYPE_ANY:
90 		case RTE_FLOW_ITEM_TYPE_ETH:
91 			use_ntuple = 0;
92 			break;
93 		case RTE_FLOW_ITEM_TYPE_VLAN:
94 			use_ntuple = 0;
95 			has_vlan = 1;
96 			break;
97 		case RTE_FLOW_ITEM_TYPE_IPV4:
98 		case RTE_FLOW_ITEM_TYPE_IPV6:
99 		case RTE_FLOW_ITEM_TYPE_TCP:
100 		case RTE_FLOW_ITEM_TYPE_UDP:
101 			/* FALLTHROUGH */
102 			/* need ntuple match, reset exact match */
103 			use_ntuple |= 1;
104 			break;
105 		default:
106 			PMD_DRV_LOG(DEBUG, "Unknown Flow type\n");
107 			use_ntuple |= 0;
108 		}
109 		item++;
110 	}
111 
112 	if (has_vlan && use_ntuple) {
113 		PMD_DRV_LOG(ERR,
114 			    "VLAN flow cannot use NTUPLE filter\n");
115 		rte_flow_error_set(error, EINVAL,
116 				   RTE_FLOW_ERROR_TYPE_ITEM,
117 				   item,
118 				   "Cannot use VLAN with NTUPLE");
119 		return -rte_errno;
120 	}
121 
122 	return use_ntuple;
123 }
124 
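/* Walk the pattern items and translate them into bnxt_filter_info
 * fields (MAC/VLAN/IP/port matches, tunnel type, VF mirror VNIC),
 * setting the corresponding "enables" bits for the HWRM request.
 */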
125 static int
126 bnxt_validate_and_parse_flow_type(struct bnxt *bp,
127 				  const struct rte_flow_attr *attr,
128 				  const struct rte_flow_item pattern[],
129 				  struct rte_flow_error *error,
130 				  struct bnxt_filter_info *filter)
131 {
132 	const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
133 	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
134 	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
135 	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
136 	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
137 	const struct rte_flow_item_udp *udp_spec, *udp_mask;
138 	const struct rte_flow_item_eth *eth_spec, *eth_mask;
139 	const struct rte_ether_addr *dst, *src;
140 	const struct rte_flow_item_nvgre *nvgre_spec;
141 	const struct rte_flow_item_nvgre *nvgre_mask;
142 	const struct rte_flow_item_gre *gre_spec;
143 	const struct rte_flow_item_gre *gre_mask;
144 	const struct rte_flow_item_vxlan *vxlan_spec;
145 	const struct rte_flow_item_vxlan *vxlan_mask;
146 	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
147 	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
148 	const struct rte_flow_item_vf *vf_spec;
149 	uint32_t tenant_id_be = 0, valid_flags = 0;
150 	bool vni_masked = 0;
151 	bool tni_masked = 0;
152 	uint32_t en_ethertype;
153 	uint8_t inner = 0;
154 	uint32_t vf = 0;
155 	uint32_t en = 0;
156 	int use_ntuple;
157 	int dflt_vnic;
158 
159 	use_ntuple = bnxt_filter_type_check(pattern, error);
160 	if (use_ntuple < 0)
161 		return use_ntuple;
162 	PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
163 
164 	filter->filter_type = use_ntuple ?
165 		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_L2_FILTER;
166 	en_ethertype = use_ntuple ?
167 		NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
168 		EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
169 
170 	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
171 		if (item->last) {
172 			/* "last" (range matching) is NOT supported as a match criterion */
173 			rte_flow_error_set(error, EINVAL,
174 					   RTE_FLOW_ERROR_TYPE_ITEM,
175 					   item,
176 					   "No support for range");
177 			return -rte_errno;
178 		}
179 
180 		switch (item->type) {
181 		case RTE_FLOW_ITEM_TYPE_ANY:
182 			inner =
183 			((const struct rte_flow_item_any *)item->spec)->num > 3;
184 			if (inner)
185 				PMD_DRV_LOG(DEBUG, "Parse inner header\n");
186 			break;
187 		case RTE_FLOW_ITEM_TYPE_ETH:
188 			if (!item->spec || !item->mask)
189 				break;
190 
191 			eth_spec = item->spec;
192 			eth_mask = item->mask;
193 
194 			/* Neither the source nor the destination MAC address
195 			 * mask may be partially set: each must be either
196 			 * all 0's or all 1's.
197 			 */
199 			if ((!rte_is_zero_ether_addr(&eth_mask->src) &&
200 			     !rte_is_broadcast_ether_addr(&eth_mask->src)) ||
201 			    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
202 			     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
203 				rte_flow_error_set(error,
204 						   EINVAL,
205 						   RTE_FLOW_ERROR_TYPE_ITEM,
206 						   item,
207 						   "MAC_addr mask not valid");
208 				return -rte_errno;
209 			}
210 
211 			/* Partial ethertype masks are not allowed; only exact matches are */
212 			if (eth_mask->type &&
213 			    eth_mask->type != RTE_BE16(0xffff)) {
214 				rte_flow_error_set(error, EINVAL,
215 						   RTE_FLOW_ERROR_TYPE_ITEM,
216 						   item,
217 						   "ethertype mask not valid");
218 				return -rte_errno;
219 			}
220 
221 			if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
222 				dst = &eth_spec->dst;
223 				if (!rte_is_valid_assigned_ether_addr(dst)) {
224 					rte_flow_error_set(error,
225 							   EINVAL,
226 							   RTE_FLOW_ERROR_TYPE_ITEM,
227 							   item,
228 							   "DMAC is invalid");
229 					PMD_DRV_LOG(ERR,
230 						    "DMAC is invalid!\n");
231 					return -rte_errno;
232 				}
233 				rte_memcpy(filter->dst_macaddr,
234 					   &eth_spec->dst, RTE_ETHER_ADDR_LEN);
235 				en |= use_ntuple ?
236 					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
237 					EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
238 				valid_flags |= inner ?
239 					BNXT_FLOW_L2_INNER_DST_VALID_FLAG :
240 					BNXT_FLOW_L2_DST_VALID_FLAG;
241 				filter->priority = attr->priority;
242 				PMD_DRV_LOG(DEBUG,
243 					    "Creating a priority flow\n");
244 			}
245 			if (rte_is_broadcast_ether_addr(&eth_mask->src)) {
246 				src = &eth_spec->src;
247 				if (!rte_is_valid_assigned_ether_addr(src)) {
248 					rte_flow_error_set(error,
249 							   EINVAL,
250 							   RTE_FLOW_ERROR_TYPE_ITEM,
251 							   item,
252 							   "SMAC is invalid");
253 					PMD_DRV_LOG(ERR,
254 						    "SMAC is invalid!\n");
255 					return -rte_errno;
256 				}
257 				rte_memcpy(filter->src_macaddr,
258 					   &eth_spec->src, RTE_ETHER_ADDR_LEN);
259 				en |= use_ntuple ?
260 					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
261 					EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
262 				valid_flags |= inner ?
263 					BNXT_FLOW_L2_INNER_SRC_VALID_FLAG :
264 					BNXT_FLOW_L2_SRC_VALID_FLAG;
265 			} /*
266 			   * else {
267 			   *  PMD_DRV_LOG(ERR, "Handle this condition\n");
268 			   * }
269 			   */
270 			if (eth_mask->type) {
271 				filter->ethertype =
272 					rte_be_to_cpu_16(eth_spec->type);
273 				en |= en_ethertype;
274 			}
275 			if (inner)
276 				valid_flags |= BNXT_FLOW_PARSE_INNER_FLAG;
277 
278 			break;
279 		case RTE_FLOW_ITEM_TYPE_VLAN:
280 			vlan_spec = item->spec;
281 			vlan_mask = item->mask;
282 			if (en & en_ethertype) {
283 				rte_flow_error_set(error, EINVAL,
284 						   RTE_FLOW_ERROR_TYPE_ITEM,
285 						   item,
286 						   "VLAN TPID matching is not"
287 						   " supported");
288 				return -rte_errno;
289 			}
290 			if (vlan_mask->tci &&
291 			    vlan_mask->tci == RTE_BE16(0x0fff)) {
292 				/* Only the VLAN ID can be matched. */
293 				filter->l2_ovlan =
294 					rte_be_to_cpu_16(vlan_spec->tci &
295 							 RTE_BE16(0x0fff));
296 				en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
297 			} else {
298 				rte_flow_error_set(error,
299 						   EINVAL,
300 						   RTE_FLOW_ERROR_TYPE_ITEM,
301 						   item,
302 						   "VLAN mask is invalid");
303 				return -rte_errno;
304 			}
305 			if (vlan_mask->inner_type &&
306 			    vlan_mask->inner_type != RTE_BE16(0xffff)) {
307 				rte_flow_error_set(error, EINVAL,
308 						   RTE_FLOW_ERROR_TYPE_ITEM,
309 						   item,
310 						   "inner ethertype mask not"
311 						   " valid");
312 				return -rte_errno;
313 			}
314 			if (vlan_mask->inner_type) {
315 				filter->ethertype =
316 					rte_be_to_cpu_16(vlan_spec->inner_type);
317 				en |= en_ethertype;
318 			}
319 
320 			break;
321 		case RTE_FLOW_ITEM_TYPE_IPV4:
322 			/* If mask is not involved, we could use EM filters. */
323 			ipv4_spec = item->spec;
324 			ipv4_mask = item->mask;
325 
326 			if (!item->spec || !item->mask)
327 				break;
328 
329 			/* Only IP DST and SRC fields are maskable. */
330 			if (ipv4_mask->hdr.version_ihl ||
331 			    ipv4_mask->hdr.type_of_service ||
332 			    ipv4_mask->hdr.total_length ||
333 			    ipv4_mask->hdr.packet_id ||
334 			    ipv4_mask->hdr.fragment_offset ||
335 			    ipv4_mask->hdr.time_to_live ||
336 			    ipv4_mask->hdr.next_proto_id ||
337 			    ipv4_mask->hdr.hdr_checksum) {
338 				rte_flow_error_set(error,
339 						   EINVAL,
340 						   RTE_FLOW_ERROR_TYPE_ITEM,
341 						   item,
342 						   "Invalid IPv4 mask.");
343 				return -rte_errno;
344 			}
345 
346 			filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
347 			filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
348 
349 			if (use_ntuple)
350 				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
351 					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
352 			else
353 				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
354 					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
355 
356 			if (ipv4_mask->hdr.src_addr) {
357 				filter->src_ipaddr_mask[0] =
358 					ipv4_mask->hdr.src_addr;
359 				en |= !use_ntuple ? 0 :
360 				     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
361 			}
362 
363 			if (ipv4_mask->hdr.dst_addr) {
364 				filter->dst_ipaddr_mask[0] =
365 					ipv4_mask->hdr.dst_addr;
366 				en |= !use_ntuple ? 0 :
367 				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
368 			}
369 
370 			filter->ip_addr_type = use_ntuple ?
371 			 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
372 			 HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
373 
374 			if (ipv4_spec->hdr.next_proto_id) {
375 				filter->ip_protocol =
376 					ipv4_spec->hdr.next_proto_id;
377 				if (use_ntuple)
378 					en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
379 				else
380 					en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
381 			}
382 			break;
383 		case RTE_FLOW_ITEM_TYPE_IPV6:
384 			ipv6_spec = item->spec;
385 			ipv6_mask = item->mask;
386 
387 			if (!item->spec || !item->mask)
388 				break;
389 
390 			/* Only IP DST and SRC fields are maskable. */
391 			if (ipv6_mask->hdr.vtc_flow ||
392 			    ipv6_mask->hdr.payload_len ||
393 			    ipv6_mask->hdr.proto ||
394 			    ipv6_mask->hdr.hop_limits) {
395 				rte_flow_error_set(error,
396 						   EINVAL,
397 						   RTE_FLOW_ERROR_TYPE_ITEM,
398 						   item,
399 						   "Invalid IPv6 mask.");
400 				return -rte_errno;
401 			}
402 
403 			if (use_ntuple)
404 				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
405 					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
406 			else
407 				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
408 					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
409 
410 			rte_memcpy(filter->src_ipaddr,
411 				   ipv6_spec->hdr.src_addr, 16);
412 			rte_memcpy(filter->dst_ipaddr,
413 				   ipv6_spec->hdr.dst_addr, 16);
414 
415 			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
416 						   16)) {
417 				rte_memcpy(filter->src_ipaddr_mask,
418 					   ipv6_mask->hdr.src_addr, 16);
419 				en |= !use_ntuple ? 0 :
420 				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
421 			}
422 
423 			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
424 						   16)) {
425 				rte_memcpy(filter->dst_ipaddr_mask,
426 					   ipv6_mask->hdr.dst_addr, 16);
427 				en |= !use_ntuple ? 0 :
428 				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
429 			}
430 
431 			filter->ip_addr_type = use_ntuple ?
432 				NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
433 				EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
434 			break;
435 		case RTE_FLOW_ITEM_TYPE_TCP:
436 			tcp_spec = item->spec;
437 			tcp_mask = item->mask;
438 
439 			if (!item->spec || !item->mask)
440 				break;
441 
442 			/* Check TCP mask. Only DST & SRC ports are maskable */
443 			if (tcp_mask->hdr.sent_seq ||
444 			    tcp_mask->hdr.recv_ack ||
445 			    tcp_mask->hdr.data_off ||
446 			    tcp_mask->hdr.tcp_flags ||
447 			    tcp_mask->hdr.rx_win ||
448 			    tcp_mask->hdr.cksum ||
449 			    tcp_mask->hdr.tcp_urp) {
450 				rte_flow_error_set(error,
451 						   EINVAL,
452 						   RTE_FLOW_ERROR_TYPE_ITEM,
453 						   item,
454 						   "Invalid TCP mask");
455 				return -rte_errno;
456 			}
457 
458 			filter->src_port = tcp_spec->hdr.src_port;
459 			filter->dst_port = tcp_spec->hdr.dst_port;
460 
461 			if (use_ntuple)
462 				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
463 					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
464 			else
465 				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
466 					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
467 
468 			if (tcp_mask->hdr.dst_port) {
469 				filter->dst_port_mask = tcp_mask->hdr.dst_port;
470 				en |= !use_ntuple ? 0 :
471 				  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
472 			}
473 
474 			if (tcp_mask->hdr.src_port) {
475 				filter->src_port_mask = tcp_mask->hdr.src_port;
476 				en |= !use_ntuple ? 0 :
477 				  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
478 			}
479 			break;
480 		case RTE_FLOW_ITEM_TYPE_UDP:
481 			udp_spec = item->spec;
482 			udp_mask = item->mask;
483 
484 			if (!item->spec || !item->mask)
485 				break;
486 
487 			if (udp_mask->hdr.dgram_len ||
488 			    udp_mask->hdr.dgram_cksum) {
489 				rte_flow_error_set(error,
490 						   EINVAL,
491 						   RTE_FLOW_ERROR_TYPE_ITEM,
492 						   item,
493 						   "Invalid UDP mask");
494 				return -rte_errno;
495 			}
496 
497 			filter->src_port = udp_spec->hdr.src_port;
498 			filter->dst_port = udp_spec->hdr.dst_port;
499 
500 			if (use_ntuple)
501 				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
502 					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
503 			else
504 				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
505 					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
506 
507 			if (udp_mask->hdr.dst_port) {
508 				filter->dst_port_mask = udp_mask->hdr.dst_port;
509 				en |= !use_ntuple ? 0 :
510 				  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
511 			}
512 
513 			if (udp_mask->hdr.src_port) {
514 				filter->src_port_mask = udp_mask->hdr.src_port;
515 				en |= !use_ntuple ? 0 :
516 				  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
517 			}
518 			break;
519 		case RTE_FLOW_ITEM_TYPE_VXLAN:
520 			vxlan_spec = item->spec;
521 			vxlan_mask = item->mask;
522 			/* Check if VXLAN item is used to describe protocol.
523 			 * If yes, both spec and mask should be NULL.
524 			 * If no, both spec and mask shouldn't be NULL.
525 			 */
526 			if ((!vxlan_spec && vxlan_mask) ||
527 			    (vxlan_spec && !vxlan_mask)) {
528 				rte_flow_error_set(error,
529 						   EINVAL,
530 						   RTE_FLOW_ERROR_TYPE_ITEM,
531 						   item,
532 						   "Invalid VXLAN item");
533 				return -rte_errno;
534 			}
535 
536 			if (!vxlan_spec && !vxlan_mask) {
537 				filter->tunnel_type =
538 				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
539 				break;
540 			}
541 
542 			if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
543 			    vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
544 			    vxlan_spec->flags != 0x8) {
545 				rte_flow_error_set(error,
546 						   EINVAL,
547 						   RTE_FLOW_ERROR_TYPE_ITEM,
548 						   item,
549 						   "Invalid VXLAN item");
550 				return -rte_errno;
551 			}
552 
553 			/* Check if VNI is masked. */
554 			if (vxlan_spec && vxlan_mask) {
555 				vni_masked =
556 					!!memcmp(vxlan_mask->vni, vni_mask,
557 						 RTE_DIM(vni_mask));
558 				if (vni_masked) {
559 					rte_flow_error_set
560 						(error,
561 						 EINVAL,
562 						 RTE_FLOW_ERROR_TYPE_ITEM,
563 						 item,
564 						 "Invalid VNI mask");
565 					return -rte_errno;
566 				}
567 
568 				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
569 					   vxlan_spec->vni, 3);
570 				filter->vni =
571 					rte_be_to_cpu_32(tenant_id_be);
572 				filter->tunnel_type =
573 				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
574 			}
575 			break;
576 		case RTE_FLOW_ITEM_TYPE_NVGRE:
577 			nvgre_spec = item->spec;
578 			nvgre_mask = item->mask;
579 			/* Check if NVGRE item is used to describe protocol.
580 			 * If yes, both spec and mask should be NULL.
581 			 * If no, both spec and mask shouldn't be NULL.
582 			 */
583 			if ((!nvgre_spec && nvgre_mask) ||
584 			    (nvgre_spec && !nvgre_mask)) {
585 				rte_flow_error_set(error,
586 						   EINVAL,
587 						   RTE_FLOW_ERROR_TYPE_ITEM,
588 						   item,
589 						   "Invalid NVGRE item");
590 				return -rte_errno;
591 			}
592 
593 			if (!nvgre_spec && !nvgre_mask) {
594 				filter->tunnel_type =
595 				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
596 				break;
597 			}
598 
599 			if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
600 			    nvgre_spec->protocol != 0x6558) {
601 				rte_flow_error_set(error,
602 						   EINVAL,
603 						   RTE_FLOW_ERROR_TYPE_ITEM,
604 						   item,
605 						   "Invalid NVGRE item");
606 				return -rte_errno;
607 			}
608 
609 			if (nvgre_spec && nvgre_mask) {
610 				tni_masked =
611 					!!memcmp(nvgre_mask->tni, tni_mask,
612 						 RTE_DIM(tni_mask));
613 				if (tni_masked) {
614 					rte_flow_error_set
615 						(error,
616 						 EINVAL,
617 						 RTE_FLOW_ERROR_TYPE_ITEM,
618 						 item,
619 						 "Invalid TNI mask");
620 					return -rte_errno;
621 				}
622 				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
623 					   nvgre_spec->tni, 3);
624 				filter->vni =
625 					rte_be_to_cpu_32(tenant_id_be);
626 				filter->tunnel_type =
627 				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
628 			}
629 			break;
630 
631 		case RTE_FLOW_ITEM_TYPE_GRE:
632 			gre_spec = (const struct rte_flow_item_gre *)item->spec;
633 			gre_mask = (const struct rte_flow_item_gre *)item->mask;
634 
635 			/*
636 			 * Check if GRE item is used to describe protocol.
637 			 * If yes, both spec and mask should be NULL.
638 			 * If no, both spec and mask shouldn't be NULL.
639 			 */
640 			if (!!gre_spec ^ !!gre_mask) {
641 				rte_flow_error_set(error, EINVAL,
642 						   RTE_FLOW_ERROR_TYPE_ITEM,
643 						   item,
644 						   "Invalid GRE item");
645 				return -rte_errno;
646 			}
647 
648 			if (!gre_spec && !gre_mask) {
649 				filter->tunnel_type =
650 				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE;
651 				break;
652 			}
653 			break;
654 
655 		case RTE_FLOW_ITEM_TYPE_VF:
656 			vf_spec = item->spec;
657 			vf = vf_spec->id;
658 			if (!BNXT_PF(bp)) {
659 				rte_flow_error_set(error,
660 						   EINVAL,
661 						   RTE_FLOW_ERROR_TYPE_ITEM,
662 						   item,
663 						   "Configuring on a VF!");
664 				return -rte_errno;
665 			}
666 
667 			if (vf >= bp->pdev->max_vfs) {
668 				rte_flow_error_set(error,
669 						   EINVAL,
670 						   RTE_FLOW_ERROR_TYPE_ITEM,
671 						   item,
672 						   "Incorrect VF id!");
673 				return -rte_errno;
674 			}
675 
676 			if (!attr->transfer) {
677 				rte_flow_error_set(error,
678 						   ENOTSUP,
679 						   RTE_FLOW_ERROR_TYPE_ITEM,
680 						   item,
681 						   "Matching VF traffic without"
682 						   " affecting it (transfer attribute)"
683 						   " is unsupported");
684 				return -rte_errno;
685 			}
686 
687 			filter->mirror_vnic_id =
688 			dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
689 			if (dflt_vnic < 0) {
690 				/* This simply indicates there's no driver
691 				 * loaded. This is not an error.
692 				 */
693 				rte_flow_error_set
694 					(error,
695 					 EINVAL,
696 					 RTE_FLOW_ERROR_TYPE_ITEM,
697 					 item,
698 					 "Unable to get default VNIC for VF");
699 				return -rte_errno;
700 			}
701 
702 			filter->mirror_vnic_id = dflt_vnic;
703 			en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
704 			break;
705 		default:
706 			break;
707 		}
708 		item++;
709 	}
710 	filter->enables = en;
711 	filter->valid_flags = valid_flags;
712 
713 	return 0;
714 }
715 
716 /* Parse attributes */
717 static int
718 bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
719 		     struct rte_flow_error *error)
720 {
721 	/* Must be input direction */
722 	if (!attr->ingress) {
723 		rte_flow_error_set(error,
724 				   EINVAL,
725 				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
726 				   attr,
727 				   "Only support ingress.");
728 		return -rte_errno;
729 	}
730 
731 	/* Not supported */
732 	if (attr->egress) {
733 		rte_flow_error_set(error,
734 				   EINVAL,
735 				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
736 				   attr,
737 				   "No support for egress.");
738 		return -rte_errno;
739 	}
740 
741 	return 0;
742 }
743 
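/* Look for an existing L2 filter whose MAC/VLAN/ethertype match those
 * of the new filter so that it can be reused instead of allocating a
 * new one.
 */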
744 static struct bnxt_filter_info *
745 bnxt_find_matching_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
746 {
747 	struct bnxt_filter_info *mf, *f0;
748 	struct bnxt_vnic_info *vnic0;
749 	int i;
750 
751 	vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
752 	f0 = STAILQ_FIRST(&vnic0->filter);
753 
754 	/* This flow has the same DST MAC as the port/L2 filter. */
755 	if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0)
756 		return f0;
757 
758 	for (i = bp->max_vnics - 1; i >= 0; i--) {
759 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
760 
761 		if (vnic->fw_vnic_id == INVALID_VNIC_ID)
762 			continue;
763 
764 		STAILQ_FOREACH(mf, &vnic->filter, next) {
765 
766 			if (mf->matching_l2_fltr_ptr)
767 				continue;
768 
769 			if (mf->ethertype == nf->ethertype &&
770 			    mf->l2_ovlan == nf->l2_ovlan &&
771 			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
772 			    mf->l2_ivlan == nf->l2_ivlan &&
773 			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
774 			    !memcmp(mf->src_macaddr, nf->src_macaddr,
775 				    RTE_ETHER_ADDR_LEN) &&
776 			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
777 				    RTE_ETHER_ADDR_LEN))
778 				return mf;
779 		}
780 	}
781 	return NULL;
782 }
783 
784 static struct bnxt_filter_info *
785 bnxt_create_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
786 		      struct bnxt_vnic_info *vnic)
787 {
788 	struct bnxt_filter_info *filter1;
789 	int rc;
790 
791 	/* Allocate a new L2 filter.
792 	 * This flow needs a MAC filter that does not match any existing
793 	 * L2 filter.
794 	 */
795 	filter1 = bnxt_get_unused_filter(bp);
796 	if (filter1 == NULL)
797 		return NULL;
798 
799 	memcpy(filter1, nf, sizeof(*filter1));
800 
801 	filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE;
802 	filter1->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
803 	if (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
804 	    nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG) {
805 		filter1->flags |=
806 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
807 		PMD_DRV_LOG(DEBUG, "Create Outer filter\n");
808 	}
809 
810 	if (nf->filter_type == HWRM_CFA_L2_FILTER &&
811 	    (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
812 	     nf->valid_flags & BNXT_FLOW_L2_INNER_SRC_VALID_FLAG)) {
813 		PMD_DRV_LOG(DEBUG, "Create L2 filter for SRC MAC\n");
814 		filter1->flags |=
815 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_SOURCE_VALID;
816 		memcpy(filter1->l2_addr, nf->src_macaddr, RTE_ETHER_ADDR_LEN);
817 	} else {
818 		PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
819 		memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN);
820 	}
821 
822 	if (nf->priority &&
823 	    (nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG ||
824 	     nf->valid_flags & BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) {
825 		/* Tell the FW where to place the filter in the table. */
826 		if (nf->priority > 65535) {
827 			filter1->pri_hint =
828 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER;
829 			/* This will place the filter in TCAM */
830 			filter1->l2_filter_id_hint = (uint64_t)-1;
831 		}
832 	}
833 
834 	if (nf->valid_flags & (BNXT_FLOW_L2_DST_VALID_FLAG |
835 			       BNXT_FLOW_L2_SRC_VALID_FLAG |
836 			       BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
837 			       BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) {
838 		filter1->enables =
839 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
840 			L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
841 		memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
842 	}
843 
844 	if (nf->valid_flags & BNXT_FLOW_L2_DROP_FLAG) {
845 		filter1->flags |=
846 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP;
847 		if (nf->ethertype == RTE_ETHER_TYPE_IPV4) {
848 			/* Num VLANs for drop filter will/should be 0.
849 			 * If the req is memset to 0, then the count will
850 			 * be automatically set to 0.
851 			 */
852 			if (nf->valid_flags & BNXT_FLOW_PARSE_INNER_FLAG) {
853 				filter1->enables |=
854 					L2_FILTER_ALLOC_INPUT_EN_T_NUM_VLANS;
855 			} else {
856 				filter1->enables |=
857 					L2_FILTER_ALLOC_INPUT_EN_NUM_VLANS;
858 				filter1->flags |=
859 				HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
860 			}
861 		}
862 	}
863 
864 	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
865 				     filter1);
866 	if (rc) {
867 		bnxt_free_filter(bp, filter1);
868 		return NULL;
869 	}
870 	return filter1;
871 }
872 
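/* Return an L2 filter for the flow: reuse a matching filter (bumping
 * its reference count) or create a new one and attach it to the VNIC.
 */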
873 struct bnxt_filter_info *
874 bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
875 		   struct bnxt_vnic_info *vnic)
876 {
877 	struct bnxt_filter_info *l2_filter = NULL;
878 
879 	l2_filter = bnxt_find_matching_l2_filter(bp, nf);
880 	if (l2_filter) {
881 		l2_filter->l2_ref_cnt++;
882 	} else {
883 		l2_filter = bnxt_create_l2_filter(bp, nf, vnic);
884 		if (l2_filter) {
885 			STAILQ_INSERT_TAIL(&vnic->filter, l2_filter, next);
886 			l2_filter->vnic = vnic;
887 		}
888 	}
889 	nf->matching_l2_fltr_ptr = l2_filter;
890 
891 	return l2_filter;
892 }
893 
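/* Allocate the firmware resources backing a VNIC (ring group, VNIC
 * and, when more than one Rx queue is used, an RSS context) and apply
 * the basic VNIC configuration.
 */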
894 static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic)
895 {
896 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
897 	uint64_t rx_offloads = dev_conf->rxmode.offloads;
898 	int rc;
899 
900 	rc = bnxt_vnic_grp_alloc(bp, vnic);
901 	if (rc)
902 		goto ret;
903 
904 	rc = bnxt_hwrm_vnic_alloc(bp, vnic);
905 	if (rc) {
906 		PMD_DRV_LOG(ERR, "HWRM vnic alloc failure rc: %x\n", rc);
907 		goto ret;
908 	}
909 	bp->nr_vnics++;
910 
911 	/* RSS context is required only when there is more than one RSS ring */
912 	if (vnic->rx_queue_cnt > 1) {
913 		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0 /* ctx_idx 0 */);
914 		if (rc) {
915 			PMD_DRV_LOG(ERR,
916 				    "HWRM vnic ctx alloc failure: %x\n", rc);
917 			goto ret;
918 		}
919 	} else {
920 		PMD_DRV_LOG(DEBUG, "No RSS context required\n");
921 	}
922 
923 	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
924 		vnic->vlan_strip = true;
925 	else
926 		vnic->vlan_strip = false;
927 
928 	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
929 	if (rc)
930 		goto ret;
931 
932 	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
933 
934 ret:
935 	return rc;
936 }
937 
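/* Check whether the queue set requested by an RSS action matches the
 * queues already programmed into the VNIC; returns 0 on a match and
 * -EINVAL otherwise.
 */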
938 static int match_vnic_rss_cfg(struct bnxt *bp,
939 			      struct bnxt_vnic_info *vnic,
940 			      const struct rte_flow_action_rss *rss)
941 {
942 	unsigned int match = 0, i;
943 
944 	if (vnic->rx_queue_cnt != rss->queue_num)
945 		return -EINVAL;
946 
947 	for (i = 0; i < rss->queue_num; i++) {
948 		if (!bp->rx_queues[rss->queue[i]]->vnic->rx_queue_cnt &&
949 		    !bp->rx_queues[rss->queue[i]]->rx_started)
950 			return -EINVAL;
951 	}
952 
953 	for (i = 0; i < vnic->rx_queue_cnt; i++) {
954 		int j;
955 
956 		for (j = 0; j < vnic->rx_queue_cnt; j++) {
957 			if (bp->grp_info[rss->queue[i]].fw_grp_id ==
958 			    vnic->fw_grp_ids[j])
959 				match++;
960 		}
961 	}
962 
963 	if (match != vnic->rx_queue_cnt) {
964 		PMD_DRV_LOG(ERR,
965 			    "VNIC queue count %d vs queues matched %d\n",
966 			    match, vnic->rx_queue_cnt);
967 		return -EINVAL;
968 	}
969 
970 	return 0;
971 }
972 
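/* Propagate the relevant flags/enables from the underlying L2 filter
 * into the flow's filter. Pure L2 flows take over the whole L2 filter
 * configuration; ntuple/EM flows only inherit the L2 filter id,
 * reference count and flow id.
 */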
973 static void
974 bnxt_update_filter_flags_en(struct bnxt_filter_info *filter,
975 			    struct bnxt_filter_info *filter1,
976 			    int use_ntuple)
977 {
978 	if (!use_ntuple &&
979 	    !(filter->valid_flags &
980 	      ~(BNXT_FLOW_L2_DST_VALID_FLAG |
981 		BNXT_FLOW_L2_SRC_VALID_FLAG |
982 		BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
983 		BNXT_FLOW_L2_INNER_DST_VALID_FLAG |
984 		BNXT_FLOW_L2_DROP_FLAG |
985 		BNXT_FLOW_PARSE_INNER_FLAG))) {
986 		filter->flags = filter1->flags;
987 		filter->enables = filter1->enables;
988 		filter->filter_type = HWRM_CFA_L2_FILTER;
989 		memcpy(filter->l2_addr, filter1->l2_addr, RTE_ETHER_ADDR_LEN);
990 		memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
991 		filter->pri_hint = filter1->pri_hint;
992 		filter->l2_filter_id_hint = filter1->l2_filter_id_hint;
993 	}
994 	filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
995 	filter->l2_ref_cnt = filter1->l2_ref_cnt;
996 	filter->flow_id = filter1->flow_id;
997 	PMD_DRV_LOG(DEBUG,
998 		"l2_filter: %p fw_l2_filter_id %" PRIx64 " l2_ref_cnt %u\n",
999 		filter1, filter->fw_l2_filter_id, filter->l2_ref_cnt);
1000 }
1001 
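/* Parse the flow actions (queue, drop, count, VF redirect, RSS, mark)
 * and finish populating the filter, creating or reusing the L2 filter
 * and VNIC needed to back the flow.
 */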
1002 static int
1003 bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
1004 			     const struct rte_flow_item pattern[],
1005 			     const struct rte_flow_action actions[],
1006 			     const struct rte_flow_attr *attr,
1007 			     struct rte_flow_error *error,
1008 			     struct bnxt_filter_info *filter)
1009 {
1010 	const struct rte_flow_action *act =
1011 		bnxt_flow_non_void_action(actions);
1012 	struct bnxt *bp = dev->data->dev_private;
1013 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1014 	struct bnxt_vnic_info *vnic = NULL, *vnic0 = NULL;
1015 	const struct rte_flow_action_queue *act_q;
1016 	const struct rte_flow_action_vf *act_vf;
1017 	struct bnxt_filter_info *filter1 = NULL;
1018 	const struct rte_flow_action_rss *rss;
1019 	struct bnxt_rx_queue *rxq = NULL;
1020 	int dflt_vnic, vnic_id;
1021 	unsigned int rss_idx;
1022 	uint32_t vf = 0, i;
1023 	int rc, use_ntuple;
1024 
1025 	rc =
1026 	bnxt_validate_and_parse_flow_type(bp, attr, pattern, error, filter);
1027 	if (rc != 0)
1028 		goto ret;
1029 
1030 	rc = bnxt_flow_parse_attr(attr, error);
1031 	if (rc != 0)
1032 		goto ret;
1033 
1034 	/* Only the ingress attribute is supported right now. */
1035 	if (filter->filter_type == HWRM_CFA_EM_FILTER)
1036 		filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;
1037 
1038 	use_ntuple = bnxt_filter_type_check(pattern, error);
1039 
1040 start:
1041 	switch (act->type) {
1042 	case RTE_FLOW_ACTION_TYPE_QUEUE:
1043 		/* Allow this flow. Redirect to a VNIC. */
1044 		act_q = (const struct rte_flow_action_queue *)act->conf;
1045 		if (!act_q->index || act_q->index >= bp->rx_nr_rings) {
1046 			rte_flow_error_set(error,
1047 					   EINVAL,
1048 					   RTE_FLOW_ERROR_TYPE_ACTION,
1049 					   act,
1050 					   "Invalid queue ID.");
1051 			rc = -rte_errno;
1052 			goto ret;
1053 		}
1054 		PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);
1055 
1056 		vnic_id = attr->group;
1057 		if (!vnic_id) {
1058 			PMD_DRV_LOG(DEBUG, "Group id is 0\n");
1059 			vnic_id = act_q->index;
1060 		}
1061 
1062 		BNXT_VALID_VNIC_OR_RET(bp, vnic_id);
1063 
1064 		vnic = &bp->vnic_info[vnic_id];
1065 		if (vnic->rx_queue_cnt) {
1066 			if (vnic->start_grp_id != act_q->index) {
1067 				PMD_DRV_LOG(ERR,
1068 					    "VNIC already in use\n");
1069 				rte_flow_error_set(error,
1070 						   EINVAL,
1071 						   RTE_FLOW_ERROR_TYPE_ACTION,
1072 						   act,
1073 						   "VNIC already in use");
1074 				rc = -rte_errno;
1075 				goto ret;
1076 			}
1077 			goto use_vnic;
1078 		}
1079 
1080 		rxq = bp->rx_queues[act_q->index];
1081 
1082 		if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq &&
1083 		    vnic->fw_vnic_id != INVALID_HW_RING_ID)
1084 			goto use_vnic;
1085 
1086 		if (!rxq) {
1087 			PMD_DRV_LOG(ERR,
1088 				    "Queue invalid or used with other VNIC\n");
1089 			rte_flow_error_set(error,
1090 					   EINVAL,
1091 					   RTE_FLOW_ERROR_TYPE_ACTION,
1092 					   act,
1093 					   "Queue invalid or in use");
1094 			rc = -rte_errno;
1095 			goto ret;
1096 		}
1097 
1098 		rxq->vnic = vnic;
1099 		rxq->rx_started = 1;
1100 		vnic->rx_queue_cnt++;
1101 		vnic->start_grp_id = act_q->index;
1102 		vnic->end_grp_id = act_q->index;
1103 		vnic->func_default = 0;	/* This is not a default VNIC. */
1104 
1105 		PMD_DRV_LOG(DEBUG, "VNIC found\n");
1106 
1107 		rc = bnxt_vnic_prep(bp, vnic);
1108 		if (rc)  {
1109 			rte_flow_error_set(error,
1110 					   EINVAL,
1111 					   RTE_FLOW_ERROR_TYPE_ACTION,
1112 					   act,
1113 					   "VNIC prep fail");
1114 			rc = -rte_errno;
1115 			goto ret;
1116 		}
1117 
1118 		PMD_DRV_LOG(DEBUG,
1119 			    "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
1120 			    act_q->index, vnic, vnic->fw_grp_ids);
1121 
1122 use_vnic:
1123 		vnic->ff_pool_idx = vnic_id;
1124 		PMD_DRV_LOG(DEBUG,
1125 			    "Setting vnic ff_idx %d\n", vnic->ff_pool_idx);
1126 		filter->dst_id = vnic->fw_vnic_id;
1127 
1128 		/* For ntuple filter, create the L2 filter with default VNIC.
1129 		 * The user specified redirect queue will be set while creating
1130 		 * the ntuple filter in hardware.
1131 		 */
1132 		vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
1133 		if (use_ntuple)
1134 			filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1135 		else
1136 			filter1 = bnxt_get_l2_filter(bp, filter, vnic);
1137 		if (filter1 == NULL) {
1138 			rte_flow_error_set(error,
1139 					   ENOSPC,
1140 					   RTE_FLOW_ERROR_TYPE_ACTION,
1141 					   act,
1142 					   "Filter not available");
1143 			rc = -rte_errno;
1144 			goto ret;
1145 		}
1146 
1147 		PMD_DRV_LOG(DEBUG, "new fltr: %p l2fltr: %p l2_ref_cnt: %d\n",
1148 			    filter, filter1, filter1->l2_ref_cnt);
1149 		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
1150 		break;
1151 	case RTE_FLOW_ACTION_TYPE_DROP:
1152 		vnic0 = &bp->vnic_info[0];
1153 		filter->dst_id = vnic0->fw_vnic_id;
1154 		filter->valid_flags |= BNXT_FLOW_L2_DROP_FLAG;
1155 		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1156 		if (filter1 == NULL) {
1157 			rte_flow_error_set(error,
1158 					   ENOSPC,
1159 					   RTE_FLOW_ERROR_TYPE_ACTION,
1160 					   act,
1161 					   "Filter not available");
1162 			rc = -rte_errno;
1163 			goto ret;
1164 		}
1165 
1166 		if (filter->filter_type == HWRM_CFA_EM_FILTER)
1167 			filter->flags =
1168 				HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
1169 		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1170 			filter->flags =
1171 				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
1172 
1173 		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
1174 		break;
1175 	case RTE_FLOW_ACTION_TYPE_COUNT:
1176 		vnic0 = &bp->vnic_info[0];
1177 		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1178 		if (filter1 == NULL) {
1179 			rte_flow_error_set(error,
1180 					   ENOSPC,
1181 					   RTE_FLOW_ERROR_TYPE_ACTION,
1182 					   act,
1183 					   "New filter not available");
1184 			rc = -rte_errno;
1185 			goto ret;
1186 		}
1187 
1188 		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
1189 		filter->flow_id = filter1->flow_id;
1190 		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
1191 		break;
1192 	case RTE_FLOW_ACTION_TYPE_VF:
1193 		act_vf = (const struct rte_flow_action_vf *)act->conf;
1194 		vf = act_vf->id;
1195 
1196 		if (filter->tunnel_type ==
1197 		    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN ||
1198 		    filter->tunnel_type ==
1199 		    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE) {
1200 			/* If issued on a VF, the VF must be trusted and the id must be 0 */
1201 			if (BNXT_VF(bp)) {
1202 				if (!BNXT_VF_IS_TRUSTED(bp) || vf) {
1203 					rte_flow_error_set(error, EINVAL,
1204 						RTE_FLOW_ERROR_TYPE_ACTION,
1205 						act,
1206 						"Incorrect VF");
1207 					rc = -rte_errno;
1208 					goto ret;
1209 				}
1210 			}
1211 
1212 			filter->enables |= filter->tunnel_type;
1213 			filter->filter_type = HWRM_CFA_TUNNEL_REDIRECT_FILTER;
1214 			goto done;
1215 		}
1216 
1217 		if (vf >= bp->pdev->max_vfs) {
1218 			rte_flow_error_set(error,
1219 					   EINVAL,
1220 					   RTE_FLOW_ERROR_TYPE_ACTION,
1221 					   act,
1222 					   "Incorrect VF id!");
1223 			rc = -rte_errno;
1224 			goto ret;
1225 		}
1226 
1227 		filter->mirror_vnic_id =
1228 		dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
1229 		if (dflt_vnic < 0) {
1230 			/* This simply indicates there's no driver loaded.
1231 			 * This is not an error.
1232 			 */
1233 			rte_flow_error_set(error,
1234 					   EINVAL,
1235 					   RTE_FLOW_ERROR_TYPE_ACTION,
1236 					   act,
1237 					   "Unable to get default VNIC for VF");
1238 			rc = -rte_errno;
1239 			goto ret;
1240 		}
1241 
1242 		filter->mirror_vnic_id = dflt_vnic;
1243 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
1244 
1245 		vnic0 = &bp->vnic_info[0];
1246 		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1247 		if (filter1 == NULL) {
1248 			rte_flow_error_set(error,
1249 					   ENOSPC,
1250 					   RTE_FLOW_ERROR_TYPE_ACTION,
1251 					   act,
1252 					   "New filter not available");
1253 			rc = -rte_errno;
1254 			goto ret;
1255 		}
1256 
1257 		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
1258 		filter->flow_id = filter1->flow_id;
1259 		break;
1260 	case RTE_FLOW_ACTION_TYPE_RSS:
1261 		rss = (const struct rte_flow_action_rss *)act->conf;
1262 
1263 		vnic_id = attr->group;
1264 
1265 		BNXT_VALID_VNIC_OR_RET(bp, vnic_id);
1266 		vnic = &bp->vnic_info[vnic_id];
1267 
1268 		/* Check if requested RSS config matches RSS config of VNIC
1269 		 * only if it is not a fresh VNIC configuration.
1270 		 * Otherwise the existing VNIC configuration can be used.
1271 		 */
1272 		if (vnic->rx_queue_cnt) {
1273 			rc = match_vnic_rss_cfg(bp, vnic, rss);
1274 			if (rc) {
1275 				PMD_DRV_LOG(ERR,
1276 					    "VNIC and RSS config mismatch\n");
1277 				rte_flow_error_set(error,
1278 						   EINVAL,
1279 						   RTE_FLOW_ERROR_TYPE_ACTION,
1280 						   act,
1281 						   "VNIC and RSS cfg mismatch");
1282 				rc = -rte_errno;
1283 				goto ret;
1284 			}
1285 			goto vnic_found;
1286 		}
1287 
1288 		for (i = 0; i < rss->queue_num; i++) {
1289 			PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n",
1290 				    rss->queue[i]);
1291 
1292 			if (!rss->queue[i] ||
1293 			    rss->queue[i] >= bp->rx_nr_rings ||
1294 			    !bp->rx_queues[rss->queue[i]]) {
1295 				rte_flow_error_set(error,
1296 						   EINVAL,
1297 						   RTE_FLOW_ERROR_TYPE_ACTION,
1298 						   act,
1299 						   "Invalid queue ID for RSS");
1300 				rc = -rte_errno;
1301 				goto ret;
1302 			}
1303 			rxq = bp->rx_queues[rss->queue[i]];
1304 
1305 			if (bp->vnic_info[0].fw_grp_ids[rss->queue[i]] !=
1306 			    INVALID_HW_RING_ID) {
1307 				PMD_DRV_LOG(ERR,
1308 					    "queue active with other VNIC\n");
1309 				rte_flow_error_set(error,
1310 						   EINVAL,
1311 						   RTE_FLOW_ERROR_TYPE_ACTION,
1312 						   act,
1313 						   "Invalid queue ID for RSS");
1314 				rc = -rte_errno;
1315 				goto ret;
1316 			}
1317 
1318 			rxq->vnic = vnic;
1319 			rxq->rx_started = 1;
1320 			vnic->rx_queue_cnt++;
1321 		}
1322 
1323 		vnic->start_grp_id = rss->queue[0];
1324 		vnic->end_grp_id = rss->queue[rss->queue_num - 1];
1325 		vnic->func_default = 0;	/* This is not a default VNIC. */
1326 
1327 		rc = bnxt_vnic_prep(bp, vnic);
1328 		if (rc) {
1329 			rte_flow_error_set(error,
1330 					   EINVAL,
1331 					   RTE_FLOW_ERROR_TYPE_ACTION,
1332 					   act,
1333 					   "VNIC prep fail");
1334 			rc = -rte_errno;
1335 			goto ret;
1336 		}
1337 
1338 		PMD_DRV_LOG(DEBUG,
1339 			    "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
1340 			    vnic_id, vnic, vnic->fw_grp_ids);
1341 
1342 		vnic->ff_pool_idx = vnic_id;
1343 		PMD_DRV_LOG(DEBUG,
1344 			    "Setting vnic ff_pool_idx %d\n", vnic->ff_pool_idx);
1345 
1346 		/* This can be done only after vnic_grp_alloc is done. */
1347 		for (i = 0; i < vnic->rx_queue_cnt; i++) {
1348 			vnic->fw_grp_ids[i] =
1349 				bp->grp_info[rss->queue[i]].fw_grp_id;
1350 			/* Make sure vnic0 does not use these rings. */
1351 			bp->vnic_info[0].fw_grp_ids[rss->queue[i]] =
1352 				INVALID_HW_RING_ID;
1353 		}
1354 
1355 		for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; ) {
1356 			for (i = 0; i < vnic->rx_queue_cnt; i++)
1357 				vnic->rss_table[rss_idx++] =
1358 					vnic->fw_grp_ids[i];
1359 		}
1360 
1361 		/* Configure RSS only if the queue count is > 1 */
1362 		if (vnic->rx_queue_cnt > 1) {
1363 			vnic->hash_type =
1364 				bnxt_rte_to_hwrm_hash_types(rss->types);
1365 
1366 			if (!rss->key_len) {
1367 				/* If a hash key has not been specified,
1368 				 * use a random hash key.
1369 				 */
1370 				prandom_bytes(vnic->rss_hash_key,
1371 					      HW_HASH_KEY_SIZE);
1372 			} else {
1373 				if (rss->key_len > HW_HASH_KEY_SIZE)
1374 					memcpy(vnic->rss_hash_key,
1375 					       rss->key,
1376 					       HW_HASH_KEY_SIZE);
1377 				else
1378 					memcpy(vnic->rss_hash_key,
1379 					       rss->key,
1380 					       rss->key_len);
1381 			}
1382 			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1383 		} else {
1384 			PMD_DRV_LOG(DEBUG, "No RSS config required\n");
1385 		}
1386 
1387 vnic_found:
1388 		filter->dst_id = vnic->fw_vnic_id;
1389 		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
1390 		if (filter1 == NULL) {
1391 			rte_flow_error_set(error,
1392 					   ENOSPC,
1393 					   RTE_FLOW_ERROR_TYPE_ACTION,
1394 					   act,
1395 					   "New filter not available");
1396 			rc = -rte_errno;
1397 			goto ret;
1398 		}
1399 
1400 		PMD_DRV_LOG(DEBUG, "L2 filter created\n");
1401 		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
1402 		break;
1403 	case RTE_FLOW_ACTION_TYPE_MARK:
1404 		if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) {
1405 			PMD_DRV_LOG(DEBUG,
1406 				    "Disable vector processing for mark\n");
1407 			rte_flow_error_set(error,
1408 					   ENOTSUP,
1409 					   RTE_FLOW_ERROR_TYPE_ACTION,
1410 					   act,
1411 					   "Disable vector processing for mark");
1412 			rc = -rte_errno;
1413 			goto ret;
1414 		}
1415 
1416 		if (bp->mark_table == NULL) {
1417 			rte_flow_error_set(error,
1418 					   ENOMEM,
1419 					   RTE_FLOW_ERROR_TYPE_ACTION,
1420 					   act,
1421 					   "Mark table not allocated.");
1422 			rc = -rte_errno;
1423 			goto ret;
1424 		}
1425 
1426 		filter->valid_flags |= BNXT_FLOW_MARK_FLAG;
1427 		filter->mark = ((const struct rte_flow_action_mark *)
1428 				act->conf)->id;
1429 		PMD_DRV_LOG(DEBUG, "Mark the flow %d\n", filter->mark);
1430 		break;
1431 	default:
1432 		rte_flow_error_set(error,
1433 				   EINVAL,
1434 				   RTE_FLOW_ERROR_TYPE_ACTION,
1435 				   act,
1436 				   "Invalid action.");
1437 		rc = -rte_errno;
1438 		goto ret;
1439 	}
1440 
1441 done:
1442 	act = bnxt_flow_non_void_action(++act);
1443 	while (act->type != RTE_FLOW_ACTION_TYPE_END)
1444 		goto start;
1445 
1446 	return rc;
1447 ret:
1448 
1449 	if (filter1) {
1450 		bnxt_hwrm_clear_l2_filter(bp, filter1);
1451 		bnxt_free_filter(bp, filter1);
1452 	}
1453 
1454 	if (rte_errno)  {
1455 		if (vnic && STAILQ_EMPTY(&vnic->filter))
1456 			vnic->rx_queue_cnt = 0;
1457 
1458 		if (rxq && !vnic->rx_queue_cnt)
1459 			rxq->vnic = &bp->vnic_info[0];
1460 	}
1461 	return -rte_errno;
1462 }
1463 
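/* Return the VNIC whose fw_vnic_id matches the filter's destination id,
 * or NULL if none is found.
 */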
1464 static
1465 struct bnxt_vnic_info *find_matching_vnic(struct bnxt *bp,
1466 					  struct bnxt_filter_info *filter)
1467 {
1468 	struct bnxt_vnic_info *vnic = NULL;
1469 	unsigned int i;
1470 
1471 	for (i = 0; i < bp->max_vnics; i++) {
1472 		vnic = &bp->vnic_info[i];
1473 		if (vnic->fw_vnic_id != INVALID_VNIC_ID &&
1474 		    filter->dst_id == vnic->fw_vnic_id) {
1475 			PMD_DRV_LOG(DEBUG, "Found matching VNIC Id %d\n",
1476 				    vnic->ff_pool_idx);
1477 			return vnic;
1478 		}
1479 	}
1480 	return NULL;
1481 }
1482 
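/* rte_flow validate callback: parse the flow, then immediately release
 * any filter and VNIC resources that were created during validation.
 */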
1483 static int
1484 bnxt_flow_validate(struct rte_eth_dev *dev,
1485 		   const struct rte_flow_attr *attr,
1486 		   const struct rte_flow_item pattern[],
1487 		   const struct rte_flow_action actions[],
1488 		   struct rte_flow_error *error)
1489 {
1490 	struct bnxt *bp = dev->data->dev_private;
1491 	struct bnxt_vnic_info *vnic = NULL;
1492 	struct bnxt_filter_info *filter;
1493 	int ret = 0;
1494 
1495 	bnxt_acquire_flow_lock(bp);
1496 	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
1497 	if (ret != 0) {
1498 		bnxt_release_flow_lock(bp);
1499 		return ret;
1500 	}
1501 
1502 	filter = bnxt_get_unused_filter(bp);
1503 	if (filter == NULL) {
1504 		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
1505 		bnxt_release_flow_lock(bp);
1506 		return -ENOMEM;
1507 	}
1508 
1509 	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
1510 					   error, filter);
1511 	if (ret)
1512 		goto exit;
1513 
1514 	vnic = find_matching_vnic(bp, filter);
1515 	if (vnic) {
1516 		if (STAILQ_EMPTY(&vnic->filter)) {
1517 			rte_free(vnic->fw_grp_ids);
1518 			bnxt_hwrm_vnic_ctx_free(bp, vnic);
1519 			bnxt_hwrm_vnic_free(bp, vnic);
1520 			vnic->rx_queue_cnt = 0;
1521 			PMD_DRV_LOG(DEBUG, "Free VNIC\n");
1522 		}
1523 	}
1524 
1525 	if (filter->filter_type == HWRM_CFA_EM_FILTER)
1526 		bnxt_hwrm_clear_em_filter(bp, filter);
1527 	else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1528 		bnxt_hwrm_clear_ntuple_filter(bp, filter);
1529 	else
1530 		bnxt_hwrm_clear_l2_filter(bp, filter);
1531 
1532 exit:
1533 	/* No need to hold on to this filter if we are just validating flow */
1534 	bnxt_free_filter(bp, filter);
1535 	bnxt_release_flow_lock(bp);
1536 
1537 	return ret;
1538 }
1539 
1540 static void
1541 bnxt_update_filter(struct bnxt *bp, struct bnxt_filter_info *old_filter,
1542 		   struct bnxt_filter_info *new_filter)
1543 {
1544 	/* Clear the new L2 filter that was created in the previous step in
1545 	 * bnxt_validate_and_parse_flow. For L2 filters, we will use the new
1546 	 * filter which points to the new destination queue and so we clear
1547 	 * the previous L2 filter. For ntuple filters, we are going to reuse
1548 	 * the old L2 filter and create new NTUPLE filter with this new
1549 	 * destination queue subsequently during bnxt_flow_create. So we
1550 	 * decrement the ref cnt of the L2 filter that would've been bumped
1551 	 * up previously in bnxt_validate_and_parse_flow as the old n-tuple
1552 	 * filter that was referencing it will be deleted now.
1553 	 */
1554 	bnxt_hwrm_clear_l2_filter(bp, old_filter);
1555 	if (new_filter->filter_type == HWRM_CFA_L2_FILTER) {
1556 		bnxt_hwrm_set_l2_filter(bp, new_filter->dst_id, new_filter);
1557 	} else {
1558 		if (new_filter->filter_type == HWRM_CFA_EM_FILTER)
1559 			bnxt_hwrm_clear_em_filter(bp, old_filter);
1560 		if (new_filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1561 			bnxt_hwrm_clear_ntuple_filter(bp, old_filter);
1562 	}
1563 }
1564 
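/* Search all existing flows for a filter with the same match fields as
 * nf. Returns -EEXIST if an identical flow (same destination) already
 * exists, -EXDEV if a matching flow was found and updated with the new
 * destination, and 0 if no matching flow exists.
 */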
1565 static int
1566 bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
1567 {
1568 	struct bnxt_filter_info *mf;
1569 	struct rte_flow *flow;
1570 	int i;
1571 
1572 	for (i = bp->max_vnics - 1; i >= 0; i--) {
1573 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1574 
1575 		if (vnic->fw_vnic_id == INVALID_VNIC_ID)
1576 			continue;
1577 
1578 		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1579 			mf = flow->filter;
1580 
1581 			if (mf->filter_type == nf->filter_type &&
1582 			    mf->flags == nf->flags &&
1583 			    mf->src_port == nf->src_port &&
1584 			    mf->src_port_mask == nf->src_port_mask &&
1585 			    mf->dst_port == nf->dst_port &&
1586 			    mf->dst_port_mask == nf->dst_port_mask &&
1587 			    mf->ip_protocol == nf->ip_protocol &&
1588 			    mf->ip_addr_type == nf->ip_addr_type &&
1589 			    mf->ethertype == nf->ethertype &&
1590 			    mf->vni == nf->vni &&
1591 			    mf->tunnel_type == nf->tunnel_type &&
1592 			    mf->l2_ovlan == nf->l2_ovlan &&
1593 			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
1594 			    mf->l2_ivlan == nf->l2_ivlan &&
1595 			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
1596 			    !memcmp(mf->l2_addr, nf->l2_addr,
1597 				    RTE_ETHER_ADDR_LEN) &&
1598 			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
1599 				    RTE_ETHER_ADDR_LEN) &&
1600 			    !memcmp(mf->src_macaddr, nf->src_macaddr,
1601 				    RTE_ETHER_ADDR_LEN) &&
1602 			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
1603 				    RTE_ETHER_ADDR_LEN) &&
1604 			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
1605 				    sizeof(nf->src_ipaddr)) &&
1606 			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
1607 				    sizeof(nf->src_ipaddr_mask)) &&
1608 			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
1609 				    sizeof(nf->dst_ipaddr)) &&
1610 			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
1611 				    sizeof(nf->dst_ipaddr_mask))) {
1612 				if (mf->dst_id == nf->dst_id)
1613 					return -EEXIST;
1614 				/* Free the old filter, update flow
1615 				 * with new filter
1616 				 */
1617 				bnxt_update_filter(bp, mf, nf);
1618 				STAILQ_REMOVE(&vnic->filter, mf,
1619 					      bnxt_filter_info, next);
1620 				STAILQ_INSERT_TAIL(&vnic->filter, nf, next);
1621 				bnxt_free_filter(bp, mf);
1622 				flow->filter = nf;
1623 				return -EXDEV;
1624 			}
1625 		}
1626 	}
1627 	return 0;
1628 }
1629 
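/* rte_flow create callback: validate and parse the flow, program the
 * HW filter (EM, ntuple, L2 or tunnel redirect) and link the new flow
 * to its VNIC.
 */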
1630 static struct rte_flow *
1631 bnxt_flow_create(struct rte_eth_dev *dev,
1632 		 const struct rte_flow_attr *attr,
1633 		 const struct rte_flow_item pattern[],
1634 		 const struct rte_flow_action actions[],
1635 		 struct rte_flow_error *error)
1636 {
1637 	struct bnxt *bp = dev->data->dev_private;
1638 	struct bnxt_vnic_info *vnic = NULL;
1639 	struct bnxt_filter_info *filter;
1640 	bool update_flow = false;
1641 	struct rte_flow *flow;
1642 	int ret = 0;
1643 	uint32_t tun_type, flow_id;
1644 
1645 	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1646 		rte_flow_error_set(error, EINVAL,
1647 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1648 				   "Failed to create flow, Not a Trusted VF!");
1649 		return NULL;
1650 	}
1651 
1652 	if (!dev->data->dev_started) {
1653 		rte_flow_error_set(error,
1654 				   EINVAL,
1655 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1656 				   NULL,
1657 				   "Device must be started");
1658 		return NULL;
1659 	}
1660 
1661 	flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
1662 	if (!flow) {
1663 		rte_flow_error_set(error, ENOMEM,
1664 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1665 				   "Failed to allocate memory");
1666 		return flow;
1667 	}
1668 
1669 	bnxt_acquire_flow_lock(bp);
1670 	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
1671 	if (ret != 0) {
1672 		PMD_DRV_LOG(ERR, "Not a valid flow.\n");
1673 		goto free_flow;
1674 	}
1675 
1676 	filter = bnxt_get_unused_filter(bp);
1677 	if (filter == NULL) {
1678 		rte_flow_error_set(error, ENOSPC,
1679 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1680 				   "Not enough resources for a new flow");
1681 		goto free_flow;
1682 	}
1683 
1684 	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
1685 					   error, filter);
1686 	if (ret != 0)
1687 		goto free_filter;
1688 
1689 	ret = bnxt_match_filter(bp, filter);
1690 	if (ret == -EEXIST) {
1691 		PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
1692 		/* Clear the filter that was created as part of
1693 		 * validate_and_parse_flow() above
1694 		 */
1695 		bnxt_hwrm_clear_l2_filter(bp, filter);
1696 		goto free_filter;
1697 	} else if (ret == -EXDEV) {
1698 		PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
1699 		PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
1700 		update_flow = true;
1701 	}
1702 
1703 	/* If tunnel redirection to a VF/PF is specified then only tunnel_type
1704 	 * is set and enable is set to the tunnel type. Issue hwrm cmd directly
1705 	 * in such a case.
1706 	 */
1707 	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
1708 	    filter->enables == filter->tunnel_type) {
1709 		ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
1710 		if (ret) {
1711 			rte_flow_error_set(error, -ret,
1712 					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1713 					   "Unable to query tunnel to VF");
1714 			goto free_filter;
1715 		}
1716 		if (tun_type == (1U << filter->tunnel_type)) {
1717 			ret =
1718 			bnxt_hwrm_tunnel_redirect_free(bp,
1719 						       filter->tunnel_type);
1720 			if (ret) {
1721 				PMD_DRV_LOG(ERR,
1722 					    "Unable to free existing tunnel\n");
1723 				rte_flow_error_set(error, -ret,
1724 						   RTE_FLOW_ERROR_TYPE_HANDLE,
1725 						   NULL,
1726 						   "Unable to free preexisting "
1727 						   "tunnel on VF");
1728 				goto free_filter;
1729 			}
1730 		}
1731 		ret = bnxt_hwrm_tunnel_redirect(bp, filter->tunnel_type);
1732 		if (ret) {
1733 			rte_flow_error_set(error, -ret,
1734 					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1735 					   "Unable to redirect tunnel to VF");
1736 			goto free_filter;
1737 		}
1738 		vnic = &bp->vnic_info[0];
1739 		goto done;
1740 	}
1741 
1742 	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
1743 		filter->enables |=
1744 			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1745 		ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
1746 	}
1747 
1748 	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
1749 		filter->enables |=
1750 			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1751 		ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
1752 	}
1753 
1754 	vnic = find_matching_vnic(bp, filter);
1755 done:
1756 	if (!ret || update_flow) {
1757 		flow->filter = filter;
1758 		flow->vnic = vnic;
1759 		if (update_flow) {
1760 			ret = -EXDEV;
1761 			goto free_flow;
1762 		}
1763 
1764 		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
1765 		PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
1766 		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
1767 		if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
1768 			PMD_DRV_LOG(DEBUG,
1769 				    "Mark action: mark id 0x%x, flow id 0x%x\n",
1770 				    filter->mark, filter->flow_id);
1771 
1772 			/* TCAM and EM should be 16-bit only.
1773 			 * Other modes not supported.
1774 			 */
1775 			flow_id = filter->flow_id & BNXT_FLOW_ID_MASK;
1776 			if (bp->mark_table[flow_id].valid) {
1777 				PMD_DRV_LOG(ERR,
1778 					    "Entry for Mark id 0x%x occupied"
1779 					    " flow id 0x%x\n",
1780 					    filter->mark, filter->flow_id);
1781 				goto free_filter;
1782 			}
1783 			bp->mark_table[flow_id].valid = true;
1784 			bp->mark_table[flow_id].mark_id = filter->mark;
1785 		}
1786 		bnxt_release_flow_lock(bp);
1787 		return flow;
1788 	}
1789 
1790 free_filter:
1791 	bnxt_free_filter(bp, filter);
1792 free_flow:
1793 	if (ret == -EEXIST)
1794 		rte_flow_error_set(error, ret,
1795 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1796 				   "Matching Flow exists.");
1797 	else if (ret == -EXDEV)
1798 		rte_flow_error_set(error, 0,
1799 				   RTE_FLOW_ERROR_TYPE_NONE, NULL,
1800 				   "Flow with pattern exists, updating destination queue");
1801 	else if (!rte_errno)
1802 		rte_flow_error_set(error, -ret,
1803 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1804 				   "Failed to create flow.");
1805 	rte_free(flow);
1806 	flow = NULL;
1807 	bnxt_release_flow_lock(bp);
1808 	return flow;
1809 }
1810 
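/* Undo a tunnel-redirect flow: free the tunnel redirection in FW only
 * when the tunnel currently belongs to this function.
 */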
1811 static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
1812 					       struct bnxt_filter_info *filter,
1813 					       struct rte_flow_error *error)
1814 {
1815 	uint16_t tun_dst_fid;
1816 	uint32_t tun_type;
1817 	int ret = 0;
1818 
1819 	ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
1820 	if (ret) {
1821 		rte_flow_error_set(error, -ret,
1822 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1823 				   "Unable to query tunnel to VF");
1824 		return ret;
1825 	}
1826 	if (tun_type == (1U << filter->tunnel_type)) {
1827 		ret = bnxt_hwrm_tunnel_redirect_info(bp, filter->tunnel_type,
1828 						     &tun_dst_fid);
1829 		if (ret) {
1830 			rte_flow_error_set(error, -ret,
1831 					   RTE_FLOW_ERROR_TYPE_HANDLE,
1832 					   NULL,
1833 					   "tunnel_redirect info cmd fail");
1834 			return ret;
1835 		}
1836 		PMD_DRV_LOG(INFO, "Pre-existing tunnel fid = %x vf->fid = %x\n",
1837 			    tun_dst_fid + bp->first_vf_id, bp->fw_fid);
1838 
1839 		/* Tunnel doesn't belong to this VF, so don't send HWRM
1840 		 * cmd, just delete the flow from driver
1841 		 */
1842 		if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id))
1843 			PMD_DRV_LOG(ERR,
1844 				    "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n");
1845 		else
1846 			ret = bnxt_hwrm_tunnel_redirect_free(bp,
1847 							filter->tunnel_type);
1848 	}
1849 	return ret;
1850 }
1851 
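/* Tear down a single flow: clear the HW filters it owns, release its
 * mark table entry, unlink it from the VNIC and free the VNIC when this
 * was its last flow. The flow lock is held by the callers.
 */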
1852 static int
1853 _bnxt_flow_destroy(struct bnxt *bp,
1854 		   struct rte_flow *flow,
1855 		    struct rte_flow_error *error)
1856 {
1857 	struct bnxt_filter_info *filter;
1858 	struct bnxt_vnic_info *vnic;
1859 	int ret = 0;
1860 	uint32_t flow_id;
1861 
1862 	filter = flow->filter;
1863 	vnic = flow->vnic;
1864 
1865 	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
1866 	    filter->enables == filter->tunnel_type) {
1867 		ret = bnxt_handle_tunnel_redirect_destroy(bp, filter, error);
1868 		if (!ret)
1869 			goto done;
1870 		else
1871 			return ret;
1872 	}
1873 
1874 	ret = bnxt_match_filter(bp, filter);
1875 	if (ret == 0)
1876 		PMD_DRV_LOG(ERR, "Could not find matching flow\n");
1877 
1878 	if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
1879 		flow_id = filter->flow_id & BNXT_FLOW_ID_MASK;
1880 		memset(&bp->mark_table[flow_id], 0,
1881 		       sizeof(bp->mark_table[flow_id]));
1882 		filter->flow_id = 0;
1883 	}
1884 
1885 	if (filter->filter_type == HWRM_CFA_EM_FILTER)
1886 		ret = bnxt_hwrm_clear_em_filter(bp, filter);
1887 	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1888 		ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1889 	ret = bnxt_hwrm_clear_l2_filter(bp, filter);
1890 
1891 done:
1892 	if (!ret) {
1893 		/* If it is an L2 drop filter, the FW updates the BC/MC
1894 		 * records when the filter is created.
1895 		 * Once this filter is removed, issue the set_rx_mask command
1896 		 * to reset the BC/MC records in the HW to the settings
1897 		 * that were in place before the drop filter was created.
1898 		 */
1899 		if (filter->valid_flags & BNXT_FLOW_L2_DROP_FLAG)
1900 			bnxt_set_rx_mask_no_vlan(bp, &bp->vnic_info[0]);
1901 
1902 		STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
1903 		bnxt_free_filter(bp, filter);
1904 		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
1905 		rte_free(flow);
1906 
1907 		/* If this was the last flow associated with this vnic,
1908 		 * switch the queue back to the RSS pool.
1909 		 */
1910 		if (vnic && !vnic->func_default &&
1911 		    STAILQ_EMPTY(&vnic->flow_list)) {
1912 			rte_free(vnic->fw_grp_ids);
1913 			if (vnic->rx_queue_cnt > 1)
1914 				bnxt_hwrm_vnic_ctx_free(bp, vnic);
1915 
1916 			bnxt_hwrm_vnic_free(bp, vnic);
1917 			vnic->rx_queue_cnt = 0;
1918 		}
1919 	} else {
1920 		rte_flow_error_set(error, -ret,
1921 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1922 				   "Failed to destroy flow.");
1923 	}
1924 
1925 	return ret;
1926 }
1927 
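/* rte_flow destroy callback: validate the flow handle and tear the flow
 * down under the flow lock.
 */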
1928 static int
1929 bnxt_flow_destroy(struct rte_eth_dev *dev,
1930 		  struct rte_flow *flow,
1931 		  struct rte_flow_error *error)
1932 {
1933 	struct bnxt *bp = dev->data->dev_private;
1934 	int ret = 0;
1935 
1936 	bnxt_acquire_flow_lock(bp);
1937 	if (!flow) {
1938 		rte_flow_error_set(error, EINVAL,
1939 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1940 				   "Invalid flow: failed to destroy flow.");
1941 		bnxt_release_flow_lock(bp);
1942 		return -EINVAL;
1943 	}
1944 
1945 	if (!flow->filter) {
1946 		rte_flow_error_set(error, EINVAL,
1947 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1948 				   "Invalid flow: failed to destroy flow.");
1949 		bnxt_release_flow_lock(bp);
1950 		return -EINVAL;
1951 	}
1952 	ret = _bnxt_flow_destroy(bp, flow, error);
1953 	bnxt_release_flow_lock(bp);
1954 
1955 	return ret;
1956 }
1957 
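/* rte_flow flush callback: destroy every flow on every VNIC owned by
 * the port.
 */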
1958 static int
1959 bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1960 {
1961 	struct bnxt *bp = dev->data->dev_private;
1962 	struct bnxt_vnic_info *vnic;
1963 	struct rte_flow *flow;
1964 	unsigned int i;
1965 	int ret = 0;
1966 
1967 	bnxt_acquire_flow_lock(bp);
1968 	for (i = 0; i < bp->max_vnics; i++) {
1969 		vnic = &bp->vnic_info[i];
1970 		if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID)
1971 			continue;
1972 
1973 		while (!STAILQ_EMPTY(&vnic->flow_list)) {
1974 			flow = STAILQ_FIRST(&vnic->flow_list);
1975 
1976 			if (!flow->filter)
1977 				continue;
1978 
1979 			ret = _bnxt_flow_destroy(bp, flow, error);
1980 			if (ret)
1981 				break;
1982 		}
1983 	}
1984 	bnxt_release_flow_lock(bp);
1985 
1986 	return ret;
1987 }
1988 
1989 const struct rte_flow_ops bnxt_flow_ops = {
1990 	.validate = bnxt_flow_validate,
1991 	.create = bnxt_flow_create,
1992 	.destroy = bnxt_flow_destroy,
1993 	.flush = bnxt_flow_flush,
1994 };
1995