xref: /dpdk/drivers/net/hinic/hinic_pmd_flow.c (revision 89b5642d0d45c22c0ceab57efe3fab3b49ff4324)
173122b52SXiaoyun Wang /* SPDX-License-Identifier: BSD-3-Clause
273122b52SXiaoyun Wang  * Copyright(c) 2017 Huawei Technologies Co., Ltd
373122b52SXiaoyun Wang  */
473122b52SXiaoyun Wang 
573122b52SXiaoyun Wang #include <stdio.h>
673122b52SXiaoyun Wang #include <errno.h>
773122b52SXiaoyun Wang #include <stdint.h>
873122b52SXiaoyun Wang #include <string.h>
973122b52SXiaoyun Wang #include <unistd.h>
1073122b52SXiaoyun Wang 
1173122b52SXiaoyun Wang #include <rte_byteorder.h>
1273122b52SXiaoyun Wang #include <rte_common.h>
1373122b52SXiaoyun Wang #include <rte_ether.h>
1473122b52SXiaoyun Wang #include <rte_ethdev.h>
1573122b52SXiaoyun Wang #include <rte_malloc.h>
1673122b52SXiaoyun Wang #include <rte_flow.h>
1773122b52SXiaoyun Wang #include <rte_flow_driver.h>
1873122b52SXiaoyun Wang #include "base/hinic_compat.h"
1973122b52SXiaoyun Wang #include "base/hinic_pmd_hwdev.h"
2073122b52SXiaoyun Wang #include "base/hinic_pmd_hwif.h"
2173122b52SXiaoyun Wang #include "base/hinic_pmd_wq.h"
2273122b52SXiaoyun Wang #include "base/hinic_pmd_cmdq.h"
2373122b52SXiaoyun Wang #include "base/hinic_pmd_niccfg.h"
2473122b52SXiaoyun Wang #include "hinic_pmd_ethdev.h"
2573122b52SXiaoyun Wang 
26a3920be3SXiaoyun Wang #define HINIC_MAX_RX_QUEUE_NUM		64
27a3920be3SXiaoyun Wang 
2873122b52SXiaoyun Wang #ifndef UINT8_MAX
2973122b52SXiaoyun Wang #define UINT8_MAX          (u8)(~((u8)0))	/* 0xFF               */
3073122b52SXiaoyun Wang #define UINT16_MAX         (u16)(~((u16)0))	/* 0xFFFF             */
3173122b52SXiaoyun Wang #define UINT32_MAX         (u32)(~((u32)0))	/* 0xFFFFFFFF         */
3273122b52SXiaoyun Wang #define UINT64_MAX         (u64)(~((u64)0))	/* 0xFFFFFFFFFFFFFFFF */
3373122b52SXiaoyun Wang #define ASCII_MAX          (0x7F)
3473122b52SXiaoyun Wang #endif
3573122b52SXiaoyun Wang 
36a3920be3SXiaoyun Wang /* IPSURX MACRO */
37a3920be3SXiaoyun Wang #define PA_ETH_TYPE_ROCE		0
38a3920be3SXiaoyun Wang #define PA_ETH_TYPE_IPV4		1
39a3920be3SXiaoyun Wang #define PA_ETH_TYPE_IPV6		2
40a3920be3SXiaoyun Wang #define PA_ETH_TYPE_OTHER		3
41a3920be3SXiaoyun Wang 
42a3920be3SXiaoyun Wang #define PA_IP_PROTOCOL_TYPE_TCP		1
43a3920be3SXiaoyun Wang #define PA_IP_PROTOCOL_TYPE_UDP		2
44a3920be3SXiaoyun Wang #define PA_IP_PROTOCOL_TYPE_ICMP	3
45a3920be3SXiaoyun Wang #define PA_IP_PROTOCOL_TYPE_IPV4_IGMP	4
46a3920be3SXiaoyun Wang #define PA_IP_PROTOCOL_TYPE_SCTP	5
47a3920be3SXiaoyun Wang #define PA_IP_PROTOCOL_TYPE_VRRP	112
48a3920be3SXiaoyun Wang 
49a3920be3SXiaoyun Wang #define IP_HEADER_PROTOCOL_TYPE_TCP     6
501fe89aa3SXiaoyun Wang #define IP_HEADER_PROTOCOL_TYPE_UDP     17
511fe89aa3SXiaoyun Wang #define IP_HEADER_PROTOCOL_TYPE_ICMP    1
529d441c45SXiaoyun Wang #define IP_HEADER_PROTOCOL_TYPE_ICMPV6  58
531fe89aa3SXiaoyun Wang 
541fe89aa3SXiaoyun Wang #define FDIR_TCAM_NORMAL_PACKET         0
551fe89aa3SXiaoyun Wang #define FDIR_TCAM_TUNNEL_PACKET         1
56a3920be3SXiaoyun Wang 
5773122b52SXiaoyun Wang #define HINIC_MIN_N_TUPLE_PRIO		1
5873122b52SXiaoyun Wang #define HINIC_MAX_N_TUPLE_PRIO		7
5973122b52SXiaoyun Wang 
60a3920be3SXiaoyun Wang /* TCAM type mask in hardware */
61a3920be3SXiaoyun Wang #define TCAM_PKT_BGP_SPORT	1
62a3920be3SXiaoyun Wang #define TCAM_PKT_VRRP		2
63a3920be3SXiaoyun Wang #define TCAM_PKT_BGP_DPORT	3
64a3920be3SXiaoyun Wang #define TCAM_PKT_LACP		4
65a3920be3SXiaoyun Wang 
669d441c45SXiaoyun Wang #define TCAM_DIP_IPV4_TYPE	0
679d441c45SXiaoyun Wang #define TCAM_DIP_IPV6_TYPE	1
689d441c45SXiaoyun Wang 
69a3920be3SXiaoyun Wang #define BGP_DPORT_ID		179
70a3920be3SXiaoyun Wang #define IPPROTO_VRRP		112
71a3920be3SXiaoyun Wang 
72a3920be3SXiaoyun Wang /* Packet type defined in hardware to perform filter */
73a3920be3SXiaoyun Wang #define PKT_IGMP_IPV4_TYPE     64
74a3920be3SXiaoyun Wang #define PKT_ICMP_IPV4_TYPE     65
75a3920be3SXiaoyun Wang #define PKT_ICMP_IPV6_TYPE     66
76a3920be3SXiaoyun Wang #define PKT_ICMP_IPV6RS_TYPE   67
77a3920be3SXiaoyun Wang #define PKT_ICMP_IPV6RA_TYPE   68
78a3920be3SXiaoyun Wang #define PKT_ICMP_IPV6NS_TYPE   69
79a3920be3SXiaoyun Wang #define PKT_ICMP_IPV6NA_TYPE   70
80a3920be3SXiaoyun Wang #define PKT_ICMP_IPV6RE_TYPE   71
81a3920be3SXiaoyun Wang #define PKT_DHCP_IPV4_TYPE     72
82a3920be3SXiaoyun Wang #define PKT_DHCP_IPV6_TYPE     73
83a3920be3SXiaoyun Wang #define PKT_LACP_TYPE          74
84a3920be3SXiaoyun Wang #define PKT_ARP_REQ_TYPE       79
85a3920be3SXiaoyun Wang #define PKT_ARP_REP_TYPE       80
86a3920be3SXiaoyun Wang #define PKT_ARP_TYPE           81
87a3920be3SXiaoyun Wang #define PKT_BGPD_DPORT_TYPE    83
88a3920be3SXiaoyun Wang #define PKT_BGPD_SPORT_TYPE    84
89a3920be3SXiaoyun Wang #define PKT_VRRP_TYPE          85
90a3920be3SXiaoyun Wang 
91a3920be3SXiaoyun Wang #define HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev) \
92a3920be3SXiaoyun Wang 	(&((struct hinic_nic_dev *)nic_dev)->filter)
93a3920be3SXiaoyun Wang 
941fe89aa3SXiaoyun Wang #define HINIC_DEV_PRIVATE_TO_TCAM_INFO(nic_dev) \
951fe89aa3SXiaoyun Wang 	(&((struct hinic_nic_dev *)nic_dev)->tcam)
961fe89aa3SXiaoyun Wang 
971fe89aa3SXiaoyun Wang 
98f4ca3fd5SXiaoyun Wang enum hinic_atr_flow_type {
99f4ca3fd5SXiaoyun Wang 	HINIC_ATR_FLOW_TYPE_IPV4_DIP    = 0x1,
100f4ca3fd5SXiaoyun Wang 	HINIC_ATR_FLOW_TYPE_IPV4_SIP    = 0x2,
101f4ca3fd5SXiaoyun Wang 	HINIC_ATR_FLOW_TYPE_DPORT       = 0x3,
102f4ca3fd5SXiaoyun Wang 	HINIC_ATR_FLOW_TYPE_SPORT       = 0x4,
103f4ca3fd5SXiaoyun Wang };
104f4ca3fd5SXiaoyun Wang 
105f4ca3fd5SXiaoyun Wang /* Structure to store fdir's info. */
106f4ca3fd5SXiaoyun Wang struct hinic_fdir_info {
107f4ca3fd5SXiaoyun Wang 	uint8_t fdir_flag;
108f4ca3fd5SXiaoyun Wang 	uint8_t qid;
109f4ca3fd5SXiaoyun Wang 	uint32_t fdir_key;
110f4ca3fd5SXiaoyun Wang };
111a3920be3SXiaoyun Wang 
11273122b52SXiaoyun Wang /**
11373122b52SXiaoyun Wang  * Endless loop will never happen with below assumption
11473122b52SXiaoyun Wang  * 1. there is at least one no-void item(END)
11573122b52SXiaoyun Wang  * 2. cur is before END.
11673122b52SXiaoyun Wang  */
11773122b52SXiaoyun Wang static inline const struct rte_flow_item *
11873122b52SXiaoyun Wang next_no_void_pattern(const struct rte_flow_item pattern[],
11973122b52SXiaoyun Wang 		const struct rte_flow_item *cur)
12073122b52SXiaoyun Wang {
12173122b52SXiaoyun Wang 	const struct rte_flow_item *next =
12273122b52SXiaoyun Wang 		cur ? cur + 1 : &pattern[0];
12373122b52SXiaoyun Wang 	while (1) {
12473122b52SXiaoyun Wang 		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
12573122b52SXiaoyun Wang 			return next;
12673122b52SXiaoyun Wang 		next++;
12773122b52SXiaoyun Wang 	}
12873122b52SXiaoyun Wang }
12973122b52SXiaoyun Wang 
13073122b52SXiaoyun Wang static inline const struct rte_flow_action *
13173122b52SXiaoyun Wang next_no_void_action(const struct rte_flow_action actions[],
13273122b52SXiaoyun Wang 		const struct rte_flow_action *cur)
13373122b52SXiaoyun Wang {
13473122b52SXiaoyun Wang 	const struct rte_flow_action *next =
13573122b52SXiaoyun Wang 		cur ? cur + 1 : &actions[0];
13673122b52SXiaoyun Wang 	while (1) {
13773122b52SXiaoyun Wang 		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
13873122b52SXiaoyun Wang 			return next;
13973122b52SXiaoyun Wang 		next++;
14073122b52SXiaoyun Wang 	}
14173122b52SXiaoyun Wang }
14273122b52SXiaoyun Wang 
14373122b52SXiaoyun Wang static int hinic_check_ethertype_attr_ele(const struct rte_flow_attr *attr,
14473122b52SXiaoyun Wang 					struct rte_flow_error *error)
14573122b52SXiaoyun Wang {
14673122b52SXiaoyun Wang 	/* Must be input direction */
14773122b52SXiaoyun Wang 	if (!attr->ingress) {
14873122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
14973122b52SXiaoyun Wang 			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
15073122b52SXiaoyun Wang 			attr, "Only support ingress.");
15173122b52SXiaoyun Wang 		return -rte_errno;
15273122b52SXiaoyun Wang 	}
15373122b52SXiaoyun Wang 
15473122b52SXiaoyun Wang 	if (attr->egress) {
15573122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
15673122b52SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
15773122b52SXiaoyun Wang 				attr, "Not support egress.");
15873122b52SXiaoyun Wang 		return -rte_errno;
15973122b52SXiaoyun Wang 	}
16073122b52SXiaoyun Wang 
16173122b52SXiaoyun Wang 	if (attr->priority) {
16273122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
16373122b52SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
16473122b52SXiaoyun Wang 				attr, "Not support priority.");
16573122b52SXiaoyun Wang 		return -rte_errno;
16673122b52SXiaoyun Wang 	}
16773122b52SXiaoyun Wang 
16873122b52SXiaoyun Wang 	if (attr->group) {
16973122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
17073122b52SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
17173122b52SXiaoyun Wang 				attr, "Not support group.");
17273122b52SXiaoyun Wang 		return -rte_errno;
17373122b52SXiaoyun Wang 	}
17473122b52SXiaoyun Wang 
17573122b52SXiaoyun Wang 	return 0;
17673122b52SXiaoyun Wang }
17773122b52SXiaoyun Wang 
17873122b52SXiaoyun Wang static int hinic_check_filter_arg(const struct rte_flow_attr *attr,
17973122b52SXiaoyun Wang 				const struct rte_flow_item *pattern,
18073122b52SXiaoyun Wang 				const struct rte_flow_action *actions,
18173122b52SXiaoyun Wang 				struct rte_flow_error *error)
18273122b52SXiaoyun Wang {
18373122b52SXiaoyun Wang 	if (!pattern) {
18473122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
18573122b52SXiaoyun Wang 				NULL, "NULL pattern.");
18673122b52SXiaoyun Wang 		return -rte_errno;
18773122b52SXiaoyun Wang 	}
18873122b52SXiaoyun Wang 
18973122b52SXiaoyun Wang 	if (!actions) {
19073122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
19173122b52SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
19273122b52SXiaoyun Wang 				NULL, "NULL action.");
19373122b52SXiaoyun Wang 		return -rte_errno;
19473122b52SXiaoyun Wang 	}
19573122b52SXiaoyun Wang 
19673122b52SXiaoyun Wang 	if (!attr) {
19773122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
19873122b52SXiaoyun Wang 				   NULL, "NULL attribute.");
19973122b52SXiaoyun Wang 		return -rte_errno;
20073122b52SXiaoyun Wang 	}
20173122b52SXiaoyun Wang 
20273122b52SXiaoyun Wang 	return 0;
20373122b52SXiaoyun Wang }
20473122b52SXiaoyun Wang 
20573122b52SXiaoyun Wang static int hinic_check_ethertype_first_item(const struct rte_flow_item *item,
20673122b52SXiaoyun Wang 					struct rte_flow_error *error)
20773122b52SXiaoyun Wang {
20873122b52SXiaoyun Wang 	/* The first non-void item should be MAC */
20973122b52SXiaoyun Wang 	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
21073122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
21173122b52SXiaoyun Wang 			RTE_FLOW_ERROR_TYPE_ITEM,
21273122b52SXiaoyun Wang 			item, "Not supported by ethertype filter");
21373122b52SXiaoyun Wang 		return -rte_errno;
21473122b52SXiaoyun Wang 	}
21573122b52SXiaoyun Wang 
21673122b52SXiaoyun Wang 	/* Not supported last point for range */
21773122b52SXiaoyun Wang 	if (item->last) {
21873122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
21973122b52SXiaoyun Wang 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
22073122b52SXiaoyun Wang 			item, "Not supported last point for range");
22173122b52SXiaoyun Wang 		return -rte_errno;
22273122b52SXiaoyun Wang 	}
22373122b52SXiaoyun Wang 
22473122b52SXiaoyun Wang 	/* Get the MAC info. */
22573122b52SXiaoyun Wang 	if (!item->spec || !item->mask) {
22673122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
22773122b52SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ITEM,
22873122b52SXiaoyun Wang 				item, "Not supported by ethertype filter");
22973122b52SXiaoyun Wang 		return -rte_errno;
23073122b52SXiaoyun Wang 	}
23173122b52SXiaoyun Wang 	return 0;
23273122b52SXiaoyun Wang }
23373122b52SXiaoyun Wang 
23473122b52SXiaoyun Wang static int
23573122b52SXiaoyun Wang hinic_parse_ethertype_aciton(const struct rte_flow_action *actions,
23673122b52SXiaoyun Wang 			const struct rte_flow_action *act,
23773122b52SXiaoyun Wang 			const struct rte_flow_action_queue *act_q,
23873122b52SXiaoyun Wang 			struct rte_eth_ethertype_filter *filter,
23973122b52SXiaoyun Wang 			struct rte_flow_error *error)
24073122b52SXiaoyun Wang {
24173122b52SXiaoyun Wang 	/* Parse action */
24273122b52SXiaoyun Wang 	act = next_no_void_action(actions, NULL);
24373122b52SXiaoyun Wang 	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
24473122b52SXiaoyun Wang 		act->type != RTE_FLOW_ACTION_TYPE_DROP) {
24573122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
24673122b52SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ACTION,
24773122b52SXiaoyun Wang 				act, "Not supported action.");
24873122b52SXiaoyun Wang 		return -rte_errno;
24973122b52SXiaoyun Wang 	}
25073122b52SXiaoyun Wang 
25173122b52SXiaoyun Wang 	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
25273122b52SXiaoyun Wang 		act_q = (const struct rte_flow_action_queue *)act->conf;
25373122b52SXiaoyun Wang 		filter->queue = act_q->index;
25473122b52SXiaoyun Wang 	} else {
25573122b52SXiaoyun Wang 		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
25673122b52SXiaoyun Wang 	}
25773122b52SXiaoyun Wang 
25873122b52SXiaoyun Wang 	/* Check if the next non-void item is END */
25973122b52SXiaoyun Wang 	act = next_no_void_action(actions, act);
26073122b52SXiaoyun Wang 	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
26173122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
26273122b52SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ACTION,
26373122b52SXiaoyun Wang 				act, "Not supported action.");
26473122b52SXiaoyun Wang 		return -rte_errno;
26573122b52SXiaoyun Wang 	}
26673122b52SXiaoyun Wang 
26773122b52SXiaoyun Wang 	return 0;
26873122b52SXiaoyun Wang }
26973122b52SXiaoyun Wang 
27073122b52SXiaoyun Wang /**
27173122b52SXiaoyun Wang  * Parse the rule to see if it is a ethertype rule.
27273122b52SXiaoyun Wang  * And get the ethertype filter info BTW.
27373122b52SXiaoyun Wang  * pattern:
27473122b52SXiaoyun Wang  * The first not void item can be ETH.
27573122b52SXiaoyun Wang  * The next not void item must be END.
27673122b52SXiaoyun Wang  * action:
27773122b52SXiaoyun Wang  * The first not void action should be QUEUE.
27873122b52SXiaoyun Wang  * The next not void action should be END.
27973122b52SXiaoyun Wang  * pattern example:
28073122b52SXiaoyun Wang  * ITEM		Spec			Mask
28173122b52SXiaoyun Wang  * ETH		type	0x0807		0xFFFF
28273122b52SXiaoyun Wang  * END
28373122b52SXiaoyun Wang  * other members in mask and spec should set to 0x00.
28473122b52SXiaoyun Wang  * item->last should be NULL.
28573122b52SXiaoyun Wang  */
2861fe89aa3SXiaoyun Wang static int cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
28773122b52SXiaoyun Wang 			const struct rte_flow_item *pattern,
28873122b52SXiaoyun Wang 			const struct rte_flow_action *actions,
28973122b52SXiaoyun Wang 			struct rte_eth_ethertype_filter *filter,
29073122b52SXiaoyun Wang 			struct rte_flow_error *error)
29173122b52SXiaoyun Wang {
29273122b52SXiaoyun Wang 	const struct rte_flow_item *item;
29373122b52SXiaoyun Wang 	const struct rte_flow_action *act = NULL;
29473122b52SXiaoyun Wang 	const struct rte_flow_item_eth *eth_spec;
29573122b52SXiaoyun Wang 	const struct rte_flow_item_eth *eth_mask;
29673122b52SXiaoyun Wang 	const struct rte_flow_action_queue *act_q = NULL;
29773122b52SXiaoyun Wang 
29873122b52SXiaoyun Wang 	if (hinic_check_filter_arg(attr, pattern, actions, error))
29973122b52SXiaoyun Wang 		return -rte_errno;
30073122b52SXiaoyun Wang 
30173122b52SXiaoyun Wang 	item = next_no_void_pattern(pattern, NULL);
30273122b52SXiaoyun Wang 	if (hinic_check_ethertype_first_item(item, error))
30373122b52SXiaoyun Wang 		return -rte_errno;
30473122b52SXiaoyun Wang 
30573122b52SXiaoyun Wang 	eth_spec = (const struct rte_flow_item_eth *)item->spec;
30673122b52SXiaoyun Wang 	eth_mask = (const struct rte_flow_item_eth *)item->mask;
30773122b52SXiaoyun Wang 
30873122b52SXiaoyun Wang 	/*
30973122b52SXiaoyun Wang 	 * Mask bits of source MAC address must be full of 0.
31073122b52SXiaoyun Wang 	 * Mask bits of destination MAC address must be full
31173122b52SXiaoyun Wang 	 * of 1 or full of 0.
31273122b52SXiaoyun Wang 	 */
3138275d5fcSThomas Monjalon 	if (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr) ||
3148275d5fcSThomas Monjalon 	    (!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr) &&
3158275d5fcSThomas Monjalon 	     !rte_is_broadcast_ether_addr(&eth_mask->hdr.dst_addr))) {
31673122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
31773122b52SXiaoyun Wang 				item, "Invalid ether address mask");
31873122b52SXiaoyun Wang 		return -rte_errno;
31973122b52SXiaoyun Wang 	}
32073122b52SXiaoyun Wang 
3218275d5fcSThomas Monjalon 	if ((eth_mask->hdr.ether_type & UINT16_MAX) != UINT16_MAX) {
32273122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
32373122b52SXiaoyun Wang 				item, "Invalid ethertype mask");
32473122b52SXiaoyun Wang 		return -rte_errno;
32573122b52SXiaoyun Wang 	}
32673122b52SXiaoyun Wang 
32773122b52SXiaoyun Wang 	/*
32873122b52SXiaoyun Wang 	 * If mask bits of destination MAC address
32973122b52SXiaoyun Wang 	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
33073122b52SXiaoyun Wang 	 */
3318275d5fcSThomas Monjalon 	if (rte_is_broadcast_ether_addr(&eth_mask->hdr.dst_addr)) {
3328275d5fcSThomas Monjalon 		filter->mac_addr = eth_spec->hdr.dst_addr;
33373122b52SXiaoyun Wang 		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
33473122b52SXiaoyun Wang 	} else {
33573122b52SXiaoyun Wang 		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
33673122b52SXiaoyun Wang 	}
3378275d5fcSThomas Monjalon 	filter->ether_type = rte_be_to_cpu_16(eth_spec->hdr.ether_type);
33873122b52SXiaoyun Wang 
33973122b52SXiaoyun Wang 	/* Check if the next non-void item is END. */
34073122b52SXiaoyun Wang 	item = next_no_void_pattern(pattern, item);
34173122b52SXiaoyun Wang 	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
34273122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
34373122b52SXiaoyun Wang 			item, "Not supported by ethertype filter.");
34473122b52SXiaoyun Wang 		return -rte_errno;
34573122b52SXiaoyun Wang 	}
34673122b52SXiaoyun Wang 
34773122b52SXiaoyun Wang 	if (hinic_parse_ethertype_aciton(actions, act, act_q, filter, error))
34873122b52SXiaoyun Wang 		return -rte_errno;
34973122b52SXiaoyun Wang 
35073122b52SXiaoyun Wang 	if (hinic_check_ethertype_attr_ele(attr, error))
35173122b52SXiaoyun Wang 		return -rte_errno;
35273122b52SXiaoyun Wang 
35373122b52SXiaoyun Wang 	return 0;
35473122b52SXiaoyun Wang }
35573122b52SXiaoyun Wang 
3561fe89aa3SXiaoyun Wang static int hinic_parse_ethertype_filter(struct rte_eth_dev *dev,
35773122b52SXiaoyun Wang 			const struct rte_flow_attr *attr,
35873122b52SXiaoyun Wang 			const struct rte_flow_item pattern[],
35973122b52SXiaoyun Wang 			const struct rte_flow_action actions[],
36073122b52SXiaoyun Wang 			struct rte_eth_ethertype_filter *filter,
36173122b52SXiaoyun Wang 			struct rte_flow_error *error)
36273122b52SXiaoyun Wang {
36373122b52SXiaoyun Wang 	if (cons_parse_ethertype_filter(attr, pattern, actions, filter, error))
36473122b52SXiaoyun Wang 		return -rte_errno;
36573122b52SXiaoyun Wang 
36673122b52SXiaoyun Wang 	/* NIC doesn't support MAC address. */
36773122b52SXiaoyun Wang 	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
36873122b52SXiaoyun Wang 		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
36973122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
37073122b52SXiaoyun Wang 			RTE_FLOW_ERROR_TYPE_ITEM,
37173122b52SXiaoyun Wang 			NULL, "Not supported by ethertype filter");
37273122b52SXiaoyun Wang 		return -rte_errno;
37373122b52SXiaoyun Wang 	}
37473122b52SXiaoyun Wang 
37573122b52SXiaoyun Wang 	if (filter->queue >= dev->data->nb_rx_queues) {
37673122b52SXiaoyun Wang 		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
37773122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
37873122b52SXiaoyun Wang 			RTE_FLOW_ERROR_TYPE_ITEM,
37973122b52SXiaoyun Wang 			NULL, "Queue index much too big");
38073122b52SXiaoyun Wang 		return -rte_errno;
38173122b52SXiaoyun Wang 	}
38273122b52SXiaoyun Wang 
38373122b52SXiaoyun Wang 	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
38473122b52SXiaoyun Wang 		filter->ether_type == RTE_ETHER_TYPE_IPV6) {
38573122b52SXiaoyun Wang 		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
38673122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
38773122b52SXiaoyun Wang 			RTE_FLOW_ERROR_TYPE_ITEM,
38873122b52SXiaoyun Wang 			NULL, "IPv4/IPv6 not supported by ethertype filter");
38973122b52SXiaoyun Wang 		return -rte_errno;
39073122b52SXiaoyun Wang 	}
39173122b52SXiaoyun Wang 
39273122b52SXiaoyun Wang 	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
39373122b52SXiaoyun Wang 		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
39473122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
39573122b52SXiaoyun Wang 			RTE_FLOW_ERROR_TYPE_ITEM,
39673122b52SXiaoyun Wang 			NULL, "Drop option is unsupported");
39773122b52SXiaoyun Wang 		return -rte_errno;
39873122b52SXiaoyun Wang 	}
39973122b52SXiaoyun Wang 
40073122b52SXiaoyun Wang 	/* Hinic only support LACP/ARP for ether type */
40173122b52SXiaoyun Wang 	if (filter->ether_type != RTE_ETHER_TYPE_SLOW &&
40273122b52SXiaoyun Wang 		filter->ether_type != RTE_ETHER_TYPE_ARP) {
40373122b52SXiaoyun Wang 		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
40473122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
40573122b52SXiaoyun Wang 			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
40673122b52SXiaoyun Wang 			"only lacp/arp type supported by ethertype filter");
40773122b52SXiaoyun Wang 		return -rte_errno;
40873122b52SXiaoyun Wang 	}
40973122b52SXiaoyun Wang 
41073122b52SXiaoyun Wang 	return 0;
41173122b52SXiaoyun Wang }
41273122b52SXiaoyun Wang 
41373122b52SXiaoyun Wang static int hinic_check_ntuple_attr_ele(const struct rte_flow_attr *attr,
41473122b52SXiaoyun Wang 				struct rte_eth_ntuple_filter *filter,
41573122b52SXiaoyun Wang 				struct rte_flow_error *error)
41673122b52SXiaoyun Wang {
41773122b52SXiaoyun Wang 	/* Must be input direction */
41873122b52SXiaoyun Wang 	if (!attr->ingress) {
41973122b52SXiaoyun Wang 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
42073122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
42173122b52SXiaoyun Wang 				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
42273122b52SXiaoyun Wang 				   attr, "Only support ingress.");
42373122b52SXiaoyun Wang 		return -rte_errno;
42473122b52SXiaoyun Wang 	}
42573122b52SXiaoyun Wang 
42673122b52SXiaoyun Wang 	if (attr->egress) {
42773122b52SXiaoyun Wang 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
42873122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
42973122b52SXiaoyun Wang 				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
43073122b52SXiaoyun Wang 				   attr, "Not support egress.");
43173122b52SXiaoyun Wang 		return -rte_errno;
43273122b52SXiaoyun Wang 	}
43373122b52SXiaoyun Wang 
43473122b52SXiaoyun Wang 	if (attr->priority > 0xFFFF) {
43573122b52SXiaoyun Wang 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
43673122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
43773122b52SXiaoyun Wang 				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
43873122b52SXiaoyun Wang 				   attr, "Error priority.");
43973122b52SXiaoyun Wang 		return -rte_errno;
44073122b52SXiaoyun Wang 	}
44173122b52SXiaoyun Wang 
44273122b52SXiaoyun Wang 	if (attr->priority < HINIC_MIN_N_TUPLE_PRIO ||
44373122b52SXiaoyun Wang 		    attr->priority > HINIC_MAX_N_TUPLE_PRIO)
44473122b52SXiaoyun Wang 		filter->priority = 1;
44573122b52SXiaoyun Wang 	else
44673122b52SXiaoyun Wang 		filter->priority = (uint16_t)attr->priority;
44773122b52SXiaoyun Wang 
44873122b52SXiaoyun Wang 	return 0;
44973122b52SXiaoyun Wang }
45073122b52SXiaoyun Wang 
45173122b52SXiaoyun Wang static int
45273122b52SXiaoyun Wang hinic_check_ntuple_act_ele(__rte_unused const struct rte_flow_item *item,
45373122b52SXiaoyun Wang 			const struct rte_flow_action actions[],
45473122b52SXiaoyun Wang 			struct rte_eth_ntuple_filter *filter,
45573122b52SXiaoyun Wang 			struct rte_flow_error *error)
45673122b52SXiaoyun Wang {
45773122b52SXiaoyun Wang 	const struct rte_flow_action *act;
45873122b52SXiaoyun Wang 	/*
45973122b52SXiaoyun Wang 	 * n-tuple only supports forwarding,
46073122b52SXiaoyun Wang 	 * check if the first not void action is QUEUE.
46173122b52SXiaoyun Wang 	 */
46273122b52SXiaoyun Wang 	act = next_no_void_action(actions, NULL);
46373122b52SXiaoyun Wang 	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
46473122b52SXiaoyun Wang 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
46573122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
46673122b52SXiaoyun Wang 			RTE_FLOW_ERROR_TYPE_ACTION,
46773122b52SXiaoyun Wang 			act, "Flow action type is not QUEUE.");
46873122b52SXiaoyun Wang 		return -rte_errno;
46973122b52SXiaoyun Wang 	}
47073122b52SXiaoyun Wang 	filter->queue =
47173122b52SXiaoyun Wang 		((const struct rte_flow_action_queue *)act->conf)->index;
47273122b52SXiaoyun Wang 
47373122b52SXiaoyun Wang 	/* Check if the next not void item is END */
47473122b52SXiaoyun Wang 	act = next_no_void_action(actions, act);
47573122b52SXiaoyun Wang 	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
47673122b52SXiaoyun Wang 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
47773122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
47873122b52SXiaoyun Wang 			RTE_FLOW_ERROR_TYPE_ACTION,
47973122b52SXiaoyun Wang 			act, "Next not void item is not END.");
48073122b52SXiaoyun Wang 		return -rte_errno;
48173122b52SXiaoyun Wang 	}
48273122b52SXiaoyun Wang 
48373122b52SXiaoyun Wang 	return 0;
48473122b52SXiaoyun Wang }
48573122b52SXiaoyun Wang 
48673122b52SXiaoyun Wang static int hinic_ntuple_item_check_ether(const struct rte_flow_item **ipv4_item,
48773122b52SXiaoyun Wang 					const struct rte_flow_item pattern[],
48873122b52SXiaoyun Wang 					struct rte_flow_error *error)
48973122b52SXiaoyun Wang {
49073122b52SXiaoyun Wang 	const struct rte_flow_item *item;
49173122b52SXiaoyun Wang 
49273122b52SXiaoyun Wang 	/* The first not void item can be MAC or IPv4 */
49373122b52SXiaoyun Wang 	item = next_no_void_pattern(pattern, NULL);
49473122b52SXiaoyun Wang 
49573122b52SXiaoyun Wang 	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
49673122b52SXiaoyun Wang 		item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
49773122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
49873122b52SXiaoyun Wang 			RTE_FLOW_ERROR_TYPE_ITEM,
49973122b52SXiaoyun Wang 			item, "Not supported by ntuple filter");
50073122b52SXiaoyun Wang 		return -rte_errno;
50173122b52SXiaoyun Wang 	}
50273122b52SXiaoyun Wang 
50373122b52SXiaoyun Wang 	/* Skip Ethernet */
50473122b52SXiaoyun Wang 	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
50573122b52SXiaoyun Wang 		/* Not supported last point for range */
50673122b52SXiaoyun Wang 		if (item->last) {
50773122b52SXiaoyun Wang 			rte_flow_error_set(error,
50873122b52SXiaoyun Wang 				EINVAL,
50973122b52SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
51073122b52SXiaoyun Wang 				item, "Not supported last point for range");
51173122b52SXiaoyun Wang 			return -rte_errno;
51273122b52SXiaoyun Wang 		}
51373122b52SXiaoyun Wang 		/* if the first item is MAC, the content should be NULL */
51473122b52SXiaoyun Wang 		if (item->spec || item->mask) {
51573122b52SXiaoyun Wang 			rte_flow_error_set(error, EINVAL,
51673122b52SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ITEM,
51773122b52SXiaoyun Wang 				item, "Not supported by ntuple filter");
51873122b52SXiaoyun Wang 			return -rte_errno;
51973122b52SXiaoyun Wang 		}
52073122b52SXiaoyun Wang 		/* check if the next not void item is IPv4 */
52173122b52SXiaoyun Wang 		item = next_no_void_pattern(pattern, item);
52273122b52SXiaoyun Wang 		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
52373122b52SXiaoyun Wang 			rte_flow_error_set(error,
52473122b52SXiaoyun Wang 				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
52573122b52SXiaoyun Wang 				item, "Not supported by ntuple filter");
52673122b52SXiaoyun Wang 			return -rte_errno;
52773122b52SXiaoyun Wang 		}
52873122b52SXiaoyun Wang 	}
52973122b52SXiaoyun Wang 
53073122b52SXiaoyun Wang 	*ipv4_item = item;
53173122b52SXiaoyun Wang 	return 0;
53273122b52SXiaoyun Wang }
53373122b52SXiaoyun Wang 
53473122b52SXiaoyun Wang static int
53573122b52SXiaoyun Wang hinic_ntuple_item_check_ipv4(const struct rte_flow_item **in_out_item,
53673122b52SXiaoyun Wang 			const struct rte_flow_item pattern[],
53773122b52SXiaoyun Wang 			struct rte_eth_ntuple_filter *filter,
53873122b52SXiaoyun Wang 			struct rte_flow_error *error)
53973122b52SXiaoyun Wang {
54073122b52SXiaoyun Wang 	const struct rte_flow_item_ipv4 *ipv4_spec;
54173122b52SXiaoyun Wang 	const struct rte_flow_item_ipv4 *ipv4_mask;
54273122b52SXiaoyun Wang 	const struct rte_flow_item *item = *in_out_item;
54373122b52SXiaoyun Wang 
54473122b52SXiaoyun Wang 	/* Get the IPv4 info */
54573122b52SXiaoyun Wang 	if (!item->spec || !item->mask) {
54673122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
54773122b52SXiaoyun Wang 			RTE_FLOW_ERROR_TYPE_ITEM,
54873122b52SXiaoyun Wang 			item, "Invalid ntuple mask");
54973122b52SXiaoyun Wang 		return -rte_errno;
55073122b52SXiaoyun Wang 	}
55173122b52SXiaoyun Wang 	/* Not supported last point for range */
55273122b52SXiaoyun Wang 	if (item->last) {
55373122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
55473122b52SXiaoyun Wang 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
55573122b52SXiaoyun Wang 			item, "Not supported last point for range");
55673122b52SXiaoyun Wang 		return -rte_errno;
55773122b52SXiaoyun Wang 	}
55873122b52SXiaoyun Wang 
55973122b52SXiaoyun Wang 	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
56073122b52SXiaoyun Wang 	/*
56173122b52SXiaoyun Wang 	 * Only support src & dst addresses, protocol,
56273122b52SXiaoyun Wang 	 * others should be masked.
56373122b52SXiaoyun Wang 	 */
56473122b52SXiaoyun Wang 	if (ipv4_mask->hdr.version_ihl ||
56573122b52SXiaoyun Wang 		ipv4_mask->hdr.type_of_service ||
56673122b52SXiaoyun Wang 		ipv4_mask->hdr.total_length ||
56773122b52SXiaoyun Wang 		ipv4_mask->hdr.packet_id ||
56873122b52SXiaoyun Wang 		ipv4_mask->hdr.fragment_offset ||
56973122b52SXiaoyun Wang 		ipv4_mask->hdr.time_to_live ||
57073122b52SXiaoyun Wang 		ipv4_mask->hdr.hdr_checksum ||
57173122b52SXiaoyun Wang 		!ipv4_mask->hdr.next_proto_id) {
57273122b52SXiaoyun Wang 		rte_flow_error_set(error,
57373122b52SXiaoyun Wang 			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
57473122b52SXiaoyun Wang 			item, "Not supported by ntuple filter");
57573122b52SXiaoyun Wang 		return -rte_errno;
57673122b52SXiaoyun Wang 	}
57773122b52SXiaoyun Wang 
57873122b52SXiaoyun Wang 	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
57973122b52SXiaoyun Wang 	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
58073122b52SXiaoyun Wang 	filter->proto_mask = ipv4_mask->hdr.next_proto_id;
58173122b52SXiaoyun Wang 
58273122b52SXiaoyun Wang 	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
58373122b52SXiaoyun Wang 	filter->dst_ip = ipv4_spec->hdr.dst_addr;
58473122b52SXiaoyun Wang 	filter->src_ip = ipv4_spec->hdr.src_addr;
58573122b52SXiaoyun Wang 	filter->proto  = ipv4_spec->hdr.next_proto_id;
58673122b52SXiaoyun Wang 
58773122b52SXiaoyun Wang 	/* Get next no void item */
58873122b52SXiaoyun Wang 	*in_out_item = next_no_void_pattern(pattern, item);
58973122b52SXiaoyun Wang 	return 0;
59073122b52SXiaoyun Wang }
59173122b52SXiaoyun Wang 
59273122b52SXiaoyun Wang static int hinic_ntuple_item_check_l4(const struct rte_flow_item **in_out_item,
59373122b52SXiaoyun Wang 				const struct rte_flow_item pattern[],
59473122b52SXiaoyun Wang 				struct rte_eth_ntuple_filter *filter,
59573122b52SXiaoyun Wang 				struct rte_flow_error *error)
59673122b52SXiaoyun Wang {
59773122b52SXiaoyun Wang 	const struct rte_flow_item_tcp *tcp_spec;
59873122b52SXiaoyun Wang 	const struct rte_flow_item_tcp *tcp_mask;
59973122b52SXiaoyun Wang 	const struct rte_flow_item_icmp *icmp_mask;
60073122b52SXiaoyun Wang 	const struct rte_flow_item *item = *in_out_item;
60173122b52SXiaoyun Wang 	u32 ntuple_filter_size = sizeof(struct rte_eth_ntuple_filter);
60273122b52SXiaoyun Wang 
60373122b52SXiaoyun Wang 	if (item->type == RTE_FLOW_ITEM_TYPE_END)
60473122b52SXiaoyun Wang 		return 0;
60573122b52SXiaoyun Wang 
60673122b52SXiaoyun Wang 	/* Get TCP or UDP info */
60773122b52SXiaoyun Wang 	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
60873122b52SXiaoyun Wang 		(!item->spec || !item->mask)) {
60973122b52SXiaoyun Wang 		memset(filter, 0, ntuple_filter_size);
61073122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
61173122b52SXiaoyun Wang 			RTE_FLOW_ERROR_TYPE_ITEM,
61273122b52SXiaoyun Wang 			item, "Invalid ntuple mask");
61373122b52SXiaoyun Wang 		return -rte_errno;
61473122b52SXiaoyun Wang 	}
61573122b52SXiaoyun Wang 
61673122b52SXiaoyun Wang 	/* Not supported last point for range */
61773122b52SXiaoyun Wang 	if (item->last) {
61873122b52SXiaoyun Wang 		memset(filter, 0, ntuple_filter_size);
61973122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
62073122b52SXiaoyun Wang 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
62173122b52SXiaoyun Wang 			item, "Not supported last point for range");
62273122b52SXiaoyun Wang 		return -rte_errno;
62373122b52SXiaoyun Wang 	}
62473122b52SXiaoyun Wang 
62573122b52SXiaoyun Wang 	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
62673122b52SXiaoyun Wang 		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
62773122b52SXiaoyun Wang 
62873122b52SXiaoyun Wang 		/*
62973122b52SXiaoyun Wang 		 * Only support src & dst ports, tcp flags,
63073122b52SXiaoyun Wang 		 * others should be masked.
63173122b52SXiaoyun Wang 		 */
63273122b52SXiaoyun Wang 		if (tcp_mask->hdr.sent_seq ||
63373122b52SXiaoyun Wang 			tcp_mask->hdr.recv_ack ||
63473122b52SXiaoyun Wang 			tcp_mask->hdr.data_off ||
63573122b52SXiaoyun Wang 			tcp_mask->hdr.rx_win ||
63673122b52SXiaoyun Wang 			tcp_mask->hdr.cksum ||
63773122b52SXiaoyun Wang 			tcp_mask->hdr.tcp_urp) {
63873122b52SXiaoyun Wang 			memset(filter, 0, ntuple_filter_size);
63973122b52SXiaoyun Wang 			rte_flow_error_set(error, EINVAL,
64073122b52SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ITEM,
64173122b52SXiaoyun Wang 				item, "Not supported by ntuple filter");
64273122b52SXiaoyun Wang 			return -rte_errno;
64373122b52SXiaoyun Wang 		}
64473122b52SXiaoyun Wang 
64573122b52SXiaoyun Wang 		filter->dst_port_mask  = tcp_mask->hdr.dst_port;
64673122b52SXiaoyun Wang 		filter->src_port_mask  = tcp_mask->hdr.src_port;
64773122b52SXiaoyun Wang 		if (tcp_mask->hdr.tcp_flags == 0xFF) {
64873122b52SXiaoyun Wang 			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
64973122b52SXiaoyun Wang 		} else if (!tcp_mask->hdr.tcp_flags) {
65073122b52SXiaoyun Wang 			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
65173122b52SXiaoyun Wang 		} else {
65273122b52SXiaoyun Wang 			memset(filter, 0, ntuple_filter_size);
65373122b52SXiaoyun Wang 			rte_flow_error_set(error, EINVAL,
65473122b52SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ITEM,
65573122b52SXiaoyun Wang 				item, "Not supported by ntuple filter");
65673122b52SXiaoyun Wang 			return -rte_errno;
65773122b52SXiaoyun Wang 		}
65873122b52SXiaoyun Wang 
65973122b52SXiaoyun Wang 		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
66073122b52SXiaoyun Wang 		filter->dst_port  = tcp_spec->hdr.dst_port;
66173122b52SXiaoyun Wang 		filter->src_port  = tcp_spec->hdr.src_port;
66273122b52SXiaoyun Wang 		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
66373122b52SXiaoyun Wang 	} else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
66473122b52SXiaoyun Wang 		icmp_mask = (const struct rte_flow_item_icmp *)item->mask;
66573122b52SXiaoyun Wang 
66673122b52SXiaoyun Wang 		/* ICMP all should be masked. */
66773122b52SXiaoyun Wang 		if (icmp_mask->hdr.icmp_cksum ||
66873122b52SXiaoyun Wang 			icmp_mask->hdr.icmp_ident ||
66973122b52SXiaoyun Wang 			icmp_mask->hdr.icmp_seq_nb ||
67073122b52SXiaoyun Wang 			icmp_mask->hdr.icmp_type ||
67173122b52SXiaoyun Wang 			icmp_mask->hdr.icmp_code) {
67273122b52SXiaoyun Wang 			memset(filter, 0, ntuple_filter_size);
67373122b52SXiaoyun Wang 			rte_flow_error_set(error, EINVAL,
67473122b52SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ITEM,
67573122b52SXiaoyun Wang 				item, "Not supported by ntuple filter");
67673122b52SXiaoyun Wang 			return -rte_errno;
67773122b52SXiaoyun Wang 		}
67873122b52SXiaoyun Wang 	}
67973122b52SXiaoyun Wang 
68073122b52SXiaoyun Wang 	/* Get next no void item */
68173122b52SXiaoyun Wang 	*in_out_item = next_no_void_pattern(pattern, item);
68273122b52SXiaoyun Wang 	return 0;
68373122b52SXiaoyun Wang }
68473122b52SXiaoyun Wang 
68573122b52SXiaoyun Wang static int hinic_ntuple_item_check_end(const struct rte_flow_item *item,
68673122b52SXiaoyun Wang 					struct rte_eth_ntuple_filter *filter,
68773122b52SXiaoyun Wang 					struct rte_flow_error *error)
68873122b52SXiaoyun Wang {
68973122b52SXiaoyun Wang 	/* Check if the next not void item is END */
69073122b52SXiaoyun Wang 	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
69173122b52SXiaoyun Wang 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
69273122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
69373122b52SXiaoyun Wang 			RTE_FLOW_ERROR_TYPE_ITEM,
69473122b52SXiaoyun Wang 			item, "Not supported by ntuple filter");
69573122b52SXiaoyun Wang 		return -rte_errno;
69673122b52SXiaoyun Wang 	}
6970c87a15fSXiaoyun Wang 
69873122b52SXiaoyun Wang 	return 0;
69973122b52SXiaoyun Wang }
70073122b52SXiaoyun Wang 
70173122b52SXiaoyun Wang static int hinic_check_ntuple_item_ele(const struct rte_flow_item *item,
70273122b52SXiaoyun Wang 					const struct rte_flow_item pattern[],
70373122b52SXiaoyun Wang 					struct rte_eth_ntuple_filter *filter,
70473122b52SXiaoyun Wang 					struct rte_flow_error *error)
70573122b52SXiaoyun Wang {
70673122b52SXiaoyun Wang 	if (hinic_ntuple_item_check_ether(&item, pattern, error) ||
70773122b52SXiaoyun Wang 		hinic_ntuple_item_check_ipv4(&item, pattern, filter, error) ||
70873122b52SXiaoyun Wang 		hinic_ntuple_item_check_l4(&item, pattern, filter, error) ||
70973122b52SXiaoyun Wang 		hinic_ntuple_item_check_end(item, filter, error))
71073122b52SXiaoyun Wang 		return -rte_errno;
71173122b52SXiaoyun Wang 
71273122b52SXiaoyun Wang 	return 0;
71373122b52SXiaoyun Wang }
71473122b52SXiaoyun Wang 
71573122b52SXiaoyun Wang /**
71673122b52SXiaoyun Wang  * Parse the rule to see if it is a n-tuple rule.
71773122b52SXiaoyun Wang  * And get the n-tuple filter info BTW.
71873122b52SXiaoyun Wang  * pattern:
71973122b52SXiaoyun Wang  * The first not void item can be ETH or IPV4.
72073122b52SXiaoyun Wang  * The second not void item must be IPV4 if the first one is ETH.
72173122b52SXiaoyun Wang  * The third not void item must be UDP or TCP.
72273122b52SXiaoyun Wang  * The next not void item must be END.
72373122b52SXiaoyun Wang  * action:
72473122b52SXiaoyun Wang  * The first not void action should be QUEUE.
72573122b52SXiaoyun Wang  * The next not void action should be END.
72673122b52SXiaoyun Wang  * pattern example:
72773122b52SXiaoyun Wang  * ITEM		Spec			Mask
72873122b52SXiaoyun Wang  * ETH		NULL			NULL
72973122b52SXiaoyun Wang  * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
73073122b52SXiaoyun Wang  *		dst_addr 192.167.3.50	0xFFFFFFFF
73173122b52SXiaoyun Wang  *		next_proto_id	17	0xFF
73273122b52SXiaoyun Wang  * UDP/TCP/	src_port	80	0xFFFF
73373122b52SXiaoyun Wang  * SCTP		dst_port	80	0xFFFF
73473122b52SXiaoyun Wang  * END
73573122b52SXiaoyun Wang  * other members in mask and spec should set to 0x00.
73673122b52SXiaoyun Wang  * item->last should be NULL.
7377be78d02SJosh Soref  * Please be aware there's an assumption for all the parsers.
73873122b52SXiaoyun Wang  * rte_flow_item is using big endian, rte_flow_attr and
73973122b52SXiaoyun Wang  * rte_flow_action are using CPU order.
74073122b52SXiaoyun Wang  * Because the pattern is used to describe the packets,
74173122b52SXiaoyun Wang  * normally the packets should use network order.
74273122b52SXiaoyun Wang  */
7431fe89aa3SXiaoyun Wang static int cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
74473122b52SXiaoyun Wang 			const struct rte_flow_item pattern[],
74573122b52SXiaoyun Wang 			const struct rte_flow_action actions[],
74673122b52SXiaoyun Wang 			struct rte_eth_ntuple_filter *filter,
74773122b52SXiaoyun Wang 			struct rte_flow_error *error)
74873122b52SXiaoyun Wang {
74973122b52SXiaoyun Wang 	const struct rte_flow_item *item = NULL;
75073122b52SXiaoyun Wang 
75173122b52SXiaoyun Wang 	if (hinic_check_filter_arg(attr, pattern, actions, error))
75273122b52SXiaoyun Wang 		return -rte_errno;
75373122b52SXiaoyun Wang 
75473122b52SXiaoyun Wang 	if (hinic_check_ntuple_item_ele(item, pattern, filter, error))
75573122b52SXiaoyun Wang 		return -rte_errno;
75673122b52SXiaoyun Wang 
75773122b52SXiaoyun Wang 	if (hinic_check_ntuple_act_ele(item, actions, filter, error))
75873122b52SXiaoyun Wang 		return -rte_errno;
75973122b52SXiaoyun Wang 
76073122b52SXiaoyun Wang 	if (hinic_check_ntuple_attr_ele(attr, filter, error))
76173122b52SXiaoyun Wang 		return -rte_errno;
76273122b52SXiaoyun Wang 
76373122b52SXiaoyun Wang 	return 0;
76473122b52SXiaoyun Wang }
76573122b52SXiaoyun Wang 
7661fe89aa3SXiaoyun Wang static int hinic_parse_ntuple_filter(struct rte_eth_dev *dev,
76773122b52SXiaoyun Wang 			const struct rte_flow_attr *attr,
76873122b52SXiaoyun Wang 			const struct rte_flow_item pattern[],
76973122b52SXiaoyun Wang 			const struct rte_flow_action actions[],
77073122b52SXiaoyun Wang 			struct rte_eth_ntuple_filter *filter,
77173122b52SXiaoyun Wang 			struct rte_flow_error *error)
77273122b52SXiaoyun Wang {
77373122b52SXiaoyun Wang 	int ret;
77473122b52SXiaoyun Wang 
77573122b52SXiaoyun Wang 	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
77673122b52SXiaoyun Wang 	if (ret)
77773122b52SXiaoyun Wang 		return ret;
77873122b52SXiaoyun Wang 
77973122b52SXiaoyun Wang 	/* Hinic doesn't support tcp flags */
78073122b52SXiaoyun Wang 	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
78173122b52SXiaoyun Wang 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
78273122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
78373122b52SXiaoyun Wang 				   RTE_FLOW_ERROR_TYPE_ITEM,
78473122b52SXiaoyun Wang 				   NULL, "Not supported by ntuple filter");
78573122b52SXiaoyun Wang 		return -rte_errno;
78673122b52SXiaoyun Wang 	}
78773122b52SXiaoyun Wang 
78873122b52SXiaoyun Wang 	/* Hinic doesn't support many priorities */
78973122b52SXiaoyun Wang 	if (filter->priority < HINIC_MIN_N_TUPLE_PRIO ||
79073122b52SXiaoyun Wang 	    filter->priority > HINIC_MAX_N_TUPLE_PRIO) {
79173122b52SXiaoyun Wang 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
79273122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
79373122b52SXiaoyun Wang 			RTE_FLOW_ERROR_TYPE_ITEM,
79473122b52SXiaoyun Wang 			NULL, "Priority not supported by ntuple filter");
79573122b52SXiaoyun Wang 		return -rte_errno;
79673122b52SXiaoyun Wang 	}
79773122b52SXiaoyun Wang 
79873122b52SXiaoyun Wang 	if (filter->queue >= dev->data->nb_rx_queues)
79973122b52SXiaoyun Wang 		return -rte_errno;
80073122b52SXiaoyun Wang 
80173122b52SXiaoyun Wang 	/* Fixed value for hinic */
80273122b52SXiaoyun Wang 	filter->flags = RTE_5TUPLE_FLAGS;
80373122b52SXiaoyun Wang 	return 0;
80473122b52SXiaoyun Wang }
80573122b52SXiaoyun Wang 
80673122b52SXiaoyun Wang static int hinic_normal_item_check_ether(const struct rte_flow_item **ip_item,
80773122b52SXiaoyun Wang 					const struct rte_flow_item pattern[],
80873122b52SXiaoyun Wang 					struct rte_flow_error *error)
80973122b52SXiaoyun Wang {
81073122b52SXiaoyun Wang 	const struct rte_flow_item *item;
81173122b52SXiaoyun Wang 
81273122b52SXiaoyun Wang 	/* The first not void item can be MAC or IPv4  or TCP or UDP */
81373122b52SXiaoyun Wang 	item = next_no_void_pattern(pattern, NULL);
81473122b52SXiaoyun Wang 
81573122b52SXiaoyun Wang 	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
81673122b52SXiaoyun Wang 		item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
81773122b52SXiaoyun Wang 		item->type != RTE_FLOW_ITEM_TYPE_TCP &&
81873122b52SXiaoyun Wang 		item->type != RTE_FLOW_ITEM_TYPE_UDP) {
81973122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
82073122b52SXiaoyun Wang 			RTE_FLOW_ERROR_TYPE_ITEM, item,
82173122b52SXiaoyun Wang 			"Not supported by fdir filter,support mac,ipv4,tcp,udp");
82273122b52SXiaoyun Wang 		return -rte_errno;
82373122b52SXiaoyun Wang 	}
82473122b52SXiaoyun Wang 
82573122b52SXiaoyun Wang 	/* Not supported last point for range */
82673122b52SXiaoyun Wang 	if (item->last) {
82773122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
82873122b52SXiaoyun Wang 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
82973122b52SXiaoyun Wang 			"Not supported last point for range");
83073122b52SXiaoyun Wang 		return -rte_errno;
83173122b52SXiaoyun Wang 	}
83273122b52SXiaoyun Wang 
83373122b52SXiaoyun Wang 	/* Skip Ethernet */
83473122b52SXiaoyun Wang 	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
83573122b52SXiaoyun Wang 		/* All should be masked. */
83673122b52SXiaoyun Wang 		if (item->spec || item->mask) {
83773122b52SXiaoyun Wang 			rte_flow_error_set(error, EINVAL,
83873122b52SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ITEM,
83973122b52SXiaoyun Wang 				item, "Not supported by fdir filter,support mac");
84073122b52SXiaoyun Wang 			return -rte_errno;
84173122b52SXiaoyun Wang 		}
84273122b52SXiaoyun Wang 		/* Check if the next not void item is IPv4 */
84373122b52SXiaoyun Wang 		item = next_no_void_pattern(pattern, item);
8449d441c45SXiaoyun Wang 		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
8459d441c45SXiaoyun Wang 			item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
84673122b52SXiaoyun Wang 			rte_flow_error_set(error, EINVAL,
84773122b52SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ITEM, item,
84873122b52SXiaoyun Wang 				"Not supported by fdir filter,support mac,ipv4");
84973122b52SXiaoyun Wang 			return -rte_errno;
85073122b52SXiaoyun Wang 		}
85173122b52SXiaoyun Wang 	}
85273122b52SXiaoyun Wang 
85373122b52SXiaoyun Wang 	*ip_item = item;
85473122b52SXiaoyun Wang 	return 0;
85573122b52SXiaoyun Wang }
85673122b52SXiaoyun Wang 
85773122b52SXiaoyun Wang static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
85873122b52SXiaoyun Wang 				const struct rte_flow_item pattern[],
85973122b52SXiaoyun Wang 				struct hinic_fdir_rule *rule,
86073122b52SXiaoyun Wang 				struct rte_flow_error *error)
86173122b52SXiaoyun Wang {
86273122b52SXiaoyun Wang 	const struct rte_flow_item_ipv4 *ipv4_spec;
86373122b52SXiaoyun Wang 	const struct rte_flow_item_ipv4 *ipv4_mask;
8649d441c45SXiaoyun Wang 	const struct rte_flow_item_ipv6 *ipv6_spec;
8659d441c45SXiaoyun Wang 	const struct rte_flow_item_ipv6 *ipv6_mask;
86673122b52SXiaoyun Wang 	const struct rte_flow_item *item = *in_out_item;
8679d441c45SXiaoyun Wang 	int i;
86873122b52SXiaoyun Wang 
86973122b52SXiaoyun Wang 	/* Get the IPv4 info */
87073122b52SXiaoyun Wang 	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
87173122b52SXiaoyun Wang 		/* Not supported last point for range */
87273122b52SXiaoyun Wang 		if (item->last) {
87373122b52SXiaoyun Wang 			rte_flow_error_set(error, EINVAL,
87473122b52SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
87573122b52SXiaoyun Wang 				item, "Not supported last point for range");
87673122b52SXiaoyun Wang 			return -rte_errno;
87773122b52SXiaoyun Wang 		}
87873122b52SXiaoyun Wang 
87973122b52SXiaoyun Wang 		if (!item->mask) {
88073122b52SXiaoyun Wang 			memset(rule, 0, sizeof(struct hinic_fdir_rule));
88173122b52SXiaoyun Wang 			rte_flow_error_set(error, EINVAL,
88273122b52SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ITEM,
88373122b52SXiaoyun Wang 				item, "Invalid fdir filter mask");
88473122b52SXiaoyun Wang 			return -rte_errno;
88573122b52SXiaoyun Wang 		}
88673122b52SXiaoyun Wang 
88773122b52SXiaoyun Wang 		ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
88873122b52SXiaoyun Wang 		/*
88973122b52SXiaoyun Wang 		 * Only support src & dst addresses,
89073122b52SXiaoyun Wang 		 * others should be masked.
89173122b52SXiaoyun Wang 		 */
89273122b52SXiaoyun Wang 		if (ipv4_mask->hdr.version_ihl ||
89373122b52SXiaoyun Wang 			ipv4_mask->hdr.type_of_service ||
89473122b52SXiaoyun Wang 			ipv4_mask->hdr.total_length ||
89573122b52SXiaoyun Wang 			ipv4_mask->hdr.packet_id ||
89673122b52SXiaoyun Wang 			ipv4_mask->hdr.fragment_offset ||
89773122b52SXiaoyun Wang 			ipv4_mask->hdr.time_to_live ||
89873122b52SXiaoyun Wang 			ipv4_mask->hdr.next_proto_id ||
89973122b52SXiaoyun Wang 			ipv4_mask->hdr.hdr_checksum) {
90073122b52SXiaoyun Wang 			rte_flow_error_set(error,
90173122b52SXiaoyun Wang 				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
90273122b52SXiaoyun Wang 				"Not supported by fdir filter, support src,dst ip");
90373122b52SXiaoyun Wang 			return -rte_errno;
90473122b52SXiaoyun Wang 		}
90573122b52SXiaoyun Wang 
90673122b52SXiaoyun Wang 		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
90773122b52SXiaoyun Wang 		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
9081fe89aa3SXiaoyun Wang 		rule->mode = HINIC_FDIR_MODE_NORMAL;
90973122b52SXiaoyun Wang 
91073122b52SXiaoyun Wang 		if (item->spec) {
91173122b52SXiaoyun Wang 			ipv4_spec =
91273122b52SXiaoyun Wang 				(const struct rte_flow_item_ipv4 *)item->spec;
91373122b52SXiaoyun Wang 			rule->hinic_fdir.dst_ip = ipv4_spec->hdr.dst_addr;
91473122b52SXiaoyun Wang 			rule->hinic_fdir.src_ip = ipv4_spec->hdr.src_addr;
91573122b52SXiaoyun Wang 		}
91673122b52SXiaoyun Wang 
91773122b52SXiaoyun Wang 		/*
91873122b52SXiaoyun Wang 		 * Check if the next not void item is
91973122b52SXiaoyun Wang 		 * TCP or UDP or END.
92073122b52SXiaoyun Wang 		 */
92173122b52SXiaoyun Wang 		item = next_no_void_pattern(pattern, item);
92273122b52SXiaoyun Wang 		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
92373122b52SXiaoyun Wang 		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
9241fe89aa3SXiaoyun Wang 		    item->type != RTE_FLOW_ITEM_TYPE_ICMP &&
9251fe89aa3SXiaoyun Wang 		    item->type != RTE_FLOW_ITEM_TYPE_ANY &&
92673122b52SXiaoyun Wang 		    item->type != RTE_FLOW_ITEM_TYPE_END) {
92773122b52SXiaoyun Wang 			memset(rule, 0, sizeof(struct hinic_fdir_rule));
92873122b52SXiaoyun Wang 			rte_flow_error_set(error, EINVAL,
92973122b52SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ITEM, item,
93073122b52SXiaoyun Wang 				"Not supported by fdir filter, support tcp, udp, end");
93173122b52SXiaoyun Wang 			return -rte_errno;
93273122b52SXiaoyun Wang 		}
9339d441c45SXiaoyun Wang 	} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
9349d441c45SXiaoyun Wang 		/* Not supported last point for range */
9359d441c45SXiaoyun Wang 		if (item->last) {
9369d441c45SXiaoyun Wang 			rte_flow_error_set(error, EINVAL,
9379d441c45SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9389d441c45SXiaoyun Wang 				item, "Not supported last point for range");
9399d441c45SXiaoyun Wang 			return -rte_errno;
9409d441c45SXiaoyun Wang 		}
9419d441c45SXiaoyun Wang 
9429d441c45SXiaoyun Wang 		if (!item->mask) {
9439d441c45SXiaoyun Wang 			memset(rule, 0, sizeof(struct hinic_fdir_rule));
9449d441c45SXiaoyun Wang 			rte_flow_error_set(error, EINVAL,
9459d441c45SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ITEM,
9469d441c45SXiaoyun Wang 				item, "Invalid fdir filter mask");
9479d441c45SXiaoyun Wang 			return -rte_errno;
9489d441c45SXiaoyun Wang 		}
9499d441c45SXiaoyun Wang 
9509d441c45SXiaoyun Wang 		ipv6_mask = (const struct rte_flow_item_ipv6 *)item->mask;
9519d441c45SXiaoyun Wang 
9529d441c45SXiaoyun Wang 		/* Only support dst addresses,  others should be masked */
9539d441c45SXiaoyun Wang 		if (ipv6_mask->hdr.vtc_flow ||
9549d441c45SXiaoyun Wang 		    ipv6_mask->hdr.payload_len ||
9559d441c45SXiaoyun Wang 		    ipv6_mask->hdr.proto ||
9569d441c45SXiaoyun Wang 		    ipv6_mask->hdr.hop_limits) {
9579d441c45SXiaoyun Wang 			rte_flow_error_set(error, EINVAL,
9589d441c45SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ITEM, item,
9599d441c45SXiaoyun Wang 				"Not supported by fdir filter, support dst ipv6");
9609d441c45SXiaoyun Wang 			return -rte_errno;
9619d441c45SXiaoyun Wang 		}
9629d441c45SXiaoyun Wang 
9639d441c45SXiaoyun Wang 		/* check ipv6 src addr mask, ipv6 src addr is 16 bytes */
9649d441c45SXiaoyun Wang 		for (i = 0; i < 16; i++) {
965*89b5642dSRobin Jarry 			if (ipv6_mask->hdr.src_addr.a[i] == UINT8_MAX) {
9669d441c45SXiaoyun Wang 				rte_flow_error_set(error, EINVAL,
9679d441c45SXiaoyun Wang 					RTE_FLOW_ERROR_TYPE_ITEM, item,
9689d441c45SXiaoyun Wang 					"Not supported by fdir filter, do not support src ipv6");
9699d441c45SXiaoyun Wang 				return -rte_errno;
9709d441c45SXiaoyun Wang 			}
9719d441c45SXiaoyun Wang 		}
9729d441c45SXiaoyun Wang 
9739d441c45SXiaoyun Wang 		if (!item->spec) {
9749d441c45SXiaoyun Wang 			rte_flow_error_set(error, EINVAL,
9759d441c45SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ITEM, item,
9769d441c45SXiaoyun Wang 				"Not supported by fdir filter, ipv6 spec is NULL");
9779d441c45SXiaoyun Wang 			return -rte_errno;
9789d441c45SXiaoyun Wang 		}
9799d441c45SXiaoyun Wang 
9809d441c45SXiaoyun Wang 		for (i = 0; i < 16; i++) {
981*89b5642dSRobin Jarry 			if (ipv6_mask->hdr.dst_addr.a[i] == UINT8_MAX)
9829d441c45SXiaoyun Wang 				rule->mask.dst_ipv6_mask |= 1 << i;
9839d441c45SXiaoyun Wang 		}
9849d441c45SXiaoyun Wang 
9859d441c45SXiaoyun Wang 		ipv6_spec = (const struct rte_flow_item_ipv6 *)item->spec;
9869d441c45SXiaoyun Wang 		rte_memcpy(rule->hinic_fdir.dst_ipv6,
987*89b5642dSRobin Jarry 			   &ipv6_spec->hdr.dst_addr, 16);
9889d441c45SXiaoyun Wang 
9899d441c45SXiaoyun Wang 		/*
9909d441c45SXiaoyun Wang 		 * Check if the next not void item is TCP or UDP or ICMP.
9919d441c45SXiaoyun Wang 		 */
9929d441c45SXiaoyun Wang 		item = next_no_void_pattern(pattern, item);
9939d441c45SXiaoyun Wang 		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
9949d441c45SXiaoyun Wang 		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
9959d441c45SXiaoyun Wang 		    item->type != RTE_FLOW_ITEM_TYPE_ICMP &&
9969d441c45SXiaoyun Wang 		    item->type != RTE_FLOW_ITEM_TYPE_ICMP6){
9979d441c45SXiaoyun Wang 			memset(rule, 0, sizeof(struct hinic_fdir_rule));
9989d441c45SXiaoyun Wang 			rte_flow_error_set(error, EINVAL,
9999d441c45SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ITEM, item,
10009d441c45SXiaoyun Wang 				"Not supported by fdir filter, support tcp, udp, icmp");
10019d441c45SXiaoyun Wang 			return -rte_errno;
10029d441c45SXiaoyun Wang 		}
100373122b52SXiaoyun Wang 	}
100473122b52SXiaoyun Wang 
100573122b52SXiaoyun Wang 	*in_out_item = item;
100673122b52SXiaoyun Wang 	return 0;
100773122b52SXiaoyun Wang }
100873122b52SXiaoyun Wang 
100973122b52SXiaoyun Wang static int hinic_normal_item_check_l4(const struct rte_flow_item **in_out_item,
10101fe89aa3SXiaoyun Wang 			__rte_unused const struct rte_flow_item pattern[],
10111fe89aa3SXiaoyun Wang 			__rte_unused struct hinic_fdir_rule *rule,
101273122b52SXiaoyun Wang 			struct rte_flow_error *error)
101373122b52SXiaoyun Wang {
101473122b52SXiaoyun Wang 	const struct rte_flow_item *item = *in_out_item;
101573122b52SXiaoyun Wang 
101673122b52SXiaoyun Wang 	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
101773122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
101873122b52SXiaoyun Wang 			RTE_FLOW_ERROR_TYPE_ITEM,
10191fe89aa3SXiaoyun Wang 			item, "Not supported by normal fdir filter, not support l4");
102073122b52SXiaoyun Wang 		return -rte_errno;
102173122b52SXiaoyun Wang 	}
102273122b52SXiaoyun Wang 
102373122b52SXiaoyun Wang 	return 0;
102473122b52SXiaoyun Wang }
102573122b52SXiaoyun Wang 
10261fe89aa3SXiaoyun Wang 
102773122b52SXiaoyun Wang static int hinic_normal_item_check_end(const struct rte_flow_item *item,
102873122b52SXiaoyun Wang 					struct hinic_fdir_rule *rule,
102973122b52SXiaoyun Wang 					struct rte_flow_error *error)
103073122b52SXiaoyun Wang {
103173122b52SXiaoyun Wang 	/* Check if the next not void item is END */
103273122b52SXiaoyun Wang 	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
103373122b52SXiaoyun Wang 		memset(rule, 0, sizeof(struct hinic_fdir_rule));
103473122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
103573122b52SXiaoyun Wang 			RTE_FLOW_ERROR_TYPE_ITEM,
103673122b52SXiaoyun Wang 			item, "Not supported by fdir filter, support end");
103773122b52SXiaoyun Wang 		return -rte_errno;
103873122b52SXiaoyun Wang 	}
103973122b52SXiaoyun Wang 
104073122b52SXiaoyun Wang 	return 0;
104173122b52SXiaoyun Wang }
104273122b52SXiaoyun Wang 
104373122b52SXiaoyun Wang static int hinic_check_normal_item_ele(const struct rte_flow_item *item,
104473122b52SXiaoyun Wang 					const struct rte_flow_item pattern[],
104573122b52SXiaoyun Wang 					struct hinic_fdir_rule *rule,
104673122b52SXiaoyun Wang 					struct rte_flow_error *error)
104773122b52SXiaoyun Wang {
104873122b52SXiaoyun Wang 	if (hinic_normal_item_check_ether(&item, pattern, error) ||
104973122b52SXiaoyun Wang 	    hinic_normal_item_check_ip(&item, pattern, rule, error) ||
105073122b52SXiaoyun Wang 	    hinic_normal_item_check_l4(&item, pattern, rule, error) ||
105173122b52SXiaoyun Wang 	    hinic_normal_item_check_end(item, rule, error))
105273122b52SXiaoyun Wang 		return -rte_errno;
105373122b52SXiaoyun Wang 
105473122b52SXiaoyun Wang 	return 0;
105573122b52SXiaoyun Wang }
105673122b52SXiaoyun Wang 
10571fe89aa3SXiaoyun Wang static int
10581fe89aa3SXiaoyun Wang hinic_tcam_normal_item_check_l4(const struct rte_flow_item **in_out_item,
10591fe89aa3SXiaoyun Wang 				const struct rte_flow_item pattern[],
10601fe89aa3SXiaoyun Wang 				struct hinic_fdir_rule *rule,
10611fe89aa3SXiaoyun Wang 				struct rte_flow_error *error)
10621fe89aa3SXiaoyun Wang {
10631fe89aa3SXiaoyun Wang 	const struct rte_flow_item *item = *in_out_item;
10641fe89aa3SXiaoyun Wang 	const struct rte_flow_item_tcp *tcp_spec;
10651fe89aa3SXiaoyun Wang 	const struct rte_flow_item_tcp *tcp_mask;
10661fe89aa3SXiaoyun Wang 	const struct rte_flow_item_udp *udp_spec;
10671fe89aa3SXiaoyun Wang 	const struct rte_flow_item_udp *udp_mask;
10681fe89aa3SXiaoyun Wang 
10691fe89aa3SXiaoyun Wang 	if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
10701fe89aa3SXiaoyun Wang 		rule->mode = HINIC_FDIR_MODE_TCAM;
10711fe89aa3SXiaoyun Wang 		rule->mask.proto_mask = UINT16_MAX;
10721fe89aa3SXiaoyun Wang 		rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_ICMP;
10739d441c45SXiaoyun Wang 	} else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP6) {
10749d441c45SXiaoyun Wang 		rule->mode = HINIC_FDIR_MODE_TCAM;
10759d441c45SXiaoyun Wang 		rule->mask.proto_mask = UINT16_MAX;
10769d441c45SXiaoyun Wang 		rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_ICMPV6;
10771fe89aa3SXiaoyun Wang 	} else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) {
10781fe89aa3SXiaoyun Wang 		rule->mode = HINIC_FDIR_MODE_TCAM;
10791fe89aa3SXiaoyun Wang 	} else if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
10801fe89aa3SXiaoyun Wang 		if (!item->mask) {
10811fe89aa3SXiaoyun Wang 			(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
10821fe89aa3SXiaoyun Wang 			rte_flow_error_set(error, EINVAL,
10831fe89aa3SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ITEM,
10841fe89aa3SXiaoyun Wang 				item, "Not supported by fdir filter, support src, dst ports");
10851fe89aa3SXiaoyun Wang 			return -rte_errno;
10861fe89aa3SXiaoyun Wang 		}
10871fe89aa3SXiaoyun Wang 
10881fe89aa3SXiaoyun Wang 		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
10891fe89aa3SXiaoyun Wang 
10901fe89aa3SXiaoyun Wang 		/*
10911fe89aa3SXiaoyun Wang 		 * Only support src & dst ports, tcp flags,
10921fe89aa3SXiaoyun Wang 		 * others should be masked.
10931fe89aa3SXiaoyun Wang 		 */
10941fe89aa3SXiaoyun Wang 		if (tcp_mask->hdr.sent_seq ||
10951fe89aa3SXiaoyun Wang 			tcp_mask->hdr.recv_ack ||
10961fe89aa3SXiaoyun Wang 			tcp_mask->hdr.data_off ||
10971fe89aa3SXiaoyun Wang 			tcp_mask->hdr.rx_win ||
10981fe89aa3SXiaoyun Wang 			tcp_mask->hdr.cksum ||
10991fe89aa3SXiaoyun Wang 			tcp_mask->hdr.tcp_urp) {
11001fe89aa3SXiaoyun Wang 			(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
11011fe89aa3SXiaoyun Wang 			rte_flow_error_set(error, EINVAL,
11021fe89aa3SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ITEM,
11031fe89aa3SXiaoyun Wang 				item, "Not supported by fdir normal tcam filter");
11041fe89aa3SXiaoyun Wang 			return -rte_errno;
11051fe89aa3SXiaoyun Wang 		}
11061fe89aa3SXiaoyun Wang 
11071fe89aa3SXiaoyun Wang 		rule->mode = HINIC_FDIR_MODE_TCAM;
11081fe89aa3SXiaoyun Wang 		rule->mask.proto_mask = UINT16_MAX;
11091fe89aa3SXiaoyun Wang 		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
11101fe89aa3SXiaoyun Wang 		rule->mask.src_port_mask = tcp_mask->hdr.src_port;
11111fe89aa3SXiaoyun Wang 
11121fe89aa3SXiaoyun Wang 		rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_TCP;
11131fe89aa3SXiaoyun Wang 		if (item->spec) {
11141fe89aa3SXiaoyun Wang 			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
11151fe89aa3SXiaoyun Wang 			rule->hinic_fdir.dst_port = tcp_spec->hdr.dst_port;
11161fe89aa3SXiaoyun Wang 			rule->hinic_fdir.src_port = tcp_spec->hdr.src_port;
11171fe89aa3SXiaoyun Wang 		}
11181fe89aa3SXiaoyun Wang 	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
11191fe89aa3SXiaoyun Wang 		/*
11201fe89aa3SXiaoyun Wang 		 * Only care about src & dst ports,
11211fe89aa3SXiaoyun Wang 		 * others should be masked.
11221fe89aa3SXiaoyun Wang 		 */
11231fe89aa3SXiaoyun Wang 		if (!item->mask) {
11241fe89aa3SXiaoyun Wang 			(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
11251fe89aa3SXiaoyun Wang 			rte_flow_error_set(error, EINVAL,
11261fe89aa3SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ITEM,
11271fe89aa3SXiaoyun Wang 				item, "Not supported by fdir filter, support src, dst ports");
11281fe89aa3SXiaoyun Wang 			return -rte_errno;
11291fe89aa3SXiaoyun Wang 		}
11301fe89aa3SXiaoyun Wang 
11311fe89aa3SXiaoyun Wang 		udp_mask = (const struct rte_flow_item_udp *)item->mask;
11321fe89aa3SXiaoyun Wang 		if (udp_mask->hdr.dgram_len ||
11331fe89aa3SXiaoyun Wang 			udp_mask->hdr.dgram_cksum) {
11341fe89aa3SXiaoyun Wang 			(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
11351fe89aa3SXiaoyun Wang 			rte_flow_error_set(error, EINVAL,
11361fe89aa3SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ITEM,
11371fe89aa3SXiaoyun Wang 				item, "Not supported by fdir filter, support udp");
11381fe89aa3SXiaoyun Wang 			return -rte_errno;
11391fe89aa3SXiaoyun Wang 		}
11401fe89aa3SXiaoyun Wang 
11411fe89aa3SXiaoyun Wang 		rule->mode = HINIC_FDIR_MODE_TCAM;
11421fe89aa3SXiaoyun Wang 		rule->mask.proto_mask = UINT16_MAX;
11431fe89aa3SXiaoyun Wang 		rule->mask.src_port_mask = udp_mask->hdr.src_port;
11441fe89aa3SXiaoyun Wang 		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
11451fe89aa3SXiaoyun Wang 
11461fe89aa3SXiaoyun Wang 		rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_UDP;
11471fe89aa3SXiaoyun Wang 		if (item->spec) {
11481fe89aa3SXiaoyun Wang 			udp_spec = (const struct rte_flow_item_udp *)item->spec;
11491fe89aa3SXiaoyun Wang 			rule->hinic_fdir.src_port = udp_spec->hdr.src_port;
11501fe89aa3SXiaoyun Wang 			rule->hinic_fdir.dst_port = udp_spec->hdr.dst_port;
11511fe89aa3SXiaoyun Wang 		}
11521fe89aa3SXiaoyun Wang 	} else {
11531fe89aa3SXiaoyun Wang 		(void)memset(rule,  0, sizeof(struct hinic_fdir_rule));
11541fe89aa3SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
11551fe89aa3SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ITEM,
11561fe89aa3SXiaoyun Wang 				item, "Not supported by fdir filter tcam normal, l4 only support icmp, tcp");
11571fe89aa3SXiaoyun Wang 		return -rte_errno;
11581fe89aa3SXiaoyun Wang 	}
11591fe89aa3SXiaoyun Wang 
11601fe89aa3SXiaoyun Wang 	item = next_no_void_pattern(pattern, item);
11611fe89aa3SXiaoyun Wang 	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
11621fe89aa3SXiaoyun Wang 		(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
11631fe89aa3SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
11641fe89aa3SXiaoyun Wang 			RTE_FLOW_ERROR_TYPE_ITEM,
11651fe89aa3SXiaoyun Wang 			item, "Not supported by fdir filter tcam normal, support end");
11661fe89aa3SXiaoyun Wang 		return -rte_errno;
11671fe89aa3SXiaoyun Wang 	}
11681fe89aa3SXiaoyun Wang 
11691fe89aa3SXiaoyun Wang 	/* get next no void item */
11701fe89aa3SXiaoyun Wang 	*in_out_item = item;
11711fe89aa3SXiaoyun Wang 
11721fe89aa3SXiaoyun Wang 	return 0;
11731fe89aa3SXiaoyun Wang }
11741fe89aa3SXiaoyun Wang 
11751fe89aa3SXiaoyun Wang static int hinic_check_tcam_normal_item_ele(const struct rte_flow_item *item,
11761fe89aa3SXiaoyun Wang 					const struct rte_flow_item pattern[],
11771fe89aa3SXiaoyun Wang 					struct hinic_fdir_rule *rule,
11781fe89aa3SXiaoyun Wang 					struct rte_flow_error *error)
11791fe89aa3SXiaoyun Wang {
11801fe89aa3SXiaoyun Wang 	if (hinic_normal_item_check_ether(&item, pattern, error) ||
11811fe89aa3SXiaoyun Wang 		hinic_normal_item_check_ip(&item, pattern, rule, error) ||
11821fe89aa3SXiaoyun Wang 		hinic_tcam_normal_item_check_l4(&item, pattern, rule, error) ||
11831fe89aa3SXiaoyun Wang 		hinic_normal_item_check_end(item, rule, error))
11841fe89aa3SXiaoyun Wang 		return -rte_errno;
11851fe89aa3SXiaoyun Wang 
11861fe89aa3SXiaoyun Wang 	return 0;
11871fe89aa3SXiaoyun Wang }
11881fe89aa3SXiaoyun Wang 
11891fe89aa3SXiaoyun Wang static int hinic_tunnel_item_check_l4(const struct rte_flow_item **in_out_item,
11901fe89aa3SXiaoyun Wang 					const struct rte_flow_item pattern[],
11911fe89aa3SXiaoyun Wang 					struct hinic_fdir_rule *rule,
11921fe89aa3SXiaoyun Wang 					struct rte_flow_error *error)
11931fe89aa3SXiaoyun Wang {
11941fe89aa3SXiaoyun Wang 	const struct rte_flow_item *item = *in_out_item;
11951fe89aa3SXiaoyun Wang 
11961fe89aa3SXiaoyun Wang 	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
11971fe89aa3SXiaoyun Wang 		item = next_no_void_pattern(pattern, item);
11981fe89aa3SXiaoyun Wang 		if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
11991fe89aa3SXiaoyun Wang 			(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
12001fe89aa3SXiaoyun Wang 			rte_flow_error_set(error, EINVAL,
12011fe89aa3SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ITEM,
12021fe89aa3SXiaoyun Wang 				item, "Not supported by fdir filter, support vxlan");
12031fe89aa3SXiaoyun Wang 			return -rte_errno;
12041fe89aa3SXiaoyun Wang 		}
12051fe89aa3SXiaoyun Wang 
12061fe89aa3SXiaoyun Wang 		*in_out_item = item;
12071fe89aa3SXiaoyun Wang 	} else {
12081fe89aa3SXiaoyun Wang 		(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
12091fe89aa3SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
12101fe89aa3SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ITEM,
12111fe89aa3SXiaoyun Wang 				item, "Not supported by fdir filter tcam tunnel, outer l4 only support udp");
12121fe89aa3SXiaoyun Wang 		return -rte_errno;
12131fe89aa3SXiaoyun Wang 	}
12141fe89aa3SXiaoyun Wang 
12151fe89aa3SXiaoyun Wang 	return 0;
12161fe89aa3SXiaoyun Wang }
12171fe89aa3SXiaoyun Wang 
12181fe89aa3SXiaoyun Wang static int
12191fe89aa3SXiaoyun Wang hinic_tunnel_item_check_vxlan(const struct rte_flow_item **in_out_item,
12201fe89aa3SXiaoyun Wang 				const struct rte_flow_item pattern[],
12211fe89aa3SXiaoyun Wang 				struct hinic_fdir_rule *rule,
12221fe89aa3SXiaoyun Wang 				struct rte_flow_error *error)
12231fe89aa3SXiaoyun Wang {
12241fe89aa3SXiaoyun Wang 	const struct rte_flow_item *item = *in_out_item;
12251fe89aa3SXiaoyun Wang 
12261fe89aa3SXiaoyun Wang 
12271fe89aa3SXiaoyun Wang 	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
12281fe89aa3SXiaoyun Wang 		item = next_no_void_pattern(pattern, item);
12291fe89aa3SXiaoyun Wang 		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
12301fe89aa3SXiaoyun Wang 		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
12311fe89aa3SXiaoyun Wang 		    item->type != RTE_FLOW_ITEM_TYPE_ANY) {
12321fe89aa3SXiaoyun Wang 			(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
12331fe89aa3SXiaoyun Wang 			rte_flow_error_set(error, EINVAL,
12341fe89aa3SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ITEM,
12351fe89aa3SXiaoyun Wang 				item, "Not supported by fdir filter, support tcp/udp");
12361fe89aa3SXiaoyun Wang 			return -rte_errno;
12371fe89aa3SXiaoyun Wang 		}
12381fe89aa3SXiaoyun Wang 
12391fe89aa3SXiaoyun Wang 		*in_out_item = item;
12401fe89aa3SXiaoyun Wang 	}
12411fe89aa3SXiaoyun Wang 
12421fe89aa3SXiaoyun Wang 	return 0;
12431fe89aa3SXiaoyun Wang }
12441fe89aa3SXiaoyun Wang 
12451fe89aa3SXiaoyun Wang static int
12461fe89aa3SXiaoyun Wang hinic_tunnel_inner_item_check_l4(const struct rte_flow_item **in_out_item,
12471fe89aa3SXiaoyun Wang 				const struct rte_flow_item pattern[],
12481fe89aa3SXiaoyun Wang 				struct hinic_fdir_rule *rule,
12491fe89aa3SXiaoyun Wang 				struct rte_flow_error *error)
12501fe89aa3SXiaoyun Wang {
12511fe89aa3SXiaoyun Wang 	const struct rte_flow_item_tcp *tcp_spec;
12521fe89aa3SXiaoyun Wang 	const struct rte_flow_item_tcp *tcp_mask;
12531fe89aa3SXiaoyun Wang 	const struct rte_flow_item_udp *udp_spec;
12541fe89aa3SXiaoyun Wang 	const struct rte_flow_item_udp *udp_mask;
12551fe89aa3SXiaoyun Wang 	const struct rte_flow_item *item = *in_out_item;
12561fe89aa3SXiaoyun Wang 
12571fe89aa3SXiaoyun Wang 	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
12581fe89aa3SXiaoyun Wang 		/* Not supported last point for range */
12591fe89aa3SXiaoyun Wang 		if (item->last) {
12601fe89aa3SXiaoyun Wang 			memset(rule, 0, sizeof(struct hinic_fdir_rule));
12611fe89aa3SXiaoyun Wang 			rte_flow_error_set(error, EINVAL,
12621fe89aa3SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12631fe89aa3SXiaoyun Wang 				item, "Not supported last point for range");
12641fe89aa3SXiaoyun Wang 			return -rte_errno;
12651fe89aa3SXiaoyun Wang 		}
12661fe89aa3SXiaoyun Wang 
12671fe89aa3SXiaoyun Wang 		/* get the TCP/UDP info */
12681fe89aa3SXiaoyun Wang 		if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
12691fe89aa3SXiaoyun Wang 			/*
12701fe89aa3SXiaoyun Wang 			 * Only care about src & dst ports,
12711fe89aa3SXiaoyun Wang 			 * others should be masked.
12721fe89aa3SXiaoyun Wang 			 */
12731fe89aa3SXiaoyun Wang 			if (!item->mask) {
12741fe89aa3SXiaoyun Wang 				memset(rule, 0, sizeof(struct hinic_fdir_rule));
12751fe89aa3SXiaoyun Wang 				rte_flow_error_set(error, EINVAL,
12761fe89aa3SXiaoyun Wang 					RTE_FLOW_ERROR_TYPE_ITEM,
12771fe89aa3SXiaoyun Wang 					item, "Not supported by fdir filter, support src, dst ports");
12781fe89aa3SXiaoyun Wang 				return -rte_errno;
12791fe89aa3SXiaoyun Wang 			}
12801fe89aa3SXiaoyun Wang 
12811fe89aa3SXiaoyun Wang 			tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
12821fe89aa3SXiaoyun Wang 			if (tcp_mask->hdr.sent_seq ||
12831fe89aa3SXiaoyun Wang 				tcp_mask->hdr.recv_ack ||
12841fe89aa3SXiaoyun Wang 				tcp_mask->hdr.data_off ||
12851fe89aa3SXiaoyun Wang 				tcp_mask->hdr.tcp_flags ||
12861fe89aa3SXiaoyun Wang 				tcp_mask->hdr.rx_win ||
12871fe89aa3SXiaoyun Wang 				tcp_mask->hdr.cksum ||
12881fe89aa3SXiaoyun Wang 				tcp_mask->hdr.tcp_urp) {
12891fe89aa3SXiaoyun Wang 				(void)memset(rule, 0,
12901fe89aa3SXiaoyun Wang 					sizeof(struct hinic_fdir_rule));
12911fe89aa3SXiaoyun Wang 				rte_flow_error_set(error, EINVAL,
12921fe89aa3SXiaoyun Wang 					RTE_FLOW_ERROR_TYPE_ITEM,
12931fe89aa3SXiaoyun Wang 					item, "Not supported by fdir filter, support tcp");
12941fe89aa3SXiaoyun Wang 				return -rte_errno;
12951fe89aa3SXiaoyun Wang 			}
12961fe89aa3SXiaoyun Wang 
12971fe89aa3SXiaoyun Wang 			rule->mode = HINIC_FDIR_MODE_TCAM;
12981fe89aa3SXiaoyun Wang 			rule->mask.tunnel_flag = UINT16_MAX;
12991fe89aa3SXiaoyun Wang 			rule->mask.tunnel_inner_src_port_mask =
13001fe89aa3SXiaoyun Wang 							tcp_mask->hdr.src_port;
13011fe89aa3SXiaoyun Wang 			rule->mask.tunnel_inner_dst_port_mask =
13021fe89aa3SXiaoyun Wang 							tcp_mask->hdr.dst_port;
13031fe89aa3SXiaoyun Wang 			rule->mask.proto_mask = UINT16_MAX;
13041fe89aa3SXiaoyun Wang 
13051fe89aa3SXiaoyun Wang 			rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_TCP;
13061fe89aa3SXiaoyun Wang 			if (item->spec) {
13071fe89aa3SXiaoyun Wang 				tcp_spec =
13081fe89aa3SXiaoyun Wang 				(const struct rte_flow_item_tcp *)item->spec;
13091fe89aa3SXiaoyun Wang 				rule->hinic_fdir.tunnel_inner_src_port =
13101fe89aa3SXiaoyun Wang 							tcp_spec->hdr.src_port;
13111fe89aa3SXiaoyun Wang 				rule->hinic_fdir.tunnel_inner_dst_port =
13121fe89aa3SXiaoyun Wang 							tcp_spec->hdr.dst_port;
13131fe89aa3SXiaoyun Wang 			}
13141fe89aa3SXiaoyun Wang 		} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
13151fe89aa3SXiaoyun Wang 			/*
13161fe89aa3SXiaoyun Wang 			 * Only care about src & dst ports,
13171fe89aa3SXiaoyun Wang 			 * others should be masked.
13181fe89aa3SXiaoyun Wang 			 */
13191fe89aa3SXiaoyun Wang 			if (!item->mask) {
13201fe89aa3SXiaoyun Wang 				memset(rule, 0, sizeof(struct hinic_fdir_rule));
13211fe89aa3SXiaoyun Wang 				rte_flow_error_set(error, EINVAL,
13221fe89aa3SXiaoyun Wang 					RTE_FLOW_ERROR_TYPE_ITEM,
13231fe89aa3SXiaoyun Wang 					item, "Not supported by fdir filter, support src, dst ports");
13241fe89aa3SXiaoyun Wang 				return -rte_errno;
13251fe89aa3SXiaoyun Wang 			}
13261fe89aa3SXiaoyun Wang 
13271fe89aa3SXiaoyun Wang 			udp_mask = (const struct rte_flow_item_udp *)item->mask;
13281fe89aa3SXiaoyun Wang 			if (udp_mask->hdr.dgram_len ||
13291fe89aa3SXiaoyun Wang 			    udp_mask->hdr.dgram_cksum) {
13301fe89aa3SXiaoyun Wang 				memset(rule, 0, sizeof(struct hinic_fdir_rule));
13311fe89aa3SXiaoyun Wang 				rte_flow_error_set(error, EINVAL,
13321fe89aa3SXiaoyun Wang 					RTE_FLOW_ERROR_TYPE_ITEM,
13331fe89aa3SXiaoyun Wang 					item, "Not supported by fdir filter, support udp");
13341fe89aa3SXiaoyun Wang 				return -rte_errno;
13351fe89aa3SXiaoyun Wang 			}
13361fe89aa3SXiaoyun Wang 
13371fe89aa3SXiaoyun Wang 			rule->mode = HINIC_FDIR_MODE_TCAM;
13381fe89aa3SXiaoyun Wang 			rule->mask.tunnel_flag = UINT16_MAX;
13391fe89aa3SXiaoyun Wang 			rule->mask.tunnel_inner_src_port_mask =
13401fe89aa3SXiaoyun Wang 							udp_mask->hdr.src_port;
13411fe89aa3SXiaoyun Wang 			rule->mask.tunnel_inner_dst_port_mask =
13421fe89aa3SXiaoyun Wang 							udp_mask->hdr.dst_port;
13431fe89aa3SXiaoyun Wang 			rule->mask.proto_mask = UINT16_MAX;
13441fe89aa3SXiaoyun Wang 
13451fe89aa3SXiaoyun Wang 			rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_UDP;
13461fe89aa3SXiaoyun Wang 			if (item->spec) {
13471fe89aa3SXiaoyun Wang 				udp_spec =
13481fe89aa3SXiaoyun Wang 				(const struct rte_flow_item_udp *)item->spec;
13491fe89aa3SXiaoyun Wang 				rule->hinic_fdir.tunnel_inner_src_port =
13501fe89aa3SXiaoyun Wang 							udp_spec->hdr.src_port;
13511fe89aa3SXiaoyun Wang 				rule->hinic_fdir.tunnel_inner_dst_port =
13521fe89aa3SXiaoyun Wang 							udp_spec->hdr.dst_port;
13531fe89aa3SXiaoyun Wang 			}
13541fe89aa3SXiaoyun Wang 		} else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) {
13551fe89aa3SXiaoyun Wang 			rule->mode = HINIC_FDIR_MODE_TCAM;
13561fe89aa3SXiaoyun Wang 			rule->mask.tunnel_flag = UINT16_MAX;
13571fe89aa3SXiaoyun Wang 		} else {
13581fe89aa3SXiaoyun Wang 			memset(rule, 0, sizeof(struct hinic_fdir_rule));
13591fe89aa3SXiaoyun Wang 			rte_flow_error_set(error, EINVAL,
13601fe89aa3SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_ITEM,
13611fe89aa3SXiaoyun Wang 				item, "Not supported by fdir filter, support tcp/udp");
13621fe89aa3SXiaoyun Wang 			return -rte_errno;
13631fe89aa3SXiaoyun Wang 		}
13641fe89aa3SXiaoyun Wang 
13651fe89aa3SXiaoyun Wang 		/* get next no void item */
13661fe89aa3SXiaoyun Wang 		*in_out_item = next_no_void_pattern(pattern, item);
13671fe89aa3SXiaoyun Wang 	}
13681fe89aa3SXiaoyun Wang 
13691fe89aa3SXiaoyun Wang 	return 0;
13701fe89aa3SXiaoyun Wang }
13711fe89aa3SXiaoyun Wang 
13721fe89aa3SXiaoyun Wang static int hinic_check_tcam_tunnel_item_ele(const struct rte_flow_item *item,
13731fe89aa3SXiaoyun Wang 					const struct rte_flow_item pattern[],
13741fe89aa3SXiaoyun Wang 					struct hinic_fdir_rule *rule,
13751fe89aa3SXiaoyun Wang 					struct rte_flow_error *error)
13761fe89aa3SXiaoyun Wang {
13771fe89aa3SXiaoyun Wang 	if (hinic_normal_item_check_ether(&item, pattern, error) ||
13781fe89aa3SXiaoyun Wang 		hinic_normal_item_check_ip(&item, pattern, rule, error) ||
13791fe89aa3SXiaoyun Wang 		hinic_tunnel_item_check_l4(&item, pattern, rule, error) ||
13801fe89aa3SXiaoyun Wang 		hinic_tunnel_item_check_vxlan(&item, pattern, rule, error) ||
13811fe89aa3SXiaoyun Wang 		hinic_tunnel_inner_item_check_l4(&item, pattern, rule, error) ||
13821fe89aa3SXiaoyun Wang 		hinic_normal_item_check_end(item, rule, error))
13831fe89aa3SXiaoyun Wang 		return -rte_errno;
13841fe89aa3SXiaoyun Wang 
13851fe89aa3SXiaoyun Wang 	return 0;
13861fe89aa3SXiaoyun Wang }
13871fe89aa3SXiaoyun Wang 
138873122b52SXiaoyun Wang static int hinic_check_normal_attr_ele(const struct rte_flow_attr *attr,
138973122b52SXiaoyun Wang 					struct hinic_fdir_rule *rule,
139073122b52SXiaoyun Wang 					struct rte_flow_error *error)
139173122b52SXiaoyun Wang {
139273122b52SXiaoyun Wang 	/* Must be input direction */
139373122b52SXiaoyun Wang 	if (!attr->ingress) {
139473122b52SXiaoyun Wang 		memset(rule, 0, sizeof(struct hinic_fdir_rule));
139573122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
139673122b52SXiaoyun Wang 				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
139773122b52SXiaoyun Wang 				   attr, "Only support ingress.");
139873122b52SXiaoyun Wang 		return -rte_errno;
139973122b52SXiaoyun Wang 	}
140073122b52SXiaoyun Wang 
140173122b52SXiaoyun Wang 	/* Not supported */
140273122b52SXiaoyun Wang 	if (attr->egress) {
140373122b52SXiaoyun Wang 		memset(rule, 0, sizeof(struct hinic_fdir_rule));
140473122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
140573122b52SXiaoyun Wang 				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
140673122b52SXiaoyun Wang 				   attr, "Not support egress.");
140773122b52SXiaoyun Wang 		return -rte_errno;
140873122b52SXiaoyun Wang 	}
140973122b52SXiaoyun Wang 
141073122b52SXiaoyun Wang 	/* Not supported */
141173122b52SXiaoyun Wang 	if (attr->priority) {
141273122b52SXiaoyun Wang 		memset(rule, 0, sizeof(struct hinic_fdir_rule));
141373122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
141473122b52SXiaoyun Wang 			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
141573122b52SXiaoyun Wang 			attr, "Not support priority.");
141673122b52SXiaoyun Wang 		return -rte_errno;
141773122b52SXiaoyun Wang 	}
141873122b52SXiaoyun Wang 
141973122b52SXiaoyun Wang 	return 0;
142073122b52SXiaoyun Wang }
142173122b52SXiaoyun Wang 
142273122b52SXiaoyun Wang static int hinic_check_normal_act_ele(const struct rte_flow_item *item,
142373122b52SXiaoyun Wang 				const struct rte_flow_action actions[],
142473122b52SXiaoyun Wang 				struct hinic_fdir_rule *rule,
142573122b52SXiaoyun Wang 				struct rte_flow_error *error)
142673122b52SXiaoyun Wang {
142773122b52SXiaoyun Wang 	const struct rte_flow_action *act;
142873122b52SXiaoyun Wang 
142973122b52SXiaoyun Wang 	/* Check if the first not void action is QUEUE */
143073122b52SXiaoyun Wang 	act = next_no_void_action(actions, NULL);
143173122b52SXiaoyun Wang 	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
143273122b52SXiaoyun Wang 		memset(rule, 0, sizeof(struct hinic_fdir_rule));
143373122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
143473122b52SXiaoyun Wang 			item, "Not supported action.");
143573122b52SXiaoyun Wang 		return -rte_errno;
143673122b52SXiaoyun Wang 	}
143773122b52SXiaoyun Wang 
143873122b52SXiaoyun Wang 	rule->queue = ((const struct rte_flow_action_queue *)act->conf)->index;
143973122b52SXiaoyun Wang 
144073122b52SXiaoyun Wang 	/* Check if the next not void item is END */
144173122b52SXiaoyun Wang 	act = next_no_void_action(actions, act);
144273122b52SXiaoyun Wang 	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
144373122b52SXiaoyun Wang 		memset(rule, 0, sizeof(struct hinic_fdir_rule));
144473122b52SXiaoyun Wang 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
144573122b52SXiaoyun Wang 			act, "Not supported action.");
144673122b52SXiaoyun Wang 		return -rte_errno;
144773122b52SXiaoyun Wang 	}
144873122b52SXiaoyun Wang 
144973122b52SXiaoyun Wang 	return 0;
145073122b52SXiaoyun Wang }
145173122b52SXiaoyun Wang 
145273122b52SXiaoyun Wang /**
145373122b52SXiaoyun Wang  * Parse the rule to see if it is a IP or MAC VLAN flow director rule.
145473122b52SXiaoyun Wang  * And get the flow director filter info BTW.
145573122b52SXiaoyun Wang  * UDP/TCP/SCTP PATTERN:
145673122b52SXiaoyun Wang  * The first not void item can be ETH or IPV4 or IPV6
145773122b52SXiaoyun Wang  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
145873122b52SXiaoyun Wang  * The next not void item could be UDP or TCP(optional)
145973122b52SXiaoyun Wang  * The next not void item must be END.
146073122b52SXiaoyun Wang  * ACTION:
146173122b52SXiaoyun Wang  * The first not void action should be QUEUE.
146273122b52SXiaoyun Wang  * The second not void optional action should be MARK,
146373122b52SXiaoyun Wang  * mark_id is a uint32_t number.
146473122b52SXiaoyun Wang  * The next not void action should be END.
146573122b52SXiaoyun Wang  * UDP/TCP pattern example:
146673122b52SXiaoyun Wang  * ITEM          Spec	                                    Mask
146773122b52SXiaoyun Wang  * ETH            NULL                                    NULL
146873122b52SXiaoyun Wang  * IPV4           src_addr  1.2.3.6                 0xFFFFFFFF
146973122b52SXiaoyun Wang  *                   dst_addr  1.2.3.5                 0xFFFFFFFF
147073122b52SXiaoyun Wang  * UDP/TCP    src_port  80                         0xFFFF
147173122b52SXiaoyun Wang  *                   dst_port  80                         0xFFFF
147273122b52SXiaoyun Wang  * END
147373122b52SXiaoyun Wang  * Other members in mask and spec should set to 0x00.
147473122b52SXiaoyun Wang  * Item->last should be NULL.
147573122b52SXiaoyun Wang  */
147673122b52SXiaoyun Wang static int
147773122b52SXiaoyun Wang hinic_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
147873122b52SXiaoyun Wang 			       const struct rte_flow_item pattern[],
147973122b52SXiaoyun Wang 			       const struct rte_flow_action actions[],
148073122b52SXiaoyun Wang 			       struct hinic_fdir_rule *rule,
148173122b52SXiaoyun Wang 			       struct rte_flow_error *error)
148273122b52SXiaoyun Wang {
148373122b52SXiaoyun Wang 	const struct rte_flow_item *item = NULL;
148473122b52SXiaoyun Wang 
148573122b52SXiaoyun Wang 	if (hinic_check_filter_arg(attr, pattern, actions, error))
148673122b52SXiaoyun Wang 		return -rte_errno;
148773122b52SXiaoyun Wang 
148873122b52SXiaoyun Wang 	if (hinic_check_normal_item_ele(item, pattern, rule, error))
148973122b52SXiaoyun Wang 		return -rte_errno;
149073122b52SXiaoyun Wang 
149173122b52SXiaoyun Wang 	if (hinic_check_normal_attr_ele(attr, rule, error))
149273122b52SXiaoyun Wang 		return -rte_errno;
149373122b52SXiaoyun Wang 
149473122b52SXiaoyun Wang 	if (hinic_check_normal_act_ele(item, actions, rule, error))
149573122b52SXiaoyun Wang 		return -rte_errno;
149673122b52SXiaoyun Wang 
149773122b52SXiaoyun Wang 	return 0;
149873122b52SXiaoyun Wang }
149973122b52SXiaoyun Wang 
15001fe89aa3SXiaoyun Wang /**
15011fe89aa3SXiaoyun Wang  * Parse the rule to see if it is a IP or MAC VLAN flow director rule.
15021fe89aa3SXiaoyun Wang  * And get the flow director filter info BTW.
15031fe89aa3SXiaoyun Wang  * UDP/TCP/SCTP PATTERN:
15041fe89aa3SXiaoyun Wang  * The first not void item can be ETH or IPV4 or IPV6
15051fe89aa3SXiaoyun Wang  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
15061fe89aa3SXiaoyun Wang  * The next not void item can be ANY/TCP/UDP
15071fe89aa3SXiaoyun Wang  * ACTION:
15081fe89aa3SXiaoyun Wang  * The first not void action should be QUEUE.
15091fe89aa3SXiaoyun Wang  * The second not void optional action should be MARK,
15101fe89aa3SXiaoyun Wang  * mark_id is a uint32_t number.
15111fe89aa3SXiaoyun Wang  * The next not void action should be END.
15121fe89aa3SXiaoyun Wang  * UDP/TCP pattern example:
15131fe89aa3SXiaoyun Wang  * ITEM                 Spec	                       Mask
15141fe89aa3SXiaoyun Wang  * ETH            NULL                                 NULL
15151fe89aa3SXiaoyun Wang  * IPV4           src_addr  1.2.3.6                 0xFFFFFFFF
15161fe89aa3SXiaoyun Wang  *                dst_addr  1.2.3.5                 0xFFFFFFFF
15171fe89aa3SXiaoyun Wang  * UDP/TCP        src_port  80                      0xFFFF
15181fe89aa3SXiaoyun Wang  *                dst_port  80                      0xFFFF
15191fe89aa3SXiaoyun Wang  * END
15201fe89aa3SXiaoyun Wang  * Other members in mask and spec should set to 0x00.
15211fe89aa3SXiaoyun Wang  * Item->last should be NULL.
15221fe89aa3SXiaoyun Wang  */
152373122b52SXiaoyun Wang static int
15241fe89aa3SXiaoyun Wang hinic_parse_fdir_filter_tcam_normal(const struct rte_flow_attr *attr,
15251fe89aa3SXiaoyun Wang 			       const struct rte_flow_item pattern[],
15261fe89aa3SXiaoyun Wang 			       const struct rte_flow_action actions[],
15271fe89aa3SXiaoyun Wang 			       struct hinic_fdir_rule *rule,
15281fe89aa3SXiaoyun Wang 			       struct rte_flow_error *error)
15291fe89aa3SXiaoyun Wang {
15301fe89aa3SXiaoyun Wang 	const struct rte_flow_item *item = NULL;
15311fe89aa3SXiaoyun Wang 
15321fe89aa3SXiaoyun Wang 	if (hinic_check_filter_arg(attr, pattern, actions, error))
15331fe89aa3SXiaoyun Wang 		return -rte_errno;
15341fe89aa3SXiaoyun Wang 
15351fe89aa3SXiaoyun Wang 	if (hinic_check_tcam_normal_item_ele(item, pattern, rule, error))
15361fe89aa3SXiaoyun Wang 		return -rte_errno;
15371fe89aa3SXiaoyun Wang 
15381fe89aa3SXiaoyun Wang 	if (hinic_check_normal_attr_ele(attr, rule, error))
15391fe89aa3SXiaoyun Wang 		return -rte_errno;
15401fe89aa3SXiaoyun Wang 
15411fe89aa3SXiaoyun Wang 	if (hinic_check_normal_act_ele(item, actions, rule, error))
15421fe89aa3SXiaoyun Wang 		return -rte_errno;
15431fe89aa3SXiaoyun Wang 
15441fe89aa3SXiaoyun Wang 	return 0;
15451fe89aa3SXiaoyun Wang }
15461fe89aa3SXiaoyun Wang 
15471fe89aa3SXiaoyun Wang /**
15481fe89aa3SXiaoyun Wang  * Parse the rule to see if it is a IP or MAC VLAN flow director rule.
15491fe89aa3SXiaoyun Wang  * And get the flow director filter info BTW.
15501fe89aa3SXiaoyun Wang  * UDP/TCP/SCTP PATTERN:
15511fe89aa3SXiaoyun Wang  * The first not void item can be ETH or IPV4 or IPV6
15521fe89aa3SXiaoyun Wang  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
15531fe89aa3SXiaoyun Wang  * The next not void item must be UDP
15541fe89aa3SXiaoyun Wang  * The next not void item must be VXLAN(optional)
15551fe89aa3SXiaoyun Wang  * The first not void item can be ETH or IPV4 or IPV6
15561fe89aa3SXiaoyun Wang  * The next not void item could be ANY or UDP or TCP(optional)
15571fe89aa3SXiaoyun Wang  * The next not void item must be END.
15581fe89aa3SXiaoyun Wang  * ACTION:
15591fe89aa3SXiaoyun Wang  * The first not void action should be QUEUE.
15601fe89aa3SXiaoyun Wang  * The second not void optional action should be MARK,
15611fe89aa3SXiaoyun Wang  * mark_id is a uint32_t number.
15621fe89aa3SXiaoyun Wang  * The next not void action should be END.
15631fe89aa3SXiaoyun Wang  * UDP/TCP pattern example:
15641fe89aa3SXiaoyun Wang  * ITEM             Spec	                    Mask
15651fe89aa3SXiaoyun Wang  * ETH            NULL                              NULL
15661fe89aa3SXiaoyun Wang  * IPV4        src_addr  1.2.3.6                 0xFFFFFFFF
15671fe89aa3SXiaoyun Wang  *             dst_addr  1.2.3.5                 0xFFFFFFFF
15681fe89aa3SXiaoyun Wang  * UDP            NULL                              NULL
15691fe89aa3SXiaoyun Wang  * VXLAN          NULL                              NULL
15701fe89aa3SXiaoyun Wang  * UDP/TCP     src_port  80                      0xFFFF
15711fe89aa3SXiaoyun Wang  *             dst_port  80                      0xFFFF
15721fe89aa3SXiaoyun Wang  * END
15731fe89aa3SXiaoyun Wang  * Other members in mask and spec should set to 0x00.
15741fe89aa3SXiaoyun Wang  * Item->last should be NULL.
15751fe89aa3SXiaoyun Wang  */
15761fe89aa3SXiaoyun Wang static int
15771fe89aa3SXiaoyun Wang hinic_parse_fdir_filter_tacm_tunnel(const struct rte_flow_attr *attr,
15781fe89aa3SXiaoyun Wang 			       const struct rte_flow_item pattern[],
15791fe89aa3SXiaoyun Wang 			       const struct rte_flow_action actions[],
15801fe89aa3SXiaoyun Wang 			       struct hinic_fdir_rule *rule,
15811fe89aa3SXiaoyun Wang 			       struct rte_flow_error *error)
15821fe89aa3SXiaoyun Wang {
15831fe89aa3SXiaoyun Wang 	const struct rte_flow_item *item = NULL;
15841fe89aa3SXiaoyun Wang 
15851fe89aa3SXiaoyun Wang 	if (hinic_check_filter_arg(attr, pattern, actions, error))
15861fe89aa3SXiaoyun Wang 		return -rte_errno;
15871fe89aa3SXiaoyun Wang 
15881fe89aa3SXiaoyun Wang 	if (hinic_check_tcam_tunnel_item_ele(item, pattern, rule, error))
15891fe89aa3SXiaoyun Wang 		return -rte_errno;
15901fe89aa3SXiaoyun Wang 
15911fe89aa3SXiaoyun Wang 	if (hinic_check_normal_attr_ele(attr, rule, error))
15921fe89aa3SXiaoyun Wang 		return -rte_errno;
15931fe89aa3SXiaoyun Wang 
15941fe89aa3SXiaoyun Wang 	if (hinic_check_normal_act_ele(item, actions, rule, error))
15951fe89aa3SXiaoyun Wang 		return -rte_errno;
15961fe89aa3SXiaoyun Wang 
15971fe89aa3SXiaoyun Wang 	return 0;
15981fe89aa3SXiaoyun Wang }
15991fe89aa3SXiaoyun Wang 
16001fe89aa3SXiaoyun Wang static int hinic_parse_fdir_filter(struct rte_eth_dev *dev,
160173122b52SXiaoyun Wang 			const struct rte_flow_attr *attr,
160273122b52SXiaoyun Wang 			const struct rte_flow_item pattern[],
160373122b52SXiaoyun Wang 			const struct rte_flow_action actions[],
160473122b52SXiaoyun Wang 			struct hinic_fdir_rule *rule,
160573122b52SXiaoyun Wang 			struct rte_flow_error *error)
160673122b52SXiaoyun Wang {
160773122b52SXiaoyun Wang 	int ret;
160873122b52SXiaoyun Wang 
16091fe89aa3SXiaoyun Wang 	ret = hinic_parse_fdir_filter_normal(attr, pattern, actions,
16101fe89aa3SXiaoyun Wang 						rule, error);
16111fe89aa3SXiaoyun Wang 	if (!ret)
16121fe89aa3SXiaoyun Wang 		goto step_next;
16131fe89aa3SXiaoyun Wang 
16141fe89aa3SXiaoyun Wang 	ret = hinic_parse_fdir_filter_tcam_normal(attr, pattern, actions,
16151fe89aa3SXiaoyun Wang 						rule, error);
16161fe89aa3SXiaoyun Wang 	if (!ret)
16171fe89aa3SXiaoyun Wang 		goto step_next;
16181fe89aa3SXiaoyun Wang 
16191fe89aa3SXiaoyun Wang 	ret = hinic_parse_fdir_filter_tacm_tunnel(attr, pattern, actions,
16201fe89aa3SXiaoyun Wang 						rule, error);
162173122b52SXiaoyun Wang 	if (ret)
162273122b52SXiaoyun Wang 		return ret;
162373122b52SXiaoyun Wang 
16241fe89aa3SXiaoyun Wang step_next:
162573122b52SXiaoyun Wang 	if (rule->queue >= dev->data->nb_rx_queues)
162673122b52SXiaoyun Wang 		return -ENOTSUP;
162773122b52SXiaoyun Wang 
162873122b52SXiaoyun Wang 	return ret;
162973122b52SXiaoyun Wang }
163073122b52SXiaoyun Wang 
163173122b52SXiaoyun Wang /**
163273122b52SXiaoyun Wang  * Check if the flow rule is supported by nic.
16337be78d02SJosh Soref  * It only checks the format. Don't guarantee the rule can be programmed into
163473122b52SXiaoyun Wang  * the HW. Because there can be no enough room for the rule.
163573122b52SXiaoyun Wang  */
163673122b52SXiaoyun Wang static int hinic_flow_validate(struct rte_eth_dev *dev,
163773122b52SXiaoyun Wang 				const struct rte_flow_attr *attr,
163873122b52SXiaoyun Wang 				const struct rte_flow_item pattern[],
163973122b52SXiaoyun Wang 				const struct rte_flow_action actions[],
164073122b52SXiaoyun Wang 				struct rte_flow_error *error)
164173122b52SXiaoyun Wang {
164273122b52SXiaoyun Wang 	struct rte_eth_ethertype_filter ethertype_filter;
164373122b52SXiaoyun Wang 	struct rte_eth_ntuple_filter ntuple_filter;
164473122b52SXiaoyun Wang 	struct hinic_fdir_rule fdir_rule;
164573122b52SXiaoyun Wang 	int ret;
164673122b52SXiaoyun Wang 
164773122b52SXiaoyun Wang 	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
164873122b52SXiaoyun Wang 	ret = hinic_parse_ntuple_filter(dev, attr, pattern,
164973122b52SXiaoyun Wang 				actions, &ntuple_filter, error);
165073122b52SXiaoyun Wang 	if (!ret)
165173122b52SXiaoyun Wang 		return 0;
165273122b52SXiaoyun Wang 
165373122b52SXiaoyun Wang 	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
165473122b52SXiaoyun Wang 	ret = hinic_parse_ethertype_filter(dev, attr, pattern,
165573122b52SXiaoyun Wang 				actions, &ethertype_filter, error);
165673122b52SXiaoyun Wang 
165773122b52SXiaoyun Wang 	if (!ret)
165873122b52SXiaoyun Wang 		return 0;
165973122b52SXiaoyun Wang 
166073122b52SXiaoyun Wang 	memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
166173122b52SXiaoyun Wang 	ret = hinic_parse_fdir_filter(dev, attr, pattern,
166273122b52SXiaoyun Wang 				actions, &fdir_rule, error);
166373122b52SXiaoyun Wang 
166473122b52SXiaoyun Wang 	return ret;
166573122b52SXiaoyun Wang }
166673122b52SXiaoyun Wang 
16671fe89aa3SXiaoyun Wang static inline int ntuple_ip_filter(struct rte_eth_ntuple_filter *filter,
16681fe89aa3SXiaoyun Wang 		 struct hinic_5tuple_filter_info *hinic_filter_info)
1669a3920be3SXiaoyun Wang {
1670a3920be3SXiaoyun Wang 	switch (filter->dst_ip_mask) {
1671a3920be3SXiaoyun Wang 	case UINT32_MAX:
16721fe89aa3SXiaoyun Wang 		hinic_filter_info->dst_ip_mask = 0;
16731fe89aa3SXiaoyun Wang 		hinic_filter_info->dst_ip = filter->dst_ip;
1674a3920be3SXiaoyun Wang 		break;
1675a3920be3SXiaoyun Wang 	case 0:
16761fe89aa3SXiaoyun Wang 		hinic_filter_info->dst_ip_mask = 1;
16771fe89aa3SXiaoyun Wang 		hinic_filter_info->dst_ip = 0;
1678a3920be3SXiaoyun Wang 		break;
1679a3920be3SXiaoyun Wang 	default:
1680a3920be3SXiaoyun Wang 		PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
1681a3920be3SXiaoyun Wang 		return -EINVAL;
1682a3920be3SXiaoyun Wang 	}
1683a3920be3SXiaoyun Wang 
1684a3920be3SXiaoyun Wang 	switch (filter->src_ip_mask) {
1685a3920be3SXiaoyun Wang 	case UINT32_MAX:
16861fe89aa3SXiaoyun Wang 		hinic_filter_info->src_ip_mask = 0;
16871fe89aa3SXiaoyun Wang 		hinic_filter_info->src_ip = filter->src_ip;
1688a3920be3SXiaoyun Wang 		break;
1689a3920be3SXiaoyun Wang 	case 0:
16901fe89aa3SXiaoyun Wang 		hinic_filter_info->src_ip_mask = 1;
16911fe89aa3SXiaoyun Wang 		hinic_filter_info->src_ip = 0;
1692a3920be3SXiaoyun Wang 		break;
1693a3920be3SXiaoyun Wang 	default:
1694a3920be3SXiaoyun Wang 		PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
1695a3920be3SXiaoyun Wang 		return -EINVAL;
1696a3920be3SXiaoyun Wang 	}
1697a3920be3SXiaoyun Wang 	return 0;
1698a3920be3SXiaoyun Wang }
1699a3920be3SXiaoyun Wang 
17001fe89aa3SXiaoyun Wang static inline int ntuple_port_filter(struct rte_eth_ntuple_filter *filter,
17011fe89aa3SXiaoyun Wang 		   struct hinic_5tuple_filter_info *hinic_filter_info)
1702a3920be3SXiaoyun Wang {
1703a3920be3SXiaoyun Wang 	switch (filter->dst_port_mask) {
1704a3920be3SXiaoyun Wang 	case UINT16_MAX:
17051fe89aa3SXiaoyun Wang 		hinic_filter_info->dst_port_mask = 0;
17061fe89aa3SXiaoyun Wang 		hinic_filter_info->dst_port = filter->dst_port;
1707a3920be3SXiaoyun Wang 		break;
1708a3920be3SXiaoyun Wang 	case 0:
17091fe89aa3SXiaoyun Wang 		hinic_filter_info->dst_port_mask = 1;
17101fe89aa3SXiaoyun Wang 		hinic_filter_info->dst_port = 0;
1711a3920be3SXiaoyun Wang 		break;
1712a3920be3SXiaoyun Wang 	default:
1713a3920be3SXiaoyun Wang 		PMD_DRV_LOG(ERR, "Invalid dst_port mask.");
1714a3920be3SXiaoyun Wang 		return -EINVAL;
1715a3920be3SXiaoyun Wang 	}
1716a3920be3SXiaoyun Wang 
1717a3920be3SXiaoyun Wang 	switch (filter->src_port_mask) {
1718a3920be3SXiaoyun Wang 	case UINT16_MAX:
17191fe89aa3SXiaoyun Wang 		hinic_filter_info->src_port_mask = 0;
17201fe89aa3SXiaoyun Wang 		hinic_filter_info->src_port = filter->src_port;
1721a3920be3SXiaoyun Wang 		break;
1722a3920be3SXiaoyun Wang 	case 0:
17231fe89aa3SXiaoyun Wang 		hinic_filter_info->src_port_mask = 1;
17241fe89aa3SXiaoyun Wang 		hinic_filter_info->src_port = 0;
1725a3920be3SXiaoyun Wang 		break;
1726a3920be3SXiaoyun Wang 	default:
1727a3920be3SXiaoyun Wang 		PMD_DRV_LOG(ERR, "Invalid src_port mask.");
1728a3920be3SXiaoyun Wang 		return -EINVAL;
1729a3920be3SXiaoyun Wang 	}
1730a3920be3SXiaoyun Wang 
1731a3920be3SXiaoyun Wang 	return 0;
1732a3920be3SXiaoyun Wang }
1733a3920be3SXiaoyun Wang 
17341fe89aa3SXiaoyun Wang static inline int ntuple_proto_filter(struct rte_eth_ntuple_filter *filter,
17351fe89aa3SXiaoyun Wang 		    struct hinic_5tuple_filter_info *hinic_filter_info)
1736a3920be3SXiaoyun Wang {
1737a3920be3SXiaoyun Wang 	switch (filter->proto_mask) {
1738a3920be3SXiaoyun Wang 	case UINT8_MAX:
17391fe89aa3SXiaoyun Wang 		hinic_filter_info->proto_mask = 0;
17401fe89aa3SXiaoyun Wang 		hinic_filter_info->proto = filter->proto;
1741a3920be3SXiaoyun Wang 		break;
1742a3920be3SXiaoyun Wang 	case 0:
17431fe89aa3SXiaoyun Wang 		hinic_filter_info->proto_mask = 1;
17441fe89aa3SXiaoyun Wang 		hinic_filter_info->proto = 0;
1745a3920be3SXiaoyun Wang 		break;
1746a3920be3SXiaoyun Wang 	default:
1747a3920be3SXiaoyun Wang 		PMD_DRV_LOG(ERR, "Invalid protocol mask.");
1748a3920be3SXiaoyun Wang 		return -EINVAL;
1749a3920be3SXiaoyun Wang 	}
1750a3920be3SXiaoyun Wang 
1751a3920be3SXiaoyun Wang 	return 0;
1752a3920be3SXiaoyun Wang }
1753a3920be3SXiaoyun Wang 
17541fe89aa3SXiaoyun Wang static inline int ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
1755a3920be3SXiaoyun Wang 			struct hinic_5tuple_filter_info *filter_info)
1756a3920be3SXiaoyun Wang {
1757a3920be3SXiaoyun Wang 	if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM ||
1758a3920be3SXiaoyun Wang 		filter->priority > HINIC_MAX_N_TUPLE_PRIO ||
1759a3920be3SXiaoyun Wang 		filter->priority < HINIC_MIN_N_TUPLE_PRIO)
1760a3920be3SXiaoyun Wang 		return -EINVAL;
1761a3920be3SXiaoyun Wang 
1762a3920be3SXiaoyun Wang 	if (ntuple_ip_filter(filter, filter_info) ||
1763a3920be3SXiaoyun Wang 		ntuple_port_filter(filter, filter_info) ||
1764a3920be3SXiaoyun Wang 		ntuple_proto_filter(filter, filter_info))
1765a3920be3SXiaoyun Wang 		return -EINVAL;
1766a3920be3SXiaoyun Wang 
1767a3920be3SXiaoyun Wang 	filter_info->priority = (uint8_t)filter->priority;
1768a3920be3SXiaoyun Wang 	return 0;
1769a3920be3SXiaoyun Wang }
1770a3920be3SXiaoyun Wang 
1771a3920be3SXiaoyun Wang static inline struct hinic_5tuple_filter *
1772a3920be3SXiaoyun Wang hinic_5tuple_filter_lookup(struct hinic_5tuple_filter_list *filter_list,
1773a3920be3SXiaoyun Wang 			   struct hinic_5tuple_filter_info *key)
1774a3920be3SXiaoyun Wang {
1775a3920be3SXiaoyun Wang 	struct hinic_5tuple_filter *it;
1776a3920be3SXiaoyun Wang 
1777a3920be3SXiaoyun Wang 	TAILQ_FOREACH(it, filter_list, entries) {
1778a3920be3SXiaoyun Wang 		if (memcmp(key, &it->filter_info,
1779a3920be3SXiaoyun Wang 			sizeof(struct hinic_5tuple_filter_info)) == 0) {
1780a3920be3SXiaoyun Wang 			return it;
1781a3920be3SXiaoyun Wang 		}
1782a3920be3SXiaoyun Wang 	}
1783a3920be3SXiaoyun Wang 
1784a3920be3SXiaoyun Wang 	return NULL;
1785a3920be3SXiaoyun Wang }
1786a3920be3SXiaoyun Wang 
1787f4ca3fd5SXiaoyun Wang static int hinic_set_lacp_tcam(struct hinic_nic_dev *nic_dev)
1788f4ca3fd5SXiaoyun Wang {
1789f4ca3fd5SXiaoyun Wang 	struct tag_pa_rule lacp_rule;
1790f4ca3fd5SXiaoyun Wang 	struct tag_pa_action lacp_action;
1791f4ca3fd5SXiaoyun Wang 
1792f4ca3fd5SXiaoyun Wang 	memset(&lacp_rule, 0, sizeof(lacp_rule));
1793f4ca3fd5SXiaoyun Wang 	memset(&lacp_action, 0, sizeof(lacp_action));
1794f4ca3fd5SXiaoyun Wang 	/* LACP TCAM rule */
1795f4ca3fd5SXiaoyun Wang 	lacp_rule.eth_type = PA_ETH_TYPE_OTHER;
1796f4ca3fd5SXiaoyun Wang 	lacp_rule.l2_header.eth_type.val16 = 0x8809;
1797f4ca3fd5SXiaoyun Wang 	lacp_rule.l2_header.eth_type.mask16 = 0xffff;
1798f4ca3fd5SXiaoyun Wang 
1799f4ca3fd5SXiaoyun Wang 	/* LACP TCAM action */
1800f4ca3fd5SXiaoyun Wang 	lacp_action.err_type = 0x3f; /* err from ipsu, not convert */
1801f4ca3fd5SXiaoyun Wang 	lacp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1802f4ca3fd5SXiaoyun Wang 	lacp_action.pkt_type = PKT_LACP_TYPE;
1803f4ca3fd5SXiaoyun Wang 	lacp_action.pri = 0x0;
1804f4ca3fd5SXiaoyun Wang 	lacp_action.push_len = 0xf; /* push_len:0xf, not convert */
1805f4ca3fd5SXiaoyun Wang 
1806f4ca3fd5SXiaoyun Wang 	return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP,
1807f4ca3fd5SXiaoyun Wang 					&lacp_rule, &lacp_action);
1808f4ca3fd5SXiaoyun Wang }
1809f4ca3fd5SXiaoyun Wang 
1810a3920be3SXiaoyun Wang static int hinic_set_bgp_dport_tcam(struct hinic_nic_dev *nic_dev)
1811a3920be3SXiaoyun Wang {
1812a3920be3SXiaoyun Wang 	struct tag_pa_rule bgp_rule;
1813a3920be3SXiaoyun Wang 	struct tag_pa_action bgp_action;
1814a3920be3SXiaoyun Wang 
1815a3920be3SXiaoyun Wang 	memset(&bgp_rule, 0, sizeof(bgp_rule));
1816a3920be3SXiaoyun Wang 	memset(&bgp_action, 0, sizeof(bgp_action));
1817a3920be3SXiaoyun Wang 	/* BGP TCAM rule */
1818a3920be3SXiaoyun Wang 	bgp_rule.eth_type = PA_ETH_TYPE_IPV4; /* Eth type is IPV4 */
1819a3920be3SXiaoyun Wang 	bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
1820a3920be3SXiaoyun Wang 	bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
1821a3920be3SXiaoyun Wang 	bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1822a3920be3SXiaoyun Wang 	bgp_rule.eth_ip_tcp.dport.val16 = BGP_DPORT_ID; /* Dport is 179 */
1823a3920be3SXiaoyun Wang 	bgp_rule.eth_ip_tcp.dport.mask16 = UINT16_MAX;
1824a3920be3SXiaoyun Wang 
1825a3920be3SXiaoyun Wang 	/* BGP TCAM action */
1826a3920be3SXiaoyun Wang 	bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
1827a3920be3SXiaoyun Wang 	bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1828a3920be3SXiaoyun Wang 	bgp_action.pkt_type = PKT_BGPD_DPORT_TYPE; /* bgp_dport: 83 */
1829a3920be3SXiaoyun Wang 	bgp_action.pri = 0xf; /* pri of BGP is 0xf, result from ipsu parse
1830a3920be3SXiaoyun Wang 			       * results, not need to convert
1831a3920be3SXiaoyun Wang 			       */
1832a3920be3SXiaoyun Wang 	bgp_action.push_len = 0xf; /* push_len:0xf, not convert */
1833a3920be3SXiaoyun Wang 
1834a3920be3SXiaoyun Wang 	return hinic_set_fdir_tcam(nic_dev->hwdev,
1835a3920be3SXiaoyun Wang 			TCAM_PKT_BGP_DPORT, &bgp_rule, &bgp_action);
1836a3920be3SXiaoyun Wang }
1837a3920be3SXiaoyun Wang 
1838a3920be3SXiaoyun Wang static int hinic_set_bgp_sport_tcam(struct hinic_nic_dev *nic_dev)
1839a3920be3SXiaoyun Wang {
1840a3920be3SXiaoyun Wang 	struct tag_pa_rule bgp_rule;
1841a3920be3SXiaoyun Wang 	struct tag_pa_action bgp_action;
1842a3920be3SXiaoyun Wang 
1843a3920be3SXiaoyun Wang 	memset(&bgp_rule, 0, sizeof(bgp_rule));
1844a3920be3SXiaoyun Wang 	memset(&bgp_action, 0, sizeof(bgp_action));
1845a3920be3SXiaoyun Wang 	/* BGP TCAM rule */
1846a3920be3SXiaoyun Wang 	bgp_rule.eth_type = PA_ETH_TYPE_IPV4;
1847a3920be3SXiaoyun Wang 	bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
1848a3920be3SXiaoyun Wang 	bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
1849a3920be3SXiaoyun Wang 	bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1850a3920be3SXiaoyun Wang 	bgp_rule.eth_ip_tcp.sport.val16 = BGP_DPORT_ID;
1851a3920be3SXiaoyun Wang 	bgp_rule.eth_ip_tcp.sport.mask16 = UINT16_MAX;
1852a3920be3SXiaoyun Wang 
1853a3920be3SXiaoyun Wang 	/* BGP TCAM action */
1854a3920be3SXiaoyun Wang 	bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
1855a3920be3SXiaoyun Wang 	bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1856a3920be3SXiaoyun Wang 	bgp_action.pkt_type = PKT_BGPD_SPORT_TYPE; /* bgp:sport: 84 */
1857a3920be3SXiaoyun Wang 	bgp_action.pri = 0xf; /* pri of BGP is 0xf, result from ipsu parse
1858a3920be3SXiaoyun Wang 			       * results, not need to convert
1859a3920be3SXiaoyun Wang 			       */
1860a3920be3SXiaoyun Wang 	bgp_action.push_len = 0xf; /* push_len:0xf, not convert */
1861a3920be3SXiaoyun Wang 
1862a3920be3SXiaoyun Wang 	return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT,
1863a3920be3SXiaoyun Wang 					&bgp_rule, &bgp_action);
1864a3920be3SXiaoyun Wang }
1865a3920be3SXiaoyun Wang 
1866a3920be3SXiaoyun Wang static int hinic_set_vrrp_tcam(struct hinic_nic_dev *nic_dev)
1867a3920be3SXiaoyun Wang {
1868a3920be3SXiaoyun Wang 	struct tag_pa_rule vrrp_rule;
1869a3920be3SXiaoyun Wang 	struct tag_pa_action vrrp_action;
1870a3920be3SXiaoyun Wang 
1871a3920be3SXiaoyun Wang 	memset(&vrrp_rule, 0, sizeof(vrrp_rule));
1872a3920be3SXiaoyun Wang 	memset(&vrrp_action, 0, sizeof(vrrp_action));
1873a3920be3SXiaoyun Wang 	/* VRRP TCAM rule */
1874a3920be3SXiaoyun Wang 	vrrp_rule.eth_type = PA_ETH_TYPE_IPV4;
1875a3920be3SXiaoyun Wang 	vrrp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1876a3920be3SXiaoyun Wang 	vrrp_rule.ip_header.protocol.mask8 = 0xff;
1877a3920be3SXiaoyun Wang 	vrrp_rule.ip_header.protocol.val8 = PA_IP_PROTOCOL_TYPE_VRRP;
1878a3920be3SXiaoyun Wang 
1879a3920be3SXiaoyun Wang 	/* VRRP TCAM action */
1880a3920be3SXiaoyun Wang 	vrrp_action.err_type = 0x3f;
1881a3920be3SXiaoyun Wang 	vrrp_action.fwd_action = 0x7;
1882a3920be3SXiaoyun Wang 	vrrp_action.pkt_type = PKT_VRRP_TYPE; /* VRRP: 85 */
1883a3920be3SXiaoyun Wang 	vrrp_action.pri = 0xf;
1884a3920be3SXiaoyun Wang 	vrrp_action.push_len = 0xf;
1885a3920be3SXiaoyun Wang 
1886a3920be3SXiaoyun Wang 	return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP,
1887a3920be3SXiaoyun Wang 					&vrrp_rule, &vrrp_action);
1888a3920be3SXiaoyun Wang }
1889a3920be3SXiaoyun Wang 
18901742421bSXiaoyun Wang /**
18911742421bSXiaoyun Wang  *  Clear all fdir configuration.
18921742421bSXiaoyun Wang  *
18931742421bSXiaoyun Wang  * @param nic_dev
18941742421bSXiaoyun Wang  *   The hardware interface of a Ethernet device.
18951742421bSXiaoyun Wang  *
18961742421bSXiaoyun Wang  * @return
18971742421bSXiaoyun Wang  *   0 on success,
18981742421bSXiaoyun Wang  *   negative error value otherwise.
18991742421bSXiaoyun Wang  */
19001742421bSXiaoyun Wang void hinic_free_fdir_filter(struct hinic_nic_dev *nic_dev)
19011742421bSXiaoyun Wang {
19021fe89aa3SXiaoyun Wang 	(void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);
19031742421bSXiaoyun Wang 
19040023e525SXiaoyun Wang 	(void)hinic_set_fdir_tcam_rule_filter(nic_dev->hwdev, false);
19050023e525SXiaoyun Wang 
19061fe89aa3SXiaoyun Wang 	(void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_DPORT);
19071742421bSXiaoyun Wang 
19081fe89aa3SXiaoyun Wang 	(void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT);
19091742421bSXiaoyun Wang 
19101fe89aa3SXiaoyun Wang 	(void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
19111742421bSXiaoyun Wang 
19121fe89aa3SXiaoyun Wang 	(void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP);
19131742421bSXiaoyun Wang 
19141fe89aa3SXiaoyun Wang 	(void)hinic_flush_tcam_rule(nic_dev->hwdev);
19151742421bSXiaoyun Wang }
19161742421bSXiaoyun Wang 
19171fe89aa3SXiaoyun Wang static int hinic_filter_info_init(struct hinic_5tuple_filter *filter,
1918a3920be3SXiaoyun Wang 		       struct hinic_filter_info *filter_info)
1919a3920be3SXiaoyun Wang {
1920a3920be3SXiaoyun Wang 	switch (filter->filter_info.proto) {
1921a3920be3SXiaoyun Wang 	case IPPROTO_TCP:
1922a3920be3SXiaoyun Wang 		/* Filter type is bgp type if dst_port or src_port is 179 */
1923a3920be3SXiaoyun Wang 		if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID) &&
1924a3920be3SXiaoyun Wang 			!(filter->filter_info.dst_port_mask)) {
1925a3920be3SXiaoyun Wang 			filter_info->pkt_type = PKT_BGPD_DPORT_TYPE;
1926a3920be3SXiaoyun Wang 		} else if (filter->filter_info.src_port ==
1927a3920be3SXiaoyun Wang 			RTE_BE16(BGP_DPORT_ID) &&
1928a3920be3SXiaoyun Wang 			!(filter->filter_info.src_port_mask)) {
1929a3920be3SXiaoyun Wang 			filter_info->pkt_type = PKT_BGPD_SPORT_TYPE;
1930a3920be3SXiaoyun Wang 		} else {
1931a3920be3SXiaoyun Wang 			PMD_DRV_LOG(INFO, "TCP PROTOCOL:5tuple filters"
1932a3920be3SXiaoyun Wang 			" just support BGP now, proto:0x%x, "
1933a3920be3SXiaoyun Wang 			"dst_port:0x%x, dst_port_mask:0x%x."
1934a3920be3SXiaoyun Wang 			"src_port:0x%x, src_port_mask:0x%x.",
1935a3920be3SXiaoyun Wang 			filter->filter_info.proto,
1936a3920be3SXiaoyun Wang 			filter->filter_info.dst_port,
1937a3920be3SXiaoyun Wang 			filter->filter_info.dst_port_mask,
1938a3920be3SXiaoyun Wang 			filter->filter_info.src_port,
1939a3920be3SXiaoyun Wang 			filter->filter_info.src_port_mask);
1940a3920be3SXiaoyun Wang 			return -EINVAL;
1941a3920be3SXiaoyun Wang 		}
1942a3920be3SXiaoyun Wang 		break;
1943a3920be3SXiaoyun Wang 
1944a3920be3SXiaoyun Wang 	case IPPROTO_VRRP:
1945a3920be3SXiaoyun Wang 		filter_info->pkt_type = PKT_VRRP_TYPE;
1946a3920be3SXiaoyun Wang 		break;
1947a3920be3SXiaoyun Wang 
1948a3920be3SXiaoyun Wang 	case IPPROTO_ICMP:
1949a3920be3SXiaoyun Wang 		filter_info->pkt_type = PKT_ICMP_IPV4_TYPE;
1950a3920be3SXiaoyun Wang 		break;
1951a3920be3SXiaoyun Wang 
1952a3920be3SXiaoyun Wang 	case IPPROTO_ICMPV6:
1953a3920be3SXiaoyun Wang 		filter_info->pkt_type = PKT_ICMP_IPV6_TYPE;
1954a3920be3SXiaoyun Wang 		break;
1955a3920be3SXiaoyun Wang 
1956a3920be3SXiaoyun Wang 	default:
1957a3920be3SXiaoyun Wang 		PMD_DRV_LOG(ERR, "5tuple filters just support BGP/VRRP/ICMP now, "
1958a3920be3SXiaoyun Wang 		"proto: 0x%x, dst_port: 0x%x, dst_port_mask: 0x%x."
1959a3920be3SXiaoyun Wang 		"src_port: 0x%x, src_port_mask: 0x%x.",
1960a3920be3SXiaoyun Wang 		filter->filter_info.proto, filter->filter_info.dst_port,
1961a3920be3SXiaoyun Wang 		filter->filter_info.dst_port_mask,
1962a3920be3SXiaoyun Wang 		filter->filter_info.src_port,
1963a3920be3SXiaoyun Wang 		filter->filter_info.src_port_mask);
1964a3920be3SXiaoyun Wang 		return -EINVAL;
1965a3920be3SXiaoyun Wang 	}
1966a3920be3SXiaoyun Wang 
1967a3920be3SXiaoyun Wang 	return 0;
1968a3920be3SXiaoyun Wang }
1969a3920be3SXiaoyun Wang 
19701fe89aa3SXiaoyun Wang static int hinic_lookup_new_filter(struct hinic_5tuple_filter *filter,
19711fe89aa3SXiaoyun Wang 			struct hinic_filter_info *filter_info, int *index)
1972a3920be3SXiaoyun Wang {
1973a3920be3SXiaoyun Wang 	int type_id;
1974a3920be3SXiaoyun Wang 
1975a3920be3SXiaoyun Wang 	type_id = HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);
1976a3920be3SXiaoyun Wang 
1977a3920be3SXiaoyun Wang 	if (type_id > HINIC_MAX_Q_FILTERS - 1) {
1978a3920be3SXiaoyun Wang 		PMD_DRV_LOG(ERR, "Pkt filters only support 64 filter type.");
1979a3920be3SXiaoyun Wang 		return -EINVAL;
1980a3920be3SXiaoyun Wang 	}
1981a3920be3SXiaoyun Wang 
1982a3920be3SXiaoyun Wang 	if (!(filter_info->type_mask & (1 << type_id))) {
1983a3920be3SXiaoyun Wang 		filter_info->type_mask |= 1 << type_id;
1984a3920be3SXiaoyun Wang 		filter->index = type_id;
1985a3920be3SXiaoyun Wang 		filter_info->pkt_filters[type_id].enable = true;
1986a3920be3SXiaoyun Wang 		filter_info->pkt_filters[type_id].pkt_proto =
1987a3920be3SXiaoyun Wang 						filter->filter_info.proto;
1988a3920be3SXiaoyun Wang 		TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
1989a3920be3SXiaoyun Wang 				  filter, entries);
1990a3920be3SXiaoyun Wang 	} else {
1991a3920be3SXiaoyun Wang 		PMD_DRV_LOG(ERR, "Filter type: %d exists.", type_id);
1992a3920be3SXiaoyun Wang 		return -EIO;
1993a3920be3SXiaoyun Wang 	}
1994a3920be3SXiaoyun Wang 
1995a3920be3SXiaoyun Wang 	*index = type_id;
1996a3920be3SXiaoyun Wang 	return 0;
1997a3920be3SXiaoyun Wang }
1998a3920be3SXiaoyun Wang 
1999a3920be3SXiaoyun Wang /*
2000a3920be3SXiaoyun Wang  * Add a 5tuple filter
2001a3920be3SXiaoyun Wang  *
2002a3920be3SXiaoyun Wang  * @param dev:
2003a3920be3SXiaoyun Wang  *  Pointer to struct rte_eth_dev.
2004a3920be3SXiaoyun Wang  * @param filter:
2005a3920be3SXiaoyun Wang  *  Pointer to the filter that will be added.
2006a3920be3SXiaoyun Wang  * @return
2007a3920be3SXiaoyun Wang  *    - On success, zero.
2008a3920be3SXiaoyun Wang  *    - On failure, a negative value.
2009a3920be3SXiaoyun Wang  */
20101fe89aa3SXiaoyun Wang static int hinic_add_5tuple_filter(struct rte_eth_dev *dev,
2011a3920be3SXiaoyun Wang 				struct hinic_5tuple_filter *filter)
2012a3920be3SXiaoyun Wang {
2013a3920be3SXiaoyun Wang 	struct hinic_filter_info *filter_info =
2014a3920be3SXiaoyun Wang 		HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2015a3920be3SXiaoyun Wang 	int i, ret_fw;
2016a3920be3SXiaoyun Wang 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2017a3920be3SXiaoyun Wang 
2018a3920be3SXiaoyun Wang 	if (hinic_filter_info_init(filter, filter_info) ||
2019a3920be3SXiaoyun Wang 		hinic_lookup_new_filter(filter, filter_info, &i))
2020a3920be3SXiaoyun Wang 		return -EFAULT;
2021a3920be3SXiaoyun Wang 
2022a3920be3SXiaoyun Wang 	ret_fw = hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
2023a3920be3SXiaoyun Wang 					filter_info->qid,
2024a3920be3SXiaoyun Wang 					filter_info->pkt_filters[i].enable,
2025a3920be3SXiaoyun Wang 					true);
2026a3920be3SXiaoyun Wang 	if (ret_fw) {
2027a3920be3SXiaoyun Wang 		PMD_DRV_LOG(ERR, "Set fdir filter failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2028a3920be3SXiaoyun Wang 			filter_info->pkt_type, filter->queue,
2029a3920be3SXiaoyun Wang 			filter_info->pkt_filters[i].enable);
2030a3920be3SXiaoyun Wang 		return -EFAULT;
2031a3920be3SXiaoyun Wang 	}
2032a3920be3SXiaoyun Wang 
2033a3920be3SXiaoyun Wang 	PMD_DRV_LOG(INFO, "Add 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2034a3920be3SXiaoyun Wang 			filter_info->pkt_type, filter_info->qid,
2035a3920be3SXiaoyun Wang 			filter_info->pkt_filters[filter->index].enable);
2036a3920be3SXiaoyun Wang 
2037a3920be3SXiaoyun Wang 	switch (filter->filter_info.proto) {
2038a3920be3SXiaoyun Wang 	case IPPROTO_TCP:
2039a3920be3SXiaoyun Wang 		if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID)) {
2040a3920be3SXiaoyun Wang 			ret_fw = hinic_set_bgp_dport_tcam(nic_dev);
2041a3920be3SXiaoyun Wang 			if (ret_fw) {
2042a3920be3SXiaoyun Wang 				PMD_DRV_LOG(ERR, "Set dport bgp failed, "
2043a3920be3SXiaoyun Wang 					"type: 0x%x, qid: 0x%x, enable: 0x%x",
2044a3920be3SXiaoyun Wang 					filter_info->pkt_type, filter->queue,
2045a3920be3SXiaoyun Wang 					filter_info->pkt_filters[i].enable);
2046a3920be3SXiaoyun Wang 				return -EFAULT;
2047a3920be3SXiaoyun Wang 			}
2048a3920be3SXiaoyun Wang 
2049a3920be3SXiaoyun Wang 			PMD_DRV_LOG(INFO, "Set dport bgp succeed, qid: 0x%x, enable: 0x%x",
2050a3920be3SXiaoyun Wang 				filter->queue,
2051a3920be3SXiaoyun Wang 				filter_info->pkt_filters[i].enable);
2052a3920be3SXiaoyun Wang 		} else if (filter->filter_info.src_port ==
2053a3920be3SXiaoyun Wang 			RTE_BE16(BGP_DPORT_ID)) {
2054a3920be3SXiaoyun Wang 			ret_fw = hinic_set_bgp_sport_tcam(nic_dev);
2055a3920be3SXiaoyun Wang 			if (ret_fw) {
2056a3920be3SXiaoyun Wang 				PMD_DRV_LOG(ERR, "Set sport bgp failed, "
2057a3920be3SXiaoyun Wang 					"type: 0x%x, qid: 0x%x, enable: 0x%x",
2058a3920be3SXiaoyun Wang 					filter_info->pkt_type, filter->queue,
2059a3920be3SXiaoyun Wang 					filter_info->pkt_filters[i].enable);
2060a3920be3SXiaoyun Wang 				return -EFAULT;
2061a3920be3SXiaoyun Wang 			}
2062a3920be3SXiaoyun Wang 
2063a3920be3SXiaoyun Wang 			PMD_DRV_LOG(INFO, "Set sport bgp succeed, qid: 0x%x, enable: 0x%x",
2064a3920be3SXiaoyun Wang 					filter->queue,
2065a3920be3SXiaoyun Wang 					filter_info->pkt_filters[i].enable);
2066a3920be3SXiaoyun Wang 		}
2067a3920be3SXiaoyun Wang 
2068a3920be3SXiaoyun Wang 		break;
2069a3920be3SXiaoyun Wang 
2070a3920be3SXiaoyun Wang 	case IPPROTO_VRRP:
2071a3920be3SXiaoyun Wang 		ret_fw = hinic_set_vrrp_tcam(nic_dev);
2072a3920be3SXiaoyun Wang 		if (ret_fw) {
2073a3920be3SXiaoyun Wang 			PMD_DRV_LOG(ERR, "Set VRRP failed, "
2074a3920be3SXiaoyun Wang 				"type: 0x%x, qid: 0x%x, enable: 0x%x",
2075a3920be3SXiaoyun Wang 				filter_info->pkt_type, filter->queue,
2076a3920be3SXiaoyun Wang 				filter_info->pkt_filters[i].enable);
2077a3920be3SXiaoyun Wang 			return -EFAULT;
2078a3920be3SXiaoyun Wang 		}
2079a3920be3SXiaoyun Wang 		PMD_DRV_LOG(INFO, "Set VRRP succeed, qid: 0x%x, enable: 0x%x",
2080a3920be3SXiaoyun Wang 				filter->queue,
2081a3920be3SXiaoyun Wang 				filter_info->pkt_filters[i].enable);
2082a3920be3SXiaoyun Wang 		break;
2083a3920be3SXiaoyun Wang 
2084a3920be3SXiaoyun Wang 	default:
2085a3920be3SXiaoyun Wang 		break;
2086a3920be3SXiaoyun Wang 	}
2087a3920be3SXiaoyun Wang 
2088a3920be3SXiaoyun Wang 	return 0;
2089a3920be3SXiaoyun Wang }
2090a3920be3SXiaoyun Wang 
2091a3920be3SXiaoyun Wang /*
2092a3920be3SXiaoyun Wang  * Remove a 5tuple filter
2093a3920be3SXiaoyun Wang  *
2094a3920be3SXiaoyun Wang  * @param dev
2095a3920be3SXiaoyun Wang  *  Pointer to struct rte_eth_dev.
2096a3920be3SXiaoyun Wang  * @param filter
2097a3920be3SXiaoyun Wang  *  The pointer of the filter will be removed.
2098a3920be3SXiaoyun Wang  */
20991fe89aa3SXiaoyun Wang static void hinic_remove_5tuple_filter(struct rte_eth_dev *dev,
2100a3920be3SXiaoyun Wang 			   struct hinic_5tuple_filter *filter)
2101a3920be3SXiaoyun Wang {
2102a3920be3SXiaoyun Wang 	struct hinic_filter_info *filter_info =
2103a3920be3SXiaoyun Wang 		HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2104a3920be3SXiaoyun Wang 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2105a3920be3SXiaoyun Wang 
2106a3920be3SXiaoyun Wang 	switch (filter->filter_info.proto) {
2107a3920be3SXiaoyun Wang 	case IPPROTO_VRRP:
2108a3920be3SXiaoyun Wang 		(void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
2109a3920be3SXiaoyun Wang 		break;
2110a3920be3SXiaoyun Wang 
2111a3920be3SXiaoyun Wang 	case IPPROTO_TCP:
2112a3920be3SXiaoyun Wang 		if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID))
2113a3920be3SXiaoyun Wang 			(void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2114a3920be3SXiaoyun Wang 							TCAM_PKT_BGP_DPORT);
2115a3920be3SXiaoyun Wang 		else if (filter->filter_info.src_port == RTE_BE16(BGP_DPORT_ID))
2116a3920be3SXiaoyun Wang 			(void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2117a3920be3SXiaoyun Wang 							TCAM_PKT_BGP_SPORT);
2118a3920be3SXiaoyun Wang 		break;
2119a3920be3SXiaoyun Wang 
2120a3920be3SXiaoyun Wang 	default:
2121a3920be3SXiaoyun Wang 		break;
2122a3920be3SXiaoyun Wang 	}
2123a3920be3SXiaoyun Wang 
2124a3920be3SXiaoyun Wang 	hinic_filter_info_init(filter, filter_info);
2125a3920be3SXiaoyun Wang 
2126a3920be3SXiaoyun Wang 	filter_info->pkt_filters[filter->index].enable = false;
2127a3920be3SXiaoyun Wang 	filter_info->pkt_filters[filter->index].pkt_proto = 0;
2128a3920be3SXiaoyun Wang 
2129a3920be3SXiaoyun Wang 	PMD_DRV_LOG(INFO, "Del 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2130a3920be3SXiaoyun Wang 		filter_info->pkt_type,
2131a3920be3SXiaoyun Wang 		filter_info->pkt_filters[filter->index].qid,
2132a3920be3SXiaoyun Wang 		filter_info->pkt_filters[filter->index].enable);
2133a3920be3SXiaoyun Wang 	(void)hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
2134a3920be3SXiaoyun Wang 				filter_info->pkt_filters[filter->index].qid,
2135a3920be3SXiaoyun Wang 				filter_info->pkt_filters[filter->index].enable,
2136a3920be3SXiaoyun Wang 				true);
2137a3920be3SXiaoyun Wang 
2138a3920be3SXiaoyun Wang 	filter_info->pkt_type = 0;
2139a3920be3SXiaoyun Wang 	filter_info->qid = 0;
2140a3920be3SXiaoyun Wang 	filter_info->pkt_filters[filter->index].qid = 0;
2141a3920be3SXiaoyun Wang 	filter_info->type_mask &= ~(1 <<  (filter->index));
2142a3920be3SXiaoyun Wang 	TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
2143a3920be3SXiaoyun Wang 
2144a3920be3SXiaoyun Wang 	rte_free(filter);
2145a3920be3SXiaoyun Wang }
2146a3920be3SXiaoyun Wang 
2147a3920be3SXiaoyun Wang /*
2148a3920be3SXiaoyun Wang  * Add or delete a ntuple filter
2149a3920be3SXiaoyun Wang  *
2150a3920be3SXiaoyun Wang  * @param dev
2151a3920be3SXiaoyun Wang  *  Pointer to struct rte_eth_dev.
2152a3920be3SXiaoyun Wang  * @param ntuple_filter
2153a3920be3SXiaoyun Wang  *  Pointer to struct rte_eth_ntuple_filter
2154a3920be3SXiaoyun Wang  * @param add
2155a3920be3SXiaoyun Wang  *  If true, add filter; if false, remove filter
2156a3920be3SXiaoyun Wang  * @return
2157a3920be3SXiaoyun Wang  *    - On success, zero.
2158a3920be3SXiaoyun Wang  *    - On failure, a negative value.
2159a3920be3SXiaoyun Wang  */
2160a3920be3SXiaoyun Wang static int hinic_add_del_ntuple_filter(struct rte_eth_dev *dev,
2161a3920be3SXiaoyun Wang 				struct rte_eth_ntuple_filter *ntuple_filter,
2162a3920be3SXiaoyun Wang 				bool add)
2163a3920be3SXiaoyun Wang {
2164a3920be3SXiaoyun Wang 	struct hinic_filter_info *filter_info =
2165a3920be3SXiaoyun Wang 		HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2166a3920be3SXiaoyun Wang 	struct hinic_5tuple_filter_info filter_5tuple;
2167a3920be3SXiaoyun Wang 	struct hinic_5tuple_filter *filter;
2168a3920be3SXiaoyun Wang 	int ret;
2169a3920be3SXiaoyun Wang 
2170a3920be3SXiaoyun Wang 	if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
2171a3920be3SXiaoyun Wang 		PMD_DRV_LOG(ERR, "Only 5tuple is supported.");
2172a3920be3SXiaoyun Wang 		return -EINVAL;
2173a3920be3SXiaoyun Wang 	}
2174a3920be3SXiaoyun Wang 
2175a3920be3SXiaoyun Wang 	memset(&filter_5tuple, 0, sizeof(struct hinic_5tuple_filter_info));
2176a3920be3SXiaoyun Wang 	ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
2177a3920be3SXiaoyun Wang 	if (ret < 0)
2178a3920be3SXiaoyun Wang 		return ret;
2179a3920be3SXiaoyun Wang 
2180a3920be3SXiaoyun Wang 	filter = hinic_5tuple_filter_lookup(&filter_info->fivetuple_list,
2181a3920be3SXiaoyun Wang 					 &filter_5tuple);
2182a3920be3SXiaoyun Wang 	if (filter != NULL && add) {
2183a3920be3SXiaoyun Wang 		PMD_DRV_LOG(ERR, "Filter exists.");
2184a3920be3SXiaoyun Wang 		return -EEXIST;
2185a3920be3SXiaoyun Wang 	}
2186a3920be3SXiaoyun Wang 	if (filter == NULL && !add) {
2187a3920be3SXiaoyun Wang 		PMD_DRV_LOG(ERR, "Filter doesn't exist.");
2188a3920be3SXiaoyun Wang 		return -ENOENT;
2189a3920be3SXiaoyun Wang 	}
2190a3920be3SXiaoyun Wang 
2191a3920be3SXiaoyun Wang 	if (add) {
2192a3920be3SXiaoyun Wang 		filter = rte_zmalloc("hinic_5tuple_filter",
2193a3920be3SXiaoyun Wang 				sizeof(struct hinic_5tuple_filter), 0);
2194a3920be3SXiaoyun Wang 		if (filter == NULL)
2195a3920be3SXiaoyun Wang 			return -ENOMEM;
2196a3920be3SXiaoyun Wang 		rte_memcpy(&filter->filter_info, &filter_5tuple,
2197a3920be3SXiaoyun Wang 				sizeof(struct hinic_5tuple_filter_info));
2198a3920be3SXiaoyun Wang 		filter->queue = ntuple_filter->queue;
2199a3920be3SXiaoyun Wang 
2200a3920be3SXiaoyun Wang 		filter_info->qid = ntuple_filter->queue;
2201a3920be3SXiaoyun Wang 
2202a3920be3SXiaoyun Wang 		ret = hinic_add_5tuple_filter(dev, filter);
2203a3920be3SXiaoyun Wang 		if (ret)
2204a3920be3SXiaoyun Wang 			rte_free(filter);
2205a3920be3SXiaoyun Wang 
2206a3920be3SXiaoyun Wang 		return ret;
2207a3920be3SXiaoyun Wang 	}
2208a3920be3SXiaoyun Wang 
2209a3920be3SXiaoyun Wang 	hinic_remove_5tuple_filter(dev, filter);
2210a3920be3SXiaoyun Wang 
2211a3920be3SXiaoyun Wang 	return 0;
2212a3920be3SXiaoyun Wang }
2213a3920be3SXiaoyun Wang 
2214f4ca3fd5SXiaoyun Wang static inline int
2215f4ca3fd5SXiaoyun Wang hinic_check_ethertype_filter(struct rte_eth_ethertype_filter *filter)
2216f4ca3fd5SXiaoyun Wang {
2217f4ca3fd5SXiaoyun Wang 	if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM)
2218f4ca3fd5SXiaoyun Wang 		return -EINVAL;
2219f4ca3fd5SXiaoyun Wang 
2220f4ca3fd5SXiaoyun Wang 	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2221f4ca3fd5SXiaoyun Wang 		filter->ether_type == RTE_ETHER_TYPE_IPV6) {
2222f4ca3fd5SXiaoyun Wang 		PMD_DRV_LOG(ERR, "Unsupported ether_type(0x%04x) in"
2223f4ca3fd5SXiaoyun Wang 			" ethertype filter", filter->ether_type);
2224f4ca3fd5SXiaoyun Wang 		return -EINVAL;
2225f4ca3fd5SXiaoyun Wang 	}
2226f4ca3fd5SXiaoyun Wang 
2227f4ca3fd5SXiaoyun Wang 	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
2228f4ca3fd5SXiaoyun Wang 		PMD_DRV_LOG(ERR, "Mac compare is not supported");
2229f4ca3fd5SXiaoyun Wang 		return -EINVAL;
2230f4ca3fd5SXiaoyun Wang 	}
2231f4ca3fd5SXiaoyun Wang 	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
2232f4ca3fd5SXiaoyun Wang 		PMD_DRV_LOG(ERR, "Drop option is not supported");
2233f4ca3fd5SXiaoyun Wang 		return -EINVAL;
2234f4ca3fd5SXiaoyun Wang 	}
2235f4ca3fd5SXiaoyun Wang 
2236f4ca3fd5SXiaoyun Wang 	return 0;
2237f4ca3fd5SXiaoyun Wang }
2238f4ca3fd5SXiaoyun Wang 
2239f4ca3fd5SXiaoyun Wang static inline int
2240f4ca3fd5SXiaoyun Wang hinic_ethertype_filter_lookup(struct hinic_filter_info *filter_info,
2241f4ca3fd5SXiaoyun Wang 			      struct hinic_pkt_filter *ethertype_filter)
2242f4ca3fd5SXiaoyun Wang {
2243f4ca3fd5SXiaoyun Wang 	switch (ethertype_filter->pkt_proto) {
2244f4ca3fd5SXiaoyun Wang 	case RTE_ETHER_TYPE_SLOW:
2245f4ca3fd5SXiaoyun Wang 		filter_info->pkt_type = PKT_LACP_TYPE;
2246f4ca3fd5SXiaoyun Wang 		break;
2247f4ca3fd5SXiaoyun Wang 
2248f4ca3fd5SXiaoyun Wang 	case RTE_ETHER_TYPE_ARP:
2249f4ca3fd5SXiaoyun Wang 		filter_info->pkt_type = PKT_ARP_TYPE;
2250f4ca3fd5SXiaoyun Wang 		break;
2251f4ca3fd5SXiaoyun Wang 
2252f4ca3fd5SXiaoyun Wang 	default:
2253f4ca3fd5SXiaoyun Wang 		PMD_DRV_LOG(ERR, "Just support LACP/ARP for ethertype filters");
2254f4ca3fd5SXiaoyun Wang 		return -EIO;
2255f4ca3fd5SXiaoyun Wang 	}
2256f4ca3fd5SXiaoyun Wang 
2257f4ca3fd5SXiaoyun Wang 	return HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);
2258f4ca3fd5SXiaoyun Wang }
2259f4ca3fd5SXiaoyun Wang 
2260f4ca3fd5SXiaoyun Wang static inline int
2261f4ca3fd5SXiaoyun Wang hinic_ethertype_filter_insert(struct hinic_filter_info *filter_info,
2262f4ca3fd5SXiaoyun Wang 			      struct hinic_pkt_filter *ethertype_filter)
2263f4ca3fd5SXiaoyun Wang {
2264f4ca3fd5SXiaoyun Wang 	int id;
2265f4ca3fd5SXiaoyun Wang 
2266f4ca3fd5SXiaoyun Wang 	/* Find LACP or VRRP type id */
2267f4ca3fd5SXiaoyun Wang 	id = hinic_ethertype_filter_lookup(filter_info, ethertype_filter);
2268f4ca3fd5SXiaoyun Wang 	if (id < 0)
2269f4ca3fd5SXiaoyun Wang 		return -EINVAL;
2270f4ca3fd5SXiaoyun Wang 
2271f4ca3fd5SXiaoyun Wang 	if (!(filter_info->type_mask & (1 << id))) {
2272f4ca3fd5SXiaoyun Wang 		filter_info->type_mask |= 1 << id;
2273f4ca3fd5SXiaoyun Wang 		filter_info->pkt_filters[id].pkt_proto =
2274f4ca3fd5SXiaoyun Wang 			ethertype_filter->pkt_proto;
2275f4ca3fd5SXiaoyun Wang 		filter_info->pkt_filters[id].enable = ethertype_filter->enable;
2276f4ca3fd5SXiaoyun Wang 		filter_info->qid = ethertype_filter->qid;
2277f4ca3fd5SXiaoyun Wang 		return id;
2278f4ca3fd5SXiaoyun Wang 	}
2279f4ca3fd5SXiaoyun Wang 
2280f4ca3fd5SXiaoyun Wang 	PMD_DRV_LOG(ERR, "Filter type: %d exists", id);
2281f4ca3fd5SXiaoyun Wang 	return -EINVAL;
2282f4ca3fd5SXiaoyun Wang }
2283f4ca3fd5SXiaoyun Wang 
2284f4ca3fd5SXiaoyun Wang static inline void
2285f4ca3fd5SXiaoyun Wang hinic_ethertype_filter_remove(struct hinic_filter_info *filter_info,
2286f4ca3fd5SXiaoyun Wang 			      uint8_t idx)
2287f4ca3fd5SXiaoyun Wang {
2288f4ca3fd5SXiaoyun Wang 	if (idx >= HINIC_MAX_Q_FILTERS)
2289f4ca3fd5SXiaoyun Wang 		return;
2290f4ca3fd5SXiaoyun Wang 
2291f4ca3fd5SXiaoyun Wang 	filter_info->pkt_type = 0;
2292f4ca3fd5SXiaoyun Wang 	filter_info->type_mask &= ~(1 << idx);
2293f4ca3fd5SXiaoyun Wang 	filter_info->pkt_filters[idx].pkt_proto = (uint16_t)0;
2294f4ca3fd5SXiaoyun Wang 	filter_info->pkt_filters[idx].enable = FALSE;
2295f4ca3fd5SXiaoyun Wang 	filter_info->pkt_filters[idx].qid = 0;
2296f4ca3fd5SXiaoyun Wang }
2297f4ca3fd5SXiaoyun Wang 
2298f4ca3fd5SXiaoyun Wang static inline int
2299f4ca3fd5SXiaoyun Wang hinic_add_del_ethertype_filter(struct rte_eth_dev *dev,
2300f4ca3fd5SXiaoyun Wang 			       struct rte_eth_ethertype_filter *filter,
2301f4ca3fd5SXiaoyun Wang 			       bool add)
2302f4ca3fd5SXiaoyun Wang {
2303f4ca3fd5SXiaoyun Wang 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2304f4ca3fd5SXiaoyun Wang 	struct hinic_filter_info *filter_info =
2305f4ca3fd5SXiaoyun Wang 		HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2306f4ca3fd5SXiaoyun Wang 	struct hinic_pkt_filter ethertype_filter;
2307f4ca3fd5SXiaoyun Wang 	int i;
2308f4ca3fd5SXiaoyun Wang 	int ret_fw;
2309f4ca3fd5SXiaoyun Wang 
2310f4ca3fd5SXiaoyun Wang 	if (hinic_check_ethertype_filter(filter))
2311f4ca3fd5SXiaoyun Wang 		return -EINVAL;
2312f4ca3fd5SXiaoyun Wang 
2313f4ca3fd5SXiaoyun Wang 	if (add) {
2314f4ca3fd5SXiaoyun Wang 		ethertype_filter.pkt_proto = filter->ether_type;
2315f4ca3fd5SXiaoyun Wang 		ethertype_filter.enable = TRUE;
2316f4ca3fd5SXiaoyun Wang 		ethertype_filter.qid = (u8)filter->queue;
2317f4ca3fd5SXiaoyun Wang 		i = hinic_ethertype_filter_insert(filter_info,
2318f4ca3fd5SXiaoyun Wang 						    &ethertype_filter);
2319f4ca3fd5SXiaoyun Wang 		if (i < 0)
2320f4ca3fd5SXiaoyun Wang 			return -ENOSPC;
2321f4ca3fd5SXiaoyun Wang 
2322f4ca3fd5SXiaoyun Wang 		ret_fw = hinic_set_fdir_filter(nic_dev->hwdev,
2323f4ca3fd5SXiaoyun Wang 				filter_info->pkt_type, filter_info->qid,
2324f4ca3fd5SXiaoyun Wang 				filter_info->pkt_filters[i].enable, true);
2325f4ca3fd5SXiaoyun Wang 		if (ret_fw) {
2326f4ca3fd5SXiaoyun Wang 			PMD_DRV_LOG(ERR, "add ethertype failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2327f4ca3fd5SXiaoyun Wang 				filter_info->pkt_type, filter->queue,
2328f4ca3fd5SXiaoyun Wang 				filter_info->pkt_filters[i].enable);
2329f4ca3fd5SXiaoyun Wang 
2330f4ca3fd5SXiaoyun Wang 			hinic_ethertype_filter_remove(filter_info, i);
2331f4ca3fd5SXiaoyun Wang 			return -ENOENT;
2332f4ca3fd5SXiaoyun Wang 		}
2333f4ca3fd5SXiaoyun Wang 		PMD_DRV_LOG(INFO, "Add ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2334f4ca3fd5SXiaoyun Wang 				filter_info->pkt_type, filter->queue,
2335f4ca3fd5SXiaoyun Wang 				filter_info->pkt_filters[i].enable);
2336f4ca3fd5SXiaoyun Wang 
2337f4ca3fd5SXiaoyun Wang 		switch (ethertype_filter.pkt_proto) {
2338f4ca3fd5SXiaoyun Wang 		case RTE_ETHER_TYPE_SLOW:
2339f4ca3fd5SXiaoyun Wang 			ret_fw = hinic_set_lacp_tcam(nic_dev);
2340f4ca3fd5SXiaoyun Wang 			if (ret_fw) {
2341f4ca3fd5SXiaoyun Wang 				PMD_DRV_LOG(ERR, "Add lacp tcam failed");
2342f4ca3fd5SXiaoyun Wang 				hinic_ethertype_filter_remove(filter_info, i);
2343f4ca3fd5SXiaoyun Wang 				return -ENOENT;
2344f4ca3fd5SXiaoyun Wang 			}
2345f4ca3fd5SXiaoyun Wang 
2346f4ca3fd5SXiaoyun Wang 			PMD_DRV_LOG(INFO, "Add lacp tcam succeed");
2347f4ca3fd5SXiaoyun Wang 			break;
2348f4ca3fd5SXiaoyun Wang 		default:
2349f4ca3fd5SXiaoyun Wang 			break;
2350f4ca3fd5SXiaoyun Wang 		}
2351f4ca3fd5SXiaoyun Wang 	} else {
2352f4ca3fd5SXiaoyun Wang 		ethertype_filter.pkt_proto = filter->ether_type;
2353f4ca3fd5SXiaoyun Wang 		i = hinic_ethertype_filter_lookup(filter_info,
2354f4ca3fd5SXiaoyun Wang 						&ethertype_filter);
23552e8fb3d2SYunjian Wang 		if (i < 0)
23562e8fb3d2SYunjian Wang 			return -EINVAL;
2357f4ca3fd5SXiaoyun Wang 
2358f4ca3fd5SXiaoyun Wang 		if ((filter_info->type_mask & (1 << i))) {
2359f4ca3fd5SXiaoyun Wang 			filter_info->pkt_filters[i].enable = FALSE;
2360f4ca3fd5SXiaoyun Wang 			(void)hinic_set_fdir_filter(nic_dev->hwdev,
2361f4ca3fd5SXiaoyun Wang 					filter_info->pkt_type,
2362f4ca3fd5SXiaoyun Wang 					filter_info->pkt_filters[i].qid,
2363f4ca3fd5SXiaoyun Wang 					filter_info->pkt_filters[i].enable,
2364f4ca3fd5SXiaoyun Wang 					true);
2365f4ca3fd5SXiaoyun Wang 
2366f4ca3fd5SXiaoyun Wang 			PMD_DRV_LOG(INFO, "Del ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2367f4ca3fd5SXiaoyun Wang 					filter_info->pkt_type,
2368f4ca3fd5SXiaoyun Wang 					filter_info->pkt_filters[i].qid,
2369f4ca3fd5SXiaoyun Wang 					filter_info->pkt_filters[i].enable);
2370f4ca3fd5SXiaoyun Wang 
2371f4ca3fd5SXiaoyun Wang 			switch (ethertype_filter.pkt_proto) {
2372f4ca3fd5SXiaoyun Wang 			case RTE_ETHER_TYPE_SLOW:
2373f4ca3fd5SXiaoyun Wang 				(void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2374f4ca3fd5SXiaoyun Wang 								TCAM_PKT_LACP);
2375a528b671SXiaoyun Wang 				PMD_DRV_LOG(INFO, "Del lacp tcam succeed");
2376f4ca3fd5SXiaoyun Wang 				break;
2377f4ca3fd5SXiaoyun Wang 			default:
2378f4ca3fd5SXiaoyun Wang 				break;
2379f4ca3fd5SXiaoyun Wang 			}
2380f4ca3fd5SXiaoyun Wang 
2381f4ca3fd5SXiaoyun Wang 			hinic_ethertype_filter_remove(filter_info, i);
2382f4ca3fd5SXiaoyun Wang 
2383f4ca3fd5SXiaoyun Wang 		} else {
2384f4ca3fd5SXiaoyun Wang 			PMD_DRV_LOG(ERR, "Ethertype doesn't exist, type: 0x%x, qid: 0x%x, enable: 0x%x",
2385f4ca3fd5SXiaoyun Wang 					filter_info->pkt_type, filter->queue,
2386f4ca3fd5SXiaoyun Wang 					filter_info->pkt_filters[i].enable);
2387f4ca3fd5SXiaoyun Wang 			return -ENOENT;
2388f4ca3fd5SXiaoyun Wang 		}
2389f4ca3fd5SXiaoyun Wang 	}
2390f4ca3fd5SXiaoyun Wang 
2391f4ca3fd5SXiaoyun Wang 	return 0;
2392f4ca3fd5SXiaoyun Wang }
2393f4ca3fd5SXiaoyun Wang 
23941fe89aa3SXiaoyun Wang static int hinic_fdir_info_init(struct hinic_fdir_rule *rule,
2395f4ca3fd5SXiaoyun Wang 				struct hinic_fdir_info *fdir_info)
2396f4ca3fd5SXiaoyun Wang {
2397f4ca3fd5SXiaoyun Wang 	switch (rule->mask.src_ipv4_mask) {
2398f4ca3fd5SXiaoyun Wang 	case UINT32_MAX:
2399f4ca3fd5SXiaoyun Wang 		fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_SIP;
2400f4ca3fd5SXiaoyun Wang 		fdir_info->qid = rule->queue;
2401f4ca3fd5SXiaoyun Wang 		fdir_info->fdir_key = rule->hinic_fdir.src_ip;
2402f4ca3fd5SXiaoyun Wang 		return 0;
2403f4ca3fd5SXiaoyun Wang 
2404f4ca3fd5SXiaoyun Wang 	case 0:
2405f4ca3fd5SXiaoyun Wang 		break;
2406f4ca3fd5SXiaoyun Wang 
2407f4ca3fd5SXiaoyun Wang 	default:
2408f4ca3fd5SXiaoyun Wang 		PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
2409f4ca3fd5SXiaoyun Wang 		return -EINVAL;
2410f4ca3fd5SXiaoyun Wang 	}
2411f4ca3fd5SXiaoyun Wang 
2412f4ca3fd5SXiaoyun Wang 	switch (rule->mask.dst_ipv4_mask) {
2413f4ca3fd5SXiaoyun Wang 	case UINT32_MAX:
2414f4ca3fd5SXiaoyun Wang 		fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_DIP;
2415f4ca3fd5SXiaoyun Wang 		fdir_info->qid = rule->queue;
2416f4ca3fd5SXiaoyun Wang 		fdir_info->fdir_key = rule->hinic_fdir.dst_ip;
2417f4ca3fd5SXiaoyun Wang 		return 0;
2418f4ca3fd5SXiaoyun Wang 
2419f4ca3fd5SXiaoyun Wang 	case 0:
2420f4ca3fd5SXiaoyun Wang 		break;
2421f4ca3fd5SXiaoyun Wang 
2422f4ca3fd5SXiaoyun Wang 	default:
2423f4ca3fd5SXiaoyun Wang 		PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
2424f4ca3fd5SXiaoyun Wang 		return -EINVAL;
2425f4ca3fd5SXiaoyun Wang 	}
2426f4ca3fd5SXiaoyun Wang 
2427f4ca3fd5SXiaoyun Wang 	if (fdir_info->fdir_flag == 0) {
2428f4ca3fd5SXiaoyun Wang 		PMD_DRV_LOG(ERR, "All support mask is NULL.");
2429f4ca3fd5SXiaoyun Wang 		return -EINVAL;
2430f4ca3fd5SXiaoyun Wang 	}
2431f4ca3fd5SXiaoyun Wang 
2432f4ca3fd5SXiaoyun Wang 	return 0;
2433f4ca3fd5SXiaoyun Wang }
2434f4ca3fd5SXiaoyun Wang 
24351fe89aa3SXiaoyun Wang static inline int hinic_add_del_fdir_filter(struct rte_eth_dev *dev,
24361fe89aa3SXiaoyun Wang 					struct hinic_fdir_rule *rule, bool add)
2437f4ca3fd5SXiaoyun Wang {
2438f4ca3fd5SXiaoyun Wang 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2439f4ca3fd5SXiaoyun Wang 	struct hinic_fdir_info fdir_info;
2440f4ca3fd5SXiaoyun Wang 	int ret;
2441f4ca3fd5SXiaoyun Wang 
2442f4ca3fd5SXiaoyun Wang 	memset(&fdir_info, 0, sizeof(struct hinic_fdir_info));
2443f4ca3fd5SXiaoyun Wang 
2444f4ca3fd5SXiaoyun Wang 	ret = hinic_fdir_info_init(rule, &fdir_info);
2445f4ca3fd5SXiaoyun Wang 	if (ret) {
2446f4ca3fd5SXiaoyun Wang 		PMD_DRV_LOG(ERR, "Init hinic fdir info failed!");
2447f4ca3fd5SXiaoyun Wang 		return ret;
2448f4ca3fd5SXiaoyun Wang 	}
2449f4ca3fd5SXiaoyun Wang 
2450f4ca3fd5SXiaoyun Wang 	if (add) {
2451f4ca3fd5SXiaoyun Wang 		ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid,
2452f4ca3fd5SXiaoyun Wang 						true, fdir_info.fdir_key,
2453f4ca3fd5SXiaoyun Wang 						true, fdir_info.fdir_flag);
2454f4ca3fd5SXiaoyun Wang 		if (ret) {
2455f4ca3fd5SXiaoyun Wang 			PMD_DRV_LOG(ERR, "Add fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2456f4ca3fd5SXiaoyun Wang 					fdir_info.fdir_flag, fdir_info.qid,
2457f4ca3fd5SXiaoyun Wang 					fdir_info.fdir_key);
2458f4ca3fd5SXiaoyun Wang 			return -ENOENT;
2459f4ca3fd5SXiaoyun Wang 		}
2460f4ca3fd5SXiaoyun Wang 		PMD_DRV_LOG(INFO, "Add fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2461f4ca3fd5SXiaoyun Wang 				fdir_info.fdir_flag, fdir_info.qid,
2462f4ca3fd5SXiaoyun Wang 				fdir_info.fdir_key);
2463f4ca3fd5SXiaoyun Wang 	} else {
2464f4ca3fd5SXiaoyun Wang 		ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid,
2465f4ca3fd5SXiaoyun Wang 						false, fdir_info.fdir_key, true,
2466f4ca3fd5SXiaoyun Wang 						fdir_info.fdir_flag);
2467f4ca3fd5SXiaoyun Wang 		if (ret) {
24689d441c45SXiaoyun Wang 			PMD_DRV_LOG(ERR, "Del fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2469f4ca3fd5SXiaoyun Wang 				fdir_info.fdir_flag, fdir_info.qid,
2470f4ca3fd5SXiaoyun Wang 				fdir_info.fdir_key);
2471f4ca3fd5SXiaoyun Wang 			return -ENOENT;
2472f4ca3fd5SXiaoyun Wang 		}
2473f4ca3fd5SXiaoyun Wang 		PMD_DRV_LOG(INFO, "Del fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2474f4ca3fd5SXiaoyun Wang 				fdir_info.fdir_flag, fdir_info.qid,
2475f4ca3fd5SXiaoyun Wang 				fdir_info.fdir_key);
2476f4ca3fd5SXiaoyun Wang 	}
2477f4ca3fd5SXiaoyun Wang 
2478f4ca3fd5SXiaoyun Wang 	return 0;
2479f4ca3fd5SXiaoyun Wang }
2480f4ca3fd5SXiaoyun Wang 
24811fe89aa3SXiaoyun Wang static void tcam_translate_key_y(u8 *key_y, u8 *src_input, u8 *mask, u8 len)
24821fe89aa3SXiaoyun Wang {
24831fe89aa3SXiaoyun Wang 	u8 idx;
24841fe89aa3SXiaoyun Wang 
24851fe89aa3SXiaoyun Wang 	for (idx = 0; idx < len; idx++)
24861fe89aa3SXiaoyun Wang 		key_y[idx] = src_input[idx] & mask[idx];
24871fe89aa3SXiaoyun Wang }
24881fe89aa3SXiaoyun Wang 
24891fe89aa3SXiaoyun Wang static void tcam_translate_key_x(u8 *key_x, u8 *key_y, u8 *mask, u8 len)
24901fe89aa3SXiaoyun Wang {
24911fe89aa3SXiaoyun Wang 	u8 idx;
24921fe89aa3SXiaoyun Wang 
24931fe89aa3SXiaoyun Wang 	for (idx = 0; idx < len; idx++)
24941fe89aa3SXiaoyun Wang 		key_x[idx] = key_y[idx] ^ mask[idx];
24951fe89aa3SXiaoyun Wang }
24961fe89aa3SXiaoyun Wang 
24971fe89aa3SXiaoyun Wang static void tcam_key_calculate(struct tag_tcam_key *tcam_key,
24981fe89aa3SXiaoyun Wang 				struct tag_tcam_cfg_rule *fdir_tcam_rule)
24991fe89aa3SXiaoyun Wang {
25001fe89aa3SXiaoyun Wang 	tcam_translate_key_y(fdir_tcam_rule->key.y,
25011fe89aa3SXiaoyun Wang 		(u8 *)(&tcam_key->key_info),
25021fe89aa3SXiaoyun Wang 		(u8 *)(&tcam_key->key_mask),
25031fe89aa3SXiaoyun Wang 		TCAM_FLOW_KEY_SIZE);
25041fe89aa3SXiaoyun Wang 	tcam_translate_key_x(fdir_tcam_rule->key.x,
25051fe89aa3SXiaoyun Wang 		fdir_tcam_rule->key.y,
25061fe89aa3SXiaoyun Wang 		(u8 *)(&tcam_key->key_mask),
25071fe89aa3SXiaoyun Wang 		TCAM_FLOW_KEY_SIZE);
25081fe89aa3SXiaoyun Wang }
25091fe89aa3SXiaoyun Wang 
25109d441c45SXiaoyun Wang static int hinic_fdir_tcam_ipv4_init(struct rte_eth_dev *dev,
25111fe89aa3SXiaoyun Wang 				     struct hinic_fdir_rule *rule,
25129d441c45SXiaoyun Wang 				     struct tag_tcam_key *tcam_key)
25131fe89aa3SXiaoyun Wang {
25141fe89aa3SXiaoyun Wang 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
25151fe89aa3SXiaoyun Wang 
25161fe89aa3SXiaoyun Wang 	switch (rule->mask.dst_ipv4_mask) {
25171fe89aa3SXiaoyun Wang 	case UINT32_MAX:
25181fe89aa3SXiaoyun Wang 		tcam_key->key_info.ext_dip_h =
25191fe89aa3SXiaoyun Wang 			(rule->hinic_fdir.dst_ip >> 16) & 0xffffU;
25201fe89aa3SXiaoyun Wang 		tcam_key->key_info.ext_dip_l =
25211fe89aa3SXiaoyun Wang 			rule->hinic_fdir.dst_ip & 0xffffU;
25221fe89aa3SXiaoyun Wang 		tcam_key->key_mask.ext_dip_h =
25231fe89aa3SXiaoyun Wang 			(rule->mask.dst_ipv4_mask >> 16) & 0xffffU;
25241fe89aa3SXiaoyun Wang 		tcam_key->key_mask.ext_dip_l =
25251fe89aa3SXiaoyun Wang 			rule->mask.dst_ipv4_mask & 0xffffU;
25261fe89aa3SXiaoyun Wang 		break;
25271fe89aa3SXiaoyun Wang 
25281fe89aa3SXiaoyun Wang 	case 0:
25291fe89aa3SXiaoyun Wang 		break;
25301fe89aa3SXiaoyun Wang 
25311fe89aa3SXiaoyun Wang 	default:
25321fe89aa3SXiaoyun Wang 		PMD_DRV_LOG(ERR, "invalid src_ip mask.");
25331fe89aa3SXiaoyun Wang 		return -EINVAL;
25341fe89aa3SXiaoyun Wang 	}
25351fe89aa3SXiaoyun Wang 
25361fe89aa3SXiaoyun Wang 	if (rule->mask.dst_port_mask > 0) {
25371fe89aa3SXiaoyun Wang 		tcam_key->key_info.dst_port = rule->hinic_fdir.dst_port;
25381fe89aa3SXiaoyun Wang 		tcam_key->key_mask.dst_port = rule->mask.dst_port_mask;
25391fe89aa3SXiaoyun Wang 	}
25401fe89aa3SXiaoyun Wang 
25411fe89aa3SXiaoyun Wang 	if (rule->mask.src_port_mask > 0) {
25421fe89aa3SXiaoyun Wang 		tcam_key->key_info.src_port = rule->hinic_fdir.src_port;
25431fe89aa3SXiaoyun Wang 		tcam_key->key_mask.src_port = rule->mask.src_port_mask;
25441fe89aa3SXiaoyun Wang 	}
25451fe89aa3SXiaoyun Wang 
25461fe89aa3SXiaoyun Wang 	switch (rule->mask.tunnel_flag) {
25471fe89aa3SXiaoyun Wang 	case UINT16_MAX:
25481fe89aa3SXiaoyun Wang 		tcam_key->key_info.tunnel_flag = FDIR_TCAM_TUNNEL_PACKET;
25491fe89aa3SXiaoyun Wang 		tcam_key->key_mask.tunnel_flag = UINT8_MAX;
25501fe89aa3SXiaoyun Wang 		break;
25511fe89aa3SXiaoyun Wang 
25521fe89aa3SXiaoyun Wang 	case 0:
25531fe89aa3SXiaoyun Wang 		tcam_key->key_info.tunnel_flag = FDIR_TCAM_NORMAL_PACKET;
25541fe89aa3SXiaoyun Wang 		tcam_key->key_mask.tunnel_flag = 0;
25551fe89aa3SXiaoyun Wang 		break;
25561fe89aa3SXiaoyun Wang 
25571fe89aa3SXiaoyun Wang 	default:
25581fe89aa3SXiaoyun Wang 		PMD_DRV_LOG(ERR, "invalid tunnel flag mask.");
25591fe89aa3SXiaoyun Wang 		return -EINVAL;
25601fe89aa3SXiaoyun Wang 	}
25611fe89aa3SXiaoyun Wang 
25621fe89aa3SXiaoyun Wang 	if (rule->mask.tunnel_inner_dst_port_mask > 0) {
25631fe89aa3SXiaoyun Wang 		tcam_key->key_info.dst_port =
25641fe89aa3SXiaoyun Wang 					rule->hinic_fdir.tunnel_inner_dst_port;
25651fe89aa3SXiaoyun Wang 		tcam_key->key_mask.dst_port =
25661fe89aa3SXiaoyun Wang 					rule->mask.tunnel_inner_dst_port_mask;
25671fe89aa3SXiaoyun Wang 	}
25681fe89aa3SXiaoyun Wang 
25691fe89aa3SXiaoyun Wang 	if (rule->mask.tunnel_inner_src_port_mask > 0) {
25701fe89aa3SXiaoyun Wang 		tcam_key->key_info.src_port =
25711fe89aa3SXiaoyun Wang 					rule->hinic_fdir.tunnel_inner_src_port;
25721fe89aa3SXiaoyun Wang 		tcam_key->key_mask.src_port =
25731fe89aa3SXiaoyun Wang 					rule->mask.tunnel_inner_src_port_mask;
25741fe89aa3SXiaoyun Wang 	}
25751fe89aa3SXiaoyun Wang 
25761fe89aa3SXiaoyun Wang 	switch (rule->mask.proto_mask) {
25771fe89aa3SXiaoyun Wang 	case UINT16_MAX:
25781fe89aa3SXiaoyun Wang 		tcam_key->key_info.protocol = rule->hinic_fdir.proto;
25791fe89aa3SXiaoyun Wang 		tcam_key->key_mask.protocol = UINT8_MAX;
25801fe89aa3SXiaoyun Wang 		break;
25811fe89aa3SXiaoyun Wang 
25821fe89aa3SXiaoyun Wang 	case 0:
25831fe89aa3SXiaoyun Wang 		break;
25841fe89aa3SXiaoyun Wang 
25851fe89aa3SXiaoyun Wang 	default:
25861fe89aa3SXiaoyun Wang 		PMD_DRV_LOG(ERR, "invalid tunnel flag mask.");
25871fe89aa3SXiaoyun Wang 		return -EINVAL;
25881fe89aa3SXiaoyun Wang 	}
25891fe89aa3SXiaoyun Wang 
25901fe89aa3SXiaoyun Wang 	tcam_key->key_mask.function_id = UINT16_MAX;
25919d441c45SXiaoyun Wang 	tcam_key->key_info.function_id =
25929d441c45SXiaoyun Wang 		hinic_global_func_id(nic_dev->hwdev) & 0x7fff;
25931fe89aa3SXiaoyun Wang 
25949d441c45SXiaoyun Wang 	return 0;
25959d441c45SXiaoyun Wang }
25969d441c45SXiaoyun Wang 
25979d441c45SXiaoyun Wang static int hinic_fdir_tcam_ipv6_init(struct rte_eth_dev *dev,
25989d441c45SXiaoyun Wang 				     struct hinic_fdir_rule *rule,
25999d441c45SXiaoyun Wang 				     struct tag_tcam_key *tcam_key)
26009d441c45SXiaoyun Wang {
26019d441c45SXiaoyun Wang 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
26029d441c45SXiaoyun Wang 
26039d441c45SXiaoyun Wang 	switch (rule->mask.dst_ipv6_mask) {
26049d441c45SXiaoyun Wang 	case UINT16_MAX:
26059d441c45SXiaoyun Wang 		tcam_key->key_info_ipv6.ipv6_key0 =
26069d441c45SXiaoyun Wang 			((rule->hinic_fdir.dst_ipv6[0] << 8) & 0xff00) |
26079d441c45SXiaoyun Wang 			rule->hinic_fdir.dst_ipv6[1];
26089d441c45SXiaoyun Wang 		tcam_key->key_info_ipv6.ipv6_key1 =
26099d441c45SXiaoyun Wang 			((rule->hinic_fdir.dst_ipv6[2] << 8) & 0xff00) |
26109d441c45SXiaoyun Wang 			rule->hinic_fdir.dst_ipv6[3];
26119d441c45SXiaoyun Wang 		tcam_key->key_info_ipv6.ipv6_key2 =
26129d441c45SXiaoyun Wang 			((rule->hinic_fdir.dst_ipv6[4] << 8) & 0xff00) |
26139d441c45SXiaoyun Wang 			rule->hinic_fdir.dst_ipv6[5];
26149d441c45SXiaoyun Wang 		tcam_key->key_info_ipv6.ipv6_key3 =
26159d441c45SXiaoyun Wang 			((rule->hinic_fdir.dst_ipv6[6] << 8) & 0xff00) |
26169d441c45SXiaoyun Wang 			rule->hinic_fdir.dst_ipv6[7];
26179d441c45SXiaoyun Wang 		tcam_key->key_info_ipv6.ipv6_key4 =
26189d441c45SXiaoyun Wang 			((rule->hinic_fdir.dst_ipv6[8] << 8) & 0xff00) |
26199d441c45SXiaoyun Wang 			rule->hinic_fdir.dst_ipv6[9];
26209d441c45SXiaoyun Wang 		tcam_key->key_info_ipv6.ipv6_key5 =
26219d441c45SXiaoyun Wang 			((rule->hinic_fdir.dst_ipv6[10] << 8) & 0xff00) |
26229d441c45SXiaoyun Wang 			rule->hinic_fdir.dst_ipv6[11];
26239d441c45SXiaoyun Wang 		tcam_key->key_info_ipv6.ipv6_key6 =
26249d441c45SXiaoyun Wang 			((rule->hinic_fdir.dst_ipv6[12] << 8) & 0xff00) |
26259d441c45SXiaoyun Wang 			rule->hinic_fdir.dst_ipv6[13];
26269d441c45SXiaoyun Wang 		tcam_key->key_info_ipv6.ipv6_key7 =
26279d441c45SXiaoyun Wang 			((rule->hinic_fdir.dst_ipv6[14] << 8) & 0xff00) |
26289d441c45SXiaoyun Wang 			rule->hinic_fdir.dst_ipv6[15];
26299d441c45SXiaoyun Wang 		tcam_key->key_mask_ipv6.ipv6_key0 = UINT16_MAX;
26309d441c45SXiaoyun Wang 		tcam_key->key_mask_ipv6.ipv6_key1 = UINT16_MAX;
26319d441c45SXiaoyun Wang 		tcam_key->key_mask_ipv6.ipv6_key2 = UINT16_MAX;
26329d441c45SXiaoyun Wang 		tcam_key->key_mask_ipv6.ipv6_key3 = UINT16_MAX;
26339d441c45SXiaoyun Wang 		tcam_key->key_mask_ipv6.ipv6_key4 = UINT16_MAX;
26349d441c45SXiaoyun Wang 		tcam_key->key_mask_ipv6.ipv6_key5 = UINT16_MAX;
26359d441c45SXiaoyun Wang 		tcam_key->key_mask_ipv6.ipv6_key6 = UINT16_MAX;
26369d441c45SXiaoyun Wang 		tcam_key->key_mask_ipv6.ipv6_key7 = UINT16_MAX;
26379d441c45SXiaoyun Wang 		break;
26389d441c45SXiaoyun Wang 
26399d441c45SXiaoyun Wang 	case 0:
26409d441c45SXiaoyun Wang 		break;
26419d441c45SXiaoyun Wang 
26429d441c45SXiaoyun Wang 	default:
26439d441c45SXiaoyun Wang 		PMD_DRV_LOG(ERR, "invalid dst_ipv6 mask");
26449d441c45SXiaoyun Wang 		return -EINVAL;
26459d441c45SXiaoyun Wang 	}
26469d441c45SXiaoyun Wang 
26479d441c45SXiaoyun Wang 	if (rule->mask.dst_port_mask > 0) {
26489d441c45SXiaoyun Wang 		tcam_key->key_info_ipv6.dst_port = rule->hinic_fdir.dst_port;
26499d441c45SXiaoyun Wang 		tcam_key->key_mask_ipv6.dst_port = rule->mask.dst_port_mask;
26509d441c45SXiaoyun Wang 	}
26519d441c45SXiaoyun Wang 
26529d441c45SXiaoyun Wang 	switch (rule->mask.proto_mask) {
26539d441c45SXiaoyun Wang 	case UINT16_MAX:
26549d441c45SXiaoyun Wang 		tcam_key->key_info_ipv6.protocol =
26559d441c45SXiaoyun Wang 			(rule->hinic_fdir.proto) & 0x7F;
26569d441c45SXiaoyun Wang 		tcam_key->key_mask_ipv6.protocol = 0x7F;
26579d441c45SXiaoyun Wang 		break;
26589d441c45SXiaoyun Wang 
26599d441c45SXiaoyun Wang 	case 0:
26609d441c45SXiaoyun Wang 		break;
26619d441c45SXiaoyun Wang 
26629d441c45SXiaoyun Wang 	default:
26639d441c45SXiaoyun Wang 		PMD_DRV_LOG(ERR, "invalid tunnel flag mask");
26649d441c45SXiaoyun Wang 		return -EINVAL;
26659d441c45SXiaoyun Wang 	}
26669d441c45SXiaoyun Wang 
26679d441c45SXiaoyun Wang 	tcam_key->key_info_ipv6.ipv6_flag = 1;
26689d441c45SXiaoyun Wang 	tcam_key->key_mask_ipv6.ipv6_flag = 1;
26699d441c45SXiaoyun Wang 
26709d441c45SXiaoyun Wang 	tcam_key->key_mask_ipv6.function_id = UINT8_MAX;
26719d441c45SXiaoyun Wang 	tcam_key->key_info_ipv6.function_id =
26729d441c45SXiaoyun Wang 			(u8)hinic_global_func_id(nic_dev->hwdev);
26739d441c45SXiaoyun Wang 
26749d441c45SXiaoyun Wang 	return 0;
26759d441c45SXiaoyun Wang }
26769d441c45SXiaoyun Wang 
26779d441c45SXiaoyun Wang static int hinic_fdir_tcam_info_init(struct rte_eth_dev *dev,
26789d441c45SXiaoyun Wang 				     struct hinic_fdir_rule *rule,
26799d441c45SXiaoyun Wang 				     struct tag_tcam_key *tcam_key,
26809d441c45SXiaoyun Wang 				     struct tag_tcam_cfg_rule *fdir_tcam_rule)
26819d441c45SXiaoyun Wang {
26829d441c45SXiaoyun Wang 	int ret = -1;
26839d441c45SXiaoyun Wang 
26849d441c45SXiaoyun Wang 	if (rule->mask.dst_ipv4_mask == UINT32_MAX)
26859d441c45SXiaoyun Wang 		ret = hinic_fdir_tcam_ipv4_init(dev, rule, tcam_key);
26869d441c45SXiaoyun Wang 	else if (rule->mask.dst_ipv6_mask == UINT16_MAX)
26879d441c45SXiaoyun Wang 		ret = hinic_fdir_tcam_ipv6_init(dev, rule, tcam_key);
26889d441c45SXiaoyun Wang 
26899d441c45SXiaoyun Wang 	if (ret < 0)
26909d441c45SXiaoyun Wang 		return ret;
26911fe89aa3SXiaoyun Wang 
26921fe89aa3SXiaoyun Wang 	fdir_tcam_rule->data.qid = rule->queue;
26931fe89aa3SXiaoyun Wang 
26941fe89aa3SXiaoyun Wang 	tcam_key_calculate(tcam_key, fdir_tcam_rule);
26951fe89aa3SXiaoyun Wang 
26961fe89aa3SXiaoyun Wang 	return 0;
26971fe89aa3SXiaoyun Wang }
26981fe89aa3SXiaoyun Wang 
26991fe89aa3SXiaoyun Wang static inline struct hinic_tcam_filter *
27001fe89aa3SXiaoyun Wang hinic_tcam_filter_lookup(struct hinic_tcam_filter_list *filter_list,
27011fe89aa3SXiaoyun Wang 			struct tag_tcam_key *key)
27021fe89aa3SXiaoyun Wang {
27031fe89aa3SXiaoyun Wang 	struct hinic_tcam_filter *it;
27041fe89aa3SXiaoyun Wang 
27051fe89aa3SXiaoyun Wang 	TAILQ_FOREACH(it, filter_list, entries) {
27061fe89aa3SXiaoyun Wang 		if (memcmp(key, &it->tcam_key,
27071fe89aa3SXiaoyun Wang 			sizeof(struct tag_tcam_key)) == 0) {
27081fe89aa3SXiaoyun Wang 			return it;
27091fe89aa3SXiaoyun Wang 		}
27101fe89aa3SXiaoyun Wang 	}
27111fe89aa3SXiaoyun Wang 
27121fe89aa3SXiaoyun Wang 	return NULL;
27131fe89aa3SXiaoyun Wang }
27141fe89aa3SXiaoyun Wang 
27151fe89aa3SXiaoyun Wang static int hinic_lookup_new_tcam_filter(struct rte_eth_dev *dev,
27161fe89aa3SXiaoyun Wang 					struct hinic_tcam_info *tcam_info,
27171fe89aa3SXiaoyun Wang 					struct hinic_tcam_filter *tcam_filter,
27181fe89aa3SXiaoyun Wang 					u16 *tcam_index)
27191fe89aa3SXiaoyun Wang {
27201fe89aa3SXiaoyun Wang 	int index;
27211fe89aa3SXiaoyun Wang 	int max_index;
27221fe89aa3SXiaoyun Wang 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
27231fe89aa3SXiaoyun Wang 
27241fe89aa3SXiaoyun Wang 	if (hinic_func_type(nic_dev->hwdev) == TYPE_VF)
27251fe89aa3SXiaoyun Wang 		max_index = HINIC_VF_MAX_TCAM_FILTERS;
27261fe89aa3SXiaoyun Wang 	else
27271fe89aa3SXiaoyun Wang 		max_index = HINIC_PF_MAX_TCAM_FILTERS;
27281fe89aa3SXiaoyun Wang 
27291fe89aa3SXiaoyun Wang 	for (index = 0; index < max_index; index++) {
27301fe89aa3SXiaoyun Wang 		if (tcam_info->tcam_index_array[index] == 0)
27311fe89aa3SXiaoyun Wang 			break;
27321fe89aa3SXiaoyun Wang 	}
27331fe89aa3SXiaoyun Wang 
27341fe89aa3SXiaoyun Wang 	if (index == max_index) {
27351fe89aa3SXiaoyun Wang 		PMD_DRV_LOG(ERR, "function 0x%x tcam filters only support %d filter rules",
27361fe89aa3SXiaoyun Wang 			hinic_global_func_id(nic_dev->hwdev), max_index);
27371fe89aa3SXiaoyun Wang 		return -EINVAL;
27381fe89aa3SXiaoyun Wang 	}
27391fe89aa3SXiaoyun Wang 
27401fe89aa3SXiaoyun Wang 	tcam_filter->index = index;
27411fe89aa3SXiaoyun Wang 	*tcam_index = index;
27421fe89aa3SXiaoyun Wang 
27431fe89aa3SXiaoyun Wang 	return 0;
27441fe89aa3SXiaoyun Wang }
27451fe89aa3SXiaoyun Wang 
27461fe89aa3SXiaoyun Wang static int hinic_add_tcam_filter(struct rte_eth_dev *dev,
27471fe89aa3SXiaoyun Wang 				struct hinic_tcam_filter *tcam_filter,
27481fe89aa3SXiaoyun Wang 				struct tag_tcam_cfg_rule *fdir_tcam_rule)
27491fe89aa3SXiaoyun Wang {
27501fe89aa3SXiaoyun Wang 	struct hinic_tcam_info *tcam_info =
27511fe89aa3SXiaoyun Wang 		HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
27521fe89aa3SXiaoyun Wang 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
27531fe89aa3SXiaoyun Wang 	u16 index = 0;
27541fe89aa3SXiaoyun Wang 	u16 tcam_block_index = 0;
27551fe89aa3SXiaoyun Wang 	int rc;
27561fe89aa3SXiaoyun Wang 
27571fe89aa3SXiaoyun Wang 	if (hinic_lookup_new_tcam_filter(dev, tcam_info, tcam_filter, &index))
27581fe89aa3SXiaoyun Wang 		return -EINVAL;
27591fe89aa3SXiaoyun Wang 
27601fe89aa3SXiaoyun Wang 	if (tcam_info->tcam_rule_nums == 0) {
27611fe89aa3SXiaoyun Wang 		if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
27621fe89aa3SXiaoyun Wang 			rc = hinic_alloc_tcam_block(nic_dev->hwdev,
27631fe89aa3SXiaoyun Wang 				HINIC_TCAM_BLOCK_TYPE_VF, &tcam_block_index);
27641fe89aa3SXiaoyun Wang 			if (rc != 0) {
27651fe89aa3SXiaoyun Wang 				PMD_DRV_LOG(ERR, "VF fdir filter tcam alloc block failed!");
27661fe89aa3SXiaoyun Wang 				return -EFAULT;
27671fe89aa3SXiaoyun Wang 			}
27681fe89aa3SXiaoyun Wang 		} else {
27691fe89aa3SXiaoyun Wang 			rc = hinic_alloc_tcam_block(nic_dev->hwdev,
27701fe89aa3SXiaoyun Wang 				HINIC_TCAM_BLOCK_TYPE_PF, &tcam_block_index);
27711fe89aa3SXiaoyun Wang 			if (rc != 0) {
27721fe89aa3SXiaoyun Wang 				PMD_DRV_LOG(ERR, "PF fdir filter tcam alloc block failed!");
27731fe89aa3SXiaoyun Wang 				return -EFAULT;
27741fe89aa3SXiaoyun Wang 			}
27751fe89aa3SXiaoyun Wang 		}
27761fe89aa3SXiaoyun Wang 
27771fe89aa3SXiaoyun Wang 		tcam_info->tcam_block_index = tcam_block_index;
27781fe89aa3SXiaoyun Wang 	} else {
27791fe89aa3SXiaoyun Wang 		tcam_block_index = tcam_info->tcam_block_index;
27801fe89aa3SXiaoyun Wang 	}
27811fe89aa3SXiaoyun Wang 
27821fe89aa3SXiaoyun Wang 	if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
27831fe89aa3SXiaoyun Wang 		fdir_tcam_rule->index =
27841fe89aa3SXiaoyun Wang 			HINIC_PKT_VF_TCAM_INDEX_START(tcam_block_index) + index;
27851fe89aa3SXiaoyun Wang 	} else {
27861fe89aa3SXiaoyun Wang 		fdir_tcam_rule->index =
27871fe89aa3SXiaoyun Wang 			tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS + index;
27881fe89aa3SXiaoyun Wang 	}
27891fe89aa3SXiaoyun Wang 
27901fe89aa3SXiaoyun Wang 	rc = hinic_add_tcam_rule(nic_dev->hwdev, fdir_tcam_rule);
27911fe89aa3SXiaoyun Wang 	if (rc != 0) {
27921fe89aa3SXiaoyun Wang 		PMD_DRV_LOG(ERR, "Fdir_tcam_rule add failed!");
27931fe89aa3SXiaoyun Wang 		return -EFAULT;
27941fe89aa3SXiaoyun Wang 	}
27951fe89aa3SXiaoyun Wang 
27961fe89aa3SXiaoyun Wang 	PMD_DRV_LOG(INFO, "Add fdir_tcam_rule function_id: 0x%x,"
27971fe89aa3SXiaoyun Wang 		"tcam_block_id: %d, index: %d, queue: %d, tcam_rule_nums: %d succeed",
27981fe89aa3SXiaoyun Wang 		hinic_global_func_id(nic_dev->hwdev), tcam_block_index,
27991fe89aa3SXiaoyun Wang 		fdir_tcam_rule->index, fdir_tcam_rule->data.qid,
28001fe89aa3SXiaoyun Wang 		tcam_info->tcam_rule_nums + 1);
28011fe89aa3SXiaoyun Wang 
28021fe89aa3SXiaoyun Wang 	if (tcam_info->tcam_rule_nums == 0) {
28031fe89aa3SXiaoyun Wang 		rc = hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, true);
28041fe89aa3SXiaoyun Wang 		if (rc < 0) {
28051fe89aa3SXiaoyun Wang 			(void)hinic_del_tcam_rule(nic_dev->hwdev,
28061fe89aa3SXiaoyun Wang 						fdir_tcam_rule->index);
28071fe89aa3SXiaoyun Wang 			return rc;
28081fe89aa3SXiaoyun Wang 		}
28090023e525SXiaoyun Wang 
28100023e525SXiaoyun Wang 		rc = hinic_set_fdir_tcam_rule_filter(nic_dev->hwdev, true);
28110023e525SXiaoyun Wang 		if (rc && rc != HINIC_MGMT_CMD_UNSUPPORTED) {
281219cc0283SXiaoyun Wang 			/*
281319cc0283SXiaoyun Wang 			 * hinic supports two methods: linear table and tcam
281419cc0283SXiaoyun Wang 			 * table, if tcam filter enables failed but linear table
281519cc0283SXiaoyun Wang 			 * is ok, which also needs to enable filter, so for this
281619cc0283SXiaoyun Wang 			 * scene, driver should not close fdir switch.
281719cc0283SXiaoyun Wang 			 */
28180023e525SXiaoyun Wang 			(void)hinic_del_tcam_rule(nic_dev->hwdev,
28190023e525SXiaoyun Wang 						fdir_tcam_rule->index);
28200023e525SXiaoyun Wang 			return rc;
28210023e525SXiaoyun Wang 		}
28221fe89aa3SXiaoyun Wang 	}
28231fe89aa3SXiaoyun Wang 
28241fe89aa3SXiaoyun Wang 	TAILQ_INSERT_TAIL(&tcam_info->tcam_list, tcam_filter, entries);
28251fe89aa3SXiaoyun Wang 
28261fe89aa3SXiaoyun Wang 	tcam_info->tcam_index_array[index] = 1;
28271fe89aa3SXiaoyun Wang 	tcam_info->tcam_rule_nums++;
28281fe89aa3SXiaoyun Wang 
28291fe89aa3SXiaoyun Wang 	return 0;
28301fe89aa3SXiaoyun Wang }
28311fe89aa3SXiaoyun Wang 
28321fe89aa3SXiaoyun Wang static int hinic_del_tcam_filter(struct rte_eth_dev *dev,
28331fe89aa3SXiaoyun Wang 				struct hinic_tcam_filter *tcam_filter)
28341fe89aa3SXiaoyun Wang {
28351fe89aa3SXiaoyun Wang 	struct hinic_tcam_info *tcam_info =
28361fe89aa3SXiaoyun Wang 		HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
28371fe89aa3SXiaoyun Wang 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
28381fe89aa3SXiaoyun Wang 	u32 index = 0;
28391fe89aa3SXiaoyun Wang 	u16 tcam_block_index = tcam_info->tcam_block_index;
28401fe89aa3SXiaoyun Wang 	int rc;
28411fe89aa3SXiaoyun Wang 	u8 block_type = 0;
28421fe89aa3SXiaoyun Wang 
28431fe89aa3SXiaoyun Wang 	if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
28441fe89aa3SXiaoyun Wang 		index = HINIC_PKT_VF_TCAM_INDEX_START(tcam_block_index) +
28451fe89aa3SXiaoyun Wang 			tcam_filter->index;
28461fe89aa3SXiaoyun Wang 		block_type = HINIC_TCAM_BLOCK_TYPE_VF;
28471fe89aa3SXiaoyun Wang 	} else {
28481fe89aa3SXiaoyun Wang 		index = tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS +
28491fe89aa3SXiaoyun Wang 			tcam_filter->index;
28501fe89aa3SXiaoyun Wang 		block_type = HINIC_TCAM_BLOCK_TYPE_PF;
28511fe89aa3SXiaoyun Wang 	}
28521fe89aa3SXiaoyun Wang 
28531fe89aa3SXiaoyun Wang 	rc = hinic_del_tcam_rule(nic_dev->hwdev, index);
28541fe89aa3SXiaoyun Wang 	if (rc != 0) {
28551fe89aa3SXiaoyun Wang 		PMD_DRV_LOG(ERR, "fdir_tcam_rule del failed!");
28561fe89aa3SXiaoyun Wang 		return -EFAULT;
28571fe89aa3SXiaoyun Wang 	}
28581fe89aa3SXiaoyun Wang 
28591fe89aa3SXiaoyun Wang 	PMD_DRV_LOG(INFO, "Del fdir_tcam_rule function_id: 0x%x, "
28601fe89aa3SXiaoyun Wang 		"tcam_block_id: %d, index: %d, tcam_rule_nums: %d succeed",
28611fe89aa3SXiaoyun Wang 		hinic_global_func_id(nic_dev->hwdev), tcam_block_index, index,
28621fe89aa3SXiaoyun Wang 		tcam_info->tcam_rule_nums - 1);
28631fe89aa3SXiaoyun Wang 
28641fe89aa3SXiaoyun Wang 	TAILQ_REMOVE(&tcam_info->tcam_list, tcam_filter, entries);
28651fe89aa3SXiaoyun Wang 
28661fe89aa3SXiaoyun Wang 	tcam_info->tcam_index_array[tcam_filter->index] = 0;
28671fe89aa3SXiaoyun Wang 
28681fe89aa3SXiaoyun Wang 	rte_free(tcam_filter);
28691fe89aa3SXiaoyun Wang 
28701fe89aa3SXiaoyun Wang 	tcam_info->tcam_rule_nums--;
28711fe89aa3SXiaoyun Wang 
28721fe89aa3SXiaoyun Wang 	if (tcam_info->tcam_rule_nums == 0) {
28731fe89aa3SXiaoyun Wang 		(void)hinic_free_tcam_block(nic_dev->hwdev, block_type,
28741fe89aa3SXiaoyun Wang 					&tcam_block_index);
28751fe89aa3SXiaoyun Wang 	}
28761fe89aa3SXiaoyun Wang 
28771fe89aa3SXiaoyun Wang 	return 0;
28781fe89aa3SXiaoyun Wang }
28791fe89aa3SXiaoyun Wang 
28801fe89aa3SXiaoyun Wang static int hinic_add_del_tcam_fdir_filter(struct rte_eth_dev *dev,
28811fe89aa3SXiaoyun Wang 					struct hinic_fdir_rule *rule, bool add)
28821fe89aa3SXiaoyun Wang {
28831fe89aa3SXiaoyun Wang 	struct hinic_tcam_info *tcam_info =
28841fe89aa3SXiaoyun Wang 		HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
28851fe89aa3SXiaoyun Wang 	struct hinic_tcam_filter *tcam_filter;
28861fe89aa3SXiaoyun Wang 	struct tag_tcam_cfg_rule fdir_tcam_rule;
28871fe89aa3SXiaoyun Wang 	struct tag_tcam_key tcam_key;
28881fe89aa3SXiaoyun Wang 	int ret;
28891fe89aa3SXiaoyun Wang 
28901fe89aa3SXiaoyun Wang 	memset(&fdir_tcam_rule, 0, sizeof(struct tag_tcam_cfg_rule));
28911fe89aa3SXiaoyun Wang 	memset((void *)&tcam_key, 0, sizeof(struct tag_tcam_key));
28921fe89aa3SXiaoyun Wang 
28931fe89aa3SXiaoyun Wang 	ret = hinic_fdir_tcam_info_init(dev, rule, &tcam_key, &fdir_tcam_rule);
28941fe89aa3SXiaoyun Wang 	if (ret) {
28959d441c45SXiaoyun Wang 		PMD_DRV_LOG(ERR, "Init hinic fdir info failed!");
28961fe89aa3SXiaoyun Wang 		return ret;
28971fe89aa3SXiaoyun Wang 	}
28981fe89aa3SXiaoyun Wang 
28991fe89aa3SXiaoyun Wang 	tcam_filter = hinic_tcam_filter_lookup(&tcam_info->tcam_list,
29001fe89aa3SXiaoyun Wang 						&tcam_key);
29011fe89aa3SXiaoyun Wang 	if (tcam_filter != NULL && add) {
29021fe89aa3SXiaoyun Wang 		PMD_DRV_LOG(ERR, "Filter exists.");
29031fe89aa3SXiaoyun Wang 		return -EEXIST;
29041fe89aa3SXiaoyun Wang 	}
29051fe89aa3SXiaoyun Wang 	if (tcam_filter == NULL && !add) {
29061fe89aa3SXiaoyun Wang 		PMD_DRV_LOG(ERR, "Filter doesn't exist.");
29071fe89aa3SXiaoyun Wang 		return -ENOENT;
29081fe89aa3SXiaoyun Wang 	}
29091fe89aa3SXiaoyun Wang 
29101fe89aa3SXiaoyun Wang 	if (add) {
29119d441c45SXiaoyun Wang 		tcam_filter = rte_zmalloc("hinic_5tuple_filter",
29121fe89aa3SXiaoyun Wang 				sizeof(struct hinic_tcam_filter), 0);
29131fe89aa3SXiaoyun Wang 		if (tcam_filter == NULL)
29141fe89aa3SXiaoyun Wang 			return -ENOMEM;
29151fe89aa3SXiaoyun Wang 		(void)rte_memcpy(&tcam_filter->tcam_key,
29161fe89aa3SXiaoyun Wang 				 &tcam_key, sizeof(struct tag_tcam_key));
29171fe89aa3SXiaoyun Wang 		tcam_filter->queue = fdir_tcam_rule.data.qid;
29181fe89aa3SXiaoyun Wang 
29191fe89aa3SXiaoyun Wang 		ret = hinic_add_tcam_filter(dev, tcam_filter, &fdir_tcam_rule);
29201fe89aa3SXiaoyun Wang 		if (ret < 0) {
29211fe89aa3SXiaoyun Wang 			rte_free(tcam_filter);
29221fe89aa3SXiaoyun Wang 			return ret;
29231fe89aa3SXiaoyun Wang 		}
29241fe89aa3SXiaoyun Wang 
29251fe89aa3SXiaoyun Wang 		rule->tcam_index = fdir_tcam_rule.index;
29261fe89aa3SXiaoyun Wang 
29271fe89aa3SXiaoyun Wang 	} else {
29289d441c45SXiaoyun Wang 		PMD_DRV_LOG(INFO, "begin to hinic_del_tcam_filter");
29291fe89aa3SXiaoyun Wang 		ret = hinic_del_tcam_filter(dev, tcam_filter);
29301fe89aa3SXiaoyun Wang 		if (ret < 0)
29311fe89aa3SXiaoyun Wang 			return ret;
29321fe89aa3SXiaoyun Wang 	}
29331fe89aa3SXiaoyun Wang 
29341fe89aa3SXiaoyun Wang 	return 0;
29351fe89aa3SXiaoyun Wang }
29361fe89aa3SXiaoyun Wang 
2937a3920be3SXiaoyun Wang /**
2938a3920be3SXiaoyun Wang  * Create or destroy a flow rule.
2939a3920be3SXiaoyun Wang  * Theorically one rule can match more than one filters.
2940a3920be3SXiaoyun Wang  * We will let it use the filter which it hitt first.
2941a3920be3SXiaoyun Wang  * So, the sequence matters.
2942a3920be3SXiaoyun Wang  */
2943a3920be3SXiaoyun Wang static struct rte_flow *hinic_flow_create(struct rte_eth_dev *dev,
2944a3920be3SXiaoyun Wang 					const struct rte_flow_attr *attr,
2945a3920be3SXiaoyun Wang 					const struct rte_flow_item pattern[],
2946a3920be3SXiaoyun Wang 					const struct rte_flow_action actions[],
2947a3920be3SXiaoyun Wang 					struct rte_flow_error *error)
2948a3920be3SXiaoyun Wang {
2949a3920be3SXiaoyun Wang 	int ret;
2950a3920be3SXiaoyun Wang 	struct rte_eth_ntuple_filter ntuple_filter;
2951f4ca3fd5SXiaoyun Wang 	struct rte_eth_ethertype_filter ethertype_filter;
2952f4ca3fd5SXiaoyun Wang 	struct hinic_fdir_rule fdir_rule;
2953a3920be3SXiaoyun Wang 	struct rte_flow *flow = NULL;
2954f4ca3fd5SXiaoyun Wang 	struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
2955a3920be3SXiaoyun Wang 	struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
2956f4ca3fd5SXiaoyun Wang 	struct hinic_fdir_rule_ele *fdir_rule_ptr;
2957a3920be3SXiaoyun Wang 	struct hinic_flow_mem *hinic_flow_mem_ptr;
2958a3920be3SXiaoyun Wang 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2959a3920be3SXiaoyun Wang 
2960a3920be3SXiaoyun Wang 	flow = rte_zmalloc("hinic_rte_flow", sizeof(struct rte_flow), 0);
2961a3920be3SXiaoyun Wang 	if (!flow) {
2962a3920be3SXiaoyun Wang 		PMD_DRV_LOG(ERR, "Failed to allocate flow memory");
2963a3920be3SXiaoyun Wang 		return NULL;
2964a3920be3SXiaoyun Wang 	}
2965a3920be3SXiaoyun Wang 
2966a3920be3SXiaoyun Wang 	hinic_flow_mem_ptr = rte_zmalloc("hinic_flow_mem",
2967a3920be3SXiaoyun Wang 			sizeof(struct hinic_flow_mem), 0);
2968a3920be3SXiaoyun Wang 	if (!hinic_flow_mem_ptr) {
2969a3920be3SXiaoyun Wang 		PMD_DRV_LOG(ERR, "Failed to allocate hinic_flow_mem_ptr");
2970a3920be3SXiaoyun Wang 		rte_free(flow);
2971a3920be3SXiaoyun Wang 		return NULL;
2972a3920be3SXiaoyun Wang 	}
2973a3920be3SXiaoyun Wang 
2974a3920be3SXiaoyun Wang 	hinic_flow_mem_ptr->flow = flow;
2975a3920be3SXiaoyun Wang 	TAILQ_INSERT_TAIL(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
2976a3920be3SXiaoyun Wang 				entries);
2977a3920be3SXiaoyun Wang 
2978f4ca3fd5SXiaoyun Wang 	/* Add ntuple filter */
2979a3920be3SXiaoyun Wang 	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2980a3920be3SXiaoyun Wang 	ret = hinic_parse_ntuple_filter(dev, attr, pattern,
2981a3920be3SXiaoyun Wang 			actions, &ntuple_filter, error);
2982f4ca3fd5SXiaoyun Wang 	if (!ret) {
2983a3920be3SXiaoyun Wang 		ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2984f4ca3fd5SXiaoyun Wang 		if (!ret) {
2985a3920be3SXiaoyun Wang 			ntuple_filter_ptr = rte_zmalloc("hinic_ntuple_filter",
2986a3920be3SXiaoyun Wang 				sizeof(struct hinic_ntuple_filter_ele), 0);
2987d7964ce1SYunjian Wang 			if (ntuple_filter_ptr == NULL) {
2988d7964ce1SYunjian Wang 				PMD_DRV_LOG(ERR, "Failed to allocate ntuple_filter_ptr");
29890c87a15fSXiaoyun Wang 				(void)hinic_add_del_ntuple_filter(dev,
29900c87a15fSXiaoyun Wang 							&ntuple_filter, FALSE);
2991d7964ce1SYunjian Wang 				goto out;
2992d7964ce1SYunjian Wang 			}
2993a3920be3SXiaoyun Wang 			rte_memcpy(&ntuple_filter_ptr->filter_info,
2994a3920be3SXiaoyun Wang 				   &ntuple_filter,
2995a3920be3SXiaoyun Wang 				   sizeof(struct rte_eth_ntuple_filter));
2996a3920be3SXiaoyun Wang 			TAILQ_INSERT_TAIL(&nic_dev->filter_ntuple_list,
2997a3920be3SXiaoyun Wang 			ntuple_filter_ptr, entries);
2998a3920be3SXiaoyun Wang 			flow->rule = ntuple_filter_ptr;
2999a3920be3SXiaoyun Wang 			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
3000a3920be3SXiaoyun Wang 
3001a3920be3SXiaoyun Wang 			PMD_DRV_LOG(INFO, "Create flow ntuple succeed, func_id: 0x%x",
3002a3920be3SXiaoyun Wang 			hinic_global_func_id(nic_dev->hwdev));
3003a3920be3SXiaoyun Wang 			return flow;
3004f4ca3fd5SXiaoyun Wang 		}
3005f4ca3fd5SXiaoyun Wang 		goto out;
3006f4ca3fd5SXiaoyun Wang 	}
3007f4ca3fd5SXiaoyun Wang 
3008f4ca3fd5SXiaoyun Wang 	/* Add ethertype filter */
3009f4ca3fd5SXiaoyun Wang 	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3010f4ca3fd5SXiaoyun Wang 	ret = hinic_parse_ethertype_filter(dev, attr, pattern, actions,
3011f4ca3fd5SXiaoyun Wang 					&ethertype_filter, error);
3012f4ca3fd5SXiaoyun Wang 	if (!ret) {
3013f4ca3fd5SXiaoyun Wang 		ret = hinic_add_del_ethertype_filter(dev, &ethertype_filter,
3014f4ca3fd5SXiaoyun Wang 						     TRUE);
3015f4ca3fd5SXiaoyun Wang 		if (!ret) {
3016f4ca3fd5SXiaoyun Wang 			ethertype_filter_ptr =
3017f4ca3fd5SXiaoyun Wang 				rte_zmalloc("hinic_ethertype_filter",
3018f4ca3fd5SXiaoyun Wang 				sizeof(struct hinic_ethertype_filter_ele), 0);
3019d7964ce1SYunjian Wang 			if (ethertype_filter_ptr == NULL) {
3020d7964ce1SYunjian Wang 				PMD_DRV_LOG(ERR, "Failed to allocate ethertype_filter_ptr");
30210c87a15fSXiaoyun Wang 				(void)hinic_add_del_ethertype_filter(dev,
30220c87a15fSXiaoyun Wang 						&ethertype_filter, FALSE);
3023d7964ce1SYunjian Wang 				goto out;
3024d7964ce1SYunjian Wang 			}
3025f4ca3fd5SXiaoyun Wang 			rte_memcpy(&ethertype_filter_ptr->filter_info,
3026f4ca3fd5SXiaoyun Wang 				&ethertype_filter,
3027f4ca3fd5SXiaoyun Wang 				sizeof(struct rte_eth_ethertype_filter));
3028f4ca3fd5SXiaoyun Wang 			TAILQ_INSERT_TAIL(&nic_dev->filter_ethertype_list,
3029f4ca3fd5SXiaoyun Wang 				ethertype_filter_ptr, entries);
3030f4ca3fd5SXiaoyun Wang 			flow->rule = ethertype_filter_ptr;
3031f4ca3fd5SXiaoyun Wang 			flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
3032f4ca3fd5SXiaoyun Wang 
3033f4ca3fd5SXiaoyun Wang 			PMD_DRV_LOG(INFO, "Create flow ethertype succeed, func_id: 0x%x",
3034f4ca3fd5SXiaoyun Wang 					hinic_global_func_id(nic_dev->hwdev));
3035f4ca3fd5SXiaoyun Wang 			return flow;
3036f4ca3fd5SXiaoyun Wang 		}
3037f4ca3fd5SXiaoyun Wang 		goto out;
3038f4ca3fd5SXiaoyun Wang 	}
3039f4ca3fd5SXiaoyun Wang 
3040f4ca3fd5SXiaoyun Wang 	/* Add fdir filter */
3041f4ca3fd5SXiaoyun Wang 	memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
3042f4ca3fd5SXiaoyun Wang 	ret = hinic_parse_fdir_filter(dev, attr, pattern,
3043f4ca3fd5SXiaoyun Wang 				      actions, &fdir_rule, error);
3044f4ca3fd5SXiaoyun Wang 	if (!ret) {
30451fe89aa3SXiaoyun Wang 		if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL) {
30460c87a15fSXiaoyun Wang 			ret = hinic_add_del_fdir_filter(dev, &fdir_rule, TRUE);
30471fe89aa3SXiaoyun Wang 		} else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM) {
30480c87a15fSXiaoyun Wang 			ret = hinic_add_del_tcam_fdir_filter(dev, &fdir_rule,
30490c87a15fSXiaoyun Wang 							     TRUE);
30501fe89aa3SXiaoyun Wang 		}  else {
30511fe89aa3SXiaoyun Wang 			PMD_DRV_LOG(INFO, "flow fdir rule create failed, rule mode wrong");
30521fe89aa3SXiaoyun Wang 			goto out;
30531fe89aa3SXiaoyun Wang 		}
3054f4ca3fd5SXiaoyun Wang 		if (!ret) {
3055f4ca3fd5SXiaoyun Wang 			fdir_rule_ptr = rte_zmalloc("hinic_fdir_rule",
3056f4ca3fd5SXiaoyun Wang 				sizeof(struct hinic_fdir_rule_ele), 0);
3057d7964ce1SYunjian Wang 			if (fdir_rule_ptr == NULL) {
3058d7964ce1SYunjian Wang 				PMD_DRV_LOG(ERR, "Failed to allocate fdir_rule_ptr");
30590c87a15fSXiaoyun Wang 				if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL)
30600c87a15fSXiaoyun Wang 					hinic_add_del_fdir_filter(dev,
30610c87a15fSXiaoyun Wang 						&fdir_rule, FALSE);
30620c87a15fSXiaoyun Wang 				else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM)
30630c87a15fSXiaoyun Wang 					hinic_add_del_tcam_fdir_filter(dev,
30640c87a15fSXiaoyun Wang 						&fdir_rule, FALSE);
30650c87a15fSXiaoyun Wang 
3066d7964ce1SYunjian Wang 				goto out;
3067d7964ce1SYunjian Wang 			}
3068f4ca3fd5SXiaoyun Wang 			rte_memcpy(&fdir_rule_ptr->filter_info, &fdir_rule,
3069f4ca3fd5SXiaoyun Wang 				sizeof(struct hinic_fdir_rule));
3070f4ca3fd5SXiaoyun Wang 			TAILQ_INSERT_TAIL(&nic_dev->filter_fdir_rule_list,
3071f4ca3fd5SXiaoyun Wang 				fdir_rule_ptr, entries);
3072f4ca3fd5SXiaoyun Wang 			flow->rule = fdir_rule_ptr;
3073f4ca3fd5SXiaoyun Wang 			flow->filter_type = RTE_ETH_FILTER_FDIR;
3074f4ca3fd5SXiaoyun Wang 
3075f4ca3fd5SXiaoyun Wang 			PMD_DRV_LOG(INFO, "Create flow fdir rule succeed, func_id : 0x%x",
3076f4ca3fd5SXiaoyun Wang 					hinic_global_func_id(nic_dev->hwdev));
3077f4ca3fd5SXiaoyun Wang 			return flow;
3078f4ca3fd5SXiaoyun Wang 		}
3079f4ca3fd5SXiaoyun Wang 		goto out;
3080f4ca3fd5SXiaoyun Wang 	}
3081a3920be3SXiaoyun Wang 
3082a3920be3SXiaoyun Wang out:
3083a3920be3SXiaoyun Wang 	TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr, entries);
3084a3920be3SXiaoyun Wang 	rte_flow_error_set(error, -ret,
3085a3920be3SXiaoyun Wang 			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3086a3920be3SXiaoyun Wang 			   "Failed to create flow.");
3087a3920be3SXiaoyun Wang 	rte_free(hinic_flow_mem_ptr);
3088a3920be3SXiaoyun Wang 	rte_free(flow);
3089a3920be3SXiaoyun Wang 	return NULL;
3090a3920be3SXiaoyun Wang }
3091a3920be3SXiaoyun Wang 
3092a3920be3SXiaoyun Wang /* Destroy a flow rule on hinic. */
30931fe89aa3SXiaoyun Wang static int hinic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
3094a3920be3SXiaoyun Wang 				struct rte_flow_error *error)
3095a3920be3SXiaoyun Wang {
3096a3920be3SXiaoyun Wang 	int ret;
3097a3920be3SXiaoyun Wang 	struct rte_flow *pmd_flow = flow;
3098a3920be3SXiaoyun Wang 	enum rte_filter_type filter_type = pmd_flow->filter_type;
3099a3920be3SXiaoyun Wang 	struct rte_eth_ntuple_filter ntuple_filter;
3100f4ca3fd5SXiaoyun Wang 	struct rte_eth_ethertype_filter ethertype_filter;
3101f4ca3fd5SXiaoyun Wang 	struct hinic_fdir_rule fdir_rule;
3102a3920be3SXiaoyun Wang 	struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
3103f4ca3fd5SXiaoyun Wang 	struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
3104f4ca3fd5SXiaoyun Wang 	struct hinic_fdir_rule_ele *fdir_rule_ptr;
3105a3920be3SXiaoyun Wang 	struct hinic_flow_mem *hinic_flow_mem_ptr;
3106a3920be3SXiaoyun Wang 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3107a3920be3SXiaoyun Wang 
3108a3920be3SXiaoyun Wang 	switch (filter_type) {
3109a3920be3SXiaoyun Wang 	case RTE_ETH_FILTER_NTUPLE:
3110a3920be3SXiaoyun Wang 		ntuple_filter_ptr = (struct hinic_ntuple_filter_ele *)
3111a3920be3SXiaoyun Wang 					pmd_flow->rule;
3112a3920be3SXiaoyun Wang 		rte_memcpy(&ntuple_filter, &ntuple_filter_ptr->filter_info,
3113a3920be3SXiaoyun Wang 			sizeof(struct rte_eth_ntuple_filter));
3114a3920be3SXiaoyun Wang 		ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3115a3920be3SXiaoyun Wang 		if (!ret) {
3116a3920be3SXiaoyun Wang 			TAILQ_REMOVE(&nic_dev->filter_ntuple_list,
3117a3920be3SXiaoyun Wang 				ntuple_filter_ptr, entries);
3118a3920be3SXiaoyun Wang 			rte_free(ntuple_filter_ptr);
3119a3920be3SXiaoyun Wang 		}
3120a3920be3SXiaoyun Wang 		break;
3121f4ca3fd5SXiaoyun Wang 	case RTE_ETH_FILTER_ETHERTYPE:
3122f4ca3fd5SXiaoyun Wang 		ethertype_filter_ptr = (struct hinic_ethertype_filter_ele *)
3123f4ca3fd5SXiaoyun Wang 					pmd_flow->rule;
3124f4ca3fd5SXiaoyun Wang 		rte_memcpy(&ethertype_filter,
3125f4ca3fd5SXiaoyun Wang 			&ethertype_filter_ptr->filter_info,
3126f4ca3fd5SXiaoyun Wang 			sizeof(struct rte_eth_ethertype_filter));
3127f4ca3fd5SXiaoyun Wang 		ret = hinic_add_del_ethertype_filter(dev,
3128f4ca3fd5SXiaoyun Wang 				&ethertype_filter, FALSE);
3129f4ca3fd5SXiaoyun Wang 		if (!ret) {
3130f4ca3fd5SXiaoyun Wang 			TAILQ_REMOVE(&nic_dev->filter_ethertype_list,
3131f4ca3fd5SXiaoyun Wang 				ethertype_filter_ptr, entries);
3132f4ca3fd5SXiaoyun Wang 			rte_free(ethertype_filter_ptr);
3133f4ca3fd5SXiaoyun Wang 		}
3134f4ca3fd5SXiaoyun Wang 		break;
3135f4ca3fd5SXiaoyun Wang 	case RTE_ETH_FILTER_FDIR:
3136f4ca3fd5SXiaoyun Wang 		fdir_rule_ptr = (struct hinic_fdir_rule_ele *)pmd_flow->rule;
3137f4ca3fd5SXiaoyun Wang 		rte_memcpy(&fdir_rule,
3138f4ca3fd5SXiaoyun Wang 			&fdir_rule_ptr->filter_info,
3139f4ca3fd5SXiaoyun Wang 			sizeof(struct hinic_fdir_rule));
31401fe89aa3SXiaoyun Wang 		if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL) {
3141f4ca3fd5SXiaoyun Wang 			ret = hinic_add_del_fdir_filter(dev, &fdir_rule, FALSE);
31421fe89aa3SXiaoyun Wang 		} else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM) {
31431fe89aa3SXiaoyun Wang 			ret = hinic_add_del_tcam_fdir_filter(dev, &fdir_rule,
31441fe89aa3SXiaoyun Wang 								FALSE);
31451fe89aa3SXiaoyun Wang 		} else {
31461fe89aa3SXiaoyun Wang 			PMD_DRV_LOG(ERR, "FDIR Filter type is wrong!");
31471fe89aa3SXiaoyun Wang 			ret = -EINVAL;
31481fe89aa3SXiaoyun Wang 		}
3149f4ca3fd5SXiaoyun Wang 		if (!ret) {
3150f4ca3fd5SXiaoyun Wang 			TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list,
3151f4ca3fd5SXiaoyun Wang 				fdir_rule_ptr, entries);
3152f4ca3fd5SXiaoyun Wang 			rte_free(fdir_rule_ptr);
3153f4ca3fd5SXiaoyun Wang 		}
3154f4ca3fd5SXiaoyun Wang 		break;
3155a3920be3SXiaoyun Wang 	default:
3156a528b671SXiaoyun Wang 		PMD_DRV_LOG(WARNING, "Filter type (%d) is not supported",
3157a3920be3SXiaoyun Wang 			filter_type);
3158a3920be3SXiaoyun Wang 		ret = -EINVAL;
3159a3920be3SXiaoyun Wang 		break;
3160a3920be3SXiaoyun Wang 	}
3161a3920be3SXiaoyun Wang 
3162a3920be3SXiaoyun Wang 	if (ret) {
3163a3920be3SXiaoyun Wang 		rte_flow_error_set(error, EINVAL,
3164a3920be3SXiaoyun Wang 				RTE_FLOW_ERROR_TYPE_HANDLE,
3165a3920be3SXiaoyun Wang 				NULL, "Failed to destroy flow");
3166a3920be3SXiaoyun Wang 		return ret;
3167a3920be3SXiaoyun Wang 	}
3168a3920be3SXiaoyun Wang 
3169a3920be3SXiaoyun Wang 	TAILQ_FOREACH(hinic_flow_mem_ptr, &nic_dev->hinic_flow_list, entries) {
3170a3920be3SXiaoyun Wang 		if (hinic_flow_mem_ptr->flow == pmd_flow) {
3171a3920be3SXiaoyun Wang 			TAILQ_REMOVE(&nic_dev->hinic_flow_list,
3172a3920be3SXiaoyun Wang 				hinic_flow_mem_ptr, entries);
3173a3920be3SXiaoyun Wang 			rte_free(hinic_flow_mem_ptr);
3174a3920be3SXiaoyun Wang 			break;
3175a3920be3SXiaoyun Wang 		}
3176a3920be3SXiaoyun Wang 	}
3177a3920be3SXiaoyun Wang 	rte_free(flow);
3178a3920be3SXiaoyun Wang 
3179a3920be3SXiaoyun Wang 	PMD_DRV_LOG(INFO, "Destroy flow succeed, func_id: 0x%x",
3180a3920be3SXiaoyun Wang 			hinic_global_func_id(nic_dev->hwdev));
3181a3920be3SXiaoyun Wang 
3182a3920be3SXiaoyun Wang 	return ret;
3183a3920be3SXiaoyun Wang }
3184a3920be3SXiaoyun Wang 
31851742421bSXiaoyun Wang /* Remove all the n-tuple filters */
31861742421bSXiaoyun Wang static void hinic_clear_all_ntuple_filter(struct rte_eth_dev *dev)
31871742421bSXiaoyun Wang {
31881742421bSXiaoyun Wang 	struct hinic_filter_info *filter_info =
31891742421bSXiaoyun Wang 		HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
31901742421bSXiaoyun Wang 	struct hinic_5tuple_filter *p_5tuple;
31911742421bSXiaoyun Wang 
31921742421bSXiaoyun Wang 	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
31931742421bSXiaoyun Wang 		hinic_remove_5tuple_filter(dev, p_5tuple);
31941742421bSXiaoyun Wang }
31951742421bSXiaoyun Wang 
31961742421bSXiaoyun Wang /* Remove all the ether type filters */
31971742421bSXiaoyun Wang static void hinic_clear_all_ethertype_filter(struct rte_eth_dev *dev)
31981742421bSXiaoyun Wang {
31991742421bSXiaoyun Wang 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
32001742421bSXiaoyun Wang 	struct hinic_filter_info *filter_info =
32011742421bSXiaoyun Wang 		HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev);
32021742421bSXiaoyun Wang 	int ret = 0;
32031742421bSXiaoyun Wang 
32041742421bSXiaoyun Wang 	if (filter_info->type_mask &
32051742421bSXiaoyun Wang 		(1 << HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE))) {
32061742421bSXiaoyun Wang 		hinic_ethertype_filter_remove(filter_info,
32071742421bSXiaoyun Wang 			HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE));
32081742421bSXiaoyun Wang 		ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_LACP_TYPE,
32091742421bSXiaoyun Wang 					filter_info->qid, false, true);
32101742421bSXiaoyun Wang 
32111742421bSXiaoyun Wang 		(void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP);
32121742421bSXiaoyun Wang 	}
32131742421bSXiaoyun Wang 
32141742421bSXiaoyun Wang 	if (filter_info->type_mask &
32151742421bSXiaoyun Wang 		(1 << HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE))) {
32161742421bSXiaoyun Wang 		hinic_ethertype_filter_remove(filter_info,
32171742421bSXiaoyun Wang 			HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE));
32181742421bSXiaoyun Wang 		ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_ARP_TYPE,
32191742421bSXiaoyun Wang 			filter_info->qid, false, true);
32201742421bSXiaoyun Wang 	}
32211742421bSXiaoyun Wang 
32221742421bSXiaoyun Wang 	if (ret)
32231742421bSXiaoyun Wang 		PMD_DRV_LOG(ERR, "Clear ethertype failed, filter type: 0x%x",
32241742421bSXiaoyun Wang 				filter_info->pkt_type);
32251742421bSXiaoyun Wang }
32261742421bSXiaoyun Wang 
32271742421bSXiaoyun Wang /* Remove all the ether type filters */
32281742421bSXiaoyun Wang static void hinic_clear_all_fdir_filter(struct rte_eth_dev *dev)
32291742421bSXiaoyun Wang {
32301742421bSXiaoyun Wang 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
32311fe89aa3SXiaoyun Wang 	struct hinic_tcam_info *tcam_info =
32321fe89aa3SXiaoyun Wang 		HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
32331fe89aa3SXiaoyun Wang 	struct hinic_tcam_filter *tcam_filter_ptr;
32341fe89aa3SXiaoyun Wang 
32351fe89aa3SXiaoyun Wang 	while ((tcam_filter_ptr = TAILQ_FIRST(&tcam_info->tcam_list)))
32361fe89aa3SXiaoyun Wang 		(void)hinic_del_tcam_filter(dev, tcam_filter_ptr);
32371742421bSXiaoyun Wang 
32381742421bSXiaoyun Wang 	(void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);
32391fe89aa3SXiaoyun Wang 
32400023e525SXiaoyun Wang 	(void)hinic_set_fdir_tcam_rule_filter(nic_dev->hwdev, false);
32410023e525SXiaoyun Wang 
32421fe89aa3SXiaoyun Wang 	(void)hinic_flush_tcam_rule(nic_dev->hwdev);
32431742421bSXiaoyun Wang }
32441742421bSXiaoyun Wang 
32451742421bSXiaoyun Wang static void hinic_filterlist_flush(struct rte_eth_dev *dev)
32461742421bSXiaoyun Wang {
32471742421bSXiaoyun Wang 	struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
32481742421bSXiaoyun Wang 	struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
32491742421bSXiaoyun Wang 	struct hinic_fdir_rule_ele *fdir_rule_ptr;
32501742421bSXiaoyun Wang 	struct hinic_flow_mem *hinic_flow_mem_ptr;
32511742421bSXiaoyun Wang 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
32521742421bSXiaoyun Wang 
32531742421bSXiaoyun Wang 	while ((ntuple_filter_ptr =
32541742421bSXiaoyun Wang 			TAILQ_FIRST(&nic_dev->filter_ntuple_list))) {
32551742421bSXiaoyun Wang 		TAILQ_REMOVE(&nic_dev->filter_ntuple_list, ntuple_filter_ptr,
32561742421bSXiaoyun Wang 				 entries);
32571742421bSXiaoyun Wang 		rte_free(ntuple_filter_ptr);
32581742421bSXiaoyun Wang 	}
32591742421bSXiaoyun Wang 
32601742421bSXiaoyun Wang 	while ((ethertype_filter_ptr =
32611742421bSXiaoyun Wang 			TAILQ_FIRST(&nic_dev->filter_ethertype_list))) {
32621742421bSXiaoyun Wang 		TAILQ_REMOVE(&nic_dev->filter_ethertype_list,
32631742421bSXiaoyun Wang 				ethertype_filter_ptr,
32641742421bSXiaoyun Wang 				entries);
32651742421bSXiaoyun Wang 		rte_free(ethertype_filter_ptr);
32661742421bSXiaoyun Wang 	}
32671742421bSXiaoyun Wang 
32681742421bSXiaoyun Wang 	while ((fdir_rule_ptr =
32691742421bSXiaoyun Wang 			TAILQ_FIRST(&nic_dev->filter_fdir_rule_list))) {
32701742421bSXiaoyun Wang 		TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list, fdir_rule_ptr,
32711742421bSXiaoyun Wang 				 entries);
32721742421bSXiaoyun Wang 		rte_free(fdir_rule_ptr);
32731742421bSXiaoyun Wang 	}
32741742421bSXiaoyun Wang 
32751742421bSXiaoyun Wang 	while ((hinic_flow_mem_ptr =
32761742421bSXiaoyun Wang 			TAILQ_FIRST(&nic_dev->hinic_flow_list))) {
32771742421bSXiaoyun Wang 		TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
32781742421bSXiaoyun Wang 				 entries);
32791742421bSXiaoyun Wang 		rte_free(hinic_flow_mem_ptr->flow);
32801742421bSXiaoyun Wang 		rte_free(hinic_flow_mem_ptr);
32811742421bSXiaoyun Wang 	}
32821742421bSXiaoyun Wang }
32831742421bSXiaoyun Wang 
32841742421bSXiaoyun Wang /* Destroy all flow rules associated with a port on hinic. */
32851742421bSXiaoyun Wang static int hinic_flow_flush(struct rte_eth_dev *dev,
32861742421bSXiaoyun Wang 				__rte_unused struct rte_flow_error *error)
32871742421bSXiaoyun Wang {
32881742421bSXiaoyun Wang 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
32891742421bSXiaoyun Wang 
32901742421bSXiaoyun Wang 	hinic_clear_all_ntuple_filter(dev);
32911742421bSXiaoyun Wang 	hinic_clear_all_ethertype_filter(dev);
32921742421bSXiaoyun Wang 	hinic_clear_all_fdir_filter(dev);
32931742421bSXiaoyun Wang 	hinic_filterlist_flush(dev);
32941742421bSXiaoyun Wang 
32951742421bSXiaoyun Wang 	PMD_DRV_LOG(INFO, "Flush flow succeed, func_id: 0x%x",
32961742421bSXiaoyun Wang 			hinic_global_func_id(nic_dev->hwdev));
32971742421bSXiaoyun Wang 	return 0;
32981742421bSXiaoyun Wang }
32991742421bSXiaoyun Wang 
33001fe89aa3SXiaoyun Wang void hinic_destroy_fdir_filter(struct rte_eth_dev *dev)
33011fe89aa3SXiaoyun Wang {
33021fe89aa3SXiaoyun Wang 	hinic_clear_all_ntuple_filter(dev);
33031fe89aa3SXiaoyun Wang 	hinic_clear_all_ethertype_filter(dev);
33041fe89aa3SXiaoyun Wang 	hinic_clear_all_fdir_filter(dev);
33051fe89aa3SXiaoyun Wang 	hinic_filterlist_flush(dev);
33061fe89aa3SXiaoyun Wang }
33071fe89aa3SXiaoyun Wang 
330873122b52SXiaoyun Wang const struct rte_flow_ops hinic_flow_ops = {
330973122b52SXiaoyun Wang 	.validate = hinic_flow_validate,
3310a3920be3SXiaoyun Wang 	.create = hinic_flow_create,
3311a3920be3SXiaoyun Wang 	.destroy = hinic_flow_destroy,
33121742421bSXiaoyun Wang 	.flush = hinic_flow_flush,
331373122b52SXiaoyun Wang };
3314