/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "iavf.h"
#include "iavf_generic_flow.h"
#include "virtchnl.h"
#include "iavf_rxtx.h"

#define IAVF_FDIR_MAX_QREGION_SIZE 128

#define IAVF_FDIR_IPV6_TC_OFFSET 20
#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)

#define IAVF_GTPU_EH_DWLINK 0
#define IAVF_GTPU_EH_UPLINK 1

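/*
 * Input-set bitmaps: for each supported pattern, the set of header fields
 * that a flow rule is allowed to match on.
 */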
#define IAVF_FDIR_INSET_ETH (\
	IAVF_INSET_DMAC | IAVF_INSET_SMAC | IAVF_INSET_ETHERTYPE)

#define IAVF_FDIR_INSET_ETH_IPV4 (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
	IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_ID)

#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6 (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
	IAVF_INSET_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT (\
	IAVF_FDIR_INSET_ETH_IPV6 | IAVF_INSET_IPV6_ID)

#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_IPV4_GTPU (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_IPV4 (\
	IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST | \
	IAVF_INSET_TUN_IPV4_PROTO | IAVF_INSET_TUN_IPV4_TOS | \
	IAVF_INSET_TUN_IPV4_TTL)

#define IAVF_FDIR_INSET_GTPU_IPV4_UDP (\
	IAVF_FDIR_INSET_GTPU_IPV4 | \
	IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU_IPV4_TCP (\
	IAVF_FDIR_INSET_GTPU_IPV4 | \
	IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_IPV4_GTPU_EH (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_IPV6_GTPU (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_IPV6 (\
	IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST | \
	IAVF_INSET_TUN_IPV6_NEXT_HDR | IAVF_INSET_TUN_IPV6_TC | \
	IAVF_INSET_TUN_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_GTPU_IPV6_UDP (\
	IAVF_FDIR_INSET_GTPU_IPV6 | \
	IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU_IPV6_TCP (\
	IAVF_FDIR_INSET_GTPU_IPV6 | \
	IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_IPV6_GTPU_EH (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_L2TPV3OIP (\
	IAVF_L2TPV3OIP_SESSION_ID)

#define IAVF_FDIR_INSET_IPV4_ESP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_ESP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_AH (\
	IAVF_INSET_AH_SPI)

#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_PFCP (\
	IAVF_INSET_PFCP_S_FIELD)

#define IAVF_FDIR_INSET_ECPRI (\
	IAVF_INSET_ECPRI)

#define IAVF_FDIR_INSET_GRE_IPV4 (\
	IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST | \
	IAVF_INSET_TUN_IPV4_TOS | IAVF_INSET_TUN_IPV4_PROTO)

#define IAVF_FDIR_INSET_GRE_IPV4_TCP (\
	IAVF_FDIR_INSET_GRE_IPV4 | IAVF_INSET_TUN_TCP_SRC_PORT | \
	IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_GRE_IPV4_UDP (\
	IAVF_FDIR_INSET_GRE_IPV4 | IAVF_INSET_TUN_UDP_SRC_PORT | \
	IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GRE_IPV6 (\
	IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST | \
	IAVF_INSET_TUN_IPV6_TC | IAVF_INSET_TUN_IPV6_NEXT_HDR)

#define IAVF_FDIR_INSET_GRE_IPV6_TCP (\
	IAVF_FDIR_INSET_GRE_IPV6 | IAVF_INSET_TUN_TCP_SRC_PORT | \
	IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_GRE_IPV6_UDP (\
	IAVF_FDIR_INSET_GRE_IPV6 | IAVF_INSET_TUN_UDP_SRC_PORT | \
	IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_L2TPV2 (\
	IAVF_INSET_SMAC | IAVF_INSET_DMAC | IAVF_INSET_L2TPV2)

#define IAVF_FDIR_INSET_L2TPV2_PPP_IPV4 (\
	IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST)

#define IAVF_FDIR_INSET_L2TPV2_PPP_IPV4_UDP (\
	IAVF_FDIR_INSET_L2TPV2_PPP_IPV4 | IAVF_INSET_TUN_UDP_SRC_PORT | \
	IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_L2TPV2_PPP_IPV4_TCP (\
	IAVF_FDIR_INSET_L2TPV2_PPP_IPV4 | IAVF_INSET_TUN_TCP_SRC_PORT | \
	IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_L2TPV2_PPP_IPV6 (\
	IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST)

#define IAVF_FDIR_INSET_L2TPV2_PPP_IPV6_UDP (\
	IAVF_FDIR_INSET_L2TPV2_PPP_IPV6 | IAVF_INSET_TUN_UDP_SRC_PORT | \
	IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_L2TPV2_PPP_IPV6_TCP (\
	IAVF_FDIR_INSET_L2TPV2_PPP_IPV6 | IAVF_INSET_TUN_TCP_SRC_PORT | \
	IAVF_INSET_TUN_TCP_DST_PORT)

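/*
 * Table of supported FDIR patterns. Each entry pairs a pattern with the
 * input-set mask used to validate the fields a rule actually specifies.
 */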
static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
	{iavf_pattern_raw,			 IAVF_INSET_NONE,		IAVF_INSET_NONE},
	{iavf_pattern_ethertype,		 IAVF_FDIR_INSET_ETH,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4,			 IAVF_FDIR_INSET_ETH_IPV4,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp,		 IAVF_FDIR_INSET_ETH_IPV4_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_tcp,		 IAVF_FDIR_INSET_ETH_IPV4_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_sctp,		 IAVF_FDIR_INSET_ETH_IPV4_SCTP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6,			 IAVF_FDIR_INSET_ETH_IPV6,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_frag_ext,	IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp,		 IAVF_FDIR_INSET_ETH_IPV6_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_tcp,		 IAVF_FDIR_INSET_ETH_IPV6_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_sctp,		 IAVF_FDIR_INSET_ETH_IPV6_SCTP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu,		 IAVF_FDIR_INSET_IPV4_GTPU,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_ipv4,	 IAVF_FDIR_INSET_GTPU_IPV4,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_ipv4_udp,	 IAVF_FDIR_INSET_GTPU_IPV4_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_ipv4_tcp,	 IAVF_FDIR_INSET_GTPU_IPV4_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_ipv6,	 IAVF_FDIR_INSET_GTPU_IPV6,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_ipv6_udp,	 IAVF_FDIR_INSET_GTPU_IPV6_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_ipv6_tcp,	 IAVF_FDIR_INSET_GTPU_IPV6_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh,		 IAVF_FDIR_INSET_IPV4_GTPU_EH,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4,	 IAVF_FDIR_INSET_GTPU_IPV4,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp, IAVF_FDIR_INSET_GTPU_IPV4_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp, IAVF_FDIR_INSET_GTPU_IPV4_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh_ipv6,	 IAVF_FDIR_INSET_GTPU_IPV6,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh_ipv6_udp, IAVF_FDIR_INSET_GTPU_IPV6_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh_ipv6_tcp, IAVF_FDIR_INSET_GTPU_IPV6_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu,		 IAVF_FDIR_INSET_IPV4_GTPU,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4,	 IAVF_FDIR_INSET_GTPU_IPV4,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4_udp,	 IAVF_FDIR_INSET_GTPU_IPV4_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4_tcp,	 IAVF_FDIR_INSET_GTPU_IPV4_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6,	 IAVF_FDIR_INSET_GTPU_IPV6,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6_udp,	 IAVF_FDIR_INSET_GTPU_IPV6_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6_tcp,	 IAVF_FDIR_INSET_GTPU_IPV6_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu,		 IAVF_FDIR_INSET_IPV4_GTPU,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4,	 IAVF_FDIR_INSET_GTPU_IPV4,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4_udp,	 IAVF_FDIR_INSET_GTPU_IPV4_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4_tcp,	 IAVF_FDIR_INSET_GTPU_IPV4_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6,	 IAVF_FDIR_INSET_GTPU_IPV6,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6_udp,	 IAVF_FDIR_INSET_GTPU_IPV6_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6_tcp,	 IAVF_FDIR_INSET_GTPU_IPV6_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu,		 IAVF_FDIR_INSET_IPV6_GTPU,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4,	 IAVF_FDIR_INSET_GTPU_IPV4,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4_udp,	 IAVF_FDIR_INSET_GTPU_IPV4_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4_tcp,	 IAVF_FDIR_INSET_GTPU_IPV4_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6,	 IAVF_FDIR_INSET_GTPU_IPV6,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6_udp,	 IAVF_FDIR_INSET_GTPU_IPV6_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6_tcp,	 IAVF_FDIR_INSET_GTPU_IPV6_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu,		 IAVF_FDIR_INSET_IPV6_GTPU,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4,	 IAVF_FDIR_INSET_GTPU_IPV4,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4_udp,	 IAVF_FDIR_INSET_GTPU_IPV4_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4_tcp,	 IAVF_FDIR_INSET_GTPU_IPV4_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6,	 IAVF_FDIR_INSET_GTPU_IPV6,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6_udp,	 IAVF_FDIR_INSET_GTPU_IPV6_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6_tcp,	 IAVF_FDIR_INSET_GTPU_IPV6_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh,		 IAVF_FDIR_INSET_IPV4_GTPU_EH,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4,		 IAVF_FDIR_INSET_GTPU_IPV4,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4_udp,	 IAVF_FDIR_INSET_GTPU_IPV4_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4_tcp,	 IAVF_FDIR_INSET_GTPU_IPV4_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6,		 IAVF_FDIR_INSET_GTPU_IPV6,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6_udp,	 IAVF_FDIR_INSET_GTPU_IPV6_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6_tcp,	 IAVF_FDIR_INSET_GTPU_IPV6_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh,		 IAVF_FDIR_INSET_IPV4_GTPU_EH,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4,		 IAVF_FDIR_INSET_GTPU_IPV4,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4_udp,	 IAVF_FDIR_INSET_GTPU_IPV4_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4_tcp,	 IAVF_FDIR_INSET_GTPU_IPV4_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6,		 IAVF_FDIR_INSET_GTPU_IPV6,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6_udp,	 IAVF_FDIR_INSET_GTPU_IPV6_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6_tcp,	 IAVF_FDIR_INSET_GTPU_IPV6_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh,		 IAVF_FDIR_INSET_IPV6_GTPU_EH,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4,		 IAVF_FDIR_INSET_GTPU_IPV4,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4_udp,	 IAVF_FDIR_INSET_GTPU_IPV4_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4_tcp,	 IAVF_FDIR_INSET_GTPU_IPV4_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6,		 IAVF_FDIR_INSET_GTPU_IPV6,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6_udp,	 IAVF_FDIR_INSET_GTPU_IPV6_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6_tcp,	 IAVF_FDIR_INSET_GTPU_IPV6_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh,		 IAVF_FDIR_INSET_IPV6_GTPU_EH,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4,		 IAVF_FDIR_INSET_GTPU_IPV4,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4_udp,	 IAVF_FDIR_INSET_GTPU_IPV4_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4_tcp,	 IAVF_FDIR_INSET_GTPU_IPV4_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6,		 IAVF_FDIR_INSET_GTPU_IPV6,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6_udp,	 IAVF_FDIR_INSET_GTPU_IPV6_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6_tcp,	 IAVF_FDIR_INSET_GTPU_IPV6_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gtpu,		 IAVF_FDIR_INSET_IPV6_GTPU,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gtpu_eh,		 IAVF_FDIR_INSET_IPV6_GTPU_EH,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_l2tpv3,		 IAVF_FDIR_INSET_L2TPV3OIP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_l2tpv3,		 IAVF_FDIR_INSET_L2TPV3OIP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_esp,		 IAVF_FDIR_INSET_IPV4_ESP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_esp,		 IAVF_FDIR_INSET_IPV6_ESP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_ah,		 IAVF_FDIR_INSET_AH,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_ah,		 IAVF_FDIR_INSET_AH,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp_esp,		 IAVF_FDIR_INSET_IPV4_NATT_ESP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp_esp,		 IAVF_FDIR_INSET_IPV6_NATT_ESP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_pfcp,		 IAVF_FDIR_INSET_PFCP,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_pfcp,		 IAVF_FDIR_INSET_PFCP,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ecpri,		 IAVF_FDIR_INSET_ECPRI,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_ecpri,		 IAVF_FDIR_INSET_ECPRI,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4,	IAVF_FDIR_INSET_GRE_IPV4,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_tcp,	IAVF_FDIR_INSET_GRE_IPV4_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_udp,	IAVF_FDIR_INSET_GRE_IPV4_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6,	IAVF_FDIR_INSET_GRE_IPV6,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_tcp,	IAVF_FDIR_INSET_GRE_IPV6_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_udp,	IAVF_FDIR_INSET_GRE_IPV6_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4,	IAVF_FDIR_INSET_GRE_IPV4,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_tcp,	IAVF_FDIR_INSET_GRE_IPV4_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_udp,	IAVF_FDIR_INSET_GRE_IPV4_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6,	IAVF_FDIR_INSET_GRE_IPV6,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_tcp,	IAVF_FDIR_INSET_GRE_IPV6_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_udp,	IAVF_FDIR_INSET_GRE_IPV6_UDP,	IAVF_INSET_NONE},

	{iavf_pattern_eth_ipv4_udp_l2tpv2,		IAVF_FDIR_INSET_L2TPV2,			IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp_l2tpv2_ppp,		IAVF_FDIR_INSET_L2TPV2,			IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp_l2tpv2,		IAVF_FDIR_INSET_L2TPV2,			IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp_l2tpv2_ppp,		IAVF_FDIR_INSET_L2TPV2,			IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv4,	IAVF_FDIR_INSET_L2TPV2_PPP_IPV4,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv4_udp,	IAVF_FDIR_INSET_L2TPV2_PPP_IPV4_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv4_tcp,	IAVF_FDIR_INSET_L2TPV2_PPP_IPV4_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv4,	IAVF_FDIR_INSET_L2TPV2_PPP_IPV4,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv4_udp,	IAVF_FDIR_INSET_L2TPV2_PPP_IPV4_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv4_tcp,	IAVF_FDIR_INSET_L2TPV2_PPP_IPV4_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv6,	IAVF_FDIR_INSET_L2TPV2_PPP_IPV6,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv6_udp,	IAVF_FDIR_INSET_L2TPV2_PPP_IPV6_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv6_tcp,	IAVF_FDIR_INSET_L2TPV2_PPP_IPV6_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv6,	IAVF_FDIR_INSET_L2TPV2_PPP_IPV6,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv6_udp,	IAVF_FDIR_INSET_L2TPV2_PPP_IPV6_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv6_tcp,	IAVF_FDIR_INSET_L2TPV2_PPP_IPV6_TCP,	IAVF_INSET_NONE},
};

static struct iavf_flow_parser iavf_fdir_parser;

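/* Register the FDIR parser, provided the PF advertises FDIR offload support. */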
static int
iavf_fdir_init(struct iavf_adapter *ad)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	struct iavf_flow_parser *parser;

	if (!vf->vf_res)
		return -EINVAL;

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
		parser = &iavf_fdir_parser;
	else
		return -ENOTSUP;

	return iavf_register_parser(parser, ad);
}

static void
iavf_fdir_uninit(struct iavf_adapter *ad)
{
	iavf_unregister_parser(&iavf_fdir_parser, ad);
}

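/*
 * Program the parsed filter into the PF over virtchnl and keep a private
 * copy of it attached to the rte_flow handle for later destruction.
 */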
static int
iavf_fdir_create(struct iavf_adapter *ad,
		struct rte_flow *flow,
		void *meta,
		struct rte_flow_error *error)
{
	struct iavf_fdir_conf *filter = meta;
	struct iavf_fdir_conf *rule;
	int ret;

	rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
	if (!rule) {
		rte_flow_error_set(error, ENOMEM,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Failed to allocate memory for fdir rule");
		return -rte_errno;
	}

	ret = iavf_fdir_add(ad, filter);
	if (ret) {
		rte_flow_error_set(error, -ret,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Failed to add filter rule.");
		goto free_entry;
	}

	if (filter->mark_flag == 1)
		iavf_fdir_rx_proc_enable(ad, 1);

	rte_memcpy(rule, filter, sizeof(*rule));
	flow->rule = rule;

	return 0;

free_entry:
	rte_free(rule);
	return -rte_errno;
}

static int
iavf_fdir_destroy(struct iavf_adapter *ad,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	struct iavf_fdir_conf *filter;
	int ret;

	filter = (struct iavf_fdir_conf *)flow->rule;

	ret = iavf_fdir_del(ad, filter);
	if (ret) {
		rte_flow_error_set(error, -ret,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Failed to delete filter rule.");
		return -rte_errno;
	}

	if (filter->mark_flag == 1)
		iavf_fdir_rx_proc_enable(ad, 0);

	flow->rule = NULL;
	rte_free(filter);

	return 0;
}

static int
iavf_fdir_validation(struct iavf_adapter *ad,
		__rte_unused struct rte_flow *flow,
		void *meta,
		struct rte_flow_error *error)
{
	struct iavf_fdir_conf *filter = meta;
	int ret;

	ret = iavf_fdir_check(ad, filter);
	if (ret) {
		rte_flow_error_set(error, -ret,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Failed to validate filter rule.");
		return -rte_errno;
	}

	return 0;
}

static struct iavf_flow_engine iavf_fdir_engine = {
	.init = iavf_fdir_init,
	.uninit = iavf_fdir_uninit,
	.create = iavf_fdir_create,
	.destroy = iavf_fdir_destroy,
	.validation = iavf_fdir_validation,
	.type = IAVF_FLOW_ENGINE_FDIR,
};

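/*
 * Validate an RSS action used as a queue region: the queues must be
 * contiguous, a power of two no larger than IAVF_FDIR_MAX_QREGION_SIZE,
 * and within the device limits. The region is encoded as a start queue
 * index plus log2 of the region size.
 */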
static int
iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
			struct rte_flow_error *error,
			const struct rte_flow_action *act,
			struct virtchnl_filter_action *filter_action)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	const struct rte_flow_action_rss *rss = act->conf;
	uint32_t i;

	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, act,
				"Invalid action.");
		return -rte_errno;
	}

	if (rss->queue_num <= 1) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, act,
				"Queue region size can't be 0 or 1.");
		return -rte_errno;
	}

	/* check that the queue indices for the queue region are contiguous */
	for (i = 0; i < rss->queue_num - 1; i++) {
		if (rss->queue[i + 1] != rss->queue[i] + 1) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, act,
					"Discontinuous queue region");
			return -rte_errno;
		}
	}

	if (rss->queue[rss->queue_num - 1] >= ad->dev_data->nb_rx_queues) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, act,
				"Invalid queue region indexes.");
		return -rte_errno;
	}

	if (!(rte_is_power_of_2(rss->queue_num) &&
		rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, act,
				"The region size must be one of 2, 4, 8, 16, "
				"32, 64 or 128, and the total number of "
				"queues must not exceed the VSI allocation.");
		return -rte_errno;
	}

	if (rss->queue_num > vf->max_rss_qregion) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, act,
				"The region size cannot be larger than the supported max RSS queue region");
		return -rte_errno;
	}

	filter_action->act_conf.queue.index = rss->queue[0];
	filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;

	return 0;
}

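/*
 * Translate the rte_flow action list into a virtchnl action set. At most
 * one destination action (passthru/drop/queue/RSS region) and at most one
 * mark action are accepted.
 */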
static int
iavf_fdir_parse_action(struct iavf_adapter *ad,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error,
			struct iavf_fdir_conf *filter)
{
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark_spec = NULL;
	uint32_t dest_num = 0;
	uint32_t mark_num = 0;
	int ret;

	int number = 0;
	struct virtchnl_filter_action *filter_action;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
			dest_num++;

			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

			filter_action->type = VIRTCHNL_ACTION_PASSTHRU;

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			dest_num++;

			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

			filter_action->type = VIRTCHNL_ACTION_DROP;

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			dest_num++;

			act_q = actions->conf;
			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

			filter_action->type = VIRTCHNL_ACTION_QUEUE;
			filter_action->act_conf.queue.index = act_q->index;

			if (filter_action->act_conf.queue.index >=
				ad->dev_data->nb_rx_queues) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					actions, "Invalid queue for FDIR.");
				return -rte_errno;
			}

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		case RTE_FLOW_ACTION_TYPE_RSS:
			dest_num++;

			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

			filter_action->type = VIRTCHNL_ACTION_Q_REGION;

			ret = iavf_fdir_parse_action_qregion(ad,
						error, actions, filter_action);
			if (ret)
				return ret;

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		case RTE_FLOW_ACTION_TYPE_MARK:
			mark_num++;

			filter->mark_flag = 1;
			mark_spec = actions->conf;
			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

			filter_action->type = VIRTCHNL_ACTION_MARK;
			filter_action->act_conf.mark_id = mark_spec->id;

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		default:
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Invalid action.");
			return -rte_errno;
		}
	}

	if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, actions,
			"Number of actions exceeds the maximum value");
		return -rte_errno;
	}

	if (dest_num >= 2) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, actions,
			"Unsupported action combination");
		return -rte_errno;
	}

	if (mark_num >= 2) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, actions,
			"Too many mark actions");
		return -rte_errno;
	}

	if (dest_num + mark_num == 0) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, actions,
			"Empty action");
		return -rte_errno;
	}

	/* A mark-only rule is implemented as mark + passthru. */
	if (dest_num == 0) {
		filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
		filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
		filter->add_fltr.rule_cfg.action_set.count = ++number;
	}

	return 0;
}

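/*
 * If the pattern supplied no explicit input set, derive one: a TCP/UDP
 * item on top of IPv4/IPv6 implies a match on the L3 next-protocol field
 * (6 for TCP, 17 for UDP).
 */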
static bool
iavf_fdir_refine_input_set(const uint64_t input_set,
			   const uint64_t input_set_mask,
			   struct iavf_fdir_conf *filter)
{
	struct virtchnl_proto_hdr *hdr, *hdr_last;
	struct rte_flow_item_ipv4 ipv4_spec;
	struct rte_flow_item_ipv6 ipv6_spec;
	int last_layer;
	uint8_t proto_id;

	if (input_set & ~input_set_mask)
		return false;
	else if (input_set)
		return true;

	last_layer = filter->add_fltr.rule_cfg.proto_hdrs.count - 1;
	/* The last layer index of a TCP/UDP pattern is at least 2 (ETH + L3 + L4). */
	if (last_layer < 2)
		return false;
	hdr_last = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer];
	if (hdr_last->type == VIRTCHNL_PROTO_HDR_TCP)
		proto_id = 6;
	else if (hdr_last->type == VIRTCHNL_PROTO_HDR_UDP)
		proto_id = 17;
	else
		return false;

	hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer - 1];
	switch (hdr->type) {
	case VIRTCHNL_PROTO_HDR_IPV4:
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
		memset(&ipv4_spec, 0, sizeof(ipv4_spec));
		ipv4_spec.hdr.next_proto_id = proto_id;
		rte_memcpy(hdr->buffer, &ipv4_spec.hdr,
			   sizeof(ipv4_spec.hdr));
		return true;
	case VIRTCHNL_PROTO_HDR_IPV6:
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
		memset(&ipv6_spec, 0, sizeof(ipv6_spec));
		ipv6_spec.hdr.proto = proto_id;
		rte_memcpy(hdr->buffer, &ipv6_spec.hdr,
			   sizeof(ipv6_spec.hdr));
		return true;
	default:
		return false;
	}
}

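/* Insert a dummy IPv4 fragment header at 'layer', shifting the rest up. */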
static void
iavf_fdir_add_fragment_hdr(struct virtchnl_proto_hdrs *hdrs, int layer)
{
	struct virtchnl_proto_hdr *hdr1;
	struct virtchnl_proto_hdr *hdr2;
	int i;

	if (layer < 0 || layer > hdrs->count)
		return;

	/* shift existing headers up by one layer */
	for (i = hdrs->count; i >= layer; i--) {
		hdr1 = &hdrs->proto_hdr[i];
		hdr2 = &hdrs->proto_hdr[i - 1];
		*hdr1 = *hdr2;
	}

	/* insert a dummy fragment header */
	hdr1 = &hdrs->proto_hdr[layer];
	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, IPV4_FRAG);
	hdr1->field_selector = 0;
	hdrs->count = ++layer;
}

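/*
 * Walk the rte_flow pattern and build the virtchnl protocol header list,
 * collecting the input-set bits for every field that carries a full mask.
 */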
static int
iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
			const struct rte_flow_item pattern[],
			const uint64_t input_set_mask,
			struct rte_flow_error *error,
			struct iavf_fdir_conf *filter)
{
	struct virtchnl_proto_hdrs *hdrs =
			&filter->add_fltr.rule_cfg.proto_hdrs;
	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
	const struct rte_flow_item_raw *raw_spec, *raw_mask;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_spec;
	const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
	const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
	const struct rte_flow_item_esp *esp_spec, *esp_mask;
	const struct rte_flow_item_ah *ah_spec, *ah_mask;
	const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
	const struct rte_flow_item_ecpri *ecpri_spec, *ecpri_mask;
	const struct rte_flow_item_gre *gre_spec, *gre_mask;
	const struct rte_flow_item_l2tpv2 *l2tpv2_spec, *l2tpv2_mask;
	const struct rte_flow_item_ppp *ppp_spec, *ppp_mask;
	const struct rte_flow_item *item = pattern;
	struct virtchnl_proto_hdr *hdr, *hdr1 = NULL;
	struct rte_ecpri_common_hdr ecpri_common;
	uint64_t input_set = IAVF_INSET_NONE;
	enum rte_flow_item_type item_type;
	enum rte_flow_item_type next_type;
	uint8_t tun_inner = 0;
	uint16_t ether_type, flags_version;
	uint8_t item_num = 0;
	int layer = 0;

	uint8_t  ipv6_addr_mask[16] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
	};

	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_type = item->type;

		if (item->last && !(item_type == RTE_FLOW_ITEM_TYPE_IPV4 ||
				    item_type ==
				    RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT)) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Range not supported");
		}
		item_num++;

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_RAW: {
			raw_spec = item->spec;
			raw_mask = item->mask;

			if (item_num != 1)
				return -rte_errno;

			if (raw_spec->length != raw_mask->length)
				return -rte_errno;

			uint16_t pkt_len = 0;
			uint16_t tmp_val = 0;
			uint8_t tmp = 0;
			int i, j;

			pkt_len = raw_spec->length;

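			/*
			 * The raw pattern and mask are ASCII hex strings;
			 * fold each pair of characters into one binary byte.
			 */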
			for (i = 0, j = 0; i < pkt_len; i += 2, j++) {
				tmp = raw_spec->pattern[i];
				if (tmp >= 'a' && tmp <= 'f')
					tmp_val = tmp - 'a' + 10;
				if (tmp >= 'A' && tmp <= 'F')
					tmp_val = tmp - 'A' + 10;
				if (tmp >= '0' && tmp <= '9')
					tmp_val = tmp - '0';

				tmp_val *= 16;
				tmp = raw_spec->pattern[i + 1];
				if (tmp >= 'a' && tmp <= 'f')
					tmp_val += (tmp - 'a' + 10);
				if (tmp >= 'A' && tmp <= 'F')
					tmp_val += (tmp - 'A' + 10);
				if (tmp >= '0' && tmp <= '9')
					tmp_val += (tmp - '0');

				hdrs->raw.spec[j] = tmp_val;

				tmp = raw_mask->pattern[i];
				if (tmp >= 'a' && tmp <= 'f')
					tmp_val = tmp - 'a' + 10;
				if (tmp >= 'A' && tmp <= 'F')
					tmp_val = tmp - 'A' + 10;
				if (tmp >= '0' && tmp <= '9')
					tmp_val = tmp - '0';

				tmp_val *= 16;
				tmp = raw_mask->pattern[i + 1];
				if (tmp >= 'a' && tmp <= 'f')
					tmp_val += (tmp - 'a' + 10);
				if (tmp >= 'A' && tmp <= 'F')
					tmp_val += (tmp - 'A' + 10);
				if (tmp >= '0' && tmp <= '9')
					tmp_val += (tmp - '0');

				hdrs->raw.mask[j] = tmp_val;
			}

			hdrs->raw.pkt_len = pkt_len / 2;
			hdrs->tunnel_level = 0;
			hdrs->count = 0;
			return 0;
		}

		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;
			next_type = (item + 1)->type;

			hdr1 = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);

			if (next_type == RTE_FLOW_ITEM_TYPE_END &&
			    (!eth_spec || !eth_mask)) {
				rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "NULL eth spec/mask.");
				return -rte_errno;
			}

			if (eth_spec && eth_mask) {
				if (!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr)) {
					input_set |= IAVF_INSET_DMAC;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1,
									ETH,
									DST);
				} else if (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr)) {
					input_set |= IAVF_INSET_SMAC;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1,
									ETH,
									SRC);
				}

				if (eth_mask->hdr.ether_type) {
					if (eth_mask->hdr.ether_type != RTE_BE16(0xffff)) {
						rte_flow_error_set(error, EINVAL,
							RTE_FLOW_ERROR_TYPE_ITEM,
							item, "Invalid type mask.");
						return -rte_errno;
					}

					ether_type = rte_be_to_cpu_16(eth_spec->hdr.ether_type);
					if (ether_type == RTE_ETHER_TYPE_IPV4 ||
						ether_type == RTE_ETHER_TYPE_IPV6) {
						rte_flow_error_set(error, EINVAL,
							RTE_FLOW_ERROR_TYPE_ITEM,
							item,
							"Unsupported ether_type.");
						return -rte_errno;
					}

					input_set |= IAVF_INSET_ETHERTYPE;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
									ETHERTYPE);
				}

				rte_memcpy(hdr1->buffer, eth_spec,
					   sizeof(struct rte_ether_hdr));
			}

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_IPV4:
			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
			ipv4_spec = item->spec;
			ipv4_last = item->last;
			ipv4_mask = item->mask;
			next_type = (item + 1)->type;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);

			if (!(ipv4_spec && ipv4_mask)) {
				hdrs->count = ++layer;
				break;
			}

			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid IPv4 mask.");
				return -rte_errno;
			}

			if (ipv4_last &&
			    (ipv4_last->hdr.version_ihl ||
			     ipv4_last->hdr.type_of_service ||
			     ipv4_last->hdr.time_to_live ||
			     ipv4_last->hdr.total_length ||
			     ipv4_last->hdr.next_proto_id ||
			     ipv4_last->hdr.hdr_checksum ||
			     ipv4_last->hdr.src_addr ||
			     ipv4_last->hdr.dst_addr)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid IPv4 last.");
				return -rte_errno;
			}

			/* Only full masks for IPv4 src/dst addrs are supported */
			if (ipv4_mask->hdr.src_addr &&
				ipv4_mask->hdr.src_addr != UINT32_MAX)
				return -rte_errno;
			if (ipv4_mask->hdr.dst_addr &&
				ipv4_mask->hdr.dst_addr != UINT32_MAX)
				return -rte_errno;

			if (ipv4_mask->hdr.type_of_service ==
			    UINT8_MAX) {
				input_set |= IAVF_INSET_IPV4_TOS;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
								 DSCP);
			}

			if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
				input_set |= IAVF_INSET_IPV4_PROTO;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
								 PROT);
			}

			if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
				input_set |= IAVF_INSET_IPV4_TTL;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
								 TTL);
			}

			if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
				input_set |= IAVF_INSET_IPV4_SRC;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
								 SRC);
			}

			if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
				input_set |= IAVF_INSET_IPV4_DST;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
								 DST);
			}

			if (tun_inner) {
				input_set &= ~IAVF_PROT_IPV4_OUTER;
				input_set |= IAVF_PROT_IPV4_INNER;
			}

			rte_memcpy(hdr->buffer, &ipv4_spec->hdr,
				   sizeof(ipv4_spec->hdr));

			hdrs->count = ++layer;

			/* fragmented IPv4:
			 * spec is 0x2000, mask is 0x2000
			 */
			if (ipv4_spec->hdr.fragment_offset ==
			    rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG) &&
			    ipv4_mask->hdr.fragment_offset ==
			    rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG)) {
				/* All IPv4 fragment packets share the same
				 * ethertype; if the spec and mask are valid,
				 * add the ethertype to the input set.
				 */
				input_set |= IAVF_INSET_ETHERTYPE;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
								 ETHERTYPE);

				/* add dummy header for IPv4 Fragment */
				iavf_fdir_add_fragment_hdr(hdrs, layer);
			} else if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid IPv4 mask.");
				return -rte_errno;
			}

			break;

		case RTE_FLOW_ITEM_TYPE_IPV6:
			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);

			if (!(ipv6_spec && ipv6_mask)) {
				hdrs->count = ++layer;
				break;
			}

			if (ipv6_mask->hdr.payload_len) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid IPv6 mask");
				return -rte_errno;
			}

			if ((ipv6_mask->hdr.vtc_flow &
			      rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
			     == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
				input_set |= IAVF_INSET_IPV6_TC;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
								 TC);
			}

			if (ipv6_mask->hdr.proto == UINT8_MAX) {
				input_set |= IAVF_INSET_IPV6_NEXT_HDR;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
								 PROT);
			}

			if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
				input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
								 HOP_LIMIT);
			}

			if (!memcmp(&ipv6_mask->hdr.src_addr, ipv6_addr_mask,
				    sizeof(ipv6_mask->hdr.src_addr))) {
				input_set |= IAVF_INSET_IPV6_SRC;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
								 SRC);
			}
			if (!memcmp(&ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
				    sizeof(ipv6_mask->hdr.dst_addr))) {
				input_set |= IAVF_INSET_IPV6_DST;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
								 DST);
			}

			if (tun_inner) {
				input_set &= ~IAVF_PROT_IPV6_OUTER;
				input_set |= IAVF_PROT_IPV6_INNER;
			}

			rte_memcpy(hdr->buffer, &ipv6_spec->hdr,
				   sizeof(ipv6_spec->hdr));

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
			ipv6_frag_spec = item->spec;
			ipv6_frag_mask = item->mask;
			next_type = (item + 1)->type;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6_EH_FRAG);

			if (!(ipv6_frag_spec && ipv6_frag_mask)) {
				hdrs->count = ++layer;
				break;
			}

			/* fragmented IPv6:
			 * spec is 0x1, mask is 0x1
			 */
			if (ipv6_frag_spec->hdr.frag_data ==
			    rte_cpu_to_be_16(1) &&
			    ipv6_frag_mask->hdr.frag_data ==
			    rte_cpu_to_be_16(1)) {
				/* All IPv6 fragment packets share the same
				 * ethertype; if the spec and mask are valid,
				 * add the ethertype to the input set.
				 */
				input_set |= IAVF_INSET_ETHERTYPE;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
								 ETHERTYPE);

				rte_memcpy(hdr->buffer, &ipv6_frag_spec->hdr,
					   sizeof(ipv6_frag_spec->hdr));
			} else if (ipv6_frag_mask->hdr.id == UINT32_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid IPv6 mask.");
				return -rte_errno;
			}

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);

			if (udp_spec && udp_mask) {
				if (udp_mask->hdr.dgram_len ||
					udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM, item,
						"Invalid UDP mask");
					return -rte_errno;
				}

				/* Only full masks for UDP src/dst ports are supported */
				if (udp_mask->hdr.src_port &&
					udp_mask->hdr.src_port != UINT16_MAX)
					return -rte_errno;
				if (udp_mask->hdr.dst_port &&
					udp_mask->hdr.dst_port != UINT16_MAX)
					return -rte_errno;

				if (udp_mask->hdr.src_port == UINT16_MAX) {
					input_set |= IAVF_INSET_UDP_SRC_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
				}
				if (udp_mask->hdr.dst_port == UINT16_MAX) {
					input_set |= IAVF_INSET_UDP_DST_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
				}

				if (tun_inner) {
					input_set &= ~IAVF_PROT_UDP_OUTER;
					input_set |= IAVF_PROT_UDP_INNER;
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
				    l3 == RTE_FLOW_ITEM_TYPE_IPV6)
					rte_memcpy(hdr->buffer,
						&udp_spec->hdr,
						sizeof(udp_spec->hdr));
			}

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);

			if (tcp_spec && tcp_mask) {
				if (tcp_mask->hdr.sent_seq ||
					tcp_mask->hdr.recv_ack ||
					tcp_mask->hdr.data_off ||
					tcp_mask->hdr.tcp_flags ||
					tcp_mask->hdr.rx_win ||
					tcp_mask->hdr.cksum ||
					tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM, item,
						"Invalid TCP mask");
					return -rte_errno;
				}

				/* Only full masks for TCP src/dst ports are supported */
				if (tcp_mask->hdr.src_port &&
					tcp_mask->hdr.src_port != UINT16_MAX)
					return -rte_errno;
				if (tcp_mask->hdr.dst_port &&
					tcp_mask->hdr.dst_port != UINT16_MAX)
					return -rte_errno;

				if (tcp_mask->hdr.src_port == UINT16_MAX) {
					input_set |= IAVF_INSET_TCP_SRC_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
				}
				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
					input_set |= IAVF_INSET_TCP_DST_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
				}

				if (tun_inner) {
					input_set &= ~IAVF_PROT_TCP_OUTER;
					input_set |= IAVF_PROT_TCP_INNER;
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
				    l3 == RTE_FLOW_ITEM_TYPE_IPV6)
					rte_memcpy(hdr->buffer,
						&tcp_spec->hdr,
						sizeof(tcp_spec->hdr));
			}

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);

			if (sctp_spec && sctp_mask) {
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM, item,
						"Invalid SCTP mask");
					return -rte_errno;
				}

				/* Only full masks for SCTP src/dst ports are supported */
				if (sctp_mask->hdr.src_port &&
					sctp_mask->hdr.src_port != UINT16_MAX)
					return -rte_errno;
				if (sctp_mask->hdr.dst_port &&
					sctp_mask->hdr.dst_port != UINT16_MAX)
					return -rte_errno;

				if (sctp_mask->hdr.src_port == UINT16_MAX) {
					input_set |= IAVF_INSET_SCTP_SRC_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
				}
				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
					input_set |= IAVF_INSET_SCTP_DST_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
				    l3 == RTE_FLOW_ITEM_TYPE_IPV6)
					rte_memcpy(hdr->buffer,
						&sctp_spec->hdr,
						sizeof(sctp_spec->hdr));
			}

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_GTPU:
			gtp_spec = item->spec;
			gtp_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);

			if (gtp_spec && gtp_mask) {
				if (gtp_mask->hdr.gtp_hdr_info ||
					gtp_mask->hdr.msg_type ||
					gtp_mask->hdr.plen) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Invalid GTP mask");
					return -rte_errno;
				}

				if (gtp_mask->hdr.teid == UINT32_MAX) {
					input_set |= IAVF_INSET_GTPU_TEID;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
				}

				rte_memcpy(hdr->buffer,
					gtp_spec, sizeof(*gtp_spec));
			}

			tun_inner = 1;

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
			gtp_psc_spec = item->spec;
			gtp_psc_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			if (!gtp_psc_spec)
				VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
			else if ((gtp_psc_mask->hdr.qfi) &&
				!(gtp_psc_mask->hdr.type))
				VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
			else if (gtp_psc_spec->hdr.type == IAVF_GTPU_EH_UPLINK)
				VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_UP);
			else if (gtp_psc_spec->hdr.type == IAVF_GTPU_EH_DWLINK)
				VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_DWN);

			if (gtp_psc_spec && gtp_psc_mask) {
				if (gtp_psc_mask->hdr.qfi == 0x3F) {
					input_set |= IAVF_INSET_GTPU_QFI;
					if (!gtp_psc_mask->hdr.type)
						VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
										 GTPU_EH, QFI);
					else if (gtp_psc_spec->hdr.type ==
								IAVF_GTPU_EH_UPLINK)
						VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
										 GTPU_UP, QFI);
					else if (gtp_psc_spec->hdr.type ==
								IAVF_GTPU_EH_DWLINK)
						VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
										 GTPU_DWN, QFI);
				}

				/*
				 * Local structure that bridges the layout gap
				 * between the kernel driver and
				 * rte_gtp_psc_generic_hdr.
				 */
				struct iavf_gtp_psc_spec_hdr {
					uint8_t len;
					uint8_t qfi:6;
					uint8_t type:4;
					uint8_t next;
				} psc;
				psc.len = gtp_psc_spec->hdr.ext_hdr_len;
				psc.qfi = gtp_psc_spec->hdr.qfi;
				psc.type = gtp_psc_spec->hdr.type;
				psc.next = 0;
				rte_memcpy(hdr->buffer, &psc,
					sizeof(struct iavf_gtp_psc_spec_hdr));
			}

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
			l2tpv3oip_spec = item->spec;
			l2tpv3oip_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);

			if (l2tpv3oip_spec && l2tpv3oip_mask) {
				if (l2tpv3oip_mask->session_id == UINT32_MAX) {
					input_set |= IAVF_L2TPV3OIP_SESSION_ID;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
				}

				rte_memcpy(hdr->buffer, l2tpv3oip_spec,
					sizeof(*l2tpv3oip_spec));
			}

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_ESP:
			esp_spec = item->spec;
			esp_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);

			if (esp_spec && esp_mask) {
				if (esp_mask->hdr.spi == UINT32_MAX) {
					input_set |= IAVF_INSET_ESP_SPI;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
				}

				rte_memcpy(hdr->buffer, &esp_spec->hdr,
					sizeof(esp_spec->hdr));
			}

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_AH:
			ah_spec = item->spec;
			ah_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);

			if (ah_spec && ah_mask) {
				if (ah_mask->spi == UINT32_MAX) {
					input_set |= IAVF_INSET_AH_SPI;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
				}

				rte_memcpy(hdr->buffer, ah_spec,
					sizeof(*ah_spec));
			}

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_PFCP:
			pfcp_spec = item->spec;
			pfcp_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);

			if (pfcp_spec && pfcp_mask) {
				if (pfcp_mask->s_field == UINT8_MAX) {
					input_set |= IAVF_INSET_PFCP_S_FIELD;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
				}

				rte_memcpy(hdr->buffer, pfcp_spec,
					sizeof(*pfcp_spec));
			}

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_ECPRI:
			ecpri_spec = item->spec;
			ecpri_mask = item->mask;

			ecpri_common.u32 = rte_be_to_cpu_32(ecpri_spec->hdr.common.u32);

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ECPRI);

			if (ecpri_spec && ecpri_mask) {
				if (ecpri_common.type == RTE_ECPRI_MSG_TYPE_IQ_DATA &&
						ecpri_mask->hdr.type0.pc_id == UINT16_MAX) {
					input_set |= IAVF_ECPRI_PC_RTC_ID;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ECPRI,
									 PC_RTC_ID);
				}

				rte_memcpy(hdr->buffer, ecpri_spec,
					sizeof(*ecpri_spec));
			}

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_GRE:
			gre_spec = item->spec;
			gre_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GRE);

			if (gre_spec && gre_mask) {
				rte_memcpy(hdr->buffer, gre_spec,
					   sizeof(*gre_spec));
			}

			tun_inner = 1;

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_L2TPV2:
			l2tpv2_spec = item->spec;
			l2tpv2_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV2);

			if (l2tpv2_spec && l2tpv2_mask) {
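				/*
				 * The session-id offset within an L2TPv2
				 * header depends on which optional fields
				 * the flags advertise, so check the layout
				 * implied by each message type.
				 */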
				flags_version =
					rte_be_to_cpu_16(l2tpv2_spec->hdr.common.flags_version);
				if ((flags_version == RTE_L2TPV2_MSG_TYPE_CONTROL &&
				     l2tpv2_mask->hdr.type3.session_id == UINT16_MAX) ||
				    (flags_version == RTE_L2TPV2_MSG_TYPE_DATA &&
				     l2tpv2_mask->hdr.type7.session_id == UINT16_MAX) ||
				    (flags_version == RTE_L2TPV2_MSG_TYPE_DATA_L &&
				     l2tpv2_mask->hdr.type6.session_id == UINT16_MAX) ||
				    (flags_version == RTE_L2TPV2_MSG_TYPE_DATA_S &&
				     l2tpv2_mask->hdr.type5.session_id == UINT16_MAX) ||
				    (flags_version == RTE_L2TPV2_MSG_TYPE_DATA_O &&
				     l2tpv2_mask->hdr.type4.session_id == UINT16_MAX) ||
				    (flags_version == RTE_L2TPV2_MSG_TYPE_DATA_L_S &&
				     l2tpv2_mask->hdr.type3.session_id == UINT16_MAX) ||
				    (flags_version == RTE_L2TPV2_MSG_TYPE_DATA_L_O &&
				     l2tpv2_mask->hdr.type2.session_id == UINT16_MAX) ||
				    (flags_version == RTE_L2TPV2_MSG_TYPE_DATA_S_O &&
				     l2tpv2_mask->hdr.type1.session_id == UINT16_MAX) ||
				    (flags_version == RTE_L2TPV2_MSG_TYPE_DATA_L_S_O &&
				     l2tpv2_mask->hdr.type0.session_id == UINT16_MAX)) {
					input_set |= IAVF_L2TPV2_SESSION_ID;
					if (flags_version & IAVF_L2TPV2_FLAGS_LEN)
						VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
								L2TPV2,
								LEN_SESS_ID);
					else
						VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
								L2TPV2,
								SESS_ID);
				}

				rte_memcpy(hdr->buffer, l2tpv2_spec,
					   sizeof(*l2tpv2_spec));
			}

			tun_inner = 1;

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_PPP:
			ppp_spec = item->spec;
			ppp_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PPP);

			if (ppp_spec && ppp_mask) {
				rte_memcpy(hdr->buffer, ppp_spec,
					   sizeof(*ppp_spec));
			}

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_VOID:
			break;

		default:
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM, item,
					"Invalid pattern item.");
			return -rte_errno;
		}
	}

	if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Protocol header layers exceed the maximum value");
		return -rte_errno;
	}

	if (!iavf_fdir_refine_input_set(input_set,
					input_set_mask | IAVF_INSET_ETHERTYPE,
					filter)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
				   "Invalid input set");
		return -rte_errno;
	}

	filter->input_set = input_set;

	return 0;
}

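/*
 * Top-level parse entry: match the pattern against the supported table,
 * then fill in the virtchnl filter from the pattern and actions. The
 * filter is handed back through *meta for the validate/create stages.
 */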
static int
iavf_fdir_parse(struct iavf_adapter *ad,
		struct iavf_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		uint32_t priority,
		void **meta,
		struct rte_flow_error *error)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	struct iavf_fdir_conf *filter = &vf->fdir.conf;
	struct iavf_pattern_match_item *item = NULL;
	int ret;

	memset(filter, 0, sizeof(*filter));

	if (priority >= 1)
		return -rte_errno;

	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
	if (!item)
		return -rte_errno;

	ret = iavf_fdir_parse_pattern(ad, pattern, item->input_set_mask,
				      error, filter);
	if (ret)
		goto error;

	ret = iavf_fdir_parse_action(ad, actions, error, filter);
	if (ret)
		goto error;

	if (meta)
		*meta = filter;

error:
	rte_free(item);
	return ret;
}

static struct iavf_flow_parser iavf_fdir_parser = {
	.engine = &iavf_fdir_engine,
	.array = iavf_fdir_pattern,
	.array_len = RTE_DIM(iavf_fdir_pattern),
	.parse_pattern_action = iavf_fdir_parse,
	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
};

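/* Register the FDIR engine with the generic flow framework at startup. */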
RTE_INIT(iavf_fdir_engine_register)
{
	iavf_register_flow_engine(&iavf_fdir_engine);
}