1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 HiSilicon Limited.
3  */
4 
5 #include <rte_flow_driver.h>
6 #include <rte_io.h>
7 #include <rte_malloc.h>
8 
9 #include "hns3_ethdev.h"
10 #include "hns3_logs.h"
11 #include "hns3_flow.h"
12 
13 #define NEXT_ITEM_OF_ACTION(act, actions, index) \
14 	do { \
15 		(act) = (actions) + (index); \
16 		while ((act)->type == RTE_FLOW_ACTION_TYPE_VOID) { \
17 			(index)++; \
18 			(act) = (actions) + (index); \
19 		} \
20 	} while (0)
21 
22 #define NEXT_ITEM_OF_PATTERN(item, pattern, index) \
23 	do { \
24 		(item) = (pattern) + (index); \
25 		while ((item)->type == RTE_FLOW_ITEM_TYPE_VOID) { \
26 			(index)++; \
27 			(item) = (pattern) + (index); \
28 		} \
29 	} while (0)
30 
31 #define HNS3_HASH_HDR_ETH	RTE_BIT64(0)
32 #define HNS3_HASH_HDR_IPV4	RTE_BIT64(1)
33 #define HNS3_HASH_HDR_IPV6	RTE_BIT64(2)
34 #define HNS3_HASH_HDR_TCP	RTE_BIT64(3)
35 #define HNS3_HASH_HDR_UDP	RTE_BIT64(4)
36 #define HNS3_HASH_HDR_SCTP	RTE_BIT64(5)
37 
38 #define HNS3_HASH_VOID_NEXT_ALLOW	BIT_ULL(RTE_FLOW_ITEM_TYPE_ETH)
39 
40 #define HNS3_HASH_ETH_NEXT_ALLOW	(BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV4) | \
41 					 BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV6))
42 
43 #define HNS3_HASH_IP_NEXT_ALLOW		(BIT_ULL(RTE_FLOW_ITEM_TYPE_TCP) | \
44 					 BIT_ULL(RTE_FLOW_ITEM_TYPE_UDP) | \
45 					 BIT_ULL(RTE_FLOW_ITEM_TYPE_SCTP))
46 
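/*
 * Tables used when parsing the pattern of an RSS/hash rule:
 * hash_pattern_next_allow_items[] lists which item type may follow the
 * current one (VOID -> ETH -> IPV4/IPV6 -> TCP/UDP/SCTP), and
 * hash_pattern_item_header[] maps each item to its HNS3_HASH_HDR_* bit so
 * that a pattern can be summarized as a bitmask of headers (see
 * hash_map_table below).
 */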
47 static const uint64_t hash_pattern_next_allow_items[] = {
48 	[RTE_FLOW_ITEM_TYPE_VOID] = HNS3_HASH_VOID_NEXT_ALLOW,
49 	[RTE_FLOW_ITEM_TYPE_ETH]  = HNS3_HASH_ETH_NEXT_ALLOW,
50 	[RTE_FLOW_ITEM_TYPE_IPV4] = HNS3_HASH_IP_NEXT_ALLOW,
51 	[RTE_FLOW_ITEM_TYPE_IPV6] = HNS3_HASH_IP_NEXT_ALLOW,
52 };
53 
54 static const uint64_t hash_pattern_item_header[] = {
55 	[RTE_FLOW_ITEM_TYPE_ETH]  = HNS3_HASH_HDR_ETH,
56 	[RTE_FLOW_ITEM_TYPE_IPV4] = HNS3_HASH_HDR_IPV4,
57 	[RTE_FLOW_ITEM_TYPE_IPV6] = HNS3_HASH_HDR_IPV6,
58 	[RTE_FLOW_ITEM_TYPE_TCP]  = HNS3_HASH_HDR_TCP,
59 	[RTE_FLOW_ITEM_TYPE_UDP]  = HNS3_HASH_HDR_UDP,
60 	[RTE_FLOW_ITEM_TYPE_SCTP] = HNS3_HASH_HDR_SCTP,
61 };
62 
63 #define HNS3_HASH_IPV4		(HNS3_HASH_HDR_ETH | HNS3_HASH_HDR_IPV4)
64 #define HNS3_HASH_IPV4_TCP	(HNS3_HASH_HDR_ETH | \
65 				 HNS3_HASH_HDR_IPV4 | \
66 				 HNS3_HASH_HDR_TCP)
67 #define HNS3_HASH_IPV4_UDP	(HNS3_HASH_HDR_ETH | \
68 				 HNS3_HASH_HDR_IPV4 | \
69 				 HNS3_HASH_HDR_UDP)
70 #define HNS3_HASH_IPV4_SCTP	(HNS3_HASH_HDR_ETH | \
71 				 HNS3_HASH_HDR_IPV4 | \
72 				 HNS3_HASH_HDR_SCTP)
73 #define HNS3_HASH_IPV6		(HNS3_HASH_HDR_ETH | HNS3_HASH_HDR_IPV6)
74 #define HNS3_HASH_IPV6_TCP	(HNS3_HASH_HDR_ETH | \
75 				 HNS3_HASH_HDR_IPV6 | \
76 				 HNS3_HASH_HDR_TCP)
77 #define HNS3_HASH_IPV6_UDP	(HNS3_HASH_HDR_ETH | \
78 				 HNS3_HASH_HDR_IPV6 | \
79 				 HNS3_HASH_HDR_UDP)
80 #define HNS3_HASH_IPV6_SCTP	(HNS3_HASH_HDR_ETH | \
81 				 HNS3_HASH_HDR_IPV6 | \
82 				 HNS3_HASH_HDR_SCTP)
83 
84 static const struct hns3_hash_map_info {
85 	/* flow type specified, zero means action works for all flow types. */
86 	uint64_t pattern_type;
87 	uint64_t rss_pctype; /* packet type with prefix RTE_ETH_RSS_xxx */
88 	uint64_t l3l4_types; /* Supported L3/L4 RSS types for this packet type */
89 	uint64_t hw_pctype; /* packet type in driver */
90 	uint64_t tuple_mask; /* full tuples of the hw_pctype */
91 } hash_map_table[] = {
92 	/* IPV4 */
93 	{ HNS3_HASH_IPV4,
94 	  RTE_ETH_RSS_IPV4, HNS3_RSS_SUPPORT_L3_SRC_DST,
95 	  HNS3_RSS_PCTYPE_IPV4_NONF, HNS3_RSS_TUPLE_IPV4_NONF_M },
96 	{ HNS3_HASH_IPV4,
97 	  RTE_ETH_RSS_NONFRAG_IPV4_OTHER, HNS3_RSS_SUPPORT_L3_SRC_DST,
98 	  HNS3_RSS_PCTYPE_IPV4_NONF, HNS3_RSS_TUPLE_IPV4_NONF_M },
99 	{ HNS3_HASH_IPV4,
100 	  RTE_ETH_RSS_FRAG_IPV4, HNS3_RSS_SUPPORT_L3_SRC_DST,
101 	  HNS3_RSS_PCTYPE_IPV4_FLAG, HNS3_RSS_TUPLE_IPV4_FLAG_M },
102 	{ HNS3_HASH_IPV4_TCP,
103 	  RTE_ETH_RSS_NONFRAG_IPV4_TCP, HNS3_RSS_SUPPORT_L3L4,
104 	  HNS3_RSS_PCTYPE_IPV4_TCP, HNS3_RSS_TUPLE_IPV4_TCP_M },
105 	{ HNS3_HASH_IPV4_UDP,
106 	  RTE_ETH_RSS_NONFRAG_IPV4_UDP, HNS3_RSS_SUPPORT_L3L4,
107 	  HNS3_RSS_PCTYPE_IPV4_UDP, HNS3_RSS_TUPLE_IPV4_UDP_M },
108 	{ HNS3_HASH_IPV4_SCTP,
109 	  RTE_ETH_RSS_NONFRAG_IPV4_SCTP, HNS3_RSS_SUPPORT_L3L4,
110 	  HNS3_RSS_PCTYPE_IPV4_SCTP, HNS3_RSS_TUPLE_IPV4_SCTP_M },
111 	/* IPV6 */
112 	{ HNS3_HASH_IPV6,
113 	  RTE_ETH_RSS_IPV6, HNS3_RSS_SUPPORT_L3_SRC_DST,
114 	  HNS3_RSS_PCTYPE_IPV6_NONF, HNS3_RSS_TUPLE_IPV6_NONF_M },
115 	{ HNS3_HASH_IPV6,
116 	  RTE_ETH_RSS_NONFRAG_IPV6_OTHER, HNS3_RSS_SUPPORT_L3_SRC_DST,
117 	  HNS3_RSS_PCTYPE_IPV6_NONF, HNS3_RSS_TUPLE_IPV6_NONF_M },
118 	{ HNS3_HASH_IPV6,
119 	  RTE_ETH_RSS_FRAG_IPV6, HNS3_RSS_SUPPORT_L3_SRC_DST,
120 	  HNS3_RSS_PCTYPE_IPV6_FLAG, HNS3_RSS_TUPLE_IPV6_FLAG_M },
121 	{ HNS3_HASH_IPV6_TCP,
122 	  RTE_ETH_RSS_NONFRAG_IPV6_TCP, HNS3_RSS_SUPPORT_L3L4,
123 	  HNS3_RSS_PCTYPE_IPV6_TCP, HNS3_RSS_TUPLE_IPV6_TCP_M },
124 	{ HNS3_HASH_IPV6_UDP,
125 	  RTE_ETH_RSS_NONFRAG_IPV6_UDP, HNS3_RSS_SUPPORT_L3L4,
126 	  HNS3_RSS_PCTYPE_IPV6_UDP, HNS3_RSS_TUPLE_IPV6_UDP_M },
127 	{ HNS3_HASH_IPV6_SCTP,
128 	  RTE_ETH_RSS_NONFRAG_IPV6_SCTP, HNS3_RSS_SUPPORT_L3L4,
129 	  HNS3_RSS_PCTYPE_IPV6_SCTP, HNS3_RSS_TUPLE_IPV6_SCTP_M },
130 };
131 
132 static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF };
133 static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 };
134 
135 /* Special Filter id for non-specific packet flagging. Don't change value */
136 #define HNS3_MAX_FILTER_ID	0x0FFF
137 
138 #define ETHER_TYPE_MASK		0xFFFF
139 #define IPPROTO_MASK		0xFF
140 #define TUNNEL_TYPE_MASK	0xFFFF
141 
142 #define HNS3_TUNNEL_TYPE_VXLAN		0x12B5
143 #define HNS3_TUNNEL_TYPE_VXLAN_GPE	0x12B6
144 #define HNS3_TUNNEL_TYPE_GENEVE		0x17C1
145 #define HNS3_TUNNEL_TYPE_NVGRE		0x6558
146 
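/*
 * FDIR pattern parsing walks the items with a simple state machine: after
 * each parsed item, only the types listed in the corresponding *_next_items[]
 * table below may follow (tracked through struct items_step_mngr).
 */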
147 static enum rte_flow_item_type first_items[] = {
148 	RTE_FLOW_ITEM_TYPE_ETH,
149 	RTE_FLOW_ITEM_TYPE_IPV4,
150 	RTE_FLOW_ITEM_TYPE_IPV6,
151 	RTE_FLOW_ITEM_TYPE_TCP,
152 	RTE_FLOW_ITEM_TYPE_UDP,
153 	RTE_FLOW_ITEM_TYPE_SCTP,
154 	RTE_FLOW_ITEM_TYPE_ICMP,
155 	RTE_FLOW_ITEM_TYPE_NVGRE,
156 	RTE_FLOW_ITEM_TYPE_VXLAN,
157 	RTE_FLOW_ITEM_TYPE_GENEVE,
158 	RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
159 	RTE_FLOW_ITEM_TYPE_PTYPE
160 };
161 
162 static enum rte_flow_item_type L2_next_items[] = {
163 	RTE_FLOW_ITEM_TYPE_VLAN,
164 	RTE_FLOW_ITEM_TYPE_IPV4,
165 	RTE_FLOW_ITEM_TYPE_IPV6,
166 	RTE_FLOW_ITEM_TYPE_PTYPE
167 };
168 
169 static enum rte_flow_item_type L3_next_items[] = {
170 	RTE_FLOW_ITEM_TYPE_TCP,
171 	RTE_FLOW_ITEM_TYPE_UDP,
172 	RTE_FLOW_ITEM_TYPE_SCTP,
173 	RTE_FLOW_ITEM_TYPE_NVGRE,
174 	RTE_FLOW_ITEM_TYPE_ICMP,
175 	RTE_FLOW_ITEM_TYPE_PTYPE
176 };
177 
178 static enum rte_flow_item_type L4_next_items[] = {
179 	RTE_FLOW_ITEM_TYPE_VXLAN,
180 	RTE_FLOW_ITEM_TYPE_GENEVE,
181 	RTE_FLOW_ITEM_TYPE_VXLAN_GPE
182 };
183 
184 static enum rte_flow_item_type tunnel_next_items[] = {
185 	RTE_FLOW_ITEM_TYPE_ETH,
186 	RTE_FLOW_ITEM_TYPE_VLAN
187 };
188 
189 struct items_step_mngr {
190 	enum rte_flow_item_type *items;
191 	size_t count;
192 };
193 
194 static inline void
195 net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len)
196 {
197 	size_t i;
198 
199 	for (i = 0; i < len; i++)
200 		dst[i] = rte_be_to_cpu_32(src[i]);
201 }
202 
203 /*
204  * This function is used to parse filter type.
205  * 1. As we know, RSS is used to spread packets among several queues. The flow
206  *    API provides struct rte_flow_action_rss, and the user can configure its
207  *    fields, such as func/level/types/key/queue, to control the RSS function.
208  * 2. The flow API also supports queue region configuration for hns3. It is
209  *    implemented by FDIR + RSS in hns3 hardware: the user can create one FDIR
210  *    rule whose action is an RSS queue region.
211  * 3. When the action is RSS, the following rule is used to distinguish:
212  *    Case 1: the pattern has ETH and all fields in the RSS action except
213  *            'queues' are zero or default, indicating queue region configuration.
214  *    Case other: a general RSS action.
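 *    For example (testpmd flow syntax, for illustration only): an RSS action
 *    such as "actions rss queues 0 1 2 3 end / end" with an ETH pattern is
 *    parsed as a queue region (FDIR) rule, while an RSS action that also sets
 *    'types', 'key' or a non-default 'func' is parsed as a general RSS rule.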
215  */
216 static void
217 hns3_parse_filter_type(const struct rte_flow_item pattern[],
218 		       const struct rte_flow_action actions[],
219 		       struct hns3_filter_info *filter_info)
220 {
221 	const struct rte_flow_action_rss *rss_act;
222 	const struct rte_flow_action *act = NULL;
223 	bool only_has_queues = false;
224 	bool have_eth = false;
225 
226 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
227 		if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
228 			act = actions;
229 			break;
230 		}
231 	}
232 	if (act == NULL) {
233 		filter_info->type = RTE_ETH_FILTER_FDIR;
234 		return;
235 	}
236 
237 	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
238 		if (pattern->type == RTE_FLOW_ITEM_TYPE_ETH) {
239 			have_eth = true;
240 			break;
241 		}
242 	}
243 
244 	rss_act = act->conf;
245 	only_has_queues = (rss_act->queue_num > 0) &&
246 			  (rss_act->func == RTE_ETH_HASH_FUNCTION_DEFAULT &&
247 			   rss_act->types == 0 && rss_act->key_len == 0);
248 	if (have_eth && only_has_queues) {
249 		/*
250 		 * Pattern has ETH and all fields in RSS action except 'queues'
251 		 * are zero or default, which indicates this is queue region
252 		 * configuration.
253 		 */
254 		filter_info->type = RTE_ETH_FILTER_FDIR;
255 		return;
256 	}
257 
258 	filter_info->type = RTE_ETH_FILTER_HASH;
259 }
260 
261 static inline struct hns3_flow_counter *
262 hns3_counter_lookup(struct rte_eth_dev *dev, uint32_t id)
263 {
264 	struct hns3_adapter *hns = dev->data->dev_private;
265 	struct hns3_pf *pf = &hns->pf;
266 	struct hns3_flow_counter *cnt;
267 
268 	LIST_FOREACH(cnt, &pf->flow_counters, next) {
269 		if (cnt->id == id)
270 			return cnt;
271 	}
272 	return NULL;
273 }
274 
275 static int
276 hns3_counter_new(struct rte_eth_dev *dev, uint32_t indirect, uint32_t id,
277 		 struct rte_flow_error *error)
278 {
279 	struct hns3_adapter *hns = dev->data->dev_private;
280 	struct hns3_pf *pf = &hns->pf;
281 	struct hns3_hw *hw = &hns->hw;
282 	struct hns3_flow_counter *cnt;
283 	uint64_t value;
284 	int ret;
285 
286 	cnt = hns3_counter_lookup(dev, id);
287 	if (cnt) {
288 		if (!cnt->indirect || cnt->indirect != indirect)
289 			return rte_flow_error_set(error, EINVAL,
290 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
291 				cnt,
292 				"Counter id is used, indirect flag not match");
293 		/* Clear the indirect counter on first use. */
294 		if (cnt->indirect && cnt->ref_cnt == 1)
295 			(void)hns3_fd_get_count(hw, id, &value);
296 		cnt->ref_cnt++;
297 		return 0;
298 	}
299 
300 	/* Clear the counter by read ops because the counter is read-clear */
301 	ret = hns3_fd_get_count(hw, id, &value);
302 	if (ret)
303 		return rte_flow_error_set(error, EIO,
304 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
305 					  "Clear counter failed!");
306 
307 	cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
308 	if (cnt == NULL)
309 		return rte_flow_error_set(error, ENOMEM,
310 					  RTE_FLOW_ERROR_TYPE_HANDLE, cnt,
311 					  "Alloc mem for counter failed");
312 	cnt->id = id;
313 	cnt->indirect = indirect;
314 	cnt->ref_cnt = 1;
315 	cnt->hits = 0;
316 	LIST_INSERT_HEAD(&pf->flow_counters, cnt, next);
317 	return 0;
318 }
319 
320 static int
321 hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
322 		   struct rte_flow_query_count *qc,
323 		   struct rte_flow_error *error)
324 {
325 	struct hns3_adapter *hns = dev->data->dev_private;
326 	struct hns3_flow_counter *cnt;
327 	uint64_t value;
328 	int ret;
329 
330 	/* FDIR is available only in PF driver */
331 	if (hns->is_vf)
332 		return rte_flow_error_set(error, ENOTSUP,
333 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
334 					  "Fdir is not supported in VF");
335 	cnt = hns3_counter_lookup(dev, flow->counter_id);
336 	if (cnt == NULL)
337 		return rte_flow_error_set(error, EINVAL,
338 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
339 					  "Can't find counter id");
340 
341 	ret = hns3_fd_get_count(&hns->hw, flow->counter_id, &value);
342 	if (ret) {
343 		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
344 				   NULL, "Read counter fail.");
345 		return ret;
346 	}
347 	qc->hits_set = 1;
348 	qc->hits = value;
349 	qc->bytes_set = 0;
350 	qc->bytes = 0;
351 
352 	return 0;
353 }
354 
355 static int
356 hns3_counter_release(struct rte_eth_dev *dev, uint32_t id)
357 {
358 	struct hns3_adapter *hns = dev->data->dev_private;
359 	struct hns3_hw *hw = &hns->hw;
360 	struct hns3_flow_counter *cnt;
361 
362 	cnt = hns3_counter_lookup(dev, id);
363 	if (cnt == NULL) {
364 		hns3_err(hw, "Can't find available counter to release");
365 		return -EINVAL;
366 	}
367 	cnt->ref_cnt--;
368 	if (cnt->ref_cnt == 0) {
369 		LIST_REMOVE(cnt, next);
370 		rte_free(cnt);
371 	}
372 	return 0;
373 }
374 
375 static void
376 hns3_counter_flush(struct rte_eth_dev *dev)
377 {
378 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
379 	LIST_HEAD(counters, hns3_flow_counter) indir_counters;
380 	struct hns3_flow_counter *cnt_ptr;
381 
382 	LIST_INIT(&indir_counters);
383 	cnt_ptr = LIST_FIRST(&pf->flow_counters);
384 	while (cnt_ptr) {
385 		LIST_REMOVE(cnt_ptr, next);
386 		if (cnt_ptr->indirect)
387 			LIST_INSERT_HEAD(&indir_counters, cnt_ptr, next);
388 		else
389 			rte_free(cnt_ptr);
390 		cnt_ptr = LIST_FIRST(&pf->flow_counters);
391 	}
392 
393 	/* Reset the indirect action and add to pf->flow_counters list. */
394 	cnt_ptr = LIST_FIRST(&indir_counters);
395 	while (cnt_ptr) {
396 		LIST_REMOVE(cnt_ptr, next);
397 		cnt_ptr->ref_cnt = 1;
398 		cnt_ptr->hits = 0;
399 		LIST_INSERT_HEAD(&pf->flow_counters, cnt_ptr, next);
400 		cnt_ptr = LIST_FIRST(&indir_counters);
401 	}
402 }
403 
404 static int
405 hns3_handle_action_queue(struct rte_eth_dev *dev,
406 			 const struct rte_flow_action *action,
407 			 struct hns3_fdir_rule *rule,
408 			 struct rte_flow_error *error)
409 {
410 	struct hns3_adapter *hns = dev->data->dev_private;
411 	const struct rte_flow_action_queue *queue;
412 	struct hns3_hw *hw = &hns->hw;
413 
414 	queue = (const struct rte_flow_action_queue *)action->conf;
415 	if (queue->index >= hw->data->nb_rx_queues) {
416 		hns3_err(hw, "queue ID(%u) is greater than the number of available queues (%u) in driver.",
417 			 queue->index, hw->data->nb_rx_queues);
418 		return rte_flow_error_set(error, EINVAL,
419 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
420 					  action, "Invalid queue ID in PF");
421 	}
422 
423 	rule->queue_id = queue->index;
424 	rule->nb_queues = 1;
425 	rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
426 	return 0;
427 }
428 
429 static int
430 hns3_handle_action_queue_region(struct rte_eth_dev *dev,
431 				const struct rte_flow_action *action,
432 				struct hns3_fdir_rule *rule,
433 				struct rte_flow_error *error)
434 {
435 	struct hns3_adapter *hns = dev->data->dev_private;
436 	const struct rte_flow_action_rss *conf = action->conf;
437 	struct hns3_hw *hw = &hns->hw;
438 	uint16_t idx;
439 
440 	if (!hns3_dev_get_support(hw, FD_QUEUE_REGION))
441 		return rte_flow_error_set(error, ENOTSUP,
442 			RTE_FLOW_ERROR_TYPE_ACTION, action,
443 			"Not support config queue region!");
444 
445 	if ((!rte_is_power_of_2(conf->queue_num)) ||
446 		conf->queue_num > hw->rss_size_max ||
447 		conf->queue[0] >= hw->data->nb_rx_queues ||
448 		conf->queue[0] + conf->queue_num > hw->data->nb_rx_queues) {
449 		return rte_flow_error_set(error, EINVAL,
450 			RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
451 			"Invalid start queue ID and queue num! The start queue "
452 			"ID must be valid, and the queue num must be a power of "
453 			"2 and <= rss_size_max.");
454 	}
455 
456 	for (idx = 1; idx < conf->queue_num; idx++) {
457 		if (conf->queue[idx] != conf->queue[idx - 1] + 1)
458 			return rte_flow_error_set(error, EINVAL,
459 				RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
460 				"Invalid queue ID sequence! The queue IDs "
461 				"must be continuously increasing.");
462 	}
463 
464 	rule->queue_id = conf->queue[0];
465 	rule->nb_queues = conf->queue_num;
466 	rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
467 	return 0;
468 }
469 
470 static int
471 hns3_handle_action_indirect(struct rte_eth_dev *dev,
472 			    const struct rte_flow_action *action,
473 			    struct hns3_fdir_rule *rule,
474 			    struct rte_flow_error *error)
475 {
476 	const struct rte_flow_action_handle *indir = action->conf;
477 
478 	if (indir->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT)
479 		return rte_flow_error_set(error, EINVAL,
480 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
481 				action, "Invalid indirect type");
482 
483 	if (hns3_counter_lookup(dev, indir->counter_id) == NULL)
484 		return rte_flow_error_set(error, EINVAL,
485 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
486 				action, "Counter id does not exist");
487 
488 	rule->act_cnt.id = indir->counter_id;
489 	rule->flags |= (HNS3_RULE_FLAG_COUNTER | HNS3_RULE_FLAG_COUNTER_INDIR);
490 
491 	return 0;
492 }
493 
494 /*
495  * Parse the action list and convert it into NIC-specific actions.
496  * The actions are validated as they are copied.
497  *
498  * @param actions[in]
499  * @param rule[out]
500  *   NIC specific actions derived from the actions.
501  * @param error[out]
502  */
503 static int
504 hns3_handle_actions(struct rte_eth_dev *dev,
505 		    const struct rte_flow_action actions[],
506 		    struct hns3_fdir_rule *rule, struct rte_flow_error *error)
507 {
508 	struct hns3_adapter *hns = dev->data->dev_private;
509 	const struct rte_flow_action_count *act_count;
510 	const struct rte_flow_action_mark *mark;
511 	struct hns3_pf *pf = &hns->pf;
512 	uint32_t counter_num;
513 	int ret;
514 
515 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
516 		switch (actions->type) {
517 		case RTE_FLOW_ACTION_TYPE_QUEUE:
518 			ret = hns3_handle_action_queue(dev, actions, rule,
519 						       error);
520 			if (ret)
521 				return ret;
522 			break;
523 		case RTE_FLOW_ACTION_TYPE_DROP:
524 			rule->action = HNS3_FD_ACTION_DROP_PACKET;
525 			break;
526 		/*
527 		 * Here RSS's real action is queue region.
528 		 * Queue region is implemented by FDIR + RSS in hns3 hardware,
529 		 * the FDIR's action is one queue region (start_queue_id and
530 		 * queue_num), then RSS spread packets to the queue region by
531 		 * RSS algorithm.
532 		 */
533 		case RTE_FLOW_ACTION_TYPE_RSS:
534 			ret = hns3_handle_action_queue_region(dev, actions,
535 							      rule, error);
536 			if (ret)
537 				return ret;
538 			break;
539 		case RTE_FLOW_ACTION_TYPE_MARK:
540 			mark =
541 			    (const struct rte_flow_action_mark *)actions->conf;
542 			if (mark->id >= HNS3_MAX_FILTER_ID)
543 				return rte_flow_error_set(error, EINVAL,
544 						RTE_FLOW_ERROR_TYPE_ACTION_CONF,
545 						actions,
546 						"Invalid Mark ID");
547 			rule->fd_id = mark->id;
548 			rule->flags |= HNS3_RULE_FLAG_FDID;
549 			break;
550 		case RTE_FLOW_ACTION_TYPE_FLAG:
551 			rule->fd_id = HNS3_MAX_FILTER_ID;
552 			rule->flags |= HNS3_RULE_FLAG_FDID;
553 			break;
554 		case RTE_FLOW_ACTION_TYPE_COUNT:
555 			act_count =
556 			    (const struct rte_flow_action_count *)actions->conf;
557 			counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
558 			if (act_count->id >= counter_num)
559 				return rte_flow_error_set(error, EINVAL,
560 						RTE_FLOW_ERROR_TYPE_ACTION_CONF,
561 						actions,
562 						"Invalid counter id");
563 			rule->act_cnt = *act_count;
564 			rule->flags |= HNS3_RULE_FLAG_COUNTER;
565 			rule->flags &= ~HNS3_RULE_FLAG_COUNTER_INDIR;
566 			break;
567 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
568 			ret = hns3_handle_action_indirect(dev, actions, rule,
569 							  error);
570 			if (ret)
571 				return ret;
572 			break;
573 		case RTE_FLOW_ACTION_TYPE_VOID:
574 			break;
575 		default:
576 			return rte_flow_error_set(error, ENOTSUP,
577 						  RTE_FLOW_ERROR_TYPE_ACTION,
578 						  NULL, "Unsupported action");
579 		}
580 	}
581 
582 	return 0;
583 }
584 
585 static int
586 hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
587 {
588 	if (!attr->ingress)
589 		return rte_flow_error_set(error, EINVAL,
590 					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
591 					  attr, "Ingress can't be zero");
592 	if (attr->egress)
593 		return rte_flow_error_set(error, ENOTSUP,
594 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
595 					  attr, "Not support egress");
596 	if (attr->transfer)
597 		return rte_flow_error_set(error, ENOTSUP,
598 					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
599 					  attr, "No support for transfer");
600 	if (attr->group)
601 		return rte_flow_error_set(error, ENOTSUP,
602 					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
603 					  attr, "Not support group");
604 	return 0;
605 }
606 
607 static int
608 hns3_check_tuple(const struct rte_eth_dev *dev, const struct hns3_fdir_rule *rule,
609 		 struct rte_flow_error *error)
610 {
611 	const char * const err_msg[] = {
612 		"Not support outer dst mac",
613 		"Not support outer src mac",
614 		"Not support outer vlan1 tag",
615 		"Not support outer vlan2 tag",
616 		"Not support outer eth type",
617 		"Not support outer l2 rsv",
618 		"Not support outer ip tos",
619 		"Not support outer ip proto",
620 		"Not support outer src ip",
621 		"Not support outer dst ip",
622 		"Not support outer l3 rsv",
623 		"Not support outer src port",
624 		"Not support outer dst port",
625 		"Not support outer l4 rsv",
626 		"Not support outer tun vni",
627 		"Not support outer tun flow id",
628 		"Not support inner dst mac",
629 		"Not support inner src mac",
630 		"Not support inner vlan tag1",
631 		"Not support inner vlan tag2",
632 		"Not support inner eth type",
633 		"Not support inner l2 rsv",
634 		"Not support inner ip tos",
635 		"Not support inner ip proto",
636 		"Not support inner src ip",
637 		"Not support inner dst ip",
638 		"Not support inner l3 rsv",
639 		"Not support inner src port",
640 		"Not support inner dst port",
641 		"Not support inner sctp tag",
642 	};
643 	struct hns3_adapter *hns = dev->data->dev_private;
644 	uint32_t tuple_active = hns->pf.fdir.fd_cfg.key_cfg[HNS3_FD_STAGE_1].tuple_active;
645 	uint32_t i;
646 
647 	for (i = 0; i < MAX_TUPLE; i++) {
648 		if ((rule->input_set & BIT(i)) == 0)
649 			continue;
650 		if (tuple_active & BIT(i))
651 			continue;
652 		return rte_flow_error_set(error, ENOTSUP,
653 					  RTE_FLOW_ERROR_TYPE_ITEM,
654 					  NULL, err_msg[i]);
655 	}
656 
657 	return 0;
658 }
659 
660 static int
661 hns3_parse_eth(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
662 	       struct rte_flow_error *error __rte_unused)
663 {
664 	const struct rte_flow_item_eth *eth_spec;
665 	const struct rte_flow_item_eth *eth_mask;
666 
667 	/* Only used to describe the protocol stack. */
668 	if (item->spec == NULL && item->mask == NULL)
669 		return 0;
670 
671 	eth_mask = item->mask;
672 	if (eth_mask) {
673 		if (eth_mask->hdr.ether_type) {
674 			hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
675 			rule->key_conf.mask.ether_type =
676 			    rte_be_to_cpu_16(eth_mask->hdr.ether_type);
677 		}
678 		if (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr)) {
679 			hns3_set_bit(rule->input_set, INNER_SRC_MAC, 1);
680 			memcpy(rule->key_conf.mask.src_mac,
681 			       eth_mask->hdr.src_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
682 		}
683 		if (!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr)) {
684 			hns3_set_bit(rule->input_set, INNER_DST_MAC, 1);
685 			memcpy(rule->key_conf.mask.dst_mac,
686 			       eth_mask->hdr.dst_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
687 		}
688 		if (eth_mask->has_vlan)
689 			rule->has_vlan_m = true;
690 	}
691 
692 	eth_spec = item->spec;
693 	if (eth_mask && eth_mask->has_vlan && eth_spec->has_vlan) {
694 		rule->key_conf.vlan_num++;
695 		rule->has_vlan_v = true;
696 	}
697 
698 	rule->key_conf.spec.ether_type = rte_be_to_cpu_16(eth_spec->hdr.ether_type);
699 	memcpy(rule->key_conf.spec.src_mac, eth_spec->hdr.src_addr.addr_bytes,
700 	       RTE_ETHER_ADDR_LEN);
701 	memcpy(rule->key_conf.spec.dst_mac, eth_spec->hdr.dst_addr.addr_bytes,
702 	       RTE_ETHER_ADDR_LEN);
703 	return 0;
704 }
705 
706 static int
707 hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
708 		struct rte_flow_error *error)
709 {
710 	const struct rte_flow_item_vlan *vlan_spec;
711 	const struct rte_flow_item_vlan *vlan_mask;
712 
713 	if (rule->has_vlan_m && !rule->has_vlan_v)
714 		return rte_flow_error_set(error, EINVAL,
715 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
716 					  "VLAN item conflicts with 'has_vlan is 0' in ETH item");
717 
718 	if (rule->has_more_vlan_m && !rule->has_more_vlan_v)
719 		return rte_flow_error_set(error, EINVAL,
720 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
721 					  "VLAN item conflicts with 'has_more_vlan is 0' in the previous VLAN item");
722 
723 	if (rule->has_vlan_m && rule->has_vlan_v) {
724 		rule->has_vlan_m = false;
725 		rule->key_conf.vlan_num--;
726 	}
727 
728 	if (rule->has_more_vlan_m && rule->has_more_vlan_v) {
729 		rule->has_more_vlan_m = false;
730 		rule->key_conf.vlan_num--;
731 	}
732 
733 	rule->key_conf.vlan_num++;
734 	if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
735 		return rte_flow_error_set(error, EINVAL,
736 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
737 					  "Vlan_num is more than 2");
738 
739 	/* Only used to describe the protocol stack. */
740 	if (item->spec == NULL && item->mask == NULL)
741 		return 0;
742 
743 	vlan_mask = item->mask;
744 	if (vlan_mask) {
745 		if (vlan_mask->hdr.vlan_tci) {
746 			if (rule->key_conf.vlan_num == 1) {
747 				hns3_set_bit(rule->input_set, INNER_VLAN_TAG1,
748 					     1);
749 				rule->key_conf.mask.vlan_tag1 =
750 				    rte_be_to_cpu_16(vlan_mask->hdr.vlan_tci);
751 			} else {
752 				hns3_set_bit(rule->input_set, INNER_VLAN_TAG2,
753 					     1);
754 				rule->key_conf.mask.vlan_tag2 =
755 				    rte_be_to_cpu_16(vlan_mask->hdr.vlan_tci);
756 			}
757 		}
758 		if (vlan_mask->has_more_vlan)
759 			rule->has_more_vlan_m = true;
760 	}
761 
762 	vlan_spec = item->spec;
763 	if (rule->key_conf.vlan_num == 1)
764 		rule->key_conf.spec.vlan_tag1 =
765 		    rte_be_to_cpu_16(vlan_spec->hdr.vlan_tci);
766 	else
767 		rule->key_conf.spec.vlan_tag2 =
768 		    rte_be_to_cpu_16(vlan_spec->hdr.vlan_tci);
769 
770 	if (vlan_mask && vlan_mask->has_more_vlan && vlan_spec->has_more_vlan) {
771 		rule->key_conf.vlan_num++;
772 		if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
773 			return rte_flow_error_set(error, EINVAL,
774 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
775 					  "Vlan_num is more than 2");
776 		rule->has_more_vlan_v = true;
777 	}
778 
779 	return 0;
780 }
781 
782 static bool
783 hns3_check_ipv4_mask_supported(const struct rte_flow_item_ipv4 *ipv4_mask)
784 {
785 	if (ipv4_mask->hdr.total_length || ipv4_mask->hdr.packet_id ||
786 	    ipv4_mask->hdr.fragment_offset || ipv4_mask->hdr.time_to_live ||
787 	    ipv4_mask->hdr.hdr_checksum)
788 		return false;
789 
790 	return true;
791 }
792 
793 static int
794 hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
795 		struct rte_flow_error *error)
796 {
797 	const struct rte_flow_item_ipv4 *ipv4_spec;
798 	const struct rte_flow_item_ipv4 *ipv4_mask;
799 
800 	hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
801 	rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4;
802 	rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
803 
804 	/* Only used to describe the protocol stack. */
805 	if (item->spec == NULL && item->mask == NULL)
806 		return 0;
807 
808 	if (item->mask) {
809 		ipv4_mask = item->mask;
810 		if (!hns3_check_ipv4_mask_supported(ipv4_mask)) {
811 			return rte_flow_error_set(error, EINVAL,
812 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
813 						  item,
814 						  "Only support src & dst ip,tos,proto in IPV4");
815 		}
816 
817 		if (ipv4_mask->hdr.src_addr) {
818 			hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
819 			rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID] =
820 			    rte_be_to_cpu_32(ipv4_mask->hdr.src_addr);
821 		}
822 
823 		if (ipv4_mask->hdr.dst_addr) {
824 			hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
825 			rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID] =
826 			    rte_be_to_cpu_32(ipv4_mask->hdr.dst_addr);
827 		}
828 
829 		if (ipv4_mask->hdr.type_of_service) {
830 			hns3_set_bit(rule->input_set, INNER_IP_TOS, 1);
831 			rule->key_conf.mask.ip_tos =
832 			    ipv4_mask->hdr.type_of_service;
833 		}
834 
835 		if (ipv4_mask->hdr.next_proto_id) {
836 			hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
837 			rule->key_conf.mask.ip_proto =
838 			    ipv4_mask->hdr.next_proto_id;
839 		}
840 	}
841 
842 	ipv4_spec = item->spec;
843 	rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID] =
844 	    rte_be_to_cpu_32(ipv4_spec->hdr.src_addr);
845 	rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID] =
846 	    rte_be_to_cpu_32(ipv4_spec->hdr.dst_addr);
847 	rule->key_conf.spec.ip_tos = ipv4_spec->hdr.type_of_service;
848 	rule->key_conf.spec.ip_proto = ipv4_spec->hdr.next_proto_id;
849 	return 0;
850 }
851 
852 static int
853 hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
854 		struct rte_flow_error *error)
855 {
856 	const struct rte_flow_item_ipv6 *ipv6_spec;
857 	const struct rte_flow_item_ipv6 *ipv6_mask;
858 
859 	hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
860 	rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6;
861 	rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
862 
863 	/* Only used to describe the protocol stack. */
864 	if (item->spec == NULL && item->mask == NULL)
865 		return 0;
866 
867 	if (item->mask) {
868 		ipv6_mask = item->mask;
869 		if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len ||
870 		    ipv6_mask->hdr.hop_limits) {
871 			return rte_flow_error_set(error, EINVAL,
872 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
873 						  item,
874 						  "Only support src & dst ip,proto in IPV6");
875 		}
876 		net_addr_to_host(rule->key_conf.mask.src_ip,
877 				 (const rte_be32_t *)&ipv6_mask->hdr.src_addr,
878 				 IP_ADDR_LEN);
879 		net_addr_to_host(rule->key_conf.mask.dst_ip,
880 				 (const rte_be32_t *)&ipv6_mask->hdr.dst_addr,
881 				 IP_ADDR_LEN);
882 		rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto;
883 		if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID])
884 			hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
885 		if (rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID])
886 			hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
887 		if (ipv6_mask->hdr.proto)
888 			hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
889 	}
890 
891 	ipv6_spec = item->spec;
892 	net_addr_to_host(rule->key_conf.spec.src_ip,
893 			 (const rte_be32_t *)&ipv6_spec->hdr.src_addr,
894 			 IP_ADDR_LEN);
895 	net_addr_to_host(rule->key_conf.spec.dst_ip,
896 			 (const rte_be32_t *)&ipv6_spec->hdr.dst_addr,
897 			 IP_ADDR_LEN);
898 	rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto;
899 
900 	return 0;
901 }
902 
903 static bool
904 hns3_check_tcp_mask_supported(const struct rte_flow_item_tcp *tcp_mask)
905 {
906 	if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack ||
907 	    tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags ||
908 	    tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum ||
909 	    tcp_mask->hdr.tcp_urp)
910 		return false;
911 
912 	return true;
913 }
914 
915 static int
916 hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
917 	       struct rte_flow_error *error)
918 {
919 	const struct rte_flow_item_tcp *tcp_spec;
920 	const struct rte_flow_item_tcp *tcp_mask;
921 
922 	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
923 	rule->key_conf.spec.ip_proto = IPPROTO_TCP;
924 	rule->key_conf.mask.ip_proto = IPPROTO_MASK;
925 
926 	/* Only used to describe the protocol stack. */
927 	if (item->spec == NULL && item->mask == NULL)
928 		return 0;
929 
930 	if (item->mask) {
931 		tcp_mask = item->mask;
932 		if (!hns3_check_tcp_mask_supported(tcp_mask)) {
933 			return rte_flow_error_set(error, EINVAL,
934 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
935 						  item,
936 						  "Only support src & dst port in TCP");
937 		}
938 
939 		if (tcp_mask->hdr.src_port) {
940 			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
941 			rule->key_conf.mask.src_port =
942 			    rte_be_to_cpu_16(tcp_mask->hdr.src_port);
943 		}
944 		if (tcp_mask->hdr.dst_port) {
945 			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
946 			rule->key_conf.mask.dst_port =
947 			    rte_be_to_cpu_16(tcp_mask->hdr.dst_port);
948 		}
949 	}
950 
951 	tcp_spec = item->spec;
952 	rule->key_conf.spec.src_port = rte_be_to_cpu_16(tcp_spec->hdr.src_port);
953 	rule->key_conf.spec.dst_port = rte_be_to_cpu_16(tcp_spec->hdr.dst_port);
954 
955 	return 0;
956 }
957 
958 static int
959 hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
960 	       struct rte_flow_error *error)
961 {
962 	const struct rte_flow_item_udp *udp_spec;
963 	const struct rte_flow_item_udp *udp_mask;
964 
965 	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
966 	rule->key_conf.spec.ip_proto = IPPROTO_UDP;
967 	rule->key_conf.mask.ip_proto = IPPROTO_MASK;
968 
969 	/* Only used to describe the protocol stack. */
970 	if (item->spec == NULL && item->mask == NULL)
971 		return 0;
972 
973 	if (item->mask) {
974 		udp_mask = item->mask;
975 		if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
976 			return rte_flow_error_set(error, EINVAL,
977 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
978 						  item,
979 						  "Only support src & dst port in UDP");
980 		}
981 		if (udp_mask->hdr.src_port) {
982 			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
983 			rule->key_conf.mask.src_port =
984 			    rte_be_to_cpu_16(udp_mask->hdr.src_port);
985 		}
986 		if (udp_mask->hdr.dst_port) {
987 			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
988 			rule->key_conf.mask.dst_port =
989 			    rte_be_to_cpu_16(udp_mask->hdr.dst_port);
990 		}
991 	}
992 
993 	udp_spec = item->spec;
994 	rule->key_conf.spec.src_port = rte_be_to_cpu_16(udp_spec->hdr.src_port);
995 	rule->key_conf.spec.dst_port = rte_be_to_cpu_16(udp_spec->hdr.dst_port);
996 
997 	return 0;
998 }
999 
1000 static int
1001 hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1002 		struct rte_flow_error *error)
1003 {
1004 	const struct rte_flow_item_sctp *sctp_spec;
1005 	const struct rte_flow_item_sctp *sctp_mask;
1006 
1007 	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
1008 	rule->key_conf.spec.ip_proto = IPPROTO_SCTP;
1009 	rule->key_conf.mask.ip_proto = IPPROTO_MASK;
1010 
1011 	/* Only used to describe the protocol stack. */
1012 	if (item->spec == NULL && item->mask == NULL)
1013 		return 0;
1014 
1015 	if (item->mask) {
1016 		sctp_mask = item->mask;
1017 		if (sctp_mask->hdr.cksum)
1018 			return rte_flow_error_set(error, EINVAL,
1019 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1020 						  item,
1021 						  "Only support src & dst port & v-tag in SCTP");
1022 		if (sctp_mask->hdr.src_port) {
1023 			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
1024 			rule->key_conf.mask.src_port =
1025 			    rte_be_to_cpu_16(sctp_mask->hdr.src_port);
1026 		}
1027 		if (sctp_mask->hdr.dst_port) {
1028 			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
1029 			rule->key_conf.mask.dst_port =
1030 			    rte_be_to_cpu_16(sctp_mask->hdr.dst_port);
1031 		}
1032 		if (sctp_mask->hdr.tag) {
1033 			hns3_set_bit(rule->input_set, INNER_SCTP_TAG, 1);
1034 			rule->key_conf.mask.sctp_tag =
1035 			    rte_be_to_cpu_32(sctp_mask->hdr.tag);
1036 		}
1037 	}
1038 
1039 	sctp_spec = item->spec;
1040 	rule->key_conf.spec.src_port =
1041 	    rte_be_to_cpu_16(sctp_spec->hdr.src_port);
1042 	rule->key_conf.spec.dst_port =
1043 	    rte_be_to_cpu_16(sctp_spec->hdr.dst_port);
1044 	rule->key_conf.spec.sctp_tag = rte_be_to_cpu_32(sctp_spec->hdr.tag);
1045 
1046 	return 0;
1047 }
1048 
1049 /*
1050  * Check items before tunnel, save inner configs to outer configs, and clear
1051  * inner configs.
1052  * The key consists of two parts: meta_data and tuple keys.
1053  * Meta data uses 15 bits, including vlan_num(2bit), des_port(12bit) and tunnel
1054  * packet(1bit).
1055  * Tuple keys use 384 bits, including ot_dst-mac(48bit), ot_dst-port(16bit),
1056  * ot_tun_vni(24bit), ot_flow_id(8bit), src-mac(48bit), dst-mac(48bit),
1057  * src-ip(32/128bit), dst-ip(32/128bit), src-port(16bit), dst-port(16bit),
1058  * tos(8bit), ether-proto(16bit), ip-proto(8bit), vlantag1(16bit),
1059  * Vlantag2(16bit) and sctp-tag(32bit).
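 * (With 32-bit IPv4 addresses the listed tuple widths add up to exactly
 * 384 bits: 48+16+24+8+48+48+32+32+16+16+8+16+8+16+16+32 = 384.)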
1060  */
1061 static int
1062 hns3_handle_tunnel(const struct rte_flow_item *item,
1063 		   struct hns3_fdir_rule *rule, struct rte_flow_error *error)
1064 {
1065 	/* check eth config */
1066 	if (rule->input_set & (BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC)))
1067 		return rte_flow_error_set(error, EINVAL,
1068 					  RTE_FLOW_ERROR_TYPE_ITEM,
1069 					  item, "Outer eth mac is unsupported");
1070 	if (rule->input_set & BIT(INNER_ETH_TYPE)) {
1071 		hns3_set_bit(rule->input_set, OUTER_ETH_TYPE, 1);
1072 		rule->key_conf.spec.outer_ether_type =
1073 		    rule->key_conf.spec.ether_type;
1074 		rule->key_conf.mask.outer_ether_type =
1075 		    rule->key_conf.mask.ether_type;
1076 		hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 0);
1077 		rule->key_conf.spec.ether_type = 0;
1078 		rule->key_conf.mask.ether_type = 0;
1079 	}
1080 
1081 	if (rule->input_set & BIT(INNER_VLAN_TAG1)) {
1082 		hns3_set_bit(rule->input_set, OUTER_VLAN_TAG_FST, 1);
1083 		hns3_set_bit(rule->input_set, INNER_VLAN_TAG1, 0);
1084 		rule->key_conf.spec.outer_vlan_tag1 = rule->key_conf.spec.vlan_tag1;
1085 		rule->key_conf.mask.outer_vlan_tag1 = rule->key_conf.mask.vlan_tag1;
1086 		rule->key_conf.spec.vlan_tag1 = 0;
1087 		rule->key_conf.mask.vlan_tag1 = 0;
1088 	}
1089 	if (rule->input_set & BIT(INNER_VLAN_TAG2)) {
1090 		hns3_set_bit(rule->input_set, OUTER_VLAN_TAG_SEC, 1);
1091 		hns3_set_bit(rule->input_set, INNER_VLAN_TAG2, 0);
1092 		rule->key_conf.spec.outer_vlan_tag2 = rule->key_conf.spec.vlan_tag2;
1093 		rule->key_conf.mask.outer_vlan_tag2 = rule->key_conf.mask.vlan_tag2;
1094 		rule->key_conf.spec.vlan_tag2 = 0;
1095 		rule->key_conf.mask.vlan_tag2 = 0;
1096 	}
1097 
1098 	/* clear vlan_num for inner vlan select */
1099 	rule->key_conf.outer_vlan_num = rule->key_conf.vlan_num;
1100 	rule->key_conf.vlan_num = 0;
1101 
1102 	/* check L3 config */
1103 	if (rule->input_set &
1104 	    (BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | BIT(INNER_IP_TOS)))
1105 		return rte_flow_error_set(error, EINVAL,
1106 					  RTE_FLOW_ERROR_TYPE_ITEM,
1107 					  item, "Outer ip is unsupported");
1108 	if (rule->input_set & BIT(INNER_IP_PROTO)) {
1109 		hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
1110 		rule->key_conf.spec.outer_proto = rule->key_conf.spec.ip_proto;
1111 		rule->key_conf.mask.outer_proto = rule->key_conf.mask.ip_proto;
1112 		hns3_set_bit(rule->input_set, INNER_IP_PROTO, 0);
1113 		rule->key_conf.spec.ip_proto = 0;
1114 		rule->key_conf.mask.ip_proto = 0;
1115 	}
1116 
1117 	/* check L4 config */
1118 	if (rule->input_set & BIT(INNER_SCTP_TAG))
1119 		return rte_flow_error_set(error, EINVAL,
1120 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1121 					  "Outer sctp tag is unsupported");
1122 
1123 	if (rule->input_set & BIT(INNER_SRC_PORT)) {
1124 		hns3_set_bit(rule->input_set, OUTER_SRC_PORT, 1);
1125 		rule->key_conf.spec.outer_src_port =
1126 		    rule->key_conf.spec.src_port;
1127 		rule->key_conf.mask.outer_src_port =
1128 		    rule->key_conf.mask.src_port;
1129 		hns3_set_bit(rule->input_set, INNER_SRC_PORT, 0);
1130 		rule->key_conf.spec.src_port = 0;
1131 		rule->key_conf.mask.src_port = 0;
1132 	}
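	/*
	 * The inner dst-port collected so far is cleared rather than moved to
	 * the outer key; the tunnel item itself sets OUTER_DST_PORT together
	 * with the tunnel type (see hns3_parse_vxlan/nvgre/geneve).
	 */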
1133 	if (rule->input_set & BIT(INNER_DST_PORT)) {
1134 		hns3_set_bit(rule->input_set, INNER_DST_PORT, 0);
1135 		rule->key_conf.spec.dst_port = 0;
1136 		rule->key_conf.mask.dst_port = 0;
1137 	}
1138 	return 0;
1139 }
1140 
1141 static int
1142 hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1143 		 struct rte_flow_error *error)
1144 {
1145 	const struct rte_flow_item_vxlan *vxlan_spec;
1146 	const struct rte_flow_item_vxlan *vxlan_mask;
1147 
1148 	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
1149 	rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
1150 	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
1151 		rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN;
1152 	else
1153 		rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN_GPE;
1154 
1155 	/* Only used to describe the protocol stack. */
1156 	if (item->spec == NULL && item->mask == NULL)
1157 		return 0;
1158 
1159 	vxlan_mask = item->mask;
1160 	vxlan_spec = item->spec;
1161 
1162 	if (vxlan_mask->hdr.flags)
1163 		return rte_flow_error_set(error, EINVAL,
1164 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
1165 					  "Flags is not supported in VxLAN");
1166 
1167 	/* VNI must be totally masked or not. */
1168 	if (memcmp(vxlan_mask->hdr.vni, full_mask, VNI_OR_TNI_LEN) &&
1169 	    memcmp(vxlan_mask->hdr.vni, zero_mask, VNI_OR_TNI_LEN))
1170 		return rte_flow_error_set(error, EINVAL,
1171 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
1172 					  "VNI must be totally masked or not in VxLAN");
1173 	if (vxlan_mask->hdr.vni[0]) {
1174 		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
1175 		memcpy(rule->key_conf.mask.outer_tun_vni, vxlan_mask->hdr.vni,
1176 			   VNI_OR_TNI_LEN);
1177 	}
1178 	memcpy(rule->key_conf.spec.outer_tun_vni, vxlan_spec->hdr.vni,
1179 		   VNI_OR_TNI_LEN);
1180 	return 0;
1181 }
1182 
1183 static int
1184 hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1185 		 struct rte_flow_error *error)
1186 {
1187 	const struct rte_flow_item_nvgre *nvgre_spec;
1188 	const struct rte_flow_item_nvgre *nvgre_mask;
1189 
1190 	hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
1191 	rule->key_conf.spec.outer_proto = IPPROTO_GRE;
1192 	rule->key_conf.mask.outer_proto = IPPROTO_MASK;
1193 
1194 	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
1195 	rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_NVGRE;
1196 	rule->key_conf.mask.tunnel_type = ~HNS3_TUNNEL_TYPE_NVGRE;
1197 	/* Only used to describe the protocol stack. */
1198 	if (item->spec == NULL && item->mask == NULL)
1199 		return 0;
1200 
1201 	nvgre_mask = item->mask;
1202 	nvgre_spec = item->spec;
1203 
1204 	if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
1205 		return rte_flow_error_set(error, EINVAL,
1206 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
1207 					  "Ver/protocol is not supported in NVGRE");
1208 
1209 	/* TNI must be totally masked or not. */
1210 	if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
1211 	    memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
1212 		return rte_flow_error_set(error, EINVAL,
1213 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
1214 					  "TNI must be totally masked or not in NVGRE");
1215 
1216 	if (nvgre_mask->tni[0]) {
1217 		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
1218 		memcpy(rule->key_conf.mask.outer_tun_vni, nvgre_mask->tni,
1219 			   VNI_OR_TNI_LEN);
1220 	}
1221 	memcpy(rule->key_conf.spec.outer_tun_vni, nvgre_spec->tni,
1222 		   VNI_OR_TNI_LEN);
1223 
1224 	if (nvgre_mask->flow_id) {
1225 		hns3_set_bit(rule->input_set, OUTER_TUN_FLOW_ID, 1);
1226 		rule->key_conf.mask.outer_tun_flow_id = nvgre_mask->flow_id;
1227 	}
1228 	rule->key_conf.spec.outer_tun_flow_id = nvgre_spec->flow_id;
1229 	return 0;
1230 }
1231 
1232 static int
1233 hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1234 		  struct rte_flow_error *error)
1235 {
1236 	const struct rte_flow_item_geneve *geneve_spec;
1237 	const struct rte_flow_item_geneve *geneve_mask;
1238 
1239 	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
1240 	rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE;
1241 	rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
1242 	/* Only used to describe the protocol stack. */
1243 	if (item->spec == NULL && item->mask == NULL)
1244 		return 0;
1245 
1246 	geneve_mask = item->mask;
1247 	geneve_spec = item->spec;
1248 
1249 	if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
1250 		return rte_flow_error_set(error, EINVAL,
1251 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
1252 					  "Ver/protocol is not supported in GENEVE");
1253 	/* VNI must be totally masked or not. */
1254 	if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
1255 	    memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
1256 		return rte_flow_error_set(error, EINVAL,
1257 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
1258 					  "VNI must be totally masked or not in GENEVE");
1259 	if (geneve_mask->vni[0]) {
1260 		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
1261 		memcpy(rule->key_conf.mask.outer_tun_vni, geneve_mask->vni,
1262 			   VNI_OR_TNI_LEN);
1263 	}
1264 	memcpy(rule->key_conf.spec.outer_tun_vni, geneve_spec->vni,
1265 		   VNI_OR_TNI_LEN);
1266 	return 0;
1267 }
1268 
1269 static int
1270 hns3_parse_ptype(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1271 		  struct rte_flow_error *error)
1272 {
1273 	const struct rte_flow_item_ptype *spec = item->spec;
1274 	const struct rte_flow_item_ptype *mask = item->mask;
1275 
1276 	if (spec == NULL || mask == NULL)
1277 		return rte_flow_error_set(error, EINVAL,
1278 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1279 					  "PTYPE must set spec and mask at the same time!");
1280 
1281 	if (spec->packet_type != RTE_PTYPE_TUNNEL_MASK ||
1282 	    (mask->packet_type & RTE_PTYPE_TUNNEL_MASK) != RTE_PTYPE_TUNNEL_MASK)
1283 		return rte_flow_error_set(error, EINVAL,
1284 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
1285 					  "PTYPE only support general tunnel!");
1286 
1287 	/*
1288 	 * Set tunnel_type to non-zero, so that meta-data's tunnel packet bit
1289 	 * will be set, then hardware will match tunnel packet.
1290 	 */
1291 	rule->key_conf.spec.tunnel_type = 1;
1292 	return 0;
1293 }
1294 
1295 static int
1296 hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1297 		  struct rte_flow_error *error)
1298 {
1299 	int ret;
1300 
1301 	if (item->spec == NULL && item->mask)
1302 		return rte_flow_error_set(error, EINVAL,
1303 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1304 					  "Can't configure FDIR with mask "
1305 					  "but without spec");
1306 	else if (item->spec && (item->mask == NULL))
1307 		return rte_flow_error_set(error, EINVAL,
1308 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1309 					  "Tunnel packets must be configured "
1310 					  "with a mask");
1311 
1312 	if (rule->key_conf.spec.tunnel_type != 0)
1313 		return rte_flow_error_set(error, EINVAL,
1314 					  RTE_FLOW_ERROR_TYPE_ITEM,
1315 					  item, "Too many tunnel headers!");
1316 
1317 	switch (item->type) {
1318 	case RTE_FLOW_ITEM_TYPE_VXLAN:
1319 	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1320 		ret = hns3_parse_vxlan(item, rule, error);
1321 		break;
1322 	case RTE_FLOW_ITEM_TYPE_NVGRE:
1323 		ret = hns3_parse_nvgre(item, rule, error);
1324 		break;
1325 	case RTE_FLOW_ITEM_TYPE_GENEVE:
1326 		ret = hns3_parse_geneve(item, rule, error);
1327 		break;
1328 	case RTE_FLOW_ITEM_TYPE_PTYPE:
1329 		ret = hns3_parse_ptype(item, rule, error);
1330 		break;
1331 	default:
1332 		return rte_flow_error_set(error, ENOTSUP,
1333 					  RTE_FLOW_ERROR_TYPE_ITEM,
1334 					  NULL, "Unsupported tunnel type!");
1335 	}
1336 	if (ret)
1337 		return ret;
1338 	return hns3_handle_tunnel(item, rule, error);
1339 }
1340 
1341 static int
1342 hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1343 		  struct items_step_mngr *step_mngr,
1344 		  struct rte_flow_error *error)
1345 {
1346 	int ret;
1347 
1348 	if (item->spec == NULL && item->mask)
1349 		return rte_flow_error_set(error, EINVAL,
1350 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1351 					  "Can't configure FDIR with mask "
1352 					  "but without spec");
1353 
1354 	switch (item->type) {
1355 	case RTE_FLOW_ITEM_TYPE_ETH:
1356 		ret = hns3_parse_eth(item, rule, error);
1357 		step_mngr->items = L2_next_items;
1358 		step_mngr->count = RTE_DIM(L2_next_items);
1359 		break;
1360 	case RTE_FLOW_ITEM_TYPE_VLAN:
1361 		ret = hns3_parse_vlan(item, rule, error);
1362 		step_mngr->items = L2_next_items;
1363 		step_mngr->count = RTE_DIM(L2_next_items);
1364 		break;
1365 	case RTE_FLOW_ITEM_TYPE_IPV4:
1366 		ret = hns3_parse_ipv4(item, rule, error);
1367 		step_mngr->items = L3_next_items;
1368 		step_mngr->count = RTE_DIM(L3_next_items);
1369 		break;
1370 	case RTE_FLOW_ITEM_TYPE_IPV6:
1371 		ret = hns3_parse_ipv6(item, rule, error);
1372 		step_mngr->items = L3_next_items;
1373 		step_mngr->count = RTE_DIM(L3_next_items);
1374 		break;
1375 	case RTE_FLOW_ITEM_TYPE_TCP:
1376 		ret = hns3_parse_tcp(item, rule, error);
1377 		step_mngr->items = L4_next_items;
1378 		step_mngr->count = RTE_DIM(L4_next_items);
1379 		break;
1380 	case RTE_FLOW_ITEM_TYPE_UDP:
1381 		ret = hns3_parse_udp(item, rule, error);
1382 		step_mngr->items = L4_next_items;
1383 		step_mngr->count = RTE_DIM(L4_next_items);
1384 		break;
1385 	case RTE_FLOW_ITEM_TYPE_SCTP:
1386 		ret = hns3_parse_sctp(item, rule, error);
1387 		step_mngr->items = L4_next_items;
1388 		step_mngr->count = RTE_DIM(L4_next_items);
1389 		break;
1390 	default:
1391 		return rte_flow_error_set(error, ENOTSUP,
1392 					  RTE_FLOW_ERROR_TYPE_ITEM,
1393 					  NULL, "Unsupported normal type!");
1394 	}
1395 
1396 	return ret;
1397 }
1398 
1399 static int
1400 hns3_validate_item(const struct rte_flow_item *item,
1401 		   struct items_step_mngr step_mngr,
1402 		   struct rte_flow_error *error)
1403 {
1404 	uint32_t i;
1405 
1406 	if (item->last)
1407 		return rte_flow_error_set(error, ENOTSUP,
1408 					  RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
1409 					  "Not supported last point for range");
1410 
1411 	for (i = 0; i < step_mngr.count; i++) {
1412 		if (item->type == step_mngr.items[i])
1413 			break;
1414 	}
1415 
1416 	if (i == step_mngr.count) {
1417 		return rte_flow_error_set(error, EINVAL,
1418 					  RTE_FLOW_ERROR_TYPE_ITEM,
1419 					  item, "Invalid or missing item");
1420 	}
1421 	return 0;
1422 }
1423 
1424 static inline bool
1425 is_tunnel_packet(enum rte_flow_item_type type)
1426 {
1427 	if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
1428 	    type == RTE_FLOW_ITEM_TYPE_VXLAN ||
1429 	    type == RTE_FLOW_ITEM_TYPE_NVGRE ||
1430 	    type == RTE_FLOW_ITEM_TYPE_GENEVE ||
1431 	    /*
1432 	     * PTYPE is treated as a tunnel type here because the driver only supports
1433 	     * PTYPE_TUNNEL; any other PTYPE returns an error in hns3_parse_ptype() later.
1434 	     */
1435 	    type == RTE_FLOW_ITEM_TYPE_PTYPE)
1436 		return true;
1437 	return false;
1438 }
1439 
1440 static int
1441 hns3_handle_attributes(struct rte_eth_dev *dev,
1442 		       const struct rte_flow_attr *attr,
1443 		       struct hns3_fdir_rule *rule,
1444 		       struct rte_flow_error *error)
1445 {
1446 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1447 	struct hns3_fdir_info fdir = pf->fdir;
1448 	uint32_t rule_num;
1449 
1450 	if (fdir.index_cfg != HNS3_FDIR_INDEX_CONFIG_PRIORITY) {
1451 		if (attr->priority == 0)
1452 			return 0;
1453 		return rte_flow_error_set(error, ENOTSUP,
1454 					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1455 					  attr, "Not support priority");
1456 	}
1457 
1458 	rule_num = fdir.fd_cfg.rule_num[HNS3_FD_STAGE_1];
1459 	if (attr->priority >= rule_num)
1460 		return rte_flow_error_set(error, EINVAL,
1461 					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1462 					  attr, "Priority out of range");
1463 
1464 	if (fdir.hash_map[attr->priority] != NULL)
1465 		return rte_flow_error_set(error, EINVAL,
1466 					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1467 					  attr, "Priority already exists");
1468 
1469 	rule->location = attr->priority;
1470 
1471 	return 0;
1472 }
1473 
1474 /*
1475  * Parse the flow director rule.
1476  * The supported PATTERN:
1477  *   case: non-tunnel packet:
1478  *     ETH : src-mac, dst-mac, ethertype
1479  *     VLAN: tag1, tag2
1480  *     IPv4: src-ip, dst-ip, tos, proto
1481  *     IPv6: src-ip(last 32 bit addr), dst-ip(last 32 bit addr), proto
1482  *     UDP : src-port, dst-port
1483  *     TCP : src-port, dst-port
1484  *     SCTP: src-port, dst-port, tag
1485  *   case: tunnel packet:
1486  *     OUTER-ETH: ethertype
1487  *     OUTER-L3 : proto
1488  *     OUTER-L4 : src-port, dst-port
1489  *     TUNNEL   : vni, flow-id(only valid when NVGRE)
1490  *     INNER-ETH/VLAN/IPv4/IPv6/UDP/TCP/SCTP: same as non-tunnel packet
1491  * The supported ACTION:
1492  *    QUEUE
1493  *    DROP
1494  *    COUNT
1495  *    MARK: the id range [0, 4094]
1496  *    FLAG
1497  *    RSS: only valid if the firmware supports FD_QUEUE_REGION.
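 *
 * Example (testpmd flow syntax, for illustration only):
 *   flow create 0 ingress pattern eth / ipv4 dst is 192.168.1.1 /
 *        tcp dst is 80 / end actions queue index 3 / count / end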
1498  */
1499 static int
1500 hns3_parse_fdir_filter(struct rte_eth_dev *dev,
1501 		       const struct rte_flow_attr *attr,
1502 		       const struct rte_flow_item pattern[],
1503 		       const struct rte_flow_action actions[],
1504 		       struct hns3_fdir_rule *rule,
1505 		       struct rte_flow_error *error)
1506 {
1507 	struct hns3_adapter *hns = dev->data->dev_private;
1508 	const struct rte_flow_item *item;
1509 	struct items_step_mngr step_mngr;
1510 	int ret;
1511 
1512 	/* FDIR is available only in PF driver */
1513 	if (hns->is_vf)
1514 		return rte_flow_error_set(error, ENOTSUP,
1515 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1516 					  "Fdir not supported in VF");
1517 
1518 	ret = hns3_handle_attributes(dev, attr, rule, error);
1519 	if (ret)
1520 		return ret;
1521 
1522 	step_mngr.items = first_items;
1523 	step_mngr.count = RTE_DIM(first_items);
1524 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1525 		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
1526 			continue;
1527 
1528 		ret = hns3_validate_item(item, step_mngr, error);
1529 		if (ret)
1530 			return ret;
1531 
1532 		if (is_tunnel_packet(item->type)) {
1533 			ret = hns3_parse_tunnel(item, rule, error);
1534 			if (ret)
1535 				return ret;
1536 			step_mngr.items = tunnel_next_items;
1537 			step_mngr.count = RTE_DIM(tunnel_next_items);
1538 		} else {
1539 			ret = hns3_parse_normal(item, rule, &step_mngr, error);
1540 			if (ret)
1541 				return ret;
1542 		}
1543 	}
1544 
1545 	ret = hns3_check_tuple(dev, rule, error);
1546 	if (ret)
1547 		return ret;
1548 
1549 	return hns3_handle_actions(dev, actions, rule, error);
1550 }
1551 
1552 static void
1553 hns3_filterlist_flush(struct rte_eth_dev *dev)
1554 {
1555 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1556 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
1557 	struct hns3_flow_mem *flow_node;
1558 
1559 	fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
1560 	while (fdir_rule_ptr) {
1561 		TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
1562 		rte_free(fdir_rule_ptr);
1563 		fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
1564 	}
1565 
1566 	flow_node = TAILQ_FIRST(&hw->flow_list);
1567 	while (flow_node) {
1568 		TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
1569 		rte_free(flow_node->flow);
1570 		rte_free(flow_node);
1571 		flow_node = TAILQ_FIRST(&hw->flow_list);
1572 	}
1573 }
1574 
1575 static bool
1576 hns3_flow_rule_key_same(const struct rte_flow_action_rss *comp,
1577 			const struct rte_flow_action_rss *with)
1578 {
1579 	if (comp->key_len != with->key_len)
1580 		return false;
1581 
1582 	if (with->key_len == 0)
1583 		return true;
1584 
1585 	if (comp->key == NULL && with->key == NULL)
1586 		return true;
1587 
1588 	if (!(comp->key != NULL && with->key != NULL))
1589 		return false;
1590 
1591 	return !memcmp(comp->key, with->key, with->key_len);
1592 }
1593 
1594 static bool
1595 hns3_flow_rule_queues_same(const struct rte_flow_action_rss *comp,
1596 			   const struct rte_flow_action_rss *with)
1597 {
1598 	if (comp->queue_num != with->queue_num)
1599 		return false;
1600 
1601 	if (with->queue_num == 0)
1602 		return true;
1603 
1604 	if (comp->queue == NULL && with->queue == NULL)
1605 		return true;
1606 
1607 	if (!(comp->queue != NULL && with->queue != NULL))
1608 		return false;
1609 
1610 	return !memcmp(comp->queue, with->queue, with->queue_num * sizeof(*with->queue));
1611 }
1612 
1613 static bool
1614 hns3_action_rss_same(const struct rte_flow_action_rss *comp,
1615 		     const struct rte_flow_action_rss *with)
1616 {
1617 	bool same_level;
1618 	bool same_types;
1619 	bool same_func;
1620 
1621 	same_level = (comp->level == with->level);
1622 	same_types = (comp->types == with->types);
1623 	same_func = (comp->func == with->func);
1624 
1625 	return same_level && same_types && same_func &&
1626 		hns3_flow_rule_key_same(comp, with) &&
1627 		hns3_flow_rule_queues_same(comp, with);
1628 }
1629 
1630 static bool
1631 hns3_valid_ipv6_sctp_rss_types(struct hns3_hw *hw, uint64_t types)
1632 {
1633 	/*
1634 	 * Some hardware doesn't support using the src/dst port fields to hash
1635 	 * the IPV6 SCTP packet type.
1636 	 */
1637 	if (types & RTE_ETH_RSS_NONFRAG_IPV6_SCTP &&
1638 	    types & HNS3_RSS_SUPPORT_L4_SRC_DST &&
1639 	    !hw->rss_info.ipv6_sctp_offload_supported)
1640 		return false;
1641 
1642 	return true;
1643 }
1644 
1645 static int
1646 hns3_flow_parse_hash_func(const struct rte_flow_action_rss *rss_act,
1647 			  struct hns3_flow_rss_conf *rss_conf,
1648 			  struct rte_flow_error *error)
1649 {
1650 	if (rss_act->func >= RTE_ETH_HASH_FUNCTION_MAX)
1651 		return rte_flow_error_set(error, ENOTSUP,
1652 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1653 					  NULL, "RSS hash func is not supported");
1654 
1655 	rss_conf->conf.func = rss_act->func;
1656 	return 0;
1657 }
1658 
1659 static int
1660 hns3_flow_parse_hash_key(struct hns3_hw *hw,
1661 			 const struct rte_flow_action_rss *rss_act,
1662 			 struct hns3_flow_rss_conf *rss_conf,
1663 			 struct rte_flow_error *error)
1664 {
1665 	if (rss_act->key_len != hw->rss_key_size)
1666 		return rte_flow_error_set(error, EINVAL,
1667 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1668 					  NULL, "invalid RSS key length");
1669 
1670 	if (rss_act->key != NULL)
1671 		memcpy(rss_conf->key, rss_act->key, rss_act->key_len);
1672 	else
1673 		memcpy(rss_conf->key, hns3_hash_key,
1674 			RTE_MIN(sizeof(hns3_hash_key), rss_act->key_len));
1675 	/* Need to record if user sets hash key. */
1676 	rss_conf->conf.key = rss_act->key;
1677 	rss_conf->conf.key_len = rss_act->key_len;
1678 
1679 	return 0;
1680 }
1681 
1682 static int
1683 hns3_flow_parse_queues(struct hns3_hw *hw,
1684 		       const struct rte_flow_action_rss *rss_act,
1685 		       struct hns3_flow_rss_conf *rss_conf,
1686 		       struct rte_flow_error *error)
1687 {
1688 	uint16_t i;
1689 
1690 	if (rss_act->queue_num > hw->rss_ind_tbl_size)
1691 		return rte_flow_error_set(error, ENOTSUP,
1692 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1693 					  NULL,
1694 					  "queue number cannot exceed RSS indirection table size.");
1695 
1696 	if (rss_act->queue_num > HNS3_RSS_QUEUES_BUFFER_NUM)
1697 		return rte_flow_error_set(error, ENOTSUP,
1698 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1699 					  NULL,
1700 					  "configured queue number exceeds the queue buffer size supported by the driver");
1701 
1702 	for (i = 0; i < rss_act->queue_num; i++) {
1703 		if (rss_act->queue[i] >= hw->alloc_rss_size)
1704 			return rte_flow_error_set(error, EINVAL,
1705 						RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1706 						NULL,
1707 						"queue id must be less than queue number allocated to a TC");
1708 	}
1709 
1710 	memcpy(rss_conf->queue, rss_act->queue,
1711 	       rss_act->queue_num * sizeof(rss_conf->queue[0]));
1712 	rss_conf->conf.queue = rss_conf->queue;
1713 	rss_conf->conf.queue_num = rss_act->queue_num;
1714 
1715 	return 0;
1716 }
1717 
1718 static int
1719 hns3_flow_get_hw_pctype(struct hns3_hw *hw,
1720 			const struct rte_flow_action_rss *rss_act,
1721 			const struct hns3_hash_map_info *map,
1722 			struct hns3_flow_rss_conf *rss_conf,
1723 			struct rte_flow_error *error)
1724 {
1725 	uint64_t l3l4_src_dst, l3l4_refine, left_types;
1726 
1727 	if (rss_act->types == 0) {
1728 		/* Disable RSS hash of this packet type if types is zero. */
1729 		rss_conf->hw_pctypes |= map->hw_pctype;
1730 		return 0;
1731 	}
1732 
1733 	/*
1734 	 * Cannot have extra types other than the rss_pctype and l3l4_types in this map.
1735 	 */
1736 	left_types = ~map->rss_pctype & rss_act->types;
1737 	if (left_types & ~map->l3l4_types)
1738 		return rte_flow_error_set(error, EINVAL,
1739 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1740 					  "cannot set extra types.");
1741 
1742 	l3l4_src_dst = left_types;
1743 	/* L3/L4 SRC and DST shouldn't be specified at the same time. */
1744 	l3l4_refine = rte_eth_rss_hf_refine(l3l4_src_dst);
1745 	if (l3l4_refine != l3l4_src_dst)
1746 		return rte_flow_error_set(error, ENOTSUP,
1747 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1748 					  "cannot specify L3_SRC/DST_ONLY or L4_SRC/DST_ONLY at the same time.");
1749 
1750 	if (!hns3_valid_ipv6_sctp_rss_types(hw, rss_act->types))
1751 		return rte_flow_error_set(error, ENOTSUP,
1752 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1753 					  "hardware doesn't support using L4 src/dst to hash for IPV6-SCTP.");
1754 
1755 	rss_conf->hw_pctypes |= map->hw_pctype;
1756 
1757 	return 0;
1758 }
1759 
1760 static int
1761 hns3_flow_parse_rss_types_by_ptype(struct hns3_hw *hw,
1762 				   const struct rte_flow_action_rss *rss_act,
1763 				   uint64_t pattern_type,
1764 				   struct hns3_flow_rss_conf *rss_conf,
1765 				   struct rte_flow_error *error)
1766 {
1767 	const struct hns3_hash_map_info *map;
1768 	bool matched = false;
1769 	uint16_t i;
1770 	int ret;
1771 
1772 	for (i = 0; i < RTE_DIM(hash_map_table); i++) {
1773 		map = &hash_map_table[i];
1774 		if (map->pattern_type != pattern_type) {
1775 			/*
1776 			 * If the target pattern type has already been matched
1777 			 * by an earlier entry in the hash map table, there is
1778 			 * no need to continue walking.
1779 			 */
1780 			if (matched)
1781 				break;
1782 			continue;
1783 		}
1784 		matched = true;
1785 
1786 		/*
1787 		 * If the pattern type is matched and 'types' is zero, RSS hash is
1788 		 * disabled for all packet flow types related to this pattern type.
1789 		 * Otherwise, the RSS types must match the pattern type and must not
1790 		 * include extra or unsupported types.
1791 		 */
1792 		if (rss_act->types != 0 && !(map->rss_pctype & rss_act->types))
1793 			continue;
1794 
1795 		ret = hns3_flow_get_hw_pctype(hw, rss_act, map, rss_conf, error);
1796 		if (ret != 0)
1797 			return ret;
1798 	}
1799 
1800 	if (rss_conf->hw_pctypes != 0)
1801 		return 0;
1802 
1803 	if (matched)
1804 		return rte_flow_error_set(error, ENOTSUP,
1805 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1806 					  NULL, "RSS types are unsupported");
1807 
1808 	return rte_flow_error_set(error, ENOTSUP,
1809 				  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1810 				  NULL, "Pattern specified is unsupported");
1811 }
1812 
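/* Convert RTE_ETH_RSS_* types into the hardware packet types in hash_map_table. */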
1813 static uint64_t
1814 hns3_flow_get_all_hw_pctypes(uint64_t types)
1815 {
1816 	uint64_t hw_pctypes = 0;
1817 	uint16_t i;
1818 
1819 	for (i = 0; i < RTE_DIM(hash_map_table); i++) {
1820 		if (types & hash_map_table[i].rss_pctype)
1821 			hw_pctypes |= hash_map_table[i].hw_pctype;
1822 	}
1823 
1824 	return hw_pctypes;
1825 }
1826 
1827 static int
1828 hns3_flow_parse_rss_types(struct hns3_hw *hw,
1829 			  const struct rte_flow_action_rss *rss_act,
1830 			  uint64_t pattern_type,
1831 			  struct hns3_flow_rss_conf *rss_conf,
1832 			  struct rte_flow_error *error)
1833 {
1834 	rss_conf->conf.types = rss_act->types;
1835 
1836 	/* No pattern specified; configure the global RSS types. */
1837 	if (pattern_type == 0) {
1838 		if (!hns3_check_rss_types_valid(hw, rss_act->types))
1839 			return rte_flow_error_set(error, EINVAL,
1840 					RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1841 					NULL, "RSS types is invalid.");
1842 		rss_conf->hw_pctypes =
1843 				hns3_flow_get_all_hw_pctypes(rss_act->types);
1844 		return 0;
1845 	}
1846 
1847 	return hns3_flow_parse_rss_types_by_ptype(hw, rss_act, pattern_type,
1848 						  rss_conf, error);
1849 }
1850 
1851 static int
1852 hns3_flow_parse_hash_global_conf(struct rte_eth_dev *dev,
1853 				 const struct rte_flow_action_rss *rss_act,
1854 				 struct hns3_flow_rss_conf *rss_conf,
1855 				 struct rte_flow_error *error)
1856 {
1857 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1858 	int ret;
1859 
1860 	ret = hns3_flow_parse_hash_func(rss_act, rss_conf, error);
1861 	if (ret != 0)
1862 		return ret;
1863 
1864 	if (rss_act->queue_num > 0) {
1865 		ret = hns3_flow_parse_queues(hw, rss_act, rss_conf, error);
1866 		if (ret != 0)
1867 			return ret;
1868 	}
1869 
1870 	if (rss_act->key_len > 0) {
1871 		ret = hns3_flow_parse_hash_key(hw, rss_act, rss_conf, error);
1872 		if (ret != 0)
1873 			return ret;
1874 	}
1875 
1876 	return hns3_flow_parse_rss_types(hw, rss_act, rss_conf->pattern_type,
1877 					 rss_conf, error);
1878 }
1879 
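/*
 * Translate the pattern items into a bit mask of HNS3_HASH_HDR_* headers,
 * checking the allowed item order and rejecting duplicate or unsupported
 * headers.
 */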
1880 static int
1881 hns3_flow_parse_pattern_type(const struct rte_flow_item pattern[],
1882 			     uint64_t *ptype, struct rte_flow_error *error)
1883 {
1884 	enum rte_flow_item_type pre_type = RTE_FLOW_ITEM_TYPE_VOID;
1885 	const char *message = "Pattern specified isn't supported";
1886 	uint64_t item_hdr, pattern_hdrs = 0;
1887 	enum rte_flow_item_type cur_type;
1888 
1889 	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
1890 		if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID)
1891 			continue;
1892 		if (pattern->mask || pattern->spec || pattern->last) {
1893 			message = "Header info shouldn't be specified";
1894 			goto unsup;
1895 		}
1896 
1897 		/* Check whether the current item is allowed to follow the previous item. */
1898 		if (pre_type >= RTE_DIM(hash_pattern_next_allow_items) ||
1899 		    !(hash_pattern_next_allow_items[pre_type] &
1900 				BIT_ULL(pattern->type)))
1901 			goto unsup;
1902 
1903 		cur_type = pattern->type;
1904 		/* The current type is unsupported if it exceeds the array size. */
1905 		if (cur_type >= RTE_DIM(hash_pattern_item_header))
1906 			goto unsup;
1907 
1908 		/* A zero value means the current header is unsupported. */
1909 		item_hdr = hash_pattern_item_header[cur_type];
1910 		if (item_hdr == 0)
1911 			goto unsup;
1912 
1913 		/* Reject duplicate pattern headers. */
1914 		if (item_hdr & pattern_hdrs)
1915 			goto unsup;
1916 		pre_type = cur_type;
1917 		pattern_hdrs |= item_hdr;
1918 	}
1919 
1920 	if (pattern_hdrs != 0) {
1921 		*ptype = pattern_hdrs;
1922 		return 0;
1923 	}
1924 
1925 unsup:
1926 	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1927 				  pattern, message);
1928 }
1929 
1930 static int
1931 hns3_flow_parse_pattern_act(struct rte_eth_dev *dev,
1932 			    const struct rte_flow_item pattern[],
1933 			    const struct rte_flow_action_rss *rss_act,
1934 			    struct hns3_flow_rss_conf *rss_conf,
1935 			    struct rte_flow_error *error)
1936 {
1937 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1938 	int ret;
1939 
1940 	ret = hns3_flow_parse_hash_func(rss_act, rss_conf, error);
1941 	if (ret != 0)
1942 		return ret;
1943 
1944 	if (rss_act->key_len > 0) {
1945 		ret = hns3_flow_parse_hash_key(hw, rss_act, rss_conf, error);
1946 		if (ret != 0)
1947 			return ret;
1948 	}
1949 
1950 	if (rss_act->queue_num > 0) {
1951 		ret = hns3_flow_parse_queues(hw, rss_act, rss_conf, error);
1952 		if (ret != 0)
1953 			return ret;
1954 	}
1955 
1956 	ret = hns3_flow_parse_pattern_type(pattern, &rss_conf->pattern_type,
1957 					   error);
1958 	if (ret != 0)
1959 		return ret;
1960 
1961 	ret = hns3_flow_parse_rss_types(hw, rss_act, rss_conf->pattern_type,
1962 					rss_conf, error);
1963 	if (ret != 0)
1964 		return ret;
1965 
1966 	if (rss_act->func != RTE_ETH_HASH_FUNCTION_DEFAULT ||
1967 	    rss_act->key_len > 0 || rss_act->queue_num > 0)
1968 		hns3_warn(hw, "hash func, key and queues are global config, which works for all flow types. "
1969 			  "It is recommended not to set them together with a pattern.");
1970 
1971 	return 0;
1972 }
1973 
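/* Check whether an RSS rule with the same pattern type and action already exists. */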
1974 static bool
1975 hns3_rss_action_is_dup(struct hns3_hw *hw,
1976 		       const struct hns3_flow_rss_conf *conf)
1977 {
1978 	struct hns3_rss_conf_ele *filter;
1979 
1980 	TAILQ_FOREACH(filter, &hw->flow_rss_list, entries) {
1981 		if (conf->pattern_type != filter->filter_info.pattern_type)
1982 			continue;
1983 
1984 		if (hns3_action_rss_same(&filter->filter_info.conf, &conf->conf))
1985 			return true;
1986 	}
1987 
1988 	return false;
1989 }
1990 
1991 /*
1992  * This function is used to parse and validate the RSS action.
1993  */
1994 static int
1995 hns3_parse_rss_filter(struct rte_eth_dev *dev,
1996 		      const struct rte_flow_item pattern[],
1997 		      const struct rte_flow_action *actions,
1998 		      struct hns3_flow_rss_conf *rss_conf,
1999 		      struct rte_flow_error *error)
2000 {
2001 	struct hns3_adapter *hns = dev->data->dev_private;
2002 	const struct rte_flow_action_rss *rss_act;
2003 	const struct rte_flow_action *act;
2004 	const struct rte_flow_item *pat;
2005 	struct hns3_hw *hw = &hns->hw;
2006 	uint32_t index = 0;
2007 	int ret;
2008 
2009 	NEXT_ITEM_OF_ACTION(act, actions, index);
2010 	if (actions[1].type != RTE_FLOW_ACTION_TYPE_END)
2011 		return rte_flow_error_set(error, EINVAL,
2012 					  RTE_FLOW_ERROR_TYPE_ACTION,
2013 					  &actions[1],
2014 					  "Only support one action for RSS.");
2015 
2016 	rss_act = (const struct rte_flow_action_rss *)act->conf;
2017 	if (rss_act == NULL) {
2018 		return rte_flow_error_set(error, EINVAL,
2019 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2020 					  act, "missing RSS action configuration");
2021 	}
2022 
2023 	if (rss_act->level != 0)
2024 		return rte_flow_error_set(error, ENOTSUP,
2025 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2026 					  act,
2027 					  "RSS level is not supported");
2028 
2029 	index = 0;
2030 	NEXT_ITEM_OF_PATTERN(pat, pattern, index);
2031 	if (pat[0].type == RTE_FLOW_ITEM_TYPE_END) {
2032 		rss_conf->pattern_type = 0;
2033 		ret = hns3_flow_parse_hash_global_conf(dev, rss_act,
2034 						       rss_conf, error);
2035 	} else {
2036 		ret = hns3_flow_parse_pattern_act(dev, pat, rss_act,
2037 						  rss_conf, error);
2038 	}
2039 	if (ret != 0)
2040 		return ret;
2041 
2042 	if (hns3_rss_action_is_dup(hw, rss_conf))
2043 		return rte_flow_error_set(error, EINVAL,
2044 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2045 					  act, "duplicate RSS rule");
2046 
2047 	return 0;
2048 }
2049 
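/*
 * Fill the RSS indirection table with the queues of the RSS action, wrapping
 * around when the queue number is smaller than the table size.
 */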
2050 static int
2051 hns3_update_indir_table(struct hns3_hw *hw,
2052 			const struct rte_flow_action_rss *conf, uint16_t num)
2053 {
2054 	uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE_MAX];
2055 	uint16_t j;
2056 	uint32_t i;
2057 
2058 	/* Fill in redirection table */
2059 	for (i = 0, j = 0; i < hw->rss_ind_tbl_size; i++, j++) {
2060 		j %= num;
2061 		if (conf->queue[j] >= hw->alloc_rss_size) {
2062 			hns3_err(hw, "queue id(%u) set to redirection table "
2063 				 "exceeds queue number(%u) allocated to a TC.",
2064 				 conf->queue[j], hw->alloc_rss_size);
2065 			return -EINVAL;
2066 		}
2067 		indir_tbl[i] = conf->queue[j];
2068 	}
2069 
2070 	return hns3_set_rss_indir_table(hw, indir_tbl, hw->rss_ind_tbl_size);
2071 }
2072 
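/* Look up the full tuple mask of a hardware packet type in hash_map_table. */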
2073 static uint64_t
2074 hns3_flow_get_pctype_tuple_mask(uint64_t hw_pctype)
2075 {
2076 	uint64_t tuple_mask = 0;
2077 	uint16_t i;
2078 
2079 	for (i = 0; i < RTE_DIM(hash_map_table); i++) {
2080 		if (hw_pctype == hash_map_table[i].hw_pctype) {
2081 			tuple_mask = hash_map_table[i].tuple_mask;
2082 			break;
2083 		}
2084 	}
2085 
2086 	return tuple_mask;
2087 }
2088 
2089 static int
2090 hns3_flow_set_rss_ptype_tuple(struct hns3_hw *hw,
2091 			      struct hns3_flow_rss_conf *rss_conf)
2092 {
2093 	uint64_t old_tuple_fields, new_tuple_fields;
2094 	uint64_t hw_pctypes, tuples, tuple_mask = 0;
2095 	bool cfg_global_tuple;
2096 	int ret;
2097 
2098 	cfg_global_tuple = (rss_conf->pattern_type == 0);
2099 	if (!cfg_global_tuple) {
2100 		/*
2101 		 * To ensure that different packet types do not affect each other,
2102 		 * first read all tuple fields and then modify only the tuples of
2103 		 * the specified packet type.
2104 		 */
2105 		ret = hns3_get_rss_tuple_field(hw, &old_tuple_fields);
2106 		if (ret != 0)
2107 			return ret;
2108 
2109 		new_tuple_fields = old_tuple_fields;
2110 		hw_pctypes = rss_conf->hw_pctypes;
2111 		while (hw_pctypes > 0) {
2112 			uint32_t idx = rte_bsf64(hw_pctypes);
2113 			uint64_t pctype = BIT_ULL(idx);
2114 
2115 			tuple_mask = hns3_flow_get_pctype_tuple_mask(pctype);
2116 			tuples = hns3_rss_calc_tuple_filed(rss_conf->conf.types);
2117 			new_tuple_fields &= ~tuple_mask;
2118 			new_tuple_fields |= tuples;
2119 			hw_pctypes &= ~pctype;
2120 		}
2121 	} else {
2122 		new_tuple_fields =
2123 			hns3_rss_calc_tuple_filed(rss_conf->conf.types);
2124 	}
2125 
2126 	ret = hns3_set_rss_tuple_field(hw, new_tuple_fields);
2127 	if (ret != 0)
2128 		return ret;
2129 
2130 	if (!cfg_global_tuple)
2131 		hns3_info(hw, "RSS tuple fields changed from 0x%" PRIx64 " to 0x%" PRIx64,
2132 			  old_tuple_fields, new_tuple_fields);
2133 
2134 	return 0;
2135 }
2136 
2137 static int
2138 hns3_config_rss_filter(struct hns3_hw *hw,
2139 		       struct hns3_flow_rss_conf *rss_conf)
2140 {
2141 	struct rte_flow_action_rss *rss_act;
2142 	int ret;
2143 
2144 	rss_act = &rss_conf->conf;
2145 	if (rss_act->queue_num > 0) {
2146 		ret = hns3_update_indir_table(hw, rss_act, rss_act->queue_num);
2147 		if (ret) {
2148 			hns3_err(hw, "set queues action failed, ret = %d", ret);
2149 			return ret;
2150 		}
2151 	}
2152 
2153 	if (rss_act->key_len > 0 ||
2154 	    rss_act->func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
2155 		ret = hns3_update_rss_algo_key(hw, rss_act->func, rss_conf->key,
2156 					       rss_act->key_len);
2157 		if (ret != 0) {
2158 			hns3_err(hw, "set func or hash key action failed, ret = %d",
2159 				 ret);
2160 			return ret;
2161 		}
2162 	}
2163 
2164 	if (rss_conf->hw_pctypes > 0) {
2165 		ret = hns3_flow_set_rss_ptype_tuple(hw, rss_conf);
2166 		if (ret != 0) {
2167 			hns3_err(hw, "set types action failed, ret = %d", ret);
2168 			return ret;
2169 		}
2170 	}
2171 
2172 	return 0;
2173 }
2174 
2175 static int
2176 hns3_clear_rss_filter(struct rte_eth_dev *dev)
2177 {
2178 	struct hns3_adapter *hns = dev->data->dev_private;
2179 	struct hns3_rss_conf_ele *rss_filter_ptr;
2180 	struct hns3_hw *hw = &hns->hw;
2181 
2182 	rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
2183 	while (rss_filter_ptr) {
2184 		TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
2185 		rte_free(rss_filter_ptr);
2186 		rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
2187 	}
2188 
2189 	return hns3_config_rss(hns);
2190 }
2191 
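/* Reprogram all cached RSS filters into the hardware. */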
2192 static int
2193 hns3_reconfig_all_rss_filter(struct hns3_hw *hw)
2194 {
2195 	struct hns3_rss_conf_ele *filter;
2196 	uint32_t rule_no = 0;
2197 	int ret;
2198 
2199 	TAILQ_FOREACH(filter, &hw->flow_rss_list, entries) {
2200 		ret = hns3_config_rss_filter(hw, &filter->filter_info);
2201 		if (ret != 0) {
2202 			hns3_err(hw, "config %uth RSS filter failed, ret = %d",
2203 				 rule_no, ret);
2204 			return ret;
2205 		}
2206 		rule_no++;
2207 	}
2208 
2209 	return 0;
2210 }
2211 
2212 static int
2213 hns3_restore_rss_filter(struct hns3_hw *hw)
2214 {
2215 	int ret;
2216 
2217 	pthread_mutex_lock(&hw->flows_lock);
2218 	ret = hns3_reconfig_all_rss_filter(hw);
2219 	pthread_mutex_unlock(&hw->flows_lock);
2220 
2221 	return ret;
2222 }
2223 
2224 int
2225 hns3_restore_filter(struct hns3_adapter *hns)
2226 {
2227 	struct hns3_hw *hw = &hns->hw;
2228 	int ret;
2229 
2230 	ret = hns3_restore_all_fdir_filter(hns);
2231 	if (ret != 0)
2232 		return ret;
2233 
2234 	return hns3_restore_rss_filter(hw);
2235 }
2236 
2237 static int
2238 hns3_flow_args_check(const struct rte_flow_attr *attr,
2239 		     const struct rte_flow_item pattern[],
2240 		     const struct rte_flow_action actions[],
2241 		     struct rte_flow_error *error)
2242 {
2243 	if (pattern == NULL)
2244 		return rte_flow_error_set(error, EINVAL,
2245 					  RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2246 					  NULL, "NULL pattern.");
2247 
2248 	if (actions == NULL)
2249 		return rte_flow_error_set(error, EINVAL,
2250 					  RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2251 					  NULL, "NULL action.");
2252 
2253 	if (attr == NULL)
2254 		return rte_flow_error_set(error, EINVAL,
2255 					  RTE_FLOW_ERROR_TYPE_ATTR,
2256 					  NULL, "NULL attribute.");
2257 
2258 	return hns3_check_attr(attr, error);
2259 }
2260 
2261 /*
2262  * Check if the flow rule is supported by hns3.
2263  * It only checks the format and doesn't guarantee that the rule can be
2264  * programmed into the HW, because there may not be enough room for the rule.
2265  */
2266 static int
2267 hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2268 		   const struct rte_flow_item pattern[],
2269 		   const struct rte_flow_action actions[],
2270 		   struct rte_flow_error *error,
2271 		   struct hns3_filter_info *filter_info)
2272 {
2273 	union hns3_filter_conf *conf;
2274 	int ret;
2275 
2276 	ret = hns3_flow_args_check(attr, pattern, actions, error);
2277 	if (ret)
2278 		return ret;
2279 
2280 	hns3_parse_filter_type(pattern, actions, filter_info);
2281 	conf = &filter_info->conf;
2282 	if (filter_info->type == RTE_ETH_FILTER_HASH)
2283 		return hns3_parse_rss_filter(dev, pattern, actions,
2284 					     &conf->rss_conf, error);
2285 
2286 	return hns3_parse_fdir_filter(dev, attr, pattern, actions,
2287 				      &conf->fdir_conf, error);
2288 }
2289 
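/*
 * Restore the original RSS configuration and then reprogram all cached RSS
 * filters.
 */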
2290 static int
2291 hns3_flow_rebuild_all_rss_filter(struct hns3_adapter *hns)
2292 {
2293 	struct hns3_hw *hw = &hns->hw;
2294 	int ret;
2295 
2296 	ret = hns3_config_rss(hns);
2297 	if (ret != 0) {
2298 		hns3_err(hw, "restore original RSS configuration failed, ret = %d.",
2299 			 ret);
2300 		return ret;
2301 	}
2302 	ret = hns3_reconfig_all_rss_filter(hw);
2303 	if (ret != 0)
2304 		hns3_err(hw, "rebuild all RSS filter failed, ret = %d.", ret);
2305 
2306 	return ret;
2307 }
2308 
2309 static int
2310 hns3_flow_create_rss_rule(struct rte_eth_dev *dev,
2311 			  struct hns3_flow_rss_conf *rss_conf,
2312 			  struct rte_flow *flow)
2313 {
2314 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2315 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2316 	struct hns3_rss_conf_ele *rss_filter_ptr;
2317 	struct hns3_flow_rss_conf *new_conf;
2318 	struct rte_flow_action_rss *rss_act;
2319 	int ret;
2320 
2321 	rss_filter_ptr = rte_zmalloc("hns3 rss filter",
2322 				     sizeof(struct hns3_rss_conf_ele), 0);
2323 	if (rss_filter_ptr == NULL) {
2324 		hns3_err(hw, "failed to allocate hns3_rss_filter memory");
2325 		return -ENOMEM;
2326 	}
2327 
2328 	new_conf = &rss_filter_ptr->filter_info;
2329 	memcpy(new_conf, rss_conf, sizeof(*new_conf));
2330 	rss_act = &new_conf->conf;
2331 	if (rss_act->queue_num > 0)
2332 		new_conf->conf.queue = new_conf->queue;
2333 	/*
2334 	 * There are two ways to deliver hash key action:
2335 	 * 1> 'key_len' is greater than zero and 'key' isn't NULL.
2336 	 * 2> 'key_len' is greater than zero, but 'key' is NULL.
2337 	 * For case 2, keep the 'key' of new_conf NULL so as to inherit the
2338 	 * configuration delivered by the user; otherwise the later duplicate
2339 	 * rule check could fail.
2340 	 */
2341 	if (rss_act->key_len > 0 && rss_act->key != NULL)
2342 		new_conf->conf.key = new_conf->key;
2343 
2344 	ret = hns3_config_rss_filter(hw, new_conf);
2345 	if (ret != 0) {
2346 		rte_free(rss_filter_ptr);
2347 		(void)hns3_flow_rebuild_all_rss_filter(hns);
2348 		return ret;
2349 	}
2350 
2351 	TAILQ_INSERT_TAIL(&hw->flow_rss_list, rss_filter_ptr, entries);
2352 	flow->rule = rss_filter_ptr;
2353 	flow->filter_type = RTE_ETH_FILTER_HASH;
2354 
2355 	return 0;
2356 }
2357 
2358 static int
2359 hns3_flow_create_fdir_rule(struct rte_eth_dev *dev,
2360 			   struct hns3_fdir_rule *fdir_rule,
2361 			   struct rte_flow_error *error,
2362 			   struct rte_flow *flow)
2363 {
2364 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2365 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2366 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
2367 	bool indir;
2368 	int ret;
2369 
2370 	indir = !!(fdir_rule->flags & HNS3_RULE_FLAG_COUNTER_INDIR);
2371 	if (fdir_rule->flags & HNS3_RULE_FLAG_COUNTER) {
2372 		ret = hns3_counter_new(dev, indir, fdir_rule->act_cnt.id,
2373 				       error);
2374 		if (ret != 0)
2375 			return ret;
2376 
2377 		flow->counter_id = fdir_rule->act_cnt.id;
2378 	}
2379 
2380 	fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
2381 				    sizeof(struct hns3_fdir_rule_ele), 0);
2382 	if (fdir_rule_ptr == NULL) {
2383 		hns3_err(hw, "failed to allocate fdir_rule memory.");
2384 		ret = -ENOMEM;
2385 		goto err_malloc;
2386 	}
2387 
2388 	/*
2389 	 * Program the rule to the hardware only after all the preceding tasks
2390 	 * have succeeded, which simplifies the rollback of rules in the
2391 	 * hardware.
2392 	 */
2393 	ret = hns3_fdir_filter_program(hns, fdir_rule, false);
2394 	if (ret != 0)
2395 		goto err_fdir_filter;
2396 
2397 	memcpy(&fdir_rule_ptr->fdir_conf, fdir_rule,
2398 		sizeof(struct hns3_fdir_rule));
2399 	TAILQ_INSERT_TAIL(&hw->flow_fdir_list, fdir_rule_ptr, entries);
2400 	flow->rule = fdir_rule_ptr;
2401 	flow->filter_type = RTE_ETH_FILTER_FDIR;
2402 
2403 	return 0;
2404 
2405 err_fdir_filter:
2406 	rte_free(fdir_rule_ptr);
2407 err_malloc:
2408 	if (fdir_rule->flags & HNS3_RULE_FLAG_COUNTER)
2409 		hns3_counter_release(dev, fdir_rule->act_cnt.id);
2410 
2411 	return ret;
2412 }
2413 
2414 /*
2415  * Create or destroy a flow rule.
2416  * Theoretically one rule can match more than one filter.
2417  * We will let it use the filter that it hits first.
2418  * So, the sequence matters.
2419  */
2420 static struct rte_flow *
2421 hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2422 		 const struct rte_flow_item pattern[],
2423 		 const struct rte_flow_action actions[],
2424 		 struct rte_flow_error *error)
2425 {
2426 	struct hns3_adapter *hns = dev->data->dev_private;
2427 	struct hns3_filter_info filter_info = {0};
2428 	struct hns3_flow_mem *flow_node;
2429 	struct hns3_hw *hw = &hns->hw;
2430 	union hns3_filter_conf *conf;
2431 	struct rte_flow *flow;
2432 	int ret;
2433 
2434 	ret = hns3_flow_validate(dev, attr, pattern, actions, error,
2435 				 &filter_info);
2436 	if (ret)
2437 		return NULL;
2438 
2439 	flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
2440 	if (flow == NULL) {
2441 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
2442 				   NULL, "Failed to allocate flow memory");
2443 		return NULL;
2444 	}
2445 	flow_node = rte_zmalloc("hns3 flow node",
2446 				sizeof(struct hns3_flow_mem), 0);
2447 	if (flow_node == NULL) {
2448 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
2449 				   NULL, "Failed to allocate flow list memory");
2450 		rte_free(flow);
2451 		return NULL;
2452 	}
2453 
2454 	flow_node->flow = flow;
2455 	conf = &filter_info.conf;
2456 	TAILQ_INSERT_TAIL(&hw->flow_list, flow_node, entries);
2457 	if (filter_info.type == RTE_ETH_FILTER_HASH)
2458 		ret = hns3_flow_create_rss_rule(dev, &conf->rss_conf, flow);
2459 	else
2460 		ret = hns3_flow_create_fdir_rule(dev, &conf->fdir_conf,
2461 						 error, flow);
2462 	if (ret == 0)
2463 		return flow;
2464 
2465 	rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2466 			   "Failed to create flow");
2467 	TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
2468 	rte_free(flow_node);
2469 	rte_free(flow);
2470 
2471 	return NULL;
2472 }
2473 
2474 /* Destroy a flow rule on hns3. */
2475 static int
2476 hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
2477 		  struct rte_flow_error *error)
2478 {
2479 	struct hns3_adapter *hns = dev->data->dev_private;
2480 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
2481 	struct hns3_rss_conf_ele *rss_filter_ptr;
2482 	struct hns3_flow_mem *flow_node;
2483 	enum rte_filter_type filter_type;
2484 	struct hns3_fdir_rule fdir_rule;
2485 	struct hns3_hw *hw = &hns->hw;
2486 	int ret;
2487 
2488 	if (flow == NULL)
2489 		return rte_flow_error_set(error, EINVAL,
2490 					  RTE_FLOW_ERROR_TYPE_HANDLE,
2491 					  flow, "Flow is NULL");
2492 
2493 	filter_type = flow->filter_type;
2494 	switch (filter_type) {
2495 	case RTE_ETH_FILTER_FDIR:
2496 		fdir_rule_ptr = (struct hns3_fdir_rule_ele *)flow->rule;
2497 		memcpy(&fdir_rule, &fdir_rule_ptr->fdir_conf,
2498 			   sizeof(struct hns3_fdir_rule));
2499 
2500 		ret = hns3_fdir_filter_program(hns, &fdir_rule, true);
2501 		if (ret)
2502 			return rte_flow_error_set(error, EIO,
2503 						  RTE_FLOW_ERROR_TYPE_HANDLE,
2504 						  flow,
2505 						  "Destroy FDIR fail. Try again");
2506 		if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
2507 			hns3_counter_release(dev, fdir_rule.act_cnt.id);
2508 		TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
2509 		rte_free(fdir_rule_ptr);
2510 		fdir_rule_ptr = NULL;
2511 		break;
2512 	case RTE_ETH_FILTER_HASH:
2513 		rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule;
2514 		TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
2515 		rte_free(rss_filter_ptr);
2516 		rss_filter_ptr = NULL;
2517 		(void)hns3_flow_rebuild_all_rss_filter(hns);
2518 		break;
2519 	default:
2520 		return rte_flow_error_set(error, EINVAL,
2521 					  RTE_FLOW_ERROR_TYPE_HANDLE, flow,
2522 					  "Unsupported filter type");
2523 	}
2524 
2525 	TAILQ_FOREACH(flow_node, &hw->flow_list, entries) {
2526 		if (flow_node->flow == flow) {
2527 			TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
2528 			rte_free(flow_node);
2529 			flow_node = NULL;
2530 			break;
2531 		}
2532 	}
2533 	rte_free(flow);
2534 
2535 	return 0;
2536 }
2537 
2538 /* Destroy all flow rules associated with a port on hns3. */
2539 static int
2540 hns3_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
2541 {
2542 	struct hns3_adapter *hns = dev->data->dev_private;
2543 	int ret;
2544 
2545 	/* FDIR is available only in PF driver */
2546 	if (!hns->is_vf) {
2547 		ret = hns3_clear_all_fdir_filter(hns);
2548 		if (ret) {
2549 			rte_flow_error_set(error, ret,
2550 					   RTE_FLOW_ERROR_TYPE_HANDLE,
2551 					   NULL, "Failed to flush rule");
2552 			return ret;
2553 		}
2554 		hns3_counter_flush(dev);
2555 	}
2556 
2557 	ret = hns3_clear_rss_filter(dev);
2558 	if (ret) {
2559 		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
2560 				   NULL, "Failed to flush rss filter");
2561 		return ret;
2562 	}
2563 
2564 	hns3_filterlist_flush(dev);
2565 
2566 	return 0;
2567 }
2568 
2569 /* Query an existing flow rule. */
2570 static int
2571 hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
2572 		const struct rte_flow_action *actions, void *data,
2573 		struct rte_flow_error *error)
2574 {
2575 	struct rte_flow_action_rss *rss_conf;
2576 	struct hns3_rss_conf_ele *rss_rule;
2577 	struct rte_flow_query_count *qc;
2578 	int ret;
2579 
2580 	if (!flow->rule)
2581 		return rte_flow_error_set(error, EINVAL,
2582 			RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "invalid rule");
2583 
2584 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2585 		switch (actions->type) {
2586 		case RTE_FLOW_ACTION_TYPE_VOID:
2587 			break;
2588 		case RTE_FLOW_ACTION_TYPE_COUNT:
2589 			qc = (struct rte_flow_query_count *)data;
2590 			ret = hns3_counter_query(dev, flow, qc, error);
2591 			if (ret)
2592 				return ret;
2593 			break;
2594 		case RTE_FLOW_ACTION_TYPE_RSS:
2595 			if (flow->filter_type != RTE_ETH_FILTER_HASH) {
2596 				return rte_flow_error_set(error, ENOTSUP,
2597 					RTE_FLOW_ERROR_TYPE_ACTION,
2598 					actions, "action is not supported");
2599 			}
2600 			rss_conf = (struct rte_flow_action_rss *)data;
2601 			rss_rule = (struct hns3_rss_conf_ele *)flow->rule;
2602 			rte_memcpy(rss_conf, &rss_rule->filter_info.conf,
2603 				   sizeof(struct rte_flow_action_rss));
2604 			break;
2605 		default:
2606 			return rte_flow_error_set(error, ENOTSUP,
2607 				RTE_FLOW_ERROR_TYPE_ACTION,
2608 				actions, "action is not supported");
2609 		}
2610 	}
2611 
2612 	return 0;
2613 }
2614 
2615 static int
2616 hns3_flow_validate_wrap(struct rte_eth_dev *dev,
2617 			const struct rte_flow_attr *attr,
2618 			const struct rte_flow_item pattern[],
2619 			const struct rte_flow_action actions[],
2620 			struct rte_flow_error *error)
2621 {
2622 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2623 	struct hns3_filter_info filter_info = {0};
2624 	int ret;
2625 
2626 	pthread_mutex_lock(&hw->flows_lock);
2627 	ret = hns3_flow_validate(dev, attr, pattern, actions, error,
2628 				 &filter_info);
2629 	pthread_mutex_unlock(&hw->flows_lock);
2630 
2631 	return ret;
2632 }
2633 
2634 static struct rte_flow *
2635 hns3_flow_create_wrap(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2636 		      const struct rte_flow_item pattern[],
2637 		      const struct rte_flow_action actions[],
2638 		      struct rte_flow_error *error)
2639 {
2640 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2641 	struct rte_flow *flow;
2642 
2643 	pthread_mutex_lock(&hw->flows_lock);
2644 	flow = hns3_flow_create(dev, attr, pattern, actions, error);
2645 	pthread_mutex_unlock(&hw->flows_lock);
2646 
2647 	return flow;
2648 }
2649 
2650 static int
2651 hns3_flow_destroy_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
2652 		       struct rte_flow_error *error)
2653 {
2654 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2655 	int ret;
2656 
2657 	pthread_mutex_lock(&hw->flows_lock);
2658 	ret = hns3_flow_destroy(dev, flow, error);
2659 	pthread_mutex_unlock(&hw->flows_lock);
2660 
2661 	return ret;
2662 }
2663 
2664 static int
2665 hns3_flow_flush_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error)
2666 {
2667 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2668 	int ret;
2669 
2670 	pthread_mutex_lock(&hw->flows_lock);
2671 	ret = hns3_flow_flush(dev, error);
2672 	pthread_mutex_unlock(&hw->flows_lock);
2673 
2674 	return ret;
2675 }
2676 
2677 static int
2678 hns3_flow_query_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
2679 		     const struct rte_flow_action *actions, void *data,
2680 		     struct rte_flow_error *error)
2681 {
2682 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2683 	int ret;
2684 
2685 	pthread_mutex_lock(&hw->flows_lock);
2686 	ret = hns3_flow_query(dev, flow, actions, data, error);
2687 	pthread_mutex_unlock(&hw->flows_lock);
2688 
2689 	return ret;
2690 }
2691 
2692 static int
2693 hns3_check_indir_action(const struct rte_flow_indir_action_conf *conf,
2694 			const struct rte_flow_action *action,
2695 			struct rte_flow_error *error)
2696 {
2697 	if (!conf->ingress)
2698 		return rte_flow_error_set(error, EINVAL,
2699 				RTE_FLOW_ERROR_TYPE_ACTION,
2700 				NULL, "Indir action ingress can't be zero");
2701 
2702 	if (conf->egress)
2703 		return rte_flow_error_set(error, EINVAL,
2704 				RTE_FLOW_ERROR_TYPE_ACTION,
2705 				NULL, "Indir action doesn't support egress");
2706 
2707 	if (conf->transfer)
2708 		return rte_flow_error_set(error, EINVAL,
2709 				RTE_FLOW_ERROR_TYPE_ACTION,
2710 				NULL, "Indir action doesn't support transfer");
2711 
2712 	if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
2713 		return rte_flow_error_set(error, EINVAL,
2714 				RTE_FLOW_ERROR_TYPE_ACTION,
2715 				NULL, "Indir action only supports count");
2716 
2717 	return 0;
2718 }
2719 
2720 static struct rte_flow_action_handle *
2721 hns3_flow_action_create(struct rte_eth_dev *dev,
2722 			const struct rte_flow_indir_action_conf *conf,
2723 			const struct rte_flow_action *action,
2724 			struct rte_flow_error *error)
2725 {
2726 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2727 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2728 	const struct rte_flow_action_count *act_count;
2729 	struct rte_flow_action_handle *handle = NULL;
2730 	struct hns3_flow_counter *counter;
2731 
2732 	if (hns3_check_indir_action(conf, action, error))
2733 		return NULL;
2734 
2735 	handle = rte_zmalloc("hns3 action handle",
2736 			     sizeof(struct rte_flow_action_handle), 0);
2737 	if (handle == NULL) {
2738 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
2739 				   NULL, "Failed to allocate action memory");
2740 		return NULL;
2741 	}
2742 
2743 	pthread_mutex_lock(&hw->flows_lock);
2744 
2745 	act_count = (const struct rte_flow_action_count *)action->conf;
2746 	if (act_count->id >= pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1]) {
2747 		rte_flow_error_set(error, EINVAL,
2748 				   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2749 				   action, "Invalid counter id");
2750 		goto err_exit;
2751 	}
2752 
2753 	if (hns3_counter_new(dev, false, act_count->id, error))
2754 		goto err_exit;
2755 
2756 	counter = hns3_counter_lookup(dev, act_count->id);
2757 	if (counter == NULL) {
2758 		rte_flow_error_set(error, EINVAL,
2759 				   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2760 				   action, "Counter id not found");
2761 		goto err_exit;
2762 	}
2763 
2764 	counter->indirect = true;
2765 	handle->indirect_type = HNS3_INDIRECT_ACTION_TYPE_COUNT;
2766 	handle->counter_id = counter->id;
2767 
2768 	pthread_mutex_unlock(&hw->flows_lock);
2769 	return handle;
2770 
2771 err_exit:
2772 	pthread_mutex_unlock(&hw->flows_lock);
2773 	rte_free(handle);
2774 	return NULL;
2775 }
2776 
2777 static int
2778 hns3_flow_action_destroy(struct rte_eth_dev *dev,
2779 			 struct rte_flow_action_handle *handle,
2780 			 struct rte_flow_error *error)
2781 {
2782 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2783 	struct hns3_flow_counter *counter;
2784 
2785 	pthread_mutex_lock(&hw->flows_lock);
2786 
2787 	if (handle->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT) {
2788 		pthread_mutex_unlock(&hw->flows_lock);
2789 		return rte_flow_error_set(error, EINVAL,
2790 					RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2791 					handle, "Invalid indirect type");
2792 	}
2793 
2794 	counter = hns3_counter_lookup(dev, handle->counter_id);
2795 	if (counter == NULL) {
2796 		pthread_mutex_unlock(&hw->flows_lock);
2797 		return rte_flow_error_set(error, EINVAL,
2798 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2799 				handle, "Counter id does not exist");
2800 	}
2801 
2802 	if (counter->ref_cnt > 1) {
2803 		pthread_mutex_unlock(&hw->flows_lock);
2804 		return rte_flow_error_set(error, EBUSY,
2805 				RTE_FLOW_ERROR_TYPE_HANDLE,
2806 				handle, "Counter id in use");
2807 	}
2808 
2809 	(void)hns3_counter_release(dev, handle->counter_id);
2810 	rte_free(handle);
2811 
2812 	pthread_mutex_unlock(&hw->flows_lock);
2813 	return 0;
2814 }
2815 
2816 static int
2817 hns3_flow_action_query(struct rte_eth_dev *dev,
2818 		 const struct rte_flow_action_handle *handle,
2819 		 void *data,
2820 		 struct rte_flow_error *error)
2821 {
2822 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2823 	struct rte_flow flow;
2824 	int ret;
2825 
2826 	pthread_mutex_lock(&hw->flows_lock);
2827 
2828 	if (handle->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT) {
2829 		pthread_mutex_unlock(&hw->flows_lock);
2830 		return rte_flow_error_set(error, EINVAL,
2831 					RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2832 					handle, "Invalid indirect type");
2833 	}
2834 
2835 	memset(&flow, 0, sizeof(flow));
2836 	flow.counter_id = handle->counter_id;
2837 	ret = hns3_counter_query(dev, &flow,
2838 				 (struct rte_flow_query_count *)data, error);
2839 	pthread_mutex_unlock(&hw->flows_lock);
2840 	return ret;
2841 }
2842 
2843 static const struct rte_flow_ops hns3_flow_ops = {
2844 	.validate = hns3_flow_validate_wrap,
2845 	.create = hns3_flow_create_wrap,
2846 	.destroy = hns3_flow_destroy_wrap,
2847 	.flush = hns3_flow_flush_wrap,
2848 	.query = hns3_flow_query_wrap,
2849 	.isolate = NULL,
2850 	.action_handle_create = hns3_flow_action_create,
2851 	.action_handle_destroy = hns3_flow_action_destroy,
2852 	.action_handle_query = hns3_flow_action_query,
2853 };
2854 
2855 int
2856 hns3_dev_flow_ops_get(struct rte_eth_dev *dev,
2857 		      const struct rte_flow_ops **ops)
2858 {
2859 	struct hns3_hw *hw;
2860 
2861 	hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2862 	if (hw->adapter_state >= HNS3_NIC_CLOSED)
2863 		return -ENODEV;
2864 
2865 	*ops = &hns3_flow_ops;
2866 	return 0;
2867 }
2868 
2869 void
2870 hns3_flow_init(struct rte_eth_dev *dev)
2871 {
2872 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2873 	pthread_mutexattr_t attr;
2874 
2875 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2876 		return;
2877 
2878 	pthread_mutexattr_init(&attr);
2879 	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
2880 	pthread_mutex_init(&hw->flows_lock, &attr);
2881 	dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
2882 
2883 	TAILQ_INIT(&hw->flow_fdir_list);
2884 	TAILQ_INIT(&hw->flow_rss_list);
2885 	TAILQ_INIT(&hw->flow_list);
2886 }
2887 
2888 void
2889 hns3_flow_uninit(struct rte_eth_dev *dev)
2890 {
2891 	struct rte_flow_error error;
2892 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2893 		hns3_flow_flush_wrap(dev, &error);
2894 }
2895