xref: /dpdk/drivers/net/hns3/hns3_flow.c (revision 7917b0d38e92e8b9ec5a870415b791420e10f11a)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 HiSilicon Limited.
3  */
4 
5 #include <rte_flow_driver.h>
6 #include <rte_io.h>
7 #include <rte_malloc.h>
8 
9 #include "hns3_ethdev.h"
10 #include "hns3_logs.h"
11 #include "hns3_flow.h"
12 
13 #define NEXT_ITEM_OF_ACTION(act, actions, index) \
14 	do { \
15 		(act) = (actions) + (index); \
16 		while ((act)->type == RTE_FLOW_ACTION_TYPE_VOID) { \
17 			(index)++; \
18 			(act) = (actions) + (index); \
19 		} \
20 	} while (0)
21 
22 #define NEXT_ITEM_OF_PATTERN(item, pattern, index) \
23 	do { \
24 		(item) = (pattern) + (index); \
25 		while ((item)->type == RTE_FLOW_ITEM_TYPE_VOID) { \
26 			(index)++; \
27 			(item) = (pattern) + (index); \
28 		} \
29 	} while (0)
30 
31 #define HNS3_HASH_HDR_ETH	RTE_BIT64(0)
32 #define HNS3_HASH_HDR_IPV4	RTE_BIT64(1)
33 #define HNS3_HASH_HDR_IPV6	RTE_BIT64(2)
34 #define HNS3_HASH_HDR_TCP	RTE_BIT64(3)
35 #define HNS3_HASH_HDR_UDP	RTE_BIT64(4)
36 #define HNS3_HASH_HDR_SCTP	RTE_BIT64(5)
37 
38 #define HNS3_HASH_VOID_NEXT_ALLOW	BIT_ULL(RTE_FLOW_ITEM_TYPE_ETH)
39 
40 #define HNS3_HASH_ETH_NEXT_ALLOW	(BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV4) | \
41 					 BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV6))
42 
43 #define HNS3_HASH_IP_NEXT_ALLOW		(BIT_ULL(RTE_FLOW_ITEM_TYPE_TCP) | \
44 					 BIT_ULL(RTE_FLOW_ITEM_TYPE_UDP) | \
45 					 BIT_ULL(RTE_FLOW_ITEM_TYPE_SCTP))
46 
47 static const uint64_t hash_pattern_next_allow_items[] = {
48 	[RTE_FLOW_ITEM_TYPE_VOID] = HNS3_HASH_VOID_NEXT_ALLOW,
49 	[RTE_FLOW_ITEM_TYPE_ETH]  = HNS3_HASH_ETH_NEXT_ALLOW,
50 	[RTE_FLOW_ITEM_TYPE_IPV4] = HNS3_HASH_IP_NEXT_ALLOW,
51 	[RTE_FLOW_ITEM_TYPE_IPV6] = HNS3_HASH_IP_NEXT_ALLOW,
52 };
53 
54 static const uint64_t hash_pattern_item_header[] = {
55 	[RTE_FLOW_ITEM_TYPE_ETH]  = HNS3_HASH_HDR_ETH,
56 	[RTE_FLOW_ITEM_TYPE_IPV4] = HNS3_HASH_HDR_IPV4,
57 	[RTE_FLOW_ITEM_TYPE_IPV6] = HNS3_HASH_HDR_IPV6,
58 	[RTE_FLOW_ITEM_TYPE_TCP]  = HNS3_HASH_HDR_TCP,
59 	[RTE_FLOW_ITEM_TYPE_UDP]  = HNS3_HASH_HDR_UDP,
60 	[RTE_FLOW_ITEM_TYPE_SCTP] = HNS3_HASH_HDR_SCTP,
61 };
62 
63 #define HNS3_HASH_IPV4		(HNS3_HASH_HDR_ETH | HNS3_HASH_HDR_IPV4)
64 #define HNS3_HASH_IPV4_TCP	(HNS3_HASH_HDR_ETH | \
65 				 HNS3_HASH_HDR_IPV4 | \
66 				 HNS3_HASH_HDR_TCP)
67 #define HNS3_HASH_IPV4_UDP	(HNS3_HASH_HDR_ETH | \
68 				 HNS3_HASH_HDR_IPV4 | \
69 				 HNS3_HASH_HDR_UDP)
70 #define HNS3_HASH_IPV4_SCTP	(HNS3_HASH_HDR_ETH | \
71 				 HNS3_HASH_HDR_IPV4 | \
72 				 HNS3_HASH_HDR_SCTP)
73 #define HNS3_HASH_IPV6		(HNS3_HASH_HDR_ETH | HNS3_HASH_HDR_IPV6)
74 #define HNS3_HASH_IPV6_TCP	(HNS3_HASH_HDR_ETH | \
75 				 HNS3_HASH_HDR_IPV6 | \
76 				 HNS3_HASH_HDR_TCP)
77 #define HNS3_HASH_IPV6_UDP	(HNS3_HASH_HDR_ETH | \
78 				 HNS3_HASH_HDR_IPV6 | \
79 				 HNS3_HASH_HDR_UDP)
80 #define HNS3_HASH_IPV6_SCTP	(HNS3_HASH_HDR_ETH | \
81 				 HNS3_HASH_HDR_IPV6 | \
82 				 HNS3_HASH_HDR_SCTP)
83 
84 static const struct hns3_hash_map_info {
85 	/* Pattern type specified; zero means the action applies to all flow types. */
86 	uint64_t pattern_type;
87 	uint64_t rss_pctype; /* packet type with prefix RTE_ETH_RSS_xxx */
88 	uint64_t l3l4_types; /* Supported L3/L4 RSS types for this packet type */
89 	uint64_t hw_pctype; /* packet type in driver */
90 	uint64_t tuple_mask; /* full tuples of the hw_pctype */
91 } hash_map_table[] = {
92 	/* IPV4 */
93 	{ HNS3_HASH_IPV4,
94 	  RTE_ETH_RSS_IPV4, HNS3_RSS_SUPPORT_L3_SRC_DST,
95 	  HNS3_RSS_PCTYPE_IPV4_NONF, HNS3_RSS_TUPLE_IPV4_NONF_M },
96 	{ HNS3_HASH_IPV4,
97 	  RTE_ETH_RSS_NONFRAG_IPV4_OTHER, HNS3_RSS_SUPPORT_L3_SRC_DST,
98 	  HNS3_RSS_PCTYPE_IPV4_NONF, HNS3_RSS_TUPLE_IPV4_NONF_M },
99 	{ HNS3_HASH_IPV4,
100 	  RTE_ETH_RSS_FRAG_IPV4, HNS3_RSS_SUPPORT_L3_SRC_DST,
101 	  HNS3_RSS_PCTYPE_IPV4_FLAG, HNS3_RSS_TUPLE_IPV4_FLAG_M },
102 	{ HNS3_HASH_IPV4_TCP,
103 	  RTE_ETH_RSS_NONFRAG_IPV4_TCP, HNS3_RSS_SUPPORT_L3L4,
104 	  HNS3_RSS_PCTYPE_IPV4_TCP, HNS3_RSS_TUPLE_IPV4_TCP_M },
105 	{ HNS3_HASH_IPV4_UDP,
106 	  RTE_ETH_RSS_NONFRAG_IPV4_UDP, HNS3_RSS_SUPPORT_L3L4,
107 	  HNS3_RSS_PCTYPE_IPV4_UDP, HNS3_RSS_TUPLE_IPV4_UDP_M },
108 	{ HNS3_HASH_IPV4_SCTP,
109 	  RTE_ETH_RSS_NONFRAG_IPV4_SCTP, HNS3_RSS_SUPPORT_L3L4,
110 	  HNS3_RSS_PCTYPE_IPV4_SCTP, HNS3_RSS_TUPLE_IPV4_SCTP_M },
111 	/* IPV6 */
112 	{ HNS3_HASH_IPV6,
113 	  RTE_ETH_RSS_IPV6, HNS3_RSS_SUPPORT_L3_SRC_DST,
114 	  HNS3_RSS_PCTYPE_IPV6_NONF, HNS3_RSS_TUPLE_IPV6_NONF_M },
115 	{ HNS3_HASH_IPV6,
116 	  RTE_ETH_RSS_NONFRAG_IPV6_OTHER, HNS3_RSS_SUPPORT_L3_SRC_DST,
117 	  HNS3_RSS_PCTYPE_IPV6_NONF, HNS3_RSS_TUPLE_IPV6_NONF_M },
118 	{ HNS3_HASH_IPV6,
119 	  RTE_ETH_RSS_FRAG_IPV6, HNS3_RSS_SUPPORT_L3_SRC_DST,
120 	  HNS3_RSS_PCTYPE_IPV6_FLAG, HNS3_RSS_TUPLE_IPV6_FLAG_M },
121 	{ HNS3_HASH_IPV6_TCP,
122 	  RTE_ETH_RSS_NONFRAG_IPV6_TCP, HNS3_RSS_SUPPORT_L3L4,
123 	  HNS3_RSS_PCTYPE_IPV6_TCP, HNS3_RSS_TUPLE_IPV6_TCP_M },
124 	{ HNS3_HASH_IPV6_UDP,
125 	  RTE_ETH_RSS_NONFRAG_IPV6_UDP, HNS3_RSS_SUPPORT_L3L4,
126 	  HNS3_RSS_PCTYPE_IPV6_UDP, HNS3_RSS_TUPLE_IPV6_UDP_M },
127 	{ HNS3_HASH_IPV6_SCTP,
128 	  RTE_ETH_RSS_NONFRAG_IPV6_SCTP, HNS3_RSS_SUPPORT_L3L4,
129 	  HNS3_RSS_PCTYPE_IPV6_SCTP, HNS3_RSS_TUPLE_IPV6_SCTP_M },
130 };
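
/*
 * Reading the table above (illustrative): a hash rule whose pattern is
 * "ETH / IPV4 / TCP" has pattern_type HNS3_HASH_IPV4_TCP, so the matching
 * entry maps RTE_ETH_RSS_NONFRAG_IPV4_TCP to the hardware packet type
 * HNS3_RSS_PCTYPE_IPV4_TCP with full tuple mask HNS3_RSS_TUPLE_IPV4_TCP_M.
 */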
131 
132 static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF };
133 static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 };
134 
135 /* Special Filter id for non-specific packet flagging. Don't change value */
136 #define HNS3_MAX_FILTER_ID	0x0FFF
137 
138 #define ETHER_TYPE_MASK		0xFFFF
139 #define IPPROTO_MASK		0xFF
140 #define TUNNEL_TYPE_MASK	0xFFFF
141 
142 #define HNS3_TUNNEL_TYPE_VXLAN		0x12B5
143 #define HNS3_TUNNEL_TYPE_VXLAN_GPE	0x12B6
144 #define HNS3_TUNNEL_TYPE_GENEVE		0x17C1
145 #define HNS3_TUNNEL_TYPE_NVGRE		0x6558
146 
147 static enum rte_flow_item_type first_items[] = {
148 	RTE_FLOW_ITEM_TYPE_ETH,
149 	RTE_FLOW_ITEM_TYPE_IPV4,
150 	RTE_FLOW_ITEM_TYPE_IPV6,
151 	RTE_FLOW_ITEM_TYPE_TCP,
152 	RTE_FLOW_ITEM_TYPE_UDP,
153 	RTE_FLOW_ITEM_TYPE_SCTP,
154 	RTE_FLOW_ITEM_TYPE_ICMP,
155 	RTE_FLOW_ITEM_TYPE_NVGRE,
156 	RTE_FLOW_ITEM_TYPE_VXLAN,
157 	RTE_FLOW_ITEM_TYPE_GENEVE,
158 	RTE_FLOW_ITEM_TYPE_VXLAN_GPE
159 };
160 
161 static enum rte_flow_item_type L2_next_items[] = {
162 	RTE_FLOW_ITEM_TYPE_VLAN,
163 	RTE_FLOW_ITEM_TYPE_IPV4,
164 	RTE_FLOW_ITEM_TYPE_IPV6
165 };
166 
167 static enum rte_flow_item_type L3_next_items[] = {
168 	RTE_FLOW_ITEM_TYPE_TCP,
169 	RTE_FLOW_ITEM_TYPE_UDP,
170 	RTE_FLOW_ITEM_TYPE_SCTP,
171 	RTE_FLOW_ITEM_TYPE_NVGRE,
172 	RTE_FLOW_ITEM_TYPE_ICMP
173 };
174 
175 static enum rte_flow_item_type L4_next_items[] = {
176 	RTE_FLOW_ITEM_TYPE_VXLAN,
177 	RTE_FLOW_ITEM_TYPE_GENEVE,
178 	RTE_FLOW_ITEM_TYPE_VXLAN_GPE
179 };
180 
181 static enum rte_flow_item_type tunnel_next_items[] = {
182 	RTE_FLOW_ITEM_TYPE_ETH,
183 	RTE_FLOW_ITEM_TYPE_VLAN
184 };
185 
186 struct items_step_mngr {
187 	enum rte_flow_item_type *items;
188 	size_t count;
189 };
190 
191 static inline void
192 net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len)
193 {
194 	size_t i;
195 
196 	for (i = 0; i < len; i++)
197 		dst[i] = rte_be_to_cpu_32(src[i]);
198 }
199 
200 /*
201  * This function is used to parse the filter type.
202  * 1. As we know, RSS is used to spread packets among several queues. The flow
203  *    API provides the struct rte_flow_action_rss; the user can configure its
204  *    fields such as func/level/types/key/queue to control the RSS function.
205  * 2. The flow API also supports queue region configuration for hns3. It is
206  *    implemented by FDIR + RSS in hns3 hardware: the user can create one FDIR
207  *    rule whose action is an RSS queue region.
208  * 3. When the action is RSS, the following rule is used to distinguish:
209  *    Case 1: the pattern has ETH and all fields in the RSS action except
210  *            'queues' are zero or default: a queue region configuration.
211  *    Otherwise: a general RSS action.
212  */
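/*
 * Illustrative example (not part of the driver): an application-side
 * pattern/action pair that rule 3 above classifies as a queue region
 * configuration, because the pattern has ETH and the RSS action only
 * carries 'queue'/'queue_num' while func/types/key stay at defaults.
 * The queue indexes are hypothetical.
 *
 *	uint16_t queues[] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss rss = {
 *		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *		.types = 0,
 *		.key_len = 0,
 *		.queue_num = RTE_DIM(queues),
 *		.queue = queues,
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */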
213 static void
214 hns3_parse_filter_type(const struct rte_flow_item pattern[],
215 		       const struct rte_flow_action actions[],
216 		       struct hns3_filter_info *filter_info)
217 {
218 	const struct rte_flow_action_rss *rss_act;
219 	const struct rte_flow_action *act = NULL;
220 	bool only_has_queues = false;
221 	bool have_eth = false;
222 
223 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
224 		if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
225 			act = actions;
226 			break;
227 		}
228 	}
229 	if (act == NULL) {
230 		filter_info->type = RTE_ETH_FILTER_FDIR;
231 		return;
232 	}
233 
234 	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
235 		if (pattern->type == RTE_FLOW_ITEM_TYPE_ETH) {
236 			have_eth = true;
237 			break;
238 		}
239 	}
240 
241 	rss_act = act->conf;
242 	only_has_queues = (rss_act->queue_num > 0) &&
243 			  (rss_act->func == RTE_ETH_HASH_FUNCTION_DEFAULT &&
244 			   rss_act->types == 0 && rss_act->key_len == 0);
245 	if (have_eth && only_has_queues) {
246 		/*
247 		 * Pattern has ETH and all fields in RSS action except 'queues'
248 		 * are zero or default, which indicates this is queue region
249 		 * configuration.
250 		 */
251 		filter_info->type = RTE_ETH_FILTER_FDIR;
252 		return;
253 	}
254 
255 	filter_info->type = RTE_ETH_FILTER_HASH;
256 }
257 
258 static inline struct hns3_flow_counter *
259 hns3_counter_lookup(struct rte_eth_dev *dev, uint32_t id)
260 {
261 	struct hns3_adapter *hns = dev->data->dev_private;
262 	struct hns3_pf *pf = &hns->pf;
263 	struct hns3_flow_counter *cnt;
264 
265 	LIST_FOREACH(cnt, &pf->flow_counters, next) {
266 		if (cnt->id == id)
267 			return cnt;
268 	}
269 	return NULL;
270 }
271 
272 static int
273 hns3_counter_new(struct rte_eth_dev *dev, uint32_t indirect, uint32_t id,
274 		 struct rte_flow_error *error)
275 {
276 	struct hns3_adapter *hns = dev->data->dev_private;
277 	struct hns3_pf *pf = &hns->pf;
278 	struct hns3_hw *hw = &hns->hw;
279 	struct hns3_flow_counter *cnt;
280 	uint64_t value;
281 	int ret;
282 
283 	cnt = hns3_counter_lookup(dev, id);
284 	if (cnt) {
285 		if (!cnt->indirect || cnt->indirect != indirect)
286 			return rte_flow_error_set(error, ENOTSUP,
287 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
288 				cnt,
289 				"Counter id is in use, but the indirect flag does not match");
290 		/* Clear the indirect counter on first use. */
291 		if (cnt->indirect && cnt->ref_cnt == 1)
292 			(void)hns3_fd_get_count(hw, id, &value);
293 		cnt->ref_cnt++;
294 		return 0;
295 	}
296 
297 	/* Clear the counter by read ops because the counter is read-clear */
298 	ret = hns3_fd_get_count(hw, id, &value);
299 	if (ret)
300 		return rte_flow_error_set(error, EIO,
301 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
302 					  "Clear counter failed!");
303 
304 	cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
305 	if (cnt == NULL)
306 		return rte_flow_error_set(error, ENOMEM,
307 					  RTE_FLOW_ERROR_TYPE_HANDLE, cnt,
308 					  "Alloc mem for counter failed");
309 	cnt->id = id;
310 	cnt->indirect = indirect;
311 	cnt->ref_cnt = 1;
312 	cnt->hits = 0;
313 	LIST_INSERT_HEAD(&pf->flow_counters, cnt, next);
314 	return 0;
315 }
316 
317 static int
318 hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
319 		   struct rte_flow_query_count *qc,
320 		   struct rte_flow_error *error)
321 {
322 	struct hns3_adapter *hns = dev->data->dev_private;
323 	struct hns3_flow_counter *cnt;
324 	uint64_t value;
325 	int ret;
326 
327 	/* FDIR is available only in PF driver */
328 	if (hns->is_vf)
329 		return rte_flow_error_set(error, ENOTSUP,
330 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
331 					  "Fdir is not supported in VF");
332 	cnt = hns3_counter_lookup(dev, flow->counter_id);
333 	if (cnt == NULL)
334 		return rte_flow_error_set(error, EINVAL,
335 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
336 					  "Can't find counter id");
337 
338 	ret = hns3_fd_get_count(&hns->hw, flow->counter_id, &value);
339 	if (ret) {
340 		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
341 				   NULL, "Read counter failed.");
342 		return ret;
343 	}
344 	qc->hits_set = 1;
345 	qc->hits = value;
346 	qc->bytes_set = 0;
347 	qc->bytes = 0;
348 
349 	return 0;
350 }
351 
352 static int
353 hns3_counter_release(struct rte_eth_dev *dev, uint32_t id)
354 {
355 	struct hns3_adapter *hns = dev->data->dev_private;
356 	struct hns3_hw *hw = &hns->hw;
357 	struct hns3_flow_counter *cnt;
358 
359 	cnt = hns3_counter_lookup(dev, id);
360 	if (cnt == NULL) {
361 		hns3_err(hw, "Can't find available counter to release");
362 		return -EINVAL;
363 	}
364 	cnt->ref_cnt--;
365 	if (cnt->ref_cnt == 0) {
366 		LIST_REMOVE(cnt, next);
367 		rte_free(cnt);
368 	}
369 	return 0;
370 }
371 
372 static void
373 hns3_counter_flush(struct rte_eth_dev *dev)
374 {
375 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
376 	LIST_HEAD(counters, hns3_flow_counter) indir_counters;
377 	struct hns3_flow_counter *cnt_ptr;
378 
379 	LIST_INIT(&indir_counters);
380 	cnt_ptr = LIST_FIRST(&pf->flow_counters);
381 	while (cnt_ptr) {
382 		LIST_REMOVE(cnt_ptr, next);
383 		if (cnt_ptr->indirect)
384 			LIST_INSERT_HEAD(&indir_counters, cnt_ptr, next);
385 		else
386 			rte_free(cnt_ptr);
387 		cnt_ptr = LIST_FIRST(&pf->flow_counters);
388 	}
389 
390 	/* Reset the indirect counters and add them back to pf->flow_counters list. */
391 	cnt_ptr = LIST_FIRST(&indir_counters);
392 	while (cnt_ptr) {
393 		LIST_REMOVE(cnt_ptr, next);
394 		cnt_ptr->ref_cnt = 1;
395 		cnt_ptr->hits = 0;
396 		LIST_INSERT_HEAD(&pf->flow_counters, cnt_ptr, next);
397 		cnt_ptr = LIST_FIRST(&indir_counters);
398 	}
399 }
400 
401 static int
402 hns3_handle_action_queue(struct rte_eth_dev *dev,
403 			 const struct rte_flow_action *action,
404 			 struct hns3_fdir_rule *rule,
405 			 struct rte_flow_error *error)
406 {
407 	struct hns3_adapter *hns = dev->data->dev_private;
408 	const struct rte_flow_action_queue *queue;
409 	struct hns3_hw *hw = &hns->hw;
410 
411 	queue = (const struct rte_flow_action_queue *)action->conf;
412 	if (queue->index >= hw->data->nb_rx_queues) {
413 		hns3_err(hw, "queue ID(%u) is greater than the number of available queues (%u) in driver.",
414 			 queue->index, hw->data->nb_rx_queues);
415 		return rte_flow_error_set(error, EINVAL,
416 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
417 					  action, "Invalid queue ID in PF");
418 	}
419 
420 	rule->queue_id = queue->index;
421 	rule->nb_queues = 1;
422 	rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
423 	return 0;
424 }
425 
426 static int
427 hns3_handle_action_queue_region(struct rte_eth_dev *dev,
428 				const struct rte_flow_action *action,
429 				struct hns3_fdir_rule *rule,
430 				struct rte_flow_error *error)
431 {
432 	struct hns3_adapter *hns = dev->data->dev_private;
433 	const struct rte_flow_action_rss *conf = action->conf;
434 	struct hns3_hw *hw = &hns->hw;
435 	uint16_t idx;
436 
437 	if (!hns3_dev_get_support(hw, FD_QUEUE_REGION))
438 		return rte_flow_error_set(error, ENOTSUP,
439 			RTE_FLOW_ERROR_TYPE_ACTION, action,
440 			"Queue region configuration is not supported!");
441 
442 	if ((!rte_is_power_of_2(conf->queue_num)) ||
443 		conf->queue_num > hw->rss_size_max ||
444 		conf->queue[0] >= hw->data->nb_rx_queues ||
445 		conf->queue[0] + conf->queue_num > hw->data->nb_rx_queues) {
446 		return rte_flow_error_set(error, EINVAL,
447 			RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
448 			"Invalid start queue ID or queue num! the start queue "
449 			"ID must be valid, the queue num must be a power of 2 and "
450 			"<= rss_size_max.");
451 	}
452 
453 	for (idx = 1; idx < conf->queue_num; idx++) {
454 		if (conf->queue[idx] != conf->queue[idx - 1] + 1)
455 			return rte_flow_error_set(error, EINVAL,
456 				RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
457 				"Invalid queue ID sequence! the queue IDs "
458 				"must be continuously increasing.");
459 	}
460 
461 	rule->queue_id = conf->queue[0];
462 	rule->nb_queues = conf->queue_num;
463 	rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
464 	return 0;
465 }
466 
467 static int
468 hns3_handle_action_indirect(struct rte_eth_dev *dev,
469 			    const struct rte_flow_action *action,
470 			    struct hns3_fdir_rule *rule,
471 			    struct rte_flow_error *error)
472 {
473 	const struct rte_flow_action_handle *indir = action->conf;
474 
475 	if (indir->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT)
476 		return rte_flow_error_set(error, EINVAL,
477 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
478 				action, "Invalid indirect type");
479 
480 	if (hns3_counter_lookup(dev, indir->counter_id) == NULL)
481 		return rte_flow_error_set(error, EINVAL,
482 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
483 				action, "Counter id does not exist");
484 
485 	rule->act_cnt.id = indir->counter_id;
486 	rule->flags |= (HNS3_RULE_FLAG_COUNTER | HNS3_RULE_FLAG_COUNTER_INDIR);
487 
488 	return 0;
489 }
490 
491 /*
492  * Parse the action list and convert it into NIC specific actions.
493  * The actions are validated as they are parsed.
494  *
495  * @param actions[in]
496  * @param rule[out]
497  *   NIC specific actions derived from the flow actions.
498  * @param error[out]
499  */
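/*
 * Illustrative action list (hypothetical counter id, application side)
 * accepted by this function: drop matching packets and count them.
 *
 *	struct rte_flow_action_count cnt = { .id = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &cnt },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */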
500 static int
501 hns3_handle_actions(struct rte_eth_dev *dev,
502 		    const struct rte_flow_action actions[],
503 		    struct hns3_fdir_rule *rule, struct rte_flow_error *error)
504 {
505 	struct hns3_adapter *hns = dev->data->dev_private;
506 	const struct rte_flow_action_count *act_count;
507 	const struct rte_flow_action_mark *mark;
508 	struct hns3_pf *pf = &hns->pf;
509 	uint32_t counter_num;
510 	int ret;
511 
512 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
513 		switch (actions->type) {
514 		case RTE_FLOW_ACTION_TYPE_QUEUE:
515 			ret = hns3_handle_action_queue(dev, actions, rule,
516 						       error);
517 			if (ret)
518 				return ret;
519 			break;
520 		case RTE_FLOW_ACTION_TYPE_DROP:
521 			rule->action = HNS3_FD_ACTION_DROP_PACKET;
522 			break;
523 		/*
524 		 * Here the real action of RSS is a queue region.
525 		 * A queue region is implemented by FDIR + RSS in hns3 hardware:
526 		 * the FDIR action is one queue region (start_queue_id and
527 		 * queue_num), then RSS spreads packets over the queue region
528 		 * by the RSS algorithm.
529 		 */
530 		case RTE_FLOW_ACTION_TYPE_RSS:
531 			ret = hns3_handle_action_queue_region(dev, actions,
532 							      rule, error);
533 			if (ret)
534 				return ret;
535 			break;
536 		case RTE_FLOW_ACTION_TYPE_MARK:
537 			mark =
538 			    (const struct rte_flow_action_mark *)actions->conf;
539 			if (mark->id >= HNS3_MAX_FILTER_ID)
540 				return rte_flow_error_set(error, EINVAL,
541 						RTE_FLOW_ERROR_TYPE_ACTION_CONF,
542 						actions,
543 						"Invalid Mark ID");
544 			rule->fd_id = mark->id;
545 			rule->flags |= HNS3_RULE_FLAG_FDID;
546 			break;
547 		case RTE_FLOW_ACTION_TYPE_FLAG:
548 			rule->fd_id = HNS3_MAX_FILTER_ID;
549 			rule->flags |= HNS3_RULE_FLAG_FDID;
550 			break;
551 		case RTE_FLOW_ACTION_TYPE_COUNT:
552 			act_count =
553 			    (const struct rte_flow_action_count *)actions->conf;
554 			counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
555 			if (act_count->id >= counter_num)
556 				return rte_flow_error_set(error, EINVAL,
557 						RTE_FLOW_ERROR_TYPE_ACTION_CONF,
558 						actions,
559 						"Invalid counter id");
560 			rule->act_cnt = *act_count;
561 			rule->flags |= HNS3_RULE_FLAG_COUNTER;
562 			rule->flags &= ~HNS3_RULE_FLAG_COUNTER_INDIR;
563 			break;
564 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
565 			ret = hns3_handle_action_indirect(dev, actions, rule,
566 							  error);
567 			if (ret)
568 				return ret;
569 			break;
570 		case RTE_FLOW_ACTION_TYPE_VOID:
571 			break;
572 		default:
573 			return rte_flow_error_set(error, ENOTSUP,
574 						  RTE_FLOW_ERROR_TYPE_ACTION,
575 						  NULL, "Unsupported action");
576 		}
577 	}
578 
579 	return 0;
580 }
581 
582 static int
583 hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
584 {
585 	if (!attr->ingress)
586 		return rte_flow_error_set(error, EINVAL,
587 					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
588 					  attr, "Ingress can't be zero");
589 	if (attr->egress)
590 		return rte_flow_error_set(error, ENOTSUP,
591 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
592 					  attr, "Egress is not supported");
593 	if (attr->transfer)
594 		return rte_flow_error_set(error, ENOTSUP,
595 					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
596 					  attr, "Transfer is not supported");
597 	if (attr->priority)
598 		return rte_flow_error_set(error, ENOTSUP,
599 					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
600 					  attr, "Priority is not supported");
601 	if (attr->group)
602 		return rte_flow_error_set(error, ENOTSUP,
603 					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
604 					  attr, "Group is not supported");
605 	return 0;
606 }
607 
608 static int
609 hns3_parse_eth(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
610 	       struct rte_flow_error *error __rte_unused)
611 {
612 	const struct rte_flow_item_eth *eth_spec;
613 	const struct rte_flow_item_eth *eth_mask;
614 
615 	/* Only used to describe the protocol stack. */
616 	if (item->spec == NULL && item->mask == NULL)
617 		return 0;
618 
619 	eth_mask = item->mask;
620 	if (eth_mask) {
621 		if (eth_mask->hdr.ether_type) {
622 			hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
623 			rule->key_conf.mask.ether_type =
624 			    rte_be_to_cpu_16(eth_mask->hdr.ether_type);
625 		}
626 		if (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr)) {
627 			hns3_set_bit(rule->input_set, INNER_SRC_MAC, 1);
628 			memcpy(rule->key_conf.mask.src_mac,
629 			       eth_mask->hdr.src_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
630 		}
631 		if (!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr)) {
632 			hns3_set_bit(rule->input_set, INNER_DST_MAC, 1);
633 			memcpy(rule->key_conf.mask.dst_mac,
634 			       eth_mask->hdr.dst_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
635 		}
636 		if (eth_mask->has_vlan)
637 			rule->has_vlan_m = true;
638 	}
639 
640 	eth_spec = item->spec;
641 	if (eth_mask && eth_mask->has_vlan && eth_spec->has_vlan) {
642 		rule->key_conf.vlan_num++;
643 		rule->has_vlan_v = true;
644 	}
645 
646 	rule->key_conf.spec.ether_type = rte_be_to_cpu_16(eth_spec->hdr.ether_type);
647 	memcpy(rule->key_conf.spec.src_mac, eth_spec->hdr.src_addr.addr_bytes,
648 	       RTE_ETHER_ADDR_LEN);
649 	memcpy(rule->key_conf.spec.dst_mac, eth_spec->hdr.dst_addr.addr_bytes,
650 	       RTE_ETHER_ADDR_LEN);
651 	return 0;
652 }
653 
654 static int
655 hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
656 		struct rte_flow_error *error)
657 {
658 	const struct rte_flow_item_vlan *vlan_spec;
659 	const struct rte_flow_item_vlan *vlan_mask;
660 
661 	if (rule->has_vlan_m && !rule->has_vlan_v)
662 		return rte_flow_error_set(error, EINVAL,
663 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
664 					  "VLAN item conflicts with 'has_vlan is 0' in ETH item");
665 
666 	if (rule->has_more_vlan_m && !rule->has_more_vlan_v)
667 		return rte_flow_error_set(error, EINVAL,
668 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
669 					  "VLAN item conflicts with 'has_more_vlan is 0' in the previous VLAN item");
670 
671 	if (rule->has_vlan_m && rule->has_vlan_v) {
672 		rule->has_vlan_m = false;
673 		rule->key_conf.vlan_num--;
674 	}
675 
676 	if (rule->has_more_vlan_m && rule->has_more_vlan_v) {
677 		rule->has_more_vlan_m = false;
678 		rule->key_conf.vlan_num--;
679 	}
680 
681 	rule->key_conf.vlan_num++;
682 	if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
683 		return rte_flow_error_set(error, EINVAL,
684 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
685 					  "Vlan_num is more than 2");
686 
687 	/* Only used to describe the protocol stack. */
688 	if (item->spec == NULL && item->mask == NULL)
689 		return 0;
690 
691 	vlan_mask = item->mask;
692 	if (vlan_mask) {
693 		if (vlan_mask->hdr.vlan_tci) {
694 			if (rule->key_conf.vlan_num == 1) {
695 				hns3_set_bit(rule->input_set, INNER_VLAN_TAG1,
696 					     1);
697 				rule->key_conf.mask.vlan_tag1 =
698 				    rte_be_to_cpu_16(vlan_mask->hdr.vlan_tci);
699 			} else {
700 				hns3_set_bit(rule->input_set, INNER_VLAN_TAG2,
701 					     1);
702 				rule->key_conf.mask.vlan_tag2 =
703 				    rte_be_to_cpu_16(vlan_mask->hdr.vlan_tci);
704 			}
705 		}
706 		if (vlan_mask->has_more_vlan)
707 			rule->has_more_vlan_m = true;
708 	}
709 
710 	vlan_spec = item->spec;
711 	if (rule->key_conf.vlan_num == 1)
712 		rule->key_conf.spec.vlan_tag1 =
713 		    rte_be_to_cpu_16(vlan_spec->hdr.vlan_tci);
714 	else
715 		rule->key_conf.spec.vlan_tag2 =
716 		    rte_be_to_cpu_16(vlan_spec->hdr.vlan_tci);
717 
718 	if (vlan_mask && vlan_mask->has_more_vlan && vlan_spec->has_more_vlan) {
719 		rule->key_conf.vlan_num++;
720 		if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
721 			return rte_flow_error_set(error, EINVAL,
722 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
723 					  "Vlan_num is more than 2");
724 		rule->has_more_vlan_v = true;
725 	}
726 
727 	return 0;
728 }
729 
730 static bool
731 hns3_check_ipv4_mask_supported(const struct rte_flow_item_ipv4 *ipv4_mask)
732 {
733 	if (ipv4_mask->hdr.total_length || ipv4_mask->hdr.packet_id ||
734 	    ipv4_mask->hdr.fragment_offset || ipv4_mask->hdr.time_to_live ||
735 	    ipv4_mask->hdr.hdr_checksum)
736 		return false;
737 
738 	return true;
739 }
740 
741 static int
742 hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
743 		struct rte_flow_error *error)
744 {
745 	const struct rte_flow_item_ipv4 *ipv4_spec;
746 	const struct rte_flow_item_ipv4 *ipv4_mask;
747 
748 	hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
749 	rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4;
750 	rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
751 
752 	/* Only used to describe the protocol stack. */
753 	if (item->spec == NULL && item->mask == NULL)
754 		return 0;
755 
756 	if (item->mask) {
757 		ipv4_mask = item->mask;
758 		if (!hns3_check_ipv4_mask_supported(ipv4_mask)) {
759 			return rte_flow_error_set(error, EINVAL,
760 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
761 						  item,
762 						  "Only support src & dst ip,tos,proto in IPV4");
763 		}
764 
765 		if (ipv4_mask->hdr.src_addr) {
766 			hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
767 			rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID] =
768 			    rte_be_to_cpu_32(ipv4_mask->hdr.src_addr);
769 		}
770 
771 		if (ipv4_mask->hdr.dst_addr) {
772 			hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
773 			rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID] =
774 			    rte_be_to_cpu_32(ipv4_mask->hdr.dst_addr);
775 		}
776 
777 		if (ipv4_mask->hdr.type_of_service) {
778 			hns3_set_bit(rule->input_set, INNER_IP_TOS, 1);
779 			rule->key_conf.mask.ip_tos =
780 			    ipv4_mask->hdr.type_of_service;
781 		}
782 
783 		if (ipv4_mask->hdr.next_proto_id) {
784 			hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
785 			rule->key_conf.mask.ip_proto =
786 			    ipv4_mask->hdr.next_proto_id;
787 		}
788 	}
789 
790 	ipv4_spec = item->spec;
791 	rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID] =
792 	    rte_be_to_cpu_32(ipv4_spec->hdr.src_addr);
793 	rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID] =
794 	    rte_be_to_cpu_32(ipv4_spec->hdr.dst_addr);
795 	rule->key_conf.spec.ip_tos = ipv4_spec->hdr.type_of_service;
796 	rule->key_conf.spec.ip_proto = ipv4_spec->hdr.next_proto_id;
797 	return 0;
798 }
799 
800 static int
801 hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
802 		struct rte_flow_error *error)
803 {
804 	const struct rte_flow_item_ipv6 *ipv6_spec;
805 	const struct rte_flow_item_ipv6 *ipv6_mask;
806 
807 	hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
808 	rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6;
809 	rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
810 
811 	/* Only used to describe the protocol stack. */
812 	if (item->spec == NULL && item->mask == NULL)
813 		return 0;
814 
815 	if (item->mask) {
816 		ipv6_mask = item->mask;
817 		if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len ||
818 		    ipv6_mask->hdr.hop_limits) {
819 			return rte_flow_error_set(error, EINVAL,
820 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
821 						  item,
822 						  "Only support src & dst ip,proto in IPV6");
823 		}
824 		net_addr_to_host(rule->key_conf.mask.src_ip,
825 				 (const rte_be32_t *)ipv6_mask->hdr.src_addr,
826 				 IP_ADDR_LEN);
827 		net_addr_to_host(rule->key_conf.mask.dst_ip,
828 				 (const rte_be32_t *)ipv6_mask->hdr.dst_addr,
829 				 IP_ADDR_LEN);
830 		rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto;
831 		if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID])
832 			hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
833 		if (rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID])
834 			hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
835 		if (ipv6_mask->hdr.proto)
836 			hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
837 	}
838 
839 	ipv6_spec = item->spec;
840 	net_addr_to_host(rule->key_conf.spec.src_ip,
841 			 (const rte_be32_t *)ipv6_spec->hdr.src_addr,
842 			 IP_ADDR_LEN);
843 	net_addr_to_host(rule->key_conf.spec.dst_ip,
844 			 (const rte_be32_t *)ipv6_spec->hdr.dst_addr,
845 			 IP_ADDR_LEN);
846 	rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto;
847 
848 	return 0;
849 }
850 
851 static bool
852 hns3_check_tcp_mask_supported(const struct rte_flow_item_tcp *tcp_mask)
853 {
854 	if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack ||
855 	    tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags ||
856 	    tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum ||
857 	    tcp_mask->hdr.tcp_urp)
858 		return false;
859 
860 	return true;
861 }
862 
863 static int
864 hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
865 	       struct rte_flow_error *error)
866 {
867 	const struct rte_flow_item_tcp *tcp_spec;
868 	const struct rte_flow_item_tcp *tcp_mask;
869 
870 	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
871 	rule->key_conf.spec.ip_proto = IPPROTO_TCP;
872 	rule->key_conf.mask.ip_proto = IPPROTO_MASK;
873 
874 	/* Only used to describe the protocol stack. */
875 	if (item->spec == NULL && item->mask == NULL)
876 		return 0;
877 
878 	if (item->mask) {
879 		tcp_mask = item->mask;
880 		if (!hns3_check_tcp_mask_supported(tcp_mask)) {
881 			return rte_flow_error_set(error, EINVAL,
882 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
883 						  item,
884 						  "Only support src & dst port in TCP");
885 		}
886 
887 		if (tcp_mask->hdr.src_port) {
888 			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
889 			rule->key_conf.mask.src_port =
890 			    rte_be_to_cpu_16(tcp_mask->hdr.src_port);
891 		}
892 		if (tcp_mask->hdr.dst_port) {
893 			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
894 			rule->key_conf.mask.dst_port =
895 			    rte_be_to_cpu_16(tcp_mask->hdr.dst_port);
896 		}
897 	}
898 
899 	tcp_spec = item->spec;
900 	rule->key_conf.spec.src_port = rte_be_to_cpu_16(tcp_spec->hdr.src_port);
901 	rule->key_conf.spec.dst_port = rte_be_to_cpu_16(tcp_spec->hdr.dst_port);
902 
903 	return 0;
904 }
905 
906 static int
907 hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
908 	       struct rte_flow_error *error)
909 {
910 	const struct rte_flow_item_udp *udp_spec;
911 	const struct rte_flow_item_udp *udp_mask;
912 
913 	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
914 	rule->key_conf.spec.ip_proto = IPPROTO_UDP;
915 	rule->key_conf.mask.ip_proto = IPPROTO_MASK;
916 
917 	/* Only used to describe the protocol stack. */
918 	if (item->spec == NULL && item->mask == NULL)
919 		return 0;
920 
921 	if (item->mask) {
922 		udp_mask = item->mask;
923 		if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
924 			return rte_flow_error_set(error, EINVAL,
925 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
926 						  item,
927 						  "Only support src & dst port in UDP");
928 		}
929 		if (udp_mask->hdr.src_port) {
930 			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
931 			rule->key_conf.mask.src_port =
932 			    rte_be_to_cpu_16(udp_mask->hdr.src_port);
933 		}
934 		if (udp_mask->hdr.dst_port) {
935 			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
936 			rule->key_conf.mask.dst_port =
937 			    rte_be_to_cpu_16(udp_mask->hdr.dst_port);
938 		}
939 	}
940 
941 	udp_spec = item->spec;
942 	rule->key_conf.spec.src_port = rte_be_to_cpu_16(udp_spec->hdr.src_port);
943 	rule->key_conf.spec.dst_port = rte_be_to_cpu_16(udp_spec->hdr.dst_port);
944 
945 	return 0;
946 }
947 
948 static int
949 hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
950 		struct rte_flow_error *error)
951 {
952 	const struct rte_flow_item_sctp *sctp_spec;
953 	const struct rte_flow_item_sctp *sctp_mask;
954 
955 	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
956 	rule->key_conf.spec.ip_proto = IPPROTO_SCTP;
957 	rule->key_conf.mask.ip_proto = IPPROTO_MASK;
958 
959 	/* Only used to describe the protocol stack. */
960 	if (item->spec == NULL && item->mask == NULL)
961 		return 0;
962 
963 	if (item->mask) {
964 		sctp_mask = item->mask;
965 		if (sctp_mask->hdr.cksum)
966 			return rte_flow_error_set(error, EINVAL,
967 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
968 						  item,
969 						  "Only support src & dst port & v-tag in SCTP");
970 		if (sctp_mask->hdr.src_port) {
971 			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
972 			rule->key_conf.mask.src_port =
973 			    rte_be_to_cpu_16(sctp_mask->hdr.src_port);
974 		}
975 		if (sctp_mask->hdr.dst_port) {
976 			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
977 			rule->key_conf.mask.dst_port =
978 			    rte_be_to_cpu_16(sctp_mask->hdr.dst_port);
979 		}
980 		if (sctp_mask->hdr.tag) {
981 			hns3_set_bit(rule->input_set, INNER_SCTP_TAG, 1);
982 			rule->key_conf.mask.sctp_tag =
983 			    rte_be_to_cpu_32(sctp_mask->hdr.tag);
984 		}
985 	}
986 
987 	sctp_spec = item->spec;
988 	rule->key_conf.spec.src_port =
989 	    rte_be_to_cpu_16(sctp_spec->hdr.src_port);
990 	rule->key_conf.spec.dst_port =
991 	    rte_be_to_cpu_16(sctp_spec->hdr.dst_port);
992 	rule->key_conf.spec.sctp_tag = rte_be_to_cpu_32(sctp_spec->hdr.tag);
993 
994 	return 0;
995 }
996 
997 /*
998  * Check items before tunnel, save inner configs to outer configs, and clear
999  * inner configs.
1000  * The key consists of two parts: meta_data and tuple keys.
1001  * Meta data uses 15 bits, including vlan_num(2bit), des_port(12bit) and tunnel
1002  * packet(1bit).
1003  * Tuple keys use 384 bits, including ot_dst-mac(48bit), ot_dst-port(16bit),
1004  * ot_tun_vni(24bit), ot_flow_id(8bit), src-mac(48bit), dst-mac(48bit),
1005  * src-ip(32/128bit), dst-ip(32/128bit), src-port(16bit), dst-port(16bit),
1006  * tos(8bit), ether-proto(16bit), ip-proto(8bit), vlantag1(16bit),
1007  * Vlantag2(16bit) and sctp-tag(32bit).
1008  */
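/*
 * Illustrative pattern (spec/mask omitted) and the split performed when
 * the tunnel item is handled:
 *
 *	ETH / IPV4 / UDP / VXLAN / ETH / IPV4 / TCP / END
 *
 * The items before VXLAN are first parsed into the inner fields; when the
 * VXLAN item is reached, the ether type, IP proto and L4 source port
 * collected so far (if any) are moved to the outer_* members of the key,
 * the inner fields are cleared, and the items after VXLAN then fill the
 * inner fields again.
 */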
1009 static int
1010 hns3_handle_tunnel(const struct rte_flow_item *item,
1011 		   struct hns3_fdir_rule *rule, struct rte_flow_error *error)
1012 {
1013 	/* check eth config */
1014 	if (rule->input_set & (BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC)))
1015 		return rte_flow_error_set(error, EINVAL,
1016 					  RTE_FLOW_ERROR_TYPE_ITEM,
1017 					  item, "Outer eth mac is unsupported");
1018 	if (rule->input_set & BIT(INNER_ETH_TYPE)) {
1019 		hns3_set_bit(rule->input_set, OUTER_ETH_TYPE, 1);
1020 		rule->key_conf.spec.outer_ether_type =
1021 		    rule->key_conf.spec.ether_type;
1022 		rule->key_conf.mask.outer_ether_type =
1023 		    rule->key_conf.mask.ether_type;
1024 		hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 0);
1025 		rule->key_conf.spec.ether_type = 0;
1026 		rule->key_conf.mask.ether_type = 0;
1027 	}
1028 
1029 	/* check vlan config */
1030 	if (rule->input_set & (BIT(INNER_VLAN_TAG1) | BIT(INNER_VLAN_TAG2)))
1031 		return rte_flow_error_set(error, EINVAL,
1032 					  RTE_FLOW_ERROR_TYPE_ITEM,
1033 					  item,
1034 					  "Outer vlan tags is unsupported");
1035 
1036 	/* clear vlan_num for inner vlan select */
1037 	rule->key_conf.outer_vlan_num = rule->key_conf.vlan_num;
1038 	rule->key_conf.vlan_num = 0;
1039 
1040 	/* check L3 config */
1041 	if (rule->input_set &
1042 	    (BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | BIT(INNER_IP_TOS)))
1043 		return rte_flow_error_set(error, EINVAL,
1044 					  RTE_FLOW_ERROR_TYPE_ITEM,
1045 					  item, "Outer ip is unsupported");
1046 	if (rule->input_set & BIT(INNER_IP_PROTO)) {
1047 		hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
1048 		rule->key_conf.spec.outer_proto = rule->key_conf.spec.ip_proto;
1049 		rule->key_conf.mask.outer_proto = rule->key_conf.mask.ip_proto;
1050 		hns3_set_bit(rule->input_set, INNER_IP_PROTO, 0);
1051 		rule->key_conf.spec.ip_proto = 0;
1052 		rule->key_conf.mask.ip_proto = 0;
1053 	}
1054 
1055 	/* check L4 config */
1056 	if (rule->input_set & BIT(INNER_SCTP_TAG))
1057 		return rte_flow_error_set(error, EINVAL,
1058 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1059 					  "Outer sctp tag is unsupported");
1060 
1061 	if (rule->input_set & BIT(INNER_SRC_PORT)) {
1062 		hns3_set_bit(rule->input_set, OUTER_SRC_PORT, 1);
1063 		rule->key_conf.spec.outer_src_port =
1064 		    rule->key_conf.spec.src_port;
1065 		rule->key_conf.mask.outer_src_port =
1066 		    rule->key_conf.mask.src_port;
1067 		hns3_set_bit(rule->input_set, INNER_SRC_PORT, 0);
1068 		rule->key_conf.spec.src_port = 0;
1069 		rule->key_conf.mask.src_port = 0;
1070 	}
1071 	if (rule->input_set & BIT(INNER_DST_PORT)) {
1072 		hns3_set_bit(rule->input_set, INNER_DST_PORT, 0);
1073 		rule->key_conf.spec.dst_port = 0;
1074 		rule->key_conf.mask.dst_port = 0;
1075 	}
1076 	return 0;
1077 }
1078 
1079 static int
1080 hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1081 		 struct rte_flow_error *error)
1082 {
1083 	const struct rte_flow_item_vxlan *vxlan_spec;
1084 	const struct rte_flow_item_vxlan *vxlan_mask;
1085 
1086 	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
1087 	rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
1088 	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
1089 		rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN;
1090 	else
1091 		rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN_GPE;
1092 
1093 	/* Only used to describe the protocol stack. */
1094 	if (item->spec == NULL && item->mask == NULL)
1095 		return 0;
1096 
1097 	vxlan_mask = item->mask;
1098 	vxlan_spec = item->spec;
1099 
1100 	if (vxlan_mask->hdr.flags)
1101 		return rte_flow_error_set(error, EINVAL,
1102 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
1103 					  "Flags is not supported in VxLAN");
1104 
1105 	/* VNI must be totally masked or not. */
1106 	if (memcmp(vxlan_mask->hdr.vni, full_mask, VNI_OR_TNI_LEN) &&
1107 	    memcmp(vxlan_mask->hdr.vni, zero_mask, VNI_OR_TNI_LEN))
1108 		return rte_flow_error_set(error, EINVAL,
1109 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
1110 					  "VNI must be totally masked or not in VxLAN");
1111 	if (vxlan_mask->hdr.vni[0]) {
1112 		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
1113 		memcpy(rule->key_conf.mask.outer_tun_vni, vxlan_mask->hdr.vni,
1114 			   VNI_OR_TNI_LEN);
1115 	}
1116 	memcpy(rule->key_conf.spec.outer_tun_vni, vxlan_spec->hdr.vni,
1117 		   VNI_OR_TNI_LEN);
1118 	return 0;
1119 }
1120 
1121 static int
1122 hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1123 		 struct rte_flow_error *error)
1124 {
1125 	const struct rte_flow_item_nvgre *nvgre_spec;
1126 	const struct rte_flow_item_nvgre *nvgre_mask;
1127 
1128 	hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
1129 	rule->key_conf.spec.outer_proto = IPPROTO_GRE;
1130 	rule->key_conf.mask.outer_proto = IPPROTO_MASK;
1131 
1132 	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
1133 	rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_NVGRE;
1134 	rule->key_conf.mask.tunnel_type = ~HNS3_TUNNEL_TYPE_NVGRE;
1135 	/* Only used to describe the protocol stack. */
1136 	if (item->spec == NULL && item->mask == NULL)
1137 		return 0;
1138 
1139 	nvgre_mask = item->mask;
1140 	nvgre_spec = item->spec;
1141 
1142 	if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
1143 		return rte_flow_error_set(error, EINVAL,
1144 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
1145 					  "Ver/protocol is not supported in NVGRE");
1146 
1147 	/* TNI must be totally masked or not. */
1148 	if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
1149 	    memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
1150 		return rte_flow_error_set(error, EINVAL,
1151 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
1152 					  "TNI must be totally masked or not in NVGRE");
1153 
1154 	if (nvgre_mask->tni[0]) {
1155 		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
1156 		memcpy(rule->key_conf.mask.outer_tun_vni, nvgre_mask->tni,
1157 			   VNI_OR_TNI_LEN);
1158 	}
1159 	memcpy(rule->key_conf.spec.outer_tun_vni, nvgre_spec->tni,
1160 		   VNI_OR_TNI_LEN);
1161 
1162 	if (nvgre_mask->flow_id) {
1163 		hns3_set_bit(rule->input_set, OUTER_TUN_FLOW_ID, 1);
1164 		rule->key_conf.mask.outer_tun_flow_id = nvgre_mask->flow_id;
1165 	}
1166 	rule->key_conf.spec.outer_tun_flow_id = nvgre_spec->flow_id;
1167 	return 0;
1168 }
1169 
1170 static int
1171 hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1172 		  struct rte_flow_error *error)
1173 {
1174 	const struct rte_flow_item_geneve *geneve_spec;
1175 	const struct rte_flow_item_geneve *geneve_mask;
1176 
1177 	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
1178 	rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE;
1179 	rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
1180 	/* Only used to describe the protocol stack. */
1181 	if (item->spec == NULL && item->mask == NULL)
1182 		return 0;
1183 
1184 	geneve_mask = item->mask;
1185 	geneve_spec = item->spec;
1186 
1187 	if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
1188 		return rte_flow_error_set(error, EINVAL,
1189 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
1190 					  "Ver/protocol is not supported in GENEVE");
1191 	/* VNI must be totally masked or not. */
1192 	if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
1193 	    memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
1194 		return rte_flow_error_set(error, EINVAL,
1195 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
1196 					  "VNI must be totally masked or not in GENEVE");
1197 	if (geneve_mask->vni[0]) {
1198 		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
1199 		memcpy(rule->key_conf.mask.outer_tun_vni, geneve_mask->vni,
1200 			   VNI_OR_TNI_LEN);
1201 	}
1202 	memcpy(rule->key_conf.spec.outer_tun_vni, geneve_spec->vni,
1203 		   VNI_OR_TNI_LEN);
1204 	return 0;
1205 }
1206 
1207 static int
1208 hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1209 		  struct rte_flow_error *error)
1210 {
1211 	int ret;
1212 
1213 	if (item->spec == NULL && item->mask)
1214 		return rte_flow_error_set(error, EINVAL,
1215 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1216 					  "Can't configure FDIR with mask "
1217 					  "but without spec");
1218 	else if (item->spec && (item->mask == NULL))
1219 		return rte_flow_error_set(error, EINVAL,
1220 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1221 					  "Tunnel packets must be configured "
1222 					  "with a mask");
1223 
1224 	switch (item->type) {
1225 	case RTE_FLOW_ITEM_TYPE_VXLAN:
1226 	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1227 		ret = hns3_parse_vxlan(item, rule, error);
1228 		break;
1229 	case RTE_FLOW_ITEM_TYPE_NVGRE:
1230 		ret = hns3_parse_nvgre(item, rule, error);
1231 		break;
1232 	case RTE_FLOW_ITEM_TYPE_GENEVE:
1233 		ret = hns3_parse_geneve(item, rule, error);
1234 		break;
1235 	default:
1236 		return rte_flow_error_set(error, ENOTSUP,
1237 					  RTE_FLOW_ERROR_TYPE_ITEM,
1238 					  NULL, "Unsupported tunnel type!");
1239 	}
1240 	if (ret)
1241 		return ret;
1242 	return hns3_handle_tunnel(item, rule, error);
1243 }
1244 
1245 static int
1246 hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1247 		  struct items_step_mngr *step_mngr,
1248 		  struct rte_flow_error *error)
1249 {
1250 	int ret;
1251 
1252 	if (item->spec == NULL && item->mask)
1253 		return rte_flow_error_set(error, EINVAL,
1254 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1255 					  "Can't configure FDIR with mask "
1256 					  "but without spec");
1257 
1258 	switch (item->type) {
1259 	case RTE_FLOW_ITEM_TYPE_ETH:
1260 		ret = hns3_parse_eth(item, rule, error);
1261 		step_mngr->items = L2_next_items;
1262 		step_mngr->count = RTE_DIM(L2_next_items);
1263 		break;
1264 	case RTE_FLOW_ITEM_TYPE_VLAN:
1265 		ret = hns3_parse_vlan(item, rule, error);
1266 		step_mngr->items = L2_next_items;
1267 		step_mngr->count = RTE_DIM(L2_next_items);
1268 		break;
1269 	case RTE_FLOW_ITEM_TYPE_IPV4:
1270 		ret = hns3_parse_ipv4(item, rule, error);
1271 		step_mngr->items = L3_next_items;
1272 		step_mngr->count = RTE_DIM(L3_next_items);
1273 		break;
1274 	case RTE_FLOW_ITEM_TYPE_IPV6:
1275 		ret = hns3_parse_ipv6(item, rule, error);
1276 		step_mngr->items = L3_next_items;
1277 		step_mngr->count = RTE_DIM(L3_next_items);
1278 		break;
1279 	case RTE_FLOW_ITEM_TYPE_TCP:
1280 		ret = hns3_parse_tcp(item, rule, error);
1281 		step_mngr->items = L4_next_items;
1282 		step_mngr->count = RTE_DIM(L4_next_items);
1283 		break;
1284 	case RTE_FLOW_ITEM_TYPE_UDP:
1285 		ret = hns3_parse_udp(item, rule, error);
1286 		step_mngr->items = L4_next_items;
1287 		step_mngr->count = RTE_DIM(L4_next_items);
1288 		break;
1289 	case RTE_FLOW_ITEM_TYPE_SCTP:
1290 		ret = hns3_parse_sctp(item, rule, error);
1291 		step_mngr->items = L4_next_items;
1292 		step_mngr->count = RTE_DIM(L4_next_items);
1293 		break;
1294 	default:
1295 		return rte_flow_error_set(error, ENOTSUP,
1296 					  RTE_FLOW_ERROR_TYPE_ITEM,
1297 					  NULL, "Unsupported normal type!");
1298 	}
1299 
1300 	return ret;
1301 }
1302 
1303 static int
1304 hns3_validate_item(const struct rte_flow_item *item,
1305 		   struct items_step_mngr step_mngr,
1306 		   struct rte_flow_error *error)
1307 {
1308 	uint32_t i;
1309 
1310 	if (item->last)
1311 		return rte_flow_error_set(error, ENOTSUP,
1312 					  RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
1313 					  "Not supported last point for range");
1314 
1315 	for (i = 0; i < step_mngr.count; i++) {
1316 		if (item->type == step_mngr.items[i])
1317 			break;
1318 	}
1319 
1320 	if (i == step_mngr.count) {
1321 		return rte_flow_error_set(error, EINVAL,
1322 					  RTE_FLOW_ERROR_TYPE_ITEM,
1323 					  item, "Invalid or missing item");
1324 	}
1325 	return 0;
1326 }
1327 
1328 static inline bool
1329 is_tunnel_packet(enum rte_flow_item_type type)
1330 {
1331 	if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
1332 	    type == RTE_FLOW_ITEM_TYPE_VXLAN ||
1333 	    type == RTE_FLOW_ITEM_TYPE_NVGRE ||
1334 	    type == RTE_FLOW_ITEM_TYPE_GENEVE)
1335 		return true;
1336 	return false;
1337 }
1338 
1339 /*
1340  * Parse the flow director rule.
1341  * The supported PATTERN:
1342  *   case: non-tunnel packet:
1343  *     ETH : src-mac, dst-mac, ethertype
1344  *     VLAN: tag1, tag2
1345  *     IPv4: src-ip, dst-ip, tos, proto
1346  *     IPv6: src-ip(last 32 bit addr), dst-ip(last 32 bit addr), proto
1347  *     UDP : src-port, dst-port
1348  *     TCP : src-port, dst-port
1349  *     SCTP: src-port, dst-port, tag
1350  *   case: tunnel packet:
1351  *     OUTER-ETH: ethertype
1352  *     OUTER-L3 : proto
1353  *     OUTER-L4 : src-port, dst-port
1354  *     TUNNEL   : vni, flow-id(only valid when NVGRE)
1355  *     INNER-ETH/VLAN/IPv4/IPv6/UDP/TCP/SCTP: same as non-tunnel packet
1356  * The supported ACTION:
1357  *    QUEUE
1358  *    DROP
1359  *    COUNT
1360  *    MARK: the id range [0, 4094]
1361  *    FLAG
1362  *    RSS: only valid if the firmware supports FD_QUEUE_REGION.
1363  */
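/*
 * Illustrative rule (hypothetical port and queue values) accepted by this
 * parser: steer IPv4 TCP packets with destination port 80 to Rx queue 3.
 *
 *	struct rte_flow_item_tcp tcp_spec = { .hdr.dst_port = RTE_BE16(80) };
 *	struct rte_flow_item_tcp tcp_mask = { .hdr.dst_port = RTE_BE16(0xffff) };
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */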
1364 static int
1365 hns3_parse_fdir_filter(struct rte_eth_dev *dev,
1366 		       const struct rte_flow_item pattern[],
1367 		       const struct rte_flow_action actions[],
1368 		       struct hns3_fdir_rule *rule,
1369 		       struct rte_flow_error *error)
1370 {
1371 	struct hns3_adapter *hns = dev->data->dev_private;
1372 	const struct rte_flow_item *item;
1373 	struct items_step_mngr step_mngr;
1374 	int ret;
1375 
1376 	/* FDIR is available only in PF driver */
1377 	if (hns->is_vf)
1378 		return rte_flow_error_set(error, ENOTSUP,
1379 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1380 					  "Fdir not supported in VF");
1381 
1382 	step_mngr.items = first_items;
1383 	step_mngr.count = RTE_DIM(first_items);
1384 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1385 		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
1386 			continue;
1387 
1388 		ret = hns3_validate_item(item, step_mngr, error);
1389 		if (ret)
1390 			return ret;
1391 
1392 		if (is_tunnel_packet(item->type)) {
1393 			ret = hns3_parse_tunnel(item, rule, error);
1394 			if (ret)
1395 				return ret;
1396 			step_mngr.items = tunnel_next_items;
1397 			step_mngr.count = RTE_DIM(tunnel_next_items);
1398 		} else {
1399 			ret = hns3_parse_normal(item, rule, &step_mngr, error);
1400 			if (ret)
1401 				return ret;
1402 		}
1403 	}
1404 
1405 	return hns3_handle_actions(dev, actions, rule, error);
1406 }
1407 
1408 static void
1409 hns3_filterlist_flush(struct rte_eth_dev *dev)
1410 {
1411 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1412 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
1413 	struct hns3_flow_mem *flow_node;
1414 
1415 	fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
1416 	while (fdir_rule_ptr) {
1417 		TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
1418 		rte_free(fdir_rule_ptr);
1419 		fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
1420 	}
1421 
1422 	flow_node = TAILQ_FIRST(&hw->flow_list);
1423 	while (flow_node) {
1424 		TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
1425 		rte_free(flow_node->flow);
1426 		rte_free(flow_node);
1427 		flow_node = TAILQ_FIRST(&hw->flow_list);
1428 	}
1429 }
1430 
1431 static bool
1432 hns3_flow_rule_key_same(const struct rte_flow_action_rss *comp,
1433 			const struct rte_flow_action_rss *with)
1434 {
1435 	if (comp->key_len != with->key_len)
1436 		return false;
1437 
1438 	if (with->key_len == 0)
1439 		return true;
1440 
1441 	if (comp->key == NULL && with->key == NULL)
1442 		return true;
1443 
1444 	if (!(comp->key != NULL && with->key != NULL))
1445 		return false;
1446 
1447 	return !memcmp(comp->key, with->key, with->key_len);
1448 }
1449 
1450 static bool
1451 hns3_flow_rule_queues_same(const struct rte_flow_action_rss *comp,
1452 			   const struct rte_flow_action_rss *with)
1453 {
1454 	if (comp->queue_num != with->queue_num)
1455 		return false;
1456 
1457 	if (with->queue_num == 0)
1458 		return true;
1459 
1460 	if (comp->queue == NULL && with->queue == NULL)
1461 		return true;
1462 
1463 	if (!(comp->queue != NULL && with->queue != NULL))
1464 		return false;
1465 
1466 	return !memcmp(comp->queue, with->queue, with->queue_num * sizeof(with->queue[0]));
1467 }
1468 
1469 static bool
1470 hns3_action_rss_same(const struct rte_flow_action_rss *comp,
1471 		     const struct rte_flow_action_rss *with)
1472 {
1473 	bool same_level;
1474 	bool same_types;
1475 	bool same_func;
1476 
1477 	same_level = (comp->level == with->level);
1478 	same_types = (comp->types == with->types);
1479 	same_func = (comp->func == with->func);
1480 
1481 	return same_level && same_types && same_func &&
1482 		hns3_flow_rule_key_same(comp, with) &&
1483 		hns3_flow_rule_queues_same(comp, with);
1484 }
1485 
1486 static bool
1487 hns3_valid_ipv6_sctp_rss_types(struct hns3_hw *hw, uint64_t types)
1488 {
1489 	/*
1490 	 * Some hardware doesn't support using src/dst port fields to hash
1491 	 * for the IPV6 SCTP packet type.
1492 	 */
1493 	if (types & RTE_ETH_RSS_NONFRAG_IPV6_SCTP &&
1494 	    types & HNS3_RSS_SUPPORT_L4_SRC_DST &&
1495 	    !hw->rss_info.ipv6_sctp_offload_supported)
1496 		return false;
1497 
1498 	return true;
1499 }
1500 
1501 static int
1502 hns3_flow_parse_hash_func(const struct rte_flow_action_rss *rss_act,
1503 			  struct hns3_flow_rss_conf *rss_conf,
1504 			  struct rte_flow_error *error)
1505 {
1506 	if (rss_act->func >= RTE_ETH_HASH_FUNCTION_MAX)
1507 		return rte_flow_error_set(error, ENOTSUP,
1508 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1509 					  NULL, "RSS hash func is not supported");
1510 
1511 	rss_conf->conf.func = rss_act->func;
1512 	return 0;
1513 }
1514 
1515 static int
1516 hns3_flow_parse_hash_key(struct hns3_hw *hw,
1517 			 const struct rte_flow_action_rss *rss_act,
1518 			 struct hns3_flow_rss_conf *rss_conf,
1519 			 struct rte_flow_error *error)
1520 {
1521 	if (rss_act->key_len != hw->rss_key_size)
1522 		return rte_flow_error_set(error, EINVAL,
1523 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1524 					  NULL, "invalid RSS key length");
1525 
1526 	if (rss_act->key != NULL)
1527 		memcpy(rss_conf->key, rss_act->key, rss_act->key_len);
1528 	else
1529 		memcpy(rss_conf->key, hns3_hash_key,
1530 			RTE_MIN(sizeof(hns3_hash_key), rss_act->key_len));
1531 	/* Need to record if user sets hash key. */
1532 	rss_conf->conf.key = rss_act->key;
1533 	rss_conf->conf.key_len = rss_act->key_len;
1534 
1535 	return 0;
1536 }
1537 
1538 static int
1539 hns3_flow_parse_queues(struct hns3_hw *hw,
1540 		       const struct rte_flow_action_rss *rss_act,
1541 		       struct hns3_flow_rss_conf *rss_conf,
1542 		       struct rte_flow_error *error)
1543 {
1544 	uint16_t i;
1545 
1546 	if (rss_act->queue_num > hw->rss_ind_tbl_size)
1547 		return rte_flow_error_set(error, ENOTSUP,
1548 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1549 					  NULL,
1550 					  "queue number cannot exceed the RSS indirection table size.");
1551 
1552 	if (rss_act->queue_num > HNS3_RSS_QUEUES_BUFFER_NUM)
1553 		return rte_flow_error_set(error, ENOTSUP,
1554 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1555 					  NULL,
1556 					  "configured queue number exceeds the queue buffer size supported by the driver");
1557 
1558 	for (i = 0; i < rss_act->queue_num; i++) {
1559 		if (rss_act->queue[i] >= hw->alloc_rss_size)
1560 			return rte_flow_error_set(error, EINVAL,
1561 						RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1562 						NULL,
1563 						"queue id must be less than queue number allocated to a TC");
1564 	}
1565 
1566 	memcpy(rss_conf->queue, rss_act->queue,
1567 	       rss_act->queue_num * sizeof(rss_conf->queue[0]));
1568 	rss_conf->conf.queue = rss_conf->queue;
1569 	rss_conf->conf.queue_num = rss_act->queue_num;
1570 
1571 	return 0;
1572 }
1573 
1574 static int
1575 hns3_flow_get_hw_pctype(struct hns3_hw *hw,
1576 			const struct rte_flow_action_rss *rss_act,
1577 			const struct hns3_hash_map_info *map,
1578 			struct hns3_flow_rss_conf *rss_conf,
1579 			struct rte_flow_error *error)
1580 {
1581 	uint64_t l3l4_src_dst, l3l4_refine, left_types;
1582 
1583 	if (rss_act->types == 0) {
1584 		/* Disable RSS hash of this packet type if types is zero. */
1585 		rss_conf->hw_pctypes |= map->hw_pctype;
1586 		return 0;
1587 	}
1588 
1589 	/*
1590 	 * Cannot have extra types other than rss_pctype and l3l4_types in this map.
1591 	 */
1592 	left_types = ~map->rss_pctype & rss_act->types;
1593 	if (left_types & ~map->l3l4_types)
1594 		return rte_flow_error_set(error, EINVAL,
1595 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1596 					  "cannot set extra types.");
1597 
1598 	l3l4_src_dst = left_types;
1599 	/* L3/L4 SRC and DST shouldn't be specified at the same time. */
1600 	l3l4_refine = rte_eth_rss_hf_refine(l3l4_src_dst);
1601 	if (l3l4_refine != l3l4_src_dst)
1602 		return rte_flow_error_set(error, ENOTSUP,
1603 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1604 					  "cannot specify L3_SRC/DST_ONLY or L4_SRC/DST_ONLY at the same time.");
1605 
1606 	if (!hns3_valid_ipv6_sctp_rss_types(hw, rss_act->types))
1607 		return rte_flow_error_set(error, ENOTSUP,
1608 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1609 					  "hardware doesn't support using L4 src/dst to hash for IPV6-SCTP.");
1610 
1611 	rss_conf->hw_pctypes |= map->hw_pctype;
1612 
1613 	return 0;
1614 }
1615 
1616 static int
1617 hns3_flow_parse_rss_types_by_ptype(struct hns3_hw *hw,
1618 				   const struct rte_flow_action_rss *rss_act,
1619 				   uint64_t pattern_type,
1620 				   struct hns3_flow_rss_conf *rss_conf,
1621 				   struct rte_flow_error *error)
1622 {
1623 	const struct hns3_hash_map_info *map;
1624 	bool matched = false;
1625 	uint16_t i;
1626 	int ret;
1627 
1628 	for (i = 0; i < RTE_DIM(hash_map_table); i++) {
1629 		map = &hash_map_table[i];
1630 		if (map->pattern_type != pattern_type) {
1631 			/*
1632 			 * If the target pattern type has already been matched
1633 			 * by an earlier entry in the hash map table, there is
1634 			 * no need to continue the walk.
1635 			 */
1636 			if (matched)
1637 				break;
1638 			continue;
1639 		}
1640 		matched = true;
1641 
1642 		/*
1643 		 * If the pattern type is matched and 'types' is zero, RSS hash is
1644 		 * disabled for all packet flow types related to this pattern type.
1645 		 * Otherwise, the RSS types must match the pattern type and cannot
1646 		 * carry extra or unsupported types.
1647 		 */
1648 		if (rss_act->types != 0 && !(map->rss_pctype & rss_act->types))
1649 			continue;
1650 
1651 		ret = hns3_flow_get_hw_pctype(hw, rss_act, map, rss_conf, error);
1652 		if (ret != 0)
1653 			return ret;
1654 	}
1655 
1656 	if (rss_conf->hw_pctypes != 0)
1657 		return 0;
1658 
1659 	if (matched)
1660 		return rte_flow_error_set(error, ENOTSUP,
1661 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1662 					  NULL, "RSS types are unsupported");
1663 
1664 	return rte_flow_error_set(error, ENOTSUP,
1665 				  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1666 				  NULL, "Pattern specified is unsupported");
1667 }
1668 
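/*
 * Collect the hardware packet types of every hash map entry whose RSS packet
 * type is included in 'types'.
 */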
1669 static uint64_t
1670 hns3_flow_get_all_hw_pctypes(uint64_t types)
1671 {
1672 	uint64_t hw_pctypes = 0;
1673 	uint16_t i;
1674 
1675 	for (i = 0; i < RTE_DIM(hash_map_table); i++) {
1676 		if (types & hash_map_table[i].rss_pctype)
1677 			hw_pctypes |= hash_map_table[i].hw_pctype;
1678 	}
1679 
1680 	return hw_pctypes;
1681 }
1682 
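/*
 * Save the requested RSS types and translate them into hardware packet types,
 * either globally (no pattern specified) or for the given pattern type.
 */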
1683 static int
1684 hns3_flow_parse_rss_types(struct hns3_hw *hw,
1685 			  const struct rte_flow_action_rss *rss_act,
1686 			  uint64_t pattern_type,
1687 			  struct hns3_flow_rss_conf *rss_conf,
1688 			  struct rte_flow_error *error)
1689 {
1690 	rss_conf->conf.types = rss_act->types;
1691 
1692 	/* No pattern is specified, so the RSS types are set globally. */
1693 	if (pattern_type == 0) {
1694 		if (!hns3_check_rss_types_valid(hw, rss_act->types))
1695 			return rte_flow_error_set(error, EINVAL,
1696 					RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1697 					NULL, "RSS types are invalid.");
1698 		rss_conf->hw_pctypes =
1699 				hns3_flow_get_all_hw_pctypes(rss_act->types);
1700 		return 0;
1701 	}
1702 
1703 	return hns3_flow_parse_rss_types_by_ptype(hw, rss_act, pattern_type,
1704 						  rss_conf, error);
1705 }
1706 
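/*
 * Parse an RSS action that isn't bound to a pattern: hash function, queues and
 * key are parsed first, then the RSS types are applied as global configuration.
 */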
1707 static int
1708 hns3_flow_parse_hash_global_conf(struct rte_eth_dev *dev,
1709 				 const struct rte_flow_action_rss *rss_act,
1710 				 struct hns3_flow_rss_conf *rss_conf,
1711 				 struct rte_flow_error *error)
1712 {
1713 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1714 	int ret;
1715 
1716 	ret = hns3_flow_parse_hash_func(rss_act, rss_conf, error);
1717 	if (ret != 0)
1718 		return ret;
1719 
1720 	if (rss_act->queue_num > 0) {
1721 		ret = hns3_flow_parse_queues(hw, rss_act, rss_conf, error);
1722 		if (ret != 0)
1723 			return ret;
1724 	}
1725 
1726 	if (rss_act->key_len > 0) {
1727 		ret = hns3_flow_parse_hash_key(hw, rss_act, rss_conf, error);
1728 		if (ret != 0)
1729 			return ret;
1730 	}
1731 
1732 	return hns3_flow_parse_rss_types(hw, rss_act, rss_conf->pattern_type,
1733 					 rss_conf, error);
1734 }
1735 
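/*
 * Convert a flow pattern into a bit mask of HNS3_HASH_HDR_* headers. Only ETH,
 * IPV4, IPV6, TCP, UDP and SCTP items without spec/last/mask are accepted,
 * each item must be allowed to follow the previous one, and duplicate headers
 * are rejected.
 */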
1736 static int
1737 hns3_flow_parse_pattern_type(const struct rte_flow_item pattern[],
1738 			     uint64_t *ptype, struct rte_flow_error *error)
1739 {
1740 	enum rte_flow_item_type pre_type = RTE_FLOW_ITEM_TYPE_VOID;
1741 	const char *message = "Pattern specified isn't supported";
1742 	uint64_t item_hdr, pattern_hdrs = 0;
1743 	enum rte_flow_item_type cur_type;
1744 
1745 	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
1746 		if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID)
1747 			continue;
1748 		if (pattern->mask || pattern->spec || pattern->last) {
1749 			message = "Header info shouldn't be specified";
1750 			goto unsup;
1751 		}
1752 
1753 		/* Check whether the current item is allowed to follow the previous one. */
1754 		if (pre_type >= RTE_DIM(hash_pattern_next_allow_items) ||
1755 		    !(hash_pattern_next_allow_items[pre_type] &
1756 				BIT_ULL(pattern->type)))
1757 			goto unsup;
1758 
1759 		cur_type = pattern->type;
1760 		/* A type beyond the array size is unsupported. */
1761 		if (cur_type >= RTE_DIM(hash_pattern_item_header))
1762 			goto unsup;
1763 
1764 		/* A zero value means the current header is unsupported. */
1765 		item_hdr = hash_pattern_item_header[cur_type];
1766 		if (item_hdr == 0)
1767 			goto unsup;
1768 
1769 		/* Have duplicate pattern header. */
1770 		/* Reject duplicate pattern headers. */
1771 			goto unsup;
1772 		pre_type = cur_type;
1773 		pattern_hdrs |= item_hdr;
1774 	}
1775 
1776 	if (pattern_hdrs != 0) {
1777 		*ptype = pattern_hdrs;
1778 		return 0;
1779 	}
1780 
1781 unsup:
1782 	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1783 				  pattern, message);
1784 }
1785 
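/*
 * Parse an RSS action together with a pattern: hash function, key and queues
 * (which are global configuration) are parsed first, then the pattern type and
 * the RSS types bound to it.
 */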
1786 static int
1787 hns3_flow_parse_pattern_act(struct rte_eth_dev *dev,
1788 			    const struct rte_flow_item pattern[],
1789 			    const struct rte_flow_action_rss *rss_act,
1790 			    struct hns3_flow_rss_conf *rss_conf,
1791 			    struct rte_flow_error *error)
1792 {
1793 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1794 	int ret;
1795 
1796 	ret = hns3_flow_parse_hash_func(rss_act, rss_conf, error);
1797 	if (ret != 0)
1798 		return ret;
1799 
1800 	if (rss_act->key_len > 0) {
1801 		ret = hns3_flow_parse_hash_key(hw, rss_act, rss_conf, error);
1802 		if (ret != 0)
1803 			return ret;
1804 	}
1805 
1806 	if (rss_act->queue_num > 0) {
1807 		ret = hns3_flow_parse_queues(hw, rss_act, rss_conf, error);
1808 		if (ret != 0)
1809 			return ret;
1810 	}
1811 
1812 	ret = hns3_flow_parse_pattern_type(pattern, &rss_conf->pattern_type,
1813 					   error);
1814 	if (ret != 0)
1815 		return ret;
1816 
1817 	ret = hns3_flow_parse_rss_types(hw, rss_act, rss_conf->pattern_type,
1818 					rss_conf, error);
1819 	if (ret != 0)
1820 		return ret;
1821 
1822 	if (rss_act->func != RTE_ETH_HASH_FUNCTION_DEFAULT ||
1823 	    rss_act->key_len > 0 || rss_act->queue_num > 0)
1824 		hns3_warn(hw, "hash func, key and queues are global configurations that work for all flow types. "
1825 			  "It is recommended not to set them together with a pattern.");
1826 
1827 	return 0;
1828 }
1829 
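/*
 * Return true if an RSS rule with the same pattern type and the same action
 * configuration already exists in the flow RSS list.
 */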
1830 static bool
1831 hns3_rss_action_is_dup(struct hns3_hw *hw,
1832 		       const struct hns3_flow_rss_conf *conf)
1833 {
1834 	struct hns3_rss_conf_ele *filter;
1835 
1836 	TAILQ_FOREACH(filter, &hw->flow_rss_list, entries) {
1837 		if (conf->pattern_type != filter->filter_info.pattern_type)
1838 			continue;
1839 
1840 		if (hns3_action_rss_same(&filter->filter_info.conf, &conf->conf))
1841 			return true;
1842 	}
1843 
1844 	return false;
1845 }
1846 
1847 /*
1848  * This function is used to parse and validate the RSS action.
1849  */
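/*
 * As an illustration only (not taken from this driver), a rule handled by this
 * parser could be created with a testpmd command along the lines of:
 *   flow create 0 ingress pattern eth / ipv4 / tcp / end
 *        actions rss types ipv4-tcp end queues end / end
 */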
1850 static int
1851 hns3_parse_rss_filter(struct rte_eth_dev *dev,
1852 		      const struct rte_flow_item pattern[],
1853 		      const struct rte_flow_action *actions,
1854 		      struct hns3_flow_rss_conf *rss_conf,
1855 		      struct rte_flow_error *error)
1856 {
1857 	struct hns3_adapter *hns = dev->data->dev_private;
1858 	const struct rte_flow_action_rss *rss_act;
1859 	const struct rte_flow_action *act;
1860 	const struct rte_flow_item *pat;
1861 	struct hns3_hw *hw = &hns->hw;
1862 	uint32_t index = 0;
1863 	int ret;
1864 
1865 	NEXT_ITEM_OF_ACTION(act, actions, index);
1866 	if (actions[1].type != RTE_FLOW_ACTION_TYPE_END)
1867 		return rte_flow_error_set(error, EINVAL,
1868 					  RTE_FLOW_ERROR_TYPE_ACTION,
1869 					  &actions[1],
1870 					  "Only one action is supported for RSS.");
1871 
1872 	rss_act = (const struct rte_flow_action_rss *)act->conf;
1873 	if (rss_act == NULL) {
1874 		return rte_flow_error_set(error, EINVAL,
1875 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1876 					  act, "lost RSS action configuration");
1877 	}
1878 
1879 	if (rss_act->level != 0)
1880 		return rte_flow_error_set(error, ENOTSUP,
1881 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1882 					  act,
1883 					  "RSS level is not supported");
1884 
1885 	index = 0;
1886 	NEXT_ITEM_OF_PATTERN(pat, pattern, index);
1887 	if (pat[0].type == RTE_FLOW_ITEM_TYPE_END) {
1888 		rss_conf->pattern_type = 0;
1889 		ret = hns3_flow_parse_hash_global_conf(dev, rss_act,
1890 						       rss_conf, error);
1891 	} else {
1892 		ret = hns3_flow_parse_pattern_act(dev, pat, rss_act,
1893 						  rss_conf, error);
1894 	}
1895 	if (ret != 0)
1896 		return ret;
1897 
1898 	if (hns3_rss_action_is_dup(hw, rss_conf))
1899 		return rte_flow_error_set(error, EINVAL,
1900 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1901 					  act, "duplicate RSS rule");
1902 
1903 	return 0;
1904 }
1905 
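/*
 * Fill the whole RSS indirection table by cycling through the queues given in
 * the RSS action and program the table to hardware.
 */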
1906 static int
1907 hns3_update_indir_table(struct hns3_hw *hw,
1908 			const struct rte_flow_action_rss *conf, uint16_t num)
1909 {
1910 	uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE_MAX];
1911 	uint16_t j;
1912 	uint32_t i;
1913 
1914 	/* Fill in redirection table */
1915 	for (i = 0, j = 0; i < hw->rss_ind_tbl_size; i++, j++) {
1916 		j %= num;
1917 		if (conf->queue[j] >= hw->alloc_rss_size) {
1918 			hns3_err(hw, "queue id(%u) set to redirection table "
1919 				 "exceeds queue number(%u) allocated to a TC.",
1920 				 conf->queue[j], hw->alloc_rss_size);
1921 			return -EINVAL;
1922 		}
1923 		indir_tbl[i] = conf->queue[j];
1924 	}
1925 
1926 	return hns3_set_rss_indir_table(hw, indir_tbl, hw->rss_ind_tbl_size);
1927 }
1928 
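/*
 * Return the full tuple mask of the given hardware packet type, or 0 if it is
 * not found in the hash map table.
 */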
1929 static uint64_t
1930 hns3_flow_get_pctype_tuple_mask(uint64_t hw_pctype)
1931 {
1932 	uint64_t tuple_mask = 0;
1933 	uint16_t i;
1934 
1935 	for (i = 0; i < RTE_DIM(hash_map_table); i++) {
1936 		if (hw_pctype == hash_map_table[i].hw_pctype) {
1937 			tuple_mask = hash_map_table[i].tuple_mask;
1938 			break;
1939 		}
1940 	}
1941 
1942 	return tuple_mask;
1943 }
1944 
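/*
 * Update the RSS tuple fields in hardware. When the rule carries a pattern,
 * only the tuples of the selected hardware packet types are modified;
 * otherwise the tuple fields are recalculated globally from the RSS types.
 */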
1945 static int
1946 hns3_flow_set_rss_ptype_tuple(struct hns3_hw *hw,
1947 			      struct hns3_flow_rss_conf *rss_conf)
1948 {
1949 	uint64_t old_tuple_fields, new_tuple_fields;
1950 	uint64_t hw_pctypes, tuples, tuple_mask = 0;
1951 	bool cfg_global_tuple;
1952 	int ret;
1953 
1954 	cfg_global_tuple = (rss_conf->pattern_type == 0);
1955 	if (!cfg_global_tuple) {
1956 		/*
1957 		 * To ensure that different packets do not affect each other,
1958 		 * we have to first read all tuple fields, and then only modify
1959 		 * the tuples for the specified packet type.
1960 		 */
1961 		ret = hns3_get_rss_tuple_field(hw, &old_tuple_fields);
1962 		if (ret != 0)
1963 			return ret;
1964 
1965 		new_tuple_fields = old_tuple_fields;
1966 		hw_pctypes = rss_conf->hw_pctypes;
1967 		while (hw_pctypes > 0) {
1968 			uint32_t idx = rte_bsf64(hw_pctypes);
1969 			uint64_t pctype = BIT_ULL(idx);
1970 
1971 			tuple_mask = hns3_flow_get_pctype_tuple_mask(pctype);
1972 			tuples = hns3_rss_calc_tuple_filed(rss_conf->conf.types);
1973 			new_tuple_fields &= ~tuple_mask;
1974 			new_tuple_fields |= tuples;
1975 			hw_pctypes &= ~pctype;
1976 		}
1977 	} else {
1978 		new_tuple_fields =
1979 			hns3_rss_calc_tuple_filed(rss_conf->conf.types);
1980 	}
1981 
1982 	ret = hns3_set_rss_tuple_field(hw, new_tuple_fields);
1983 	if (ret != 0)
1984 		return ret;
1985 
1986 	if (!cfg_global_tuple)
1987 		hns3_info(hw, "RSS tuple fields changed from 0x%" PRIx64 " to 0x%" PRIx64,
1988 			  old_tuple_fields, new_tuple_fields);
1989 
1990 	return 0;
1991 }
1992 
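/*
 * Apply one RSS rule to hardware. The indirection table, the hash
 * algorithm/key and the tuple fields are each programmed only when the
 * corresponding part of the rule is present.
 */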
1993 static int
1994 hns3_config_rss_filter(struct hns3_hw *hw,
1995 		       struct hns3_flow_rss_conf *rss_conf)
1996 {
1997 	struct rte_flow_action_rss *rss_act;
1998 	int ret;
1999 
2000 	rss_act = &rss_conf->conf;
2001 	if (rss_act->queue_num > 0) {
2002 		ret = hns3_update_indir_table(hw, rss_act, rss_act->queue_num);
2003 		if (ret) {
2004 			hns3_err(hw, "set queues action failed, ret = %d", ret);
2005 			return ret;
2006 		}
2007 	}
2008 
2009 	if (rss_act->key_len > 0 ||
2010 	    rss_act->func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
2011 		ret = hns3_update_rss_algo_key(hw, rss_act->func, rss_conf->key,
2012 					       rss_act->key_len);
2013 		if (ret != 0) {
2014 			hns3_err(hw, "set func or hash key action failed, ret = %d",
2015 				 ret);
2016 			return ret;
2017 		}
2018 	}
2019 
2020 	if (rss_conf->hw_pctypes > 0) {
2021 		ret = hns3_flow_set_rss_ptype_tuple(hw, rss_conf);
2022 		if (ret != 0) {
2023 			hns3_err(hw, "set types action failed, ret = %d", ret);
2024 			return ret;
2025 		}
2026 	}
2027 
2028 	return 0;
2029 }
2030 
2031 static int
2032 hns3_clear_rss_filter(struct rte_eth_dev *dev)
2033 {
2034 	struct hns3_adapter *hns = dev->data->dev_private;
2035 	struct hns3_rss_conf_ele *rss_filter_ptr;
2036 	struct hns3_hw *hw = &hns->hw;
2037 
2038 	rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
2039 	while (rss_filter_ptr) {
2040 		TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
2041 		rte_free(rss_filter_ptr);
2042 		rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
2043 	}
2044 
2045 	return hns3_config_rss(hns);
2046 }
2047 
2048 static int
2049 hns3_reconfig_all_rss_filter(struct hns3_hw *hw)
2050 {
2051 	struct hns3_rss_conf_ele *filter;
2052 	uint32_t rule_no = 0;
2053 	int ret;
2054 
2055 	TAILQ_FOREACH(filter, &hw->flow_rss_list, entries) {
2056 		ret = hns3_config_rss_filter(hw, &filter->filter_info);
2057 		if (ret != 0) {
2058 			hns3_err(hw, "config %uth RSS filter failed, ret = %d",
2059 				 rule_no, ret);
2060 			return ret;
2061 		}
2062 		rule_no++;
2063 	}
2064 
2065 	return 0;
2066 }
2067 
2068 static int
2069 hns3_restore_rss_filter(struct hns3_hw *hw)
2070 {
2071 	int ret;
2072 
2073 	pthread_mutex_lock(&hw->flows_lock);
2074 	ret = hns3_reconfig_all_rss_filter(hw);
2075 	pthread_mutex_unlock(&hw->flows_lock);
2076 
2077 	return ret;
2078 }
2079 
2080 int
2081 hns3_restore_filter(struct hns3_adapter *hns)
2082 {
2083 	struct hns3_hw *hw = &hns->hw;
2084 	int ret;
2085 
2086 	ret = hns3_restore_all_fdir_filter(hns);
2087 	if (ret != 0)
2088 		return ret;
2089 
2090 	return hns3_restore_rss_filter(hw);
2091 }
2092 
2093 static int
2094 hns3_flow_args_check(const struct rte_flow_attr *attr,
2095 		     const struct rte_flow_item pattern[],
2096 		     const struct rte_flow_action actions[],
2097 		     struct rte_flow_error *error)
2098 {
2099 	if (pattern == NULL)
2100 		return rte_flow_error_set(error, EINVAL,
2101 					  RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2102 					  NULL, "NULL pattern.");
2103 
2104 	if (actions == NULL)
2105 		return rte_flow_error_set(error, EINVAL,
2106 					  RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2107 					  NULL, "NULL action.");
2108 
2109 	if (attr == NULL)
2110 		return rte_flow_error_set(error, EINVAL,
2111 					  RTE_FLOW_ERROR_TYPE_ATTR,
2112 					  NULL, "NULL attribute.");
2113 
2114 	return hns3_check_attr(attr, error);
2115 }
2116 
2117 /*
2118  * Check if the flow rule is supported by hns3.
2119  * It only checks the rule format and doesn't guarantee that the rule can be
2120  * programmed into the HW, e.g. because there may not be enough room for it.
2121  */
2122 static int
2123 hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2124 		   const struct rte_flow_item pattern[],
2125 		   const struct rte_flow_action actions[],
2126 		   struct rte_flow_error *error,
2127 		   struct hns3_filter_info *filter_info)
2128 {
2129 	union hns3_filter_conf *conf;
2130 	int ret;
2131 
2132 	ret = hns3_flow_args_check(attr, pattern, actions, error);
2133 	if (ret)
2134 		return ret;
2135 
2136 	hns3_parse_filter_type(pattern, actions, filter_info);
2137 	conf = &filter_info->conf;
2138 	if (filter_info->type == RTE_ETH_FILTER_HASH)
2139 		return hns3_parse_rss_filter(dev, pattern, actions,
2140 					     &conf->rss_conf, error);
2141 
2142 	return hns3_parse_fdir_filter(dev, pattern, actions,
2143 				      &conf->fdir_conf, error);
2144 }
2145 
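/*
 * Restore the default RSS configuration and then replay all RSS rules. This is
 * used to roll back hardware state when applying a rule fails and to re-apply
 * the remaining rules after a rule is destroyed.
 */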
2146 static int
2147 hns3_flow_rebuild_all_rss_filter(struct hns3_adapter *hns)
2148 {
2149 	struct hns3_hw *hw = &hns->hw;
2150 	int ret;
2151 
2152 	ret = hns3_config_rss(hns);
2153 	if (ret != 0) {
2154 		hns3_err(hw, "restore original RSS configuration failed, ret = %d.",
2155 			 ret);
2156 		return ret;
2157 	}
2158 	ret = hns3_reconfig_all_rss_filter(hw);
2159 	if (ret != 0)
2160 		hns3_err(hw, "rebuild all RSS filter failed, ret = %d.", ret);
2161 
2162 	return ret;
2163 }
2164 
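/*
 * Allocate a copy of the parsed RSS rule, point its 'queue'/'key' fields at
 * the copied arrays, program the rule to hardware and insert it into the flow
 * RSS list. The whole RSS state is rebuilt if programming fails.
 */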
2165 static int
2166 hns3_flow_create_rss_rule(struct rte_eth_dev *dev,
2167 			  struct hns3_flow_rss_conf *rss_conf,
2168 			  struct rte_flow *flow)
2169 {
2170 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2171 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2172 	struct hns3_rss_conf_ele *rss_filter_ptr;
2173 	struct hns3_flow_rss_conf *new_conf;
2174 	struct rte_flow_action_rss *rss_act;
2175 	int ret;
2176 
2177 	rss_filter_ptr = rte_zmalloc("hns3 rss filter",
2178 				     sizeof(struct hns3_rss_conf_ele), 0);
2179 	if (rss_filter_ptr == NULL) {
2180 		hns3_err(hw, "failed to allocate hns3_rss_filter memory");
2181 		return -ENOMEM;
2182 	}
2183 
2184 	new_conf = &rss_filter_ptr->filter_info;
2185 	memcpy(new_conf, rss_conf, sizeof(*new_conf));
2186 	rss_act = &new_conf->conf;
2187 	if (rss_act->queue_num > 0)
2188 		new_conf->conf.queue = new_conf->queue;
2189 	/*
2190 	 * There are two ways to deliver hash key action:
2191 	 * 1> 'key_len' is greater than zero and 'key' isn't NULL.
2192 	 * 2> 'key_len' is greater than zero, but 'key' is NULL.
2193 	 * For case 2, 'key' of the new_conf must be kept NULL so that the saved
2194 	 * rule still reflects the user's configuration when duplicate rules are
2195 	 * checked later.
2196 	 */
2197 	if (rss_act->key_len > 0 && rss_act->key != NULL)
2198 		new_conf->conf.key = new_conf->key;
2199 
2200 	ret = hns3_config_rss_filter(hw, new_conf);
2201 	if (ret != 0) {
2202 		rte_free(rss_filter_ptr);
2203 		(void)hns3_flow_rebuild_all_rss_filter(hns);
2204 		return ret;
2205 	}
2206 
2207 	TAILQ_INSERT_TAIL(&hw->flow_rss_list, rss_filter_ptr, entries);
2208 	flow->rule = rss_filter_ptr;
2209 	flow->filter_type = RTE_ETH_FILTER_HASH;
2210 
2211 	return 0;
2212 }
2213 
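/*
 * Create an FDIR rule: allocate the flow counter first (if requested), then
 * program the rule to hardware and add it to the FDIR list. The counter is
 * released if any later step fails.
 */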
2214 static int
2215 hns3_flow_create_fdir_rule(struct rte_eth_dev *dev,
2216 			   struct hns3_fdir_rule *fdir_rule,
2217 			   struct rte_flow_error *error,
2218 			   struct rte_flow *flow)
2219 {
2220 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2221 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2222 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
2223 	bool indir;
2224 	int ret;
2225 
2226 	indir = !!(fdir_rule->flags & HNS3_RULE_FLAG_COUNTER_INDIR);
2227 	if (fdir_rule->flags & HNS3_RULE_FLAG_COUNTER) {
2228 		ret = hns3_counter_new(dev, indir, fdir_rule->act_cnt.id,
2229 				       error);
2230 		if (ret != 0)
2231 			return ret;
2232 
2233 		flow->counter_id = fdir_rule->act_cnt.id;
2234 	}
2235 
2236 	fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
2237 				    sizeof(struct hns3_fdir_rule_ele), 0);
2238 	if (fdir_rule_ptr == NULL) {
2239 		hns3_err(hw, "failed to allocate fdir_rule memory.");
2240 		ret = -ENOMEM;
2241 		goto err_malloc;
2242 	}
2243 
2244 	/*
2245 	 * After all the preceding tasks are successfully configured, configure
2246 	 * rules to the hardware to simplify the rollback of rules in the
2247 	 * hardware.
2248 	 */
2249 	ret = hns3_fdir_filter_program(hns, fdir_rule, false);
2250 	if (ret != 0)
2251 		goto err_fdir_filter;
2252 
2253 	memcpy(&fdir_rule_ptr->fdir_conf, fdir_rule,
2254 		sizeof(struct hns3_fdir_rule));
2255 	TAILQ_INSERT_TAIL(&hw->flow_fdir_list, fdir_rule_ptr, entries);
2256 	flow->rule = fdir_rule_ptr;
2257 	flow->filter_type = RTE_ETH_FILTER_FDIR;
2258 
2259 	return 0;
2260 
2261 err_fdir_filter:
2262 	rte_free(fdir_rule_ptr);
2263 err_malloc:
2264 	if (fdir_rule->flags & HNS3_RULE_FLAG_COUNTER)
2265 		hns3_counter_release(dev, fdir_rule->act_cnt.id);
2266 
2267 	return ret;
2268 }
2269 
2270 /*
2271  * Create a flow rule.
2272  * Theoretically one rule can match more than one filter type.
2273  * The rule uses the first filter type it hits, so the matching sequence
2274  * matters.
2275  */
2276 static struct rte_flow *
2277 hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2278 		 const struct rte_flow_item pattern[],
2279 		 const struct rte_flow_action actions[],
2280 		 struct rte_flow_error *error)
2281 {
2282 	struct hns3_adapter *hns = dev->data->dev_private;
2283 	struct hns3_filter_info filter_info = {0};
2284 	struct hns3_flow_mem *flow_node;
2285 	struct hns3_hw *hw = &hns->hw;
2286 	union hns3_filter_conf *conf;
2287 	struct rte_flow *flow;
2288 	int ret;
2289 
2290 	ret = hns3_flow_validate(dev, attr, pattern, actions, error,
2291 				 &filter_info);
2292 	if (ret)
2293 		return NULL;
2294 
2295 	flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
2296 	if (flow == NULL) {
2297 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
2298 				   NULL, "Failed to allocate flow memory");
2299 		return NULL;
2300 	}
2301 	flow_node = rte_zmalloc("hns3 flow node",
2302 				sizeof(struct hns3_flow_mem), 0);
2303 	if (flow_node == NULL) {
2304 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
2305 				   NULL, "Failed to allocate flow list memory");
2306 		rte_free(flow);
2307 		return NULL;
2308 	}
2309 
2310 	flow_node->flow = flow;
2311 	conf = &filter_info.conf;
2312 	TAILQ_INSERT_TAIL(&hw->flow_list, flow_node, entries);
2313 	if (filter_info.type == RTE_ETH_FILTER_HASH)
2314 		ret = hns3_flow_create_rss_rule(dev, &conf->rss_conf, flow);
2315 	else
2316 		ret = hns3_flow_create_fdir_rule(dev, &conf->fdir_conf,
2317 						 error, flow);
2318 	if (ret == 0)
2319 		return flow;
2320 
2321 	rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2322 			   "Failed to create flow");
2323 	TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
2324 	rte_free(flow_node);
2325 	rte_free(flow);
2326 
2327 	return NULL;
2328 }
2329 
2330 /* Destroy a flow rule on hns3. */
2331 static int
2332 hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
2333 		  struct rte_flow_error *error)
2334 {
2335 	struct hns3_adapter *hns = dev->data->dev_private;
2336 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
2337 	struct hns3_rss_conf_ele *rss_filter_ptr;
2338 	struct hns3_flow_mem *flow_node;
2339 	enum rte_filter_type filter_type;
2340 	struct hns3_fdir_rule fdir_rule;
2341 	struct hns3_hw *hw = &hns->hw;
2342 	int ret;
2343 
2344 	if (flow == NULL)
2345 		return rte_flow_error_set(error, EINVAL,
2346 					  RTE_FLOW_ERROR_TYPE_HANDLE,
2347 					  flow, "Flow is NULL");
2348 
2349 	filter_type = flow->filter_type;
2350 	switch (filter_type) {
2351 	case RTE_ETH_FILTER_FDIR:
2352 		fdir_rule_ptr = (struct hns3_fdir_rule_ele *)flow->rule;
2353 		memcpy(&fdir_rule, &fdir_rule_ptr->fdir_conf,
2354 			   sizeof(struct hns3_fdir_rule));
2355 
2356 		ret = hns3_fdir_filter_program(hns, &fdir_rule, true);
2357 		if (ret)
2358 			return rte_flow_error_set(error, EIO,
2359 						  RTE_FLOW_ERROR_TYPE_HANDLE,
2360 						  flow,
2361 						  "Failed to destroy FDIR rule, try again");
2362 		if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
2363 			hns3_counter_release(dev, fdir_rule.act_cnt.id);
2364 		TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
2365 		rte_free(fdir_rule_ptr);
2366 		fdir_rule_ptr = NULL;
2367 		break;
2368 	case RTE_ETH_FILTER_HASH:
2369 		rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule;
2370 		TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
2371 		rte_free(rss_filter_ptr);
2372 		rss_filter_ptr = NULL;
2373 		(void)hns3_flow_rebuild_all_rss_filter(hns);
2374 		break;
2375 	default:
2376 		return rte_flow_error_set(error, EINVAL,
2377 					  RTE_FLOW_ERROR_TYPE_HANDLE, flow,
2378 					  "Unsupported filter type");
2379 	}
2380 
2381 	TAILQ_FOREACH(flow_node, &hw->flow_list, entries) {
2382 		if (flow_node->flow == flow) {
2383 			TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
2384 			rte_free(flow_node);
2385 			flow_node = NULL;
2386 			break;
2387 		}
2388 	}
2389 	rte_free(flow);
2390 
2391 	return 0;
2392 }
2393 
2394 /* Destroy all flow rules associated with a port on hns3. */
2395 static int
2396 hns3_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
2397 {
2398 	struct hns3_adapter *hns = dev->data->dev_private;
2399 	int ret;
2400 
2401 	/* FDIR is available only in PF driver */
2402 	if (!hns->is_vf) {
2403 		ret = hns3_clear_all_fdir_filter(hns);
2404 		if (ret) {
2405 			rte_flow_error_set(error, ret,
2406 					   RTE_FLOW_ERROR_TYPE_HANDLE,
2407 					   NULL, "Failed to flush rule");
2408 			return ret;
2409 		}
2410 		hns3_counter_flush(dev);
2411 	}
2412 
2413 	ret = hns3_clear_rss_filter(dev);
2414 	if (ret) {
2415 		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
2416 				   NULL, "Failed to flush rss filter");
2417 		return ret;
2418 	}
2419 
2420 	hns3_filterlist_flush(dev);
2421 
2422 	return 0;
2423 }
2424 
2425 /* Query an existing flow rule. */
2426 static int
2427 hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
2428 		const struct rte_flow_action *actions, void *data,
2429 		struct rte_flow_error *error)
2430 {
2431 	struct rte_flow_action_rss *rss_conf;
2432 	struct hns3_rss_conf_ele *rss_rule;
2433 	struct rte_flow_query_count *qc;
2434 	int ret;
2435 
2436 	if (!flow->rule)
2437 		return rte_flow_error_set(error, EINVAL,
2438 			RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "invalid rule");
2439 
2440 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2441 		switch (actions->type) {
2442 		case RTE_FLOW_ACTION_TYPE_VOID:
2443 			break;
2444 		case RTE_FLOW_ACTION_TYPE_COUNT:
2445 			qc = (struct rte_flow_query_count *)data;
2446 			ret = hns3_counter_query(dev, flow, qc, error);
2447 			if (ret)
2448 				return ret;
2449 			break;
2450 		case RTE_FLOW_ACTION_TYPE_RSS:
2451 			if (flow->filter_type != RTE_ETH_FILTER_HASH) {
2452 				return rte_flow_error_set(error, ENOTSUP,
2453 					RTE_FLOW_ERROR_TYPE_ACTION,
2454 					actions, "action is not supported");
2455 			}
2456 			rss_conf = (struct rte_flow_action_rss *)data;
2457 			rss_rule = (struct hns3_rss_conf_ele *)flow->rule;
2458 			rte_memcpy(rss_conf, &rss_rule->filter_info.conf,
2459 				   sizeof(struct rte_flow_action_rss));
2460 			break;
2461 		default:
2462 			return rte_flow_error_set(error, ENOTSUP,
2463 				RTE_FLOW_ERROR_TYPE_ACTION,
2464 				actions, "action is not supported");
2465 		}
2466 	}
2467 
2468 	return 0;
2469 }
2470 
2471 static int
2472 hns3_flow_validate_wrap(struct rte_eth_dev *dev,
2473 			const struct rte_flow_attr *attr,
2474 			const struct rte_flow_item pattern[],
2475 			const struct rte_flow_action actions[],
2476 			struct rte_flow_error *error)
2477 {
2478 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2479 	struct hns3_filter_info filter_info = {0};
2480 	int ret;
2481 
2482 	pthread_mutex_lock(&hw->flows_lock);
2483 	ret = hns3_flow_validate(dev, attr, pattern, actions, error,
2484 				 &filter_info);
2485 	pthread_mutex_unlock(&hw->flows_lock);
2486 
2487 	return ret;
2488 }
2489 
2490 static struct rte_flow *
2491 hns3_flow_create_wrap(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2492 		      const struct rte_flow_item pattern[],
2493 		      const struct rte_flow_action actions[],
2494 		      struct rte_flow_error *error)
2495 {
2496 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2497 	struct rte_flow *flow;
2498 
2499 	pthread_mutex_lock(&hw->flows_lock);
2500 	flow = hns3_flow_create(dev, attr, pattern, actions, error);
2501 	pthread_mutex_unlock(&hw->flows_lock);
2502 
2503 	return flow;
2504 }
2505 
2506 static int
2507 hns3_flow_destroy_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
2508 		       struct rte_flow_error *error)
2509 {
2510 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2511 	int ret;
2512 
2513 	pthread_mutex_lock(&hw->flows_lock);
2514 	ret = hns3_flow_destroy(dev, flow, error);
2515 	pthread_mutex_unlock(&hw->flows_lock);
2516 
2517 	return ret;
2518 }
2519 
2520 static int
2521 hns3_flow_flush_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error)
2522 {
2523 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2524 	int ret;
2525 
2526 	pthread_mutex_lock(&hw->flows_lock);
2527 	ret = hns3_flow_flush(dev, error);
2528 	pthread_mutex_unlock(&hw->flows_lock);
2529 
2530 	return ret;
2531 }
2532 
2533 static int
2534 hns3_flow_query_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
2535 		     const struct rte_flow_action *actions, void *data,
2536 		     struct rte_flow_error *error)
2537 {
2538 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2539 	int ret;
2540 
2541 	pthread_mutex_lock(&hw->flows_lock);
2542 	ret = hns3_flow_query(dev, flow, actions, data, error);
2543 	pthread_mutex_unlock(&hw->flows_lock);
2544 
2545 	return ret;
2546 }
2547 
2548 static int
2549 hns3_check_indir_action(const struct rte_flow_indir_action_conf *conf,
2550 			const struct rte_flow_action *action,
2551 			struct rte_flow_error *error)
2552 {
2553 	if (!conf->ingress)
2554 		return rte_flow_error_set(error, EINVAL,
2555 				RTE_FLOW_ERROR_TYPE_ACTION,
2556 				NULL, "Indirect action must be ingress");
2557 
2558 	if (conf->egress)
2559 		return rte_flow_error_set(error, EINVAL,
2560 				RTE_FLOW_ERROR_TYPE_ACTION,
2561 				NULL, "Indirect action doesn't support egress");
2562 
2563 	if (conf->transfer)
2564 		return rte_flow_error_set(error, EINVAL,
2565 				RTE_FLOW_ERROR_TYPE_ACTION,
2566 				NULL, "Indirect action doesn't support transfer");
2567 
2568 	if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
2569 		return rte_flow_error_set(error, EINVAL,
2570 				RTE_FLOW_ERROR_TYPE_ACTION,
2571 				NULL, "Indirect action only supports count");
2572 
2573 	return 0;
2574 }
2575 
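/*
 * Create an indirect action handle. Only the COUNT action is supported: a
 * counter is created for the given id and marked as indirect.
 */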
2576 static struct rte_flow_action_handle *
2577 hns3_flow_action_create(struct rte_eth_dev *dev,
2578 			const struct rte_flow_indir_action_conf *conf,
2579 			const struct rte_flow_action *action,
2580 			struct rte_flow_error *error)
2581 {
2582 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2583 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2584 	const struct rte_flow_action_count *act_count;
2585 	struct rte_flow_action_handle *handle = NULL;
2586 	struct hns3_flow_counter *counter;
2587 
2588 	if (hns3_check_indir_action(conf, action, error))
2589 		return NULL;
2590 
2591 	handle = rte_zmalloc("hns3 action handle",
2592 			     sizeof(struct rte_flow_action_handle), 0);
2593 	if (handle == NULL) {
2594 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
2595 				   NULL, "Failed to allocate action memory");
2596 		return NULL;
2597 	}
2598 
2599 	pthread_mutex_lock(&hw->flows_lock);
2600 
2601 	act_count = (const struct rte_flow_action_count *)action->conf;
2602 	if (act_count->id >= pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1]) {
2603 		rte_flow_error_set(error, EINVAL,
2604 				   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2605 				   action, "Invalid counter id");
2606 		goto err_exit;
2607 	}
2608 
2609 	if (hns3_counter_new(dev, false, act_count->id, error))
2610 		goto err_exit;
2611 
2612 	counter = hns3_counter_lookup(dev, act_count->id);
2613 	if (counter == NULL) {
2614 		rte_flow_error_set(error, EINVAL,
2615 				   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2616 				   action, "Counter id not found");
2617 		goto err_exit;
2618 	}
2619 
2620 	counter->indirect = true;
2621 	handle->indirect_type = HNS3_INDIRECT_ACTION_TYPE_COUNT;
2622 	handle->counter_id = counter->id;
2623 
2624 	pthread_mutex_unlock(&hw->flows_lock);
2625 	return handle;
2626 
2627 err_exit:
2628 	pthread_mutex_unlock(&hw->flows_lock);
2629 	rte_free(handle);
2630 	return NULL;
2631 }
2632 
2633 static int
2634 hns3_flow_action_destroy(struct rte_eth_dev *dev,
2635 			 struct rte_flow_action_handle *handle,
2636 			 struct rte_flow_error *error)
2637 {
2638 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2639 	struct hns3_flow_counter *counter;
2640 
2641 	pthread_mutex_lock(&hw->flows_lock);
2642 
2643 	if (handle->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT) {
2644 		pthread_mutex_unlock(&hw->flows_lock);
2645 		return rte_flow_error_set(error, EINVAL,
2646 					RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2647 					handle, "Invalid indirect type");
2648 	}
2649 
2650 	counter = hns3_counter_lookup(dev, handle->counter_id);
2651 	if (counter == NULL) {
2652 		pthread_mutex_unlock(&hw->flows_lock);
2653 		return rte_flow_error_set(error, EINVAL,
2654 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2655 				handle, "Counter id doesn't exist");
2656 	}
2657 
2658 	if (counter->ref_cnt > 1) {
2659 		pthread_mutex_unlock(&hw->flows_lock);
2660 		return rte_flow_error_set(error, EBUSY,
2661 				RTE_FLOW_ERROR_TYPE_HANDLE,
2662 				handle, "Counter id in use");
2663 	}
2664 
2665 	(void)hns3_counter_release(dev, handle->counter_id);
2666 	rte_free(handle);
2667 
2668 	pthread_mutex_unlock(&hw->flows_lock);
2669 	return 0;
2670 }
2671 
2672 static int
2673 hns3_flow_action_query(struct rte_eth_dev *dev,
2674 		 const struct rte_flow_action_handle *handle,
2675 		 void *data,
2676 		 struct rte_flow_error *error)
2677 {
2678 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2679 	struct rte_flow flow;
2680 	int ret;
2681 
2682 	pthread_mutex_lock(&hw->flows_lock);
2683 
2684 	if (handle->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT) {
2685 		pthread_mutex_unlock(&hw->flows_lock);
2686 		return rte_flow_error_set(error, EINVAL,
2687 					RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2688 					handle, "Invalid indirect type");
2689 	}
2690 
2691 	memset(&flow, 0, sizeof(flow));
2692 	flow.counter_id = handle->counter_id;
2693 	ret = hns3_counter_query(dev, &flow,
2694 				 (struct rte_flow_query_count *)data, error);
2695 	pthread_mutex_unlock(&hw->flows_lock);
2696 	return ret;
2697 }
2698 
2699 static const struct rte_flow_ops hns3_flow_ops = {
2700 	.validate = hns3_flow_validate_wrap,
2701 	.create = hns3_flow_create_wrap,
2702 	.destroy = hns3_flow_destroy_wrap,
2703 	.flush = hns3_flow_flush_wrap,
2704 	.query = hns3_flow_query_wrap,
2705 	.isolate = NULL,
2706 	.action_handle_create = hns3_flow_action_create,
2707 	.action_handle_destroy = hns3_flow_action_destroy,
2708 	.action_handle_query = hns3_flow_action_query,
2709 };
2710 
2711 int
2712 hns3_dev_flow_ops_get(struct rte_eth_dev *dev,
2713 		      const struct rte_flow_ops **ops)
2714 {
2715 	struct hns3_hw *hw;
2716 
2717 	hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2718 	if (hw->adapter_state >= HNS3_NIC_CLOSED)
2719 		return -ENODEV;
2720 
2721 	*ops = &hns3_flow_ops;
2722 	return 0;
2723 }
2724 
2725 void
2726 hns3_flow_init(struct rte_eth_dev *dev)
2727 {
2728 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2729 	pthread_mutexattr_t attr;
2730 
2731 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2732 		return;
2733 
2734 	pthread_mutexattr_init(&attr);
2735 	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
2736 	pthread_mutex_init(&hw->flows_lock, &attr);
2737 	dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
2738 
2739 	TAILQ_INIT(&hw->flow_fdir_list);
2740 	TAILQ_INIT(&hw->flow_rss_list);
2741 	TAILQ_INIT(&hw->flow_list);
2742 }
2743 
2744 void
2745 hns3_flow_uninit(struct rte_eth_dev *dev)
2746 {
2747 	struct rte_flow_error error;
2748 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2749 		hns3_flow_flush_wrap(dev, &error);
2750 }
2751