xref: /dpdk/drivers/net/hns3/hns3_flow.c (revision 42a8fc7daa46256d150278fc9a7a846e27945a0c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 HiSilicon Limited.
3  */
4 
5 #include <rte_flow_driver.h>
6 #include <rte_io.h>
7 #include <rte_malloc.h>
8 
9 #include "hns3_ethdev.h"
10 #include "hns3_logs.h"
11 #include "hns3_flow.h"
12 
13 static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF };
14 static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 };
15 
16 /* Special Filter id for non-specific packet flagging. Don't change value */
17 #define HNS3_MAX_FILTER_ID	0x0FFF
18 
19 #define ETHER_TYPE_MASK		0xFFFF
20 #define IPPROTO_MASK		0xFF
21 #define TUNNEL_TYPE_MASK	0xFFFF
22 
23 #define HNS3_TUNNEL_TYPE_VXLAN		0x12B5
24 #define HNS3_TUNNEL_TYPE_VXLAN_GPE	0x12B6
25 #define HNS3_TUNNEL_TYPE_GENEVE		0x17C1
26 #define HNS3_TUNNEL_TYPE_NVGRE		0x6558
27 
28 static enum rte_flow_item_type first_items[] = {
29 	RTE_FLOW_ITEM_TYPE_ETH,
30 	RTE_FLOW_ITEM_TYPE_IPV4,
31 	RTE_FLOW_ITEM_TYPE_IPV6,
32 	RTE_FLOW_ITEM_TYPE_TCP,
33 	RTE_FLOW_ITEM_TYPE_UDP,
34 	RTE_FLOW_ITEM_TYPE_SCTP,
35 	RTE_FLOW_ITEM_TYPE_ICMP,
36 	RTE_FLOW_ITEM_TYPE_NVGRE,
37 	RTE_FLOW_ITEM_TYPE_VXLAN,
38 	RTE_FLOW_ITEM_TYPE_GENEVE,
39 	RTE_FLOW_ITEM_TYPE_VXLAN_GPE
40 };
41 
42 static enum rte_flow_item_type L2_next_items[] = {
43 	RTE_FLOW_ITEM_TYPE_VLAN,
44 	RTE_FLOW_ITEM_TYPE_IPV4,
45 	RTE_FLOW_ITEM_TYPE_IPV6
46 };
47 
48 static enum rte_flow_item_type L3_next_items[] = {
49 	RTE_FLOW_ITEM_TYPE_TCP,
50 	RTE_FLOW_ITEM_TYPE_UDP,
51 	RTE_FLOW_ITEM_TYPE_SCTP,
52 	RTE_FLOW_ITEM_TYPE_NVGRE,
53 	RTE_FLOW_ITEM_TYPE_ICMP
54 };
55 
56 static enum rte_flow_item_type L4_next_items[] = {
57 	RTE_FLOW_ITEM_TYPE_VXLAN,
58 	RTE_FLOW_ITEM_TYPE_GENEVE,
59 	RTE_FLOW_ITEM_TYPE_VXLAN_GPE
60 };
61 
62 static enum rte_flow_item_type tunnel_next_items[] = {
63 	RTE_FLOW_ITEM_TYPE_ETH,
64 	RTE_FLOW_ITEM_TYPE_VLAN
65 };
66 
67 struct items_step_mngr {
68 	enum rte_flow_item_type *items;
69 	int count;
70 };
71 
72 static inline void
73 net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len)
74 {
75 	size_t i;
76 
77 	for (i = 0; i < len; i++)
78 		dst[i] = rte_be_to_cpu_32(src[i]);
79 }
80 
81 /*
82  * This function is used to find a general RSS action.
83  * 1. RSS is used to spread packets among several queues. The flow API
84  *    provides struct rte_flow_action_rss, and the user can configure its
85  *    fields such as func/level/types/key/queue to control the RSS function.
86  * 2. The flow API also supports queue region configuration for hns3. It is
87  *    implemented by FDIR + RSS in hns3 hardware: the user can create one FDIR
88  *    rule whose action is an RSS queue region.
89  * 3. When the action is RSS, the following rule is used to distinguish:
90  *    Case 1: the pattern has ETH and the action's queue_num > 0, which
91  *            indicates a queue region configuration.
92  *    Other cases: a general RSS action (see the example after this function).
93  */
94 static const struct rte_flow_action *
95 hns3_find_rss_general_action(const struct rte_flow_item pattern[],
96 			     const struct rte_flow_action actions[])
97 {
98 	const struct rte_flow_action *act = NULL;
99 	const struct hns3_rss_conf *rss;
100 	bool have_eth = false;
101 
102 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
103 		if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
104 			act = actions;
105 			break;
106 		}
107 	}
108 	if (!act)
109 		return NULL;
110 
111 	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
112 		if (pattern->type == RTE_FLOW_ITEM_TYPE_ETH) {
113 			have_eth = true;
114 			break;
115 		}
116 	}
117 
118 	rss = act->conf;
119 	if (have_eth && rss->conf.queue_num) {
120 		/*
121 		 * The pattern has ETH and the action's queue_num > 0, which
122 		 * indicates a queue region configuration.
123 		 * Because a queue region is implemented by FDIR + RSS in hns3
124 		 * hardware, it must go through the FDIR process, so return NULL
125 		 * here to avoid entering the RSS process.
126 		 */
127 		return NULL;
128 	}
129 
130 	return act;
131 }
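
/*
 * Illustrative application-side sketch (not compiled as part of this driver):
 * with an ETH item in the pattern and queue_num > 0 in the RSS action, the
 * rule is treated as a queue region and handled through the FDIR path; an RSS
 * action without such a pattern is treated as a general RSS action. The port
 * id, queue ids and error handling below are assumptions of the example.
 */
#if 0
static int
example_create_queue_region(uint16_t port_id)
{
	/* Four contiguous queues starting at 0: parsed as a queue region. */
	uint16_t queues[] = { 0, 1, 2, 3 };
	struct rte_flow_action_rss rss = {
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err) != NULL ?
	       0 : -EIO;
}
#endif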
132 
133 static inline struct hns3_flow_counter *
134 hns3_counter_lookup(struct rte_eth_dev *dev, uint32_t id)
135 {
136 	struct hns3_adapter *hns = dev->data->dev_private;
137 	struct hns3_pf *pf = &hns->pf;
138 	struct hns3_flow_counter *cnt;
139 
140 	LIST_FOREACH(cnt, &pf->flow_counters, next) {
141 		if (cnt->id == id)
142 			return cnt;
143 	}
144 	return NULL;
145 }
146 
147 static int
148 hns3_counter_new(struct rte_eth_dev *dev, uint32_t indirect, uint32_t id,
149 		 struct rte_flow_error *error)
150 {
151 	struct hns3_adapter *hns = dev->data->dev_private;
152 	struct hns3_pf *pf = &hns->pf;
153 	struct hns3_hw *hw = &hns->hw;
154 	struct hns3_flow_counter *cnt;
155 	uint64_t value;
156 	int ret;
157 
158 	cnt = hns3_counter_lookup(dev, id);
159 	if (cnt) {
160 		if (!cnt->indirect || cnt->indirect != indirect)
161 			return rte_flow_error_set(error, ENOTSUP,
162 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
163 				cnt,
164 				"Counter id is used, indirect flag not match");
165 		/* Clear the indirect counter on first use. */
166 		if (cnt->indirect && cnt->ref_cnt == 1)
167 			(void)hns3_fd_get_count(hw, id, &value);
168 		cnt->ref_cnt++;
169 		return 0;
170 	}
171 
172 	/* Clear the counter by read ops because the counter is read-clear */
173 	ret = hns3_fd_get_count(hw, id, &value);
174 	if (ret)
175 		return rte_flow_error_set(error, EIO,
176 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
177 					  "Clear counter failed!");
178 
179 	cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
180 	if (cnt == NULL)
181 		return rte_flow_error_set(error, ENOMEM,
182 					  RTE_FLOW_ERROR_TYPE_HANDLE, cnt,
183 					  "Alloc mem for counter failed");
184 	cnt->id = id;
185 	cnt->indirect = indirect;
186 	cnt->ref_cnt = 1;
187 	cnt->hits = 0;
188 	LIST_INSERT_HEAD(&pf->flow_counters, cnt, next);
189 	return 0;
190 }
191 
192 static int
193 hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
194 		   struct rte_flow_query_count *qc,
195 		   struct rte_flow_error *error)
196 {
197 	struct hns3_adapter *hns = dev->data->dev_private;
198 	struct hns3_flow_counter *cnt;
199 	uint64_t value;
200 	int ret;
201 
202 	/* FDIR is available only in PF driver */
203 	if (hns->is_vf)
204 		return rte_flow_error_set(error, ENOTSUP,
205 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
206 					  "Fdir is not supported in VF");
207 	cnt = hns3_counter_lookup(dev, flow->counter_id);
208 	if (cnt == NULL)
209 		return rte_flow_error_set(error, EINVAL,
210 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
211 					  "Can't find counter id");
212 
213 	ret = hns3_fd_get_count(&hns->hw, flow->counter_id, &value);
214 	if (ret) {
215 		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
216 				   NULL, "Read counter fail.");
217 		return ret;
218 	}
219 	qc->hits_set = 1;
220 	qc->hits = value;
221 	qc->bytes_set = 0;
222 	qc->bytes = 0;
223 
224 	return 0;
225 }
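
/*
 * Illustrative sketch (not compiled here): querying the COUNT action of an
 * existing rule from the application. hns3 reports packet hits only; byte
 * counts are not provided (bytes_set stays 0). "flow" is assumed to have been
 * created on this port with a COUNT action.
 */
#if 0
static int
example_query_flow_counter(uint16_t port_id, struct rte_flow *flow,
			   uint64_t *hits)
{
	struct rte_flow_action count_action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_query_count qc = { 0 };
	struct rte_flow_error err;
	int ret;

	ret = rte_flow_query(port_id, flow, &count_action, &qc, &err);
	if (ret != 0)
		return ret;

	*hits = qc.hits_set ? qc.hits : 0;
	return 0;
}
#endif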
226 
227 static int
228 hns3_counter_release(struct rte_eth_dev *dev, uint32_t id)
229 {
230 	struct hns3_adapter *hns = dev->data->dev_private;
231 	struct hns3_hw *hw = &hns->hw;
232 	struct hns3_flow_counter *cnt;
233 
234 	cnt = hns3_counter_lookup(dev, id);
235 	if (cnt == NULL) {
236 		hns3_err(hw, "Can't find available counter to release");
237 		return -EINVAL;
238 	}
239 	cnt->ref_cnt--;
240 	if (cnt->ref_cnt == 0) {
241 		LIST_REMOVE(cnt, next);
242 		rte_free(cnt);
243 	}
244 	return 0;
245 }
246 
247 static void
248 hns3_counter_flush(struct rte_eth_dev *dev)
249 {
250 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
251 	LIST_HEAD(counters, hns3_flow_counter) indir_counters;
252 	struct hns3_flow_counter *cnt_ptr;
253 
254 	LIST_INIT(&indir_counters);
255 	cnt_ptr = LIST_FIRST(&pf->flow_counters);
256 	while (cnt_ptr) {
257 		LIST_REMOVE(cnt_ptr, next);
258 		if (cnt_ptr->indirect)
259 			LIST_INSERT_HEAD(&indir_counters, cnt_ptr, next);
260 		else
261 			rte_free(cnt_ptr);
262 		cnt_ptr = LIST_FIRST(&pf->flow_counters);
263 	}
264 
265 	/* Reset the indirect action and add to pf->flow_counters list. */
266 	cnt_ptr = LIST_FIRST(&indir_counters);
267 	while (cnt_ptr) {
268 		LIST_REMOVE(cnt_ptr, next);
269 		cnt_ptr->ref_cnt = 1;
270 		cnt_ptr->hits = 0;
271 		LIST_INSERT_HEAD(&pf->flow_counters, cnt_ptr, next);
272 		cnt_ptr = LIST_FIRST(&indir_counters);
273 	}
274 }
275 
276 static int
277 hns3_handle_action_queue(struct rte_eth_dev *dev,
278 			 const struct rte_flow_action *action,
279 			 struct hns3_fdir_rule *rule,
280 			 struct rte_flow_error *error)
281 {
282 	struct hns3_adapter *hns = dev->data->dev_private;
283 	const struct rte_flow_action_queue *queue;
284 	struct hns3_hw *hw = &hns->hw;
285 
286 	queue = (const struct rte_flow_action_queue *)action->conf;
287 	if (queue->index >= hw->data->nb_rx_queues) {
288 		hns3_err(hw, "queue ID(%u) is greater than the number of available queues (%u) in the driver.",
289 			 queue->index, hw->data->nb_rx_queues);
290 		return rte_flow_error_set(error, EINVAL,
291 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
292 					  action, "Invalid queue ID in PF");
293 	}
294 
295 	rule->queue_id = queue->index;
296 	rule->nb_queues = 1;
297 	rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
298 	return 0;
299 }
300 
301 static int
302 hns3_handle_action_queue_region(struct rte_eth_dev *dev,
303 				const struct rte_flow_action *action,
304 				struct hns3_fdir_rule *rule,
305 				struct rte_flow_error *error)
306 {
307 	struct hns3_adapter *hns = dev->data->dev_private;
308 	const struct rte_flow_action_rss *conf = action->conf;
309 	struct hns3_hw *hw = &hns->hw;
310 	uint16_t idx;
311 
312 	if (!hns3_dev_get_support(hw, FD_QUEUE_REGION))
313 		return rte_flow_error_set(error, ENOTSUP,
314 			RTE_FLOW_ERROR_TYPE_ACTION, action,
315 			"Not support config queue region!");
316 
317 	if ((!rte_is_power_of_2(conf->queue_num)) ||
318 		conf->queue_num > hw->rss_size_max ||
319 		conf->queue[0] >= hw->data->nb_rx_queues ||
320 		conf->queue[0] + conf->queue_num > hw->data->nb_rx_queues) {
321 		return rte_flow_error_set(error, EINVAL,
322 			RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
323 			"Invalid start queue ID or queue num! The start queue "
324 			"ID must be valid, and the queue num must be a power "
325 			"of 2 and <= rss_size_max.");
326 	}
327 
328 	for (idx = 1; idx < conf->queue_num; idx++) {
329 		if (conf->queue[idx] != conf->queue[idx - 1] + 1)
330 			return rte_flow_error_set(error, EINVAL,
331 				RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
332 				"Invalid queue ID sequence! The queue IDs "
333 				"must increase consecutively.");
334 	}
335 
336 	rule->queue_id = conf->queue[0];
337 	rule->nb_queues = conf->queue_num;
338 	rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
339 	return 0;
340 }
341 
342 static int
343 hns3_handle_action_indirect(struct rte_eth_dev *dev,
344 			    const struct rte_flow_action *action,
345 			    struct hns3_fdir_rule *rule,
346 			    struct rte_flow_error *error)
347 {
348 	const struct rte_flow_action_handle *indir = action->conf;
349 
350 	if (indir->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT)
351 		return rte_flow_error_set(error, EINVAL,
352 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
353 				action, "Invalid indirect type");
354 
355 	if (hns3_counter_lookup(dev, indir->counter_id) == NULL)
356 		return rte_flow_error_set(error, EINVAL,
357 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
358 				action, "Counter id not exist");
359 
360 	rule->act_cnt.id = indir->counter_id;
361 	rule->flags |= (HNS3_RULE_FLAG_COUNTER | HNS3_RULE_FLAG_COUNTER_INDIR);
362 
363 	return 0;
364 }
365 
366 /*
367  * Parse the actions from the provided actions array.
368  * The actions are validated as they are copied into the rule.
369  *
370  * @param actions[in]
371  * @param rule[out]
372  *   NIC specific actions derived from the actions.
373  * @param error[out]
374  */
375 static int
376 hns3_handle_actions(struct rte_eth_dev *dev,
377 		    const struct rte_flow_action actions[],
378 		    struct hns3_fdir_rule *rule, struct rte_flow_error *error)
379 {
380 	struct hns3_adapter *hns = dev->data->dev_private;
381 	const struct rte_flow_action_count *act_count;
382 	const struct rte_flow_action_mark *mark;
383 	struct hns3_pf *pf = &hns->pf;
384 	uint32_t counter_num;
385 	int ret;
386 
387 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
388 		switch (actions->type) {
389 		case RTE_FLOW_ACTION_TYPE_QUEUE:
390 			ret = hns3_handle_action_queue(dev, actions, rule,
391 						       error);
392 			if (ret)
393 				return ret;
394 			break;
395 		case RTE_FLOW_ACTION_TYPE_DROP:
396 			rule->action = HNS3_FD_ACTION_DROP_PACKET;
397 			break;
398 		/*
399 		 * Here the real RSS action is a queue region.
400 		 * A queue region is implemented by FDIR + RSS in hns3 hardware:
401 		 * the FDIR action selects one queue region (start_queue_id and
402 		 * queue_num), then RSS spreads packets within that region by
403 		 * the RSS algorithm.
404 		 */
405 		case RTE_FLOW_ACTION_TYPE_RSS:
406 			ret = hns3_handle_action_queue_region(dev, actions,
407 							      rule, error);
408 			if (ret)
409 				return ret;
410 			break;
411 		case RTE_FLOW_ACTION_TYPE_MARK:
412 			mark =
413 			    (const struct rte_flow_action_mark *)actions->conf;
414 			if (mark->id >= HNS3_MAX_FILTER_ID)
415 				return rte_flow_error_set(error, EINVAL,
416 						RTE_FLOW_ERROR_TYPE_ACTION_CONF,
417 						actions,
418 						"Invalid Mark ID");
419 			rule->fd_id = mark->id;
420 			rule->flags |= HNS3_RULE_FLAG_FDID;
421 			break;
422 		case RTE_FLOW_ACTION_TYPE_FLAG:
423 			rule->fd_id = HNS3_MAX_FILTER_ID;
424 			rule->flags |= HNS3_RULE_FLAG_FDID;
425 			break;
426 		case RTE_FLOW_ACTION_TYPE_COUNT:
427 			act_count =
428 			    (const struct rte_flow_action_count *)actions->conf;
429 			counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
430 			if (act_count->id >= counter_num)
431 				return rte_flow_error_set(error, EINVAL,
432 						RTE_FLOW_ERROR_TYPE_ACTION_CONF,
433 						actions,
434 						"Invalid counter id");
435 			rule->act_cnt = *act_count;
436 			rule->flags |= HNS3_RULE_FLAG_COUNTER;
437 			rule->flags &= ~HNS3_RULE_FLAG_COUNTER_INDIR;
438 			break;
439 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
440 			ret = hns3_handle_action_indirect(dev, actions, rule,
441 							  error);
442 			if (ret)
443 				return ret;
444 			break;
445 		case RTE_FLOW_ACTION_TYPE_VOID:
446 			break;
447 		default:
448 			return rte_flow_error_set(error, ENOTSUP,
449 						  RTE_FLOW_ERROR_TYPE_ACTION,
450 						  NULL, "Unsupported action");
451 		}
452 	}
453 
454 	return 0;
455 }
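
/*
 * Illustrative sketch (not compiled here): an actions array accepted by the
 * parser above -- steer matched packets to one Rx queue, tag them with a MARK
 * id (which must be below HNS3_MAX_FILTER_ID) and attach a COUNT action. The
 * queue index and ids below are arbitrary example values.
 */
#if 0
static const struct rte_flow_action_queue example_queue = { .index = 3 };
static const struct rte_flow_action_mark example_mark = { .id = 0x123 };
static const struct rte_flow_action_count example_count = { .id = 1 };

static const struct rte_flow_action example_fdir_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_queue },
	{ .type = RTE_FLOW_ACTION_TYPE_MARK,  .conf = &example_mark },
	{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &example_count },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif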
456 
457 static int
458 hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
459 {
460 	if (!attr->ingress)
461 		return rte_flow_error_set(error, EINVAL,
462 					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
463 					  attr, "Ingress can't be zero");
464 	if (attr->egress)
465 		return rte_flow_error_set(error, ENOTSUP,
466 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
467 					  attr, "Not support egress");
468 	if (attr->transfer)
469 		return rte_flow_error_set(error, ENOTSUP,
470 					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
471 					  attr, "No support for transfer");
472 	if (attr->priority)
473 		return rte_flow_error_set(error, ENOTSUP,
474 					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
475 					  attr, "Not support priority");
476 	if (attr->group)
477 		return rte_flow_error_set(error, ENOTSUP,
478 					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
479 					  attr, "Not support group");
480 	return 0;
481 }
482 
483 static int
484 hns3_parse_eth(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
485 	       struct rte_flow_error *error __rte_unused)
486 {
487 	const struct rte_flow_item_eth *eth_spec;
488 	const struct rte_flow_item_eth *eth_mask;
489 
490 	/* Only used to describe the protocol stack. */
491 	if (item->spec == NULL && item->mask == NULL)
492 		return 0;
493 
494 	if (item->mask) {
495 		eth_mask = item->mask;
496 		if (eth_mask->type) {
497 			hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
498 			rule->key_conf.mask.ether_type =
499 			    rte_be_to_cpu_16(eth_mask->type);
500 		}
501 		if (!rte_is_zero_ether_addr(&eth_mask->src)) {
502 			hns3_set_bit(rule->input_set, INNER_SRC_MAC, 1);
503 			memcpy(rule->key_conf.mask.src_mac,
504 			       eth_mask->src.addr_bytes, RTE_ETHER_ADDR_LEN);
505 		}
506 		if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
507 			hns3_set_bit(rule->input_set, INNER_DST_MAC, 1);
508 			memcpy(rule->key_conf.mask.dst_mac,
509 			       eth_mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
510 		}
511 	}
512 
513 	eth_spec = item->spec;
514 	rule->key_conf.spec.ether_type = rte_be_to_cpu_16(eth_spec->type);
515 	memcpy(rule->key_conf.spec.src_mac, eth_spec->src.addr_bytes,
516 	       RTE_ETHER_ADDR_LEN);
517 	memcpy(rule->key_conf.spec.dst_mac, eth_spec->dst.addr_bytes,
518 	       RTE_ETHER_ADDR_LEN);
519 	return 0;
520 }
521 
522 static int
523 hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
524 		struct rte_flow_error *error)
525 {
526 	const struct rte_flow_item_vlan *vlan_spec;
527 	const struct rte_flow_item_vlan *vlan_mask;
528 
529 	rule->key_conf.vlan_num++;
530 	if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
531 		return rte_flow_error_set(error, EINVAL,
532 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
533 					  "Vlan_num is more than 2");
534 
535 	/* Only used to describe the protocol stack. */
536 	if (item->spec == NULL && item->mask == NULL)
537 		return 0;
538 
539 	if (item->mask) {
540 		vlan_mask = item->mask;
541 		if (vlan_mask->tci) {
542 			if (rule->key_conf.vlan_num == 1) {
543 				hns3_set_bit(rule->input_set, INNER_VLAN_TAG1,
544 					     1);
545 				rule->key_conf.mask.vlan_tag1 =
546 				    rte_be_to_cpu_16(vlan_mask->tci);
547 			} else {
548 				hns3_set_bit(rule->input_set, INNER_VLAN_TAG2,
549 					     1);
550 				rule->key_conf.mask.vlan_tag2 =
551 				    rte_be_to_cpu_16(vlan_mask->tci);
552 			}
553 		}
554 	}
555 
556 	vlan_spec = item->spec;
557 	if (rule->key_conf.vlan_num == 1)
558 		rule->key_conf.spec.vlan_tag1 =
559 		    rte_be_to_cpu_16(vlan_spec->tci);
560 	else
561 		rule->key_conf.spec.vlan_tag2 =
562 		    rte_be_to_cpu_16(vlan_spec->tci);
563 	return 0;
564 }
565 
566 static bool
567 hns3_check_ipv4_mask_supported(const struct rte_flow_item_ipv4 *ipv4_mask)
568 {
569 	if (ipv4_mask->hdr.total_length || ipv4_mask->hdr.packet_id ||
570 	    ipv4_mask->hdr.fragment_offset || ipv4_mask->hdr.time_to_live ||
571 	    ipv4_mask->hdr.hdr_checksum)
572 		return false;
573 
574 	return true;
575 }
576 
577 static int
578 hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
579 		struct rte_flow_error *error)
580 {
581 	const struct rte_flow_item_ipv4 *ipv4_spec;
582 	const struct rte_flow_item_ipv4 *ipv4_mask;
583 
584 	hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
585 	rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4;
586 	rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
587 
588 	/* Only used to describe the protocol stack. */
589 	if (item->spec == NULL && item->mask == NULL)
590 		return 0;
591 
592 	if (item->mask) {
593 		ipv4_mask = item->mask;
594 		if (!hns3_check_ipv4_mask_supported(ipv4_mask)) {
595 			return rte_flow_error_set(error, EINVAL,
596 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
597 						  item,
598 						  "Only support src & dst ip,tos,proto in IPV4");
599 		}
600 
601 		if (ipv4_mask->hdr.src_addr) {
602 			hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
603 			rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID] =
604 			    rte_be_to_cpu_32(ipv4_mask->hdr.src_addr);
605 		}
606 
607 		if (ipv4_mask->hdr.dst_addr) {
608 			hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
609 			rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID] =
610 			    rte_be_to_cpu_32(ipv4_mask->hdr.dst_addr);
611 		}
612 
613 		if (ipv4_mask->hdr.type_of_service) {
614 			hns3_set_bit(rule->input_set, INNER_IP_TOS, 1);
615 			rule->key_conf.mask.ip_tos =
616 			    ipv4_mask->hdr.type_of_service;
617 		}
618 
619 		if (ipv4_mask->hdr.next_proto_id) {
620 			hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
621 			rule->key_conf.mask.ip_proto =
622 			    ipv4_mask->hdr.next_proto_id;
623 		}
624 	}
625 
626 	ipv4_spec = item->spec;
627 	rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID] =
628 	    rte_be_to_cpu_32(ipv4_spec->hdr.src_addr);
629 	rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID] =
630 	    rte_be_to_cpu_32(ipv4_spec->hdr.dst_addr);
631 	rule->key_conf.spec.ip_tos = ipv4_spec->hdr.type_of_service;
632 	rule->key_conf.spec.ip_proto = ipv4_spec->hdr.next_proto_id;
633 	return 0;
634 }
635 
636 static int
637 hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
638 		struct rte_flow_error *error)
639 {
640 	const struct rte_flow_item_ipv6 *ipv6_spec;
641 	const struct rte_flow_item_ipv6 *ipv6_mask;
642 
643 	hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
644 	rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6;
645 	rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
646 
647 	/* Only used to describe the protocol stack. */
648 	if (item->spec == NULL && item->mask == NULL)
649 		return 0;
650 
651 	if (item->mask) {
652 		ipv6_mask = item->mask;
653 		if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len ||
654 		    ipv6_mask->hdr.hop_limits) {
655 			return rte_flow_error_set(error, EINVAL,
656 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
657 						  item,
658 						  "Only support src & dst ip,proto in IPV6");
659 		}
660 		net_addr_to_host(rule->key_conf.mask.src_ip,
661 				 (const rte_be32_t *)ipv6_mask->hdr.src_addr,
662 				 IP_ADDR_LEN);
663 		net_addr_to_host(rule->key_conf.mask.dst_ip,
664 				 (const rte_be32_t *)ipv6_mask->hdr.dst_addr,
665 				 IP_ADDR_LEN);
666 		rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto;
667 		if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID])
668 			hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
669 		if (rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID])
670 			hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
671 		if (ipv6_mask->hdr.proto)
672 			hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
673 	}
674 
675 	ipv6_spec = item->spec;
676 	net_addr_to_host(rule->key_conf.spec.src_ip,
677 			 (const rte_be32_t *)ipv6_spec->hdr.src_addr,
678 			 IP_ADDR_LEN);
679 	net_addr_to_host(rule->key_conf.spec.dst_ip,
680 			 (const rte_be32_t *)ipv6_spec->hdr.dst_addr,
681 			 IP_ADDR_LEN);
682 	rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto;
683 
684 	return 0;
685 }
686 
687 static bool
688 hns3_check_tcp_mask_supported(const struct rte_flow_item_tcp *tcp_mask)
689 {
690 	if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack ||
691 	    tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags ||
692 	    tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum ||
693 	    tcp_mask->hdr.tcp_urp)
694 		return false;
695 
696 	return true;
697 }
698 
699 static int
700 hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
701 	       struct rte_flow_error *error)
702 {
703 	const struct rte_flow_item_tcp *tcp_spec;
704 	const struct rte_flow_item_tcp *tcp_mask;
705 
706 	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
707 	rule->key_conf.spec.ip_proto = IPPROTO_TCP;
708 	rule->key_conf.mask.ip_proto = IPPROTO_MASK;
709 
710 	/* Only used to describe the protocol stack. */
711 	if (item->spec == NULL && item->mask == NULL)
712 		return 0;
713 
714 	if (item->mask) {
715 		tcp_mask = item->mask;
716 		if (!hns3_check_tcp_mask_supported(tcp_mask)) {
717 			return rte_flow_error_set(error, EINVAL,
718 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
719 						  item,
720 						  "Only support src & dst port in TCP");
721 		}
722 
723 		if (tcp_mask->hdr.src_port) {
724 			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
725 			rule->key_conf.mask.src_port =
726 			    rte_be_to_cpu_16(tcp_mask->hdr.src_port);
727 		}
728 		if (tcp_mask->hdr.dst_port) {
729 			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
730 			rule->key_conf.mask.dst_port =
731 			    rte_be_to_cpu_16(tcp_mask->hdr.dst_port);
732 		}
733 	}
734 
735 	tcp_spec = item->spec;
736 	rule->key_conf.spec.src_port = rte_be_to_cpu_16(tcp_spec->hdr.src_port);
737 	rule->key_conf.spec.dst_port = rte_be_to_cpu_16(tcp_spec->hdr.dst_port);
738 
739 	return 0;
740 }
741 
742 static int
743 hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
744 	       struct rte_flow_error *error)
745 {
746 	const struct rte_flow_item_udp *udp_spec;
747 	const struct rte_flow_item_udp *udp_mask;
748 
749 	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
750 	rule->key_conf.spec.ip_proto = IPPROTO_UDP;
751 	rule->key_conf.mask.ip_proto = IPPROTO_MASK;
752 
753 	/* Only used to describe the protocol stack. */
754 	if (item->spec == NULL && item->mask == NULL)
755 		return 0;
756 
757 	if (item->mask) {
758 		udp_mask = item->mask;
759 		if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
760 			return rte_flow_error_set(error, EINVAL,
761 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
762 						  item,
763 						  "Only support src & dst port in UDP");
764 		}
765 		if (udp_mask->hdr.src_port) {
766 			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
767 			rule->key_conf.mask.src_port =
768 			    rte_be_to_cpu_16(udp_mask->hdr.src_port);
769 		}
770 		if (udp_mask->hdr.dst_port) {
771 			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
772 			rule->key_conf.mask.dst_port =
773 			    rte_be_to_cpu_16(udp_mask->hdr.dst_port);
774 		}
775 	}
776 
777 	udp_spec = item->spec;
778 	rule->key_conf.spec.src_port = rte_be_to_cpu_16(udp_spec->hdr.src_port);
779 	rule->key_conf.spec.dst_port = rte_be_to_cpu_16(udp_spec->hdr.dst_port);
780 
781 	return 0;
782 }
783 
784 static int
785 hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
786 		struct rte_flow_error *error)
787 {
788 	const struct rte_flow_item_sctp *sctp_spec;
789 	const struct rte_flow_item_sctp *sctp_mask;
790 
791 	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
792 	rule->key_conf.spec.ip_proto = IPPROTO_SCTP;
793 	rule->key_conf.mask.ip_proto = IPPROTO_MASK;
794 
795 	/* Only used to describe the protocol stack. */
796 	if (item->spec == NULL && item->mask == NULL)
797 		return 0;
798 
799 	if (item->mask) {
800 		sctp_mask = item->mask;
801 		if (sctp_mask->hdr.cksum)
802 			return rte_flow_error_set(error, EINVAL,
803 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
804 						  item,
805 						  "Only support src & dst port in SCTP");
806 		if (sctp_mask->hdr.src_port) {
807 			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
808 			rule->key_conf.mask.src_port =
809 			    rte_be_to_cpu_16(sctp_mask->hdr.src_port);
810 		}
811 		if (sctp_mask->hdr.dst_port) {
812 			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
813 			rule->key_conf.mask.dst_port =
814 			    rte_be_to_cpu_16(sctp_mask->hdr.dst_port);
815 		}
816 		if (sctp_mask->hdr.tag) {
817 			hns3_set_bit(rule->input_set, INNER_SCTP_TAG, 1);
818 			rule->key_conf.mask.sctp_tag =
819 			    rte_be_to_cpu_32(sctp_mask->hdr.tag);
820 		}
821 	}
822 
823 	sctp_spec = item->spec;
824 	rule->key_conf.spec.src_port =
825 	    rte_be_to_cpu_16(sctp_spec->hdr.src_port);
826 	rule->key_conf.spec.dst_port =
827 	    rte_be_to_cpu_16(sctp_spec->hdr.dst_port);
828 	rule->key_conf.spec.sctp_tag = rte_be_to_cpu_32(sctp_spec->hdr.tag);
829 
830 	return 0;
831 }
832 
833 /*
834  * Check the items that precede the tunnel item, save the inner configs to
835  * the outer configs, and clear the inner configs.
836  * The key consists of two parts: meta_data and tuple keys.
837  * Meta data uses 15 bits, including vlan_num(2bit), des_port(12bit) and tunnel
838  * packet(1bit).
839  * Tuple keys use 384 bits, including ot_dst-mac(48bit), ot_dst-port(16bit),
840  * ot_tun_vni(24bit), ot_flow_id(8bit), src-mac(48bit), dst-mac(48bit),
841  * src-ip(32/128bit), dst-ip(32/128bit), src-port(16bit), dst-port(16bit),
842  * tos(8bit), ether-proto(16bit), ip-proto(8bit), vlantag1(16bit),
843  * Vlantag2(16bit) and sctp-tag(32bit).
844  */
845 static int
846 hns3_handle_tunnel(const struct rte_flow_item *item,
847 		   struct hns3_fdir_rule *rule, struct rte_flow_error *error)
848 {
849 	/* check eth config */
850 	if (rule->input_set & (BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC)))
851 		return rte_flow_error_set(error, EINVAL,
852 					  RTE_FLOW_ERROR_TYPE_ITEM,
853 					  item, "Outer eth mac is unsupported");
854 	if (rule->input_set & BIT(INNER_ETH_TYPE)) {
855 		hns3_set_bit(rule->input_set, OUTER_ETH_TYPE, 1);
856 		rule->key_conf.spec.outer_ether_type =
857 		    rule->key_conf.spec.ether_type;
858 		rule->key_conf.mask.outer_ether_type =
859 		    rule->key_conf.mask.ether_type;
860 		hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 0);
861 		rule->key_conf.spec.ether_type = 0;
862 		rule->key_conf.mask.ether_type = 0;
863 	}
864 
865 	/* check vlan config */
866 	if (rule->input_set & (BIT(INNER_VLAN_TAG1) | BIT(INNER_VLAN_TAG2)))
867 		return rte_flow_error_set(error, EINVAL,
868 					  RTE_FLOW_ERROR_TYPE_ITEM,
869 					  item,
870 					  "Outer vlan tags is unsupported");
871 
872 	/* clear vlan_num for inner vlan select */
873 	rule->key_conf.outer_vlan_num = rule->key_conf.vlan_num;
874 	rule->key_conf.vlan_num = 0;
875 
876 	/* check L3 config */
877 	if (rule->input_set &
878 	    (BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | BIT(INNER_IP_TOS)))
879 		return rte_flow_error_set(error, EINVAL,
880 					  RTE_FLOW_ERROR_TYPE_ITEM,
881 					  item, "Outer ip is unsupported");
882 	if (rule->input_set & BIT(INNER_IP_PROTO)) {
883 		hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
884 		rule->key_conf.spec.outer_proto = rule->key_conf.spec.ip_proto;
885 		rule->key_conf.mask.outer_proto = rule->key_conf.mask.ip_proto;
886 		hns3_set_bit(rule->input_set, INNER_IP_PROTO, 0);
887 		rule->key_conf.spec.ip_proto = 0;
888 		rule->key_conf.mask.ip_proto = 0;
889 	}
890 
891 	/* check L4 config */
892 	if (rule->input_set & BIT(INNER_SCTP_TAG))
893 		return rte_flow_error_set(error, EINVAL,
894 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
895 					  "Outer sctp tag is unsupported");
896 
897 	if (rule->input_set & BIT(INNER_SRC_PORT)) {
898 		hns3_set_bit(rule->input_set, OUTER_SRC_PORT, 1);
899 		rule->key_conf.spec.outer_src_port =
900 		    rule->key_conf.spec.src_port;
901 		rule->key_conf.mask.outer_src_port =
902 		    rule->key_conf.mask.src_port;
903 		hns3_set_bit(rule->input_set, INNER_SRC_PORT, 0);
904 		rule->key_conf.spec.src_port = 0;
905 		rule->key_conf.mask.src_port = 0;
906 	}
907 	if (rule->input_set & BIT(INNER_DST_PORT)) {
908 		hns3_set_bit(rule->input_set, INNER_DST_PORT, 0);
909 		rule->key_conf.spec.dst_port = 0;
910 		rule->key_conf.mask.dst_port = 0;
911 	}
912 	return 0;
913 }
914 
915 static int
916 hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
917 		 struct rte_flow_error *error)
918 {
919 	const struct rte_flow_item_vxlan *vxlan_spec;
920 	const struct rte_flow_item_vxlan *vxlan_mask;
921 
922 	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
923 	rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
924 	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
925 		rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN;
926 	else
927 		rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN_GPE;
928 
929 	/* Only used to describe the protocol stack. */
930 	if (item->spec == NULL && item->mask == NULL)
931 		return 0;
932 
933 	vxlan_mask = item->mask;
934 	vxlan_spec = item->spec;
935 
936 	if (vxlan_mask->flags)
937 		return rte_flow_error_set(error, EINVAL,
938 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
939 					  "The flags field is not supported in VxLAN");
940 
941 	/* VNI must be totally masked or not. */
942 	if (memcmp(vxlan_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
943 	    memcmp(vxlan_mask->vni, zero_mask, VNI_OR_TNI_LEN))
944 		return rte_flow_error_set(error, EINVAL,
945 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
946 					  "VNI must be totally masked or not in VxLAN");
947 	if (vxlan_mask->vni[0]) {
948 		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
949 		memcpy(rule->key_conf.mask.outer_tun_vni, vxlan_mask->vni,
950 			   VNI_OR_TNI_LEN);
951 	}
952 	memcpy(rule->key_conf.spec.outer_tun_vni, vxlan_spec->vni,
953 		   VNI_OR_TNI_LEN);
954 	return 0;
955 }
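
/*
 * Illustrative sketch (not compiled here): a VXLAN item that the parser above
 * accepts -- the VNI mask must be either all ones or all zeroes, and the flags
 * field must not be masked. The VNI value is an arbitrary example.
 */
#if 0
static const struct rte_flow_item_vxlan example_vxlan_spec = {
	.vni = { 0x01, 0x02, 0x03 },
};
static const struct rte_flow_item_vxlan example_vxlan_mask = {
	.vni = { 0xFF, 0xFF, 0xFF },
};
static const struct rte_flow_item example_vxlan_item = {
	.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	.spec = &example_vxlan_spec,
	.mask = &example_vxlan_mask,
};
#endif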
956 
957 static int
958 hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
959 		 struct rte_flow_error *error)
960 {
961 	const struct rte_flow_item_nvgre *nvgre_spec;
962 	const struct rte_flow_item_nvgre *nvgre_mask;
963 
964 	hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
965 	rule->key_conf.spec.outer_proto = IPPROTO_GRE;
966 	rule->key_conf.mask.outer_proto = IPPROTO_MASK;
967 
968 	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
969 	rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_NVGRE;
970 	rule->key_conf.mask.tunnel_type = ~HNS3_TUNNEL_TYPE_NVGRE;
971 	/* Only used to describe the protocol stack. */
972 	if (item->spec == NULL && item->mask == NULL)
973 		return 0;
974 
975 	nvgre_mask = item->mask;
976 	nvgre_spec = item->spec;
977 
978 	if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
979 		return rte_flow_error_set(error, EINVAL,
980 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
981 					  "Ver/protocol is not supported in NVGRE");
982 
983 	/* TNI must be totally masked or not. */
984 	if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
985 	    memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
986 		return rte_flow_error_set(error, EINVAL,
987 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
988 					  "TNI must be totally masked or not in NVGRE");
989 
990 	if (nvgre_mask->tni[0]) {
991 		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
992 		memcpy(rule->key_conf.mask.outer_tun_vni, nvgre_mask->tni,
993 			   VNI_OR_TNI_LEN);
994 	}
995 	memcpy(rule->key_conf.spec.outer_tun_vni, nvgre_spec->tni,
996 		   VNI_OR_TNI_LEN);
997 
998 	if (nvgre_mask->flow_id) {
999 		hns3_set_bit(rule->input_set, OUTER_TUN_FLOW_ID, 1);
1000 		rule->key_conf.mask.outer_tun_flow_id = nvgre_mask->flow_id;
1001 	}
1002 	rule->key_conf.spec.outer_tun_flow_id = nvgre_spec->flow_id;
1003 	return 0;
1004 }
1005 
1006 static int
1007 hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1008 		  struct rte_flow_error *error)
1009 {
1010 	const struct rte_flow_item_geneve *geneve_spec;
1011 	const struct rte_flow_item_geneve *geneve_mask;
1012 
1013 	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
1014 	rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE;
1015 	rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
1016 	/* Only used to describe the protocol stack. */
1017 	if (item->spec == NULL && item->mask == NULL)
1018 		return 0;
1019 
1020 	geneve_mask = item->mask;
1021 	geneve_spec = item->spec;
1022 
1023 	if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
1024 		return rte_flow_error_set(error, EINVAL,
1025 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
1026 					  "Ver/protocol is not supported in GENEVE");
1027 	/* VNI must be totally masked or not. */
1028 	if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
1029 	    memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
1030 		return rte_flow_error_set(error, EINVAL,
1031 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
1032 					  "VNI must be totally masked or not in GENEVE");
1033 	if (geneve_mask->vni[0]) {
1034 		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
1035 		memcpy(rule->key_conf.mask.outer_tun_vni, geneve_mask->vni,
1036 			   VNI_OR_TNI_LEN);
1037 	}
1038 	memcpy(rule->key_conf.spec.outer_tun_vni, geneve_spec->vni,
1039 		   VNI_OR_TNI_LEN);
1040 	return 0;
1041 }
1042 
1043 static int
1044 hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1045 		  struct rte_flow_error *error)
1046 {
1047 	int ret;
1048 
1049 	if (item->spec == NULL && item->mask)
1050 		return rte_flow_error_set(error, EINVAL,
1051 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1052 					  "Can't configure FDIR with mask "
1053 					  "but without spec");
1054 	else if (item->spec && (item->mask == NULL))
1055 		return rte_flow_error_set(error, EINVAL,
1056 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1057 					  "Tunnel packets must configure "
1058 					  "with mask");
1059 
1060 	switch (item->type) {
1061 	case RTE_FLOW_ITEM_TYPE_VXLAN:
1062 	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1063 		ret = hns3_parse_vxlan(item, rule, error);
1064 		break;
1065 	case RTE_FLOW_ITEM_TYPE_NVGRE:
1066 		ret = hns3_parse_nvgre(item, rule, error);
1067 		break;
1068 	case RTE_FLOW_ITEM_TYPE_GENEVE:
1069 		ret = hns3_parse_geneve(item, rule, error);
1070 		break;
1071 	default:
1072 		return rte_flow_error_set(error, ENOTSUP,
1073 					  RTE_FLOW_ERROR_TYPE_ITEM,
1074 					  NULL, "Unsupported tunnel type!");
1075 	}
1076 	if (ret)
1077 		return ret;
1078 	return hns3_handle_tunnel(item, rule, error);
1079 }
1080 
1081 static int
1082 hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1083 		  struct items_step_mngr *step_mngr,
1084 		  struct rte_flow_error *error)
1085 {
1086 	int ret;
1087 
1088 	if (item->spec == NULL && item->mask)
1089 		return rte_flow_error_set(error, EINVAL,
1090 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1091 					  "Can't configure FDIR with mask "
1092 					  "but without spec");
1093 
1094 	switch (item->type) {
1095 	case RTE_FLOW_ITEM_TYPE_ETH:
1096 		ret = hns3_parse_eth(item, rule, error);
1097 		step_mngr->items = L2_next_items;
1098 		step_mngr->count = RTE_DIM(L2_next_items);
1099 		break;
1100 	case RTE_FLOW_ITEM_TYPE_VLAN:
1101 		ret = hns3_parse_vlan(item, rule, error);
1102 		step_mngr->items = L2_next_items;
1103 		step_mngr->count = RTE_DIM(L2_next_items);
1104 		break;
1105 	case RTE_FLOW_ITEM_TYPE_IPV4:
1106 		ret = hns3_parse_ipv4(item, rule, error);
1107 		step_mngr->items = L3_next_items;
1108 		step_mngr->count = RTE_DIM(L3_next_items);
1109 		break;
1110 	case RTE_FLOW_ITEM_TYPE_IPV6:
1111 		ret = hns3_parse_ipv6(item, rule, error);
1112 		step_mngr->items = L3_next_items;
1113 		step_mngr->count = RTE_DIM(L3_next_items);
1114 		break;
1115 	case RTE_FLOW_ITEM_TYPE_TCP:
1116 		ret = hns3_parse_tcp(item, rule, error);
1117 		step_mngr->items = L4_next_items;
1118 		step_mngr->count = RTE_DIM(L4_next_items);
1119 		break;
1120 	case RTE_FLOW_ITEM_TYPE_UDP:
1121 		ret = hns3_parse_udp(item, rule, error);
1122 		step_mngr->items = L4_next_items;
1123 		step_mngr->count = RTE_DIM(L4_next_items);
1124 		break;
1125 	case RTE_FLOW_ITEM_TYPE_SCTP:
1126 		ret = hns3_parse_sctp(item, rule, error);
1127 		step_mngr->items = L4_next_items;
1128 		step_mngr->count = RTE_DIM(L4_next_items);
1129 		break;
1130 	default:
1131 		return rte_flow_error_set(error, ENOTSUP,
1132 					  RTE_FLOW_ERROR_TYPE_ITEM,
1133 					  NULL, "Unsupported normal type!");
1134 	}
1135 
1136 	return ret;
1137 }
1138 
1139 static int
1140 hns3_validate_item(const struct rte_flow_item *item,
1141 		   struct items_step_mngr step_mngr,
1142 		   struct rte_flow_error *error)
1143 {
1144 	int i;
1145 
1146 	if (item->last)
1147 		return rte_flow_error_set(error, ENOTSUP,
1148 					  RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
1149 					  "Not supported last point for range");
1150 
1151 	for (i = 0; i < step_mngr.count; i++) {
1152 		if (item->type == step_mngr.items[i])
1153 			break;
1154 	}
1155 
1156 	if (i == step_mngr.count) {
1157 		return rte_flow_error_set(error, EINVAL,
1158 					  RTE_FLOW_ERROR_TYPE_ITEM,
1159 					  item, "Invalid or missing item");
1160 	}
1161 	return 0;
1162 }
1163 
1164 static inline bool
1165 is_tunnel_packet(enum rte_flow_item_type type)
1166 {
1167 	if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
1168 	    type == RTE_FLOW_ITEM_TYPE_VXLAN ||
1169 	    type == RTE_FLOW_ITEM_TYPE_NVGRE ||
1170 	    type == RTE_FLOW_ITEM_TYPE_GENEVE)
1171 		return true;
1172 	return false;
1173 }
1174 
1175 /*
1176  * Parse the flow director rule.
1177  * The supported PATTERN:
1178  *   case: non-tunnel packet:
1179  *     ETH : src-mac, dst-mac, ethertype
1180  *     VLAN: tag1, tag2
1181  *     IPv4: src-ip, dst-ip, tos, proto
1182  *     IPv6: src-ip(last 32 bit addr), dst-ip(last 32 bit addr), proto
1183  *     UDP : src-port, dst-port
1184  *     TCP : src-port, dst-port
1185  *     SCTP: src-port, dst-port, tag
1186  *   case: tunnel packet:
1187  *     OUTER-ETH: ethertype
1188  *     OUTER-L3 : proto
1189  *     OUTER-L4 : src-port, dst-port
1190  *     TUNNEL   : vni, flow-id(only valid when NVGRE)
1191  *     INNER-ETH/VLAN/IPv4/IPv6/UDP/TCP/SCTP: same as non-tunnel packet
1192  * The supported ACTION:
1193  *    QUEUE
1194  *    DROP
1195  *    COUNT
1196  *    MARK: the id range [0, 4094]
1197  *    FLAG
1198  *    RSS: only valid if the firmware supports FD_QUEUE_REGION.
1199  */
1200 static int
1201 hns3_parse_fdir_filter(struct rte_eth_dev *dev,
1202 		       const struct rte_flow_item pattern[],
1203 		       const struct rte_flow_action actions[],
1204 		       struct hns3_fdir_rule *rule,
1205 		       struct rte_flow_error *error)
1206 {
1207 	struct hns3_adapter *hns = dev->data->dev_private;
1208 	const struct rte_flow_item *item;
1209 	struct items_step_mngr step_mngr;
1210 	int ret;
1211 
1212 	/* FDIR is available only in PF driver */
1213 	if (hns->is_vf)
1214 		return rte_flow_error_set(error, ENOTSUP,
1215 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1216 					  "Fdir not supported in VF");
1217 
1218 	step_mngr.items = first_items;
1219 	step_mngr.count = RTE_DIM(first_items);
1220 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1221 		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
1222 			continue;
1223 
1224 		ret = hns3_validate_item(item, step_mngr, error);
1225 		if (ret)
1226 			return ret;
1227 
1228 		if (is_tunnel_packet(item->type)) {
1229 			ret = hns3_parse_tunnel(item, rule, error);
1230 			if (ret)
1231 				return ret;
1232 			step_mngr.items = tunnel_next_items;
1233 			step_mngr.count = RTE_DIM(tunnel_next_items);
1234 		} else {
1235 			ret = hns3_parse_normal(item, rule, &step_mngr, error);
1236 			if (ret)
1237 				return ret;
1238 		}
1239 	}
1240 
1241 	return hns3_handle_actions(dev, actions, rule, error);
1242 }
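
/*
 * Illustrative application-side sketch (not compiled here) of an FDIR rule
 * that the parser above accepts: drop ingress IPv4/TCP packets with a given
 * destination port. The port id and TCP port are arbitrary example values.
 */
#if 0
static struct rte_flow *
example_create_tcp_drop_rule(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_item_tcp tcp_spec = {
		.hdr.dst_port = RTE_BE16(80),
	};
	struct rte_flow_item_tcp tcp_mask = {
		.hdr.dst_port = RTE_BE16(0xFFFF),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_attr attr = { .ingress = 1 };

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}
#endif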
1243 
1244 static void
1245 hns3_filterlist_flush(struct rte_eth_dev *dev)
1246 {
1247 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1248 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
1249 	struct hns3_rss_conf_ele *rss_filter_ptr;
1250 	struct hns3_flow_mem *flow_node;
1251 
1252 	fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
1253 	while (fdir_rule_ptr) {
1254 		TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
1255 		rte_free(fdir_rule_ptr);
1256 		fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
1257 	}
1258 
1259 	rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
1260 	while (rss_filter_ptr) {
1261 		TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
1262 		rte_free(rss_filter_ptr);
1263 		rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
1264 	}
1265 
1266 	flow_node = TAILQ_FIRST(&hw->flow_list);
1267 	while (flow_node) {
1268 		TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
1269 		rte_free(flow_node->flow);
1270 		rte_free(flow_node);
1271 		flow_node = TAILQ_FIRST(&hw->flow_list);
1272 	}
1273 }
1274 
1275 static bool
1276 hns3_action_rss_same(const struct rte_flow_action_rss *comp,
1277 		     const struct rte_flow_action_rss *with)
1278 {
1279 	bool rss_key_is_same;
1280 	bool func_is_same;
1281 
1282 	/*
1283 	 * When the user flushes all RSS rules, the RSS func is set to the
1284 	 * invalid value RTE_ETH_HASH_FUNCTION_MAX. If the user then creates a
1285 	 * flow after the flush, any valid RSS func is treated as different
1286 	 * from the one used before the flush. Otherwise, when the user creates
1287 	 * an RSS action with the func specified as RTE_ETH_HASH_FUNCTION_DEFAULT,
1288 	 * the func is considered the same between consecutive RSS flows.
1289 	 */
1290 	if (comp->func == RTE_ETH_HASH_FUNCTION_MAX)
1291 		func_is_same = false;
1292 	else
1293 		func_is_same = (with->func != RTE_ETH_HASH_FUNCTION_DEFAULT) ?
1294 				(comp->func == with->func) : true;
1295 
1296 	if (with->key_len == 0 || with->key == NULL)
1297 		rss_key_is_same = 1;
1298 	else
1299 		rss_key_is_same = comp->key_len == with->key_len &&
1300 		!memcmp(comp->key, with->key, with->key_len);
1301 
1302 	return (func_is_same && rss_key_is_same &&
1303 		comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) &&
1304 		comp->level == with->level &&
1305 		comp->queue_num == with->queue_num &&
1306 		!memcmp(comp->queue, with->queue,
1307 			sizeof(*with->queue) * with->queue_num));
1308 }
1309 
1310 static int
1311 hns3_rss_conf_copy(struct hns3_rss_conf *out,
1312 		   const struct rte_flow_action_rss *in)
1313 {
1314 	if (in->key_len > RTE_DIM(out->key) ||
1315 	    in->queue_num > RTE_DIM(out->queue))
1316 		return -EINVAL;
1317 	if (in->key == NULL && in->key_len)
1318 		return -EINVAL;
1319 	out->conf = (struct rte_flow_action_rss) {
1320 		.func = in->func,
1321 		.level = in->level,
1322 		.types = in->types,
1323 		.key_len = in->key_len,
1324 		.queue_num = in->queue_num,
1325 	};
1326 	out->conf.queue = memcpy(out->queue, in->queue,
1327 				sizeof(*in->queue) * in->queue_num);
1328 	if (in->key)
1329 		out->conf.key = memcpy(out->key, in->key, in->key_len);
1330 
1331 	return 0;
1332 }
1333 
1334 static bool
1335 hns3_rss_input_tuple_supported(struct hns3_hw *hw,
1336 			       const struct rte_flow_action_rss *rss)
1337 {
1338 	/*
1339 	 * For IP packets, it is not supported to use the src/dst port fields
1340 	 * in the RSS hash for the following packet types:
1341 	 * - IPV4 FRAG | IPV4 NONFRAG | IPV6 FRAG | IPV6 NONFRAG
1342 	 * Besides, on Kunpeng920 the NIC hardware cannot use the src/dst port
1343 	 * fields in the RSS hash for the IPV6 SCTP packet type. However,
1344 	 * Kunpeng930 and later Kunpeng series can use the src/dst port fields
1345 	 * in the RSS hash for the IPv6 SCTP packet type.
1346 	 */
1347 	if (rss->types & (RTE_ETH_RSS_L4_DST_ONLY | RTE_ETH_RSS_L4_SRC_ONLY) &&
1348 	    (rss->types & RTE_ETH_RSS_IP ||
1349 	    (!hw->rss_info.ipv6_sctp_offload_supported &&
1350 	    rss->types & RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
1351 		return false;
1352 
1353 	return true;
1354 }
1355 
1356 /*
1357  * This function is used to parse and validate the RSS action configuration.
1358  */
1359 static int
1360 hns3_parse_rss_filter(struct rte_eth_dev *dev,
1361 		      const struct rte_flow_action *actions,
1362 		      struct rte_flow_error *error)
1363 {
1364 	struct hns3_adapter *hns = dev->data->dev_private;
1365 	struct hns3_hw *hw = &hns->hw;
1366 	struct hns3_rss_conf *rss_conf = &hw->rss_info;
1367 	const struct rte_flow_action_rss *rss;
1368 	const struct rte_flow_action *act;
1369 	uint32_t act_index = 0;
1370 	uint16_t n;
1371 
1372 	NEXT_ITEM_OF_ACTION(act, actions, act_index);
1373 	rss = act->conf;
1374 
1375 	if (rss == NULL) {
1376 		return rte_flow_error_set(error, EINVAL,
1377 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1378 					  act, "no valid queues");
1379 	}
1380 
1381 	if (rss->queue_num > RTE_DIM(rss_conf->queue))
1382 		return rte_flow_error_set(error, ENOTSUP,
1383 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1384 					  "configured queue number exceeds the "
1385 					  "queue buffer size supported by the driver");
1386 
1387 	for (n = 0; n < rss->queue_num; n++) {
1388 		if (rss->queue[n] < hw->alloc_rss_size)
1389 			continue;
1390 		return rte_flow_error_set(error, EINVAL,
1391 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1392 					  "queue id must be less than queue number allocated to a TC");
1393 	}
1394 
1395 	if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types)
1396 		return rte_flow_error_set(error, EINVAL,
1397 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1398 					  act,
1399 					  "Flow types are unsupported by "
1400 					  "hns3's RSS");
1401 	if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX)
1402 		return rte_flow_error_set(error, ENOTSUP,
1403 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1404 					  "RSS hash func is not supported");
1405 	if (rss->level)
1406 		return rte_flow_error_set(error, ENOTSUP,
1407 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1408 					  "a nonzero RSS encapsulation level is not supported");
1409 	if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
1410 		return rte_flow_error_set(error, ENOTSUP,
1411 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1412 					  "RSS hash key must be exactly 40 bytes");
1413 
1414 	if (!hns3_rss_input_tuple_supported(hw, rss))
1415 		return rte_flow_error_set(error, EINVAL,
1416 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1417 					  &rss->types,
1418 					  "input RSS types are not supported");
1419 
1420 	act_index++;
1421 
1422 	/* Check if the next not void action is END */
1423 	NEXT_ITEM_OF_ACTION(act, actions, act_index);
1424 	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1425 		memset(rss_conf, 0, sizeof(struct hns3_rss_conf));
1426 		return rte_flow_error_set(error, EINVAL,
1427 					  RTE_FLOW_ERROR_TYPE_ACTION,
1428 					  act, "Not supported action.");
1429 	}
1430 
1431 	return 0;
1432 }
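
/*
 * Illustrative sketch (not compiled here) of an RSS action configuration that
 * passes the checks above: no encapsulation level, a 40-byte hash key,
 * supported hash types and queue ids below the RSS size allocated to a TC.
 * The key bytes and queue list are arbitrary example values.
 */
#if 0
static const uint8_t example_rss_key[40] = { 0x6d, 0x5a, /* ... */ };
static const uint16_t example_rss_queues[] = { 0, 1, 2, 3 };

static const struct rte_flow_action_rss example_rss_conf = {
	.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
	.types = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
	.key_len = sizeof(example_rss_key),
	.key = example_rss_key,
	.queue_num = RTE_DIM(example_rss_queues),
	.queue = example_rss_queues,
};
#endif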
1433 
1434 static int
1435 hns3_disable_rss(struct hns3_hw *hw)
1436 {
1437 	int ret;
1438 
1439 	ret = hns3_set_rss_tuple_by_rss_hf(hw, 0);
1440 	if (ret)
1441 		return ret;
1442 
1443 	return 0;
1444 }
1445 
1446 static void
1447 hns3_adjust_rss_key(struct hns3_hw *hw, struct rte_flow_action_rss *rss_conf)
1448 {
1449 	if (rss_conf->key == NULL || rss_conf->key_len < HNS3_RSS_KEY_SIZE) {
1450 		hns3_warn(hw, "Default RSS hash key to be set");
1451 		rss_conf->key = hns3_hash_key;
1452 		rss_conf->key_len = HNS3_RSS_KEY_SIZE;
1453 	}
1454 }
1455 
1456 static int
1457 hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func,
1458 			 uint8_t *hash_algo)
1459 {
1460 	enum rte_eth_hash_function algo_func = *func;
1461 	switch (algo_func) {
1462 	case RTE_ETH_HASH_FUNCTION_DEFAULT:
1463 		/* Keep *hash_algo as what it used to be */
1464 		algo_func = hw->rss_info.conf.func;
1465 		break;
1466 	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1467 		*hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ;
1468 		break;
1469 	case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
1470 		*hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE;
1471 		break;
1472 	case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
1473 		*hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP;
1474 		break;
1475 	default:
1476 		hns3_err(hw, "Invalid RSS algorithm configuration(%d)",
1477 			 algo_func);
1478 		return -EINVAL;
1479 	}
1480 	*func = algo_func;
1481 
1482 	return 0;
1483 }
1484 
1485 static int
1486 hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config)
1487 {
1488 	int ret;
1489 
1490 	hns3_adjust_rss_key(hw, rss_config);
1491 
1492 	ret = hns3_parse_rss_algorithm(hw, &rss_config->func,
1493 				       &hw->rss_info.hash_algo);
1494 	if (ret)
1495 		return ret;
1496 
1497 	ret = hns3_rss_set_algo_key(hw, rss_config->key);
1498 	if (ret)
1499 		return ret;
1500 
1501 	hw->rss_info.conf.func = rss_config->func;
1502 
1503 	ret = hns3_set_rss_tuple_by_rss_hf(hw, rss_config->types);
1504 	if (ret)
1505 		hns3_err(hw, "Update RSS tuples by rss hf failed %d", ret);
1506 
1507 	return ret;
1508 }
1509 
1510 static int
1511 hns3_update_indir_table(struct rte_eth_dev *dev,
1512 			const struct rte_flow_action_rss *conf, uint16_t num)
1513 {
1514 	struct hns3_adapter *hns = dev->data->dev_private;
1515 	struct hns3_hw *hw = &hns->hw;
1516 	uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE_MAX];
1517 	uint16_t j;
1518 	uint32_t i;
1519 
1520 	/* Fill in redirection table */
1521 	memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl,
1522 	       sizeof(hw->rss_info.rss_indirection_tbl));
1523 	for (i = 0, j = 0; i < hw->rss_ind_tbl_size; i++, j++) {
1524 		j %= num;
1525 		if (conf->queue[j] >= hw->alloc_rss_size) {
1526 			hns3_err(hw, "queue id(%u) set to redirection table "
1527 				 "exceeds queue number(%u) allocated to a TC.",
1528 				 conf->queue[j], hw->alloc_rss_size);
1529 			return -EINVAL;
1530 		}
1531 		indir_tbl[i] = conf->queue[j];
1532 	}
1533 
1534 	return hns3_set_rss_indir_table(hw, indir_tbl, hw->rss_ind_tbl_size);
1535 }
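
/*
 * Worked example (illustrative): with conf->queue = {4, 5, 6, 7} and num = 4,
 * the loop above fills the redirection table as 4, 5, 6, 7, 4, 5, 6, 7, ...
 * up to hw->rss_ind_tbl_size entries, so RSS spreads traffic evenly across
 * the four queues (assuming all four queue ids are within the RSS size
 * allocated to a TC).
 */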
1536 
1537 static int
1538 hns3_config_rss_filter(struct rte_eth_dev *dev,
1539 		       const struct hns3_rss_conf *conf, bool add)
1540 {
1541 	struct hns3_adapter *hns = dev->data->dev_private;
1542 	struct hns3_rss_conf_ele *rss_filter_ptr;
1543 	struct hns3_hw *hw = &hns->hw;
1544 	struct hns3_rss_conf *rss_info;
1545 	uint64_t flow_types;
1546 	uint16_t num;
1547 	int ret;
1548 
1549 	struct rte_flow_action_rss rss_flow_conf = {
1550 		.func = conf->conf.func,
1551 		.level = conf->conf.level,
1552 		.types = conf->conf.types,
1553 		.key_len = conf->conf.key_len,
1554 		.queue_num = conf->conf.queue_num,
1555 		.key = conf->conf.key_len ?
1556 		    (void *)(uintptr_t)conf->conf.key : NULL,
1557 		.queue = conf->conf.queue,
1558 	};
1559 
1560 	/* Filter the unsupported flow types */
1561 	flow_types = conf->conf.types ?
1562 		     rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT :
1563 		     hw->rss_info.conf.types;
1564 	if (flow_types != rss_flow_conf.types)
1565 		hns3_warn(hw, "modified RSS types based on hardware support, "
1566 			      "requested:0x%" PRIx64 " configured:0x%" PRIx64,
1567 			  rss_flow_conf.types, flow_types);
1568 	/* Update the useful flow types */
1569 	rss_flow_conf.types = flow_types;
1570 
1571 	rss_info = &hw->rss_info;
1572 	if (!add) {
1573 		if (!conf->valid)
1574 			return 0;
1575 
1576 		ret = hns3_disable_rss(hw);
1577 		if (ret) {
1578 			hns3_err(hw, "RSS disable failed(%d)", ret);
1579 			return ret;
1580 		}
1581 
1582 		if (rss_flow_conf.queue_num) {
1583 			/*
1584 			 * Since the content of the queue pointer has been reset
1585 			 * to 0, rss_info->conf.queue should be set to NULL.
1586 			 */
1587 			rss_info->conf.queue = NULL;
1588 			rss_info->conf.queue_num = 0;
1589 		}
1590 
1591 		/* set RSS func invalid after flushed */
1592 		rss_info->conf.func = RTE_ETH_HASH_FUNCTION_MAX;
1593 		return 0;
1594 	}
1595 
1596 	/* Set rx queues to use */
1597 	num = RTE_MIN(dev->data->nb_rx_queues, rss_flow_conf.queue_num);
1598 	if (rss_flow_conf.queue_num > num)
1599 		hns3_warn(hw, "Configured queue number %u exceeds the supported range and is truncated",
1600 			  rss_flow_conf.queue_num);
1601 	hns3_info(hw, "Max of contiguous %u PF queues are configured", num);
1602 
1603 	rte_spinlock_lock(&hw->lock);
1604 	if (num) {
1605 		ret = hns3_update_indir_table(dev, &rss_flow_conf, num);
1606 		if (ret)
1607 			goto rss_config_err;
1608 	}
1609 
1610 	/* Set hash algorithm and flow types by the user's config */
1611 	ret = hns3_hw_rss_hash_set(hw, &rss_flow_conf);
1612 	if (ret)
1613 		goto rss_config_err;
1614 
1615 	ret = hns3_rss_conf_copy(rss_info, &rss_flow_conf);
1616 	if (ret) {
1617 		hns3_err(hw, "RSS config init fail(%d)", ret);
1618 		goto rss_config_err;
1619 	}
1620 
1621 	/*
1622 	 * When a new RSS rule is created, the old rule is overridden and
1623 	 * marked invalid.
1624 	 */
1625 	TAILQ_FOREACH(rss_filter_ptr, &hw->flow_rss_list, entries)
1626 		rss_filter_ptr->filter_info.valid = false;
1627 
1628 rss_config_err:
1629 	rte_spinlock_unlock(&hw->lock);
1630 
1631 	return ret;
1632 }
1633 
1634 static int
1635 hns3_clear_rss_filter(struct rte_eth_dev *dev)
1636 {
1637 	struct hns3_adapter *hns = dev->data->dev_private;
1638 	struct hns3_rss_conf_ele *rss_filter_ptr;
1639 	struct hns3_hw *hw = &hns->hw;
1640 	int rss_rule_succ_cnt = 0; /* count for success of clearing RSS rules */
1641 	int rss_rule_fail_cnt = 0; /* count for failure of clearing RSS rules */
1642 	int ret = 0;
1643 
1644 	rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
1645 	while (rss_filter_ptr) {
1646 		TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
1647 		ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
1648 					     false);
1649 		if (ret)
1650 			rss_rule_fail_cnt++;
1651 		else
1652 			rss_rule_succ_cnt++;
1653 		rte_free(rss_filter_ptr);
1654 		rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
1655 	}
1656 
1657 	if (rss_rule_fail_cnt) {
1658 		hns3_err(hw, "failed to delete all RSS filters, success num = %d fail num = %d",
1659 			 rss_rule_succ_cnt, rss_rule_fail_cnt);
1660 		ret = -EIO;
1661 	}
1662 
1663 	return ret;
1664 }
1665 
1666 int
1667 hns3_restore_rss_filter(struct rte_eth_dev *dev)
1668 {
1669 	struct hns3_adapter *hns = dev->data->dev_private;
1670 	struct hns3_hw *hw = &hns->hw;
1671 
1672 	/* When the user flushes all rules, the RSS rule need not be restored */
1673 	if (hw->rss_info.conf.func == RTE_ETH_HASH_FUNCTION_MAX)
1674 		return 0;
1675 
1676 	return hns3_config_rss_filter(dev, &hw->rss_info, true);
1677 }
1678 
1679 static int
1680 hns3_flow_parse_rss(struct rte_eth_dev *dev,
1681 		    const struct hns3_rss_conf *conf, bool add)
1682 {
1683 	struct hns3_adapter *hns = dev->data->dev_private;
1684 	struct hns3_hw *hw = &hns->hw;
1685 	bool ret;
1686 
1687 	ret = hns3_action_rss_same(&hw->rss_info.conf, &conf->conf);
1688 	if (ret) {
1689 		hns3_err(hw, "Duplicate RSS configuration entered: %d", ret);
1690 		return -EINVAL;
1691 	}
1692 
1693 	return hns3_config_rss_filter(dev, conf, add);
1694 }
1695 
1696 static int
1697 hns3_flow_args_check(const struct rte_flow_attr *attr,
1698 		     const struct rte_flow_item pattern[],
1699 		     const struct rte_flow_action actions[],
1700 		     struct rte_flow_error *error)
1701 {
1702 	if (pattern == NULL)
1703 		return rte_flow_error_set(error, EINVAL,
1704 					  RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1705 					  NULL, "NULL pattern.");
1706 
1707 	if (actions == NULL)
1708 		return rte_flow_error_set(error, EINVAL,
1709 					  RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1710 					  NULL, "NULL action.");
1711 
1712 	if (attr == NULL)
1713 		return rte_flow_error_set(error, EINVAL,
1714 					  RTE_FLOW_ERROR_TYPE_ATTR,
1715 					  NULL, "NULL attribute.");
1716 
1717 	return hns3_check_attr(attr, error);
1718 }
1719 
1720 /*
1721  * Check if the flow rule is supported by hns3.
1722  * It only checks the format and does not guarantee that the rule can be
1723  * programmed into the HW, because there may not be enough room for the rule.
1724  */
1725 static int
1726 hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1727 		   const struct rte_flow_item pattern[],
1728 		   const struct rte_flow_action actions[],
1729 		   struct rte_flow_error *error)
1730 {
1731 	struct hns3_fdir_rule fdir_rule;
1732 	int ret;
1733 
1734 	ret = hns3_flow_args_check(attr, pattern, actions, error);
1735 	if (ret)
1736 		return ret;
1737 
1738 	if (hns3_find_rss_general_action(pattern, actions))
1739 		return hns3_parse_rss_filter(dev, actions, error);
1740 
1741 	memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1742 	return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1743 }
1744 
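/*
 * Allocate an RSS filter element, program the RSS action into the hardware
 * and, on success, attach the element to hw->flow_rss_list and to the flow.
 */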
1745 static int
1746 hns3_flow_create_rss_rule(struct rte_eth_dev *dev,
1747 			  const struct rte_flow_action *act,
1748 			  struct rte_flow *flow)
1749 {
1750 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1751 	struct hns3_rss_conf_ele *rss_filter_ptr;
1752 	const struct hns3_rss_conf *rss_conf;
1753 	int ret;
1754 
1755 	rss_filter_ptr = rte_zmalloc("hns3 rss filter",
1756 				     sizeof(struct hns3_rss_conf_ele), 0);
1757 	if (rss_filter_ptr == NULL) {
1758 		hns3_err(hw, "failed to allocate hns3_rss_filter memory");
1759 		return -ENOMEM;
1760 	}
1761 
1762 	/*
1763 	 * Configure the rule to the hardware only after all the preceding
1764 	 * steps have succeeded, which simplifies rolling back rules in the
1765 	 * hardware.
1766 	 */
1767 	rss_conf = (const struct hns3_rss_conf *)act->conf;
1768 	ret = hns3_flow_parse_rss(dev, rss_conf, true);
1769 	if (ret != 0) {
1770 		rte_free(rss_filter_ptr);
1771 		return ret;
1772 	}
1773 
1774 	hns3_rss_conf_copy(&rss_filter_ptr->filter_info, &rss_conf->conf);
1775 	rss_filter_ptr->filter_info.valid = true;
1776 	TAILQ_INSERT_TAIL(&hw->flow_rss_list, rss_filter_ptr, entries);
1777 	flow->rule = rss_filter_ptr;
1778 	flow->filter_type = RTE_ETH_FILTER_HASH;
1779 
1780 	return 0;
1781 }
1782 
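/*
 * Parse and program an FDIR rule. A flow counter is created first when the
 * rule requests one; both the counter and the rule element are released if a
 * later step fails.
 */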
1783 static int
1784 hns3_flow_create_fdir_rule(struct rte_eth_dev *dev,
1785 			   const struct rte_flow_item pattern[],
1786 			   const struct rte_flow_action actions[],
1787 			   struct rte_flow_error *error,
1788 			   struct rte_flow *flow)
1789 {
1790 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1791 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1792 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
1793 	struct hns3_fdir_rule fdir_rule;
1794 	bool indir;
1795 	int ret;
1796 
1797 	memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1798 	ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1799 	if (ret != 0)
1800 		return ret;
1801 
1802 	indir = !!(fdir_rule.flags & HNS3_RULE_FLAG_COUNTER_INDIR);
1803 	if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) {
1804 		ret = hns3_counter_new(dev, indir, fdir_rule.act_cnt.id,
1805 				       error);
1806 		if (ret != 0)
1807 			return ret;
1808 
1809 		flow->counter_id = fdir_rule.act_cnt.id;
1810 	}
1811 
1812 	fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
1813 				    sizeof(struct hns3_fdir_rule_ele), 0);
1814 	if (fdir_rule_ptr == NULL) {
1815 		hns3_err(hw, "failed to allocate fdir_rule memory.");
1816 		ret = -ENOMEM;
1817 		goto err_malloc;
1818 	}
1819 
1820 	/*
1821 	 * Configure the rule to the hardware only after all the preceding
1822 	 * steps have succeeded, which simplifies rolling back rules in the
1823 	 * hardware.
1824 	 */
1825 	ret = hns3_fdir_filter_program(hns, &fdir_rule, false);
1826 	if (ret != 0)
1827 		goto err_fdir_filter;
1828 
1829 	memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule,
1830 		sizeof(struct hns3_fdir_rule));
1831 	TAILQ_INSERT_TAIL(&hw->flow_fdir_list, fdir_rule_ptr, entries);
1832 	flow->rule = fdir_rule_ptr;
1833 	flow->filter_type = RTE_ETH_FILTER_FDIR;
1834 
1835 	return 0;
1836 
1837 err_fdir_filter:
1838 	rte_free(fdir_rule_ptr);
1839 err_malloc:
1840 	if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1841 		hns3_counter_release(dev, fdir_rule.act_cnt.id);
1842 
1843 	return ret;
1844 }
1845 
1846 /*
1847  * Create or destroy a flow rule.
1848  * Theoretically one rule can match more than one filter.
1849  * We will let it use the filter which it hits first.
1850  * So, the sequence matters.
1851  */
1852 static struct rte_flow *
1853 hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1854 		 const struct rte_flow_item pattern[],
1855 		 const struct rte_flow_action actions[],
1856 		 struct rte_flow_error *error)
1857 {
1858 	struct hns3_adapter *hns = dev->data->dev_private;
1859 	struct hns3_hw *hw = &hns->hw;
1860 	struct hns3_flow_mem *flow_node;
1861 	const struct rte_flow_action *act;
1862 	struct rte_flow *flow;
1863 	int ret;
1864 
1865 	ret = hns3_flow_validate(dev, attr, pattern, actions, error);
1866 	if (ret)
1867 		return NULL;
1868 
1869 	flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
1870 	if (flow == NULL) {
1871 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1872 				   NULL, "Failed to allocate flow memory");
1873 		return NULL;
1874 	}
1875 	flow_node = rte_zmalloc("hns3 flow node",
1876 				sizeof(struct hns3_flow_mem), 0);
1877 	if (flow_node == NULL) {
1878 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1879 				   NULL, "Failed to allocate flow list memory");
1880 		rte_free(flow);
1881 		return NULL;
1882 	}
1883 
1884 	flow_node->flow = flow;
1885 	TAILQ_INSERT_TAIL(&hw->flow_list, flow_node, entries);
1886 
1887 	act = hns3_find_rss_general_action(pattern, actions);
1888 	if (act)
1889 		ret = hns3_flow_create_rss_rule(dev, act, flow);
1890 	else
1891 		ret = hns3_flow_create_fdir_rule(dev, pattern, actions,
1892 						 error, flow);
1893 	if (ret == 0)
1894 		return flow;
1895 
1896 	rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1897 			   "Failed to create flow");
1898 	TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
1899 	rte_free(flow_node);
1900 	rte_free(flow);
1901 
1902 	return NULL;
1903 }
1904 
1905 /* Destroy a flow rule on hns3. */
1906 static int
1907 hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1908 		  struct rte_flow_error *error)
1909 {
1910 	struct hns3_adapter *hns = dev->data->dev_private;
1911 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
1912 	struct hns3_rss_conf_ele *rss_filter_ptr;
1913 	struct hns3_flow_mem *flow_node;
1914 	enum rte_filter_type filter_type;
1915 	struct hns3_fdir_rule fdir_rule;
1916 	struct hns3_hw *hw = &hns->hw;
1917 	int ret;
1918 
1919 	if (flow == NULL)
1920 		return rte_flow_error_set(error, EINVAL,
1921 					  RTE_FLOW_ERROR_TYPE_HANDLE,
1922 					  flow, "Flow is NULL");
1923 
1924 	filter_type = flow->filter_type;
1925 	switch (filter_type) {
1926 	case RTE_ETH_FILTER_FDIR:
1927 		fdir_rule_ptr = (struct hns3_fdir_rule_ele *)flow->rule;
1928 		memcpy(&fdir_rule, &fdir_rule_ptr->fdir_conf,
1929 			   sizeof(struct hns3_fdir_rule));
1930 
1931 		ret = hns3_fdir_filter_program(hns, &fdir_rule, true);
1932 		if (ret)
1933 			return rte_flow_error_set(error, EIO,
1934 						  RTE_FLOW_ERROR_TYPE_HANDLE,
1935 						  flow,
1936 						  "Destroy FDIR fail. Try again");
1937 		if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1938 			hns3_counter_release(dev, fdir_rule.act_cnt.id);
1939 		TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
1940 		rte_free(fdir_rule_ptr);
1941 		fdir_rule_ptr = NULL;
1942 		break;
1943 	case RTE_ETH_FILTER_HASH:
1944 		rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule;
1945 		ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
1946 					     false);
1947 		if (ret)
1948 			return rte_flow_error_set(error, EIO,
1949 						  RTE_FLOW_ERROR_TYPE_HANDLE,
1950 						  flow,
1951 						  "Destroy RSS fail. Try again");
1952 		TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
1953 		rte_free(rss_filter_ptr);
1954 		rss_filter_ptr = NULL;
1955 		break;
1956 	default:
1957 		return rte_flow_error_set(error, EINVAL,
1958 					  RTE_FLOW_ERROR_TYPE_HANDLE, flow,
1959 					  "Unsupported filter type");
1960 	}
1961 
1962 	TAILQ_FOREACH(flow_node, &hw->flow_list, entries) {
1963 		if (flow_node->flow == flow) {
1964 			TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
1965 			rte_free(flow_node);
1966 			flow_node = NULL;
1967 			break;
1968 		}
1969 	}
1970 	rte_free(flow);
1971 
1972 	return 0;
1973 }
1974 
1975 /* Destroy all flow rules associated with a port on hns3. */
1976 static int
1977 hns3_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1978 {
1979 	struct hns3_adapter *hns = dev->data->dev_private;
1980 	int ret;
1981 
1982 	/* FDIR is available only in PF driver */
1983 	if (!hns->is_vf) {
1984 		ret = hns3_clear_all_fdir_filter(hns);
1985 		if (ret) {
1986 			rte_flow_error_set(error, ret,
1987 					   RTE_FLOW_ERROR_TYPE_HANDLE,
1988 					   NULL, "Failed to flush rule");
1989 			return ret;
1990 		}
1991 		hns3_counter_flush(dev);
1992 	}
1993 
1994 	ret = hns3_clear_rss_filter(dev);
1995 	if (ret) {
1996 		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1997 				   NULL, "Failed to flush RSS filter");
1998 		return ret;
1999 	}
2000 
2001 	hns3_filterlist_flush(dev);
2002 
2003 	return 0;
2004 }
2005 
2006 /* Query an existing flow rule. */
2007 static int
2008 hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
2009 		const struct rte_flow_action *actions, void *data,
2010 		struct rte_flow_error *error)
2011 {
2012 	struct rte_flow_action_rss *rss_conf;
2013 	struct hns3_rss_conf_ele *rss_rule;
2014 	struct rte_flow_query_count *qc;
2015 	int ret;
2016 
2017 	if (!flow->rule)
2018 		return rte_flow_error_set(error, EINVAL,
2019 			RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "invalid rule");
2020 
2021 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2022 		switch (actions->type) {
2023 		case RTE_FLOW_ACTION_TYPE_VOID:
2024 			break;
2025 		case RTE_FLOW_ACTION_TYPE_COUNT:
2026 			qc = (struct rte_flow_query_count *)data;
2027 			ret = hns3_counter_query(dev, flow, qc, error);
2028 			if (ret)
2029 				return ret;
2030 			break;
2031 		case RTE_FLOW_ACTION_TYPE_RSS:
2032 			if (flow->filter_type != RTE_ETH_FILTER_HASH) {
2033 				return rte_flow_error_set(error, ENOTSUP,
2034 					RTE_FLOW_ERROR_TYPE_ACTION,
2035 					actions, "action is not supported");
2036 			}
2037 			rss_conf = (struct rte_flow_action_rss *)data;
2038 			rss_rule = (struct hns3_rss_conf_ele *)flow->rule;
2039 			rte_memcpy(rss_conf, &rss_rule->filter_info.conf,
2040 				   sizeof(struct rte_flow_action_rss));
2041 			break;
2042 		default:
2043 			return rte_flow_error_set(error, ENOTSUP,
2044 				RTE_FLOW_ERROR_TYPE_ACTION,
2045 				actions, "action is not supported");
2046 		}
2047 	}
2048 
2049 	return 0;
2050 }
2051 
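/*
 * The *_wrap callbacks below serialize all rte_flow operations with
 * hw->flows_lock, which allows the driver to advertise
 * RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE (see hns3_flow_init()).
 */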
2052 static int
2053 hns3_flow_validate_wrap(struct rte_eth_dev *dev,
2054 			const struct rte_flow_attr *attr,
2055 			const struct rte_flow_item pattern[],
2056 			const struct rte_flow_action actions[],
2057 			struct rte_flow_error *error)
2058 {
2059 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2060 	int ret;
2061 
2062 	pthread_mutex_lock(&hw->flows_lock);
2063 	ret = hns3_flow_validate(dev, attr, pattern, actions, error);
2064 	pthread_mutex_unlock(&hw->flows_lock);
2065 
2066 	return ret;
2067 }
2068 
2069 static struct rte_flow *
2070 hns3_flow_create_wrap(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2071 		      const struct rte_flow_item pattern[],
2072 		      const struct rte_flow_action actions[],
2073 		      struct rte_flow_error *error)
2074 {
2075 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2076 	struct rte_flow *flow;
2077 
2078 	pthread_mutex_lock(&hw->flows_lock);
2079 	flow = hns3_flow_create(dev, attr, pattern, actions, error);
2080 	pthread_mutex_unlock(&hw->flows_lock);
2081 
2082 	return flow;
2083 }
2084 
2085 static int
2086 hns3_flow_destroy_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
2087 		       struct rte_flow_error *error)
2088 {
2089 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2090 	int ret;
2091 
2092 	pthread_mutex_lock(&hw->flows_lock);
2093 	ret = hns3_flow_destroy(dev, flow, error);
2094 	pthread_mutex_unlock(&hw->flows_lock);
2095 
2096 	return ret;
2097 }
2098 
2099 static int
2100 hns3_flow_flush_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error)
2101 {
2102 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2103 	int ret;
2104 
2105 	pthread_mutex_lock(&hw->flows_lock);
2106 	ret = hns3_flow_flush(dev, error);
2107 	pthread_mutex_unlock(&hw->flows_lock);
2108 
2109 	return ret;
2110 }
2111 
2112 static int
2113 hns3_flow_query_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
2114 		     const struct rte_flow_action *actions, void *data,
2115 		     struct rte_flow_error *error)
2116 {
2117 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2118 	int ret;
2119 
2120 	pthread_mutex_lock(&hw->flows_lock);
2121 	ret = hns3_flow_query(dev, flow, actions, data, error);
2122 	pthread_mutex_unlock(&hw->flows_lock);
2123 
2124 	return ret;
2125 }
2126 
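/*
 * Only ingress indirect COUNT actions are supported; egress and transfer
 * attributes are rejected.
 */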
2127 static int
2128 hns3_check_indir_action(const struct rte_flow_indir_action_conf *conf,
2129 			const struct rte_flow_action *action,
2130 			struct rte_flow_error *error)
2131 {
2132 	if (!conf->ingress)
2133 		return rte_flow_error_set(error, EINVAL,
2134 				RTE_FLOW_ERROR_TYPE_ACTION,
2135 				NULL, "Indirect action ingress can't be zero");
2136 
2137 	if (conf->egress)
2138 		return rte_flow_error_set(error, EINVAL,
2139 				RTE_FLOW_ERROR_TYPE_ACTION,
2140 				NULL, "Indirect action does not support egress");
2141 
2142 	if (conf->transfer)
2143 		return rte_flow_error_set(error, EINVAL,
2144 				RTE_FLOW_ERROR_TYPE_ACTION,
2145 				NULL, "Indirect action does not support transfer");
2146 
2147 	if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
2148 		return rte_flow_error_set(error, EINVAL,
2149 				RTE_FLOW_ERROR_TYPE_ACTION,
2150 				NULL, "Indirect action only supports count");
2151 
2152 	return 0;
2153 }
2154 
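/*
 * Create an indirect (shared) COUNT action: validate the counter id against
 * the stage-1 counter number, create the hardware counter and mark it as
 * indirect.
 */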
2155 static struct rte_flow_action_handle *
2156 hns3_flow_action_create(struct rte_eth_dev *dev,
2157 			const struct rte_flow_indir_action_conf *conf,
2158 			const struct rte_flow_action *action,
2159 			struct rte_flow_error *error)
2160 {
2161 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2162 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2163 	const struct rte_flow_action_count *act_count;
2164 	struct rte_flow_action_handle *handle = NULL;
2165 	struct hns3_flow_counter *counter;
2166 
2167 	if (hns3_check_indir_action(conf, action, error))
2168 		return NULL;
2169 
2170 	handle = rte_zmalloc("hns3 action handle",
2171 			     sizeof(struct rte_flow_action_handle), 0);
2172 	if (handle == NULL) {
2173 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
2174 				   NULL, "Failed to allocate action memory");
2175 		return NULL;
2176 	}
2177 
2178 	pthread_mutex_lock(&hw->flows_lock);
2179 
2180 	act_count = (const struct rte_flow_action_count *)action->conf;
2181 	if (act_count->id >= pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1]) {
2182 		rte_flow_error_set(error, EINVAL,
2183 				   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2184 				   action, "Invalid counter id");
2185 		goto err_exit;
2186 	}
2187 
2188 	if (hns3_counter_new(dev, false, act_count->id, error))
2189 		goto err_exit;
2190 
2191 	counter = hns3_counter_lookup(dev, act_count->id);
2192 	if (counter == NULL) {
2193 		rte_flow_error_set(error, EINVAL,
2194 				   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2195 				   action, "Counter id not found");
2196 		goto err_exit;
2197 	}
2198 
2199 	counter->indirect = true;
2200 	handle->indirect_type = HNS3_INDIRECT_ACTION_TYPE_COUNT;
2201 	handle->counter_id = counter->id;
2202 
2203 	pthread_mutex_unlock(&hw->flows_lock);
2204 	return handle;
2205 
2206 err_exit:
2207 	pthread_mutex_unlock(&hw->flows_lock);
2208 	rte_free(handle);
2209 	return NULL;
2210 }
2211 
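/*
 * Destroy an indirect COUNT action. The request is rejected with EBUSY while
 * the counter is still referenced by a flow rule.
 */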
2212 static int
2213 hns3_flow_action_destroy(struct rte_eth_dev *dev,
2214 			 struct rte_flow_action_handle *handle,
2215 			 struct rte_flow_error *error)
2216 {
2217 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2218 	struct hns3_flow_counter *counter;
2219 
2220 	pthread_mutex_lock(&hw->flows_lock);
2221 
2222 	if (handle->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT) {
2223 		pthread_mutex_unlock(&hw->flows_lock);
2224 		return rte_flow_error_set(error, EINVAL,
2225 					RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2226 					handle, "Invalid indirect type");
2227 	}
2228 
2229 	counter = hns3_counter_lookup(dev, handle->counter_id);
2230 	if (counter == NULL) {
2231 		pthread_mutex_unlock(&hw->flows_lock);
2232 		return rte_flow_error_set(error, EINVAL,
2233 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2234 				handle, "Counter id does not exist");
2235 	}
2236 
2237 	if (counter->ref_cnt > 1) {
2238 		pthread_mutex_unlock(&hw->flows_lock);
2239 		return rte_flow_error_set(error, EBUSY,
2240 				RTE_FLOW_ERROR_TYPE_HANDLE,
2241 				handle, "Counter id in use");
2242 	}
2243 
2244 	(void)hns3_counter_release(dev, handle->counter_id);
2245 	rte_free(handle);
2246 
2247 	pthread_mutex_unlock(&hw->flows_lock);
2248 	return 0;
2249 }
2250 
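/*
 * Query an indirect COUNT action by wrapping its counter id in a temporary
 * rte_flow handle and reusing hns3_counter_query().
 */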
2251 static int
2252 hns3_flow_action_query(struct rte_eth_dev *dev,
2253 		 const struct rte_flow_action_handle *handle,
2254 		 void *data,
2255 		 struct rte_flow_error *error)
2256 {
2257 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2258 	struct rte_flow flow;
2259 	int ret;
2260 
2261 	pthread_mutex_lock(&hw->flows_lock);
2262 
2263 	if (handle->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT) {
2264 		pthread_mutex_unlock(&hw->flows_lock);
2265 		return rte_flow_error_set(error, EINVAL,
2266 					RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2267 					handle, "Invalid indirect type");
2268 	}
2269 
2270 	memset(&flow, 0, sizeof(flow));
2271 	flow.counter_id = handle->counter_id;
2272 	ret = hns3_counter_query(dev, &flow,
2273 				 (struct rte_flow_query_count *)data, error);
2274 	pthread_mutex_unlock(&hw->flows_lock);
2275 	return ret;
2276 }
2277 
2278 static const struct rte_flow_ops hns3_flow_ops = {
2279 	.validate = hns3_flow_validate_wrap,
2280 	.create = hns3_flow_create_wrap,
2281 	.destroy = hns3_flow_destroy_wrap,
2282 	.flush = hns3_flow_flush_wrap,
2283 	.query = hns3_flow_query_wrap,
2284 	.isolate = NULL,
2285 	.action_handle_create = hns3_flow_action_create,
2286 	.action_handle_destroy = hns3_flow_action_destroy,
2287 	.action_handle_query = hns3_flow_action_query,
2288 };
2289 
2290 int
2291 hns3_dev_flow_ops_get(struct rte_eth_dev *dev,
2292 		      const struct rte_flow_ops **ops)
2293 {
2294 	struct hns3_hw *hw;
2295 
2296 	hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2297 	if (hw->adapter_state >= HNS3_NIC_CLOSED)
2298 		return -ENODEV;
2299 
2300 	*ops = &hns3_flow_ops;
2301 	return 0;
2302 }
2303 
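/*
 * Initialize the per-port flow lists and the process-shared flows_lock.
 * Only the primary process performs this initialization.
 */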
2304 void
2305 hns3_flow_init(struct rte_eth_dev *dev)
2306 {
2307 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2308 	pthread_mutexattr_t attr;
2309 
2310 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2311 		return;
2312 
2313 	pthread_mutexattr_init(&attr);
2314 	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
2315 	pthread_mutex_init(&hw->flows_lock, &attr);
2316 	dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
2317 
2318 	TAILQ_INIT(&hw->flow_fdir_list);
2319 	TAILQ_INIT(&hw->flow_rss_list);
2320 	TAILQ_INIT(&hw->flow_list);
2321 }
2322 
2323 void
2324 hns3_flow_uninit(struct rte_eth_dev *dev)
2325 {
2326 	struct rte_flow_error error;
2327 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2328 		hns3_flow_flush_wrap(dev, &error);
2329 }
2330