1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 HiSilicon Limited.
3  */
4 
5 #include <rte_flow_driver.h>
6 #include <rte_io.h>
7 #include <rte_malloc.h>
8 
9 #include "hns3_ethdev.h"
10 #include "hns3_logs.h"
11 #include "hns3_flow.h"
12 
13 /* Default hash keys */
14 static uint8_t hns3_hash_key[] = {
15 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
16 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
17 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
18 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
19 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
20 };
21 
22 static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF };
23 static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 };
24 
25 /* Special Filter id for non-specific packet flagging. Don't change value */
26 #define HNS3_MAX_FILTER_ID	0x0FFF
27 
28 #define ETHER_TYPE_MASK		0xFFFF
29 #define IPPROTO_MASK		0xFF
30 #define TUNNEL_TYPE_MASK	0xFFFF
31 
32 #define HNS3_TUNNEL_TYPE_VXLAN		0x12B5
33 #define HNS3_TUNNEL_TYPE_VXLAN_GPE	0x12B6
34 #define HNS3_TUNNEL_TYPE_GENEVE		0x17C1
35 #define HNS3_TUNNEL_TYPE_NVGRE		0x6558
36 
37 static enum rte_flow_item_type first_items[] = {
38 	RTE_FLOW_ITEM_TYPE_ETH,
39 	RTE_FLOW_ITEM_TYPE_IPV4,
40 	RTE_FLOW_ITEM_TYPE_IPV6,
41 	RTE_FLOW_ITEM_TYPE_TCP,
42 	RTE_FLOW_ITEM_TYPE_UDP,
43 	RTE_FLOW_ITEM_TYPE_SCTP,
44 	RTE_FLOW_ITEM_TYPE_ICMP,
45 	RTE_FLOW_ITEM_TYPE_NVGRE,
46 	RTE_FLOW_ITEM_TYPE_VXLAN,
47 	RTE_FLOW_ITEM_TYPE_GENEVE,
48 	RTE_FLOW_ITEM_TYPE_VXLAN_GPE
49 };
50 
51 static enum rte_flow_item_type L2_next_items[] = {
52 	RTE_FLOW_ITEM_TYPE_VLAN,
53 	RTE_FLOW_ITEM_TYPE_IPV4,
54 	RTE_FLOW_ITEM_TYPE_IPV6
55 };
56 
57 static enum rte_flow_item_type L3_next_items[] = {
58 	RTE_FLOW_ITEM_TYPE_TCP,
59 	RTE_FLOW_ITEM_TYPE_UDP,
60 	RTE_FLOW_ITEM_TYPE_SCTP,
61 	RTE_FLOW_ITEM_TYPE_NVGRE,
62 	RTE_FLOW_ITEM_TYPE_ICMP
63 };
64 
65 static enum rte_flow_item_type L4_next_items[] = {
66 	RTE_FLOW_ITEM_TYPE_VXLAN,
67 	RTE_FLOW_ITEM_TYPE_GENEVE,
68 	RTE_FLOW_ITEM_TYPE_VXLAN_GPE
69 };
70 
71 static enum rte_flow_item_type tunnel_next_items[] = {
72 	RTE_FLOW_ITEM_TYPE_ETH,
73 	RTE_FLOW_ITEM_TYPE_VLAN
74 };
75 
76 struct items_step_mngr {
77 	enum rte_flow_item_type *items;
78 	int count;
79 };
80 
81 static inline void
82 net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len)
83 {
84 	size_t i;
85 
86 	for (i = 0; i < len; i++)
87 		dst[i] = rte_be_to_cpu_32(src[i]);
88 }
89 
90 /*
91  * This function is used to find the general RSS action.
92  * 1. RSS is used to spread packets among several queues. The flow API
93  *    provides struct rte_flow_action_rss, and the user can configure its
94  *    fields (func/level/types/key/queue) to control the RSS function.
95  * 2. The flow API also supports queue region configuration for hns3. It is
96  *    implemented by FDIR + RSS in hns3 hardware, so the user can create one
97  *    FDIR rule whose action is an RSS queue region.
98  * 3. When the action is RSS, the following rule is used to distinguish:
99  *    Case 1: the pattern has ETH and the action's queue_num > 0, which
100  *            indicates a queue region configuration.
101  *    Otherwise: a general RSS action.
102  */
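/*
 * Purely as an illustration (testpmd-style syntax; port and queue ids are
 * arbitrary and not taken from this driver):
 *   flow create 0 ingress pattern eth / end
 *        actions rss queues 0 1 2 3 end / end
 * hits Case 1 above (ETH in the pattern and queue_num > 0) and is treated as
 * a queue region, i.e. handled by the FDIR path, while
 *   flow create 0 ingress pattern eth / end actions rss types ipv4-tcp end / end
 * carries no queues and is handled as a general RSS action.
 */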
103 static const struct rte_flow_action *
104 hns3_find_rss_general_action(const struct rte_flow_item pattern[],
105 			     const struct rte_flow_action actions[])
106 {
107 	const struct rte_flow_action *act = NULL;
108 	const struct hns3_rss_conf *rss;
109 	bool have_eth = false;
110 
111 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
112 		if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
113 			act = actions;
114 			break;
115 		}
116 	}
117 	if (!act)
118 		return NULL;
119 
120 	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
121 		if (pattern->type == RTE_FLOW_ITEM_TYPE_ETH) {
122 			have_eth = true;
123 			break;
124 		}
125 	}
126 
127 	rss = act->conf;
128 	if (have_eth && rss->conf.queue_num) {
129 		/*
130 		 * The pattern has ETH and the action's queue_num > 0, which
131 		 * indicates a queue region configuration.
132 		 * Because queue region is implemented by FDIR + RSS in hns3
133 		 * hardware, it needs to go through the FDIR process, so return
134 		 * NULL here to avoid entering the RSS process.
135 		 */
136 		return NULL;
137 	}
138 
139 	return act;
140 }
141 
142 static inline struct hns3_flow_counter *
143 hns3_counter_lookup(struct rte_eth_dev *dev, uint32_t id)
144 {
145 	struct hns3_adapter *hns = dev->data->dev_private;
146 	struct hns3_pf *pf = &hns->pf;
147 	struct hns3_flow_counter *cnt;
148 
149 	LIST_FOREACH(cnt, &pf->flow_counters, next) {
150 		if (cnt->id == id)
151 			return cnt;
152 	}
153 	return NULL;
154 }
155 
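/*
 * Create a flow counter with the given id or take another reference on an
 * existing one. Hardware counters are read-clear, so a new counter is read
 * once here to reset it; an existing counter may only be reused when its
 * indirect flag matches the request.
 */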
156 static int
157 hns3_counter_new(struct rte_eth_dev *dev, uint32_t indirect, uint32_t id,
158 		 struct rte_flow_error *error)
159 {
160 	struct hns3_adapter *hns = dev->data->dev_private;
161 	struct hns3_pf *pf = &hns->pf;
162 	struct hns3_hw *hw = &hns->hw;
163 	struct hns3_flow_counter *cnt;
164 	uint64_t value;
165 	int ret;
166 
167 	cnt = hns3_counter_lookup(dev, id);
168 	if (cnt) {
169 		if (!cnt->indirect || cnt->indirect != indirect)
170 			return rte_flow_error_set(error, ENOTSUP,
171 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
172 				cnt,
173 				"Counter id is used, indirect flag not match");
174 		/* Clear the indirect counter on first use. */
175 		if (cnt->indirect && cnt->ref_cnt == 1)
176 			(void)hns3_get_count(hw, id, &value);
177 		cnt->ref_cnt++;
178 		return 0;
179 	}
180 
181 	/* Clear the counter by read ops because the counter is read-clear */
182 	ret = hns3_get_count(hw, id, &value);
183 	if (ret)
184 		return rte_flow_error_set(error, EIO,
185 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
186 					  "Clear counter failed!");
187 
188 	cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
189 	if (cnt == NULL)
190 		return rte_flow_error_set(error, ENOMEM,
191 					  RTE_FLOW_ERROR_TYPE_HANDLE, cnt,
192 					  "Alloc mem for counter failed");
193 	cnt->id = id;
194 	cnt->indirect = indirect;
195 	cnt->ref_cnt = 1;
196 	cnt->hits = 0;
197 	LIST_INSERT_HEAD(&pf->flow_counters, cnt, next);
198 	return 0;
199 }
200 
201 static int
202 hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
203 		   struct rte_flow_query_count *qc,
204 		   struct rte_flow_error *error)
205 {
206 	struct hns3_adapter *hns = dev->data->dev_private;
207 	struct hns3_flow_counter *cnt;
208 	uint64_t value;
209 	int ret;
210 
211 	/* FDIR is available only in PF driver */
212 	if (hns->is_vf)
213 		return rte_flow_error_set(error, ENOTSUP,
214 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
215 					  "Fdir is not supported in VF");
216 	cnt = hns3_counter_lookup(dev, flow->counter_id);
217 	if (cnt == NULL)
218 		return rte_flow_error_set(error, EINVAL,
219 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
220 					  "Can't find counter id");
221 
222 	ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
223 	if (ret) {
224 		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
225 				   NULL, "Read counter fail.");
226 		return ret;
227 	}
228 	qc->hits_set = 1;
229 	qc->hits = value;
230 	qc->bytes_set = 0;
231 	qc->bytes = 0;
232 
233 	return 0;
234 }
235 
236 static int
237 hns3_counter_release(struct rte_eth_dev *dev, uint32_t id)
238 {
239 	struct hns3_adapter *hns = dev->data->dev_private;
240 	struct hns3_hw *hw = &hns->hw;
241 	struct hns3_flow_counter *cnt;
242 
243 	cnt = hns3_counter_lookup(dev, id);
244 	if (cnt == NULL) {
245 		hns3_err(hw, "Can't find available counter to release");
246 		return -EINVAL;
247 	}
248 	cnt->ref_cnt--;
249 	if (cnt->ref_cnt == 0) {
250 		LIST_REMOVE(cnt, next);
251 		rte_free(cnt);
252 	}
253 	return 0;
254 }
255 
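/*
 * Release all flow counters. Counters that belong to indirect (shared)
 * actions are not freed: they are reset and re-inserted into the
 * pf->flow_counters list so that the indirect action handles stay valid.
 */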
256 static void
257 hns3_counter_flush(struct rte_eth_dev *dev)
258 {
259 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
260 	LIST_HEAD(counters, hns3_flow_counter) indir_counters;
261 	struct hns3_flow_counter *cnt_ptr;
262 
263 	LIST_INIT(&indir_counters);
264 	cnt_ptr = LIST_FIRST(&pf->flow_counters);
265 	while (cnt_ptr) {
266 		LIST_REMOVE(cnt_ptr, next);
267 		if (cnt_ptr->indirect)
268 			LIST_INSERT_HEAD(&indir_counters, cnt_ptr, next);
269 		else
270 			rte_free(cnt_ptr);
271 		cnt_ptr = LIST_FIRST(&pf->flow_counters);
272 	}
273 
274 	/* Reset the indirect action and add to pf->flow_counters list. */
275 	cnt_ptr = LIST_FIRST(&indir_counters);
276 	while (cnt_ptr) {
277 		LIST_REMOVE(cnt_ptr, next);
278 		cnt_ptr->ref_cnt = 1;
279 		cnt_ptr->hits = 0;
280 		LIST_INSERT_HEAD(&pf->flow_counters, cnt_ptr, next);
281 		cnt_ptr = LIST_FIRST(&indir_counters);
282 	}
283 }
284 
285 static int
286 hns3_handle_action_queue(struct rte_eth_dev *dev,
287 			 const struct rte_flow_action *action,
288 			 struct hns3_fdir_rule *rule,
289 			 struct rte_flow_error *error)
290 {
291 	struct hns3_adapter *hns = dev->data->dev_private;
292 	const struct rte_flow_action_queue *queue;
293 	struct hns3_hw *hw = &hns->hw;
294 
295 	queue = (const struct rte_flow_action_queue *)action->conf;
296 	if (queue->index >= hw->data->nb_rx_queues) {
297 		hns3_err(hw, "queue ID(%u) is greater than number of "
298 			  "available queue (%u) in driver.",
299 			  queue->index, hw->data->nb_rx_queues);
300 		return rte_flow_error_set(error, EINVAL,
301 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
302 					  action, "Invalid queue ID in PF");
303 	}
304 
305 	rule->queue_id = queue->index;
306 	rule->nb_queues = 1;
307 	rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
308 	return 0;
309 }
310 
311 static int
312 hns3_handle_action_queue_region(struct rte_eth_dev *dev,
313 				const struct rte_flow_action *action,
314 				struct hns3_fdir_rule *rule,
315 				struct rte_flow_error *error)
316 {
317 	struct hns3_adapter *hns = dev->data->dev_private;
318 	const struct rte_flow_action_rss *conf = action->conf;
319 	struct hns3_hw *hw = &hns->hw;
320 	uint16_t idx;
321 
322 	if (!hns3_dev_get_support(hw, FD_QUEUE_REGION))
323 		return rte_flow_error_set(error, ENOTSUP,
324 			RTE_FLOW_ERROR_TYPE_ACTION, action,
325 			"Not support config queue region!");
326 
327 	if ((!rte_is_power_of_2(conf->queue_num)) ||
328 		conf->queue_num > hw->rss_size_max ||
329 		conf->queue[0] >= hw->data->nb_rx_queues ||
330 		conf->queue[0] + conf->queue_num > hw->data->nb_rx_queues) {
331 		return rte_flow_error_set(error, EINVAL,
332 			RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
333 			"Invalid start queue ID and queue num! the start queue "
334 			"ID must valid, the queue num must be power of 2 and "
335 			"<= rss_size_max.");
336 	}
337 
338 	for (idx = 1; idx < conf->queue_num; idx++) {
339 		if (conf->queue[idx] != conf->queue[idx - 1] + 1)
340 			return rte_flow_error_set(error, EINVAL,
341 				RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
342 				"Invalid queue ID sequence! the queue ID "
343 				"must be continuous increment.");
344 	}
345 
346 	rule->queue_id = conf->queue[0];
347 	rule->nb_queues = conf->queue_num;
348 	rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
349 	return 0;
350 }
351 
352 static int
353 hns3_handle_action_indirect(struct rte_eth_dev *dev,
354 			    const struct rte_flow_action *action,
355 			    struct hns3_fdir_rule *rule,
356 			    struct rte_flow_error *error)
357 {
358 	const struct rte_flow_action_handle *indir = action->conf;
359 
360 	if (indir->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT)
361 		return rte_flow_error_set(error, EINVAL,
362 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
363 				action, "Invalid indirect type");
364 
365 	if (hns3_counter_lookup(dev, indir->counter_id) == NULL)
366 		return rte_flow_error_set(error, EINVAL,
367 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
368 				action, "Counter id not exist");
369 
370 	rule->act_cnt.id = indir->counter_id;
371 	rule->flags |= (HNS3_RULE_FLAG_COUNTER | HNS3_RULE_FLAG_COUNTER_INDIR);
372 
373 	return 0;
374 }
375 
376 /*
377  * Parse the actions from the provided actions array.
378  * The actions are validated as they are copied into the rule.
379  *
380  * @param actions[in]
381  * @param rule[out]
382  *   NIC specific actions derived from the actions.
383  * @param error[out]
384  */
385 static int
386 hns3_handle_actions(struct rte_eth_dev *dev,
387 		    const struct rte_flow_action actions[],
388 		    struct hns3_fdir_rule *rule, struct rte_flow_error *error)
389 {
390 	struct hns3_adapter *hns = dev->data->dev_private;
391 	const struct rte_flow_action_count *act_count;
392 	const struct rte_flow_action_mark *mark;
393 	struct hns3_pf *pf = &hns->pf;
394 	uint32_t counter_num;
395 	int ret;
396 
397 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
398 		switch (actions->type) {
399 		case RTE_FLOW_ACTION_TYPE_QUEUE:
400 			ret = hns3_handle_action_queue(dev, actions, rule,
401 						       error);
402 			if (ret)
403 				return ret;
404 			break;
405 		case RTE_FLOW_ACTION_TYPE_DROP:
406 			rule->action = HNS3_FD_ACTION_DROP_PACKET;
407 			break;
408 		/*
409 		 * Here the real meaning of the RSS action is a queue region.
410 		 * Queue region is implemented by FDIR + RSS in hns3 hardware:
411 		 * the FDIR action selects one queue region (start_queue_id and
412 		 * queue_num), then RSS spreads packets over that queue region
413 		 * using the RSS algorithm.
414 		 */
415 		case RTE_FLOW_ACTION_TYPE_RSS:
416 			ret = hns3_handle_action_queue_region(dev, actions,
417 							      rule, error);
418 			if (ret)
419 				return ret;
420 			break;
421 		case RTE_FLOW_ACTION_TYPE_MARK:
422 			mark =
423 			    (const struct rte_flow_action_mark *)actions->conf;
424 			if (mark->id >= HNS3_MAX_FILTER_ID)
425 				return rte_flow_error_set(error, EINVAL,
426 						RTE_FLOW_ERROR_TYPE_ACTION_CONF,
427 						actions,
428 						"Invalid Mark ID");
429 			rule->fd_id = mark->id;
430 			rule->flags |= HNS3_RULE_FLAG_FDID;
431 			break;
432 		case RTE_FLOW_ACTION_TYPE_FLAG:
433 			rule->fd_id = HNS3_MAX_FILTER_ID;
434 			rule->flags |= HNS3_RULE_FLAG_FDID;
435 			break;
436 		case RTE_FLOW_ACTION_TYPE_COUNT:
437 			act_count =
438 			    (const struct rte_flow_action_count *)actions->conf;
439 			counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
440 			if (act_count->id >= counter_num)
441 				return rte_flow_error_set(error, EINVAL,
442 						RTE_FLOW_ERROR_TYPE_ACTION_CONF,
443 						actions,
444 						"Invalid counter id");
445 			rule->act_cnt = *act_count;
446 			rule->flags |= HNS3_RULE_FLAG_COUNTER;
447 			rule->flags &= ~HNS3_RULE_FLAG_COUNTER_INDIR;
448 			break;
449 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
450 			ret = hns3_handle_action_indirect(dev, actions, rule,
451 							  error);
452 			if (ret)
453 				return ret;
454 			break;
455 		case RTE_FLOW_ACTION_TYPE_VOID:
456 			break;
457 		default:
458 			return rte_flow_error_set(error, ENOTSUP,
459 						  RTE_FLOW_ERROR_TYPE_ACTION,
460 						  NULL, "Unsupported action");
461 		}
462 	}
463 
464 	return 0;
465 }
466 
467 static int
468 hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
469 {
470 	if (!attr->ingress)
471 		return rte_flow_error_set(error, EINVAL,
472 					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
473 					  attr, "Ingress can't be zero");
474 	if (attr->egress)
475 		return rte_flow_error_set(error, ENOTSUP,
476 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
477 					  attr, "Not support egress");
478 	if (attr->transfer)
479 		return rte_flow_error_set(error, ENOTSUP,
480 					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
481 					  attr, "No support for transfer");
482 	if (attr->priority)
483 		return rte_flow_error_set(error, ENOTSUP,
484 					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
485 					  attr, "Not support priority");
486 	if (attr->group)
487 		return rte_flow_error_set(error, ENOTSUP,
488 					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
489 					  attr, "Not support group");
490 	return 0;
491 }
492 
493 static int
494 hns3_parse_eth(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
495 	       struct rte_flow_error *error __rte_unused)
496 {
497 	const struct rte_flow_item_eth *eth_spec;
498 	const struct rte_flow_item_eth *eth_mask;
499 
500 	/* Only used to describe the protocol stack. */
501 	if (item->spec == NULL && item->mask == NULL)
502 		return 0;
503 
504 	if (item->mask) {
505 		eth_mask = item->mask;
506 		if (eth_mask->type) {
507 			hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
508 			rule->key_conf.mask.ether_type =
509 			    rte_be_to_cpu_16(eth_mask->type);
510 		}
511 		if (!rte_is_zero_ether_addr(&eth_mask->src)) {
512 			hns3_set_bit(rule->input_set, INNER_SRC_MAC, 1);
513 			memcpy(rule->key_conf.mask.src_mac,
514 			       eth_mask->src.addr_bytes, RTE_ETHER_ADDR_LEN);
515 		}
516 		if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
517 			hns3_set_bit(rule->input_set, INNER_DST_MAC, 1);
518 			memcpy(rule->key_conf.mask.dst_mac,
519 			       eth_mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
520 		}
521 	}
522 
523 	eth_spec = item->spec;
524 	rule->key_conf.spec.ether_type = rte_be_to_cpu_16(eth_spec->type);
525 	memcpy(rule->key_conf.spec.src_mac, eth_spec->src.addr_bytes,
526 	       RTE_ETHER_ADDR_LEN);
527 	memcpy(rule->key_conf.spec.dst_mac, eth_spec->dst.addr_bytes,
528 	       RTE_ETHER_ADDR_LEN);
529 	return 0;
530 }
531 
532 static int
533 hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
534 		struct rte_flow_error *error)
535 {
536 	const struct rte_flow_item_vlan *vlan_spec;
537 	const struct rte_flow_item_vlan *vlan_mask;
538 
539 	rule->key_conf.vlan_num++;
540 	if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
541 		return rte_flow_error_set(error, EINVAL,
542 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
543 					  "Vlan_num is more than 2");
544 
545 	/* Only used to describe the protocol stack. */
546 	if (item->spec == NULL && item->mask == NULL)
547 		return 0;
548 
549 	if (item->mask) {
550 		vlan_mask = item->mask;
551 		if (vlan_mask->tci) {
552 			if (rule->key_conf.vlan_num == 1) {
553 				hns3_set_bit(rule->input_set, INNER_VLAN_TAG1,
554 					     1);
555 				rule->key_conf.mask.vlan_tag1 =
556 				    rte_be_to_cpu_16(vlan_mask->tci);
557 			} else {
558 				hns3_set_bit(rule->input_set, INNER_VLAN_TAG2,
559 					     1);
560 				rule->key_conf.mask.vlan_tag2 =
561 				    rte_be_to_cpu_16(vlan_mask->tci);
562 			}
563 		}
564 	}
565 
566 	vlan_spec = item->spec;
567 	if (rule->key_conf.vlan_num == 1)
568 		rule->key_conf.spec.vlan_tag1 =
569 		    rte_be_to_cpu_16(vlan_spec->tci);
570 	else
571 		rule->key_conf.spec.vlan_tag2 =
572 		    rte_be_to_cpu_16(vlan_spec->tci);
573 	return 0;
574 }
575 
576 static bool
577 hns3_check_ipv4_mask_supported(const struct rte_flow_item_ipv4 *ipv4_mask)
578 {
579 	if (ipv4_mask->hdr.total_length || ipv4_mask->hdr.packet_id ||
580 	    ipv4_mask->hdr.fragment_offset || ipv4_mask->hdr.time_to_live ||
581 	    ipv4_mask->hdr.hdr_checksum)
582 		return false;
583 
584 	return true;
585 }
586 
587 static int
588 hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
589 		struct rte_flow_error *error)
590 {
591 	const struct rte_flow_item_ipv4 *ipv4_spec;
592 	const struct rte_flow_item_ipv4 *ipv4_mask;
593 
594 	hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
595 	rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4;
596 	rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
597 
598 	/* Only used to describe the protocol stack. */
599 	if (item->spec == NULL && item->mask == NULL)
600 		return 0;
601 
602 	if (item->mask) {
603 		ipv4_mask = item->mask;
604 		if (!hns3_check_ipv4_mask_supported(ipv4_mask)) {
605 			return rte_flow_error_set(error, EINVAL,
606 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
607 						  item,
608 						  "Only support src & dst ip,tos,proto in IPV4");
609 		}
610 
611 		if (ipv4_mask->hdr.src_addr) {
612 			hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
613 			rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID] =
614 			    rte_be_to_cpu_32(ipv4_mask->hdr.src_addr);
615 		}
616 
617 		if (ipv4_mask->hdr.dst_addr) {
618 			hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
619 			rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID] =
620 			    rte_be_to_cpu_32(ipv4_mask->hdr.dst_addr);
621 		}
622 
623 		if (ipv4_mask->hdr.type_of_service) {
624 			hns3_set_bit(rule->input_set, INNER_IP_TOS, 1);
625 			rule->key_conf.mask.ip_tos =
626 			    ipv4_mask->hdr.type_of_service;
627 		}
628 
629 		if (ipv4_mask->hdr.next_proto_id) {
630 			hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
631 			rule->key_conf.mask.ip_proto =
632 			    ipv4_mask->hdr.next_proto_id;
633 		}
634 	}
635 
636 	ipv4_spec = item->spec;
637 	rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID] =
638 	    rte_be_to_cpu_32(ipv4_spec->hdr.src_addr);
639 	rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID] =
640 	    rte_be_to_cpu_32(ipv4_spec->hdr.dst_addr);
641 	rule->key_conf.spec.ip_tos = ipv4_spec->hdr.type_of_service;
642 	rule->key_conf.spec.ip_proto = ipv4_spec->hdr.next_proto_id;
643 	return 0;
644 }
645 
646 static int
647 hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
648 		struct rte_flow_error *error)
649 {
650 	const struct rte_flow_item_ipv6 *ipv6_spec;
651 	const struct rte_flow_item_ipv6 *ipv6_mask;
652 
653 	hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
654 	rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6;
655 	rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
656 
657 	/* Only used to describe the protocol stack. */
658 	if (item->spec == NULL && item->mask == NULL)
659 		return 0;
660 
661 	if (item->mask) {
662 		ipv6_mask = item->mask;
663 		if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len ||
664 		    ipv6_mask->hdr.hop_limits) {
665 			return rte_flow_error_set(error, EINVAL,
666 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
667 						  item,
668 						  "Only support src & dst ip,proto in IPV6");
669 		}
670 		net_addr_to_host(rule->key_conf.mask.src_ip,
671 				 (const rte_be32_t *)ipv6_mask->hdr.src_addr,
672 				 IP_ADDR_LEN);
673 		net_addr_to_host(rule->key_conf.mask.dst_ip,
674 				 (const rte_be32_t *)ipv6_mask->hdr.dst_addr,
675 				 IP_ADDR_LEN);
676 		rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto;
677 		if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID])
678 			hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
679 		if (rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID])
680 			hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
681 		if (ipv6_mask->hdr.proto)
682 			hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
683 	}
684 
685 	ipv6_spec = item->spec;
686 	net_addr_to_host(rule->key_conf.spec.src_ip,
687 			 (const rte_be32_t *)ipv6_spec->hdr.src_addr,
688 			 IP_ADDR_LEN);
689 	net_addr_to_host(rule->key_conf.spec.dst_ip,
690 			 (const rte_be32_t *)ipv6_spec->hdr.dst_addr,
691 			 IP_ADDR_LEN);
692 	rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto;
693 
694 	return 0;
695 }
696 
697 static bool
698 hns3_check_tcp_mask_supported(const struct rte_flow_item_tcp *tcp_mask)
699 {
700 	if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack ||
701 	    tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags ||
702 	    tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum ||
703 	    tcp_mask->hdr.tcp_urp)
704 		return false;
705 
706 	return true;
707 }
708 
709 static int
710 hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
711 	       struct rte_flow_error *error)
712 {
713 	const struct rte_flow_item_tcp *tcp_spec;
714 	const struct rte_flow_item_tcp *tcp_mask;
715 
716 	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
717 	rule->key_conf.spec.ip_proto = IPPROTO_TCP;
718 	rule->key_conf.mask.ip_proto = IPPROTO_MASK;
719 
720 	/* Only used to describe the protocol stack. */
721 	if (item->spec == NULL && item->mask == NULL)
722 		return 0;
723 
724 	if (item->mask) {
725 		tcp_mask = item->mask;
726 		if (!hns3_check_tcp_mask_supported(tcp_mask)) {
727 			return rte_flow_error_set(error, EINVAL,
728 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
729 						  item,
730 						  "Only support src & dst port in TCP");
731 		}
732 
733 		if (tcp_mask->hdr.src_port) {
734 			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
735 			rule->key_conf.mask.src_port =
736 			    rte_be_to_cpu_16(tcp_mask->hdr.src_port);
737 		}
738 		if (tcp_mask->hdr.dst_port) {
739 			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
740 			rule->key_conf.mask.dst_port =
741 			    rte_be_to_cpu_16(tcp_mask->hdr.dst_port);
742 		}
743 	}
744 
745 	tcp_spec = item->spec;
746 	rule->key_conf.spec.src_port = rte_be_to_cpu_16(tcp_spec->hdr.src_port);
747 	rule->key_conf.spec.dst_port = rte_be_to_cpu_16(tcp_spec->hdr.dst_port);
748 
749 	return 0;
750 }
751 
752 static int
753 hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
754 	       struct rte_flow_error *error)
755 {
756 	const struct rte_flow_item_udp *udp_spec;
757 	const struct rte_flow_item_udp *udp_mask;
758 
759 	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
760 	rule->key_conf.spec.ip_proto = IPPROTO_UDP;
761 	rule->key_conf.mask.ip_proto = IPPROTO_MASK;
762 
763 	/* Only used to describe the protocol stack. */
764 	if (item->spec == NULL && item->mask == NULL)
765 		return 0;
766 
767 	if (item->mask) {
768 		udp_mask = item->mask;
769 		if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
770 			return rte_flow_error_set(error, EINVAL,
771 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
772 						  item,
773 						  "Only support src & dst port in UDP");
774 		}
775 		if (udp_mask->hdr.src_port) {
776 			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
777 			rule->key_conf.mask.src_port =
778 			    rte_be_to_cpu_16(udp_mask->hdr.src_port);
779 		}
780 		if (udp_mask->hdr.dst_port) {
781 			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
782 			rule->key_conf.mask.dst_port =
783 			    rte_be_to_cpu_16(udp_mask->hdr.dst_port);
784 		}
785 	}
786 
787 	udp_spec = item->spec;
788 	rule->key_conf.spec.src_port = rte_be_to_cpu_16(udp_spec->hdr.src_port);
789 	rule->key_conf.spec.dst_port = rte_be_to_cpu_16(udp_spec->hdr.dst_port);
790 
791 	return 0;
792 }
793 
794 static int
795 hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
796 		struct rte_flow_error *error)
797 {
798 	const struct rte_flow_item_sctp *sctp_spec;
799 	const struct rte_flow_item_sctp *sctp_mask;
800 
801 	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
802 	rule->key_conf.spec.ip_proto = IPPROTO_SCTP;
803 	rule->key_conf.mask.ip_proto = IPPROTO_MASK;
804 
805 	/* Only used to describe the protocol stack. */
806 	if (item->spec == NULL && item->mask == NULL)
807 		return 0;
808 
809 	if (item->mask) {
810 		sctp_mask = item->mask;
811 		if (sctp_mask->hdr.cksum)
812 			return rte_flow_error_set(error, EINVAL,
813 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
814 						  item,
815 						  "Only support src & dst port in SCTP");
816 		if (sctp_mask->hdr.src_port) {
817 			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
818 			rule->key_conf.mask.src_port =
819 			    rte_be_to_cpu_16(sctp_mask->hdr.src_port);
820 		}
821 		if (sctp_mask->hdr.dst_port) {
822 			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
823 			rule->key_conf.mask.dst_port =
824 			    rte_be_to_cpu_16(sctp_mask->hdr.dst_port);
825 		}
826 		if (sctp_mask->hdr.tag) {
827 			hns3_set_bit(rule->input_set, INNER_SCTP_TAG, 1);
828 			rule->key_conf.mask.sctp_tag =
829 			    rte_be_to_cpu_32(sctp_mask->hdr.tag);
830 		}
831 	}
832 
833 	sctp_spec = item->spec;
834 	rule->key_conf.spec.src_port =
835 	    rte_be_to_cpu_16(sctp_spec->hdr.src_port);
836 	rule->key_conf.spec.dst_port =
837 	    rte_be_to_cpu_16(sctp_spec->hdr.dst_port);
838 	rule->key_conf.spec.sctp_tag = rte_be_to_cpu_32(sctp_spec->hdr.tag);
839 
840 	return 0;
841 }
842 
843 /*
844  * Check items before tunnel, save inner configs to outer configs, and clear
845  * inner configs.
846  * The key consists of two parts: meta_data and tuple keys.
847  * Meta data uses 15 bits, including vlan_num(2bit), des_port(12bit) and tunnel
848  * packet(1bit).
849  * Tuple keys use 384 bits, including ot_dst-mac(48bit), ot_dst-port(16bit),
850  * ot_tun_vni(24bit), ot_flow_id(8bit), src-mac(48bit), dst-mac(48bit),
851  * src-ip(32/128bit), dst-ip(32/128bit), src-port(16bit), dst-port(16bit),
852  * tos(8bit), ether-proto(16bit), ip-proto(8bit), vlantag1(16bit),
853  * Vlantag2(16bit) and sctp-tag(32bit).
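 *
 * As a cross-check of the widths listed above (illustration only): with
 * 32-bit IPv4 addresses the tuple fields sum to 48 + 16 + 24 + 8 + 48 + 48 +
 * 32 + 32 + 16 + 16 + 8 + 16 + 8 + 16 + 16 + 32 = 384 bits, matching the
 * stated tuple key width.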
854  */
855 static int
856 hns3_handle_tunnel(const struct rte_flow_item *item,
857 		   struct hns3_fdir_rule *rule, struct rte_flow_error *error)
858 {
859 	/* check eth config */
860 	if (rule->input_set & (BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC)))
861 		return rte_flow_error_set(error, EINVAL,
862 					  RTE_FLOW_ERROR_TYPE_ITEM,
863 					  item, "Outer eth mac is unsupported");
864 	if (rule->input_set & BIT(INNER_ETH_TYPE)) {
865 		hns3_set_bit(rule->input_set, OUTER_ETH_TYPE, 1);
866 		rule->key_conf.spec.outer_ether_type =
867 		    rule->key_conf.spec.ether_type;
868 		rule->key_conf.mask.outer_ether_type =
869 		    rule->key_conf.mask.ether_type;
870 		hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 0);
871 		rule->key_conf.spec.ether_type = 0;
872 		rule->key_conf.mask.ether_type = 0;
873 	}
874 
875 	/* check vlan config */
876 	if (rule->input_set & (BIT(INNER_VLAN_TAG1) | BIT(INNER_VLAN_TAG2)))
877 		return rte_flow_error_set(error, EINVAL,
878 					  RTE_FLOW_ERROR_TYPE_ITEM,
879 					  item,
880 					  "Outer vlan tags is unsupported");
881 
882 	/* clear vlan_num for inner vlan select */
883 	rule->key_conf.outer_vlan_num = rule->key_conf.vlan_num;
884 	rule->key_conf.vlan_num = 0;
885 
886 	/* check L3 config */
887 	if (rule->input_set &
888 	    (BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | BIT(INNER_IP_TOS)))
889 		return rte_flow_error_set(error, EINVAL,
890 					  RTE_FLOW_ERROR_TYPE_ITEM,
891 					  item, "Outer ip is unsupported");
892 	if (rule->input_set & BIT(INNER_IP_PROTO)) {
893 		hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
894 		rule->key_conf.spec.outer_proto = rule->key_conf.spec.ip_proto;
895 		rule->key_conf.mask.outer_proto = rule->key_conf.mask.ip_proto;
896 		hns3_set_bit(rule->input_set, INNER_IP_PROTO, 0);
897 		rule->key_conf.spec.ip_proto = 0;
898 		rule->key_conf.mask.ip_proto = 0;
899 	}
900 
901 	/* check L4 config */
902 	if (rule->input_set & BIT(INNER_SCTP_TAG))
903 		return rte_flow_error_set(error, EINVAL,
904 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
905 					  "Outer sctp tag is unsupported");
906 
907 	if (rule->input_set & BIT(INNER_SRC_PORT)) {
908 		hns3_set_bit(rule->input_set, OUTER_SRC_PORT, 1);
909 		rule->key_conf.spec.outer_src_port =
910 		    rule->key_conf.spec.src_port;
911 		rule->key_conf.mask.outer_src_port =
912 		    rule->key_conf.mask.src_port;
913 		hns3_set_bit(rule->input_set, INNER_SRC_PORT, 0);
914 		rule->key_conf.spec.src_port = 0;
915 		rule->key_conf.mask.src_port = 0;
916 	}
917 	if (rule->input_set & BIT(INNER_DST_PORT)) {
918 		hns3_set_bit(rule->input_set, INNER_DST_PORT, 0);
919 		rule->key_conf.spec.dst_port = 0;
920 		rule->key_conf.mask.dst_port = 0;
921 	}
922 	return 0;
923 }
924 
925 static int
926 hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
927 		 struct rte_flow_error *error)
928 {
929 	const struct rte_flow_item_vxlan *vxlan_spec;
930 	const struct rte_flow_item_vxlan *vxlan_mask;
931 
932 	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
933 	rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
934 	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
935 		rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN;
936 	else
937 		rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN_GPE;
938 
939 	/* Only used to describe the protocol stack. */
940 	if (item->spec == NULL && item->mask == NULL)
941 		return 0;
942 
943 	vxlan_mask = item->mask;
944 	vxlan_spec = item->spec;
945 
946 	if (vxlan_mask->flags)
947 		return rte_flow_error_set(error, EINVAL,
948 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
949 					  "Flags is not supported in VxLAN");
950 
951 	/* VNI must be totally masked or not. */
952 	if (memcmp(vxlan_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
953 	    memcmp(vxlan_mask->vni, zero_mask, VNI_OR_TNI_LEN))
954 		return rte_flow_error_set(error, EINVAL,
955 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
956 					  "VNI must be totally masked or not in VxLAN");
957 	if (vxlan_mask->vni[0]) {
958 		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
959 		memcpy(rule->key_conf.mask.outer_tun_vni, vxlan_mask->vni,
960 			   VNI_OR_TNI_LEN);
961 	}
962 	memcpy(rule->key_conf.spec.outer_tun_vni, vxlan_spec->vni,
963 		   VNI_OR_TNI_LEN);
964 	return 0;
965 }
966 
967 static int
968 hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
969 		 struct rte_flow_error *error)
970 {
971 	const struct rte_flow_item_nvgre *nvgre_spec;
972 	const struct rte_flow_item_nvgre *nvgre_mask;
973 
974 	hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
975 	rule->key_conf.spec.outer_proto = IPPROTO_GRE;
976 	rule->key_conf.mask.outer_proto = IPPROTO_MASK;
977 
978 	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
979 	rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_NVGRE;
980 	rule->key_conf.mask.tunnel_type = ~HNS3_TUNNEL_TYPE_NVGRE;
981 	/* Only used to describe the protocol stack. */
982 	if (item->spec == NULL && item->mask == NULL)
983 		return 0;
984 
985 	nvgre_mask = item->mask;
986 	nvgre_spec = item->spec;
987 
988 	if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
989 		return rte_flow_error_set(error, EINVAL,
990 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
991 					  "Ver/protocol is not supported in NVGRE");
992 
993 	/* TNI must be totally masked or not. */
994 	if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
995 	    memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
996 		return rte_flow_error_set(error, EINVAL,
997 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
998 					  "TNI must be totally masked or not in NVGRE");
999 
1000 	if (nvgre_mask->tni[0]) {
1001 		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
1002 		memcpy(rule->key_conf.mask.outer_tun_vni, nvgre_mask->tni,
1003 			   VNI_OR_TNI_LEN);
1004 	}
1005 	memcpy(rule->key_conf.spec.outer_tun_vni, nvgre_spec->tni,
1006 		   VNI_OR_TNI_LEN);
1007 
1008 	if (nvgre_mask->flow_id) {
1009 		hns3_set_bit(rule->input_set, OUTER_TUN_FLOW_ID, 1);
1010 		rule->key_conf.mask.outer_tun_flow_id = nvgre_mask->flow_id;
1011 	}
1012 	rule->key_conf.spec.outer_tun_flow_id = nvgre_spec->flow_id;
1013 	return 0;
1014 }
1015 
1016 static int
1017 hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1018 		  struct rte_flow_error *error)
1019 {
1020 	const struct rte_flow_item_geneve *geneve_spec;
1021 	const struct rte_flow_item_geneve *geneve_mask;
1022 
1023 	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
1024 	rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE;
1025 	rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
1026 	/* Only used to describe the protocol stack. */
1027 	if (item->spec == NULL && item->mask == NULL)
1028 		return 0;
1029 
1030 	geneve_mask = item->mask;
1031 	geneve_spec = item->spec;
1032 
1033 	if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
1034 		return rte_flow_error_set(error, EINVAL,
1035 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
1036 					  "Ver/protocol is not supported in GENEVE");
1037 	/* VNI must be totally masked or not. */
1038 	if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
1039 	    memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
1040 		return rte_flow_error_set(error, EINVAL,
1041 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
1042 					  "VNI must be totally masked or not in GENEVE");
1043 	if (geneve_mask->vni[0]) {
1044 		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
1045 		memcpy(rule->key_conf.mask.outer_tun_vni, geneve_mask->vni,
1046 			   VNI_OR_TNI_LEN);
1047 	}
1048 	memcpy(rule->key_conf.spec.outer_tun_vni, geneve_spec->vni,
1049 		   VNI_OR_TNI_LEN);
1050 	return 0;
1051 }
1052 
1053 static int
1054 hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1055 		  struct rte_flow_error *error)
1056 {
1057 	int ret;
1058 
1059 	if (item->spec == NULL && item->mask)
1060 		return rte_flow_error_set(error, EINVAL,
1061 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1062 					  "Can't configure FDIR with mask "
1063 					  "but without spec");
1064 	else if (item->spec && (item->mask == NULL))
1065 		return rte_flow_error_set(error, EINVAL,
1066 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1067 					  "Tunnel packets must configure "
1068 					  "with mask");
1069 
1070 	switch (item->type) {
1071 	case RTE_FLOW_ITEM_TYPE_VXLAN:
1072 	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1073 		ret = hns3_parse_vxlan(item, rule, error);
1074 		break;
1075 	case RTE_FLOW_ITEM_TYPE_NVGRE:
1076 		ret = hns3_parse_nvgre(item, rule, error);
1077 		break;
1078 	case RTE_FLOW_ITEM_TYPE_GENEVE:
1079 		ret = hns3_parse_geneve(item, rule, error);
1080 		break;
1081 	default:
1082 		return rte_flow_error_set(error, ENOTSUP,
1083 					  RTE_FLOW_ERROR_TYPE_ITEM,
1084 					  NULL, "Unsupported tunnel type!");
1085 	}
1086 	if (ret)
1087 		return ret;
1088 	return hns3_handle_tunnel(item, rule, error);
1089 }
1090 
1091 static int
1092 hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1093 		  struct items_step_mngr *step_mngr,
1094 		  struct rte_flow_error *error)
1095 {
1096 	int ret;
1097 
1098 	if (item->spec == NULL && item->mask)
1099 		return rte_flow_error_set(error, EINVAL,
1100 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1101 					  "Can't configure FDIR with mask "
1102 					  "but without spec");
1103 
1104 	switch (item->type) {
1105 	case RTE_FLOW_ITEM_TYPE_ETH:
1106 		ret = hns3_parse_eth(item, rule, error);
1107 		step_mngr->items = L2_next_items;
1108 		step_mngr->count = RTE_DIM(L2_next_items);
1109 		break;
1110 	case RTE_FLOW_ITEM_TYPE_VLAN:
1111 		ret = hns3_parse_vlan(item, rule, error);
1112 		step_mngr->items = L2_next_items;
1113 		step_mngr->count = RTE_DIM(L2_next_items);
1114 		break;
1115 	case RTE_FLOW_ITEM_TYPE_IPV4:
1116 		ret = hns3_parse_ipv4(item, rule, error);
1117 		step_mngr->items = L3_next_items;
1118 		step_mngr->count = RTE_DIM(L3_next_items);
1119 		break;
1120 	case RTE_FLOW_ITEM_TYPE_IPV6:
1121 		ret = hns3_parse_ipv6(item, rule, error);
1122 		step_mngr->items = L3_next_items;
1123 		step_mngr->count = RTE_DIM(L3_next_items);
1124 		break;
1125 	case RTE_FLOW_ITEM_TYPE_TCP:
1126 		ret = hns3_parse_tcp(item, rule, error);
1127 		step_mngr->items = L4_next_items;
1128 		step_mngr->count = RTE_DIM(L4_next_items);
1129 		break;
1130 	case RTE_FLOW_ITEM_TYPE_UDP:
1131 		ret = hns3_parse_udp(item, rule, error);
1132 		step_mngr->items = L4_next_items;
1133 		step_mngr->count = RTE_DIM(L4_next_items);
1134 		break;
1135 	case RTE_FLOW_ITEM_TYPE_SCTP:
1136 		ret = hns3_parse_sctp(item, rule, error);
1137 		step_mngr->items = L4_next_items;
1138 		step_mngr->count = RTE_DIM(L4_next_items);
1139 		break;
1140 	default:
1141 		return rte_flow_error_set(error, ENOTSUP,
1142 					  RTE_FLOW_ERROR_TYPE_ITEM,
1143 					  NULL, "Unsupported normal type!");
1144 	}
1145 
1146 	return ret;
1147 }
1148 
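/*
 * Check that the item type is one of the types allowed at the current step
 * of the pattern and that range matching (item->last) is not requested.
 */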
1149 static int
1150 hns3_validate_item(const struct rte_flow_item *item,
1151 		   struct items_step_mngr step_mngr,
1152 		   struct rte_flow_error *error)
1153 {
1154 	int i;
1155 
1156 	if (item->last)
1157 		return rte_flow_error_set(error, ENOTSUP,
1158 					  RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
1159 					  "Not supported last point for range");
1160 
1161 	for (i = 0; i < step_mngr.count; i++) {
1162 		if (item->type == step_mngr.items[i])
1163 			break;
1164 	}
1165 
1166 	if (i == step_mngr.count) {
1167 		return rte_flow_error_set(error, EINVAL,
1168 					  RTE_FLOW_ERROR_TYPE_ITEM,
1169 					  item, "Inval or missing item");
1170 	}
1171 	return 0;
1172 }
1173 
1174 static inline bool
1175 is_tunnel_packet(enum rte_flow_item_type type)
1176 {
1177 	if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
1178 	    type == RTE_FLOW_ITEM_TYPE_VXLAN ||
1179 	    type == RTE_FLOW_ITEM_TYPE_NVGRE ||
1180 	    type == RTE_FLOW_ITEM_TYPE_GENEVE)
1181 		return true;
1182 	return false;
1183 }
1184 
1185 /*
1186  * Parse the flow director rule.
1187  * The supported PATTERN:
1188  *   case: non-tunnel packet:
1189  *     ETH : src-mac, dst-mac, ethertype
1190  *     VLAN: tag1, tag2
1191  *     IPv4: src-ip, dst-ip, tos, proto
1192  *     IPv6: src-ip(last 32 bit addr), dst-ip(last 32 bit addr), proto
1193  *     UDP : src-port, dst-port
1194  *     TCP : src-port, dst-port
1195  *     SCTP: src-port, dst-port, tag
1196  *   case: tunnel packet:
1197  *     OUTER-ETH: ethertype
1198  *     OUTER-L3 : proto
1199  *     OUTER-L4 : src-port, dst-port
1200  *     TUNNEL   : vni, flow-id(only valid when NVGRE)
1201  *     INNER-ETH/VLAN/IPv4/IPv6/UDP/TCP/SCTP: same as non-tunnel packet
1202  * The supported ACTION:
1203  *    QUEUE
1204  *    DROP
1205  *    COUNT
1206  *    MARK: the id range [0, 4094]
1207  *    FLAG
1208  *    RSS: only valid if the firmware supports FD_QUEUE_REGION.
1209  */
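/*
 * Purely as an illustration (testpmd-style syntax; port id, address and
 * queue id are arbitrary), a rule accepted by this parser could look like:
 *   flow create 0 ingress pattern eth / ipv4 dst is 192.168.1.1 /
 *        udp dst is 53 / end actions queue index 3 / count / end
 */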
1210 static int
1211 hns3_parse_fdir_filter(struct rte_eth_dev *dev,
1212 		       const struct rte_flow_item pattern[],
1213 		       const struct rte_flow_action actions[],
1214 		       struct hns3_fdir_rule *rule,
1215 		       struct rte_flow_error *error)
1216 {
1217 	struct hns3_adapter *hns = dev->data->dev_private;
1218 	const struct rte_flow_item *item;
1219 	struct items_step_mngr step_mngr;
1220 	int ret;
1221 
1222 	/* FDIR is available only in PF driver */
1223 	if (hns->is_vf)
1224 		return rte_flow_error_set(error, ENOTSUP,
1225 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1226 					  "Fdir not supported in VF");
1227 
1228 	step_mngr.items = first_items;
1229 	step_mngr.count = RTE_DIM(first_items);
1230 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1231 		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
1232 			continue;
1233 
1234 		ret = hns3_validate_item(item, step_mngr, error);
1235 		if (ret)
1236 			return ret;
1237 
1238 		if (is_tunnel_packet(item->type)) {
1239 			ret = hns3_parse_tunnel(item, rule, error);
1240 			if (ret)
1241 				return ret;
1242 			step_mngr.items = tunnel_next_items;
1243 			step_mngr.count = RTE_DIM(tunnel_next_items);
1244 		} else {
1245 			ret = hns3_parse_normal(item, rule, &step_mngr, error);
1246 			if (ret)
1247 				return ret;
1248 		}
1249 	}
1250 
1251 	return hns3_handle_actions(dev, actions, rule, error);
1252 }
1253 
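/*
 * Drop the software lists of FDIR rules, RSS rules and flow objects kept by
 * the driver; the hardware tables are not touched here.
 */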
1254 static void
1255 hns3_filterlist_flush(struct rte_eth_dev *dev)
1256 {
1257 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1258 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
1259 	struct hns3_rss_conf_ele *rss_filter_ptr;
1260 	struct hns3_flow_mem *flow_node;
1261 
1262 	fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
1263 	while (fdir_rule_ptr) {
1264 		TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
1265 		rte_free(fdir_rule_ptr);
1266 		fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
1267 	}
1268 
1269 	rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
1270 	while (rss_filter_ptr) {
1271 		TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
1272 		rte_free(rss_filter_ptr);
1273 		rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
1274 	}
1275 
1276 	flow_node = TAILQ_FIRST(&hw->flow_list);
1277 	while (flow_node) {
1278 		TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
1279 		rte_free(flow_node->flow);
1280 		rte_free(flow_node);
1281 		flow_node = TAILQ_FIRST(&hw->flow_list);
1282 	}
1283 }
1284 
1285 static bool
1286 hns3_action_rss_same(const struct rte_flow_action_rss *comp,
1287 		     const struct rte_flow_action_rss *with)
1288 {
1289 	bool rss_key_is_same;
1290 	bool func_is_same;
1291 
1292 	/*
1293 	 * When the user flushes all RSS rules, the RSS func is set to the
1294 	 * invalid value RTE_ETH_HASH_FUNCTION_MAX. If the user then creates a
1295 	 * flow after the flush, any valid RSS func is treated as different
1296 	 * from the one used before the flush. Otherwise, when the user creates
1297 	 * an RSS action whose func is RTE_ETH_HASH_FUNCTION_DEFAULT, the func
1298 	 * is considered the same as that of the previous RSS flow.
1299 	 */
1300 	if (comp->func == RTE_ETH_HASH_FUNCTION_MAX)
1301 		func_is_same = false;
1302 	else
1303 		func_is_same = (with->func != RTE_ETH_HASH_FUNCTION_DEFAULT) ?
1304 				(comp->func == with->func) : true;
1305 
1306 	if (with->key_len == 0 || with->key == NULL)
1307 		rss_key_is_same = 1;
1308 	else
1309 		rss_key_is_same = comp->key_len == with->key_len &&
1310 		!memcmp(comp->key, with->key, with->key_len);
1311 
1312 	return (func_is_same && rss_key_is_same &&
1313 		comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) &&
1314 		comp->level == with->level &&
1315 		comp->queue_num == with->queue_num &&
1316 		!memcmp(comp->queue, with->queue,
1317 			sizeof(*with->queue) * with->queue_num));
1318 }
1319 
1320 static int
1321 hns3_rss_conf_copy(struct hns3_rss_conf *out,
1322 		   const struct rte_flow_action_rss *in)
1323 {
1324 	if (in->key_len > RTE_DIM(out->key) ||
1325 	    in->queue_num > RTE_DIM(out->queue))
1326 		return -EINVAL;
1327 	if (in->key == NULL && in->key_len)
1328 		return -EINVAL;
1329 	out->conf = (struct rte_flow_action_rss) {
1330 		.func = in->func,
1331 		.level = in->level,
1332 		.types = in->types,
1333 		.key_len = in->key_len,
1334 		.queue_num = in->queue_num,
1335 	};
1336 	out->conf.queue = memcpy(out->queue, in->queue,
1337 				sizeof(*in->queue) * in->queue_num);
1338 	if (in->key)
1339 		out->conf.key = memcpy(out->key, in->key, in->key_len);
1340 
1341 	return 0;
1342 }
1343 
1344 static bool
1345 hns3_rss_input_tuple_supported(struct hns3_hw *hw,
1346 			       const struct rte_flow_action_rss *rss)
1347 {
1348 	/*
1349 	 * For IP packets, using the src/dst port fields in the RSS hash is not
1350 	 * supported for the following packet types:
1351 	 * - IPV4 FRAG | IPV4 NONFRAG | IPV6 FRAG | IPV6 NONFRAG
1352 	 * Besides, on Kunpeng920 the NIC hardware does not support using the
1353 	 * src/dst port fields in the RSS hash for the IPv6 SCTP packet type.
1354 	 * However, Kunpeng930 and later Kunpeng series do support using the
1355 	 * src/dst port fields in the RSS hash for IPv6 SCTP packets.
1356 	 */
1357 	if (rss->types & (RTE_ETH_RSS_L4_DST_ONLY | RTE_ETH_RSS_L4_SRC_ONLY) &&
1358 	    (rss->types & RTE_ETH_RSS_IP ||
1359 	    (!hw->rss_info.ipv6_sctp_offload_supported &&
1360 	    rss->types & RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
1361 		return false;
1362 
1363 	return true;
1364 }
1365 
1366 /*
1367  * This function is used to validate the RSS action.
1368  */
1369 static int
1370 hns3_parse_rss_filter(struct rte_eth_dev *dev,
1371 		      const struct rte_flow_action *actions,
1372 		      struct rte_flow_error *error)
1373 {
1374 	struct hns3_adapter *hns = dev->data->dev_private;
1375 	struct hns3_hw *hw = &hns->hw;
1376 	struct hns3_rss_conf *rss_conf = &hw->rss_info;
1377 	const struct rte_flow_action_rss *rss;
1378 	const struct rte_flow_action *act;
1379 	uint32_t act_index = 0;
1380 	uint16_t n;
1381 
1382 	NEXT_ITEM_OF_ACTION(act, actions, act_index);
1383 	rss = act->conf;
1384 
1385 	if (rss == NULL) {
1386 		return rte_flow_error_set(error, EINVAL,
1387 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1388 					  act, "no valid queues");
1389 	}
1390 
1391 	if (rss->queue_num > RTE_DIM(rss_conf->queue))
1392 		return rte_flow_error_set(error, ENOTSUP,
1393 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1394 					  "queue number configured exceeds "
1395 					  "queue buffer size driver supported");
1396 
1397 	for (n = 0; n < rss->queue_num; n++) {
1398 		if (rss->queue[n] < hw->alloc_rss_size)
1399 			continue;
1400 		return rte_flow_error_set(error, EINVAL,
1401 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1402 					  "queue id must be less than queue number allocated to a TC");
1403 	}
1404 
1405 	if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types)
1406 		return rte_flow_error_set(error, EINVAL,
1407 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1408 					  act,
1409 					  "Flow types is unsupported by "
1410 					  "hns3's RSS");
1411 	if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX)
1412 		return rte_flow_error_set(error, ENOTSUP,
1413 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1414 					  "RSS hash func are not supported");
1415 	if (rss->level)
1416 		return rte_flow_error_set(error, ENOTSUP,
1417 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1418 					  "a nonzero RSS encapsulation level is not supported");
1419 	if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
1420 		return rte_flow_error_set(error, ENOTSUP,
1421 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1422 					  "RSS hash key must be exactly 40 bytes");
1423 
1424 	if (!hns3_rss_input_tuple_supported(hw, rss))
1425 		return rte_flow_error_set(error, EINVAL,
1426 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1427 					  &rss->types,
1428 					  "input RSS types are not supported");
1429 
1430 	act_index++;
1431 
1432 	/* Check if the next not void action is END */
1433 	NEXT_ITEM_OF_ACTION(act, actions, act_index);
1434 	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1435 		memset(rss_conf, 0, sizeof(struct hns3_rss_conf));
1436 		return rte_flow_error_set(error, EINVAL,
1437 					  RTE_FLOW_ERROR_TYPE_ACTION,
1438 					  act, "Not supported action.");
1439 	}
1440 
1441 	return 0;
1442 }
1443 
1444 static int
1445 hns3_disable_rss(struct hns3_hw *hw)
1446 {
1447 	int ret;
1448 
1449 	ret = hns3_set_rss_tuple_by_rss_hf(hw, 0);
1450 	if (ret)
1451 		return ret;
1452 
1453 	return 0;
1454 }
1455 
1456 static void
1457 hns3_adjust_rss_key(struct hns3_hw *hw, struct rte_flow_action_rss *rss_conf)
1458 {
1459 	if (rss_conf->key == NULL || rss_conf->key_len < HNS3_RSS_KEY_SIZE) {
1460 		hns3_warn(hw, "Default RSS hash key to be set");
1461 		rss_conf->key = hns3_hash_key;
1462 		rss_conf->key_len = HNS3_RSS_KEY_SIZE;
1463 	}
1464 }
1465 
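/*
 * Map the rte_flow hash function to the hardware hash algorithm.
 * RTE_ETH_HASH_FUNCTION_DEFAULT keeps the previously configured algorithm.
 */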
1466 static int
1467 hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func,
1468 			 uint8_t *hash_algo)
1469 {
1470 	enum rte_eth_hash_function algo_func = *func;
1471 	switch (algo_func) {
1472 	case RTE_ETH_HASH_FUNCTION_DEFAULT:
1473 		/* Keep *hash_algo as what it used to be */
1474 		algo_func = hw->rss_info.conf.func;
1475 		break;
1476 	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1477 		*hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ;
1478 		break;
1479 	case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
1480 		*hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE;
1481 		break;
1482 	case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
1483 		*hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP;
1484 		break;
1485 	default:
1486 		hns3_err(hw, "Invalid RSS algorithm configuration(%d)",
1487 			 algo_func);
1488 		return -EINVAL;
1489 	}
1490 	*func = algo_func;
1491 
1492 	return 0;
1493 }
1494 
1495 static int
1496 hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config)
1497 {
1498 	int ret;
1499 
1500 	hns3_adjust_rss_key(hw, rss_config);
1501 
1502 	ret = hns3_parse_rss_algorithm(hw, &rss_config->func,
1503 				       &hw->rss_info.hash_algo);
1504 	if (ret)
1505 		return ret;
1506 
1507 	ret = hns3_rss_set_algo_key(hw, rss_config->key);
1508 	if (ret)
1509 		return ret;
1510 
1511 	hw->rss_info.conf.func = rss_config->func;
1512 
1513 	ret = hns3_set_rss_tuple_by_rss_hf(hw, rss_config->types);
1514 	if (ret)
1515 		hns3_err(hw, "Update RSS tuples by rss hf failed %d", ret);
1516 
1517 	return ret;
1518 }
1519 
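/*
 * Fill the whole RSS indirection table by cycling through the first num
 * queues of the RSS action in round-robin order; every queue must lie within
 * the RSS size allocated to a TC.
 */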
1520 static int
1521 hns3_update_indir_table(struct rte_eth_dev *dev,
1522 			const struct rte_flow_action_rss *conf, uint16_t num)
1523 {
1524 	struct hns3_adapter *hns = dev->data->dev_private;
1525 	struct hns3_hw *hw = &hns->hw;
1526 	uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE_MAX];
1527 	uint16_t j;
1528 	uint32_t i;
1529 
1530 	/* Fill in redirection table */
1531 	memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl,
1532 	       sizeof(hw->rss_info.rss_indirection_tbl));
1533 	for (i = 0, j = 0; i < hw->rss_ind_tbl_size; i++, j++) {
1534 		j %= num;
1535 		if (conf->queue[j] >= hw->alloc_rss_size) {
1536 			hns3_err(hw, "queue id(%u) set to redirection table "
1537 				 "exceeds queue number(%u) allocated to a TC.",
1538 				 conf->queue[j], hw->alloc_rss_size);
1539 			return -EINVAL;
1540 		}
1541 		indir_tbl[i] = conf->queue[j];
1542 	}
1543 
1544 	return hns3_set_rss_indir_table(hw, indir_tbl, hw->rss_ind_tbl_size);
1545 }
1546 
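/*
 * Apply (add == true) or remove (add == false) an RSS rule. On add, the
 * indirection table, hash algorithm, hash key and tuple fields are programmed
 * and the configuration is cached in hw->rss_info. On remove, the RSS tuple
 * fields are cleared and the cached hash function is marked invalid.
 */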
1547 static int
1548 hns3_config_rss_filter(struct rte_eth_dev *dev,
1549 		       const struct hns3_rss_conf *conf, bool add)
1550 {
1551 	struct hns3_adapter *hns = dev->data->dev_private;
1552 	struct hns3_rss_conf_ele *rss_filter_ptr;
1553 	struct hns3_hw *hw = &hns->hw;
1554 	struct hns3_rss_conf *rss_info;
1555 	uint64_t flow_types;
1556 	uint16_t num;
1557 	int ret;
1558 
1559 	struct rte_flow_action_rss rss_flow_conf = {
1560 		.func = conf->conf.func,
1561 		.level = conf->conf.level,
1562 		.types = conf->conf.types,
1563 		.key_len = conf->conf.key_len,
1564 		.queue_num = conf->conf.queue_num,
1565 		.key = conf->conf.key_len ?
1566 		    (void *)(uintptr_t)conf->conf.key : NULL,
1567 		.queue = conf->conf.queue,
1568 	};
1569 
1570 	/* Filter the unsupported flow types */
1571 	flow_types = conf->conf.types ?
1572 		     rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT :
1573 		     hw->rss_info.conf.types;
1574 	if (flow_types != rss_flow_conf.types)
1575 		hns3_warn(hw, "modified RSS types based on hardware support, "
1576 			      "requested:0x%" PRIx64 " configured:0x%" PRIx64,
1577 			  rss_flow_conf.types, flow_types);
1578 	/* Update the useful flow types */
1579 	rss_flow_conf.types = flow_types;
1580 
1581 	rss_info = &hw->rss_info;
1582 	if (!add) {
1583 		if (!conf->valid)
1584 			return 0;
1585 
1586 		ret = hns3_disable_rss(hw);
1587 		if (ret) {
1588 			hns3_err(hw, "RSS disable failed(%d)", ret);
1589 			return ret;
1590 		}
1591 
1592 		if (rss_flow_conf.queue_num) {
1593 			/*
1594 			 * Because the contents of the queue pointer have been
1595 			 * reset to 0, rss_info->conf.queue should be set to NULL.
1596 			 */
1597 			rss_info->conf.queue = NULL;
1598 			rss_info->conf.queue_num = 0;
1599 		}
1600 
1601 		/* set RSS func invalid after flushed */
1602 		rss_info->conf.func = RTE_ETH_HASH_FUNCTION_MAX;
1603 		return 0;
1604 	}
1605 
1606 	/* Set rx queues to use */
1607 	num = RTE_MIN(dev->data->nb_rx_queues, rss_flow_conf.queue_num);
1608 	if (rss_flow_conf.queue_num > num)
1609 		hns3_warn(hw, "Config queue numbers %u are beyond the scope of truncated",
1610 			  rss_flow_conf.queue_num);
1611 	hns3_info(hw, "Max of contiguous %u PF queues are configured", num);
1612 
1613 	rte_spinlock_lock(&hw->lock);
1614 	if (num) {
1615 		ret = hns3_update_indir_table(dev, &rss_flow_conf, num);
1616 		if (ret)
1617 			goto rss_config_err;
1618 	}
1619 
1620 	/* Set hash algorithm and flow types by the user's config */
1621 	ret = hns3_hw_rss_hash_set(hw, &rss_flow_conf);
1622 	if (ret)
1623 		goto rss_config_err;
1624 
1625 	ret = hns3_rss_conf_copy(rss_info, &rss_flow_conf);
1626 	if (ret) {
1627 		hns3_err(hw, "RSS config init fail(%d)", ret);
1628 		goto rss_config_err;
1629 	}
1630 
1631 	/*
1632 	 * When a new RSS rule is created, the old rule is overridden and
1633 	 * marked invalid.
1634 	 */
1635 	TAILQ_FOREACH(rss_filter_ptr, &hw->flow_rss_list, entries)
1636 		rss_filter_ptr->filter_info.valid = false;
1637 
1638 rss_config_err:
1639 	rte_spinlock_unlock(&hw->lock);
1640 
1641 	return ret;
1642 }
1643 
1644 static int
1645 hns3_clear_rss_filter(struct rte_eth_dev *dev)
1646 {
1647 	struct hns3_adapter *hns = dev->data->dev_private;
1648 	struct hns3_rss_conf_ele *rss_filter_ptr;
1649 	struct hns3_hw *hw = &hns->hw;
1650 	int rss_rule_succ_cnt = 0; /* count for success of clearing RSS rules */
1651 	int rss_rule_fail_cnt = 0; /* count for failure of clearing RSS rules */
1652 	int ret = 0;
1653 
1654 	rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
1655 	while (rss_filter_ptr) {
1656 		TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
1657 		ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
1658 					     false);
1659 		if (ret)
1660 			rss_rule_fail_cnt++;
1661 		else
1662 			rss_rule_succ_cnt++;
1663 		rte_free(rss_filter_ptr);
1664 		rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
1665 	}
1666 
1667 	if (rss_rule_fail_cnt) {
1668 		hns3_err(hw, "fail to delete all RSS filters, success num = %d "
1669 			     "fail num = %d", rss_rule_succ_cnt,
1670 			     rss_rule_fail_cnt);
1671 		ret = -EIO;
1672 	}
1673 
1674 	return ret;
1675 }
1676 
1677 int
1678 hns3_restore_rss_filter(struct rte_eth_dev *dev)
1679 {
1680 	struct hns3_adapter *hns = dev->data->dev_private;
1681 	struct hns3_hw *hw = &hns->hw;
1682 
1683 	/* When the user flushes all rules, there is no RSS rule to restore */
1684 	if (hw->rss_info.conf.func == RTE_ETH_HASH_FUNCTION_MAX)
1685 		return 0;
1686 
1687 	return hns3_config_rss_filter(dev, &hw->rss_info, true);
1688 }
1689 
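/*
 * Reject a configuration identical to the one already stored in
 * hw->rss_info, then add or remove the RSS rule in hardware.
 */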
1690 static int
1691 hns3_flow_parse_rss(struct rte_eth_dev *dev,
1692 		    const struct hns3_rss_conf *conf, bool add)
1693 {
1694 	struct hns3_adapter *hns = dev->data->dev_private;
1695 	struct hns3_hw *hw = &hns->hw;
1696 	bool ret;
1697 
1698 	ret = hns3_action_rss_same(&hw->rss_info.conf, &conf->conf);
1699 	if (ret) {
1700 		hns3_err(hw, "duplicate RSS configuration entered: %d", ret);
1701 		return -EINVAL;
1702 	}
1703 
1704 	return hns3_config_rss_filter(dev, conf, add);
1705 }
1706 
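/* Basic sanity checks on the attribute, pattern and action arguments. */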
1707 static int
1708 hns3_flow_args_check(const struct rte_flow_attr *attr,
1709 		     const struct rte_flow_item pattern[],
1710 		     const struct rte_flow_action actions[],
1711 		     struct rte_flow_error *error)
1712 {
1713 	if (pattern == NULL)
1714 		return rte_flow_error_set(error, EINVAL,
1715 					  RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1716 					  NULL, "NULL pattern.");
1717 
1718 	if (actions == NULL)
1719 		return rte_flow_error_set(error, EINVAL,
1720 					  RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1721 					  NULL, "NULL action.");
1722 
1723 	if (attr == NULL)
1724 		return rte_flow_error_set(error, EINVAL,
1725 					  RTE_FLOW_ERROR_TYPE_ATTR,
1726 					  NULL, "NULL attribute.");
1727 
1728 	return hns3_check_attr(attr, error);
1729 }
1730 
1731 /*
1732  * Check if the flow rule is supported by hns3.
1733  * It only checks the format; it does not guarantee that the rule can be
1734  * programmed into the HW, because there may not be enough room for it.
1735  */
1736 static int
1737 hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1738 		   const struct rte_flow_item pattern[],
1739 		   const struct rte_flow_action actions[],
1740 		   struct rte_flow_error *error)
1741 {
1742 	struct hns3_fdir_rule fdir_rule;
1743 	int ret;
1744 
1745 	ret = hns3_flow_args_check(attr, pattern, actions, error);
1746 	if (ret)
1747 		return ret;
1748 
1749 	if (hns3_find_rss_general_action(pattern, actions))
1750 		return hns3_parse_rss_filter(dev, actions, error);
1751 
1752 	memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1753 	return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1754 }
1755 
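/*
 * Create an RSS rule: allocate a filter element, program the RSS
 * configuration to hardware, record it in hw->flow_rss_list and attach it
 * to the rte_flow handle as an RTE_ETH_FILTER_HASH rule.
 */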
1756 static int
1757 hns3_flow_create_rss_rule(struct rte_eth_dev *dev,
1758 			  const struct rte_flow_action *act,
1759 			  struct rte_flow *flow)
1760 {
1761 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1762 	struct hns3_rss_conf_ele *rss_filter_ptr;
1763 	const struct hns3_rss_conf *rss_conf;
1764 	int ret;
1765 
1766 	rss_filter_ptr = rte_zmalloc("hns3 rss filter",
1767 				     sizeof(struct hns3_rss_conf_ele), 0);
1768 	if (rss_filter_ptr == NULL) {
1769 		hns3_err(hw, "failed to allocate hns3_rss_filter memory");
1770 		return -ENOMEM;
1771 	}
1772 
1773 	/*
1774 	 * Program the rule to hardware only after all the preceding steps have
1775 	 * succeeded, which simplifies rolling back rules already present in
1776 	 * the hardware.
1777 	 */
1778 	rss_conf = (const struct hns3_rss_conf *)act->conf;
1779 	ret = hns3_flow_parse_rss(dev, rss_conf, true);
1780 	if (ret != 0) {
1781 		rte_free(rss_filter_ptr);
1782 		return ret;
1783 	}
1784 
1785 	hns3_rss_conf_copy(&rss_filter_ptr->filter_info, &rss_conf->conf);
1786 	rss_filter_ptr->filter_info.valid = true;
1787 	TAILQ_INSERT_TAIL(&hw->flow_rss_list, rss_filter_ptr, entries);
1788 	flow->rule = rss_filter_ptr;
1789 	flow->filter_type = RTE_ETH_FILTER_HASH;
1790 
1791 	return 0;
1792 }
1793 
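/*
 * Create an FDIR rule: parse the pattern and actions, create a counter if
 * the rule requests one, program the rule to hardware and track it in
 * hw->flow_fdir_list. On failure the counter and memory are rolled back.
 */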
1794 static int
1795 hns3_flow_create_fdir_rule(struct rte_eth_dev *dev,
1796 			   const struct rte_flow_item pattern[],
1797 			   const struct rte_flow_action actions[],
1798 			   struct rte_flow_error *error,
1799 			   struct rte_flow *flow)
1800 {
1801 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1802 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1803 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
1804 	struct hns3_fdir_rule fdir_rule;
1805 	bool indir;
1806 	int ret;
1807 
1808 	memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1809 	ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1810 	if (ret != 0)
1811 		return ret;
1812 
1813 	indir = !!(fdir_rule.flags & HNS3_RULE_FLAG_COUNTER_INDIR);
1814 	if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) {
1815 		ret = hns3_counter_new(dev, indir, fdir_rule.act_cnt.id,
1816 				       error);
1817 		if (ret != 0)
1818 			return ret;
1819 
1820 		flow->counter_id = fdir_rule.act_cnt.id;
1821 	}
1822 
1823 	fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
1824 				    sizeof(struct hns3_fdir_rule_ele), 0);
1825 	if (fdir_rule_ptr == NULL) {
1826 		hns3_err(hw, "failed to allocate fdir_rule memory.");
1827 		ret = -ENOMEM;
1828 		goto err_malloc;
1829 	}
1830 
1831 	/*
1832 	 * Program the rule to hardware only after all the preceding steps have
1833 	 * succeeded, which simplifies rolling back rules already present in
1834 	 * the hardware.
1835 	 */
1836 	ret = hns3_fdir_filter_program(hns, &fdir_rule, false);
1837 	if (ret != 0)
1838 		goto err_fdir_filter;
1839 
1840 	memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule,
1841 		sizeof(struct hns3_fdir_rule));
1842 	TAILQ_INSERT_TAIL(&hw->flow_fdir_list, fdir_rule_ptr, entries);
1843 	flow->rule = fdir_rule_ptr;
1844 	flow->filter_type = RTE_ETH_FILTER_FDIR;
1845 
1846 	return 0;
1847 
1848 err_fdir_filter:
1849 	rte_free(fdir_rule_ptr);
1850 err_malloc:
1851 	if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1852 		hns3_counter_release(dev, fdir_rule.act_cnt.id);
1853 
1854 	return ret;
1855 }
1856 
1857 /*
1858  * Create a flow rule.
1859  * Theoretically one rule can match more than one filter.
1860  * We let it use the first filter it hits, so the sequence of the filter
1861  * types matters.
1862  */
1863 static struct rte_flow *
1864 hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1865 		 const struct rte_flow_item pattern[],
1866 		 const struct rte_flow_action actions[],
1867 		 struct rte_flow_error *error)
1868 {
1869 	struct hns3_adapter *hns = dev->data->dev_private;
1870 	struct hns3_hw *hw = &hns->hw;
1871 	struct hns3_flow_mem *flow_node;
1872 	const struct rte_flow_action *act;
1873 	struct rte_flow *flow;
1874 	int ret;
1875 
1876 	ret = hns3_flow_validate(dev, attr, pattern, actions, error);
1877 	if (ret)
1878 		return NULL;
1879 
1880 	flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
1881 	if (flow == NULL) {
1882 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1883 				   NULL, "Failed to allocate flow memory");
1884 		return NULL;
1885 	}
1886 	flow_node = rte_zmalloc("hns3 flow node",
1887 				sizeof(struct hns3_flow_mem), 0);
1888 	if (flow_node == NULL) {
1889 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1890 				   NULL, "Failed to allocate flow list memory");
1891 		rte_free(flow);
1892 		return NULL;
1893 	}
1894 
1895 	flow_node->flow = flow;
1896 	TAILQ_INSERT_TAIL(&hw->flow_list, flow_node, entries);
1897 
1898 	act = hns3_find_rss_general_action(pattern, actions);
1899 	if (act)
1900 		ret = hns3_flow_create_rss_rule(dev, act, flow);
1901 	else
1902 		ret = hns3_flow_create_fdir_rule(dev, pattern, actions,
1903 						 error, flow);
1904 	if (ret == 0)
1905 		return flow;
1906 
1907 	rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1908 			   "Failed to create flow");
1909 	TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
1910 	rte_free(flow_node);
1911 	rte_free(flow);
1912 
1913 	return NULL;
1914 }
1915 
1916 /* Destroy a flow rule on hns3. */
1917 static int
1918 hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1919 		  struct rte_flow_error *error)
1920 {
1921 	struct hns3_adapter *hns = dev->data->dev_private;
1922 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
1923 	struct hns3_rss_conf_ele *rss_filter_ptr;
1924 	struct hns3_flow_mem *flow_node;
1925 	enum rte_filter_type filter_type;
1926 	struct hns3_fdir_rule fdir_rule;
1927 	struct hns3_hw *hw = &hns->hw;
1928 	int ret;
1929 
1930 	if (flow == NULL)
1931 		return rte_flow_error_set(error, EINVAL,
1932 					  RTE_FLOW_ERROR_TYPE_HANDLE,
1933 					  flow, "Flow is NULL");
1934 
1935 	filter_type = flow->filter_type;
1936 	switch (filter_type) {
1937 	case RTE_ETH_FILTER_FDIR:
1938 		fdir_rule_ptr = (struct hns3_fdir_rule_ele *)flow->rule;
1939 		memcpy(&fdir_rule, &fdir_rule_ptr->fdir_conf,
1940 			   sizeof(struct hns3_fdir_rule));
1941 
1942 		ret = hns3_fdir_filter_program(hns, &fdir_rule, true);
1943 		if (ret)
1944 			return rte_flow_error_set(error, EIO,
1945 						  RTE_FLOW_ERROR_TYPE_HANDLE,
1946 						  flow,
1947 						  "Failed to destroy FDIR rule, try again");
1948 		if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1949 			hns3_counter_release(dev, fdir_rule.act_cnt.id);
1950 		TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
1951 		rte_free(fdir_rule_ptr);
1952 		fdir_rule_ptr = NULL;
1953 		break;
1954 	case RTE_ETH_FILTER_HASH:
1955 		rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule;
1956 		ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
1957 					     false);
1958 		if (ret)
1959 			return rte_flow_error_set(error, EIO,
1960 						  RTE_FLOW_ERROR_TYPE_HANDLE,
1961 						  flow,
1962 						  "Failed to destroy RSS rule, try again");
1963 		TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
1964 		rte_free(rss_filter_ptr);
1965 		rss_filter_ptr = NULL;
1966 		break;
1967 	default:
1968 		return rte_flow_error_set(error, EINVAL,
1969 					  RTE_FLOW_ERROR_TYPE_HANDLE, flow,
1970 					  "Unsupported filter type");
1971 	}
1972 
1973 	TAILQ_FOREACH(flow_node, &hw->flow_list, entries) {
1974 		if (flow_node->flow == flow) {
1975 			TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
1976 			rte_free(flow_node);
1977 			flow_node = NULL;
1978 			break;
1979 		}
1980 	}
1981 	rte_free(flow);
1982 
1983 	return 0;
1984 }
1985 
1986 /* Destroy all flow rules associated with a port on hns3. */
1987 static int
1988 hns3_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1989 {
1990 	struct hns3_adapter *hns = dev->data->dev_private;
1991 	int ret;
1992 
1993 	/* FDIR is available only in PF driver */
1994 	if (!hns->is_vf) {
1995 		ret = hns3_clear_all_fdir_filter(hns);
1996 		if (ret) {
1997 			rte_flow_error_set(error, ret,
1998 					   RTE_FLOW_ERROR_TYPE_HANDLE,
1999 					   NULL, "Failed to flush rule");
2000 			return ret;
2001 		}
2002 		hns3_counter_flush(dev);
2003 	}
2004 
2005 	ret = hns3_clear_rss_filter(dev);
2006 	if (ret) {
2007 		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
2008 				   NULL, "Failed to flush rss filter");
2009 		return ret;
2010 	}
2011 
2012 	hns3_filterlist_flush(dev);
2013 
2014 	return 0;
2015 }
2016 
2017 /* Query an existing flow rule. */
2018 static int
2019 hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
2020 		const struct rte_flow_action *actions, void *data,
2021 		struct rte_flow_error *error)
2022 {
2023 	struct rte_flow_action_rss *rss_conf;
2024 	struct hns3_rss_conf_ele *rss_rule;
2025 	struct rte_flow_query_count *qc;
2026 	int ret;
2027 
2028 	if (!flow->rule)
2029 		return rte_flow_error_set(error, EINVAL,
2030 			RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "invalid rule");
2031 
2032 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2033 		switch (actions->type) {
2034 		case RTE_FLOW_ACTION_TYPE_VOID:
2035 			break;
2036 		case RTE_FLOW_ACTION_TYPE_COUNT:
2037 			qc = (struct rte_flow_query_count *)data;
2038 			ret = hns3_counter_query(dev, flow, qc, error);
2039 			if (ret)
2040 				return ret;
2041 			break;
2042 		case RTE_FLOW_ACTION_TYPE_RSS:
2043 			if (flow->filter_type != RTE_ETH_FILTER_HASH) {
2044 				return rte_flow_error_set(error, ENOTSUP,
2045 					RTE_FLOW_ERROR_TYPE_ACTION,
2046 					actions, "action is not supported");
2047 			}
2048 			rss_conf = (struct rte_flow_action_rss *)data;
2049 			rss_rule = (struct hns3_rss_conf_ele *)flow->rule;
2050 			rte_memcpy(rss_conf, &rss_rule->filter_info.conf,
2051 				   sizeof(struct rte_flow_action_rss));
2052 			break;
2053 		default:
2054 			return rte_flow_error_set(error, ENOTSUP,
2055 				RTE_FLOW_ERROR_TYPE_ACTION,
2056 				actions, "action is not supported");
2057 		}
2058 	}
2059 
2060 	return 0;
2061 }
2062 
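/*
 * The *_wrap callbacks below serialize all generic flow operations with
 * hw->flows_lock; together with RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE set in
 * hns3_flow_init(), this makes the flow API thread-safe for this driver.
 */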
2063 static int
2064 hns3_flow_validate_wrap(struct rte_eth_dev *dev,
2065 			const struct rte_flow_attr *attr,
2066 			const struct rte_flow_item pattern[],
2067 			const struct rte_flow_action actions[],
2068 			struct rte_flow_error *error)
2069 {
2070 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2071 	int ret;
2072 
2073 	pthread_mutex_lock(&hw->flows_lock);
2074 	ret = hns3_flow_validate(dev, attr, pattern, actions, error);
2075 	pthread_mutex_unlock(&hw->flows_lock);
2076 
2077 	return ret;
2078 }
2079 
2080 static struct rte_flow *
2081 hns3_flow_create_wrap(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2082 		      const struct rte_flow_item pattern[],
2083 		      const struct rte_flow_action actions[],
2084 		      struct rte_flow_error *error)
2085 {
2086 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2087 	struct rte_flow *flow;
2088 
2089 	pthread_mutex_lock(&hw->flows_lock);
2090 	flow = hns3_flow_create(dev, attr, pattern, actions, error);
2091 	pthread_mutex_unlock(&hw->flows_lock);
2092 
2093 	return flow;
2094 }
2095 
2096 static int
2097 hns3_flow_destroy_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
2098 		       struct rte_flow_error *error)
2099 {
2100 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2101 	int ret;
2102 
2103 	pthread_mutex_lock(&hw->flows_lock);
2104 	ret = hns3_flow_destroy(dev, flow, error);
2105 	pthread_mutex_unlock(&hw->flows_lock);
2106 
2107 	return ret;
2108 }
2109 
2110 static int
2111 hns3_flow_flush_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error)
2112 {
2113 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2114 	int ret;
2115 
2116 	pthread_mutex_lock(&hw->flows_lock);
2117 	ret = hns3_flow_flush(dev, error);
2118 	pthread_mutex_unlock(&hw->flows_lock);
2119 
2120 	return ret;
2121 }
2122 
2123 static int
2124 hns3_flow_query_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
2125 		     const struct rte_flow_action *actions, void *data,
2126 		     struct rte_flow_error *error)
2127 {
2128 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2129 	int ret;
2130 
2131 	pthread_mutex_lock(&hw->flows_lock);
2132 	ret = hns3_flow_query(dev, flow, actions, data, error);
2133 	pthread_mutex_unlock(&hw->flows_lock);
2134 
2135 	return ret;
2136 }
2137 
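/*
 * Indirect actions are supported for ingress only, and only the COUNT
 * action type can be created as an indirect action.
 */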
2138 static int
2139 hns3_check_indir_action(const struct rte_flow_indir_action_conf *conf,
2140 			const struct rte_flow_action *action,
2141 			struct rte_flow_error *error)
2142 {
2143 	if (!conf->ingress)
2144 		return rte_flow_error_set(error, EINVAL,
2145 				RTE_FLOW_ERROR_TYPE_ACTION,
2146 				NULL, "Indir action ingress can't be zero");
2147 
2148 	if (conf->egress)
2149 		return rte_flow_error_set(error, EINVAL,
2150 				RTE_FLOW_ERROR_TYPE_ACTION,
2151 				NULL, "Indir action does not support egress");
2152 
2153 	if (conf->transfer)
2154 		return rte_flow_error_set(error, EINVAL,
2155 				RTE_FLOW_ERROR_TYPE_ACTION,
2156 				NULL, "Indir action does not support transfer");
2157 
2158 	if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
2159 		return rte_flow_error_set(error, EINVAL,
2160 				RTE_FLOW_ERROR_TYPE_ACTION,
2161 				NULL, "Indir action only supports count");
2162 
2163 	return 0;
2164 }
2165 
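/*
 * Create an indirect (shared) COUNT action: validate the counter id against
 * the stage-1 counter resources, create the hardware counter and mark it as
 * indirect so that flow rules can reference it.
 */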
2166 static struct rte_flow_action_handle *
2167 hns3_flow_action_create(struct rte_eth_dev *dev,
2168 			const struct rte_flow_indir_action_conf *conf,
2169 			const struct rte_flow_action *action,
2170 			struct rte_flow_error *error)
2171 {
2172 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2173 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2174 	const struct rte_flow_action_count *act_count;
2175 	struct rte_flow_action_handle *handle = NULL;
2176 	struct hns3_flow_counter *counter;
2177 
2178 	if (hns3_check_indir_action(conf, action, error))
2179 		return NULL;
2180 
2181 	handle = rte_zmalloc("hns3 action handle",
2182 			     sizeof(struct rte_flow_action_handle), 0);
2183 	if (handle == NULL) {
2184 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
2185 				   NULL, "Failed to allocate action memory");
2186 		return NULL;
2187 	}
2188 
2189 	pthread_mutex_lock(&hw->flows_lock);
2190 
2191 	act_count = (const struct rte_flow_action_count *)action->conf;
2192 	if (act_count->id >= pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1]) {
2193 		rte_flow_error_set(error, EINVAL,
2194 				   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2195 				   action, "Invalid counter id");
2196 		goto err_exit;
2197 	}
2198 
2199 	if (hns3_counter_new(dev, false, act_count->id, error))
2200 		goto err_exit;
2201 
2202 	counter = hns3_counter_lookup(dev, act_count->id);
2203 	if (counter == NULL) {
2204 		rte_flow_error_set(error, EINVAL,
2205 				   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2206 				   action, "Counter id not found");
2207 		goto err_exit;
2208 	}
2209 
2210 	counter->indirect = true;
2211 	handle->indirect_type = HNS3_INDIRECT_ACTION_TYPE_COUNT;
2212 	handle->counter_id = counter->id;
2213 
2214 	pthread_mutex_unlock(&hw->flows_lock);
2215 	return handle;
2216 
2217 err_exit:
2218 	pthread_mutex_unlock(&hw->flows_lock);
2219 	rte_free(handle);
2220 	return NULL;
2221 }
2222 
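/*
 * Destroy an indirect COUNT action. The request is rejected with EBUSY
 * while the counter is still referenced by a flow rule (ref_cnt > 1).
 */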
2223 static int
2224 hns3_flow_action_destroy(struct rte_eth_dev *dev,
2225 			 struct rte_flow_action_handle *handle,
2226 			 struct rte_flow_error *error)
2227 {
2228 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2229 	struct hns3_flow_counter *counter;
2230 
2231 	pthread_mutex_lock(&hw->flows_lock);
2232 
2233 	if (handle->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT) {
2234 		pthread_mutex_unlock(&hw->flows_lock);
2235 		return rte_flow_error_set(error, EINVAL,
2236 					RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2237 					handle, "Invalid indirect type");
2238 	}
2239 
2240 	counter = hns3_counter_lookup(dev, handle->counter_id);
2241 	if (counter == NULL) {
2242 		pthread_mutex_unlock(&hw->flows_lock);
2243 		return rte_flow_error_set(error, EINVAL,
2244 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2245 				handle, "Counter id does not exist");
2246 	}
2247 
2248 	if (counter->ref_cnt > 1) {
2249 		pthread_mutex_unlock(&hw->flows_lock);
2250 		return rte_flow_error_set(error, EBUSY,
2251 				RTE_FLOW_ERROR_TYPE_HANDLE,
2252 				handle, "Counter id in use");
2253 	}
2254 
2255 	(void)hns3_counter_release(dev, handle->counter_id);
2256 	rte_free(handle);
2257 
2258 	pthread_mutex_unlock(&hw->flows_lock);
2259 	return 0;
2260 }
2261 
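/*
 * Query an indirect COUNT action by wrapping its counter id in a temporary
 * rte_flow and reusing hns3_counter_query().
 */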
2262 static int
2263 hns3_flow_action_query(struct rte_eth_dev *dev,
2264 		 const struct rte_flow_action_handle *handle,
2265 		 void *data,
2266 		 struct rte_flow_error *error)
2267 {
2268 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2269 	struct rte_flow flow;
2270 	int ret;
2271 
2272 	pthread_mutex_lock(&hw->flows_lock);
2273 
2274 	if (handle->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT) {
2275 		pthread_mutex_unlock(&hw->flows_lock);
2276 		return rte_flow_error_set(error, EINVAL,
2277 					RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2278 					handle, "Invalid indirect type");
2279 	}
2280 
2281 	memset(&flow, 0, sizeof(flow));
2282 	flow.counter_id = handle->counter_id;
2283 	ret = hns3_counter_query(dev, &flow,
2284 				 (struct rte_flow_query_count *)data, error);
2285 	pthread_mutex_unlock(&hw->flows_lock);
2286 	return ret;
2287 }
2288 
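/*
 * Generic flow ops exposed through hns3_dev_flow_ops_get(). Flow isolated
 * mode is not supported (.isolate is NULL).
 */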
2289 static const struct rte_flow_ops hns3_flow_ops = {
2290 	.validate = hns3_flow_validate_wrap,
2291 	.create = hns3_flow_create_wrap,
2292 	.destroy = hns3_flow_destroy_wrap,
2293 	.flush = hns3_flow_flush_wrap,
2294 	.query = hns3_flow_query_wrap,
2295 	.isolate = NULL,
2296 	.action_handle_create = hns3_flow_action_create,
2297 	.action_handle_destroy = hns3_flow_action_destroy,
2298 	.action_handle_query = hns3_flow_action_query,
2299 };
2300 
2301 int
2302 hns3_dev_flow_ops_get(struct rte_eth_dev *dev,
2303 		      const struct rte_flow_ops **ops)
2304 {
2305 	struct hns3_hw *hw;
2306 
2307 	hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2308 	if (hw->adapter_state >= HNS3_NIC_CLOSED)
2309 		return -ENODEV;
2310 
2311 	*ops = &hns3_flow_ops;
2312 	return 0;
2313 }
2314 
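/*
 * Initialize flow management for the port: create the process-shared mutex
 * protecting the flow ops, mark the ops as thread-safe and initialize the
 * FDIR, RSS and flow lists. Only the primary process does this.
 */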
2315 void
2316 hns3_flow_init(struct rte_eth_dev *dev)
2317 {
2318 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2319 	pthread_mutexattr_t attr;
2320 
2321 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2322 		return;
2323 
2324 	pthread_mutexattr_init(&attr);
2325 	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
2326 	pthread_mutex_init(&hw->flows_lock, &attr);
2327 	dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
2328 
2329 	TAILQ_INIT(&hw->flow_fdir_list);
2330 	TAILQ_INIT(&hw->flow_rss_list);
2331 	TAILQ_INIT(&hw->flow_list);
2332 }
2333 
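/* Flush all flow rules when the port is torn down (primary process only). */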
2334 void
2335 hns3_flow_uninit(struct rte_eth_dev *dev)
2336 {
2337 	struct rte_flow_error error;
2338 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2339 		hns3_flow_flush_wrap(dev, &error);
2340 }
2341