xref: /dpdk/drivers/net/hns3/hns3_flow.c (revision 4aa10e5dc1b0fd6cc5b1b18770ac603e2c33a66c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 HiSilicon Limited.
3  */
4 
5 #include <rte_flow_driver.h>
6 #include <rte_io.h>
7 #include <rte_malloc.h>
8 
9 #include "hns3_ethdev.h"
10 #include "hns3_logs.h"
11 #include "hns3_flow.h"
12 
13 static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF };
14 static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 };
15 
16 /* Special Filter id for non-specific packet flagging. Don't change value */
17 #define HNS3_MAX_FILTER_ID	0x0FFF
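/*
 * Note: the FLAG action reuses HNS3_MAX_FILTER_ID as its mark id, so the MARK
 * action only accepts ids below this value (see hns3_handle_actions()).
 */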
18 
19 #define ETHER_TYPE_MASK		0xFFFF
20 #define IPPROTO_MASK		0xFF
21 #define TUNNEL_TYPE_MASK	0xFFFF
22 
23 #define HNS3_TUNNEL_TYPE_VXLAN		0x12B5
24 #define HNS3_TUNNEL_TYPE_VXLAN_GPE	0x12B6
25 #define HNS3_TUNNEL_TYPE_GENEVE		0x17C1
26 #define HNS3_TUNNEL_TYPE_NVGRE		0x6558
27 
28 static enum rte_flow_item_type first_items[] = {
29 	RTE_FLOW_ITEM_TYPE_ETH,
30 	RTE_FLOW_ITEM_TYPE_IPV4,
31 	RTE_FLOW_ITEM_TYPE_IPV6,
32 	RTE_FLOW_ITEM_TYPE_TCP,
33 	RTE_FLOW_ITEM_TYPE_UDP,
34 	RTE_FLOW_ITEM_TYPE_SCTP,
35 	RTE_FLOW_ITEM_TYPE_ICMP,
36 	RTE_FLOW_ITEM_TYPE_NVGRE,
37 	RTE_FLOW_ITEM_TYPE_VXLAN,
38 	RTE_FLOW_ITEM_TYPE_GENEVE,
39 	RTE_FLOW_ITEM_TYPE_VXLAN_GPE
40 };
41 
42 static enum rte_flow_item_type L2_next_items[] = {
43 	RTE_FLOW_ITEM_TYPE_VLAN,
44 	RTE_FLOW_ITEM_TYPE_IPV4,
45 	RTE_FLOW_ITEM_TYPE_IPV6
46 };
47 
48 static enum rte_flow_item_type L3_next_items[] = {
49 	RTE_FLOW_ITEM_TYPE_TCP,
50 	RTE_FLOW_ITEM_TYPE_UDP,
51 	RTE_FLOW_ITEM_TYPE_SCTP,
52 	RTE_FLOW_ITEM_TYPE_NVGRE,
53 	RTE_FLOW_ITEM_TYPE_ICMP
54 };
55 
56 static enum rte_flow_item_type L4_next_items[] = {
57 	RTE_FLOW_ITEM_TYPE_VXLAN,
58 	RTE_FLOW_ITEM_TYPE_GENEVE,
59 	RTE_FLOW_ITEM_TYPE_VXLAN_GPE
60 };
61 
62 static enum rte_flow_item_type tunnel_next_items[] = {
63 	RTE_FLOW_ITEM_TYPE_ETH,
64 	RTE_FLOW_ITEM_TYPE_VLAN
65 };
66 
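/*
 * Tracks which item types are allowed at the current position in the pattern;
 * it is updated as the pattern items are parsed (see hns3_parse_fdir_filter()
 * and hns3_parse_normal()).
 */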
67 struct items_step_mngr {
68 	enum rte_flow_item_type *items;
69 	size_t count;
70 };
71 
72 static inline void
73 net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len)
74 {
75 	size_t i;
76 
77 	for (i = 0; i < len; i++)
78 		dst[i] = rte_be_to_cpu_32(src[i]);
79 }
80 
81 /*
82  * This function is used to find the general RSS action.
83  * 1. As we know, RSS is used to spread packets among several queues. The flow
84  *    API provides struct rte_flow_action_rss, and the user can configure its
85  *    fields, such as func/level/types/key/queue, to control the RSS function.
86  * 2. The flow API also supports queue region configuration for hns3. It is
87  *    implemented by FDIR + RSS in hns3 hardware: the user can create one FDIR
88  *    rule whose action is an RSS queue region.
89  * 3. When the action is RSS, the following rule is used to distinguish them:
90  *    Case 1: the pattern has ETH and the action's queue_num > 0, which
91  *            indicates a queue region configuration.
92  *    Otherwise: a general RSS action.
93  */
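/*
 * Illustrative testpmd-style examples of the two cases above (for reference
 * only, not part of the driver logic):
 *   queue region (handled by the FDIR process, this function returns NULL):
 *     flow create 0 ingress pattern eth / end actions rss queues 0 1 2 3 end / end
 *   general RSS action (returned by this function):
 *     flow create 0 ingress pattern end actions rss types ipv4-tcp end / end
 */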
94 static const struct rte_flow_action *
95 hns3_find_rss_general_action(const struct rte_flow_item pattern[],
96 			     const struct rte_flow_action actions[])
97 {
98 	const struct rte_flow_action_rss *rss_act;
99 	const struct rte_flow_action *act = NULL;
100 	bool have_eth = false;
101 
102 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
103 		if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
104 			act = actions;
105 			break;
106 		}
107 	}
108 	if (!act)
109 		return NULL;
110 
111 	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
112 		if (pattern->type == RTE_FLOW_ITEM_TYPE_ETH) {
113 			have_eth = true;
114 			break;
115 		}
116 	}
117 
118 	rss_act = act->conf;
119 	if (have_eth && rss_act->queue_num) {
120 		/*
121 		 * The pattern has ETH and the action's queue_num > 0, indicating
122 		 * this is a queue region configuration.
123 		 * Because queue region is implemented by FDIR + RSS in hns3
124 		 * hardware, it needs to enter the FDIR process, so return NULL
125 		 * here to avoid entering the RSS process.
126 		 */
127 		return NULL;
128 	}
129 
130 	return act;
131 }
132 
133 static inline struct hns3_flow_counter *
134 hns3_counter_lookup(struct rte_eth_dev *dev, uint32_t id)
135 {
136 	struct hns3_adapter *hns = dev->data->dev_private;
137 	struct hns3_pf *pf = &hns->pf;
138 	struct hns3_flow_counter *cnt;
139 
140 	LIST_FOREACH(cnt, &pf->flow_counters, next) {
141 		if (cnt->id == id)
142 			return cnt;
143 	}
144 	return NULL;
145 }
146 
147 static int
148 hns3_counter_new(struct rte_eth_dev *dev, uint32_t indirect, uint32_t id,
149 		 struct rte_flow_error *error)
150 {
151 	struct hns3_adapter *hns = dev->data->dev_private;
152 	struct hns3_pf *pf = &hns->pf;
153 	struct hns3_hw *hw = &hns->hw;
154 	struct hns3_flow_counter *cnt;
155 	uint64_t value;
156 	int ret;
157 
158 	cnt = hns3_counter_lookup(dev, id);
159 	if (cnt) {
160 		if (!cnt->indirect || cnt->indirect != indirect)
161 			return rte_flow_error_set(error, ENOTSUP,
162 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
163 				cnt,
164 				"Counter id is in use, but the indirect flag does not match");
165 		/* Clear the indirect counter on first use. */
166 		if (cnt->indirect && cnt->ref_cnt == 1)
167 			(void)hns3_fd_get_count(hw, id, &value);
168 		cnt->ref_cnt++;
169 		return 0;
170 	}
171 
172 	/* Clear the counter by read ops because the counter is read-clear */
173 	ret = hns3_fd_get_count(hw, id, &value);
174 	if (ret)
175 		return rte_flow_error_set(error, EIO,
176 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
177 					  "Clear counter failed!");
178 
179 	cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
180 	if (cnt == NULL)
181 		return rte_flow_error_set(error, ENOMEM,
182 					  RTE_FLOW_ERROR_TYPE_HANDLE, cnt,
183 					  "Alloc mem for counter failed");
184 	cnt->id = id;
185 	cnt->indirect = indirect;
186 	cnt->ref_cnt = 1;
187 	cnt->hits = 0;
188 	LIST_INSERT_HEAD(&pf->flow_counters, cnt, next);
189 	return 0;
190 }
191 
192 static int
193 hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
194 		   struct rte_flow_query_count *qc,
195 		   struct rte_flow_error *error)
196 {
197 	struct hns3_adapter *hns = dev->data->dev_private;
198 	struct hns3_flow_counter *cnt;
199 	uint64_t value;
200 	int ret;
201 
202 	/* FDIR is available only in PF driver */
203 	if (hns->is_vf)
204 		return rte_flow_error_set(error, ENOTSUP,
205 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
206 					  "Fdir is not supported in VF");
207 	cnt = hns3_counter_lookup(dev, flow->counter_id);
208 	if (cnt == NULL)
209 		return rte_flow_error_set(error, EINVAL,
210 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
211 					  "Can't find counter id");
212 
213 	ret = hns3_fd_get_count(&hns->hw, flow->counter_id, &value);
214 	if (ret) {
215 		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
216 				   NULL, "Read counter fail.");
217 		return ret;
218 	}
219 	qc->hits_set = 1;
220 	qc->hits = value;
221 	qc->bytes_set = 0;
222 	qc->bytes = 0;
223 
224 	return 0;
225 }
226 
227 static int
228 hns3_counter_release(struct rte_eth_dev *dev, uint32_t id)
229 {
230 	struct hns3_adapter *hns = dev->data->dev_private;
231 	struct hns3_hw *hw = &hns->hw;
232 	struct hns3_flow_counter *cnt;
233 
234 	cnt = hns3_counter_lookup(dev, id);
235 	if (cnt == NULL) {
236 		hns3_err(hw, "Can't find available counter to release");
237 		return -EINVAL;
238 	}
239 	cnt->ref_cnt--;
240 	if (cnt->ref_cnt == 0) {
241 		LIST_REMOVE(cnt, next);
242 		rte_free(cnt);
243 	}
244 	return 0;
245 }
246 
247 static void
248 hns3_counter_flush(struct rte_eth_dev *dev)
249 {
250 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
251 	LIST_HEAD(counters, hns3_flow_counter) indir_counters;
252 	struct hns3_flow_counter *cnt_ptr;
253 
254 	LIST_INIT(&indir_counters);
255 	cnt_ptr = LIST_FIRST(&pf->flow_counters);
256 	while (cnt_ptr) {
257 		LIST_REMOVE(cnt_ptr, next);
258 		if (cnt_ptr->indirect)
259 			LIST_INSERT_HEAD(&indir_counters, cnt_ptr, next);
260 		else
261 			rte_free(cnt_ptr);
262 		cnt_ptr = LIST_FIRST(&pf->flow_counters);
263 	}
264 
265 	/* Reset the indirect action and add to pf->flow_counters list. */
266 	cnt_ptr = LIST_FIRST(&indir_counters);
267 	while (cnt_ptr) {
268 		LIST_REMOVE(cnt_ptr, next);
269 		cnt_ptr->ref_cnt = 1;
270 		cnt_ptr->hits = 0;
271 		LIST_INSERT_HEAD(&pf->flow_counters, cnt_ptr, next);
272 		cnt_ptr = LIST_FIRST(&indir_counters);
273 	}
274 }
275 
276 static int
277 hns3_handle_action_queue(struct rte_eth_dev *dev,
278 			 const struct rte_flow_action *action,
279 			 struct hns3_fdir_rule *rule,
280 			 struct rte_flow_error *error)
281 {
282 	struct hns3_adapter *hns = dev->data->dev_private;
283 	const struct rte_flow_action_queue *queue;
284 	struct hns3_hw *hw = &hns->hw;
285 
286 	queue = (const struct rte_flow_action_queue *)action->conf;
287 	if (queue->index >= hw->data->nb_rx_queues) {
288 		hns3_err(hw, "queue ID(%u) is greater than the number of available queues (%u) in the driver.",
289 			 queue->index, hw->data->nb_rx_queues);
290 		return rte_flow_error_set(error, EINVAL,
291 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
292 					  action, "Invalid queue ID in PF");
293 	}
294 
295 	rule->queue_id = queue->index;
296 	rule->nb_queues = 1;
297 	rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
298 	return 0;
299 }
300 
301 static int
302 hns3_handle_action_queue_region(struct rte_eth_dev *dev,
303 				const struct rte_flow_action *action,
304 				struct hns3_fdir_rule *rule,
305 				struct rte_flow_error *error)
306 {
307 	struct hns3_adapter *hns = dev->data->dev_private;
308 	const struct rte_flow_action_rss *conf = action->conf;
309 	struct hns3_hw *hw = &hns->hw;
310 	uint16_t idx;
311 
312 	if (!hns3_dev_get_support(hw, FD_QUEUE_REGION))
313 		return rte_flow_error_set(error, ENOTSUP,
314 			RTE_FLOW_ERROR_TYPE_ACTION, action,
315 			"Configuring a queue region is not supported!");
316 
317 	if ((!rte_is_power_of_2(conf->queue_num)) ||
318 		conf->queue_num > hw->rss_size_max ||
319 		conf->queue[0] >= hw->data->nb_rx_queues ||
320 		conf->queue[0] + conf->queue_num > hw->data->nb_rx_queues) {
321 		return rte_flow_error_set(error, EINVAL,
322 			RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
323 			"Invalid start queue ID and queue num! The start queue "
324 			"ID must be valid, and the queue num must be a power "
325 			"of 2 and <= rss_size_max.");
326 	}
327 
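	/*
	 * For example (illustrative): queues {8, 9, 10, 11} with queue_num = 4
	 * form a valid queue region, while {8, 10, 12, 14} would be rejected by
	 * the continuity check below.
	 */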
328 	for (idx = 1; idx < conf->queue_num; idx++) {
329 		if (conf->queue[idx] != conf->queue[idx - 1] + 1)
330 			return rte_flow_error_set(error, EINVAL,
331 				RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
332 				"Invalid queue ID sequence! The queue IDs "
333 				"must be consecutively increasing.");
334 	}
335 
336 	rule->queue_id = conf->queue[0];
337 	rule->nb_queues = conf->queue_num;
338 	rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
339 	return 0;
340 }
341 
342 static int
343 hns3_handle_action_indirect(struct rte_eth_dev *dev,
344 			    const struct rte_flow_action *action,
345 			    struct hns3_fdir_rule *rule,
346 			    struct rte_flow_error *error)
347 {
348 	const struct rte_flow_action_handle *indir = action->conf;
349 
350 	if (indir->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT)
351 		return rte_flow_error_set(error, EINVAL,
352 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
353 				action, "Invalid indirect type");
354 
355 	if (hns3_counter_lookup(dev, indir->counter_id) == NULL)
356 		return rte_flow_error_set(error, EINVAL,
357 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
358 				action, "Counter id not exist");
359 
360 	rule->act_cnt.id = indir->counter_id;
361 	rule->flags |= (HNS3_RULE_FLAG_COUNTER | HNS3_RULE_FLAG_COUNTER_INDIR);
362 
363 	return 0;
364 }
365 
366 /*
367  * Parse the actions from the provided action list.
368  * The actions are validated as they are copied into the rule.
369  *
370  * @param actions[in]
371  * @param rule[out]
372  *   NIC specific actions derived from the actions.
373  * @param error[out]
374  */
375 static int
376 hns3_handle_actions(struct rte_eth_dev *dev,
377 		    const struct rte_flow_action actions[],
378 		    struct hns3_fdir_rule *rule, struct rte_flow_error *error)
379 {
380 	struct hns3_adapter *hns = dev->data->dev_private;
381 	const struct rte_flow_action_count *act_count;
382 	const struct rte_flow_action_mark *mark;
383 	struct hns3_pf *pf = &hns->pf;
384 	uint32_t counter_num;
385 	int ret;
386 
387 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
388 		switch (actions->type) {
389 		case RTE_FLOW_ACTION_TYPE_QUEUE:
390 			ret = hns3_handle_action_queue(dev, actions, rule,
391 						       error);
392 			if (ret)
393 				return ret;
394 			break;
395 		case RTE_FLOW_ACTION_TYPE_DROP:
396 			rule->action = HNS3_FD_ACTION_DROP_PACKET;
397 			break;
398 		/*
399 		 * Here the RSS action really means a queue region.
400 		 * Queue region is implemented by FDIR + RSS in hns3 hardware:
401 		 * the FDIR action selects one queue region (start_queue_id and
402 		 * queue_num), then RSS spreads packets across the queue region
403 		 * by the RSS algorithm.
404 		 */
405 		case RTE_FLOW_ACTION_TYPE_RSS:
406 			ret = hns3_handle_action_queue_region(dev, actions,
407 							      rule, error);
408 			if (ret)
409 				return ret;
410 			break;
411 		case RTE_FLOW_ACTION_TYPE_MARK:
412 			mark =
413 			    (const struct rte_flow_action_mark *)actions->conf;
414 			if (mark->id >= HNS3_MAX_FILTER_ID)
415 				return rte_flow_error_set(error, EINVAL,
416 						RTE_FLOW_ERROR_TYPE_ACTION_CONF,
417 						actions,
418 						"Invalid Mark ID");
419 			rule->fd_id = mark->id;
420 			rule->flags |= HNS3_RULE_FLAG_FDID;
421 			break;
422 		case RTE_FLOW_ACTION_TYPE_FLAG:
423 			rule->fd_id = HNS3_MAX_FILTER_ID;
424 			rule->flags |= HNS3_RULE_FLAG_FDID;
425 			break;
426 		case RTE_FLOW_ACTION_TYPE_COUNT:
427 			act_count =
428 			    (const struct rte_flow_action_count *)actions->conf;
429 			counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
430 			if (act_count->id >= counter_num)
431 				return rte_flow_error_set(error, EINVAL,
432 						RTE_FLOW_ERROR_TYPE_ACTION_CONF,
433 						actions,
434 						"Invalid counter id");
435 			rule->act_cnt = *act_count;
436 			rule->flags |= HNS3_RULE_FLAG_COUNTER;
437 			rule->flags &= ~HNS3_RULE_FLAG_COUNTER_INDIR;
438 			break;
439 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
440 			ret = hns3_handle_action_indirect(dev, actions, rule,
441 							  error);
442 			if (ret)
443 				return ret;
444 			break;
445 		case RTE_FLOW_ACTION_TYPE_VOID:
446 			break;
447 		default:
448 			return rte_flow_error_set(error, ENOTSUP,
449 						  RTE_FLOW_ERROR_TYPE_ACTION,
450 						  NULL, "Unsupported action");
451 		}
452 	}
453 
454 	return 0;
455 }
456 
457 static int
458 hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
459 {
460 	if (!attr->ingress)
461 		return rte_flow_error_set(error, EINVAL,
462 					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
463 					  attr, "Ingress can't be zero");
464 	if (attr->egress)
465 		return rte_flow_error_set(error, ENOTSUP,
466 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
467 					  attr, "Not support egress");
468 	if (attr->transfer)
469 		return rte_flow_error_set(error, ENOTSUP,
470 					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
471 					  attr, "No support for transfer");
472 	if (attr->priority)
473 		return rte_flow_error_set(error, ENOTSUP,
474 					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
475 					  attr, "Not support priority");
476 	if (attr->group)
477 		return rte_flow_error_set(error, ENOTSUP,
478 					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
479 					  attr, "Not support group");
480 	return 0;
481 }
482 
483 static int
484 hns3_parse_eth(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
485 	       struct rte_flow_error *error __rte_unused)
486 {
487 	const struct rte_flow_item_eth *eth_spec;
488 	const struct rte_flow_item_eth *eth_mask;
489 
490 	/* Only used to describe the protocol stack. */
491 	if (item->spec == NULL && item->mask == NULL)
492 		return 0;
493 
494 	if (item->mask) {
495 		eth_mask = item->mask;
496 		if (eth_mask->hdr.ether_type) {
497 			hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
498 			rule->key_conf.mask.ether_type =
499 			    rte_be_to_cpu_16(eth_mask->hdr.ether_type);
500 		}
501 		if (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr)) {
502 			hns3_set_bit(rule->input_set, INNER_SRC_MAC, 1);
503 			memcpy(rule->key_conf.mask.src_mac,
504 			       eth_mask->hdr.src_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
505 		}
506 		if (!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr)) {
507 			hns3_set_bit(rule->input_set, INNER_DST_MAC, 1);
508 			memcpy(rule->key_conf.mask.dst_mac,
509 			       eth_mask->hdr.dst_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
510 		}
511 	}
512 
513 	eth_spec = item->spec;
514 	rule->key_conf.spec.ether_type = rte_be_to_cpu_16(eth_spec->hdr.ether_type);
515 	memcpy(rule->key_conf.spec.src_mac, eth_spec->hdr.src_addr.addr_bytes,
516 	       RTE_ETHER_ADDR_LEN);
517 	memcpy(rule->key_conf.spec.dst_mac, eth_spec->hdr.dst_addr.addr_bytes,
518 	       RTE_ETHER_ADDR_LEN);
519 	return 0;
520 }
521 
522 static int
523 hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
524 		struct rte_flow_error *error)
525 {
526 	const struct rte_flow_item_vlan *vlan_spec;
527 	const struct rte_flow_item_vlan *vlan_mask;
528 
529 	rule->key_conf.vlan_num++;
530 	if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
531 		return rte_flow_error_set(error, EINVAL,
532 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
533 					  "Vlan_num is more than 2");
534 
535 	/* Only used to describe the protocol stack. */
536 	if (item->spec == NULL && item->mask == NULL)
537 		return 0;
538 
539 	if (item->mask) {
540 		vlan_mask = item->mask;
541 		if (vlan_mask->hdr.vlan_tci) {
542 			if (rule->key_conf.vlan_num == 1) {
543 				hns3_set_bit(rule->input_set, INNER_VLAN_TAG1,
544 					     1);
545 				rule->key_conf.mask.vlan_tag1 =
546 				    rte_be_to_cpu_16(vlan_mask->hdr.vlan_tci);
547 			} else {
548 				hns3_set_bit(rule->input_set, INNER_VLAN_TAG2,
549 					     1);
550 				rule->key_conf.mask.vlan_tag2 =
551 				    rte_be_to_cpu_16(vlan_mask->hdr.vlan_tci);
552 			}
553 		}
554 	}
555 
556 	vlan_spec = item->spec;
557 	if (rule->key_conf.vlan_num == 1)
558 		rule->key_conf.spec.vlan_tag1 =
559 		    rte_be_to_cpu_16(vlan_spec->hdr.vlan_tci);
560 	else
561 		rule->key_conf.spec.vlan_tag2 =
562 		    rte_be_to_cpu_16(vlan_spec->hdr.vlan_tci);
563 	return 0;
564 }
565 
566 static bool
567 hns3_check_ipv4_mask_supported(const struct rte_flow_item_ipv4 *ipv4_mask)
568 {
569 	if (ipv4_mask->hdr.total_length || ipv4_mask->hdr.packet_id ||
570 	    ipv4_mask->hdr.fragment_offset || ipv4_mask->hdr.time_to_live ||
571 	    ipv4_mask->hdr.hdr_checksum)
572 		return false;
573 
574 	return true;
575 }
576 
577 static int
578 hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
579 		struct rte_flow_error *error)
580 {
581 	const struct rte_flow_item_ipv4 *ipv4_spec;
582 	const struct rte_flow_item_ipv4 *ipv4_mask;
583 
584 	hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
585 	rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4;
586 	rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
587 
588 	/* Only used to describe the protocol stack. */
589 	if (item->spec == NULL && item->mask == NULL)
590 		return 0;
591 
592 	if (item->mask) {
593 		ipv4_mask = item->mask;
594 		if (!hns3_check_ipv4_mask_supported(ipv4_mask)) {
595 			return rte_flow_error_set(error, EINVAL,
596 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
597 						  item,
598 						  "Only support src & dst ip,tos,proto in IPV4");
599 		}
600 
601 		if (ipv4_mask->hdr.src_addr) {
602 			hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
603 			rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID] =
604 			    rte_be_to_cpu_32(ipv4_mask->hdr.src_addr);
605 		}
606 
607 		if (ipv4_mask->hdr.dst_addr) {
608 			hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
609 			rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID] =
610 			    rte_be_to_cpu_32(ipv4_mask->hdr.dst_addr);
611 		}
612 
613 		if (ipv4_mask->hdr.type_of_service) {
614 			hns3_set_bit(rule->input_set, INNER_IP_TOS, 1);
615 			rule->key_conf.mask.ip_tos =
616 			    ipv4_mask->hdr.type_of_service;
617 		}
618 
619 		if (ipv4_mask->hdr.next_proto_id) {
620 			hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
621 			rule->key_conf.mask.ip_proto =
622 			    ipv4_mask->hdr.next_proto_id;
623 		}
624 	}
625 
626 	ipv4_spec = item->spec;
627 	rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID] =
628 	    rte_be_to_cpu_32(ipv4_spec->hdr.src_addr);
629 	rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID] =
630 	    rte_be_to_cpu_32(ipv4_spec->hdr.dst_addr);
631 	rule->key_conf.spec.ip_tos = ipv4_spec->hdr.type_of_service;
632 	rule->key_conf.spec.ip_proto = ipv4_spec->hdr.next_proto_id;
633 	return 0;
634 }
635 
636 static int
637 hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
638 		struct rte_flow_error *error)
639 {
640 	const struct rte_flow_item_ipv6 *ipv6_spec;
641 	const struct rte_flow_item_ipv6 *ipv6_mask;
642 
643 	hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
644 	rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6;
645 	rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
646 
647 	/* Only used to describe the protocol stack. */
648 	if (item->spec == NULL && item->mask == NULL)
649 		return 0;
650 
651 	if (item->mask) {
652 		ipv6_mask = item->mask;
653 		if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len ||
654 		    ipv6_mask->hdr.hop_limits) {
655 			return rte_flow_error_set(error, EINVAL,
656 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
657 						  item,
658 						  "Only support src & dst ip,proto in IPV6");
659 		}
660 		net_addr_to_host(rule->key_conf.mask.src_ip,
661 				 (const rte_be32_t *)ipv6_mask->hdr.src_addr,
662 				 IP_ADDR_LEN);
663 		net_addr_to_host(rule->key_conf.mask.dst_ip,
664 				 (const rte_be32_t *)ipv6_mask->hdr.dst_addr,
665 				 IP_ADDR_LEN);
666 		rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto;
667 		if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID])
668 			hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
669 		if (rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID])
670 			hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
671 		if (ipv6_mask->hdr.proto)
672 			hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
673 	}
674 
675 	ipv6_spec = item->spec;
676 	net_addr_to_host(rule->key_conf.spec.src_ip,
677 			 (const rte_be32_t *)ipv6_spec->hdr.src_addr,
678 			 IP_ADDR_LEN);
679 	net_addr_to_host(rule->key_conf.spec.dst_ip,
680 			 (const rte_be32_t *)ipv6_spec->hdr.dst_addr,
681 			 IP_ADDR_LEN);
682 	rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto;
683 
684 	return 0;
685 }
686 
687 static bool
688 hns3_check_tcp_mask_supported(const struct rte_flow_item_tcp *tcp_mask)
689 {
690 	if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack ||
691 	    tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags ||
692 	    tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum ||
693 	    tcp_mask->hdr.tcp_urp)
694 		return false;
695 
696 	return true;
697 }
698 
699 static int
700 hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
701 	       struct rte_flow_error *error)
702 {
703 	const struct rte_flow_item_tcp *tcp_spec;
704 	const struct rte_flow_item_tcp *tcp_mask;
705 
706 	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
707 	rule->key_conf.spec.ip_proto = IPPROTO_TCP;
708 	rule->key_conf.mask.ip_proto = IPPROTO_MASK;
709 
710 	/* Only used to describe the protocol stack. */
711 	if (item->spec == NULL && item->mask == NULL)
712 		return 0;
713 
714 	if (item->mask) {
715 		tcp_mask = item->mask;
716 		if (!hns3_check_tcp_mask_supported(tcp_mask)) {
717 			return rte_flow_error_set(error, EINVAL,
718 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
719 						  item,
720 						  "Only support src & dst port in TCP");
721 		}
722 
723 		if (tcp_mask->hdr.src_port) {
724 			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
725 			rule->key_conf.mask.src_port =
726 			    rte_be_to_cpu_16(tcp_mask->hdr.src_port);
727 		}
728 		if (tcp_mask->hdr.dst_port) {
729 			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
730 			rule->key_conf.mask.dst_port =
731 			    rte_be_to_cpu_16(tcp_mask->hdr.dst_port);
732 		}
733 	}
734 
735 	tcp_spec = item->spec;
736 	rule->key_conf.spec.src_port = rte_be_to_cpu_16(tcp_spec->hdr.src_port);
737 	rule->key_conf.spec.dst_port = rte_be_to_cpu_16(tcp_spec->hdr.dst_port);
738 
739 	return 0;
740 }
741 
742 static int
743 hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
744 	       struct rte_flow_error *error)
745 {
746 	const struct rte_flow_item_udp *udp_spec;
747 	const struct rte_flow_item_udp *udp_mask;
748 
749 	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
750 	rule->key_conf.spec.ip_proto = IPPROTO_UDP;
751 	rule->key_conf.mask.ip_proto = IPPROTO_MASK;
752 
753 	/* Only used to describe the protocol stack. */
754 	if (item->spec == NULL && item->mask == NULL)
755 		return 0;
756 
757 	if (item->mask) {
758 		udp_mask = item->mask;
759 		if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
760 			return rte_flow_error_set(error, EINVAL,
761 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
762 						  item,
763 						  "Only support src & dst port in UDP");
764 		}
765 		if (udp_mask->hdr.src_port) {
766 			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
767 			rule->key_conf.mask.src_port =
768 			    rte_be_to_cpu_16(udp_mask->hdr.src_port);
769 		}
770 		if (udp_mask->hdr.dst_port) {
771 			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
772 			rule->key_conf.mask.dst_port =
773 			    rte_be_to_cpu_16(udp_mask->hdr.dst_port);
774 		}
775 	}
776 
777 	udp_spec = item->spec;
778 	rule->key_conf.spec.src_port = rte_be_to_cpu_16(udp_spec->hdr.src_port);
779 	rule->key_conf.spec.dst_port = rte_be_to_cpu_16(udp_spec->hdr.dst_port);
780 
781 	return 0;
782 }
783 
784 static int
785 hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
786 		struct rte_flow_error *error)
787 {
788 	const struct rte_flow_item_sctp *sctp_spec;
789 	const struct rte_flow_item_sctp *sctp_mask;
790 
791 	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
792 	rule->key_conf.spec.ip_proto = IPPROTO_SCTP;
793 	rule->key_conf.mask.ip_proto = IPPROTO_MASK;
794 
795 	/* Only used to describe the protocol stack. */
796 	if (item->spec == NULL && item->mask == NULL)
797 		return 0;
798 
799 	if (item->mask) {
800 		sctp_mask = item->mask;
801 		if (sctp_mask->hdr.cksum)
802 			return rte_flow_error_set(error, EINVAL,
803 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
804 						  item,
805 						  "Only support src & dst port in SCTP");
806 		if (sctp_mask->hdr.src_port) {
807 			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
808 			rule->key_conf.mask.src_port =
809 			    rte_be_to_cpu_16(sctp_mask->hdr.src_port);
810 		}
811 		if (sctp_mask->hdr.dst_port) {
812 			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
813 			rule->key_conf.mask.dst_port =
814 			    rte_be_to_cpu_16(sctp_mask->hdr.dst_port);
815 		}
816 		if (sctp_mask->hdr.tag) {
817 			hns3_set_bit(rule->input_set, INNER_SCTP_TAG, 1);
818 			rule->key_conf.mask.sctp_tag =
819 			    rte_be_to_cpu_32(sctp_mask->hdr.tag);
820 		}
821 	}
822 
823 	sctp_spec = item->spec;
824 	rule->key_conf.spec.src_port =
825 	    rte_be_to_cpu_16(sctp_spec->hdr.src_port);
826 	rule->key_conf.spec.dst_port =
827 	    rte_be_to_cpu_16(sctp_spec->hdr.dst_port);
828 	rule->key_conf.spec.sctp_tag = rte_be_to_cpu_32(sctp_spec->hdr.tag);
829 
830 	return 0;
831 }
832 
833 /*
834  * Check the items before the tunnel item, save the inner configs to the outer
835  * configs, and clear the inner configs.
836  * The key consists of two parts: meta_data and tuple keys.
837  * Meta data uses 15 bits, including vlan_num(2bit), des_port(12bit) and tunnel
838  * packet(1bit).
839  * Tuple keys use 384 bits, including ot_dst-mac(48bit), ot_dst-port(16bit),
840  * ot_tun_vni(24bit), ot_flow_id(8bit), src-mac(48bit), dst-mac(48bit),
841  * src-ip(32/128bit), dst-ip(32/128bit), src-port(16bit), dst-port(16bit),
842  * tos(8bit), ether-proto(16bit), ip-proto(8bit), vlantag1(16bit),
843  * vlantag2(16bit) and sctp-tag(32bit).
844  */
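/*
 * Illustrative example: with pattern ETH / IPV4 / UDP / VXLAN / ..., the
 * ETH/IPV4/UDP items are first parsed as inner fields; when the VXLAN item is
 * reached, this function moves the supported inner fields (e.g. ether type,
 * IP proto, L4 source port) into the corresponding outer fields and clears the
 * inner ones, so that the items after the tunnel item describe the inner packet.
 */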
845 static int
846 hns3_handle_tunnel(const struct rte_flow_item *item,
847 		   struct hns3_fdir_rule *rule, struct rte_flow_error *error)
848 {
849 	/* check eth config */
850 	if (rule->input_set & (BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC)))
851 		return rte_flow_error_set(error, EINVAL,
852 					  RTE_FLOW_ERROR_TYPE_ITEM,
853 					  item, "Outer eth mac is unsupported");
854 	if (rule->input_set & BIT(INNER_ETH_TYPE)) {
855 		hns3_set_bit(rule->input_set, OUTER_ETH_TYPE, 1);
856 		rule->key_conf.spec.outer_ether_type =
857 		    rule->key_conf.spec.ether_type;
858 		rule->key_conf.mask.outer_ether_type =
859 		    rule->key_conf.mask.ether_type;
860 		hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 0);
861 		rule->key_conf.spec.ether_type = 0;
862 		rule->key_conf.mask.ether_type = 0;
863 	}
864 
865 	/* check vlan config */
866 	if (rule->input_set & (BIT(INNER_VLAN_TAG1) | BIT(INNER_VLAN_TAG2)))
867 		return rte_flow_error_set(error, EINVAL,
868 					  RTE_FLOW_ERROR_TYPE_ITEM,
869 					  item,
870 					  "Outer vlan tags is unsupported");
871 
872 	/* clear vlan_num for inner vlan select */
873 	rule->key_conf.outer_vlan_num = rule->key_conf.vlan_num;
874 	rule->key_conf.vlan_num = 0;
875 
876 	/* check L3 config */
877 	if (rule->input_set &
878 	    (BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | BIT(INNER_IP_TOS)))
879 		return rte_flow_error_set(error, EINVAL,
880 					  RTE_FLOW_ERROR_TYPE_ITEM,
881 					  item, "Outer ip is unsupported");
882 	if (rule->input_set & BIT(INNER_IP_PROTO)) {
883 		hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
884 		rule->key_conf.spec.outer_proto = rule->key_conf.spec.ip_proto;
885 		rule->key_conf.mask.outer_proto = rule->key_conf.mask.ip_proto;
886 		hns3_set_bit(rule->input_set, INNER_IP_PROTO, 0);
887 		rule->key_conf.spec.ip_proto = 0;
888 		rule->key_conf.mask.ip_proto = 0;
889 	}
890 
891 	/* check L4 config */
892 	if (rule->input_set & BIT(INNER_SCTP_TAG))
893 		return rte_flow_error_set(error, EINVAL,
894 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
895 					  "Outer sctp tag is unsupported");
896 
897 	if (rule->input_set & BIT(INNER_SRC_PORT)) {
898 		hns3_set_bit(rule->input_set, OUTER_SRC_PORT, 1);
899 		rule->key_conf.spec.outer_src_port =
900 		    rule->key_conf.spec.src_port;
901 		rule->key_conf.mask.outer_src_port =
902 		    rule->key_conf.mask.src_port;
903 		hns3_set_bit(rule->input_set, INNER_SRC_PORT, 0);
904 		rule->key_conf.spec.src_port = 0;
905 		rule->key_conf.mask.src_port = 0;
906 	}
907 	if (rule->input_set & BIT(INNER_DST_PORT)) {
908 		hns3_set_bit(rule->input_set, INNER_DST_PORT, 0);
909 		rule->key_conf.spec.dst_port = 0;
910 		rule->key_conf.mask.dst_port = 0;
911 	}
912 	return 0;
913 }
914 
915 static int
916 hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
917 		 struct rte_flow_error *error)
918 {
919 	const struct rte_flow_item_vxlan *vxlan_spec;
920 	const struct rte_flow_item_vxlan *vxlan_mask;
921 
922 	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
923 	rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
924 	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
925 		rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN;
926 	else
927 		rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN_GPE;
928 
929 	/* Only used to describe the protocol stack. */
930 	if (item->spec == NULL && item->mask == NULL)
931 		return 0;
932 
933 	vxlan_mask = item->mask;
934 	vxlan_spec = item->spec;
935 
936 	if (vxlan_mask->hdr.flags)
937 		return rte_flow_error_set(error, EINVAL,
938 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
939 					  "Flags is not supported in VxLAN");
940 
941 	/* VNI must be totally masked or not. */
942 	if (memcmp(vxlan_mask->hdr.vni, full_mask, VNI_OR_TNI_LEN) &&
943 	    memcmp(vxlan_mask->hdr.vni, zero_mask, VNI_OR_TNI_LEN))
944 		return rte_flow_error_set(error, EINVAL,
945 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
946 					  "VNI must be totally masked or not in VxLAN");
947 	if (vxlan_mask->hdr.vni[0]) {
948 		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
949 		memcpy(rule->key_conf.mask.outer_tun_vni, vxlan_mask->hdr.vni,
950 			   VNI_OR_TNI_LEN);
951 	}
952 	memcpy(rule->key_conf.spec.outer_tun_vni, vxlan_spec->hdr.vni,
953 		   VNI_OR_TNI_LEN);
954 	return 0;
955 }
956 
957 static int
958 hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
959 		 struct rte_flow_error *error)
960 {
961 	const struct rte_flow_item_nvgre *nvgre_spec;
962 	const struct rte_flow_item_nvgre *nvgre_mask;
963 
964 	hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
965 	rule->key_conf.spec.outer_proto = IPPROTO_GRE;
966 	rule->key_conf.mask.outer_proto = IPPROTO_MASK;
967 
968 	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
969 	rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_NVGRE;
970 	rule->key_conf.mask.tunnel_type = ~HNS3_TUNNEL_TYPE_NVGRE;
971 	/* Only used to describe the protocol stack. */
972 	if (item->spec == NULL && item->mask == NULL)
973 		return 0;
974 
975 	nvgre_mask = item->mask;
976 	nvgre_spec = item->spec;
977 
978 	if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
979 		return rte_flow_error_set(error, EINVAL,
980 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
981 					  "Ver/protocol is not supported in NVGRE");
982 
983 	/* TNI must be totally masked or not. */
984 	if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
985 	    memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
986 		return rte_flow_error_set(error, EINVAL,
987 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
988 					  "TNI must be totally masked or not in NVGRE");
989 
990 	if (nvgre_mask->tni[0]) {
991 		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
992 		memcpy(rule->key_conf.mask.outer_tun_vni, nvgre_mask->tni,
993 			   VNI_OR_TNI_LEN);
994 	}
995 	memcpy(rule->key_conf.spec.outer_tun_vni, nvgre_spec->tni,
996 		   VNI_OR_TNI_LEN);
997 
998 	if (nvgre_mask->flow_id) {
999 		hns3_set_bit(rule->input_set, OUTER_TUN_FLOW_ID, 1);
1000 		rule->key_conf.mask.outer_tun_flow_id = nvgre_mask->flow_id;
1001 	}
1002 	rule->key_conf.spec.outer_tun_flow_id = nvgre_spec->flow_id;
1003 	return 0;
1004 }
1005 
1006 static int
1007 hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1008 		  struct rte_flow_error *error)
1009 {
1010 	const struct rte_flow_item_geneve *geneve_spec;
1011 	const struct rte_flow_item_geneve *geneve_mask;
1012 
1013 	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
1014 	rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE;
1015 	rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
1016 	/* Only used to describe the protocol stack. */
1017 	if (item->spec == NULL && item->mask == NULL)
1018 		return 0;
1019 
1020 	geneve_mask = item->mask;
1021 	geneve_spec = item->spec;
1022 
1023 	if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
1024 		return rte_flow_error_set(error, EINVAL,
1025 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
1026 					  "Ver/protocol is not supported in GENEVE");
1027 	/* VNI must be totally masked or not. */
1028 	if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
1029 	    memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
1030 		return rte_flow_error_set(error, EINVAL,
1031 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
1032 					  "VNI must be totally masked or not in GENEVE");
1033 	if (geneve_mask->vni[0]) {
1034 		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
1035 		memcpy(rule->key_conf.mask.outer_tun_vni, geneve_mask->vni,
1036 			   VNI_OR_TNI_LEN);
1037 	}
1038 	memcpy(rule->key_conf.spec.outer_tun_vni, geneve_spec->vni,
1039 		   VNI_OR_TNI_LEN);
1040 	return 0;
1041 }
1042 
1043 static int
1044 hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1045 		  struct rte_flow_error *error)
1046 {
1047 	int ret;
1048 
1049 	if (item->spec == NULL && item->mask)
1050 		return rte_flow_error_set(error, EINVAL,
1051 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1052 					  "Can't configure FDIR with mask "
1053 					  "but without spec");
1054 	else if (item->spec && (item->mask == NULL))
1055 		return rte_flow_error_set(error, EINVAL,
1056 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1057 					  "Tunnel packets must be configured "
1058 					  "with a mask");
1059 
1060 	switch (item->type) {
1061 	case RTE_FLOW_ITEM_TYPE_VXLAN:
1062 	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1063 		ret = hns3_parse_vxlan(item, rule, error);
1064 		break;
1065 	case RTE_FLOW_ITEM_TYPE_NVGRE:
1066 		ret = hns3_parse_nvgre(item, rule, error);
1067 		break;
1068 	case RTE_FLOW_ITEM_TYPE_GENEVE:
1069 		ret = hns3_parse_geneve(item, rule, error);
1070 		break;
1071 	default:
1072 		return rte_flow_error_set(error, ENOTSUP,
1073 					  RTE_FLOW_ERROR_TYPE_ITEM,
1074 					  NULL, "Unsupported tunnel type!");
1075 	}
1076 	if (ret)
1077 		return ret;
1078 	return hns3_handle_tunnel(item, rule, error);
1079 }
1080 
1081 static int
1082 hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1083 		  struct items_step_mngr *step_mngr,
1084 		  struct rte_flow_error *error)
1085 {
1086 	int ret;
1087 
1088 	if (item->spec == NULL && item->mask)
1089 		return rte_flow_error_set(error, EINVAL,
1090 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1091 					  "Can't configure FDIR with mask "
1092 					  "but without spec");
1093 
1094 	switch (item->type) {
1095 	case RTE_FLOW_ITEM_TYPE_ETH:
1096 		ret = hns3_parse_eth(item, rule, error);
1097 		step_mngr->items = L2_next_items;
1098 		step_mngr->count = RTE_DIM(L2_next_items);
1099 		break;
1100 	case RTE_FLOW_ITEM_TYPE_VLAN:
1101 		ret = hns3_parse_vlan(item, rule, error);
1102 		step_mngr->items = L2_next_items;
1103 		step_mngr->count = RTE_DIM(L2_next_items);
1104 		break;
1105 	case RTE_FLOW_ITEM_TYPE_IPV4:
1106 		ret = hns3_parse_ipv4(item, rule, error);
1107 		step_mngr->items = L3_next_items;
1108 		step_mngr->count = RTE_DIM(L3_next_items);
1109 		break;
1110 	case RTE_FLOW_ITEM_TYPE_IPV6:
1111 		ret = hns3_parse_ipv6(item, rule, error);
1112 		step_mngr->items = L3_next_items;
1113 		step_mngr->count = RTE_DIM(L3_next_items);
1114 		break;
1115 	case RTE_FLOW_ITEM_TYPE_TCP:
1116 		ret = hns3_parse_tcp(item, rule, error);
1117 		step_mngr->items = L4_next_items;
1118 		step_mngr->count = RTE_DIM(L4_next_items);
1119 		break;
1120 	case RTE_FLOW_ITEM_TYPE_UDP:
1121 		ret = hns3_parse_udp(item, rule, error);
1122 		step_mngr->items = L4_next_items;
1123 		step_mngr->count = RTE_DIM(L4_next_items);
1124 		break;
1125 	case RTE_FLOW_ITEM_TYPE_SCTP:
1126 		ret = hns3_parse_sctp(item, rule, error);
1127 		step_mngr->items = L4_next_items;
1128 		step_mngr->count = RTE_DIM(L4_next_items);
1129 		break;
1130 	default:
1131 		return rte_flow_error_set(error, ENOTSUP,
1132 					  RTE_FLOW_ERROR_TYPE_ITEM,
1133 					  NULL, "Unsupported normal type!");
1134 	}
1135 
1136 	return ret;
1137 }
1138 
1139 static int
1140 hns3_validate_item(const struct rte_flow_item *item,
1141 		   struct items_step_mngr step_mngr,
1142 		   struct rte_flow_error *error)
1143 {
1144 	uint32_t i;
1145 
1146 	if (item->last)
1147 		return rte_flow_error_set(error, ENOTSUP,
1148 					  RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
1149 					  "Not supported last point for range");
1150 
1151 	for (i = 0; i < step_mngr.count; i++) {
1152 		if (item->type == step_mngr.items[i])
1153 			break;
1154 	}
1155 
1156 	if (i == step_mngr.count) {
1157 		return rte_flow_error_set(error, EINVAL,
1158 					  RTE_FLOW_ERROR_TYPE_ITEM,
1159 					  item, "Invalid or missing item");
1160 	}
1161 	return 0;
1162 }
1163 
1164 static inline bool
1165 is_tunnel_packet(enum rte_flow_item_type type)
1166 {
1167 	if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
1168 	    type == RTE_FLOW_ITEM_TYPE_VXLAN ||
1169 	    type == RTE_FLOW_ITEM_TYPE_NVGRE ||
1170 	    type == RTE_FLOW_ITEM_TYPE_GENEVE)
1171 		return true;
1172 	return false;
1173 }
1174 
1175 /*
1176  * Parse the flow director rule.
1177  * The supported PATTERN:
1178  *   case: non-tunnel packet:
1179  *     ETH : src-mac, dst-mac, ethertype
1180  *     VLAN: tag1, tag2
1181  *     IPv4: src-ip, dst-ip, tos, proto
1182  *     IPv6: src-ip(last 32 bit addr), dst-ip(last 32 bit addr), proto
1183  *     UDP : src-port, dst-port
1184  *     TCP : src-port, dst-port
1185  *     SCTP: src-port, dst-port, tag
1186  *   case: tunnel packet:
1187  *     OUTER-ETH: ethertype
1188  *     OUTER-L3 : proto
1189  *     OUTER-L4 : src-port, dst-port
1190  *     TUNNEL   : vni, flow-id(only valid when NVGRE)
1191  *     INNER-ETH/VLAN/IPv4/IPv6/UDP/TCP/SCTP: same as non-tunnel packet
1192  * The supported ACTION:
1193  *    QUEUE
1194  *    DROP
1195  *    COUNT
1196  *    MARK: the id range [0, 4094]
1197  *    FLAG
1198  *    RSS: only valid if the firmware supports FD_QUEUE_REGION.
1199  */
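/*
 * Illustrative testpmd-style example of an FDIR rule matching the description
 * above (for reference only, the exact syntax depends on the application):
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.1.1 / tcp dst is 80 / end
 *     actions queue index 1 / mark id 10 / count / end
 */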
1200 static int
1201 hns3_parse_fdir_filter(struct rte_eth_dev *dev,
1202 		       const struct rte_flow_item pattern[],
1203 		       const struct rte_flow_action actions[],
1204 		       struct hns3_fdir_rule *rule,
1205 		       struct rte_flow_error *error)
1206 {
1207 	struct hns3_adapter *hns = dev->data->dev_private;
1208 	const struct rte_flow_item *item;
1209 	struct items_step_mngr step_mngr;
1210 	int ret;
1211 
1212 	/* FDIR is available only in PF driver */
1213 	if (hns->is_vf)
1214 		return rte_flow_error_set(error, ENOTSUP,
1215 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1216 					  "Fdir not supported in VF");
1217 
1218 	step_mngr.items = first_items;
1219 	step_mngr.count = RTE_DIM(first_items);
1220 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1221 		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
1222 			continue;
1223 
1224 		ret = hns3_validate_item(item, step_mngr, error);
1225 		if (ret)
1226 			return ret;
1227 
1228 		if (is_tunnel_packet(item->type)) {
1229 			ret = hns3_parse_tunnel(item, rule, error);
1230 			if (ret)
1231 				return ret;
1232 			step_mngr.items = tunnel_next_items;
1233 			step_mngr.count = RTE_DIM(tunnel_next_items);
1234 		} else {
1235 			ret = hns3_parse_normal(item, rule, &step_mngr, error);
1236 			if (ret)
1237 				return ret;
1238 		}
1239 	}
1240 
1241 	return hns3_handle_actions(dev, actions, rule, error);
1242 }
1243 
1244 static void
1245 hns3_filterlist_flush(struct rte_eth_dev *dev)
1246 {
1247 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1248 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
1249 	struct hns3_rss_conf_ele *rss_filter_ptr;
1250 	struct hns3_flow_mem *flow_node;
1251 
1252 	fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
1253 	while (fdir_rule_ptr) {
1254 		TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
1255 		rte_free(fdir_rule_ptr);
1256 		fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
1257 	}
1258 
1259 	rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
1260 	while (rss_filter_ptr) {
1261 		TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
1262 		rte_free(rss_filter_ptr);
1263 		rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
1264 	}
1265 
1266 	flow_node = TAILQ_FIRST(&hw->flow_list);
1267 	while (flow_node) {
1268 		TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
1269 		rte_free(flow_node->flow);
1270 		rte_free(flow_node);
1271 		flow_node = TAILQ_FIRST(&hw->flow_list);
1272 	}
1273 }
1274 
1275 static bool
1276 hns3_flow_rule_key_same(const struct rte_flow_action_rss *comp,
1277 			const struct rte_flow_action_rss *with)
1278 {
1279 	if (comp->key_len != with->key_len)
1280 		return false;
1281 
1282 	if (with->key_len == 0)
1283 		return true;
1284 
1285 	if (comp->key == NULL && with->key == NULL)
1286 		return true;
1287 
1288 	if (!(comp->key != NULL && with->key != NULL))
1289 		return false;
1290 
1291 	return !memcmp(comp->key, with->key, with->key_len);
1292 }
1293 
1294 static bool
1295 hns3_flow_rule_queues_same(const struct rte_flow_action_rss *comp,
1296 			   const struct rte_flow_action_rss *with)
1297 {
1298 	if (comp->queue_num != with->queue_num)
1299 		return false;
1300 
1301 	if (with->queue_num == 0)
1302 		return true;
1303 
1304 	if (comp->queue == NULL && with->queue == NULL)
1305 		return true;
1306 
1307 	if (!(comp->queue != NULL && with->queue != NULL))
1308 		return false;
1309 
1310 	return !memcmp(comp->queue, with->queue, with->queue_num * sizeof(with->queue[0]));
1311 }
1312 
1313 static bool
1314 hns3_action_rss_same(const struct rte_flow_action_rss *comp,
1315 		     const struct rte_flow_action_rss *with)
1316 {
1317 	bool same_level;
1318 	bool same_types;
1319 	bool same_func;
1320 
1321 	same_level = (comp->level == with->level);
1322 	same_types = (comp->types == with->types);
1323 	same_func = (comp->func == with->func);
1324 
1325 	return same_level && same_types && same_func &&
1326 		hns3_flow_rule_key_same(comp, with) &&
1327 		hns3_flow_rule_queues_same(comp, with);
1328 }
1329 
1330 static bool
1331 hns3_rss_input_tuple_supported(struct hns3_hw *hw,
1332 			       const struct rte_flow_action_rss *rss)
1333 {
1334 	/*
1335 	 * For IP packets, using the src/dst port fields in the RSS hash is not
1336 	 * supported for the following packet types:
1337 	 * - IPV4 FRAG | IPV4 NONFRAG | IPV6 FRAG | IPV6 NONFRAG
1338 	 * Besides, the Kunpeng920 NIC hardware does not support using the src/dst
1339 	 * port fields in the RSS hash for the IPV6 SCTP packet type. However,
1340 	 * Kunpeng930 and later Kunpeng series do support using the src/dst port
1341 	 * fields in the RSS hash for the IPv6 SCTP packet type.
1342 	 */
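	/*
	 * For example (illustrative): a request such as
	 * RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_L4_SRC_ONLY falls into the
	 * unsupported set and is rejected by the check below.
	 */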
1343 	if (rss->types & (RTE_ETH_RSS_L4_DST_ONLY | RTE_ETH_RSS_L4_SRC_ONLY) &&
1344 	    (rss->types & RTE_ETH_RSS_IP ||
1345 	    (!hw->rss_info.ipv6_sctp_offload_supported &&
1346 	    rss->types & RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
1347 		return false;
1348 
1349 	return true;
1350 }
1351 
1352 /*
1353  * This function is used to validate the RSS action.
1354  */
1355 static int
1356 hns3_parse_rss_filter(struct rte_eth_dev *dev,
1357 		      const struct rte_flow_action *actions,
1358 		      struct rte_flow_error *error)
1359 {
1360 	struct hns3_adapter *hns = dev->data->dev_private;
1361 	struct hns3_hw *hw = &hns->hw;
1362 	struct hns3_rss_conf *rss_conf = &hw->rss_info;
1363 	const struct rte_flow_action_rss *rss;
1364 	const struct rte_flow_action *act;
1365 	uint32_t act_index = 0;
1366 	uint16_t n;
1367 
1368 	NEXT_ITEM_OF_ACTION(act, actions, act_index);
1369 	rss = act->conf;
1370 
1371 	if (rss == NULL) {
1372 		return rte_flow_error_set(error, EINVAL,
1373 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1374 					  act, "no valid RSS configuration");
1375 	}
1376 
1377 	if (rss->queue_num > RTE_DIM(rss_conf->queue))
1378 		return rte_flow_error_set(error, ENOTSUP,
1379 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1380 					  "queue number configured exceeds "
1381 					  "queue buffer size driver supported");
1382 
1383 	for (n = 0; n < rss->queue_num; n++) {
1384 		if (rss->queue[n] < hw->alloc_rss_size)
1385 			continue;
1386 		return rte_flow_error_set(error, EINVAL,
1387 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1388 					  "queue id must be less than queue number allocated to a TC");
1389 	}
1390 
1391 	if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types)
1392 		return rte_flow_error_set(error, EINVAL,
1393 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1394 					  act,
1395 					  "Flow types are unsupported by "
1396 					  "hns3's RSS");
1397 	if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX)
1398 		return rte_flow_error_set(error, ENOTSUP,
1399 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1400 					  "RSS hash func is not supported");
1401 	if (rss->level)
1402 		return rte_flow_error_set(error, ENOTSUP,
1403 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1404 					  "a nonzero RSS encapsulation level is not supported");
1405 	if (rss->key_len && rss->key_len != hw->rss_key_size)
1406 		return rte_flow_error_set(error, ENOTSUP,
1407 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1408 					  "invalid RSS key length");
1409 
1410 	if (!hns3_rss_input_tuple_supported(hw, rss))
1411 		return rte_flow_error_set(error, EINVAL,
1412 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1413 					  &rss->types,
1414 					  "input RSS types are not supported");
1415 
1416 	act_index++;
1417 
1418 	/* Check if the next not void action is END */
1419 	NEXT_ITEM_OF_ACTION(act, actions, act_index);
1420 	if (act->type != RTE_FLOW_ACTION_TYPE_END)
1421 		return rte_flow_error_set(error, EINVAL,
1422 					  RTE_FLOW_ERROR_TYPE_ACTION,
1423 					  act, "Not supported action.");
1424 
1425 	return 0;
1426 }
1427 
1428 static int
1429 hns3_disable_rss(struct hns3_hw *hw)
1430 {
1431 	int ret;
1432 
1433 	ret = hns3_set_rss_tuple_by_rss_hf(hw, 0);
1434 	if (ret)
1435 		return ret;
1436 
1437 	return 0;
1438 }
1439 
1440 static int
1441 hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func,
1442 			 uint8_t *hash_algo)
1443 {
1444 	enum rte_eth_hash_function algo_func = *func;
1445 	switch (algo_func) {
1446 	case RTE_ETH_HASH_FUNCTION_DEFAULT:
1447 		/* Keep *hash_algo as what it used to be */
1448 		algo_func = hw->rss_info.conf.func;
1449 		break;
1450 	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1451 		*hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ;
1452 		break;
1453 	case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
1454 		*hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE;
1455 		break;
1456 	case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
1457 		*hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP;
1458 		break;
1459 	default:
1460 		hns3_err(hw, "Invalid RSS algorithm configuration(%d)",
1461 			 algo_func);
1462 		return -EINVAL;
1463 	}
1464 	*func = algo_func;
1465 
1466 	return 0;
1467 }
1468 
1469 static int
1470 hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config)
1471 {
1472 	uint8_t rss_key[HNS3_RSS_KEY_SIZE_MAX] = {0};
1473 	bool use_default_key = false;
1474 	int ret;
1475 
1476 	if (rss_config->key == NULL || rss_config->key_len != hw->rss_key_size) {
1477 		hns3_warn(hw, "Default RSS hash key to be set");
1478 		memcpy(rss_key, hns3_hash_key,
1479 			RTE_MIN(sizeof(hns3_hash_key), hw->rss_key_size));
1480 		use_default_key = true;
1481 	}
1482 
1483 	ret = hns3_parse_rss_algorithm(hw, &rss_config->func,
1484 				       &hw->rss_info.hash_algo);
1485 	if (ret)
1486 		return ret;
1487 
1488 	ret = hns3_rss_set_algo_key(hw, hw->rss_info.hash_algo,
1489 				    use_default_key ? rss_key : rss_config->key,
1490 				    hw->rss_key_size);
1491 	if (ret)
1492 		return ret;
1493 
1494 	hw->rss_info.conf.func = rss_config->func;
1495 
1496 	ret = hns3_set_rss_tuple_by_rss_hf(hw, rss_config->types);
1497 	if (ret)
1498 		hns3_err(hw, "Update RSS tuples by rss hf failed %d", ret);
1499 
1500 	return ret;
1501 }
1502 
1503 static int
1504 hns3_update_indir_table(struct hns3_hw *hw,
1505 			const struct rte_flow_action_rss *conf, uint16_t num)
1506 {
1507 	uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE_MAX];
1508 	uint16_t j;
1509 	uint32_t i;
1510 
1511 	/* Fill in redirection table */
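	/*
	 * For example (illustrative): with num = 4 and conf->queue = {0, 1, 2, 3},
	 * the redirection table becomes 0, 1, 2, 3, 0, 1, 2, 3, ... for all
	 * hw->rss_ind_tbl_size entries.
	 */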
1512 	memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl,
1513 	       sizeof(hw->rss_info.rss_indirection_tbl));
1514 	for (i = 0, j = 0; i < hw->rss_ind_tbl_size; i++, j++) {
1515 		j %= num;
1516 		if (conf->queue[j] >= hw->alloc_rss_size) {
1517 			hns3_err(hw, "queue id(%u) set to redirection table "
1518 				 "exceeds queue number(%u) allocated to a TC.",
1519 				 conf->queue[j], hw->alloc_rss_size);
1520 			return -EINVAL;
1521 		}
1522 		indir_tbl[i] = conf->queue[j];
1523 	}
1524 
1525 	return hns3_set_rss_indir_table(hw, indir_tbl, hw->rss_ind_tbl_size);
1526 }
1527 
1528 static int
1529 hns3_config_rss_filter(struct hns3_hw *hw,
1530 		       const struct hns3_rss_conf *conf, bool add)
1531 {
1532 	uint64_t flow_types;
1533 	uint16_t num;
1534 	int ret;
1535 
1536 	struct rte_flow_action_rss rss_flow_conf = {
1537 		.func = conf->conf.func,
1538 		.level = conf->conf.level,
1539 		.types = conf->conf.types,
1540 		.key_len = conf->conf.key_len,
1541 		.queue_num = conf->conf.queue_num,
1542 		.key = conf->conf.key_len ?
1543 		    (void *)(uintptr_t)conf->conf.key : NULL,
1544 		.queue = conf->conf.queue,
1545 	};
1546 
1547 	if (!add) {
1548 		if (!conf->valid)
1549 			return 0;
1550 
1551 		ret = hns3_disable_rss(hw);
1552 		if (ret) {
1553 			hns3_err(hw, "RSS disable failed(%d)", ret);
1554 			return ret;
1555 		}
1556 
1557 		return 0;
1558 	}
1559 
1560 	/* Set rx queues to use */
1561 	num = RTE_MIN(hw->data->nb_rx_queues, rss_flow_conf.queue_num);
1562 	if (rss_flow_conf.queue_num > num)
1563 		hns3_warn(hw, "Config queue number %u exceeds the supported maximum and is truncated",
1564 			  rss_flow_conf.queue_num);
1565 	hns3_info(hw, "A maximum of %u contiguous PF queues are configured", num);
1566 	if (num) {
1567 		ret = hns3_update_indir_table(hw, &rss_flow_conf, num);
1568 		if (ret)
1569 			return ret;
1570 	}
1571 
1572 	/* Filter the unsupported flow types */
1573 	flow_types = conf->conf.types ?
1574 		     rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT :
1575 		     hw->rss_info.conf.types;
1576 	if (flow_types != rss_flow_conf.types)
1577 		hns3_warn(hw, "modified RSS types based on hardware support,"
1578 			  " requested:0x%" PRIx64 " configured:0x%" PRIx64,
1579 			  rss_flow_conf.types, flow_types);
1580 	/* Update the useful flow types */
1581 	rss_flow_conf.types = flow_types;
1582 
1583 	/* Set hash algorithm and flow types by the user's config */
1584 	return hns3_hw_rss_hash_set(hw, &rss_flow_conf);
1585 }
1586 
1587 static int
1588 hns3_clear_rss_filter(struct rte_eth_dev *dev)
1589 {
1590 	struct hns3_adapter *hns = dev->data->dev_private;
1591 	struct hns3_rss_conf_ele *rss_filter_ptr;
1592 	struct hns3_hw *hw = &hns->hw;
1593 	int rss_rule_succ_cnt = 0; /* count for success of clearing RSS rules */
1594 	int rss_rule_fail_cnt = 0; /* count for failure of clearing RSS rules */
1595 	int ret = 0;
1596 
1597 	rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
1598 	while (rss_filter_ptr) {
1599 		TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
1600 		ret = hns3_config_rss_filter(hw, &rss_filter_ptr->filter_info,
1601 					     false);
1602 		if (ret)
1603 			rss_rule_fail_cnt++;
1604 		else
1605 			rss_rule_succ_cnt++;
1606 		rte_free(rss_filter_ptr);
1607 		rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
1608 	}
1609 
1610 	if (rss_rule_fail_cnt) {
1611 		hns3_err(hw, "fail to delete all RSS filters, success num = %d fail num = %d",
1612 			 rss_rule_succ_cnt, rss_rule_fail_cnt);
1613 		ret = -EIO;
1614 	}
1615 
1616 	return ret;
1617 }
1618 
1619 static int
1620 hns3_restore_rss_filter(struct hns3_hw *hw)
1621 {
1622 	struct hns3_rss_conf_ele *filter;
1623 	int ret = 0;
1624 
1625 	pthread_mutex_lock(&hw->flows_lock);
1626 	TAILQ_FOREACH(filter, &hw->flow_rss_list, entries) {
1627 		if (!filter->filter_info.valid)
1628 			continue;
1629 
1630 		ret = hns3_config_rss_filter(hw, &filter->filter_info, true);
1631 		if (ret != 0) {
1632 			hns3_err(hw, "restore RSS filter failed, ret=%d", ret);
1633 			goto out;
1634 		}
1635 	}
1636 
1637 out:
1638 	pthread_mutex_unlock(&hw->flows_lock);
1639 
1640 	return ret;
1641 }
1642 
1643 int
1644 hns3_restore_filter(struct hns3_adapter *hns)
1645 {
1646 	struct hns3_hw *hw = &hns->hw;
1647 	int ret;
1648 
1649 	ret = hns3_restore_all_fdir_filter(hns);
1650 	if (ret != 0)
1651 		return ret;
1652 
1653 	return hns3_restore_rss_filter(hw);
1654 }
1655 
1656 static bool
1657 hns3_rss_action_is_dup(struct hns3_hw *hw,
1658 		       const struct rte_flow_action_rss *act)
1659 {
1660 	struct hns3_rss_conf_ele *filter;
1661 
1662 	TAILQ_FOREACH(filter, &hw->flow_rss_list, entries) {
1663 		if (!filter->filter_info.valid)
1664 			continue;
1665 
1666 		if (hns3_action_rss_same(&filter->filter_info.conf, act))
1667 			return true;
1668 	}
1669 
1670 	return false;
1671 }
1672 
1673 static int
1674 hns3_flow_parse_rss(struct rte_eth_dev *dev,
1675 		    const struct hns3_rss_conf *conf, bool add)
1676 {
1677 	struct hns3_adapter *hns = dev->data->dev_private;
1678 	struct hns3_hw *hw = &hns->hw;
1679 
1680 	if (hns3_rss_action_is_dup(hw, &conf->conf)) {
1681 		hns3_err(hw, "duplicate RSS configuration");
1682 		return -EINVAL;
1683 	}
1684 
1685 	return hns3_config_rss_filter(hw, conf, add);
1686 }
1687 
1688 static int
1689 hns3_flow_args_check(const struct rte_flow_attr *attr,
1690 		     const struct rte_flow_item pattern[],
1691 		     const struct rte_flow_action actions[],
1692 		     struct rte_flow_error *error)
1693 {
1694 	if (pattern == NULL)
1695 		return rte_flow_error_set(error, EINVAL,
1696 					  RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1697 					  NULL, "NULL pattern.");
1698 
1699 	if (actions == NULL)
1700 		return rte_flow_error_set(error, EINVAL,
1701 					  RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1702 					  NULL, "NULL action.");
1703 
1704 	if (attr == NULL)
1705 		return rte_flow_error_set(error, EINVAL,
1706 					  RTE_FLOW_ERROR_TYPE_ATTR,
1707 					  NULL, "NULL attribute.");
1708 
1709 	return hns3_check_attr(attr, error);
1710 }
1711 
1712 /*
1713  * Check whether the flow rule is supported by hns3.
1714  * This only checks the rule's format; it does not guarantee that the rule
1715  * can be programmed into the HW, because there may not be enough room for it.
1716  */
1717 static int
1718 hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1719 		   const struct rte_flow_item pattern[],
1720 		   const struct rte_flow_action actions[],
1721 		   struct rte_flow_error *error)
1722 {
1723 	struct hns3_fdir_rule fdir_rule;
1724 	int ret;
1725 
1726 	ret = hns3_flow_args_check(attr, pattern, actions, error);
1727 	if (ret)
1728 		return ret;
1729 
1730 	if (hns3_find_rss_general_action(pattern, actions))
1731 		return hns3_parse_rss_filter(dev, actions, error);
1732 
1733 	memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1734 	return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1735 }
1736 
1737 static int
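/*
 * A minimal usage sketch (not part of the driver): an application normally
 * validates a rule through the generic rte_flow API before creating it,
 * which lands in hns3_flow_validate() above. The port id, pattern and
 * actions below are illustrative assumptions only.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */
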
1738 hns3_flow_create_rss_rule(struct rte_eth_dev *dev,
1739 			  const struct rte_flow_action *act,
1740 			  struct rte_flow *flow)
1741 {
1742 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1743 	const struct rte_flow_action_rss *rss_act;
1744 	struct hns3_rss_conf_ele *rss_filter_ptr;
1745 	struct hns3_rss_conf_ele *filter_ptr;
1746 	struct hns3_rss_conf *new_conf;
1747 	int ret;
1748 
1749 	rss_filter_ptr = rte_zmalloc("hns3 rss filter",
1750 				     sizeof(struct hns3_rss_conf_ele), 0);
1751 	if (rss_filter_ptr == NULL) {
1752 		hns3_err(hw, "failed to allocate hns3_rss_filter memory");
1753 		return -ENOMEM;
1754 	}
1755 
1756 	rss_act = (const struct rte_flow_action_rss *)act->conf;
1757 	new_conf = &rss_filter_ptr->filter_info;
1758 	memcpy(&new_conf->conf, rss_act, sizeof(*rss_act));
1759 	if (rss_act->queue_num > 0) {
1760 		memcpy(new_conf->queue, rss_act->queue,
1761 		       rss_act->queue_num * sizeof(new_conf->queue[0]));
1762 		new_conf->conf.queue = new_conf->queue;
1763 	}
1764 	if (rss_act->key_len > 0) {
1765 		if (rss_act->key != NULL) {
1766 			memcpy(new_conf->key, rss_act->key,
1767 			       rss_act->key_len * sizeof(new_conf->key[0]));
1768 			new_conf->conf.key = new_conf->key;
1769 		}
1770 	}
1771 
1772 	ret = hns3_flow_parse_rss(dev, new_conf, true);
1773 	if (ret != 0) {
1774 		rte_free(rss_filter_ptr);
1775 		return ret;
1776 	}
1777 	rss_filter_ptr->filter_info.valid = true;
1778 
1779 	/*
1780 	 * When a new RSS rule is created, the old rules are overridden and
1781 	 * marked invalid (see the action-layout sketch after this function).
1782 	 */
1783 	TAILQ_FOREACH(filter_ptr, &hw->flow_rss_list, entries)
1784 		filter_ptr->filter_info.valid = false;
1785 
1786 	TAILQ_INSERT_TAIL(&hw->flow_rss_list, rss_filter_ptr, entries);
1787 	flow->rule = rss_filter_ptr;
1788 	flow->filter_type = RTE_ETH_FILTER_HASH;
1789 
1790 	return 0;
1791 }
1792 
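/*
 * A minimal sketch of the RSS action layout this path consumes, as an
 * application would pass it in. Values are illustrative only, and macro
 * names may differ slightly between DPDK releases.
 *
 *	uint16_t queues[] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss rss = {
 *		.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
 *		.types = RTE_ETH_RSS_IP,
 *		.queue_num = RTE_DIM(queues),
 *		.queue = queues,
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */
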
1793 static int
1794 hns3_flow_create_fdir_rule(struct rte_eth_dev *dev,
1795 			   const struct rte_flow_item pattern[],
1796 			   const struct rte_flow_action actions[],
1797 			   struct rte_flow_error *error,
1798 			   struct rte_flow *flow)
1799 {
1800 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1801 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1802 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
1803 	struct hns3_fdir_rule fdir_rule;
1804 	bool indir;
1805 	int ret;
1806 
1807 	memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1808 	ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1809 	if (ret != 0)
1810 		return ret;
1811 
1812 	indir = !!(fdir_rule.flags & HNS3_RULE_FLAG_COUNTER_INDIR);
1813 	if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) {
1814 		ret = hns3_counter_new(dev, indir, fdir_rule.act_cnt.id,
1815 				       error);
1816 		if (ret != 0)
1817 			return ret;
1818 
1819 		flow->counter_id = fdir_rule.act_cnt.id;
1820 	}
1821 
1822 	fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
1823 				    sizeof(struct hns3_fdir_rule_ele), 0);
1824 	if (fdir_rule_ptr == NULL) {
1825 		hns3_err(hw, "failed to allocate fdir_rule memory.");
1826 		ret = -ENOMEM;
1827 		goto err_malloc;
1828 	}
1829 
1830 	/*
1831 	 * Program the rule into the hardware only after all the preceding
1832 	 * steps have succeeded; this ordering simplifies the rollback of
1833 	 * rules already in the hardware.
1834 	 */
1835 	ret = hns3_fdir_filter_program(hns, &fdir_rule, false);
1836 	if (ret != 0)
1837 		goto err_fdir_filter;
1838 
1839 	memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule,
1840 		sizeof(struct hns3_fdir_rule));
1841 	TAILQ_INSERT_TAIL(&hw->flow_fdir_list, fdir_rule_ptr, entries);
1842 	flow->rule = fdir_rule_ptr;
1843 	flow->filter_type = RTE_ETH_FILTER_FDIR;
1844 
1845 	return 0;
1846 
1847 err_fdir_filter:
1848 	rte_free(fdir_rule_ptr);
1849 err_malloc:
1850 	if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1851 		hns3_counter_release(dev, fdir_rule.act_cnt.id);
1852 
1853 	return ret;
1854 }
1855 
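/*
 * A minimal sketch of an FDIR-style rule with a counter, as an application
 * might request it. It reuses the attr/pattern shape from the earlier
 * sketch; the counter id and queue index here are illustrative assumptions.
 *
 *	struct rte_flow_action_count count = { .id = 1 };
 *	struct rte_flow_action_queue queue = { .index = 2 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_query_count stats = { 0 };
 *
 *	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (flow != NULL)
 *		rte_flow_query(port_id, flow, &actions[0], &stats, &err);
 */
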
1856 /*
1857  * Create a flow rule.
1858  * Theoretically one rule can match more than one filter.
1859  * We let the rule use the first filter it hits, so the order in which the
1860  * filters are checked matters.
1861  */
1862 static struct rte_flow *
1863 hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1864 		 const struct rte_flow_item pattern[],
1865 		 const struct rte_flow_action actions[],
1866 		 struct rte_flow_error *error)
1867 {
1868 	struct hns3_adapter *hns = dev->data->dev_private;
1869 	struct hns3_hw *hw = &hns->hw;
1870 	struct hns3_flow_mem *flow_node;
1871 	const struct rte_flow_action *act;
1872 	struct rte_flow *flow;
1873 	int ret;
1874 
1875 	ret = hns3_flow_validate(dev, attr, pattern, actions, error);
1876 	if (ret)
1877 		return NULL;
1878 
1879 	flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
1880 	if (flow == NULL) {
1881 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1882 				   NULL, "Failed to allocate flow memory");
1883 		return NULL;
1884 	}
1885 	flow_node = rte_zmalloc("hns3 flow node",
1886 				sizeof(struct hns3_flow_mem), 0);
1887 	if (flow_node == NULL) {
1888 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1889 				   NULL, "Failed to allocate flow list memory");
1890 		rte_free(flow);
1891 		return NULL;
1892 	}
1893 
1894 	flow_node->flow = flow;
1895 	TAILQ_INSERT_TAIL(&hw->flow_list, flow_node, entries);
1896 
1897 	act = hns3_find_rss_general_action(pattern, actions);
1898 	if (act)
1899 		ret = hns3_flow_create_rss_rule(dev, act, flow);
1900 	else
1901 		ret = hns3_flow_create_fdir_rule(dev, pattern, actions,
1902 						 error, flow);
1903 	if (ret == 0)
1904 		return flow;
1905 
1906 	rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1907 			   "Failed to create flow");
1908 	TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
1909 	rte_free(flow_node);
1910 	rte_free(flow);
1911 
1912 	return NULL;
1913 }
1914 
1915 /* Destroy a flow rule on hns3. */
1916 static int
1917 hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1918 		  struct rte_flow_error *error)
1919 {
1920 	struct hns3_adapter *hns = dev->data->dev_private;
1921 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
1922 	struct hns3_rss_conf_ele *rss_filter_ptr;
1923 	struct hns3_flow_mem *flow_node;
1924 	enum rte_filter_type filter_type;
1925 	struct hns3_fdir_rule fdir_rule;
1926 	struct hns3_hw *hw = &hns->hw;
1927 	int ret;
1928 
1929 	if (flow == NULL)
1930 		return rte_flow_error_set(error, EINVAL,
1931 					  RTE_FLOW_ERROR_TYPE_HANDLE,
1932 					  flow, "Flow is NULL");
1933 
1934 	filter_type = flow->filter_type;
1935 	switch (filter_type) {
1936 	case RTE_ETH_FILTER_FDIR:
1937 		fdir_rule_ptr = (struct hns3_fdir_rule_ele *)flow->rule;
1938 		memcpy(&fdir_rule, &fdir_rule_ptr->fdir_conf,
1939 			   sizeof(struct hns3_fdir_rule));
1940 
1941 		ret = hns3_fdir_filter_program(hns, &fdir_rule, true);
1942 		if (ret)
1943 			return rte_flow_error_set(error, EIO,
1944 						  RTE_FLOW_ERROR_TYPE_HANDLE,
1945 						  flow,
1946 						  "Destroy FDIR fail.Try again");
1947 		if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1948 			hns3_counter_release(dev, fdir_rule.act_cnt.id);
1949 		TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
1950 		rte_free(fdir_rule_ptr);
1951 		fdir_rule_ptr = NULL;
1952 		break;
1953 	case RTE_ETH_FILTER_HASH:
1954 		rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule;
1955 		ret = hns3_config_rss_filter(hw, &rss_filter_ptr->filter_info,
1956 					     false);
1957 		if (ret)
1958 			return rte_flow_error_set(error, EIO,
1959 						  RTE_FLOW_ERROR_TYPE_HANDLE,
1960 						  flow,
1961 						  "Destroy RSS fail.Try again");
1962 		TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
1963 		rte_free(rss_filter_ptr);
1964 		rss_filter_ptr = NULL;
1965 		break;
1966 	default:
1967 		return rte_flow_error_set(error, EINVAL,
1968 					  RTE_FLOW_ERROR_TYPE_HANDLE, flow,
1969 					  "Unsupported filter type");
1970 	}
1971 
1972 	TAILQ_FOREACH(flow_node, &hw->flow_list, entries) {
1973 		if (flow_node->flow == flow) {
1974 			TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
1975 			rte_free(flow_node);
1976 			flow_node = NULL;
1977 			break;
1978 		}
1979 	}
1980 	rte_free(flow);
1981 
1982 	return 0;
1983 }
1984 
1985 /* Destroy all flow rules associated with a port on hns3. */
1986 static int
1987 hns3_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1988 {
1989 	struct hns3_adapter *hns = dev->data->dev_private;
1990 	int ret;
1991 
1992 	/* FDIR is available only in PF driver */
1993 	if (!hns->is_vf) {
1994 		ret = hns3_clear_all_fdir_filter(hns);
1995 		if (ret) {
1996 			rte_flow_error_set(error, ret,
1997 					   RTE_FLOW_ERROR_TYPE_HANDLE,
1998 					   NULL, "Failed to flush rule");
1999 			return ret;
2000 		}
2001 		hns3_counter_flush(dev);
2002 	}
2003 
2004 	ret = hns3_clear_rss_filter(dev);
2005 	if (ret) {
2006 		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
2007 				   NULL, "Failed to flush rss filter");
2008 		return ret;
2009 	}
2010 
2011 	hns3_filterlist_flush(dev);
2012 
2013 	return 0;
2014 }
2015 
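/*
 * Sketch: an application tears all rules down through the generic flush
 * call, which reaches hns3_flow_flush() above (the port id is an
 * illustrative assumption):
 *
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_flush(port_id, &err) != 0)
 *		printf("flush failed: %s\n", err.message ? err.message : "");
 */
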
2016 /* Query an existing flow rule. */
2017 static int
2018 hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
2019 		const struct rte_flow_action *actions, void *data,
2020 		struct rte_flow_error *error)
2021 {
2022 	struct rte_flow_action_rss *rss_conf;
2023 	struct hns3_rss_conf_ele *rss_rule;
2024 	struct rte_flow_query_count *qc;
2025 	int ret;
2026 
2027 	if (!flow->rule)
2028 		return rte_flow_error_set(error, EINVAL,
2029 			RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "invalid rule");
2030 
2031 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2032 		switch (actions->type) {
2033 		case RTE_FLOW_ACTION_TYPE_VOID:
2034 			break;
2035 		case RTE_FLOW_ACTION_TYPE_COUNT:
2036 			qc = (struct rte_flow_query_count *)data;
2037 			ret = hns3_counter_query(dev, flow, qc, error);
2038 			if (ret)
2039 				return ret;
2040 			break;
2041 		case RTE_FLOW_ACTION_TYPE_RSS:
2042 			if (flow->filter_type != RTE_ETH_FILTER_HASH) {
2043 				return rte_flow_error_set(error, ENOTSUP,
2044 					RTE_FLOW_ERROR_TYPE_ACTION,
2045 					actions, "action is not supported");
2046 			}
2047 			rss_conf = (struct rte_flow_action_rss *)data;
2048 			rss_rule = (struct hns3_rss_conf_ele *)flow->rule;
2049 			rte_memcpy(rss_conf, &rss_rule->filter_info.conf,
2050 				   sizeof(struct rte_flow_action_rss));
2051 			break;
2052 		default:
2053 			return rte_flow_error_set(error, ENOTSUP,
2054 				RTE_FLOW_ERROR_TYPE_ACTION,
2055 				actions, "action is not supported");
2056 		}
2057 	}
2058 
2059 	return 0;
2060 }
2061 
2062 static int
2063 hns3_flow_validate_wrap(struct rte_eth_dev *dev,
2064 			const struct rte_flow_attr *attr,
2065 			const struct rte_flow_item pattern[],
2066 			const struct rte_flow_action actions[],
2067 			struct rte_flow_error *error)
2068 {
2069 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2070 	int ret;
2071 
2072 	pthread_mutex_lock(&hw->flows_lock);
2073 	ret = hns3_flow_validate(dev, attr, pattern, actions, error);
2074 	pthread_mutex_unlock(&hw->flows_lock);
2075 
2076 	return ret;
2077 }
2078 
2079 static struct rte_flow *
2080 hns3_flow_create_wrap(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2081 		      const struct rte_flow_item pattern[],
2082 		      const struct rte_flow_action actions[],
2083 		      struct rte_flow_error *error)
2084 {
2085 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2086 	struct rte_flow *flow;
2087 
2088 	pthread_mutex_lock(&hw->flows_lock);
2089 	flow = hns3_flow_create(dev, attr, pattern, actions, error);
2090 	pthread_mutex_unlock(&hw->flows_lock);
2091 
2092 	return flow;
2093 }
2094 
2095 static int
2096 hns3_flow_destroy_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
2097 		       struct rte_flow_error *error)
2098 {
2099 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2100 	int ret;
2101 
2102 	pthread_mutex_lock(&hw->flows_lock);
2103 	ret = hns3_flow_destroy(dev, flow, error);
2104 	pthread_mutex_unlock(&hw->flows_lock);
2105 
2106 	return ret;
2107 }
2108 
2109 static int
2110 hns3_flow_flush_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error)
2111 {
2112 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2113 	int ret;
2114 
2115 	pthread_mutex_lock(&hw->flows_lock);
2116 	ret = hns3_flow_flush(dev, error);
2117 	pthread_mutex_unlock(&hw->flows_lock);
2118 
2119 	return ret;
2120 }
2121 
2122 static int
2123 hns3_flow_query_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
2124 		     const struct rte_flow_action *actions, void *data,
2125 		     struct rte_flow_error *error)
2126 {
2127 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2128 	int ret;
2129 
2130 	pthread_mutex_lock(&hw->flows_lock);
2131 	ret = hns3_flow_query(dev, flow, actions, data, error);
2132 	pthread_mutex_unlock(&hw->flows_lock);
2133 
2134 	return ret;
2135 }
2136 
2137 static int
2138 hns3_check_indir_action(const struct rte_flow_indir_action_conf *conf,
2139 			const struct rte_flow_action *action,
2140 			struct rte_flow_error *error)
2141 {
2142 	if (!conf->ingress)
2143 		return rte_flow_error_set(error, EINVAL,
2144 				RTE_FLOW_ERROR_TYPE_ACTION,
2145 				NULL, "Indir action ingress can't be zero");
2146 
2147 	if (conf->egress)
2148 		return rte_flow_error_set(error, EINVAL,
2149 				RTE_FLOW_ERROR_TYPE_ACTION,
2150 				NULL, "Indir action not support egress");
2151 
2152 	if (conf->transfer)
2153 		return rte_flow_error_set(error, EINVAL,
2154 				RTE_FLOW_ERROR_TYPE_ACTION,
2155 				NULL, "Indir action not support transfer");
2156 
2157 	if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
2158 		return rte_flow_error_set(error, EINVAL,
2159 				RTE_FLOW_ERROR_TYPE_ACTION,
2160 				NULL, "Indir action only support count");
2161 
2162 	return 0;
2163 }
2164 
2165 static struct rte_flow_action_handle *
2166 hns3_flow_action_create(struct rte_eth_dev *dev,
2167 			const struct rte_flow_indir_action_conf *conf,
2168 			const struct rte_flow_action *action,
2169 			struct rte_flow_error *error)
2170 {
2171 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2172 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2173 	const struct rte_flow_action_count *act_count;
2174 	struct rte_flow_action_handle *handle = NULL;
2175 	struct hns3_flow_counter *counter;
2176 
2177 	if (hns3_check_indir_action(conf, action, error))
2178 		return NULL;
2179 
2180 	handle = rte_zmalloc("hns3 action handle",
2181 			     sizeof(struct rte_flow_action_handle), 0);
2182 	if (handle == NULL) {
2183 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
2184 				   NULL, "Failed to allocate action memory");
2185 		return NULL;
2186 	}
2187 
2188 	pthread_mutex_lock(&hw->flows_lock);
2189 
2190 	act_count = (const struct rte_flow_action_count *)action->conf;
2191 	if (act_count->id >= pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1]) {
2192 		rte_flow_error_set(error, EINVAL,
2193 				   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2194 				   action, "Invalid counter id");
2195 		goto err_exit;
2196 	}
2197 
2198 	if (hns3_counter_new(dev, false, act_count->id, error))
2199 		goto err_exit;
2200 
2201 	counter = hns3_counter_lookup(dev, act_count->id);
2202 	if (counter == NULL) {
2203 		rte_flow_error_set(error, EINVAL,
2204 				   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2205 				   action, "Counter id not found");
2206 		goto err_exit;
2207 	}
2208 
2209 	counter->indirect = true;
2210 	handle->indirect_type = HNS3_INDIRECT_ACTION_TYPE_COUNT;
2211 	handle->counter_id = counter->id;
2212 
2213 	pthread_mutex_unlock(&hw->flows_lock);
2214 	return handle;
2215 
2216 err_exit:
2217 	pthread_mutex_unlock(&hw->flows_lock);
2218 	rte_free(handle);
2219 	return NULL;
2220 }
2221 
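/*
 * A minimal sketch of driving the indirect COUNT action from an application
 * (port id and counter id are illustrative assumptions):
 *
 *	struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	struct rte_flow_action_count cnt = { .id = 0 };
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *		.conf = &cnt,
 *	};
 *	struct rte_flow_query_count stats = { 0 };
 *	struct rte_flow_error err;
 *	struct rte_flow_action_handle *handle;
 *
 *	handle = rte_flow_action_handle_create(port_id, &conf, &action, &err);
 *	if (handle != NULL &&
 *	    rte_flow_action_handle_query(port_id, handle, &stats, &err) == 0)
 *		printf("hits %" PRIu64 "\n", stats.hits);
 */
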
2222 static int
2223 hns3_flow_action_destroy(struct rte_eth_dev *dev,
2224 			 struct rte_flow_action_handle *handle,
2225 			 struct rte_flow_error *error)
2226 {
2227 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2228 	struct hns3_flow_counter *counter;
2229 
2230 	pthread_mutex_lock(&hw->flows_lock);
2231 
2232 	if (handle->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT) {
2233 		pthread_mutex_unlock(&hw->flows_lock);
2234 		return rte_flow_error_set(error, EINVAL,
2235 					RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2236 					handle, "Invalid indirect type");
2237 	}
2238 
2239 	counter = hns3_counter_lookup(dev, handle->counter_id);
2240 	if (counter == NULL) {
2241 		pthread_mutex_unlock(&hw->flows_lock);
2242 		return rte_flow_error_set(error, EINVAL,
2243 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2244 				handle, "Counter id not exist");
2245 	}
2246 
2247 	if (counter->ref_cnt > 1) {
2248 		pthread_mutex_unlock(&hw->flows_lock);
2249 		return rte_flow_error_set(error, EBUSY,
2250 				RTE_FLOW_ERROR_TYPE_HANDLE,
2251 				handle, "Counter id in use");
2252 	}
2253 
2254 	(void)hns3_counter_release(dev, handle->counter_id);
2255 	rte_free(handle);
2256 
2257 	pthread_mutex_unlock(&hw->flows_lock);
2258 	return 0;
2259 }
2260 
2261 static int
2262 hns3_flow_action_query(struct rte_eth_dev *dev,
2263 		 const struct rte_flow_action_handle *handle,
2264 		 void *data,
2265 		 struct rte_flow_error *error)
2266 {
2267 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2268 	struct rte_flow flow;
2269 	int ret;
2270 
2271 	pthread_mutex_lock(&hw->flows_lock);
2272 
2273 	if (handle->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT) {
2274 		pthread_mutex_unlock(&hw->flows_lock);
2275 		return rte_flow_error_set(error, EINVAL,
2276 					RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2277 					handle, "Invalid indirect type");
2278 	}
2279 
2280 	memset(&flow, 0, sizeof(flow));
2281 	flow.counter_id = handle->counter_id;
2282 	ret = hns3_counter_query(dev, &flow,
2283 				 (struct rte_flow_query_count *)data, error);
2284 	pthread_mutex_unlock(&hw->flows_lock);
2285 	return ret;
2286 }
2287 
2288 static const struct rte_flow_ops hns3_flow_ops = {
2289 	.validate = hns3_flow_validate_wrap,
2290 	.create = hns3_flow_create_wrap,
2291 	.destroy = hns3_flow_destroy_wrap,
2292 	.flush = hns3_flow_flush_wrap,
2293 	.query = hns3_flow_query_wrap,
2294 	.isolate = NULL,
2295 	.action_handle_create = hns3_flow_action_create,
2296 	.action_handle_destroy = hns3_flow_action_destroy,
2297 	.action_handle_query = hns3_flow_action_query,
2298 };
2299 
2300 int
2301 hns3_dev_flow_ops_get(struct rte_eth_dev *dev,
2302 		      const struct rte_flow_ops **ops)
2303 {
2304 	struct hns3_hw *hw;
2305 
2306 	hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2307 	if (hw->adapter_state >= HNS3_NIC_CLOSED)
2308 		return -ENODEV;
2309 
2310 	*ops = &hns3_flow_ops;
2311 	return 0;
2312 }
2313 
2314 void
2315 hns3_flow_init(struct rte_eth_dev *dev)
2316 {
2317 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2318 	pthread_mutexattr_t attr;
2319 
2320 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2321 		return;
2322 
2323 	pthread_mutexattr_init(&attr);
2324 	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
2325 	pthread_mutex_init(&hw->flows_lock, &attr);
2326 	dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
2327 
2328 	TAILQ_INIT(&hw->flow_fdir_list);
2329 	TAILQ_INIT(&hw->flow_rss_list);
2330 	TAILQ_INIT(&hw->flow_list);
2331 }
2332 
2333 void
2334 hns3_flow_uninit(struct rte_eth_dev *dev)
2335 {
2336 	struct rte_flow_error error;
2337 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2338 		hns3_flow_flush_wrap(dev, &error);
2339 }
2340