xref: /dpdk/drivers/net/hns3/hns3_flow.c (revision cb440babbd45a80c059f8bc80e87c48d09086fd7)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2019 Hisilicon Limited.
3  */
4 
5 #include <stdbool.h>
6 #include <sys/queue.h>
7 #include <rte_flow_driver.h>
8 #include <rte_io.h>
9 #include <rte_malloc.h>
10 
11 #include "hns3_ethdev.h"
12 #include "hns3_logs.h"
13 
14 /* Default hash key (the standard 40-byte Toeplitz RSS key) */
15 static uint8_t hns3_hash_key[] = {
16 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
17 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
18 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
19 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
20 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
21 };
22 
23 static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF };
24 static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 };
25 
26 /* Special Filter id for non-specific packet flagging. Don't change value */
27 #define HNS3_MAX_FILTER_ID	0x0FFF
28 
29 #define ETHER_TYPE_MASK		0xFFFF
30 #define IPPROTO_MASK		0xFF
31 #define TUNNEL_TYPE_MASK	0xFFFF
32 
33 #define HNS3_TUNNEL_TYPE_VXLAN		0x12B5
34 #define HNS3_TUNNEL_TYPE_VXLAN_GPE	0x12B6
35 #define HNS3_TUNNEL_TYPE_GENEVE		0x17C1
36 #define HNS3_TUNNEL_TYPE_NVGRE		0x6558
37 
38 static enum rte_flow_item_type first_items[] = {
39 	RTE_FLOW_ITEM_TYPE_ETH,
40 	RTE_FLOW_ITEM_TYPE_IPV4,
41 	RTE_FLOW_ITEM_TYPE_IPV6,
42 	RTE_FLOW_ITEM_TYPE_TCP,
43 	RTE_FLOW_ITEM_TYPE_UDP,
44 	RTE_FLOW_ITEM_TYPE_SCTP,
45 	RTE_FLOW_ITEM_TYPE_ICMP,
46 	RTE_FLOW_ITEM_TYPE_NVGRE,
47 	RTE_FLOW_ITEM_TYPE_VXLAN,
48 	RTE_FLOW_ITEM_TYPE_GENEVE,
49 	RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
50 	RTE_FLOW_ITEM_TYPE_MPLS
51 };
52 
53 static enum rte_flow_item_type L2_next_items[] = {
54 	RTE_FLOW_ITEM_TYPE_VLAN,
55 	RTE_FLOW_ITEM_TYPE_IPV4,
56 	RTE_FLOW_ITEM_TYPE_IPV6
57 };
58 
59 static enum rte_flow_item_type L3_next_items[] = {
60 	RTE_FLOW_ITEM_TYPE_TCP,
61 	RTE_FLOW_ITEM_TYPE_UDP,
62 	RTE_FLOW_ITEM_TYPE_SCTP,
63 	RTE_FLOW_ITEM_TYPE_NVGRE,
64 	RTE_FLOW_ITEM_TYPE_ICMP
65 };
66 
67 static enum rte_flow_item_type L4_next_items[] = {
68 	RTE_FLOW_ITEM_TYPE_VXLAN,
69 	RTE_FLOW_ITEM_TYPE_GENEVE,
70 	RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
71 	RTE_FLOW_ITEM_TYPE_MPLS
72 };
73 
74 static enum rte_flow_item_type tunnel_next_items[] = {
75 	RTE_FLOW_ITEM_TYPE_ETH,
76 	RTE_FLOW_ITEM_TYPE_VLAN
77 };
78 
79 struct items_step_mngr {
80 	enum rte_flow_item_type *items;
81 	int count;
82 };
83 
84 static inline void
85 net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len)
86 {
87 	size_t i;
88 
89 	for (i = 0; i < len; i++)
90 		dst[i] = rte_be_to_cpu_32(src[i]);
91 }
92 
93 static inline const struct rte_flow_action *
94 find_rss_action(const struct rte_flow_action actions[])
95 {
96 	const struct rte_flow_action *next = &actions[0];
97 
98 	for (; next->type != RTE_FLOW_ACTION_TYPE_END; next++) {
99 		if (next->type == RTE_FLOW_ACTION_TYPE_RSS)
100 			return next;
101 	}
102 	return NULL;
103 }
104 
105 static inline struct hns3_flow_counter *
106 hns3_counter_lookup(struct rte_eth_dev *dev, uint32_t id)
107 {
108 	struct hns3_adapter *hns = dev->data->dev_private;
109 	struct hns3_pf *pf = &hns->pf;
110 	struct hns3_flow_counter *cnt;
111 
112 	LIST_FOREACH(cnt, &pf->flow_counters, next) {
113 		if (cnt->id == id)
114 			return cnt;
115 	}
116 	return NULL;
117 }
118 
119 static int
120 hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
121 		 struct rte_flow_error *error)
122 {
123 	struct hns3_adapter *hns = dev->data->dev_private;
124 	struct hns3_pf *pf = &hns->pf;
125 	struct hns3_flow_counter *cnt;
126 
127 	cnt = hns3_counter_lookup(dev, id);
128 	if (cnt) {
129 		if (!cnt->shared || cnt->shared != shared)
130 			return rte_flow_error_set(error, ENOTSUP,
131 						  RTE_FLOW_ERROR_TYPE_ACTION,
132 						  cnt,
133 						  "Counter id is used, shared flag not match");
134 		cnt->ref_cnt++;
135 		return 0;
136 	}
137 
138 	cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
139 	if (cnt == NULL)
140 		return rte_flow_error_set(error, ENOMEM,
141 					  RTE_FLOW_ERROR_TYPE_ACTION, cnt,
142 					  "Alloc mem for counter failed");
143 	cnt->id = id;
144 	cnt->shared = shared;
145 	cnt->ref_cnt = 1;
146 	cnt->hits = 0;
147 	LIST_INSERT_HEAD(&pf->flow_counters, cnt, next);
148 	return 0;
149 }
150 
151 static int
152 hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
153 		   struct rte_flow_query_count *qc,
154 		   struct rte_flow_error *error)
155 {
156 	struct hns3_adapter *hns = dev->data->dev_private;
157 	struct hns3_flow_counter *cnt;
158 	uint64_t value;
159 	int ret;
160 
161 	/* FDIR is available only in PF driver */
162 	if (hns->is_vf)
163 		return rte_flow_error_set(error, ENOTSUP,
164 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
165 					  "Fdir is not supported in VF");
166 	cnt = hns3_counter_lookup(dev, flow->counter_id);
167 	if (cnt == NULL)
168 		return rte_flow_error_set(error, EINVAL,
169 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
170 					  "Can't find counter id");
171 
172 	ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
173 	if (ret) {
174 		rte_flow_error_set(error, -ret,
175 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
176 				   NULL, "Read counter fail.");
177 		return ret;
178 	}
179 	qc->hits_set = 1;
180 	qc->hits = value;
181 
182 	return 0;
183 }
184 
185 static int
186 hns3_counter_release(struct rte_eth_dev *dev, uint32_t id)
187 {
188 	struct hns3_adapter *hns = dev->data->dev_private;
189 	struct hns3_hw *hw = &hns->hw;
190 	struct hns3_flow_counter *cnt;
191 
192 	cnt = hns3_counter_lookup(dev, id);
193 	if (cnt == NULL) {
194 		hns3_err(hw, "Can't find available counter to release");
195 		return -EINVAL;
196 	}
197 	cnt->ref_cnt--;
198 	if (cnt->ref_cnt == 0) {
199 		LIST_REMOVE(cnt, next);
200 		rte_free(cnt);
201 	}
202 	return 0;
203 }
204 
205 static void
206 hns3_counter_flush(struct rte_eth_dev *dev)
207 {
208 	struct hns3_adapter *hns = dev->data->dev_private;
209 	struct hns3_pf *pf = &hns->pf;
210 	struct hns3_flow_counter *cnt_ptr;
211 
212 	cnt_ptr = LIST_FIRST(&pf->flow_counters);
213 	while (cnt_ptr) {
214 		LIST_REMOVE(cnt_ptr, next);
215 		rte_free(cnt_ptr);
216 		cnt_ptr = LIST_FIRST(&pf->flow_counters);
217 	}
218 }
219 
220 static int
221 hns3_handle_action_queue(struct rte_eth_dev *dev,
222 			 const struct rte_flow_action *action,
223 			 struct hns3_fdir_rule *rule,
224 			 struct rte_flow_error *error)
225 {
226 	struct hns3_adapter *hns = dev->data->dev_private;
227 	const struct rte_flow_action_queue *queue;
228 	struct hns3_hw *hw = &hns->hw;
229 
230 	queue = (const struct rte_flow_action_queue *)action->conf;
231 	if (queue->index >= hw->used_rx_queues) {
232 		hns3_err(hw, "queue ID(%d) is greater than the number of "
233 			  "available queues (%d) in the driver.",
234 			  queue->index, hw->used_rx_queues);
235 		return rte_flow_error_set(error, EINVAL,
236 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
237 					  "Invalid queue ID in PF");
238 	}
239 
240 	rule->queue_id = queue->index;
241 	rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
242 	return 0;
243 }
244 
245 /*
246  * Parse the actions structure from the provided actions.
247  * The actions are validated as they are copied.
248  *
249  * @param actions[in]
250  * @param rule[out]
251  *   NIC specific actions derived from the actions.
252  * @param error[out]
253  */
254 static int
255 hns3_handle_actions(struct rte_eth_dev *dev,
256 		    const struct rte_flow_action actions[],
257 		    struct hns3_fdir_rule *rule, struct rte_flow_error *error)
258 {
259 	struct hns3_adapter *hns = dev->data->dev_private;
260 	const struct rte_flow_action_count *act_count;
261 	const struct rte_flow_action_mark *mark;
262 	struct hns3_pf *pf = &hns->pf;
263 	uint32_t counter_num;
264 	int ret;
265 
266 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
267 		switch (actions->type) {
268 		case RTE_FLOW_ACTION_TYPE_QUEUE:
269 			ret = hns3_handle_action_queue(dev, actions, rule,
270 						       error);
271 			if (ret)
272 				return ret;
273 			break;
274 		case RTE_FLOW_ACTION_TYPE_DROP:
275 			rule->action = HNS3_FD_ACTION_DROP_PACKET;
276 			break;
277 		case RTE_FLOW_ACTION_TYPE_MARK:
278 			mark =
279 			    (const struct rte_flow_action_mark *)actions->conf;
280 			if (mark->id >= HNS3_MAX_FILTER_ID)
281 				return rte_flow_error_set(error, EINVAL,
282 						     RTE_FLOW_ERROR_TYPE_ACTION,
283 						     actions,
284 						     "Invalid Mark ID");
285 			rule->fd_id = mark->id;
286 			rule->flags |= HNS3_RULE_FLAG_FDID;
287 			break;
288 		case RTE_FLOW_ACTION_TYPE_FLAG:
289 			rule->fd_id = HNS3_MAX_FILTER_ID;
290 			rule->flags |= HNS3_RULE_FLAG_FDID;
291 			break;
292 		case RTE_FLOW_ACTION_TYPE_COUNT:
293 			act_count =
294 			    (const struct rte_flow_action_count *)actions->conf;
295 			counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
296 			if (act_count->id >= counter_num)
297 				return rte_flow_error_set(error, EINVAL,
298 						     RTE_FLOW_ERROR_TYPE_ACTION,
299 						     actions,
300 						     "Invalid counter id");
301 			rule->act_cnt = *act_count;
302 			rule->flags |= HNS3_RULE_FLAG_COUNTER;
303 			break;
304 		case RTE_FLOW_ACTION_TYPE_VOID:
305 			break;
306 		default:
307 			return rte_flow_error_set(error, ENOTSUP,
308 						  RTE_FLOW_ERROR_TYPE_ACTION,
309 						  NULL, "Unsupported action");
310 		}
311 	}
312 
313 	return 0;
314 }
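
/*
 * Illustrative sketch (never called by the driver): an action list that
 * combines QUEUE with MARK and COUNT, all of which hns3_handle_actions()
 * above accepts. The queue index and ids are placeholder values, and the
 * helper name is hypothetical.
 */
static __rte_unused const struct rte_flow_action *
hns3_example_fdir_actions(void)
{
	static const struct rte_flow_action_queue queue = { .index = 0 };
	static const struct rte_flow_action_mark mark = { .id = 1 };
	static const struct rte_flow_action_count count = { .id = 0 };
	static const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return actions;
}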
315 
316 /* Check whether the attributes of a flow director rule are supported. */
317 static int
318 hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
319 {
320 	if (!attr->ingress)
321 		return rte_flow_error_set(error, EINVAL,
322 					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
323 					  attr, "Ingress can't be zero");
324 	if (attr->egress)
325 		return rte_flow_error_set(error, ENOTSUP,
326 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
327 					  attr, "Not support egress");
328 	if (attr->transfer)
329 		return rte_flow_error_set(error, ENOTSUP,
330 					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
331 					  attr, "No support for transfer");
332 	if (attr->priority)
333 		return rte_flow_error_set(error, ENOTSUP,
334 					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
335 					  attr, "Not support priority");
336 	if (attr->group)
337 		return rte_flow_error_set(error, ENOTSUP,
338 					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
339 					  attr, "Not support group");
340 	return 0;
341 }
342 
343 static int
344 hns3_parse_eth(const struct rte_flow_item *item,
345 		   struct hns3_fdir_rule *rule, struct rte_flow_error *error)
346 {
347 	const struct rte_flow_item_eth *eth_spec;
348 	const struct rte_flow_item_eth *eth_mask;
349 
350 	if (item->spec == NULL && item->mask)
351 		return rte_flow_error_set(error, EINVAL,
352 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
353 					  "Can't configure FDIR with mask but without spec");
354 
355 	/* Only used to describe the protocol stack. */
356 	if (item->spec == NULL && item->mask == NULL)
357 		return 0;
358 
359 	if (item->mask) {
360 		eth_mask = item->mask;
361 		if (eth_mask->type) {
362 			hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
363 			rule->key_conf.mask.ether_type =
364 			    rte_be_to_cpu_16(eth_mask->type);
365 		}
366 		if (!rte_is_zero_ether_addr(&eth_mask->src)) {
367 			hns3_set_bit(rule->input_set, INNER_SRC_MAC, 1);
368 			memcpy(rule->key_conf.mask.src_mac,
369 			       eth_mask->src.addr_bytes, RTE_ETHER_ADDR_LEN);
370 		}
371 		if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
372 			hns3_set_bit(rule->input_set, INNER_DST_MAC, 1);
373 			memcpy(rule->key_conf.mask.dst_mac,
374 			       eth_mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
375 		}
376 	}
377 
378 	eth_spec = item->spec;
379 	rule->key_conf.spec.ether_type = rte_be_to_cpu_16(eth_spec->type);
380 	memcpy(rule->key_conf.spec.src_mac, eth_spec->src.addr_bytes,
381 	       RTE_ETHER_ADDR_LEN);
382 	memcpy(rule->key_conf.spec.dst_mac, eth_spec->dst.addr_bytes,
383 	       RTE_ETHER_ADDR_LEN);
384 	return 0;
385 }
386 
387 static int
388 hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
389 		struct rte_flow_error *error)
390 {
391 	const struct rte_flow_item_vlan *vlan_spec;
392 	const struct rte_flow_item_vlan *vlan_mask;
393 
394 	if (item->spec == NULL && item->mask)
395 		return rte_flow_error_set(error, EINVAL,
396 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
397 					  "Can't configure FDIR with mask but without spec");
398 
399 	rule->key_conf.vlan_num++;
400 	if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
401 		return rte_flow_error_set(error, EINVAL,
402 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
403 					  "Vlan_num is more than 2");
404 
405 	/* Only used to describe the protocol stack. */
406 	if (item->spec == NULL && item->mask == NULL)
407 		return 0;
408 
409 	if (item->mask) {
410 		vlan_mask = item->mask;
411 		if (vlan_mask->tci) {
412 			if (rule->key_conf.vlan_num == 1) {
413 				hns3_set_bit(rule->input_set, INNER_VLAN_TAG1,
414 					     1);
415 				rule->key_conf.mask.vlan_tag1 =
416 				    rte_be_to_cpu_16(vlan_mask->tci);
417 			} else {
418 				hns3_set_bit(rule->input_set, INNER_VLAN_TAG2,
419 					     1);
420 				rule->key_conf.mask.vlan_tag2 =
421 				    rte_be_to_cpu_16(vlan_mask->tci);
422 			}
423 		}
424 	}
425 
426 	vlan_spec = item->spec;
427 	if (rule->key_conf.vlan_num == 1)
428 		rule->key_conf.spec.vlan_tag1 =
429 		    rte_be_to_cpu_16(vlan_spec->tci);
430 	else
431 		rule->key_conf.spec.vlan_tag2 =
432 		    rte_be_to_cpu_16(vlan_spec->tci);
433 	return 0;
434 }
435 
436 static int
437 hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
438 		struct rte_flow_error *error)
439 {
440 	const struct rte_flow_item_ipv4 *ipv4_spec;
441 	const struct rte_flow_item_ipv4 *ipv4_mask;
442 
443 	if (item->spec == NULL && item->mask)
444 		return rte_flow_error_set(error, EINVAL,
445 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
446 					  "Can't configure FDIR with mask but without spec");
447 
448 	hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
449 	rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4;
450 	rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
451 	/* Only used to describe the protocol stack. */
452 	if (item->spec == NULL && item->mask == NULL)
453 		return 0;
454 
455 	if (item->mask) {
456 		ipv4_mask = item->mask;
457 
458 		if (ipv4_mask->hdr.total_length ||
459 		    ipv4_mask->hdr.packet_id ||
460 		    ipv4_mask->hdr.fragment_offset ||
461 		    ipv4_mask->hdr.time_to_live ||
462 		    ipv4_mask->hdr.hdr_checksum) {
463 			return rte_flow_error_set(error, EINVAL,
464 						  RTE_FLOW_ERROR_TYPE_ITEM,
465 						  item,
466 						  "Only support src & dst ip, tos, proto in IPV4");
467 		}
468 
469 		if (ipv4_mask->hdr.src_addr) {
470 			hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
471 			rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID] =
472 			    rte_be_to_cpu_32(ipv4_mask->hdr.src_addr);
473 		}
474 
475 		if (ipv4_mask->hdr.dst_addr) {
476 			hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
477 			rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID] =
478 			    rte_be_to_cpu_32(ipv4_mask->hdr.dst_addr);
479 		}
480 
481 		if (ipv4_mask->hdr.type_of_service) {
482 			hns3_set_bit(rule->input_set, INNER_IP_TOS, 1);
483 			rule->key_conf.mask.ip_tos =
484 			    ipv4_mask->hdr.type_of_service;
485 		}
486 
487 		if (ipv4_mask->hdr.next_proto_id) {
488 			hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
489 			rule->key_conf.mask.ip_proto =
490 			    ipv4_mask->hdr.next_proto_id;
491 		}
492 	}
493 
494 	ipv4_spec = item->spec;
495 	rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID] =
496 	    rte_be_to_cpu_32(ipv4_spec->hdr.src_addr);
497 	rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID] =
498 	    rte_be_to_cpu_32(ipv4_spec->hdr.dst_addr);
499 	rule->key_conf.spec.ip_tos = ipv4_spec->hdr.type_of_service;
500 	rule->key_conf.spec.ip_proto = ipv4_spec->hdr.next_proto_id;
501 	return 0;
502 }
503 
504 static int
505 hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
506 		struct rte_flow_error *error)
507 {
508 	const struct rte_flow_item_ipv6 *ipv6_spec;
509 	const struct rte_flow_item_ipv6 *ipv6_mask;
510 
511 	if (item->spec == NULL && item->mask)
512 		return rte_flow_error_set(error, EINVAL,
513 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
514 					  "Can't configure FDIR with mask but without spec");
515 
516 	hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
517 	rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6;
518 	rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
519 
520 	/* Only used to describe the protocol stack. */
521 	if (item->spec == NULL && item->mask == NULL)
522 		return 0;
523 
524 	if (item->mask) {
525 		ipv6_mask = item->mask;
526 		if (ipv6_mask->hdr.vtc_flow ||
527 		    ipv6_mask->hdr.payload_len || ipv6_mask->hdr.hop_limits) {
528 			return rte_flow_error_set(error, EINVAL,
529 						  RTE_FLOW_ERROR_TYPE_ITEM,
530 						  item,
531 						  "Only support src & dst ip, proto in IPV6");
532 		}
533 		net_addr_to_host(rule->key_conf.mask.src_ip,
534 				 (const rte_be32_t *)ipv6_mask->hdr.src_addr,
535 				 IP_ADDR_LEN);
536 		net_addr_to_host(rule->key_conf.mask.dst_ip,
537 				 (const rte_be32_t *)ipv6_mask->hdr.dst_addr,
538 				 IP_ADDR_LEN);
539 		rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto;
540 		if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID])
541 			hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
542 		if (rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID])
543 			hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
544 		if (ipv6_mask->hdr.proto)
545 			hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
546 	}
547 
548 	ipv6_spec = item->spec;
549 	net_addr_to_host(rule->key_conf.spec.src_ip,
550 			 (const rte_be32_t *)ipv6_spec->hdr.src_addr,
551 			 IP_ADDR_LEN);
552 	net_addr_to_host(rule->key_conf.spec.dst_ip,
553 			 (const rte_be32_t *)ipv6_spec->hdr.dst_addr,
554 			 IP_ADDR_LEN);
555 	rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto;
556 
557 	return 0;
558 }
559 
560 static int
561 hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
562 	       struct rte_flow_error *error)
563 {
564 	const struct rte_flow_item_tcp *tcp_spec;
565 	const struct rte_flow_item_tcp *tcp_mask;
566 
567 	if (item->spec == NULL && item->mask)
568 		return rte_flow_error_set(error, EINVAL,
569 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
570 					  "Can't configure FDIR with mask but without spec");
571 
572 	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
573 	rule->key_conf.spec.ip_proto = IPPROTO_TCP;
574 	rule->key_conf.mask.ip_proto = IPPROTO_MASK;
575 
576 	/* Only used to describe the protocol stack. */
577 	if (item->spec == NULL && item->mask == NULL)
578 		return 0;
579 
580 	if (item->mask) {
581 		tcp_mask = item->mask;
582 		if (tcp_mask->hdr.sent_seq ||
583 		    tcp_mask->hdr.recv_ack ||
584 		    tcp_mask->hdr.data_off ||
585 		    tcp_mask->hdr.tcp_flags ||
586 		    tcp_mask->hdr.rx_win ||
587 		    tcp_mask->hdr.cksum || tcp_mask->hdr.tcp_urp) {
588 			return rte_flow_error_set(error, EINVAL,
589 						  RTE_FLOW_ERROR_TYPE_ITEM,
590 						  item,
591 						  "Only support src & dst port in TCP");
592 		}
593 
594 		if (tcp_mask->hdr.src_port) {
595 			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
596 			rule->key_conf.mask.src_port =
597 			    rte_be_to_cpu_16(tcp_mask->hdr.src_port);
598 		}
599 		if (tcp_mask->hdr.dst_port) {
600 			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
601 			rule->key_conf.mask.dst_port =
602 			    rte_be_to_cpu_16(tcp_mask->hdr.dst_port);
603 		}
604 	}
605 
606 	tcp_spec = item->spec;
607 	rule->key_conf.spec.src_port = rte_be_to_cpu_16(tcp_spec->hdr.src_port);
608 	rule->key_conf.spec.dst_port = rte_be_to_cpu_16(tcp_spec->hdr.dst_port);
609 
610 	return 0;
611 }
612 
613 static int
614 hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
615 	       struct rte_flow_error *error)
616 {
617 	const struct rte_flow_item_udp *udp_spec;
618 	const struct rte_flow_item_udp *udp_mask;
619 
620 	if (item->spec == NULL && item->mask)
621 		return rte_flow_error_set(error, EINVAL,
622 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
623 					  "Can't configure FDIR with mask but without spec");
624 
625 	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
626 	rule->key_conf.spec.ip_proto = IPPROTO_UDP;
627 	rule->key_conf.mask.ip_proto = IPPROTO_MASK;
628 	/* Only used to describe the protocol stack. */
629 	if (item->spec == NULL && item->mask == NULL)
630 		return 0;
631 
632 	if (item->mask) {
633 		udp_mask = item->mask;
634 		if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
635 			return rte_flow_error_set(error, EINVAL,
636 						  RTE_FLOW_ERROR_TYPE_ITEM,
637 						  item,
638 						  "Only support src & dst port in UDP");
639 		}
640 		if (udp_mask->hdr.src_port) {
641 			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
642 			rule->key_conf.mask.src_port =
643 			    rte_be_to_cpu_16(udp_mask->hdr.src_port);
644 		}
645 		if (udp_mask->hdr.dst_port) {
646 			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
647 			rule->key_conf.mask.dst_port =
648 			    rte_be_to_cpu_16(udp_mask->hdr.dst_port);
649 		}
650 	}
651 
652 	udp_spec = item->spec;
653 	rule->key_conf.spec.src_port = rte_be_to_cpu_16(udp_spec->hdr.src_port);
654 	rule->key_conf.spec.dst_port = rte_be_to_cpu_16(udp_spec->hdr.dst_port);
655 
656 	return 0;
657 }
658 
659 static int
660 hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
661 		struct rte_flow_error *error)
662 {
663 	const struct rte_flow_item_sctp *sctp_spec;
664 	const struct rte_flow_item_sctp *sctp_mask;
665 
666 	if (item->spec == NULL && item->mask)
667 		return rte_flow_error_set(error, EINVAL,
668 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
669 					  "Can't configure FDIR with mask but without spec");
670 
671 	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
672 	rule->key_conf.spec.ip_proto = IPPROTO_SCTP;
673 	rule->key_conf.mask.ip_proto = IPPROTO_MASK;
674 
675 	/* Only used to describe the protocol stack. */
676 	if (item->spec == NULL && item->mask == NULL)
677 		return 0;
678 
679 	if (item->mask) {
680 		sctp_mask = item->mask;
681 		if (sctp_mask->hdr.cksum)
682 			return rte_flow_error_set(error, EINVAL,
683 						  RTE_FLOW_ERROR_TYPE_ITEM,
684 						  item,
685 						  "Only support src & dst port in SCTP");
686 
687 		if (sctp_mask->hdr.src_port) {
688 			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
689 			rule->key_conf.mask.src_port =
690 			    rte_be_to_cpu_16(sctp_mask->hdr.src_port);
691 		}
692 		if (sctp_mask->hdr.dst_port) {
693 			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
694 			rule->key_conf.mask.dst_port =
695 			    rte_be_to_cpu_16(sctp_mask->hdr.dst_port);
696 		}
697 		if (sctp_mask->hdr.tag) {
698 			hns3_set_bit(rule->input_set, INNER_SCTP_TAG, 1);
699 			rule->key_conf.mask.sctp_tag =
700 			    rte_be_to_cpu_32(sctp_mask->hdr.tag);
701 		}
702 	}
703 
704 	sctp_spec = item->spec;
705 	rule->key_conf.spec.src_port =
706 	    rte_be_to_cpu_16(sctp_spec->hdr.src_port);
707 	rule->key_conf.spec.dst_port =
708 	    rte_be_to_cpu_16(sctp_spec->hdr.dst_port);
709 	rule->key_conf.spec.sctp_tag = rte_be_to_cpu_32(sctp_spec->hdr.tag);
710 
711 	return 0;
712 }
713 
714 /*
715  * Check items before the tunnel item, save inner configs to outer configs,
716  * and clear the inner configs.
717  * The key consists of two parts: meta_data and tuple keys.
718  * Meta data uses 15 bits, including vlan_num(2bit), dst_port(12bit) and tunnel
719  * packet(1bit).
720  * Tuple keys use 384 bits, including ot_dst-mac(48bit), ot_dst-port(16bit),
721  * ot_tun_vni(24bit), ot_flow_id(8bit), src-mac(48bit), dst-mac(48bit),
722  * src-ip(32/128bit), dst-ip(32/128bit), src-port(16bit), dst-port(16bit),
723  * tos(8bit), ether-proto(16bit), ip-proto(8bit), vlantag1(16bit),
724  * vlantag2(16bit) and sctp-tag(32bit).
725  */
726 static int
727 hns3_handle_tunnel(const struct rte_flow_item *item,
728 		   struct hns3_fdir_rule *rule, struct rte_flow_error *error)
729 {
730 	/* check eth config */
731 	if (rule->input_set & (BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC)))
732 		return rte_flow_error_set(error, EINVAL,
733 					  RTE_FLOW_ERROR_TYPE_ITEM,
734 					  item, "Outer eth mac is unsupported");
735 	if (rule->input_set & BIT(INNER_ETH_TYPE)) {
736 		hns3_set_bit(rule->input_set, OUTER_ETH_TYPE, 1);
737 		rule->key_conf.spec.outer_ether_type =
738 		    rule->key_conf.spec.ether_type;
739 		rule->key_conf.mask.outer_ether_type =
740 		    rule->key_conf.mask.ether_type;
741 		hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 0);
742 		rule->key_conf.spec.ether_type = 0;
743 		rule->key_conf.mask.ether_type = 0;
744 	}
745 
746 	/* check vlan config */
747 	if (rule->input_set & (BIT(INNER_VLAN_TAG1) | BIT(INNER_VLAN_TAG2)))
748 		return rte_flow_error_set(error, EINVAL,
749 					  RTE_FLOW_ERROR_TYPE_ITEM,
750 					  item,
751 					  "Outer vlan tags are unsupported");
752 
753 	/* clear vlan_num for inner vlan select */
754 	rule->key_conf.outer_vlan_num = rule->key_conf.vlan_num;
755 	rule->key_conf.vlan_num = 0;
756 
757 	/* check L3 config */
758 	if (rule->input_set &
759 	    (BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | BIT(INNER_IP_TOS)))
760 		return rte_flow_error_set(error, EINVAL,
761 					  RTE_FLOW_ERROR_TYPE_ITEM,
762 					  item, "Outer ip is unsupported");
763 	if (rule->input_set & BIT(INNER_IP_PROTO)) {
764 		hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
765 		rule->key_conf.spec.outer_proto = rule->key_conf.spec.ip_proto;
766 		rule->key_conf.mask.outer_proto = rule->key_conf.mask.ip_proto;
767 		hns3_set_bit(rule->input_set, INNER_IP_PROTO, 0);
768 		rule->key_conf.spec.ip_proto = 0;
769 		rule->key_conf.mask.ip_proto = 0;
770 	}
771 
772 	/* check L4 config */
773 	if (rule->input_set & BIT(INNER_SCTP_TAG))
774 		return rte_flow_error_set(error, EINVAL,
775 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
776 					  "Outer sctp tag is unsupported");
777 
778 	if (rule->input_set & BIT(INNER_SRC_PORT)) {
779 		hns3_set_bit(rule->input_set, OUTER_SRC_PORT, 1);
780 		rule->key_conf.spec.outer_src_port =
781 		    rule->key_conf.spec.src_port;
782 		rule->key_conf.mask.outer_src_port =
783 		    rule->key_conf.mask.src_port;
784 		hns3_set_bit(rule->input_set, INNER_SRC_PORT, 0);
785 		rule->key_conf.spec.src_port = 0;
786 		rule->key_conf.mask.src_port = 0;
787 	}
788 	if (rule->input_set & BIT(INNER_DST_PORT)) {
789 		hns3_set_bit(rule->input_set, INNER_DST_PORT, 0);
790 		rule->key_conf.spec.dst_port = 0;
791 		rule->key_conf.mask.dst_port = 0;
792 	}
793 	return 0;
794 }
795 
796 static int
797 hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
798 		 struct rte_flow_error *error)
799 {
800 	const struct rte_flow_item_vxlan *vxlan_spec;
801 	const struct rte_flow_item_vxlan *vxlan_mask;
802 
803 	if (item->spec == NULL && item->mask)
804 		return rte_flow_error_set(error, EINVAL,
805 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
806 					  "Can't configure FDIR with mask but without spec");
807 	else if (item->spec && (item->mask == NULL))
808 		return rte_flow_error_set(error, EINVAL,
809 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
810 					  "Tunnel packets must configure with mask");
811 
812 	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
813 	rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
814 	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
815 		rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN;
816 	else
817 		rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN_GPE;
818 
819 	/* Only used to describe the protocol stack. */
820 	if (item->spec == NULL && item->mask == NULL)
821 		return 0;
822 
823 	vxlan_mask = item->mask;
824 	vxlan_spec = item->spec;
825 
826 	if (vxlan_mask->flags)
827 		return rte_flow_error_set(error, EINVAL,
828 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
829 					  "Flags are not supported in VxLAN");
830 
831 	/* VNI must be totally masked or not. */
832 	if (memcmp(vxlan_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
833 	    memcmp(vxlan_mask->vni, zero_mask, VNI_OR_TNI_LEN))
834 		return rte_flow_error_set(error, EINVAL,
835 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
836 					  "VNI must be totally masked or not in VxLAN");
837 	if (vxlan_mask->vni[0]) {
838 		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
839 		memcpy(rule->key_conf.mask.outer_tun_vni, vxlan_mask->vni,
840 			   VNI_OR_TNI_LEN);
841 	}
842 	memcpy(rule->key_conf.spec.outer_tun_vni, vxlan_spec->vni,
843 		   VNI_OR_TNI_LEN);
844 	return 0;
845 }
846 
847 static int
848 hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
849 		 struct rte_flow_error *error)
850 {
851 	const struct rte_flow_item_nvgre *nvgre_spec;
852 	const struct rte_flow_item_nvgre *nvgre_mask;
853 
854 	if (item->spec == NULL && item->mask)
855 		return rte_flow_error_set(error, EINVAL,
856 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
857 					  "Can't configure FDIR with mask but without spec");
858 	else if (item->spec && (item->mask == NULL))
859 		return rte_flow_error_set(error, EINVAL,
860 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
861 					  "Tunnel packets must configure with mask");
862 
863 	hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
864 	rule->key_conf.spec.outer_proto = IPPROTO_GRE;
865 	rule->key_conf.mask.outer_proto = IPPROTO_MASK;
866 
867 	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
868 	rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_NVGRE;
869 	rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
870 	/* Only used to describe the protocol stack. */
871 	if (item->spec == NULL && item->mask == NULL)
872 		return 0;
873 
874 	nvgre_mask = item->mask;
875 	nvgre_spec = item->spec;
876 
877 	if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
878 		return rte_flow_error_set(error, EINVAL,
879 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
880 					  "Ver/protocol is not supported in NVGRE");
881 
882 	/* TNI must be totally masked or not. */
883 	if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
884 	    memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
885 		return rte_flow_error_set(error, EINVAL,
886 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
887 					  "TNI must be totally masked or not in NVGRE");
888 
889 	if (nvgre_mask->tni[0]) {
890 		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
891 		memcpy(rule->key_conf.mask.outer_tun_vni, nvgre_mask->tni,
892 			   VNI_OR_TNI_LEN);
893 	}
894 	memcpy(rule->key_conf.spec.outer_tun_vni, nvgre_spec->tni,
895 		   VNI_OR_TNI_LEN);
896 
897 	if (nvgre_mask->flow_id) {
898 		hns3_set_bit(rule->input_set, OUTER_TUN_FLOW_ID, 1);
899 		rule->key_conf.mask.outer_tun_flow_id = nvgre_mask->flow_id;
900 	}
901 	rule->key_conf.spec.outer_tun_flow_id = nvgre_spec->flow_id;
902 	return 0;
903 }
904 
905 static int
906 hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
907 		  struct rte_flow_error *error)
908 {
909 	const struct rte_flow_item_geneve *geneve_spec;
910 	const struct rte_flow_item_geneve *geneve_mask;
911 
912 	if (item->spec == NULL && item->mask)
913 		return rte_flow_error_set(error, EINVAL,
914 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
915 					  "Can't configure FDIR with mask but without spec");
916 	else if (item->spec && (item->mask == NULL))
917 		return rte_flow_error_set(error, EINVAL,
918 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
919 					  "Tunnel packets must configure with mask");
920 
921 	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
922 	rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE;
923 	rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
924 	/* Only used to describe the protocol stack. */
925 	if (item->spec == NULL && item->mask == NULL)
926 		return 0;
927 
928 	geneve_mask = item->mask;
929 	geneve_spec = item->spec;
930 
931 	if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
932 		return rte_flow_error_set(error, EINVAL,
933 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
934 					  "Ver/protocol is not supported in GENEVE");
935 	/* VNI must be totally masked or not. */
936 	if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
937 	    memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
938 		return rte_flow_error_set(error, EINVAL,
939 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
940 					  "VNI must be totally masked or not in GENEVE");
941 	if (geneve_mask->vni[0]) {
942 		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
943 		memcpy(rule->key_conf.mask.outer_tun_vni, geneve_mask->vni,
944 			   VNI_OR_TNI_LEN);
945 	}
946 	memcpy(rule->key_conf.spec.outer_tun_vni, geneve_spec->vni,
947 		   VNI_OR_TNI_LEN);
948 	return 0;
949 }
950 
951 static int
952 hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
953 		  struct rte_flow_error *error)
954 {
955 	int ret;
956 
957 	switch (item->type) {
958 	case RTE_FLOW_ITEM_TYPE_VXLAN:
959 	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
960 		ret = hns3_parse_vxlan(item, rule, error);
961 		break;
962 	case RTE_FLOW_ITEM_TYPE_NVGRE:
963 		ret = hns3_parse_nvgre(item, rule, error);
964 		break;
965 	case RTE_FLOW_ITEM_TYPE_GENEVE:
966 		ret = hns3_parse_geneve(item, rule, error);
967 		break;
968 	default:
969 		return rte_flow_error_set(error, ENOTSUP,
970 					  RTE_FLOW_ERROR_TYPE_HANDLE,
971 					  NULL, "Unsupported tunnel type!");
972 	}
973 	if (ret)
974 		return ret;
975 	return hns3_handle_tunnel(item, rule, error);
976 }
977 
978 static int
979 hns3_parse_normal(const struct rte_flow_item *item,
980 		  struct hns3_fdir_rule *rule,
981 		  struct items_step_mngr *step_mngr,
982 		  struct rte_flow_error *error)
983 {
984 	int ret;
985 
986 	switch (item->type) {
987 	case RTE_FLOW_ITEM_TYPE_ETH:
988 		ret = hns3_parse_eth(item, rule, error);
989 		step_mngr->items = L2_next_items;
990 		step_mngr->count = ARRAY_SIZE(L2_next_items);
991 		break;
992 	case RTE_FLOW_ITEM_TYPE_VLAN:
993 		ret = hns3_parse_vlan(item, rule, error);
994 		step_mngr->items = L2_next_items;
995 		step_mngr->count = ARRAY_SIZE(L2_next_items);
996 		break;
997 	case RTE_FLOW_ITEM_TYPE_IPV4:
998 		ret = hns3_parse_ipv4(item, rule, error);
999 		step_mngr->items = L3_next_items;
1000 		step_mngr->count = ARRAY_SIZE(L3_next_items);
1001 		break;
1002 	case RTE_FLOW_ITEM_TYPE_IPV6:
1003 		ret = hns3_parse_ipv6(item, rule, error);
1004 		step_mngr->items = L3_next_items;
1005 		step_mngr->count = ARRAY_SIZE(L3_next_items);
1006 		break;
1007 	case RTE_FLOW_ITEM_TYPE_TCP:
1008 		ret = hns3_parse_tcp(item, rule, error);
1009 		step_mngr->items = L4_next_items;
1010 		step_mngr->count = ARRAY_SIZE(L4_next_items);
1011 		break;
1012 	case RTE_FLOW_ITEM_TYPE_UDP:
1013 		ret = hns3_parse_udp(item, rule, error);
1014 		step_mngr->items = L4_next_items;
1015 		step_mngr->count = ARRAY_SIZE(L4_next_items);
1016 		break;
1017 	case RTE_FLOW_ITEM_TYPE_SCTP:
1018 		ret = hns3_parse_sctp(item, rule, error);
1019 		step_mngr->items = L4_next_items;
1020 		step_mngr->count = ARRAY_SIZE(L4_next_items);
1021 		break;
1022 	default:
1023 		return rte_flow_error_set(error, ENOTSUP,
1024 					  RTE_FLOW_ERROR_TYPE_HANDLE,
1025 					  NULL, "Unsupported normal type!");
1026 	}
1027 
1028 	return ret;
1029 }
1030 
1031 static int
1032 hns3_validate_item(const struct rte_flow_item *item,
1033 		   struct items_step_mngr step_mngr,
1034 		   struct rte_flow_error *error)
1035 {
1036 	int i;
1037 
1038 	if (item->last)
1039 		return rte_flow_error_set(error, ENOTSUP,
1040 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
1041 					  "Not supported last point for range");
1042 
1043 	for (i = 0; i < step_mngr.count; i++) {
1044 		if (item->type == step_mngr.items[i])
1045 			break;
1046 	}
1047 
1048 	if (i == step_mngr.count) {
1049 		return rte_flow_error_set(error, EINVAL,
1050 					  RTE_FLOW_ERROR_TYPE_ITEM,
1051 					  item, "Invalid or missing item");
1052 	}
1053 	return 0;
1054 }
1055 
1056 static inline bool
1057 is_tunnel_packet(enum rte_flow_item_type type)
1058 {
1059 	if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
1060 	    type == RTE_FLOW_ITEM_TYPE_VXLAN ||
1061 	    type == RTE_FLOW_ITEM_TYPE_NVGRE ||
1062 	    type == RTE_FLOW_ITEM_TYPE_GENEVE ||
1063 	    type == RTE_FLOW_ITEM_TYPE_MPLS)
1064 		return true;
1065 	return false;
1066 }
1067 
1068 /*
1069  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1070  * and fill in the flow director filter info along the way.
1071  * UDP/TCP/SCTP PATTERN:
1072  * The first not void item can be ETH or IPV4 or IPV6
1073  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1074  * The next not void item could be UDP or TCP or SCTP (optional)
1075  * The next not void item could be RAW (for flexbyte, optional)
1076  * The next not void item must be END.
1077  * A Fuzzy Match pattern can appear at any place before END.
1078  * Fuzzy Match is optional for IPV4 but is required for IPV6
1079  * MAC VLAN PATTERN:
1080  * The first not void item must be ETH.
1081  * The second not void item must be MAC VLAN.
1082  * The next not void item must be END.
1083  * ACTION:
1084  * The first not void action should be QUEUE or DROP.
1085  * The second not void optional action should be MARK,
1086  * mark_id is a uint32_t number.
1087  * The next not void action should be END.
1088  * UDP/TCP/SCTP pattern example:
1089  * ITEM		Spec			Mask
1090  * ETH		NULL			NULL
1091  * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
1092  *		dst_addr 192.167.3.50	0xFFFFFFFF
1093  * UDP/TCP/SCTP	src_port	80	0xFFFF
1094  *		dst_port	80	0xFFFF
1095  * END
1096  * MAC VLAN pattern example:
1097  * ITEM		Spec			Mask
1098  * ETH		dst_addr
1099  *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
1100  *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
1101  * MAC VLAN	tci	0x2016		0xEFFF
1102  * END
1103  * Other members in mask and spec should be set to 0x00.
1104  * Item->last should be NULL. (A usage sketch follows this function.)
1105  */
1106 static int
1107 hns3_parse_fdir_filter(struct rte_eth_dev *dev,
1108 		       const struct rte_flow_item pattern[],
1109 		       const struct rte_flow_action actions[],
1110 		       struct hns3_fdir_rule *rule,
1111 		       struct rte_flow_error *error)
1112 {
1113 	struct hns3_adapter *hns = dev->data->dev_private;
1114 	const struct rte_flow_item *item;
1115 	struct items_step_mngr step_mngr;
1116 	int ret;
1117 
1118 	/* FDIR is available only in PF driver */
1119 	if (hns->is_vf)
1120 		return rte_flow_error_set(error, ENOTSUP,
1121 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1122 					  "Fdir not supported in VF");
1123 
1124 	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT)
1125 		return rte_flow_error_set(error, ENOTSUP,
1126 					  RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
1127 					  "fdir_conf.mode isn't perfect");
1128 
1129 	step_mngr.items = first_items;
1130 	step_mngr.count = ARRAY_SIZE(first_items);
1131 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1132 		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
1133 			continue;
1134 
1135 		ret = hns3_validate_item(item, step_mngr, error);
1136 		if (ret)
1137 			return ret;
1138 
1139 		if (is_tunnel_packet(item->type)) {
1140 			ret = hns3_parse_tunnel(item, rule, error);
1141 			if (ret)
1142 				return ret;
1143 			step_mngr.items = tunnel_next_items;
1144 			step_mngr.count = ARRAY_SIZE(tunnel_next_items);
1145 		} else {
1146 			ret = hns3_parse_normal(item, rule, &step_mngr, error);
1147 			if (ret)
1148 				return ret;
1149 		}
1150 	}
1151 
1152 	return hns3_handle_actions(dev, actions, rule, error);
1153 }
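
/*
 * Usage sketch (illustrative only, never called by the driver): one way an
 * application could build the UDP/IPV4 pattern documented above and hand it
 * to the generic rte_flow API. The addresses, ports, queue index and helper
 * name are placeholder values for this example.
 */
static __rte_unused struct rte_flow *
hns3_example_udp_fdir_flow(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
		.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
	};
	struct rte_flow_item_ipv4 ipv4_mask = {
		.hdr.src_addr = rte_cpu_to_be_32(UINT32_MAX),
		.hdr.dst_addr = rte_cpu_to_be_32(UINT32_MAX),
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr.src_port = rte_cpu_to_be_16(80),
		.hdr.dst_port = rte_cpu_to_be_16(80),
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr.src_port = rte_cpu_to_be_16(UINT16_MAX),
		.hdr.dst_port = rte_cpu_to_be_16(UINT16_MAX),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ipv4_spec, .mask = &ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}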
1154 
1155 void
1156 hns3_filterlist_init(struct rte_eth_dev *dev)
1157 {
1158 	struct hns3_process_private *process_list = dev->process_private;
1159 
1160 	TAILQ_INIT(&process_list->fdir_list);
1161 	TAILQ_INIT(&process_list->filter_rss_list);
1162 	TAILQ_INIT(&process_list->flow_list);
1163 }
1164 
1165 static void
1166 hns3_filterlist_flush(struct rte_eth_dev *dev)
1167 {
1168 	struct hns3_process_private *process_list = dev->process_private;
1169 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
1170 	struct hns3_rss_conf_ele *rss_filter_ptr;
1171 	struct hns3_flow_mem *flow_node;
1172 
1173 	fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
1174 	while (fdir_rule_ptr) {
1175 		TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
1176 		rte_free(fdir_rule_ptr);
1177 		fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
1178 	}
1179 
1180 	rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
1181 	while (rss_filter_ptr) {
1182 		TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
1183 			     entries);
1184 		rte_free(rss_filter_ptr);
1185 		rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
1186 	}
1187 
1188 	flow_node = TAILQ_FIRST(&process_list->flow_list);
1189 	while (flow_node) {
1190 		TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
1191 		rte_free(flow_node->flow);
1192 		rte_free(flow_node);
1193 		flow_node = TAILQ_FIRST(&process_list->flow_list);
1194 	}
1195 }
1196 
1197 static bool
1198 hns3_action_rss_same(const struct rte_flow_action_rss *comp,
1199 		     const struct rte_flow_action_rss *with)
1200 {
1201 	return (comp->func == with->func &&
1202 		comp->level == with->level &&
1203 		comp->types == with->types &&
1204 		comp->key_len == with->key_len &&
1205 		comp->queue_num == with->queue_num &&
1206 		!memcmp(comp->key, with->key, with->key_len) &&
1207 		!memcmp(comp->queue, with->queue,
1208 			sizeof(*with->queue) * with->queue_num));
1209 }
1210 
1211 static int
1212 hns3_rss_conf_copy(struct hns3_rss_conf *out,
1213 		   const struct rte_flow_action_rss *in)
1214 {
1215 	if (in->key_len > RTE_DIM(out->key) ||
1216 	    in->queue_num > RTE_DIM(out->queue))
1217 		return -EINVAL;
1218 	if (in->key == NULL && in->key_len)
1219 		return -EINVAL;
1220 	out->conf = (struct rte_flow_action_rss) {
1221 		.func = in->func,
1222 		.level = in->level,
1223 		.types = in->types,
1224 		.key_len = in->key_len,
1225 		.queue_num = in->queue_num,
1226 	};
1227 	out->conf.queue =
1228 		memcpy(out->queue, in->queue,
1229 		       sizeof(*in->queue) * in->queue_num);
1230 	if (in->key)
1231 		out->conf.key = memcpy(out->key, in->key, in->key_len);
1232 
1233 	return 0;
1234 }
1235 
1236 /*
1237  * This function is used to parse and validate the RSS action.
1238  */
1239 static int
1240 hns3_parse_rss_filter(struct rte_eth_dev *dev,
1241 		      const struct rte_flow_action *actions,
1242 		      struct rte_flow_error *error)
1243 {
1244 	struct hns3_adapter *hns = dev->data->dev_private;
1245 	struct hns3_hw *hw = &hns->hw;
1246 	struct hns3_rss_conf *rss_conf = &hw->rss_info;
1247 	const struct rte_flow_action_rss *rss;
1248 	const struct rte_flow_action *act;
1249 	uint32_t act_index = 0;
1250 	uint64_t flow_types;
1251 	uint16_t n;
1252 
1253 	NEXT_ITEM_OF_ACTION(act, actions, act_index);
1254 	/* Get configuration args from APP cmdline input */
1255 	rss = act->conf;
1256 
1257 	if (rss == NULL || rss->queue_num == 0) {
1258 		return rte_flow_error_set(error, EINVAL,
1259 					  RTE_FLOW_ERROR_TYPE_ACTION,
1260 					  act, "no valid queues");
1261 	}
1262 
1263 	for (n = 0; n < rss->queue_num; n++) {
1264 		if (rss->queue[n] < dev->data->nb_rx_queues)
1265 			continue;
1266 		return rte_flow_error_set(error, EINVAL,
1267 					  RTE_FLOW_ERROR_TYPE_ACTION,
1268 					  act,
1269 					  "queue id > max number of queues");
1270 	}
1271 
1272 	/* Parse flow types of RSS */
1273 	if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types)
1274 		return rte_flow_error_set(error, EINVAL,
1275 					  RTE_FLOW_ERROR_TYPE_ACTION,
1276 					  act,
1277 					  "Flow types are unsupported by "
1278 					  "hns3's RSS");
1279 
1280 	flow_types = rss->types & HNS3_ETH_RSS_SUPPORT;
1281 	if (flow_types != rss->types)
1282 		hns3_warn(hw, "RSS flow types(%" PRIx64 ") include unsupported "
1283 			  "flow types", rss->types);
1284 
1285 	/* Parse RSS related parameters from RSS configuration */
1286 	switch (rss->func) {
1287 	case RTE_ETH_HASH_FUNCTION_DEFAULT:
1288 	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1289 	case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
1290 	case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
1291 		break;
1292 	default:
1293 		return rte_flow_error_set(error, ENOTSUP,
1294 					  RTE_FLOW_ERROR_TYPE_ACTION, act,
1295 					  "input RSS hash functions are not supported");
1296 	}
1297 
1298 	if (rss->level)
1299 		return rte_flow_error_set(error, ENOTSUP,
1300 					  RTE_FLOW_ERROR_TYPE_ACTION, act,
1301 					  "a nonzero RSS encapsulation level is not supported");
1302 	if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
1303 		return rte_flow_error_set(error, ENOTSUP,
1304 					  RTE_FLOW_ERROR_TYPE_ACTION, act,
1305 					  "RSS hash key must be exactly 40 bytes");
1306 	if (rss->queue_num > RTE_DIM(rss_conf->queue))
1307 		return rte_flow_error_set(error, ENOTSUP,
1308 					  RTE_FLOW_ERROR_TYPE_ACTION, act,
1309 					  "too many queues for RSS context");
1310 
1311 	act_index++;
1312 
1313 	/* Check if the next not void action is END */
1314 	NEXT_ITEM_OF_ACTION(act, actions, act_index);
1315 	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1316 		memset(rss_conf, 0, sizeof(struct hns3_rss_conf));
1317 		return rte_flow_error_set(error, EINVAL,
1318 					  RTE_FLOW_ERROR_TYPE_ACTION,
1319 					  act, "Not supported action.");
1320 	}
1321 
1322 	return 0;
1323 }
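
/*
 * Usage sketch (illustrative only, never called by the driver): a minimal
 * RSS action that passes the checks above -- default hash function, level 0,
 * no explicit key and two queues. The flow types, queue ids and helper name
 * are placeholder values for this example.
 */
static __rte_unused int
hns3_example_validate_rss_flow(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	uint16_t queues[] = { 0, 1 };
	struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0,
		.types = ETH_RSS_IP | ETH_RSS_TCP,
		.key_len = 0,
		.key = NULL,
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_validate(port_id, &attr, pattern, actions, error);
}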
1324 
1325 static int
1326 hns3_disable_rss(struct hns3_hw *hw)
1327 {
1328 	int ret;
1329 
1330 	/* Reset the redirection table so that all entries point to queue 0 */
1331 	ret = hns3_rss_reset_indir_table(hw);
1332 	if (ret)
1333 		return ret;
1334 
1335 	/* Disable RSS */
1336 	hw->rss_info.conf.types = 0;
1337 	hw->rss_dis_flag = true;
1338 
1339 	return 0;
1340 }
1341 
1342 static void
1343 hns3_parse_rss_key(struct hns3_hw *hw, struct rte_flow_action_rss *rss_conf)
1344 {
1345 	if (rss_conf->key == NULL ||
1346 	    rss_conf->key_len < HNS3_RSS_KEY_SIZE) {
1347 		hns3_info(hw, "Default RSS hash key will be used");
1348 		rss_conf->key = hns3_hash_key;
1349 		rss_conf->key_len = HNS3_RSS_KEY_SIZE;
1350 	}
1351 }
1352 
1353 static int
1354 hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func,
1355 			 uint8_t *hash_algo)
1356 {
1357 	enum rte_eth_hash_function algo_func = *func;
1358 	switch (algo_func) {
1359 	case RTE_ETH_HASH_FUNCTION_DEFAULT:
1360 		/* Keep *hash_algo as what it used to be */
1361 		algo_func = hw->rss_info.conf.func;
1362 		break;
1363 	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1364 		*hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ;
1365 		break;
1366 	case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
1367 		*hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE;
1368 		break;
1369 	case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
1370 		*hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP;
1371 		break;
1372 	default:
1373 		hns3_err(hw, "Invalid RSS algorithm configuration(%u)",
1374 			 algo_func);
1375 		return -EINVAL;
1376 	}
1377 	*func = algo_func;
1378 
1379 	return 0;
1380 }
1381 
1382 static int
1383 hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config)
1384 {
1385 	struct hns3_rss_tuple_cfg *tuple;
1386 	int ret;
1387 
1388 	/* Parse hash key */
1389 	hns3_parse_rss_key(hw, rss_config);
1390 
1391 	/* Parse hash algorithm */
1392 	ret = hns3_parse_rss_algorithm(hw, &rss_config->func,
1393 				       &hw->rss_info.hash_algo);
1394 	if (ret)
1395 		return ret;
1396 
1397 	ret = hns3_set_rss_algo_key(hw, rss_config->key);
1398 	if (ret)
1399 		return ret;
1400 
1401 	/* Update algorithm of hw */
1402 	hw->rss_info.conf.func = rss_config->func;
1403 
1404 	/* Set flow type supported */
1405 	tuple = &hw->rss_info.rss_tuple_sets;
1406 	ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_config->types);
1407 	if (ret)
1408 		hns3_err(hw, "Update RSS tuples by rss hf failed %d", ret);
1409 
1410 	return ret;
1411 }
1412 
1413 static int
1414 hns3_update_indir_table(struct rte_eth_dev *dev,
1415 			const struct rte_flow_action_rss *conf, uint16_t num)
1416 {
1417 	struct hns3_adapter *hns = dev->data->dev_private;
1418 	struct hns3_hw *hw = &hns->hw;
1419 	uint8_t indir_tbl[HNS3_RSS_IND_TBL_SIZE];
1420 	uint16_t j, allow_rss_queues;
1421 	uint8_t queue_id;
1422 	uint32_t i;
1423 
1424 	if (num == 0) {
1425 		hns3_err(hw, "No PF queues are configured to enable RSS");
1426 		return -ENOTSUP;
1427 	}
1428 
1429 	allow_rss_queues = RTE_MIN(dev->data->nb_rx_queues, hw->rss_size_max);
1430 	/* Fill in redirection table */
1431 	memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl,
1432 	       HNS3_RSS_IND_TBL_SIZE);
1433 	for (i = 0, j = 0; i < HNS3_RSS_IND_TBL_SIZE; i++, j++) {
1434 		j %= num;
1435 		if (conf->queue[j] >= allow_rss_queues) {
1436 			hns3_err(hw, "Invalid queue id(%u) to be set in "
1437 				     "redirection table, max number of rss "
1438 				     "queues: %u", conf->queue[j],
1439 				 allow_rss_queues);
1440 			return -EINVAL;
1441 		}
1442 		queue_id = conf->queue[j];
1443 		indir_tbl[i] = queue_id;
1444 	}
1445 
1446 	return hns3_set_rss_indir_table(hw, indir_tbl, HNS3_RSS_IND_TBL_SIZE);
1447 }
1448 
1449 static int
1450 hns3_config_rss_filter(struct rte_eth_dev *dev,
1451 		       const struct hns3_rss_conf *conf, bool add)
1452 {
1453 	struct hns3_adapter *hns = dev->data->dev_private;
1454 	struct hns3_hw *hw = &hns->hw;
1455 	struct hns3_rss_conf *rss_info = &hw->rss_info;
1456 	uint64_t flow_types;
1457 	uint16_t num;
1458 	int ret;
1459 
1460 	struct rte_flow_action_rss rss_flow_conf = {
1461 		.func = conf->conf.func,
1462 		.level = conf->conf.level,
1463 		.types = conf->conf.types,
1464 		.key_len = conf->conf.key_len,
1465 		.queue_num = conf->conf.queue_num,
1466 		.key = conf->conf.key_len ?
1467 		    (void *)(uintptr_t)conf->conf.key : NULL,
1468 		.queue = conf->conf.queue,
1469 	};
1470 
1471 	/* Reject flow types unsupported by hns3's RSS */
1472 	if (!(rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT) &&
1473 	    rss_flow_conf.types) {
1474 		hns3_err(hw,
1475 			 "Flow types(%" PRIx64 ") are unsupported by hns3's RSS",
1476 			 rss_flow_conf.types);
1477 		return -EINVAL;
1478 	}
1479 
1480 	if (rss_flow_conf.key_len &&
1481 	    rss_flow_conf.key_len > RTE_DIM(rss_info->key)) {
1482 		hns3_err(hw,
1483 			"input hash key(%u) greater than supported len(%zu)",
1484 			rss_flow_conf.key_len, RTE_DIM(rss_info->key));
1485 		return -EINVAL;
1486 	}
1487 
1488 	/* Filter the unsupported flow types */
1489 	flow_types = rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT;
1490 	if (flow_types != rss_flow_conf.types)
1491 		hns3_warn(hw, "modified RSS types based on hardware support, "
1492 			      "requested:%" PRIx64 " configured:%" PRIx64,
1493 			  rss_flow_conf.types, flow_types);
1494 	/* Update the useful flow types */
1495 	rss_flow_conf.types = flow_types;
1496 
1497 	if ((rss_flow_conf.types & ETH_RSS_PROTO_MASK) == 0)
1498 		return hns3_disable_rss(hw);
1499 
1501 	if (!add) {
1502 		if (hns3_action_rss_same(&rss_info->conf, &rss_flow_conf)) {
1503 			ret = hns3_disable_rss(hw);
1504 			if (ret) {
1505 				hns3_err(hw, "RSS disable failed(%d)", ret);
1506 				return ret;
1507 			}
1508 			memset(rss_info, 0, sizeof(struct hns3_rss_conf));
1509 			return 0;
1510 		}
1511 		return -EINVAL;
1512 	}
1513 
1514 	/* Get rx queues num */
1515 	num = dev->data->nb_rx_queues;
1516 
1517 	/* Set rx queues to use */
1518 	num = RTE_MIN(num, rss_flow_conf.queue_num);
1519 	if (rss_flow_conf.queue_num > num)
1520 		hns3_warn(hw, "Config queue number %u exceeds the limit and will be truncated",
1521 			  rss_flow_conf.queue_num);
1522 	hns3_info(hw, "A maximum of %u contiguous PF queues are configured", num);
1523 
1524 	rte_spinlock_lock(&hw->lock);
1525 	/* Update the RSS redirection table */
1526 	ret = hns3_update_indir_table(dev, &rss_flow_conf, num);
1527 	if (ret)
1528 		goto rss_config_err;
1529 
1530 	/* Set hash algorithm and flow types by the user's config */
1531 	ret = hns3_hw_rss_hash_set(hw, &rss_flow_conf);
1532 	if (ret)
1533 		goto rss_config_err;
1534 
1535 	ret = hns3_rss_conf_copy(rss_info, &rss_flow_conf);
1536 	if (ret) {
1537 		hns3_err(hw, "RSS config init fail(%d)", ret);
1538 		goto rss_config_err;
1539 	}
1540 
1541 rss_config_err:
1542 	rte_spinlock_unlock(&hw->lock);
1543 
1544 	return ret;
1545 }
1546 
1547 /* Remove the rss filter */
1548 static int
1549 hns3_clear_rss_filter(struct rte_eth_dev *dev)
1550 {
1551 	struct hns3_adapter *hns = dev->data->dev_private;
1552 	struct hns3_hw *hw = &hns->hw;
1553 
1554 	if (hw->rss_info.conf.queue_num == 0)
1555 		return 0;
1556 
1557 	return hns3_config_rss_filter(dev, &hw->rss_info, false);
1558 }
1559 
1560 /* Restore the rss filter */
1561 int
1562 hns3_restore_rss_filter(struct rte_eth_dev *dev)
1563 {
1564 	struct hns3_adapter *hns = dev->data->dev_private;
1565 	struct hns3_hw *hw = &hns->hw;
1566 
1567 	if (hw->rss_info.conf.queue_num == 0)
1568 		return 0;
1569 
1570 	return hns3_config_rss_filter(dev, &hw->rss_info, true);
1571 }
1572 
1573 static int
1574 hns3_flow_parse_rss(struct rte_eth_dev *dev,
1575 		    const struct hns3_rss_conf *conf, bool add)
1576 {
1577 	struct hns3_adapter *hns = dev->data->dev_private;
1578 	struct hns3_hw *hw = &hns->hw;
1579 	bool ret;
1580 
1581 	/* Reject duplicate RSS configurations */
1582 	ret = hns3_action_rss_same(&hw->rss_info.conf, &conf->conf);
1583 	if (ret) {
1584 		hns3_err(hw, "Duplicate RSS configuration entered: %d", ret);
1585 		return -EINVAL;
1586 	}
1587 
1588 	return hns3_config_rss_filter(dev, conf, add);
1589 }
1590 
1591 static int
1592 hns3_flow_args_check(const struct rte_flow_attr *attr,
1593 		     const struct rte_flow_item pattern[],
1594 		     const struct rte_flow_action actions[],
1595 		     struct rte_flow_error *error)
1596 {
1597 	if (pattern == NULL)
1598 		return rte_flow_error_set(error, EINVAL,
1599 					  RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1600 					  NULL, "NULL pattern.");
1601 
1602 	if (actions == NULL)
1603 		return rte_flow_error_set(error, EINVAL,
1604 					  RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1605 					  NULL, "NULL action.");
1606 
1607 	if (attr == NULL)
1608 		return rte_flow_error_set(error, EINVAL,
1609 					  RTE_FLOW_ERROR_TYPE_ATTR,
1610 					  NULL, "NULL attribute.");
1611 
1612 	return hns3_check_attr(attr, error);
1613 }
1614 
1615 /*
1616  * Check if the flow rule is supported by hns3.
1617  * It only checks the format; it does not guarantee that the rule can be
1618  * programmed into the HW, since there may not be enough room for it.
1619  */
1620 static int
1621 hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1622 		   const struct rte_flow_item pattern[],
1623 		   const struct rte_flow_action actions[],
1624 		   struct rte_flow_error *error)
1625 {
1626 	struct hns3_fdir_rule fdir_rule;
1627 	int ret;
1628 
1629 	ret = hns3_flow_args_check(attr, pattern, actions, error);
1630 	if (ret)
1631 		return ret;
1632 
1633 	if (find_rss_action(actions))
1634 		return hns3_parse_rss_filter(dev, actions, error);
1635 
1636 	memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1637 	return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1638 }
1639 
1640 /*
1641  * Create a flow rule.
1642  * Theoretically one rule can match more than one filter;
1643  * we will let it use the first filter it hits,
1644  * so the sequence matters.
1645  */
1646 static struct rte_flow *
1647 hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1648 		 const struct rte_flow_item pattern[],
1649 		 const struct rte_flow_action actions[],
1650 		 struct rte_flow_error *error)
1651 {
1652 	struct hns3_process_private *process_list = dev->process_private;
1653 	struct hns3_adapter *hns = dev->data->dev_private;
1654 	struct hns3_hw *hw = &hns->hw;
1655 	const struct hns3_rss_conf *rss_conf;
1656 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
1657 	struct hns3_rss_conf_ele *rss_filter_ptr;
1658 	struct hns3_flow_mem *flow_node;
1659 	const struct rte_flow_action *act;
1660 	struct rte_flow *flow;
1661 	struct hns3_fdir_rule fdir_rule;
1662 	int ret;
1663 
1664 	ret = hns3_flow_args_check(attr, pattern, actions, error);
1665 	if (ret)
1666 		return NULL;
1667 
1668 	flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
1669 	if (flow == NULL) {
1670 		rte_flow_error_set(error, ENOMEM,
1671 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1672 				   "Failed to allocate flow memory");
1673 		return NULL;
1674 	}
1675 	flow_node = rte_zmalloc("hns3 flow node",
1676 				sizeof(struct hns3_flow_mem), 0);
1677 	if (flow_node == NULL) {
1678 		rte_flow_error_set(error, ENOMEM,
1679 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1680 				   "Failed to allocate flow list memory");
1681 		rte_free(flow);
1682 		return NULL;
1683 	}
1684 
1685 	flow_node->flow = flow;
1686 	TAILQ_INSERT_TAIL(&process_list->flow_list, flow_node, entries);
1687 
1688 	act = find_rss_action(actions);
1689 	if (act) {
1690 		rss_conf = act->conf;
1691 
1692 		ret = hns3_flow_parse_rss(dev, rss_conf, true);
1693 		if (ret)
1694 			goto err;
1695 
1696 		rss_filter_ptr = rte_zmalloc("hns3 rss filter",
1697 					     sizeof(struct hns3_rss_conf_ele),
1698 					     0);
1699 		if (rss_filter_ptr == NULL) {
1700 			hns3_err(hw,
1701 				 "Failed to allocate hns3_rss_filter memory");
1702 			ret = -ENOMEM;
1703 			goto err;
1704 		}
1705 		memcpy(&rss_filter_ptr->filter_info, rss_conf,
1706 			sizeof(struct hns3_rss_conf));
1707 		TAILQ_INSERT_TAIL(&process_list->filter_rss_list,
1708 				  rss_filter_ptr, entries);
1709 
1710 		flow->rule = rss_filter_ptr;
1711 		flow->filter_type = RTE_ETH_FILTER_HASH;
1712 		return flow;
1713 	}
1714 
1715 	memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1716 	ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1717 	if (ret)
1718 		goto out;
1719 
1720 	if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) {
1721 		ret = hns3_counter_new(dev, fdir_rule.act_cnt.shared,
1722 				       fdir_rule.act_cnt.id, error);
1723 		if (ret)
1724 			goto out;
1725 
1726 		flow->counter_id = fdir_rule.act_cnt.id;
1727 	}
1728 	ret = hns3_fdir_filter_program(hns, &fdir_rule, false);
1729 	if (!ret) {
1730 		fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
1731 					    sizeof(struct hns3_fdir_rule_ele),
1732 					    0);
1733 		if (fdir_rule_ptr == NULL) {
1734 			hns3_err(hw, "Failed to allocate fdir_rule memory");
1735 			ret = -ENOMEM;
1736 			goto err_fdir;
1737 		}
1738 		memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule,
1739 			sizeof(struct hns3_fdir_rule));
1740 		TAILQ_INSERT_TAIL(&process_list->fdir_list,
1741 				  fdir_rule_ptr, entries);
1742 		flow->rule = fdir_rule_ptr;
1743 		flow->filter_type = RTE_ETH_FILTER_FDIR;
1744 
1745 		return flow;
1746 	}
1747 
1748 err_fdir:
1749 	if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1750 		hns3_counter_release(dev, fdir_rule.act_cnt.id);
1751 
1752 err:
1753 	rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1754 			   "Failed to create flow");
1755 out:
1756 	TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
1757 	rte_free(flow_node);
1758 	rte_free(flow);
1759 	return NULL;
1760 }
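
/*
 * Usage sketch: creating an FDIR rule with an attached counter through
 * rte_flow_create(). HNS3_RULE_FLAG_COUNTER is set during parsing when a
 * COUNT action is present; attr, pattern and queue are assumed to be set
 * up as in the validation sketch above, and the counter id is an
 * application choice:
 *
 *	struct rte_flow_action_count cnt = { .id = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &cnt },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *						actions, &err);
 *	if (flow == NULL)
 *		printf("create failed: %s\n", err.message);
 */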
1761 
1762 /* Destroy a flow rule on hns3. */
1763 static int
1764 hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1765 		  struct rte_flow_error *error)
1766 {
1767 	struct hns3_process_private *process_list = dev->process_private;
1768 	struct hns3_adapter *hns = dev->data->dev_private;
1769 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
1770 	struct hns3_rss_conf_ele *rss_filter_ptr;
1771 	struct hns3_flow_mem *flow_node;
1772 	struct hns3_hw *hw = &hns->hw;
1773 	enum rte_filter_type filter_type;
1774 	struct hns3_fdir_rule fdir_rule;
1775 	int ret;
1776 
1777 	if (flow == NULL)
1778 		return rte_flow_error_set(error, EINVAL,
1779 					  RTE_FLOW_ERROR_TYPE_HANDLE,
1780 					  flow, "Flow is NULL");
1781 	filter_type = flow->filter_type;
1782 	switch (filter_type) {
1783 	case RTE_ETH_FILTER_FDIR:
1784 		fdir_rule_ptr = (struct hns3_fdir_rule_ele *)flow->rule;
1785 		memcpy(&fdir_rule, &fdir_rule_ptr->fdir_conf,
1786 			   sizeof(struct hns3_fdir_rule));
1787 
1788 		ret = hns3_fdir_filter_program(hns, &fdir_rule, true);
1789 		if (ret)
1790 			return rte_flow_error_set(error, EIO,
1791 						  RTE_FLOW_ERROR_TYPE_HANDLE,
1792 						  flow,
1793 						  "Failed to destroy FDIR rule, try again");
1794 		if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1795 			hns3_counter_release(dev, fdir_rule.act_cnt.id);
1796 		TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
1797 		rte_free(fdir_rule_ptr);
1798 		fdir_rule_ptr = NULL;
1799 		break;
1800 	case RTE_ETH_FILTER_HASH:
1801 		rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule;
1802 		ret = hns3_config_rss_filter(dev, &hw->rss_info, false);
1803 		if (ret)
1804 			return rte_flow_error_set(error, EIO,
1805 						  RTE_FLOW_ERROR_TYPE_HANDLE,
1806 						  flow,
1807 						  "Failed to destroy RSS rule, try again");
1808 		TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
1809 			     entries);
1810 		rte_free(rss_filter_ptr);
1811 		rss_filter_ptr = NULL;
1812 		break;
1813 	default:
1814 		return rte_flow_error_set(error, EINVAL,
1815 					  RTE_FLOW_ERROR_TYPE_HANDLE, flow,
1816 					  "Unsupported filter type");
1817 	}
1818 
1819 	TAILQ_FOREACH(flow_node, &process_list->flow_list, entries) {
1820 		if (flow_node->flow == flow) {
1821 			TAILQ_REMOVE(&process_list->flow_list, flow_node,
1822 				     entries);
1823 			rte_free(flow_node);
1824 			flow_node = NULL;
1825 			break;
1826 		}
1827 	}
1828 	rte_free(flow);
1829 	flow = NULL;
1830 
1831 	return 0;
1832 }
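
/*
 * Usage sketch: a rule handle returned by rte_flow_create() is released
 * through rte_flow_destroy(); on failure the rule stays in the lists, so
 * the call can simply be retried:
 *
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_destroy(port_id, flow, &err) != 0)
 *		printf("destroy failed: %s\n", err.message);
 */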
1833 
1834 /* Destroy all flow rules associated with a port on hns3. */
1835 static int
1836 hns3_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1837 {
1838 	struct hns3_adapter *hns = dev->data->dev_private;
1839 	int ret;
1840 
1841 	/* FDIR is available only in PF driver */
1842 	if (!hns->is_vf) {
1843 		ret = hns3_clear_all_fdir_filter(hns);
1844 		if (ret) {
1845 			rte_flow_error_set(error, ret,
1846 					   RTE_FLOW_ERROR_TYPE_HANDLE,
					   NULL, "Failed to flush fdir filter");
1848 			return ret;
1849 		}
1850 		hns3_counter_flush(dev);
1851 	}
1852 
1853 	ret = hns3_clear_rss_filter(dev);
1854 	if (ret) {
1855 		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1856 				   NULL, "Failed to flush rss filter");
1857 		return ret;
1858 	}
1859 
1860 	hns3_filterlist_flush(dev);
1861 
1862 	return 0;
1863 }
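
/*
 * Usage sketch: rte_flow_flush() maps to the function above and removes
 * every rule on the port; on a PF it also clears the FDIR table and the
 * counters before dropping the RSS filter and the software lists:
 *
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_flush(port_id, &err) != 0)
 *		printf("flush failed: %s\n", err.message);
 */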
1864 
1865 /* Query an existing flow rule. */
1866 static int
1867 hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
1868 		const struct rte_flow_action *actions, void *data,
1869 		struct rte_flow_error *error)
1870 {
1871 	struct rte_flow_query_count *qc;
1872 	int ret;
1873 
1874 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1875 		switch (actions->type) {
1876 		case RTE_FLOW_ACTION_TYPE_VOID:
1877 			break;
1878 		case RTE_FLOW_ACTION_TYPE_COUNT:
1879 			qc = (struct rte_flow_query_count *)data;
1880 			ret = hns3_counter_query(dev, flow, qc, error);
1881 			if (ret)
1882 				return ret;
1883 			break;
1884 		default:
1885 			return rte_flow_error_set(error, ENOTSUP,
1886 						  RTE_FLOW_ERROR_TYPE_ACTION,
1887 						  actions,
						  "Query action only supports count");
1889 		}
1890 	}
1891 	return 0;
1892 }
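
/*
 * Usage sketch: only COUNT can be queried, so a query action list is a
 * single COUNT action followed by END, with the result written into a
 * struct rte_flow_query_count:
 *
 *	struct rte_flow_query_count qc = { 0 };
 *	struct rte_flow_action query[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_query(port_id, flow, query, &qc, &err) == 0 &&
 *	    qc.hits_set)
 *		printf("hits: %" PRIu64 "\n", qc.hits);
 */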
1893 
1894 static const struct rte_flow_ops hns3_flow_ops = {
1895 	.validate = hns3_flow_validate,
1896 	.create = hns3_flow_create,
1897 	.destroy = hns3_flow_destroy,
1898 	.flush = hns3_flow_flush,
1899 	.query = hns3_flow_query,
1900 	.isolate = NULL,
1901 };
1902 
1903 /*
1904  * The entry point of the flow API.
1905  * @param dev
1906  *   Pointer to Ethernet device.
1907  * @return
1908  *   0 on success, a negative errno value otherwise.
1909  */
1910 int
1911 hns3_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
1912 		     enum rte_filter_op filter_op, void *arg)
1913 {
1914 	struct hns3_hw *hw;
1915 	int ret = 0;
1916 
1917 	hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1918 	switch (filter_type) {
1919 	case RTE_ETH_FILTER_GENERIC:
1920 		if (filter_op != RTE_ETH_FILTER_GET)
1921 			return -EINVAL;
1922 		if (hw->adapter_state >= HNS3_NIC_CLOSED)
1923 			return -ENODEV;
1924 		*(const void **)arg = &hns3_flow_ops;
1925 		break;
1926 	default:
1927 		hns3_err(hw, "Filter type (%d) not supported", filter_type);
1928 		ret = -EOPNOTSUPP;
1929 		break;
1930 	}
1931 
1932 	return ret;
1933 }
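
/*
 * Usage sketch: the generic rte_flow layer obtains hns3_flow_ops through
 * this entry point with a RTE_ETH_FILTER_GENERIC / RTE_ETH_FILTER_GET
 * request, roughly:
 *
 *	const struct rte_flow_ops *ops = NULL;
 *
 *	ret = hns3_dev_filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
 *				   RTE_ETH_FILTER_GET, &ops);
 *	// On success, ops == &hns3_flow_ops.
 */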
1934