/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#include <stdbool.h>
#include <sys/queue.h>
#include <rte_flow_driver.h>
#include <rte_io.h>
#include <rte_malloc.h>

#include "hns3_ethdev.h"
#include "hns3_logs.h"

/* Default RSS hash key */
static uint8_t hns3_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF };
static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 };

/* Special Filter id for non-specific packet flagging. Don't change value */
#define HNS3_MAX_FILTER_ID	0x0FFF

#define ETHER_TYPE_MASK		0xFFFF
#define IPPROTO_MASK		0xFF
#define TUNNEL_TYPE_MASK	0xFFFF

#define HNS3_TUNNEL_TYPE_VXLAN		0x12B5
#define HNS3_TUNNEL_TYPE_VXLAN_GPE	0x12B6
#define HNS3_TUNNEL_TYPE_GENEVE		0x17C1
#define HNS3_TUNNEL_TYPE_NVGRE		0x6558

static enum rte_flow_item_type first_items[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_GENEVE,
	RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
	RTE_FLOW_ITEM_TYPE_MPLS
};

static enum rte_flow_item_type L2_next_items[] = {
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_IPV6
};

static enum rte_flow_item_type L3_next_items[] = {
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ICMP
};

static enum rte_flow_item_type L4_next_items[] = {
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_GENEVE,
	RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
	RTE_FLOW_ITEM_TYPE_MPLS
};

static enum rte_flow_item_type tunnel_next_items[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN
};

struct items_step_mngr {
	enum rte_flow_item_type *items;
	int count;
};

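/* Convert 'len' big-endian 32-bit words from 'src' to host byte order. */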
static inline void
net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		dst[i] = rte_be_to_cpu_32(src[i]);
}

/*
 * This function is used to find the RSS general action.
 * 1. As we know, RSS is used to spread packets among several queues. The flow
 *    API provides struct rte_flow_action_rss; the user can configure its
 *    fields, such as func/level/types/key/queue, to control the RSS function.
 * 2. The flow API also supports queue region configuration for hns3. It is
 *    implemented by FDIR + RSS in hns3 hardware: the user can create an FDIR
 *    rule whose action is an RSS queue region.
 * 3. When the action is RSS, we use the following rule to distinguish:
 *    Case 1: the pattern has ETH and the action's queue_num > 0, which
 *            indicates a queue region configuration.
 *    Otherwise: a general RSS action.
 */
static const struct rte_flow_action *
hns3_find_rss_general_action(const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[])
{
	const struct rte_flow_action *act = NULL;
	const struct hns3_rss_conf *rss;
	bool have_eth = false;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
			act = actions;
			break;
		}
	}
	if (!act)
		return NULL;

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		if (pattern->type == RTE_FLOW_ITEM_TYPE_ETH) {
			have_eth = true;
			break;
		}
	}

	rss = act->conf;
	if (have_eth && rss->conf.queue_num) {
		/*
		 * The pattern has ETH and the action's queue_num > 0,
		 * indicating a queue region configuration.
		 * Because the queue region is implemented by FDIR + RSS in
		 * hns3 hardware, it must go through the FDIR process, so
		 * return NULL here to avoid entering the RSS process.
		 */
		return NULL;
	}

	return act;
}
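
/*
 * For example (illustrative, not from the driver): a pattern "ETH / END"
 * combined with an RSS action whose queue_num > 0 is treated as a queue
 * region rule and handled by the FDIR path (NULL is returned), while the
 * same pattern with an RSS action whose queue_num == 0 is a general RSS
 * action and is returned here.
 */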

static inline struct hns3_flow_counter *
hns3_counter_lookup(struct rte_eth_dev *dev, uint32_t id)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_flow_counter *cnt;

	LIST_FOREACH(cnt, &pf->flow_counters, next) {
		if (cnt->id == id)
			return cnt;
	}
	return NULL;
}

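/*
 * Create a new flow counter, or take another reference on an existing
 * shared counter with the same id.
 */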
static int
hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
		 struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_flow_counter *cnt;

	cnt = hns3_counter_lookup(dev, id);
	if (cnt) {
		if (!cnt->shared || cnt->shared != shared)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  cnt,
						  "Counter id is used, shared flag does not match");
		cnt->ref_cnt++;
		return 0;
	}

	cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
	if (cnt == NULL)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_ACTION, cnt,
					  "Alloc mem for counter failed");
	cnt->id = id;
	cnt->shared = shared;
	cnt->ref_cnt = 1;
	cnt->hits = 0;
	LIST_INSERT_HEAD(&pf->flow_counters, cnt, next);
	return 0;
}

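/* Read the hit count of the counter attached to 'flow' into 'qc'. */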
static int
hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
		   struct rte_flow_query_count *qc,
		   struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_flow_counter *cnt;
	uint64_t value;
	int ret;

	/* FDIR is available only in PF driver */
	if (hns->is_vf)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  "Fdir is not supported in VF");
	cnt = hns3_counter_lookup(dev, flow->counter_id);
	if (cnt == NULL)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "Can't find counter id");

	ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "Failed to read counter");
		return ret;
	}
	qc->hits_set = 1;
	qc->hits = value;

	return 0;
}

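/* Drop a reference on a counter; free it once the reference count hits zero. */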
static int
hns3_counter_release(struct rte_eth_dev *dev, uint32_t id)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_flow_counter *cnt;

	cnt = hns3_counter_lookup(dev, id);
	if (cnt == NULL) {
		hns3_err(hw, "Can't find available counter to release");
		return -EINVAL;
	}
	cnt->ref_cnt--;
	if (cnt->ref_cnt == 0) {
		LIST_REMOVE(cnt, next);
		rte_free(cnt);
	}
	return 0;
}

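/* Release all flow counters kept in the PF's counter list. */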
static void
hns3_counter_flush(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_flow_counter *cnt_ptr;

	cnt_ptr = LIST_FIRST(&pf->flow_counters);
	while (cnt_ptr) {
		LIST_REMOVE(cnt_ptr, next);
		rte_free(cnt_ptr);
		cnt_ptr = LIST_FIRST(&pf->flow_counters);
	}
}

static int
hns3_handle_action_queue(struct rte_eth_dev *dev,
			 const struct rte_flow_action *action,
			 struct hns3_fdir_rule *rule,
			 struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	const struct rte_flow_action_queue *queue;
	struct hns3_hw *hw = &hns->hw;

	queue = (const struct rte_flow_action_queue *)action->conf;
	if (queue->index >= hw->used_rx_queues) {
		hns3_err(hw, "queue ID(%d) is greater than number of "
			  "available queues (%d) in driver.",
			  queue->index, hw->used_rx_queues);
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  action, "Invalid queue ID in PF");
	}

	rule->queue_id = queue->index;
	rule->nb_queues = 1;
	rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
	return 0;
}

static int
hns3_handle_action_queue_region(struct rte_eth_dev *dev,
				const struct rte_flow_action *action,
				struct hns3_fdir_rule *rule,
				struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	const struct rte_flow_action_rss *conf = action->conf;
	struct hns3_hw *hw = &hns->hw;
	uint16_t idx;

	if (!hns3_dev_fd_queue_region_supported(hw))
		return rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_ACTION, action,
			"Queue region configuration is not supported!");

	if ((!rte_is_power_of_2(conf->queue_num)) ||
		conf->queue_num > hw->rss_size_max ||
		conf->queue[0] >= hw->used_rx_queues ||
		conf->queue[0] + conf->queue_num > hw->used_rx_queues) {
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
			"Invalid start queue ID and queue num! The start queue "
			"ID must be valid, and the queue num must be a power "
			"of 2 and <= rss_size_max.");
	}

	for (idx = 1; idx < conf->queue_num; idx++) {
		if (conf->queue[idx] != conf->queue[idx - 1] + 1)
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
				"Invalid queue ID sequence! The queue IDs "
				"must increase continuously.");
	}

	rule->queue_id = conf->queue[0];
	rule->nb_queues = conf->queue_num;
	rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
	return 0;
}
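
/*
 * For example (illustrative values): queue = { 8, 9, 10, 11 } with
 * queue_num = 4 passes the checks above (a power of 2, contiguous and in
 * range), whereas queue = { 8, 10, 12, 14 } is rejected by the contiguity
 * check.
 */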

/*
 * Parse the actions structure of a flow rule.
 * The actions are validated as they are parsed.
 *
 * @param actions[in]
 * @param rule[out]
 *   NIC specific actions derived from the actions.
 * @param error[out]
 */
static int
hns3_handle_actions(struct rte_eth_dev *dev,
		    const struct rte_flow_action actions[],
		    struct hns3_fdir_rule *rule, struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	const struct rte_flow_action_count *act_count;
	const struct rte_flow_action_mark *mark;
	struct hns3_pf *pf = &hns->pf;
	uint32_t counter_num;
	int ret;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = hns3_handle_action_queue(dev, actions, rule,
						       error);
			if (ret)
				return ret;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			rule->action = HNS3_FD_ACTION_DROP_PACKET;
			break;
		/*
		 * Here the real action of RSS is a queue region.
		 * The queue region is implemented by FDIR + RSS in hns3
		 * hardware: the FDIR action selects one queue region
		 * (start_queue_id and queue_num), then RSS spreads packets
		 * within the queue region by the RSS algorithm.
		 */
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = hns3_handle_action_queue_region(dev, actions,
							      rule, error);
			if (ret)
				return ret;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			mark =
			    (const struct rte_flow_action_mark *)actions->conf;
			if (mark->id >= HNS3_MAX_FILTER_ID)
				return rte_flow_error_set(error, EINVAL,
						     RTE_FLOW_ERROR_TYPE_ACTION,
						     actions,
						     "Invalid Mark ID");
			rule->fd_id = mark->id;
			rule->flags |= HNS3_RULE_FLAG_FDID;
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			rule->fd_id = HNS3_MAX_FILTER_ID;
			rule->flags |= HNS3_RULE_FLAG_FDID;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			act_count =
			    (const struct rte_flow_action_count *)actions->conf;
			counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
			if (act_count->id >= counter_num)
				return rte_flow_error_set(error, EINVAL,
						     RTE_FLOW_ERROR_TYPE_ACTION,
						     actions,
						     "Invalid counter id");
			rule->act_cnt = *act_count;
			rule->flags |= HNS3_RULE_FLAG_COUNTER;
			break;
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "Unsupported action");
		}
	}

	return 0;
}

/* Check the attributes of a flow director rule. */
static int
hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
{
	if (!attr->ingress)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  attr, "Ingress can't be zero");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					  attr, "Not support egress");
	if (attr->transfer)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  attr, "No support for transfer");
	if (attr->priority)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  attr, "Not support priority");
	if (attr->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  attr, "Not support group");
	return 0;
}

static int
hns3_parse_eth(const struct rte_flow_item *item,
	       struct hns3_fdir_rule *rule, struct rte_flow_error *error)
{
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		eth_mask = item->mask;
		if (eth_mask->type) {
			hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
			rule->key_conf.mask.ether_type =
			    rte_be_to_cpu_16(eth_mask->type);
		}
		if (!rte_is_zero_ether_addr(&eth_mask->src)) {
			hns3_set_bit(rule->input_set, INNER_SRC_MAC, 1);
			memcpy(rule->key_conf.mask.src_mac,
			       eth_mask->src.addr_bytes, RTE_ETHER_ADDR_LEN);
		}
		if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
			hns3_set_bit(rule->input_set, INNER_DST_MAC, 1);
			memcpy(rule->key_conf.mask.dst_mac,
			       eth_mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
		}
	}

	eth_spec = item->spec;
	rule->key_conf.spec.ether_type = rte_be_to_cpu_16(eth_spec->type);
	memcpy(rule->key_conf.spec.src_mac, eth_spec->src.addr_bytes,
	       RTE_ETHER_ADDR_LEN);
	memcpy(rule->key_conf.spec.dst_mac, eth_spec->dst.addr_bytes,
	       RTE_ETHER_ADDR_LEN);
	return 0;
}

static int
hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");

	rule->key_conf.vlan_num++;
	if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Vlan_num is more than 2");

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		vlan_mask = item->mask;
		if (vlan_mask->tci) {
			if (rule->key_conf.vlan_num == 1) {
				hns3_set_bit(rule->input_set, INNER_VLAN_TAG1,
					     1);
				rule->key_conf.mask.vlan_tag1 =
				    rte_be_to_cpu_16(vlan_mask->tci);
			} else {
				hns3_set_bit(rule->input_set, INNER_VLAN_TAG2,
					     1);
				rule->key_conf.mask.vlan_tag2 =
				    rte_be_to_cpu_16(vlan_mask->tci);
			}
		}
	}

	vlan_spec = item->spec;
	if (rule->key_conf.vlan_num == 1)
		rule->key_conf.spec.vlan_tag1 =
		    rte_be_to_cpu_16(vlan_spec->tci);
	else
		rule->key_conf.spec.vlan_tag2 =
		    rte_be_to_cpu_16(vlan_spec->tci);
	return 0;
}

static int
hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");

	hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
	rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4;
	rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		ipv4_mask = item->mask;

		if (ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.hdr_checksum) {
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "Only support src & dst ip, tos, proto in IPV4");
		}

		if (ipv4_mask->hdr.src_addr) {
			hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
			rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID] =
			    rte_be_to_cpu_32(ipv4_mask->hdr.src_addr);
		}

		if (ipv4_mask->hdr.dst_addr) {
			hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
			rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID] =
			    rte_be_to_cpu_32(ipv4_mask->hdr.dst_addr);
		}

		if (ipv4_mask->hdr.type_of_service) {
			hns3_set_bit(rule->input_set, INNER_IP_TOS, 1);
			rule->key_conf.mask.ip_tos =
			    ipv4_mask->hdr.type_of_service;
		}

		if (ipv4_mask->hdr.next_proto_id) {
			hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
			rule->key_conf.mask.ip_proto =
			    ipv4_mask->hdr.next_proto_id;
		}
	}

	ipv4_spec = item->spec;
	rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID] =
	    rte_be_to_cpu_32(ipv4_spec->hdr.src_addr);
	rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID] =
	    rte_be_to_cpu_32(ipv4_spec->hdr.dst_addr);
	rule->key_conf.spec.ip_tos = ipv4_spec->hdr.type_of_service;
	rule->key_conf.spec.ip_proto = ipv4_spec->hdr.next_proto_id;
	return 0;
}

static int
hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv6 *ipv6_spec;
	const struct rte_flow_item_ipv6 *ipv6_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");

	hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
	rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6;
	rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		ipv6_mask = item->mask;
		if (ipv6_mask->hdr.vtc_flow ||
		    ipv6_mask->hdr.payload_len || ipv6_mask->hdr.hop_limits) {
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "Only support src & dst ip, proto in IPV6");
		}
		net_addr_to_host(rule->key_conf.mask.src_ip,
				 (const rte_be32_t *)ipv6_mask->hdr.src_addr,
				 IP_ADDR_LEN);
		net_addr_to_host(rule->key_conf.mask.dst_ip,
				 (const rte_be32_t *)ipv6_mask->hdr.dst_addr,
				 IP_ADDR_LEN);
		rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto;
		if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID])
			hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
		if (rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID])
			hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
		if (ipv6_mask->hdr.proto)
			hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
	}

	ipv6_spec = item->spec;
	net_addr_to_host(rule->key_conf.spec.src_ip,
			 (const rte_be32_t *)ipv6_spec->hdr.src_addr,
			 IP_ADDR_LEN);
	net_addr_to_host(rule->key_conf.spec.dst_ip,
			 (const rte_be32_t *)ipv6_spec->hdr.dst_addr,
			 IP_ADDR_LEN);
	rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto;

	return 0;
}

static int
hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
	       struct rte_flow_error *error)
{
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");

	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
	rule->key_conf.spec.ip_proto = IPPROTO_TCP;
	rule->key_conf.mask.ip_proto = IPPROTO_MASK;

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		tcp_mask = item->mask;
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.tcp_flags ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum || tcp_mask->hdr.tcp_urp) {
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "Only support src & dst port in TCP");
		}

		if (tcp_mask->hdr.src_port) {
			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
			rule->key_conf.mask.src_port =
			    rte_be_to_cpu_16(tcp_mask->hdr.src_port);
		}
		if (tcp_mask->hdr.dst_port) {
			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
			rule->key_conf.mask.dst_port =
			    rte_be_to_cpu_16(tcp_mask->hdr.dst_port);
		}
	}

	tcp_spec = item->spec;
	rule->key_conf.spec.src_port = rte_be_to_cpu_16(tcp_spec->hdr.src_port);
	rule->key_conf.spec.dst_port = rte_be_to_cpu_16(tcp_spec->hdr.dst_port);

	return 0;
}

static int
hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
	       struct rte_flow_error *error)
{
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");

	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
	rule->key_conf.spec.ip_proto = IPPROTO_UDP;
	rule->key_conf.mask.ip_proto = IPPROTO_MASK;
	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		udp_mask = item->mask;
		if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "Only support src & dst port in UDP");
		}
		if (udp_mask->hdr.src_port) {
			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
			rule->key_conf.mask.src_port =
			    rte_be_to_cpu_16(udp_mask->hdr.src_port);
		}
		if (udp_mask->hdr.dst_port) {
			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
			rule->key_conf.mask.dst_port =
			    rte_be_to_cpu_16(udp_mask->hdr.dst_port);
		}
	}

	udp_spec = item->spec;
	rule->key_conf.spec.src_port = rte_be_to_cpu_16(udp_spec->hdr.src_port);
	rule->key_conf.spec.dst_port = rte_be_to_cpu_16(udp_spec->hdr.dst_port);

	return 0;
}

static int
hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");

	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
	rule->key_conf.spec.ip_proto = IPPROTO_SCTP;
	rule->key_conf.mask.ip_proto = IPPROTO_MASK;

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		sctp_mask = item->mask;
		if (sctp_mask->hdr.cksum)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "Only support src & dst port in SCTP");

		if (sctp_mask->hdr.src_port) {
			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
			rule->key_conf.mask.src_port =
			    rte_be_to_cpu_16(sctp_mask->hdr.src_port);
		}
		if (sctp_mask->hdr.dst_port) {
			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
			rule->key_conf.mask.dst_port =
			    rte_be_to_cpu_16(sctp_mask->hdr.dst_port);
		}
		if (sctp_mask->hdr.tag) {
			hns3_set_bit(rule->input_set, INNER_SCTP_TAG, 1);
			rule->key_conf.mask.sctp_tag =
			    rte_be_to_cpu_32(sctp_mask->hdr.tag);
		}
	}

	sctp_spec = item->spec;
	rule->key_conf.spec.src_port =
	    rte_be_to_cpu_16(sctp_spec->hdr.src_port);
	rule->key_conf.spec.dst_port =
	    rte_be_to_cpu_16(sctp_spec->hdr.dst_port);
	rule->key_conf.spec.sctp_tag = rte_be_to_cpu_32(sctp_spec->hdr.tag);

	return 0;
}

/*
 * Check items before the tunnel item, save inner configs to the outer
 * configs, and clear the inner configs.
 * The key consists of two parts: meta_data and tuple keys.
 * Meta data uses 15 bits, including vlan_num (2 bits), des_port (12 bits)
 * and tunnel packet (1 bit).
 * Tuple keys use 384 bits, including ot_dst-mac (48 bits), ot_dst-port
 * (16 bits), ot_tun_vni (24 bits), ot_flow_id (8 bits), src-mac (48 bits),
 * dst-mac (48 bits), src-ip (32/128 bits), dst-ip (32/128 bits), src-port
 * (16 bits), dst-port (16 bits), tos (8 bits), ether-proto (16 bits),
 * ip-proto (8 bits), vlantag1 (16 bits), vlantag2 (16 bits) and sctp-tag
 * (32 bits).
 */
static int
hns3_handle_tunnel(const struct rte_flow_item *item,
		   struct hns3_fdir_rule *rule, struct rte_flow_error *error)
{
	/* check eth config */
	if (rule->input_set & (BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item, "Outer eth mac is unsupported");
	if (rule->input_set & BIT(INNER_ETH_TYPE)) {
		hns3_set_bit(rule->input_set, OUTER_ETH_TYPE, 1);
		rule->key_conf.spec.outer_ether_type =
		    rule->key_conf.spec.ether_type;
		rule->key_conf.mask.outer_ether_type =
		    rule->key_conf.mask.ether_type;
		hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 0);
		rule->key_conf.spec.ether_type = 0;
		rule->key_conf.mask.ether_type = 0;
	}

	/* check vlan config */
	if (rule->input_set & (BIT(INNER_VLAN_TAG1) | BIT(INNER_VLAN_TAG2)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "Outer vlan tags are unsupported");

	/* clear vlan_num for inner vlan select */
	rule->key_conf.outer_vlan_num = rule->key_conf.vlan_num;
	rule->key_conf.vlan_num = 0;

	/* check L3 config */
	if (rule->input_set &
	    (BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | BIT(INNER_IP_TOS)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item, "Outer ip is unsupported");
	if (rule->input_set & BIT(INNER_IP_PROTO)) {
		hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
		rule->key_conf.spec.outer_proto = rule->key_conf.spec.ip_proto;
		rule->key_conf.mask.outer_proto = rule->key_conf.mask.ip_proto;
		hns3_set_bit(rule->input_set, INNER_IP_PROTO, 0);
		rule->key_conf.spec.ip_proto = 0;
		rule->key_conf.mask.ip_proto = 0;
	}

	/* check L4 config */
	if (rule->input_set & BIT(INNER_SCTP_TAG))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Outer sctp tag is unsupported");

	if (rule->input_set & BIT(INNER_SRC_PORT)) {
		hns3_set_bit(rule->input_set, OUTER_SRC_PORT, 1);
		rule->key_conf.spec.outer_src_port =
		    rule->key_conf.spec.src_port;
		rule->key_conf.mask.outer_src_port =
		    rule->key_conf.mask.src_port;
		hns3_set_bit(rule->input_set, INNER_SRC_PORT, 0);
		rule->key_conf.spec.src_port = 0;
		rule->key_conf.mask.src_port = 0;
	}
	if (rule->input_set & BIT(INNER_DST_PORT)) {
		hns3_set_bit(rule->input_set, INNER_DST_PORT, 0);
		rule->key_conf.spec.dst_port = 0;
		rule->key_conf.mask.dst_port = 0;
	}
	return 0;
}

static int
hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		 struct rte_flow_error *error)
{
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");
	else if (item->spec && (item->mask == NULL))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Tunnel packets must configure with mask");

	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
	rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
		rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN;
	else
		rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN_GPE;

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	vxlan_mask = item->mask;
	vxlan_spec = item->spec;

	if (vxlan_mask->flags)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Flags are not supported in VxLAN");

	/* VNI must be totally masked or not. */
	if (memcmp(vxlan_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
	    memcmp(vxlan_mask->vni, zero_mask, VNI_OR_TNI_LEN))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VNI must be totally masked or not in VxLAN");
	if (vxlan_mask->vni[0]) {
		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
		memcpy(rule->key_conf.mask.outer_tun_vni, vxlan_mask->vni,
			   VNI_OR_TNI_LEN);
	}
	memcpy(rule->key_conf.spec.outer_tun_vni, vxlan_spec->vni,
		   VNI_OR_TNI_LEN);
	return 0;
}

static int
hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		 struct rte_flow_error *error)
{
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");
	else if (item->spec && (item->mask == NULL))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Tunnel packets must configure with mask");

	hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
	rule->key_conf.spec.outer_proto = IPPROTO_GRE;
	rule->key_conf.mask.outer_proto = IPPROTO_MASK;

	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
	rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_NVGRE;
	rule->key_conf.mask.tunnel_type = ~HNS3_TUNNEL_TYPE_NVGRE;
	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	nvgre_mask = item->mask;
	nvgre_spec = item->spec;

	if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Ver/protocol is not supported in NVGRE");

	/* TNI must be totally masked or not. */
	if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
	    memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "TNI must be totally masked or not in NVGRE");

	if (nvgre_mask->tni[0]) {
		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
		memcpy(rule->key_conf.mask.outer_tun_vni, nvgre_mask->tni,
			   VNI_OR_TNI_LEN);
	}
	memcpy(rule->key_conf.spec.outer_tun_vni, nvgre_spec->tni,
		   VNI_OR_TNI_LEN);

	if (nvgre_mask->flow_id) {
		hns3_set_bit(rule->input_set, OUTER_TUN_FLOW_ID, 1);
		rule->key_conf.mask.outer_tun_flow_id = nvgre_mask->flow_id;
	}
	rule->key_conf.spec.outer_tun_flow_id = nvgre_spec->flow_id;
	return 0;
}

static int
hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		  struct rte_flow_error *error)
{
	const struct rte_flow_item_geneve *geneve_spec;
	const struct rte_flow_item_geneve *geneve_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");
	else if (item->spec && (item->mask == NULL))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Tunnel packets must configure with mask");

	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
	rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE;
	rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	geneve_mask = item->mask;
	geneve_spec = item->spec;

	if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Ver/protocol is not supported in GENEVE");
	/* VNI must be totally masked or not. */
	if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
	    memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VNI must be totally masked or not in GENEVE");
	if (geneve_mask->vni[0]) {
		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
		memcpy(rule->key_conf.mask.outer_tun_vni, geneve_mask->vni,
			   VNI_OR_TNI_LEN);
	}
	memcpy(rule->key_conf.spec.outer_tun_vni, geneve_spec->vni,
		   VNI_OR_TNI_LEN);
	return 0;
}

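/*
 * Dispatch tunnel item parsing by item type, then move the already-parsed
 * inner fields to the outer fields of the rule.
 */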
static int
hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		  struct rte_flow_error *error)
{
	int ret;

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_VXLAN:
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		ret = hns3_parse_vxlan(item, rule, error);
		break;
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		ret = hns3_parse_nvgre(item, rule, error);
		break;
	case RTE_FLOW_ITEM_TYPE_GENEVE:
		ret = hns3_parse_geneve(item, rule, error);
		break;
	default:
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_HANDLE,
					  NULL, "Unsupported tunnel type!");
	}
	if (ret)
		return ret;
	return hns3_handle_tunnel(item, rule, error);
}

static int
hns3_parse_normal(const struct rte_flow_item *item,
		  struct hns3_fdir_rule *rule,
		  struct items_step_mngr *step_mngr,
		  struct rte_flow_error *error)
{
	int ret;

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		ret = hns3_parse_eth(item, rule, error);
		step_mngr->items = L2_next_items;
		step_mngr->count = ARRAY_SIZE(L2_next_items);
		break;
	case RTE_FLOW_ITEM_TYPE_VLAN:
		ret = hns3_parse_vlan(item, rule, error);
		step_mngr->items = L2_next_items;
		step_mngr->count = ARRAY_SIZE(L2_next_items);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		ret = hns3_parse_ipv4(item, rule, error);
		step_mngr->items = L3_next_items;
		step_mngr->count = ARRAY_SIZE(L3_next_items);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		ret = hns3_parse_ipv6(item, rule, error);
		step_mngr->items = L3_next_items;
		step_mngr->count = ARRAY_SIZE(L3_next_items);
		break;
	case RTE_FLOW_ITEM_TYPE_TCP:
		ret = hns3_parse_tcp(item, rule, error);
		step_mngr->items = L4_next_items;
		step_mngr->count = ARRAY_SIZE(L4_next_items);
		break;
	case RTE_FLOW_ITEM_TYPE_UDP:
		ret = hns3_parse_udp(item, rule, error);
		step_mngr->items = L4_next_items;
		step_mngr->count = ARRAY_SIZE(L4_next_items);
		break;
	case RTE_FLOW_ITEM_TYPE_SCTP:
		ret = hns3_parse_sctp(item, rule, error);
		step_mngr->items = L4_next_items;
		step_mngr->count = ARRAY_SIZE(L4_next_items);
		break;
	default:
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_HANDLE,
					  NULL, "Unsupported normal type!");
	}

	return ret;
}

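/* Check that 'item' is allowed at the current position in the pattern. */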
static int
hns3_validate_item(const struct rte_flow_item *item,
		   struct items_step_mngr step_mngr,
		   struct rte_flow_error *error)
{
	int i;

	if (item->last)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
					  "Not supported last point for range");

	for (i = 0; i < step_mngr.count; i++) {
		if (item->type == step_mngr.items[i])
			break;
	}

	if (i == step_mngr.count) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item, "Invalid or missing item");
	}
	return 0;
}

static inline bool
is_tunnel_packet(enum rte_flow_item_type type)
{
	if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
	    type == RTE_FLOW_ITEM_TYPE_VXLAN ||
	    type == RTE_FLOW_ITEM_TYPE_NVGRE ||
	    type == RTE_FLOW_ITEM_TYPE_GENEVE ||
	    type == RTE_FLOW_ITEM_TYPE_MPLS)
		return true;
	return false;
}

/*
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule, and
 * collect the flow director filter info along the way.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item could be UDP or TCP or SCTP (optional)
 * The next not void item could be RAW (for flexbyte, optional)
 * The next not void item must be END.
 * A Fuzzy Match pattern can appear at any place before END.
 * Fuzzy Match is optional for IPV4 but is required for IPV6
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 * UDP/TCP/SCTP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * END
 * MAC VLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		dst_addr
 *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
 *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
static int
hns3_parse_fdir_filter(struct rte_eth_dev *dev,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct hns3_fdir_rule *rule,
		       struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	const struct rte_flow_item *item;
	struct items_step_mngr step_mngr;
	int ret;

	/* FDIR is available only in PF driver */
	if (hns->is_vf)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  "Fdir not supported in VF");

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
					  "fdir_conf.mode isn't perfect");

	step_mngr.items = first_items;
	step_mngr.count = ARRAY_SIZE(first_items);
	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;

		ret = hns3_validate_item(item, step_mngr, error);
		if (ret)
			return ret;

		if (is_tunnel_packet(item->type)) {
			ret = hns3_parse_tunnel(item, rule, error);
			if (ret)
				return ret;
			step_mngr.items = tunnel_next_items;
			step_mngr.count = ARRAY_SIZE(tunnel_next_items);
		} else {
			ret = hns3_parse_normal(item, rule, &step_mngr, error);
			if (ret)
				return ret;
		}
	}

	return hns3_handle_actions(dev, actions, rule, error);
}
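
/*
 * Illustrative sketch (not part of the driver): how an application could
 * build the IPv4/UDP pattern documented above through the generic rte_flow
 * API. The addresses, ports, queue index and port_id are made-up example
 * values.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 20)),
 *		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 167, 3, 50)),
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.src_addr = RTE_BE32(UINT32_MAX),
 *		.hdr.dst_addr = RTE_BE32(UINT32_MAX),
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.src_port = RTE_BE16(80),
 *		.hdr.dst_port = RTE_BE16(80),
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr.src_port = RTE_BE16(0xFFFF),
 *		.hdr.dst_port = RTE_BE16(0xFFFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *						actions, &err);
 */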

void
hns3_filterlist_init(struct rte_eth_dev *dev)
{
	struct hns3_process_private *process_list = dev->process_private;

	TAILQ_INIT(&process_list->fdir_list);
	TAILQ_INIT(&process_list->filter_rss_list);
	TAILQ_INIT(&process_list->flow_list);
}

static void
hns3_filterlist_flush(struct rte_eth_dev *dev)
{
	struct hns3_process_private *process_list = dev->process_private;
	struct hns3_fdir_rule_ele *fdir_rule_ptr;
	struct hns3_rss_conf_ele *rss_filter_ptr;
	struct hns3_flow_mem *flow_node;

	fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
	while (fdir_rule_ptr) {
		TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
		rte_free(fdir_rule_ptr);
		fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
	}

	rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
	while (rss_filter_ptr) {
		TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
			     entries);
		rte_free(rss_filter_ptr);
		rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
	}

	flow_node = TAILQ_FIRST(&process_list->flow_list);
	while (flow_node) {
		TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
		rte_free(flow_node->flow);
		rte_free(flow_node);
		flow_node = TAILQ_FIRST(&process_list->flow_list);
	}
}

static bool
hns3_action_rss_same(const struct rte_flow_action_rss *comp,
		     const struct rte_flow_action_rss *with)
{
	bool func_is_same;

	/*
	 * When the user flushes all RSS rules, the RSS func is set to the
	 * invalid value RTE_ETH_HASH_FUNCTION_MAX, so that any valid RSS
	 * func in a flow created after the flush compares as different.
	 * Otherwise, when the user creates an RSS action with the func
	 * RTE_ETH_HASH_FUNCTION_DEFAULT, the func is considered the same
	 * between consecutive RSS flows.
	 */
	if (comp->func == RTE_ETH_HASH_FUNCTION_MAX)
		func_is_same = false;
	else
		func_is_same = (with->func ? (comp->func == with->func) : true);

	return (func_is_same &&
		comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) &&
		comp->level == with->level && comp->key_len == with->key_len &&
		comp->queue_num == with->queue_num &&
		!memcmp(comp->key, with->key, with->key_len) &&
		!memcmp(comp->queue, with->queue,
			sizeof(*with->queue) * with->queue_num));
}

static int
hns3_rss_conf_copy(struct hns3_rss_conf *out,
		   const struct rte_flow_action_rss *in)
{
	if (in->key_len > RTE_DIM(out->key) ||
	    in->queue_num > RTE_DIM(out->queue))
		return -EINVAL;
	if (in->key == NULL && in->key_len)
		return -EINVAL;
	out->conf = (struct rte_flow_action_rss) {
		.func = in->func,
		.level = in->level,
		.types = in->types,
		.key_len = in->key_len,
		.queue_num = in->queue_num,
	};
	out->conf.queue =
		memcpy(out->queue, in->queue,
		       sizeof(*in->queue) * in->queue_num);
	if (in->key)
		out->conf.key = memcpy(out->key, in->key, in->key_len);

	return 0;
}

/*
 * This function is used to validate the RSS action.
 */
static int
hns3_parse_rss_filter(struct rte_eth_dev *dev,
		      const struct rte_flow_action *actions,
		      struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_rss_conf *rss_conf = &hw->rss_info;
	const struct rte_flow_action_rss *rss;
	const struct rte_flow_action *act;
	uint32_t act_index = 0;
	uint64_t flow_types;
	uint16_t n;

	NEXT_ITEM_OF_ACTION(act, actions, act_index);
	rss = act->conf;

	if (rss == NULL) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  act, "no valid queues");
	}

	for (n = 0; n < rss->queue_num; n++) {
		if (rss->queue[n] < dev->data->nb_rx_queues)
			continue;
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  act,
					  "queue id > max number of queues");
	}

	/* Parse flow types of RSS */
	if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  act,
					  "Flow types are unsupported by "
					  "hns3's RSS");

	flow_types = rss->types & HNS3_ETH_RSS_SUPPORT;
	if (flow_types != rss->types)
		hns3_warn(hw, "RSS flow types(%" PRIx64 ") include unsupported "
			  "flow types", rss->types);

	/* Parse RSS related parameters from RSS configuration */
	switch (rss->func) {
	case RTE_ETH_HASH_FUNCTION_DEFAULT:
	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
	case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
	case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
		break;
	default:
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, act,
					  "input RSS hash functions are not supported");
	}

	if (rss->level)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, act,
					  "a nonzero RSS encapsulation level is not supported");
	if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, act,
					  "RSS hash key must be exactly 40 bytes");
	if (rss->queue_num > RTE_DIM(rss_conf->queue))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, act,
					  "too many queues for RSS context");

	if (rss->types & (ETH_RSS_L4_DST_ONLY | ETH_RSS_L4_SRC_ONLY) &&
	    (rss->types & ETH_RSS_IP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->types,
					  "input RSS types are not supported");

	act_index++;

	/* Check if the next not void action is END */
	NEXT_ITEM_OF_ACTION(act, actions, act_index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rss_conf, 0, sizeof(struct hns3_rss_conf));
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  act, "Not supported action.");
	}

	return 0;
}
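
/*
 * Illustrative sketch (not part of the driver): a minimal RSS action that
 * passes the checks above -- a supported hash function, queue ids below
 * nb_rx_queues, and no key (key_len == 0), in which case the driver falls
 * back to the 40-byte default key. The queue list and types are made-up
 * example values.
 *
 *	uint16_t queues[] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss rss = {
 *		.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
 *		.types = ETH_RSS_IP | ETH_RSS_TCP,
 *		.queue_num = RTE_DIM(queues),
 *		.queue = queues,
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */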

static int
hns3_disable_rss(struct hns3_hw *hw)
{
	int ret;

	/* Redirect all entries of the redirection table to queue 0 */
	ret = hns3_rss_reset_indir_table(hw);
	if (ret)
		return ret;

	/* Disable RSS */
	hw->rss_info.conf.types = 0;
	hw->rss_dis_flag = true;

	return 0;
}

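/*
 * Fall back to the default hash key when the user does not supply a
 * full-size key.
 */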
static void
hns3_parse_rss_key(struct hns3_hw *hw, struct rte_flow_action_rss *rss_conf)
{
	if (rss_conf->key == NULL ||
	    rss_conf->key_len < HNS3_RSS_KEY_SIZE) {
		hns3_info(hw, "Default RSS hash key to be set");
		rss_conf->key = hns3_hash_key;
		rss_conf->key_len = HNS3_RSS_KEY_SIZE;
	}
}

static int
hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func,
			 uint8_t *hash_algo)
{
	enum rte_eth_hash_function algo_func = *func;

	switch (algo_func) {
	case RTE_ETH_HASH_FUNCTION_DEFAULT:
		/* Keep *hash_algo as what it used to be */
		algo_func = hw->rss_info.conf.func;
		break;
	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
		*hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ;
		break;
	case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
		*hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE;
		break;
	case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
		*hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP;
		break;
	default:
		hns3_err(hw, "Invalid RSS algorithm configuration(%u)",
			 algo_func);
		return -EINVAL;
	}
	*func = algo_func;

	return 0;
}

static int
hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config)
{
	struct hns3_rss_tuple_cfg *tuple;
	int ret;

	/* Parse hash key */
	hns3_parse_rss_key(hw, rss_config);

	/* Parse hash algorithm */
	ret = hns3_parse_rss_algorithm(hw, &rss_config->func,
				       &hw->rss_info.hash_algo);
	if (ret)
		return ret;

	ret = hns3_set_rss_algo_key(hw, rss_config->key);
	if (ret)
		return ret;

	/* Update algorithm of hw */
	hw->rss_info.conf.func = rss_config->func;

	/* Set flow type supported */
	tuple = &hw->rss_info.rss_tuple_sets;
	ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_config->types);
	if (ret)
		hns3_err(hw, "Update RSS tuples by rss hf failed %d", ret);

	return ret;
}

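/*
 * Fill the RSS redirection table round-robin with the queue ids given in
 * 'conf'.
 */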
static int
hns3_update_indir_table(struct rte_eth_dev *dev,
			const struct rte_flow_action_rss *conf, uint16_t num)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint8_t indir_tbl[HNS3_RSS_IND_TBL_SIZE];
	uint16_t j, allow_rss_queues;
	uint8_t queue_id;
	uint32_t i;

	allow_rss_queues = RTE_MIN(dev->data->nb_rx_queues, hw->rss_size_max);
	/* Fill in redirection table */
	memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl,
	       HNS3_RSS_IND_TBL_SIZE);
	for (i = 0, j = 0; i < HNS3_RSS_IND_TBL_SIZE; i++, j++) {
		j %= num;
		if (conf->queue[j] >= allow_rss_queues) {
			hns3_err(hw, "Invalid queue id(%u) to be set in "
				     "redirection table, max number of rss "
				     "queues: %u", conf->queue[j],
				 allow_rss_queues);
			return -EINVAL;
		}
		queue_id = conf->queue[j];
		indir_tbl[i] = queue_id;
	}

	return hns3_set_rss_indir_table(hw, indir_tbl, HNS3_RSS_IND_TBL_SIZE);
}

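/*
 * Apply an RSS filter (add == true) or tear the current one down
 * (add == false): update the redirection table, hash key, hash algorithm
 * and tuple fields accordingly.
 */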
static int
hns3_config_rss_filter(struct rte_eth_dev *dev,
		       const struct hns3_rss_conf *conf, bool add)
{
	struct hns3_process_private *process_list = dev->process_private;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_rss_conf_ele *rss_filter_ptr;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_rss_conf *rss_info;
	uint64_t flow_types;
	uint16_t num;
	int ret;

	struct rte_flow_action_rss rss_flow_conf = {
		.func = conf->conf.func,
		.level = conf->conf.level,
		.types = conf->conf.types,
		.key_len = conf->conf.key_len,
		.queue_num = conf->conf.queue_num,
		.key = conf->conf.key_len ?
		    (void *)(uintptr_t)conf->conf.key : NULL,
		.queue = conf->conf.queue,
	};

	/* Filter the unsupported flow types */
	flow_types = conf->conf.types ?
		     rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT :
		     hw->rss_info.conf.types;
	if (flow_types != rss_flow_conf.types)
		hns3_warn(hw, "modified RSS types based on hardware support, "
			      "requested:%" PRIx64 " configured:%" PRIx64,
			  rss_flow_conf.types, flow_types);
	/* Update the useful flow types */
	rss_flow_conf.types = flow_types;

	rss_info = &hw->rss_info;
	if (!add) {
		if (!conf->valid)
			return 0;

		ret = hns3_disable_rss(hw);
		if (ret) {
			hns3_err(hw, "RSS disable failed(%d)", ret);
			return ret;
		}

		if (rss_flow_conf.queue_num) {
			/*
			 * Since the content of the queue pointer has been
			 * reset to 0, rss_info->conf.queue should be set
			 * to NULL.
			 */
			rss_info->conf.queue = NULL;
			rss_info->conf.queue_num = 0;
		}

		/* set RSS func invalid after flushed */
		rss_info->conf.func = RTE_ETH_HASH_FUNCTION_MAX;
		return 0;
	}

	/* Get rx queues num */
	num = dev->data->nb_rx_queues;

	/* Set rx queues to use */
	num = RTE_MIN(num, rss_flow_conf.queue_num);
	if (rss_flow_conf.queue_num > num)
		hns3_warn(hw, "Config queue number %u exceeds the limit and is truncated",
			  rss_flow_conf.queue_num);
	hns3_info(hw, "Max of contiguous %u PF queues are configured", num);

	rte_spinlock_lock(&hw->lock);
	if (num) {
		ret = hns3_update_indir_table(dev, &rss_flow_conf, num);
		if (ret)
			goto rss_config_err;
	}

	/* Set hash algorithm and flow types by the user's config */
	ret = hns3_hw_rss_hash_set(hw, &rss_flow_conf);
	if (ret)
		goto rss_config_err;

	ret = hns3_rss_conf_copy(rss_info, &rss_flow_conf);
	if (ret) {
		hns3_err(hw, "RSS config init fail(%d)", ret);
		goto rss_config_err;
	}

	/*
	 * When a new RSS rule is created, the old rules are overridden and
	 * marked invalid.
	 */
	TAILQ_FOREACH(rss_filter_ptr, &process_list->filter_rss_list, entries)
		rss_filter_ptr->filter_info.valid = false;

rss_config_err:
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

1660 /* Remove the rss filter */
1661 static int
1662 hns3_clear_rss_filter(struct rte_eth_dev *dev)
1663 {
1664 	struct hns3_process_private *process_list = dev->process_private;
1665 	struct hns3_adapter *hns = dev->data->dev_private;
1666 	struct hns3_rss_conf_ele *rss_filter_ptr;
1667 	struct hns3_hw *hw = &hns->hw;
1668 	int rss_rule_succ_cnt = 0; /* count for success of clearing RSS rules */
1669 	int rss_rule_fail_cnt = 0; /* count for failure of clearing RSS rules */
1670 	int ret = 0;
1671 
1672 	rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
1673 	while (rss_filter_ptr) {
1674 		TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
1675 			     entries);
1676 		ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
1677 					     false);
1678 		if (ret)
1679 			rss_rule_fail_cnt++;
1680 		else
1681 			rss_rule_succ_cnt++;
1682 		rte_free(rss_filter_ptr);
1683 		rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
1684 	}
1685 
1686 	if (rss_rule_fail_cnt) {
1687 		hns3_err(hw, "failed to delete all RSS filters, success num = %d "
1688 			     "fail num = %d", rss_rule_succ_cnt,
1689 			     rss_rule_fail_cnt);
1690 		ret = -EIO;
1691 	}
1692 
1693 	return ret;
1694 }
1695 
1696 /* Restore the rss filter */
1697 int
1698 hns3_restore_rss_filter(struct rte_eth_dev *dev)
1699 {
1700 	struct hns3_adapter *hns = dev->data->dev_private;
1701 	struct hns3_hw *hw = &hns->hw;
1702 
1703 	/* When the user flushes all rules, there is no RSS rule to restore */
1704 	if (hw->rss_info.conf.func == RTE_ETH_HASH_FUNCTION_MAX)
1705 		return 0;
1706 
1707 	return hns3_config_rss_filter(dev, &hw->rss_info, true);
1708 }
1709 
1710 static int
1711 hns3_flow_parse_rss(struct rte_eth_dev *dev,
1712 		    const struct hns3_rss_conf *conf, bool add)
1713 {
1714 	struct hns3_adapter *hns = dev->data->dev_private;
1715 	struct hns3_hw *hw = &hns->hw;
1716 	bool ret;
1717 
1718 	/* Reject a configuration identical to the one currently in effect */
1719 	ret = hns3_action_rss_same(&hw->rss_info.conf, &conf->conf);
1720 	if (ret) {
1721 		hns3_err(hw, "Duplicate RSS configuration entered: %d", ret);
1722 		return -EINVAL;
1723 	}
1724 
1725 	return hns3_config_rss_filter(dev, conf, add);
1726 }
1727 
1728 static int
1729 hns3_flow_args_check(const struct rte_flow_attr *attr,
1730 		     const struct rte_flow_item pattern[],
1731 		     const struct rte_flow_action actions[],
1732 		     struct rte_flow_error *error)
1733 {
1734 	if (pattern == NULL)
1735 		return rte_flow_error_set(error, EINVAL,
1736 					  RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1737 					  NULL, "NULL pattern.");
1738 
1739 	if (actions == NULL)
1740 		return rte_flow_error_set(error, EINVAL,
1741 					  RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1742 					  NULL, "NULL action.");
1743 
1744 	if (attr == NULL)
1745 		return rte_flow_error_set(error, EINVAL,
1746 					  RTE_FLOW_ERROR_TYPE_ATTR,
1747 					  NULL, "NULL attribute.");
1748 
1749 	return hns3_check_attr(attr, error);
1750 }
1751 
1752 /*
1753  * Check if the flow rule is supported by hns3.
1754  * It only checks the format; it does not guarantee that the rule can be
1755  * programmed into the HW, since there may not be enough room for it.
1756  */
1757 static int
1758 hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1759 		   const struct rte_flow_item pattern[],
1760 		   const struct rte_flow_action actions[],
1761 		   struct rte_flow_error *error)
1762 {
1763 	struct hns3_fdir_rule fdir_rule;
1764 	int ret;
1765 
1766 	ret = hns3_flow_args_check(attr, pattern, actions, error);
1767 	if (ret)
1768 		return ret;
1769 
1770 	if (hns3_find_rss_general_action(pattern, actions))
1771 		return hns3_parse_rss_filter(dev, actions, error);
1772 
1773 	memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1774 	return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1775 }
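
/*
 * Illustrative sketch (not part of the driver): hns3_flow_validate() is
 * reached through the generic API, and a caller only sees the
 * rte_flow_error it fills in. pattern and actions stand for a rule such
 * as the RSS example above.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0)
 *		printf("rule rejected: %s\n",
 *		       err.message != NULL ? err.message : "(none)");
 */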
1776 
1777 /*
1778  * Create a flow rule.
1779  * Theoretically, one rule can match more than one filter.
1780  * We will let it use the first filter it hits, so the
1781  * sequence matters.
1782  */
1783 static struct rte_flow *
1784 hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1785 		 const struct rte_flow_item pattern[],
1786 		 const struct rte_flow_action actions[],
1787 		 struct rte_flow_error *error)
1788 {
1789 	struct hns3_process_private *process_list = dev->process_private;
1790 	struct hns3_adapter *hns = dev->data->dev_private;
1791 	struct hns3_hw *hw = &hns->hw;
1792 	const struct hns3_rss_conf *rss_conf;
1793 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
1794 	struct hns3_rss_conf_ele *rss_filter_ptr;
1795 	struct hns3_flow_mem *flow_node;
1796 	const struct rte_flow_action *act;
1797 	struct rte_flow *flow;
1798 	struct hns3_fdir_rule fdir_rule;
1799 	int ret;
1800 
1801 	ret = hns3_flow_validate(dev, attr, pattern, actions, error);
1802 	if (ret)
1803 		return NULL;
1804 
1805 	flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
1806 	if (flow == NULL) {
1807 		rte_flow_error_set(error, ENOMEM,
1808 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1809 				   "Failed to allocate flow memory");
1810 		return NULL;
1811 	}
1812 	flow_node = rte_zmalloc("hns3 flow node",
1813 				sizeof(struct hns3_flow_mem), 0);
1814 	if (flow_node == NULL) {
1815 		rte_flow_error_set(error, ENOMEM,
1816 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1817 				   "Failed to allocate flow list memory");
1818 		rte_free(flow);
1819 		return NULL;
1820 	}
1821 
1822 	flow_node->flow = flow;
1823 	TAILQ_INSERT_TAIL(&process_list->flow_list, flow_node, entries);
1824 
1825 	act = hns3_find_rss_general_action(pattern, actions);
1826 	if (act) {
1827 		rss_conf = act->conf;
1828 
1829 		ret = hns3_flow_parse_rss(dev, rss_conf, true);
1830 		if (ret)
1831 			goto err;
1832 
1833 		rss_filter_ptr = rte_zmalloc("hns3 rss filter",
1834 					     sizeof(struct hns3_rss_conf_ele),
1835 					     0);
1836 		if (rss_filter_ptr == NULL) {
1837 			hns3_err(hw,
1838 				 "Failed to allocate hns3_rss_filter memory");
1839 			ret = -ENOMEM;
1840 			goto err;
1841 		}
1842 		hns3_rss_conf_copy(&rss_filter_ptr->filter_info,
1843 				   &rss_conf->conf);
1844 		rss_filter_ptr->filter_info.valid = true;
1845 		TAILQ_INSERT_TAIL(&process_list->filter_rss_list,
1846 				  rss_filter_ptr, entries);
1847 
1848 		flow->rule = rss_filter_ptr;
1849 		flow->filter_type = RTE_ETH_FILTER_HASH;
1850 		return flow;
1851 	}
1852 
1853 	memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1854 	ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1855 	if (ret)
1856 		goto out;
1857 
1858 	if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) {
1859 		ret = hns3_counter_new(dev, fdir_rule.act_cnt.shared,
1860 				       fdir_rule.act_cnt.id, error);
1861 		if (ret)
1862 			goto out;
1863 
1864 		flow->counter_id = fdir_rule.act_cnt.id;
1865 	}
1866 	ret = hns3_fdir_filter_program(hns, &fdir_rule, false);
1867 	if (!ret) {
1868 		fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
1869 					    sizeof(struct hns3_fdir_rule_ele),
1870 					    0);
1871 		if (fdir_rule_ptr == NULL) {
1872 			hns3_err(hw, "Failed to allocate fdir_rule memory");
1873 			ret = -ENOMEM;
1874 			goto err_fdir;
1875 		}
1876 		memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule,
1877 			sizeof(struct hns3_fdir_rule));
1878 		TAILQ_INSERT_TAIL(&process_list->fdir_list,
1879 				  fdir_rule_ptr, entries);
1880 		flow->rule = fdir_rule_ptr;
1881 		flow->filter_type = RTE_ETH_FILTER_FDIR;
1882 
1883 		return flow;
1884 	}
1885 
1886 err_fdir:
1887 	if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1888 		hns3_counter_release(dev, fdir_rule.act_cnt.id);
1889 
1890 err:
1891 	rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1892 			   "Failed to create flow");
1893 out:
1894 	TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
1895 	rte_free(flow_node);
1896 	rte_free(flow);
1897 	return NULL;
1898 }
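
/*
 * Illustrative sketch (not part of the driver): an FDIR-path rule as
 * hns3_flow_create() expects it, steering one IPv4/UDP flow to a queue
 * and attaching a counter. The address, queue index and counter id are
 * example values; attr and err are as in the sketch after
 * hns3_flow_validate().
 *
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.dst_addr = RTE_BE32(UINT32_MAX),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action_count count = { .id = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *						actions, &err);
 */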
1899 
1900 /* Destroy a flow rule on hns3. */
1901 static int
1902 hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1903 		  struct rte_flow_error *error)
1904 {
1905 	struct hns3_process_private *process_list = dev->process_private;
1906 	struct hns3_adapter *hns = dev->data->dev_private;
1907 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
1908 	struct hns3_rss_conf_ele *rss_filter_ptr;
1909 	struct hns3_flow_mem *flow_node;
1910 	enum rte_filter_type filter_type;
1911 	struct hns3_fdir_rule fdir_rule;
1912 	int ret;
1913 
1914 	if (flow == NULL)
1915 		return rte_flow_error_set(error, EINVAL,
1916 					  RTE_FLOW_ERROR_TYPE_HANDLE,
1917 					  flow, "Flow is NULL");
1918 	filter_type = flow->filter_type;
1919 	switch (filter_type) {
1920 	case RTE_ETH_FILTER_FDIR:
1921 		fdir_rule_ptr = (struct hns3_fdir_rule_ele *)flow->rule;
1922 		memcpy(&fdir_rule, &fdir_rule_ptr->fdir_conf,
1923 			   sizeof(struct hns3_fdir_rule));
1924 
1925 		ret = hns3_fdir_filter_program(hns, &fdir_rule, true);
1926 		if (ret)
1927 			return rte_flow_error_set(error, EIO,
1928 						  RTE_FLOW_ERROR_TYPE_HANDLE,
1929 						  flow,
1930 						  "Destroy FDIR fail. Try again");
1931 		if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1932 			hns3_counter_release(dev, fdir_rule.act_cnt.id);
1933 		TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
1934 		rte_free(fdir_rule_ptr);
1935 		fdir_rule_ptr = NULL;
1936 		break;
1937 	case RTE_ETH_FILTER_HASH:
1938 		rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule;
1939 		ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
1940 					     false);
1941 		if (ret)
1942 			return rte_flow_error_set(error, EIO,
1943 						  RTE_FLOW_ERROR_TYPE_HANDLE,
1944 						  flow,
1945 						  "Destroy RSS fail. Try again");
1946 		TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
1947 			     entries);
1948 		rte_free(rss_filter_ptr);
1949 		rss_filter_ptr = NULL;
1950 		break;
1951 	default:
1952 		return rte_flow_error_set(error, EINVAL,
1953 					  RTE_FLOW_ERROR_TYPE_HANDLE, flow,
1954 					  "Unsupported filter type");
1955 	}
1956 
1957 	TAILQ_FOREACH(flow_node, &process_list->flow_list, entries) {
1958 		if (flow_node->flow == flow) {
1959 			TAILQ_REMOVE(&process_list->flow_list, flow_node,
1960 				     entries);
1961 			rte_free(flow_node);
1962 			flow_node = NULL;
1963 			break;
1964 		}
1965 	}
1966 	rte_free(flow);
1967 	flow = NULL;
1968 
1969 	return 0;
1970 }
1971 
1972 /* Destroy all flow rules associated with a port on hns3. */
1973 static int
1974 hns3_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1975 {
1976 	struct hns3_adapter *hns = dev->data->dev_private;
1977 	int ret;
1978 
1979 	/* FDIR is available only in PF driver */
1980 	if (!hns->is_vf) {
1981 		ret = hns3_clear_all_fdir_filter(hns);
1982 		if (ret) {
1983 			rte_flow_error_set(error, ret,
1984 					   RTE_FLOW_ERROR_TYPE_HANDLE,
1985 					   NULL, "Failed to flush rule");
1986 			return ret;
1987 		}
1988 		hns3_counter_flush(dev);
1989 	}
1990 
1991 	ret = hns3_clear_rss_filter(dev);
1992 	if (ret) {
1993 		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1994 				   NULL, "Failed to flush rss filter");
1995 		return ret;
1996 	}
1997 
1998 	hns3_filterlist_flush(dev);
1999 
2000 	return 0;
2001 }
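
/*
 * Illustrative sketch (not part of the driver): the application-side
 * teardown calls served by hns3_flow_destroy() and hns3_flow_flush().
 *
 *	remove a single rule:
 *		rte_flow_destroy(port_id, flow, &err);
 *
 *	remove every rule on the port:
 *		rte_flow_flush(port_id, &err);
 */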
2002 
2003 /* Query an existing flow rule. */
2004 static int
2005 hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
2006 		const struct rte_flow_action *actions, void *data,
2007 		struct rte_flow_error *error)
2008 {
2009 	struct rte_flow_action_rss *rss_conf;
2010 	struct hns3_rss_conf_ele *rss_rule;
2011 	struct rte_flow_query_count *qc;
2012 	int ret;
2013 
2014 	if (!flow->rule)
2015 		return rte_flow_error_set(error, EINVAL,
2016 			RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "invalid rule");
2017 
2018 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2019 		switch (actions->type) {
2020 		case RTE_FLOW_ACTION_TYPE_VOID:
2021 			break;
2022 		case RTE_FLOW_ACTION_TYPE_COUNT:
2023 			qc = (struct rte_flow_query_count *)data;
2024 			ret = hns3_counter_query(dev, flow, qc, error);
2025 			if (ret)
2026 				return ret;
2027 			break;
2028 		case RTE_FLOW_ACTION_TYPE_RSS:
2029 			if (flow->filter_type != RTE_ETH_FILTER_HASH) {
2030 				return rte_flow_error_set(error, ENOTSUP,
2031 					RTE_FLOW_ERROR_TYPE_ACTION,
2032 					actions, "action is not supported");
2033 			}
2034 			rss_conf = (struct rte_flow_action_rss *)data;
2035 			rss_rule = (struct hns3_rss_conf_ele *)flow->rule;
2036 			rte_memcpy(rss_conf, &rss_rule->filter_info.conf,
2037 				   sizeof(struct rte_flow_action_rss));
2038 			break;
2039 		default:
2040 			return rte_flow_error_set(error, ENOTSUP,
2041 				RTE_FLOW_ERROR_TYPE_ACTION,
2042 				actions, "action is not supported");
2043 		}
2044 	}
2045 
2046 	return 0;
2047 }
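
/*
 * Illustrative sketch (not part of the driver): reading back the counter
 * attached to the FDIR rule above. The COUNT action in the query list
 * selects which action of the rule to query.
 *
 *	struct rte_flow_query_count qc = { .reset = 0 };
 *	struct rte_flow_action query[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 *	if (rte_flow_query(port_id, flow, query, &qc, &err) == 0 &&
 *	    qc.hits_set)
 *		printf("hits: %" PRIu64 "\n", qc.hits);
 */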
2048 
2049 static const struct rte_flow_ops hns3_flow_ops = {
2050 	.validate = hns3_flow_validate,
2051 	.create = hns3_flow_create,
2052 	.destroy = hns3_flow_destroy,
2053 	.flush = hns3_flow_flush,
2054 	.query = hns3_flow_query,
2055 	.isolate = NULL,
2056 };
2057 
2058 /*
2059  * The entry point of the flow API.
2060  * @param dev
2061  *   Pointer to Ethernet device.
2062  * @return
2063  *   0 on success, a negative errno value otherwise.
2064  */
2065 int
2066 hns3_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
2067 		     enum rte_filter_op filter_op, void *arg)
2068 {
2069 	struct hns3_hw *hw;
2070 	int ret = 0;
2071 
2072 	hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2073 	switch (filter_type) {
2074 	case RTE_ETH_FILTER_GENERIC:
2075 		if (filter_op != RTE_ETH_FILTER_GET)
2076 			return -EINVAL;
2077 		if (hw->adapter_state >= HNS3_NIC_CLOSED)
2078 			return -ENODEV;
2079 		*(const void **)arg = &hns3_flow_ops;
2080 		break;
2081 	default:
2082 		hns3_err(hw, "Filter type (%d) not supported", filter_type);
2083 		ret = -EOPNOTSUPP;
2084 		break;
2085 	}
2086 
2087 	return ret;
2088 }
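
/*
 * Illustrative sketch (not part of the driver): the rte_flow_* wrappers
 * fetch the ops table above through this filter-control entry, which an
 * application can also do directly:
 *
 *	const struct rte_flow_ops *ops = NULL;
 *	int ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *					  RTE_ETH_FILTER_GET, &ops);
 *
 * On success, ops points at hns3_flow_ops and every rte_flow_* call on
 * this port dispatches through it.
 */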
2089