xref: /dpdk/drivers/net/nfp/nfp_net_flow.c (revision e9fd1ebf981f361844aea9ec94e17f4bda5e1479)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2023 Corigine, Inc.
3  * All rights reserved.
4  */
5 
6 #include "nfp_net_flow.h"
7 
8 #include <rte_flow_driver.h>
9 #include <rte_hash.h>
10 #include <rte_jhash.h>
11 #include <rte_malloc.h>
12 
13 #include "nfp_logs.h"
14 #include "nfp_net_cmsg.h"
15 
16 /* Static initializer for a list of subsequent item types */
17 #define NEXT_ITEM(...) \
18 	((const enum rte_flow_item_type []){ \
19 		__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
20 	})
21 
22 /* Process structure associated with a flow item */
23 struct nfp_net_flow_item_proc {
24 	/* Bit-mask for fields supported by this PMD. */
25 	const void *mask_support;
26 	/* Bit-mask to use when @p item->mask is not provided. */
27 	const void *mask_default;
28 	/* Size in bytes for @p mask_support and @p mask_default. */
29 	const uint32_t mask_sz;
30 	/* Merge a pattern item into a flow rule handle. */
31 	int (*merge)(struct rte_flow *nfp_flow,
32 			const struct rte_flow_item *item,
33 			const struct nfp_net_flow_item_proc *proc);
34 	/* List of possible subsequent items. */
35 	const enum rte_flow_item_type *const next_item;
36 };
37 
38 static int
39 nfp_net_flow_table_add(struct nfp_net_priv *priv,
40 		struct rte_flow *nfp_flow)
41 {
42 	int ret;
43 
44 	ret = rte_hash_add_key_data(priv->flow_table, &nfp_flow->hash_key, nfp_flow);
45 	if (ret != 0) {
46 		PMD_DRV_LOG(ERR, "Add to flow table failed.");
47 		return ret;
48 	}
49 
50 	return 0;
51 }
52 
53 static int
54 nfp_net_flow_table_delete(struct nfp_net_priv *priv,
55 		struct rte_flow *nfp_flow)
56 {
57 	int ret;
58 
59 	ret = rte_hash_del_key(priv->flow_table, &nfp_flow->hash_key);
60 	if (ret < 0) {
61 		PMD_DRV_LOG(ERR, "Delete from flow table failed.");
62 		return ret;
63 	}
64 
65 	return 0;
66 }
67 
68 static struct rte_flow *
69 nfp_net_flow_table_search(struct nfp_net_priv *priv,
70 		struct rte_flow *nfp_flow)
71 {
72 	int index;
73 	struct rte_flow *flow_find;
74 
75 	index = rte_hash_lookup_data(priv->flow_table, &nfp_flow->hash_key,
76 			(void **)&flow_find);
77 	if (index < 0) {
78 		PMD_DRV_LOG(DEBUG, "Data NOT found in the flow table.");
79 		return NULL;
80 	}
81 
82 	return flow_find;
83 }
84 
/*
 * Reserve a hardware rule slot for this flow.
 *
 * The slot bitmap index and the flow's @position are related by:
 *   bitmap index = NFP_NET_FLOW_LIMIT - position - 1
 * in BOTH branches below, i.e. higher @position means a lower bitmap
 * index. Any code releasing a slot must apply the same mapping.
 *
 * @priority != 0 requests an exact slot; 0 means "first free slot".
 * Returns 0 on success, -EAGAIN if the requested slot is taken,
 * -ERANGE when all slots are in use.
 */
static int
nfp_net_flow_position_acquire(struct nfp_net_priv *priv,
		uint32_t priority,
		struct rte_flow *nfp_flow)
{
	uint32_t i;

	if (priority != 0) {
		/* Explicit priority: map it onto its fixed bitmap slot. */
		i = NFP_NET_FLOW_LIMIT - priority - 1;

		if (priv->flow_position[i]) {
			PMD_DRV_LOG(ERR, "There is already a flow rule in this place.");
			return -EAGAIN;
		}

		priv->flow_position[i] = true;
		nfp_flow->position = priority;
		return 0;
	}

	/* No priority given: linear scan for the first free slot. */
	for (i = 0; i < NFP_NET_FLOW_LIMIT; i++) {
		if (!priv->flow_position[i]) {
			priv->flow_position[i] = true;
			break;
		}
	}

	if (i == NFP_NET_FLOW_LIMIT) {
		PMD_DRV_LOG(ERR, "The limited flow number is reach.");
		return -ERANGE;
	}

	/* Lower bitmap index => higher position value, per the mapping above. */
	nfp_flow->position = NFP_NET_FLOW_LIMIT - i - 1;

	return 0;
}
121 
122 static void
123 nfp_net_flow_position_free(struct nfp_net_priv *priv,
124 		struct rte_flow *nfp_flow)
125 {
126 	priv->flow_position[nfp_flow->position] = false;
127 }
128 
129 static struct rte_flow *
130 nfp_net_flow_alloc(struct nfp_net_priv *priv,
131 		uint32_t priority,
132 		uint32_t match_len,
133 		uint32_t action_len,
134 		uint32_t port_id)
135 {
136 	int ret;
137 	char *data;
138 	struct rte_flow *nfp_flow;
139 	struct nfp_net_flow_payload *payload;
140 
141 	nfp_flow = rte_zmalloc("nfp_flow", sizeof(struct rte_flow), 0);
142 	if (nfp_flow == NULL)
143 		return NULL;
144 
145 	data = rte_zmalloc("nfp_flow_payload", match_len + action_len, 0);
146 	if (data == NULL)
147 		goto free_flow;
148 
149 	ret = nfp_net_flow_position_acquire(priv, priority, nfp_flow);
150 	if (ret != 0)
151 		goto free_payload;
152 
153 	nfp_flow->port_id      = port_id;
154 	payload                = &nfp_flow->payload;
155 	payload->match_len     = match_len;
156 	payload->action_len    = action_len;
157 	payload->match_data    = data;
158 	payload->action_data   = data + match_len;
159 
160 	return nfp_flow;
161 
162 free_payload:
163 	rte_free(data);
164 free_flow:
165 	rte_free(nfp_flow);
166 
167 	return NULL;
168 }
169 
170 static void
171 nfp_net_flow_free(struct nfp_net_priv *priv,
172 		struct rte_flow *nfp_flow)
173 {
174 	nfp_net_flow_position_free(priv, nfp_flow);
175 	rte_free(nfp_flow->payload.match_data);
176 	rte_free(nfp_flow);
177 }
178 
179 static int
180 nfp_net_flow_calculate_items(const struct rte_flow_item items[],
181 		uint32_t *match_len)
182 {
183 	const struct rte_flow_item *item;
184 
185 	for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; ++item) {
186 		switch (item->type) {
187 		case RTE_FLOW_ITEM_TYPE_ETH:
188 			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_ETH detected");
189 			*match_len = sizeof(struct nfp_net_cmsg_match_eth);
190 			return 0;
191 		case RTE_FLOW_ITEM_TYPE_IPV4:
192 			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_IPV4 detected");
193 			*match_len = sizeof(struct nfp_net_cmsg_match_v4);
194 			return 0;
195 		case RTE_FLOW_ITEM_TYPE_IPV6:
196 			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_IPV6 detected");
197 			*match_len = sizeof(struct nfp_net_cmsg_match_v6);
198 			return 0;
199 		default:
200 			PMD_DRV_LOG(ERR, "Can't calculate match length");
201 			*match_len = 0;
202 			return -ENOTSUP;
203 		}
204 	}
205 
206 	return -EINVAL;
207 }
208 
209 static int
210 nfp_net_flow_merge_eth(__rte_unused struct rte_flow *nfp_flow,
211 		const struct rte_flow_item *item,
212 		__rte_unused const struct nfp_net_flow_item_proc *proc)
213 {
214 	struct nfp_net_cmsg_match_eth *eth;
215 	const struct rte_flow_item_eth *spec;
216 
217 	spec = item->spec;
218 	if (spec == NULL) {
219 		PMD_DRV_LOG(ERR, "NFP flow merge eth: no item->spec!");
220 		return -EINVAL;
221 	}
222 
223 	nfp_flow->payload.cmsg_type = NFP_NET_CFG_MBOX_CMD_FS_ADD_ETHTYPE;
224 
225 	eth = (struct nfp_net_cmsg_match_eth *)nfp_flow->payload.match_data;
226 	eth->ether_type = rte_be_to_cpu_16(spec->type);
227 
228 	return 0;
229 }
230 
231 static int
232 nfp_net_flow_merge_ipv4(struct rte_flow *nfp_flow,
233 		const struct rte_flow_item *item,
234 		const struct nfp_net_flow_item_proc *proc)
235 {
236 	struct nfp_net_cmsg_match_v4 *ipv4;
237 	const struct rte_flow_item_ipv4 *mask;
238 	const struct rte_flow_item_ipv4 *spec;
239 
240 	spec = item->spec;
241 	if (spec == NULL) {
242 		PMD_DRV_LOG(DEBUG, "NFP flow merge ipv4: no item->spec!");
243 		return 0;
244 	}
245 
246 	mask = (item->mask != NULL) ? item->mask : proc->mask_default;
247 
248 	nfp_flow->payload.cmsg_type = NFP_NET_CFG_MBOX_CMD_FS_ADD_V4;
249 	ipv4 = (struct nfp_net_cmsg_match_v4 *)nfp_flow->payload.match_data;
250 
251 	ipv4->l4_protocol_mask = mask->hdr.next_proto_id;
252 	ipv4->src_ipv4_mask    = rte_be_to_cpu_32(mask->hdr.src_addr);
253 	ipv4->dst_ipv4_mask    = rte_be_to_cpu_32(mask->hdr.dst_addr);
254 
255 	ipv4->l4_protocol  = spec->hdr.next_proto_id;
256 	ipv4->src_ipv4     = rte_be_to_cpu_32(spec->hdr.src_addr);
257 	ipv4->dst_ipv4     = rte_be_to_cpu_32(spec->hdr.dst_addr);
258 
259 	return 0;
260 }
261 
/*
 * Merge an IPv6 item into the flow's match message.
 *
 * A missing spec is tolerated (nothing to merge); the mask falls back to
 * the item-processor default when the application supplies none.
 *
 * The 128-bit addresses are copied in 4-byte groups with the bytes of
 * each group reversed (i.e. each 32-bit word is byte-swapped from
 * network order), matching the layout the firmware expects for the
 * cmsg match structure. src_ipv6 and src_ipv6_mask are assumed to be
 * 16-byte arrays, so the loop runs four times.
 */
static int
nfp_net_flow_merge_ipv6(struct rte_flow *nfp_flow,
		const struct rte_flow_item *item,
		const struct nfp_net_flow_item_proc *proc)
{
	uint32_t i;
	struct nfp_net_cmsg_match_v6 *ipv6;
	const struct rte_flow_item_ipv6 *mask;
	const struct rte_flow_item_ipv6 *spec;

	spec = item->spec;
	if (spec == NULL) {
		PMD_DRV_LOG(DEBUG, "NFP flow merge ipv6: no item->spec!");
		return 0;
	}

	mask = (item->mask != NULL) ? item->mask : proc->mask_default;

	nfp_flow->payload.cmsg_type = NFP_NET_CFG_MBOX_CMD_FS_ADD_V6;
	ipv6 = (struct nfp_net_cmsg_match_v6 *)nfp_flow->payload.match_data;

	/* Mask: byte-swap each 32-bit word of both addresses. */
	ipv6->l4_protocol_mask = mask->hdr.proto;
	for (i = 0; i < sizeof(ipv6->src_ipv6); i += 4) {
		ipv6->src_ipv6_mask[i] = mask->hdr.src_addr[i + 3];
		ipv6->src_ipv6_mask[i + 1] = mask->hdr.src_addr[i + 2];
		ipv6->src_ipv6_mask[i + 2] = mask->hdr.src_addr[i + 1];
		ipv6->src_ipv6_mask[i + 3] = mask->hdr.src_addr[i];

		ipv6->dst_ipv6_mask[i] = mask->hdr.dst_addr[i + 3];
		ipv6->dst_ipv6_mask[i + 1] = mask->hdr.dst_addr[i + 2];
		ipv6->dst_ipv6_mask[i + 2] = mask->hdr.dst_addr[i + 1];
		ipv6->dst_ipv6_mask[i + 3] = mask->hdr.dst_addr[i];
	}

	/* Spec: identical word-wise byte swap for the matched values. */
	ipv6->l4_protocol = spec->hdr.proto;
	for (i = 0; i < sizeof(ipv6->src_ipv6); i += 4) {
		ipv6->src_ipv6[i] = spec->hdr.src_addr[i + 3];
		ipv6->src_ipv6[i + 1] = spec->hdr.src_addr[i + 2];
		ipv6->src_ipv6[i + 2] = spec->hdr.src_addr[i + 1];
		ipv6->src_ipv6[i + 3] = spec->hdr.src_addr[i];

		ipv6->dst_ipv6[i] = spec->hdr.dst_addr[i + 3];
		ipv6->dst_ipv6[i + 1] = spec->hdr.dst_addr[i + 2];
		ipv6->dst_ipv6[i + 2] = spec->hdr.dst_addr[i + 1];
		ipv6->dst_ipv6[i + 3] = spec->hdr.dst_addr[i];
	}

	return 0;
}
311 
312 static int
313 nfp_flow_merge_l4(struct rte_flow *nfp_flow,
314 		const struct rte_flow_item *item,
315 		const struct nfp_net_flow_item_proc *proc)
316 {
317 	const struct rte_flow_item_tcp *mask;
318 	const struct rte_flow_item_tcp *spec;
319 	struct nfp_net_cmsg_match_v4 *ipv4 = NULL;
320 	struct nfp_net_cmsg_match_v6 *ipv6 = NULL;
321 
322 	spec = item->spec;
323 	if (spec == NULL) {
324 		PMD_DRV_LOG(ERR, "NFP flow merge tcp: no item->spec!");
325 		return -EINVAL;
326 	}
327 
328 	mask = (item->mask != NULL) ? item->mask : proc->mask_default;
329 
330 	switch (nfp_flow->payload.cmsg_type) {
331 	case NFP_NET_CFG_MBOX_CMD_FS_ADD_V4:
332 		ipv4 = (struct nfp_net_cmsg_match_v4 *)nfp_flow->payload.match_data;
333 		break;
334 	case NFP_NET_CFG_MBOX_CMD_FS_ADD_V6:
335 		ipv6 = (struct nfp_net_cmsg_match_v6 *)nfp_flow->payload.match_data;
336 		break;
337 	default:
338 		PMD_DRV_LOG(ERR, "L3 layer neither IPv4 nor IPv6.");
339 		return -EINVAL;
340 	}
341 
342 	if (ipv4 != NULL) {
343 		ipv4->src_port_mask = rte_be_to_cpu_16(mask->hdr.src_port);
344 		ipv4->dst_port_mask = rte_be_to_cpu_16(mask->hdr.dst_port);
345 
346 		ipv4->src_port = rte_be_to_cpu_16(spec->hdr.src_port);
347 		ipv4->dst_port = rte_be_to_cpu_16(spec->hdr.dst_port);
348 	} else {
349 		ipv6->src_port_mask = rte_be_to_cpu_16(mask->hdr.src_port);
350 		ipv6->dst_port_mask = rte_be_to_cpu_16(mask->hdr.dst_port);
351 
352 		ipv6->src_port = rte_be_to_cpu_16(spec->hdr.src_port);
353 		ipv6->dst_port = rte_be_to_cpu_16(spec->hdr.dst_port);
354 	}
355 
356 	return 0;
357 }
358 
/*
 * Graph of supported items and associated process function.
 *
 * The END entry acts as the graph root: its next_item list names the
 * item types allowed first in a pattern. Each entry's next_item list
 * names the types that may follow it; entries without next_item
 * terminate the pattern. mask_support limits which header fields the
 * hardware can match; mask_default is used when an item carries no mask.
 */
static const struct nfp_net_flow_item_proc nfp_net_flow_item_proc_list[] = {
	[RTE_FLOW_ITEM_TYPE_END] = {
		/* Patterns may start with ETH, IPv4 or IPv6. */
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH,
				RTE_FLOW_ITEM_TYPE_IPV4,
				RTE_FLOW_ITEM_TYPE_IPV6),
	},
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		/* ETH is terminal: no next_item, no mask handling. */
		.merge = nfp_net_flow_merge_eth,
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_TCP,
				RTE_FLOW_ITEM_TYPE_UDP,
				RTE_FLOW_ITEM_TYPE_SCTP),
		/* Matchable fields: protocol and both addresses. */
		.mask_support = &(const struct rte_flow_item_ipv4){
			.hdr = {
				.next_proto_id = 0xff,
				.src_addr      = RTE_BE32(0xffffffff),
				.dst_addr      = RTE_BE32(0xffffffff),
			},
		},
		.mask_default = &rte_flow_item_ipv4_mask,
		.mask_sz = sizeof(struct rte_flow_item_ipv4),
		.merge = nfp_net_flow_merge_ipv4,
	},
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_TCP,
				RTE_FLOW_ITEM_TYPE_UDP,
				RTE_FLOW_ITEM_TYPE_SCTP),
		/* Matchable fields: protocol and both 128-bit addresses. */
		.mask_support = &(const struct rte_flow_item_ipv6){
			.hdr = {
				.proto    = 0xff,
				.src_addr = "\xff\xff\xff\xff\xff\xff\xff\xff"
						"\xff\xff\xff\xff\xff\xff\xff\xff",
				.dst_addr = "\xff\xff\xff\xff\xff\xff\xff\xff"
						"\xff\xff\xff\xff\xff\xff\xff\xff",
			},
		},
		.mask_default = &rte_flow_item_ipv6_mask,
		.mask_sz = sizeof(struct rte_flow_item_ipv6),
		.merge = nfp_net_flow_merge_ipv6,
	},
	/* L4 items share one merge routine; only the port fields match. */
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.mask_support = &(const struct rte_flow_item_tcp){
			.hdr = {
				.src_port  = RTE_BE16(0xffff),
				.dst_port  = RTE_BE16(0xffff),
			},
		},
		.mask_default = &rte_flow_item_tcp_mask,
		.mask_sz = sizeof(struct rte_flow_item_tcp),
		.merge = nfp_flow_merge_l4,
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.mask_support = &(const struct rte_flow_item_udp){
			.hdr = {
				.src_port = RTE_BE16(0xffff),
				.dst_port = RTE_BE16(0xffff),
			},
		},
		.mask_default = &rte_flow_item_udp_mask,
		.mask_sz = sizeof(struct rte_flow_item_udp),
		.merge = nfp_flow_merge_l4,
	},
	[RTE_FLOW_ITEM_TYPE_SCTP] = {
		.mask_support = &(const struct rte_flow_item_sctp){
			.hdr = {
				.src_port  = RTE_BE16(0xffff),
				.dst_port  = RTE_BE16(0xffff),
			},
		},
		.mask_default = &rte_flow_item_sctp_mask,
		.mask_sz = sizeof(struct rte_flow_item_sctp),
		.merge = nfp_flow_merge_l4,
	},
};
435 
436 static int
437 nfp_net_flow_item_check(const struct rte_flow_item *item,
438 		const struct nfp_net_flow_item_proc *proc)
439 {
440 	uint32_t i;
441 	int ret = 0;
442 	const uint8_t *mask;
443 
444 	/* item->last and item->mask cannot exist without item->spec. */
445 	if (item->spec == NULL) {
446 		if (item->mask || item->last) {
447 			PMD_DRV_LOG(ERR, "'mask' or 'last' field provided"
448 					" without a corresponding 'spec'.");
449 			return -EINVAL;
450 		}
451 
452 		/* No spec, no mask, no problem. */
453 		return 0;
454 	}
455 
456 	mask = (item->mask != NULL) ? item->mask : proc->mask_default;
457 
458 	/*
459 	 * Single-pass check to make sure that:
460 	 * - Mask is supported, no bits are set outside proc->mask_support.
461 	 * - Both item->spec and item->last are included in mask.
462 	 */
463 	for (i = 0; i != proc->mask_sz; ++i) {
464 		if (mask[i] == 0)
465 			continue;
466 
467 		if ((mask[i] | ((const uint8_t *)proc->mask_support)[i]) !=
468 				((const uint8_t *)proc->mask_support)[i]) {
469 			PMD_DRV_LOG(ERR, "Unsupported field found in 'mask'.");
470 			ret = -EINVAL;
471 			break;
472 		}
473 
474 		if (item->last != NULL &&
475 				(((const uint8_t *)item->spec)[i] & mask[i]) !=
476 				(((const uint8_t *)item->last)[i] & mask[i])) {
477 			PMD_DRV_LOG(ERR, "Range between 'spec' and 'last'"
478 					" is larger than 'mask'.");
479 			ret = -ERANGE;
480 			break;
481 		}
482 	}
483 
484 	return ret;
485 }
486 
/*
 * Walk the pattern and merge every item into the flow's match payload.
 *
 * The walk follows nfp_net_flow_item_proc_list as a graph: starting
 * from the END (root) entry, an item type is only accepted if it
 * appears in the previous entry's next_item list. Each accepted item is
 * sanity-checked and then merged via its proc->merge callback.
 *
 * Returns 0 on success, -ENOTSUP for an item out of sequence or without
 * a merge handler, -EINVAL for a failed sanity check, or the merge
 * callback's error code.
 */
static int
nfp_net_flow_compile_items(const struct rte_flow_item items[],
		struct rte_flow *nfp_flow)
{
	uint32_t i;
	int ret = 0;
	const struct rte_flow_item *item;
	const struct nfp_net_flow_item_proc *proc_list;

	/* Start at the graph root (the END entry). */
	proc_list = nfp_net_flow_item_proc_list;

	for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; ++item) {
		const struct nfp_net_flow_item_proc *proc = NULL;

		/* Accept the item only if the previous entry allows it next. */
		for (i = 0; (proc_list->next_item != NULL) &&
				(proc_list->next_item[i] != RTE_FLOW_ITEM_TYPE_END); ++i) {
			if (proc_list->next_item[i] == item->type) {
				proc = &nfp_net_flow_item_proc_list[item->type];
				break;
			}
		}

		if (proc == NULL) {
			PMD_DRV_LOG(ERR, "No next item provided for %d", item->type);
			ret = -ENOTSUP;
			break;
		}

		/* Perform basic sanity checks */
		ret = nfp_net_flow_item_check(item, proc);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "NFP flow item %d check failed", item->type);
			ret = -EINVAL;
			break;
		}

		if (proc->merge == NULL) {
			PMD_DRV_LOG(ERR, "NFP flow item %d no proc function", item->type);
			ret = -ENOTSUP;
			break;
		}

		ret = proc->merge(nfp_flow, item, proc);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "NFP flow item %d exact merge failed", item->type);
			break;
		}

		/* Advance the graph cursor to the item just consumed. */
		proc_list = proc;
	}

	return ret;
}
540 
541 static void
542 nfp_net_flow_action_drop(struct rte_flow *nfp_flow)
543 {
544 	struct nfp_net_cmsg_action *action_data;
545 
546 	action_data = (struct nfp_net_cmsg_action *)nfp_flow->payload.action_data;
547 
548 	action_data->action = NFP_NET_CMSG_ACTION_DROP;
549 }
550 
551 static void
552 nfp_net_flow_action_mark(struct rte_flow *nfp_flow,
553 		const struct rte_flow_action *action)
554 {
555 	struct nfp_net_cmsg_action *action_data;
556 	const struct rte_flow_action_mark *mark;
557 
558 	action_data = (struct nfp_net_cmsg_action *)nfp_flow->payload.action_data;
559 	mark = action->conf;
560 
561 	action_data->action |= NFP_NET_CMSG_ACTION_MARK;
562 	action_data->mark_id = mark->id;
563 }
564 
565 static void
566 nfp_net_flow_action_queue(struct rte_flow *nfp_flow,
567 		const struct rte_flow_action *action)
568 {
569 	struct nfp_net_cmsg_action *action_data;
570 	const struct rte_flow_action_queue *queue;
571 
572 	action_data = (struct nfp_net_cmsg_action *)nfp_flow->payload.action_data;
573 	queue = action->conf;
574 
575 	action_data->action |= NFP_NET_CMSG_ACTION_QUEUE;
576 	action_data->queue = queue->index;
577 }
578 
579 static int
580 nfp_net_flow_compile_actions(const struct rte_flow_action actions[],
581 		struct rte_flow *nfp_flow)
582 {
583 	const struct rte_flow_action *action;
584 
585 	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
586 		switch (action->type) {
587 		case RTE_FLOW_ACTION_TYPE_DROP:
588 			PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_DROP");
589 			nfp_net_flow_action_drop(nfp_flow);
590 			return 0;
591 		case RTE_FLOW_ACTION_TYPE_MARK:
592 			PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_MARK");
593 			nfp_net_flow_action_mark(nfp_flow, action);
594 			break;
595 		case RTE_FLOW_ACTION_TYPE_QUEUE:
596 			PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_QUEUE");
597 			nfp_net_flow_action_queue(nfp_flow, action);
598 			break;
599 		default:
600 			PMD_DRV_LOG(ERR, "Unsupported action type: %d", action->type);
601 			return -ENOTSUP;
602 		}
603 	}
604 
605 	return 0;
606 }
607 
608 static void
609 nfp_net_flow_process_priority(struct rte_flow *nfp_flow,
610 		uint32_t match_len)
611 {
612 	struct nfp_net_cmsg_match_v4 *ipv4;
613 	struct nfp_net_cmsg_match_v6 *ipv6;
614 
615 	switch (match_len) {
616 	case sizeof(struct nfp_net_cmsg_match_v4):
617 		ipv4 = (struct nfp_net_cmsg_match_v4 *)nfp_flow->payload.match_data;
618 		ipv4->position = nfp_flow->position;
619 		break;
620 	case sizeof(struct nfp_net_cmsg_match_v6):
621 		ipv6 = (struct nfp_net_cmsg_match_v6 *)nfp_flow->payload.match_data;
622 		ipv6->position = nfp_flow->position;
623 		break;
624 	default:
625 		break;
626 	}
627 }
628 
629 static struct rte_flow *
630 nfp_net_flow_setup(struct rte_eth_dev *dev,
631 		const struct rte_flow_attr *attr,
632 		const struct rte_flow_item items[],
633 		const struct rte_flow_action actions[])
634 {
635 	int ret;
636 	char *hash_data;
637 	uint32_t port_id;
638 	uint32_t action_len;
639 	struct nfp_net_hw *hw;
640 	uint32_t match_len = 0;
641 	struct nfp_net_priv *priv;
642 	struct rte_flow *nfp_flow;
643 	struct rte_flow *flow_find;
644 	struct nfp_app_fw_nic *app_fw_nic;
645 
646 	hw = dev->data->dev_private;
647 	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw->pf_dev->app_fw_priv);
648 	priv = app_fw_nic->ports[hw->idx]->priv;
649 
650 	ret = nfp_net_flow_calculate_items(items, &match_len);
651 	if (ret != 0) {
652 		PMD_DRV_LOG(ERR, "Key layers calculate failed.");
653 		return NULL;
654 	}
655 
656 	action_len = sizeof(struct nfp_net_cmsg_action);
657 	port_id = ((struct nfp_net_hw *)dev->data->dev_private)->nfp_idx;
658 
659 	nfp_flow = nfp_net_flow_alloc(priv, attr->priority, match_len, action_len, port_id);
660 	if (nfp_flow == NULL) {
661 		PMD_DRV_LOG(ERR, "Alloc nfp flow failed.");
662 		return NULL;
663 	}
664 
665 	ret = nfp_net_flow_compile_items(items, nfp_flow);
666 	if (ret != 0) {
667 		PMD_DRV_LOG(ERR, "NFP flow item process failed.");
668 		goto free_flow;
669 	}
670 
671 	ret = nfp_net_flow_compile_actions(actions, nfp_flow);
672 	if (ret != 0) {
673 		PMD_DRV_LOG(ERR, "NFP flow action process failed.");
674 		goto free_flow;
675 	}
676 
677 	/* Calculate and store the hash_key for later use */
678 	hash_data = nfp_flow->payload.match_data;
679 	nfp_flow->hash_key = rte_jhash(hash_data, match_len + action_len,
680 			priv->hash_seed);
681 
682 	/* Find the flow in hash table */
683 	flow_find = nfp_net_flow_table_search(priv, nfp_flow);
684 	if (flow_find != NULL) {
685 		PMD_DRV_LOG(ERR, "This flow is already exist.");
686 		goto free_flow;
687 	}
688 
689 	priv->flow_count++;
690 
691 	nfp_net_flow_process_priority(nfp_flow, match_len);
692 
693 	return nfp_flow;
694 
695 free_flow:
696 	nfp_net_flow_free(priv, nfp_flow);
697 
698 	return NULL;
699 }
700 
/*
 * Undo the bookkeeping done by nfp_net_flow_setup().
 *
 * Currently only the per-port flow counter needs reverting; the flow's
 * memory and position are released separately by nfp_net_flow_free().
 * Always returns 0 today, but callers check the result so additional
 * teardown steps can fail in the future.
 */
static int
nfp_net_flow_teardown(struct nfp_net_priv *priv,
		__rte_unused struct rte_flow *nfp_flow)
{
	priv->flow_count--;

	return 0;
}
709 
/*
 * Send one flow rule to the firmware via the mailbox control message.
 *
 * The message is the command word followed by the match data and then
 * the action data. @delete_flag selects deletion; this relies on each
 * *_DEL command id being exactly *_ADD + 1 in the cmsg enum (hence the
 * bare cmd++) — NOTE(review): confirm that layout holds in nfp_net_cmsg.h.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL when the
 * firmware rejects or transmission fails.
 */
static int
nfp_net_flow_offload(struct nfp_net_hw *hw,
		struct rte_flow *flow,
		bool delete_flag)
{
	int ret;
	char *tmp;
	uint32_t msg_size;
	struct nfp_net_cmsg *cmsg;

	/* Command word + match + action payloads. */
	msg_size = sizeof(uint32_t) + flow->payload.match_len +
			flow->payload.action_len;
	cmsg = nfp_net_cmsg_alloc(msg_size);
	if (cmsg == NULL) {
		PMD_DRV_LOG(ERR, "Alloc cmsg failed.");
		return -ENOMEM;
	}

	cmsg->cmd = flow->payload.cmsg_type;
	if (delete_flag)
		cmsg->cmd++;

	/* Pack match data first, action data immediately after. */
	tmp = (char *)cmsg->data;
	rte_memcpy(tmp, flow->payload.match_data, flow->payload.match_len);
	tmp += flow->payload.match_len;
	rte_memcpy(tmp, flow->payload.action_data, flow->payload.action_len);

	ret = nfp_net_cmsg_xmit(hw, cmsg, msg_size);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Send cmsg failed.");
		ret = -EINVAL;
		goto free_cmsg;
	}

	/* Success intentionally falls through: cmsg is freed either way. */
free_cmsg:
	nfp_net_cmsg_free(cmsg);

	return ret;
}
749 
750 static int
751 nfp_net_flow_validate(struct rte_eth_dev *dev,
752 		const struct rte_flow_attr *attr,
753 		const struct rte_flow_item items[],
754 		const struct rte_flow_action actions[],
755 		struct rte_flow_error *error)
756 {
757 	int ret;
758 	struct nfp_net_hw *hw;
759 	struct rte_flow *nfp_flow;
760 	struct nfp_net_priv *priv;
761 	struct nfp_app_fw_nic *app_fw_nic;
762 
763 	hw = dev->data->dev_private;
764 	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw->pf_dev->app_fw_priv);
765 	priv = app_fw_nic->ports[hw->idx]->priv;
766 
767 	nfp_flow = nfp_net_flow_setup(dev, attr, items, actions);
768 	if (nfp_flow == NULL) {
769 		return rte_flow_error_set(error, ENOTSUP,
770 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
771 				NULL, "This flow can not be offloaded.");
772 	}
773 
774 	ret = nfp_net_flow_teardown(priv, nfp_flow);
775 	if (ret != 0) {
776 		return rte_flow_error_set(error, EINVAL,
777 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
778 				NULL, "Flow resource free failed.");
779 	}
780 
781 	nfp_net_flow_free(priv, nfp_flow);
782 
783 	return 0;
784 }
785 
/*
 * rte_flow create callback.
 *
 * Builds the flow in software, inserts it into the per-port hash table,
 * then programs it into the firmware. The error unwinding mirrors that
 * order in reverse: a firmware failure removes the table entry, and any
 * failure after setup reverts the counter and frees the flow.
 *
 * Returns the new flow handle, or NULL with @error filled in.
 */
static struct rte_flow *
nfp_net_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item items[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	int ret;
	struct nfp_net_hw *hw;
	struct rte_flow *nfp_flow;
	struct nfp_net_priv *priv;
	struct nfp_app_fw_nic *app_fw_nic;

	hw = dev->data->dev_private;
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw->pf_dev->app_fw_priv);
	priv = app_fw_nic->ports[hw->idx]->priv;

	nfp_flow = nfp_net_flow_setup(dev, attr, items, actions);
	if (nfp_flow == NULL) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "This flow can not be offloaded.");
		return NULL;
	}

	/* Add the flow to flow hash table */
	ret = nfp_net_flow_table_add(priv, nfp_flow);
	if (ret != 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Add flow to the flow table failed.");
		goto flow_teardown;
	}

	/* Add the flow to hardware */
	ret = nfp_net_flow_offload(hw, nfp_flow, false);
	if (ret != 0) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Add flow to firmware failed.");
		goto table_delete;
	}

	return nfp_flow;

table_delete:
	nfp_net_flow_table_delete(priv, nfp_flow);
flow_teardown:
	nfp_net_flow_teardown(priv, nfp_flow);
	nfp_net_flow_free(priv, nfp_flow);

	return NULL;
}
837 
/*
 * rte_flow destroy callback.
 *
 * Removes the flow from the firmware, from the hash table, and reverts
 * the setup bookkeeping. Note the unconditional exit path: @nfp_flow is
 * freed on every path, including error paths, so the handle is always
 * invalid for the caller once this returns.
 *
 * Returns 0 on success, -EINVAL with @error filled in otherwise.
 */
static int
nfp_net_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *nfp_flow,
		struct rte_flow_error *error)
{
	int ret;
	struct nfp_net_hw *hw;
	struct nfp_net_priv *priv;
	struct rte_flow *flow_find;
	struct nfp_app_fw_nic *app_fw_nic;

	hw = dev->data->dev_private;
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw->pf_dev->app_fw_priv);
	priv = app_fw_nic->ports[hw->idx]->priv;

	/* Find the flow in flow hash table */
	flow_find = nfp_net_flow_table_search(priv, nfp_flow);
	if (flow_find == NULL) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Flow does not exist.");
		ret = -EINVAL;
		goto exit;
	}

	/* Delete the flow from hardware */
	ret = nfp_net_flow_offload(hw, nfp_flow, true);
	if (ret != 0) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Delete flow from firmware failed.");
		ret = -EINVAL;
		goto exit;
	}

	/* Delete the flow from flow hash table */
	ret = nfp_net_flow_table_delete(priv, nfp_flow);
	if (ret != 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Delete flow from the flow table failed.");
		ret = -EINVAL;
		goto exit;
	}

	ret = nfp_net_flow_teardown(priv, nfp_flow);
	if (ret != 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "Flow teardown failed.");
		ret = -EINVAL;
		goto exit;
	}

exit:
	/* Freed on success and failure alike — the handle dies here. */
	nfp_net_flow_free(priv, nfp_flow);

	return ret;
}
894 
/*
 * rte_flow flush callback: destroy every flow on this port.
 *
 * Iterates the hash table while nfp_net_flow_destroy() removes entries
 * from it — NOTE(review): this assumes rte_hash_iterate() tolerates
 * concurrent deletion of the current entry; confirm against the
 * rte_hash documentation. Stops at the first destroy failure and
 * returns its error code (with @error already filled in by destroy).
 */
static int
nfp_net_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	int ret = 0;
	void *next_data;
	uint32_t iter = 0;
	const void *next_key;
	struct nfp_net_hw *hw;
	struct rte_flow *nfp_flow;
	struct rte_hash *flow_table;
	struct nfp_app_fw_nic *app_fw_nic;

	hw = dev->data->dev_private;
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw->pf_dev->app_fw_priv);
	flow_table = app_fw_nic->ports[hw->idx]->priv->flow_table;

	while (rte_hash_iterate(flow_table, &next_key, &next_data, &iter) >= 0) {
		nfp_flow = next_data;
		ret = nfp_net_flow_destroy(dev, nfp_flow, error);
		if (ret != 0)
			break;
	}

	return ret;
}
921 
/* rte_flow driver operations exposed via nfp_net_flow_ops_get(). */
static const struct rte_flow_ops nfp_net_flow_ops = {
	.validate                = nfp_net_flow_validate,
	.create                  = nfp_net_flow_create,
	.destroy                 = nfp_net_flow_destroy,
	.flush                   = nfp_net_flow_flush,
};
928 
929 int
930 nfp_net_flow_ops_get(struct rte_eth_dev *dev,
931 		const struct rte_flow_ops **ops)
932 {
933 	struct nfp_net_hw *hw;
934 
935 	if (rte_eth_dev_is_repr(dev)) {
936 		*ops = NULL;
937 		PMD_DRV_LOG(ERR, "Port is a representor.");
938 		return -EINVAL;
939 	}
940 
941 	hw = dev->data->dev_private;
942 	if ((hw->super.ctrl_ext & NFP_NET_CFG_CTRL_FLOW_STEER) == 0) {
943 		*ops = NULL;
944 		return 0;
945 	}
946 
947 	*ops = &nfp_net_flow_ops;
948 
949 	return 0;
950 }
951 
952 int
953 nfp_net_flow_priv_init(struct nfp_pf_dev *pf_dev,
954 		uint16_t port)
955 {
956 	int ret = 0;
957 	struct nfp_net_priv *priv;
958 	char flow_name[RTE_HASH_NAMESIZE];
959 	struct nfp_app_fw_nic *app_fw_nic;
960 	const char *pci_name = strchr(pf_dev->pci_dev->name, ':') + 1;
961 
962 	snprintf(flow_name, sizeof(flow_name), "%s_fl_%u", pci_name, port);
963 
964 	struct rte_hash_parameters flow_hash_params = {
965 		.name       = flow_name,
966 		.entries    = NFP_NET_FLOW_LIMIT,
967 		.hash_func  = rte_jhash,
968 		.socket_id  = rte_socket_id(),
969 		.key_len    = sizeof(uint32_t),
970 		.extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
971 	};
972 
973 	priv = rte_zmalloc("nfp_app_nic_priv", sizeof(struct nfp_net_priv), 0);
974 	if (priv == NULL) {
975 		PMD_INIT_LOG(ERR, "NFP app nic priv creation failed");
976 		ret = -ENOMEM;
977 		goto exit;
978 	}
979 
980 	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
981 	app_fw_nic->ports[port]->priv = priv;
982 	priv->hash_seed = (uint32_t)rte_rand();
983 
984 	/* Flow table */
985 	flow_hash_params.hash_func_init_val = priv->hash_seed;
986 	priv->flow_table = rte_hash_create(&flow_hash_params);
987 	if (priv->flow_table == NULL) {
988 		PMD_INIT_LOG(ERR, "flow hash table creation failed");
989 		ret = -ENOMEM;
990 		goto free_priv;
991 	}
992 
993 	return 0;
994 
995 free_priv:
996 	rte_free(priv);
997 exit:
998 	return ret;
999 }
1000 
1001 void
1002 nfp_net_flow_priv_uninit(struct nfp_pf_dev *pf_dev,
1003 		uint16_t port)
1004 {
1005 	struct nfp_net_priv *priv;
1006 	struct nfp_app_fw_nic *app_fw_nic;
1007 
1008 	if (pf_dev == NULL)
1009 		return;
1010 
1011 	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
1012 	priv = app_fw_nic->ports[port]->priv;
1013 	if (priv != NULL)
1014 		rte_hash_free(priv->flow_table);
1015 
1016 	rte_free(priv);
1017 }
1018