xref: /dpdk/drivers/net/ntnic/nthw/flow_api/flow_api.c (revision d5f81030df75c587885245ff1b14f123448a97c7)
1 /*
2  * SPDX-License-Identifier: BSD-3-Clause
3  * Copyright(c) 2023 Napatech A/S
4  */
5 #include "rte_spinlock.h"
6 #include "ntlog.h"
7 #include "nt_util.h"
8 
9 #include "flow_api_engine.h"
10 #include "flow_api_nic_setup.h"
11 #include "ntnic_mod_reg.h"
12 
13 #include "flow_api.h"
14 #include "flow_filter.h"
15 
16 #define RSS_TO_STRING(name) \
17 	{                \
18 		name, #name   \
19 	}
20 
21 const char *dbg_res_descr[] = {
22 	[RES_QUEUE] = "RES_QUEUE",
23 	[RES_CAT_CFN] = "RES_CAT_CFN",
24 	[RES_CAT_COT] = "RES_CAT_COT",
25 	[RES_CAT_EXO] = "RES_CAT_EXO",
26 	[RES_CAT_LEN] = "RES_CAT_LEN",
27 	[RES_KM_FLOW_TYPE] = "RES_KM_FLOW_TYPE",
28 	[RES_KM_CATEGORY] = "RES_KM_CATEGORY",
29 	[RES_HSH_RCP] = "RES_HSH_RCP",
30 	[RES_PDB_RCP] = "RES_PDB_RCP",
31 	[RES_QSL_RCP] = "RES_QSL_RCP",
32 	[RES_QSL_QST] = "RES_QSL_QST",
33 	[RES_SLC_LR_RCP] = "RES_SLC_LR_RCP",
34 	[RES_FLM_FLOW_TYPE] = "RES_FLM_FLOW_TYPE",
35 	[RES_FLM_RCP] = "RES_FLM_RCP",
36 	[RES_TPE_RCP] = "RES_TPE_RCP",
37 	[RES_TPE_EXT] = "RES_TPE_EXT",
38 	[RES_TPE_RPL] = "RES_TPE_RPL",
39 	[RES_SCRUB_RCP] = "RES_SCRUB_RCP",
40 	[RES_COUNT] = "RES_COUNT",
41 	[RES_INVALID] = "RES_INVALID",
42 };
43 
44 static_assert(RTE_DIM(dbg_res_descr) == RES_END,
45 	"The list of debug descriptions is not complete");
46 
47 static struct flow_nic_dev *dev_base;
48 static rte_spinlock_t base_mtx = RTE_SPINLOCK_INITIALIZER;
49 
50 /*
51  * Error handling
52  */
53 
54 static const struct {
55 	const char *message;
56 } err_msg[] = {
57 	[ERR_SUCCESS] = {
58 		"Operation successfully completed" },
59 	[ERR_FAILED] = {
60 		"Operation failed" },
61 	[ERR_MEMORY] = {
62 		"Memory allocation failed" },
63 	[ERR_OUTPUT_TOO_MANY] = {
64 		"Too many output destinations" },
65 	[ERR_RSS_TOO_MANY_QUEUES] = {
66 		"Too many output queues for RSS" },
67 	[ERR_VLAN_TYPE_NOT_SUPPORTED] = {
68 		"The VLAN TPID specified is not supported" },
69 	[ERR_VXLAN_HEADER_NOT_ACCEPTED] = {
70 		"The VxLan Push header specified is not accepted" },
71 	[ERR_VXLAN_POP_INVALID_RECIRC_PORT] = {
72 		"While interpreting VxLan Pop action, could not find a destination port" },
73 	[ERR_VXLAN_POP_FAILED_CREATING_VTEP] = {
74 		"Failed to create a HW-internal VTEP port" },
75 	[ERR_MATCH_VLAN_TOO_MANY] = {
76 		"Too many VLAN tag matches" },
77 	[ERR_MATCH_INVALID_IPV6_HDR] = {
78 		"Invalid IPv6 header specified" },
79 	[ERR_MATCH_TOO_MANY_TUNNEL_PORTS] = {
80 		"Too many tunnel ports. HW limit reached" },
81 	[ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM] = {
82 		"Unknown or unsupported flow match element received" },
83 	[ERR_MATCH_FAILED_BY_HW_LIMITS] = {
84 		"Match failed because of HW limitations" },
85 	[ERR_MATCH_RESOURCE_EXHAUSTION] = {
86 		"Match failed because of HW resource limitations" },
87 	[ERR_MATCH_FAILED_TOO_COMPLEX] = {
88 		"Match failed because of too complex element definitions" },
89 	[ERR_ACTION_REPLICATION_FAILED] = {
90 		"Action failed. Too many output destinations" },
91 	[ERR_ACTION_OUTPUT_RESOURCE_EXHAUSTION] = {
92 		"Action Output failed, due to HW resource exhaustion" },
93 	[ERR_ACTION_TUNNEL_HEADER_PUSH_OUTPUT_LIMIT] = {
94 		"Push Tunnel Header action cannot output to multiple destination queues" },
95 	[ERR_ACTION_INLINE_MOD_RESOURCE_EXHAUSTION] = {
96 		"Inline action HW resource exhaustion" },
97 	[ERR_ACTION_RETRANSMIT_RESOURCE_EXHAUSTION] = {
98 		"Action retransmit/recirculate HW resource exhaustion" },
99 	[ERR_ACTION_FLOW_COUNTER_EXHAUSTION] = {
100 		"Flow counter HW resource exhaustion" },
101 	[ERR_ACTION_INTERNAL_RESOURCE_EXHAUSTION] = {
102 		"Internal HW resource exhaustion while handling actions" },
103 	[ERR_INTERNAL_QSL_COMPARE_FAILED] = {
104 		"Internal HW QSL compare failed" },
105 	[ERR_INTERNAL_CAT_FUNC_REUSE_FAILED] = {
106 		"Internal CAT CFN reuse failed" },
107 	[ERR_MATCH_ENTROPHY_FAILED] = {
108 		"Match variations too complex" },
109 	[ERR_MATCH_CAM_EXHAUSTED] = {
110 		"Match failed because the CAM/TCAM is full" },
111 	[ERR_INTERNAL_VIRTUAL_PORT_CREATION_FAILED] = {
112 		"Internal creation of a tunnel end point port failed" },
113 	[ERR_ACTION_UNSUPPORTED] = {
114 		"Unknown or unsupported flow action received" },
115 	[ERR_REMOVE_FLOW_FAILED] = {
116 		"Removing flow failed" },
117 	[ERR_ACTION_NO_OUTPUT_DEFINED_USE_DEFAULT] = {
118 		"No output queue specified. Ignoring this flow offload and using default queue"},
119 	[ERR_ACTION_NO_OUTPUT_QUEUE_FOUND] = {
120 		"No output queue found"},
121 	[ERR_MATCH_UNSUPPORTED_ETHER_TYPE] = {
122 		"Unsupported EtherType or rejected by offload policy"},
123 	[ERR_OUTPUT_INVALID] = {
124 		"Destination port specified is invalid or not reachable from this NIC"},
125 	[ERR_MATCH_PARTIAL_OFFLOAD_NOT_SUPPORTED] = {
126 		"Partial offload is not supported in this configuration"},
127 	[ERR_MATCH_CAT_CAM_EXHAUSTED] = {
128 		"Match failed because the CAT CAM is exhausted"},
129 	[ERR_MATCH_KCC_KEY_CLASH] = {
130 		"Match failed because the CAT CAM key clashed with an existing KCC key"},
131 	[ERR_MATCH_CAT_CAM_FAILED] = {
132 		"Match failed because the CAT CAM write failed"},
133 	[ERR_PARTIAL_FLOW_MARK_TOO_BIG] = {
134 		"Partial flow mark too big for device"},
135 	[ERR_FLOW_PRIORITY_VALUE_INVALID] = {
136 		"Invalid priority value"},
137 	[ERR_ACTION_MULTIPLE_PORT_ID_UNSUPPORTED] = {
138 		"Multiple port_id actions for one flow are not supported"},
139 	[ERR_RSS_TOO_LONG_KEY] = {
140 		"RSS hash key is too long"},
141 	[ERR_ACTION_AGE_UNSUPPORTED_GROUP_0] = {
142 		"Action AGE is not supported for flows in group 0"},
143 	[ERR_MSG_NO_MSG] = {
144 		"Unknown error"},
145 };
146 
147 static_assert(RTE_DIM(err_msg) == ERR_MSG_END,
148 	"The list of error messages is not complete.");
149 
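/*
 * Translate an internal flow error code into an rte_flow_error:
 * ERR_SUCCESS maps to RTE_FLOW_ERROR_TYPE_NONE, everything else is reported
 * as RTE_FLOW_ERROR_TYPE_UNSPECIFIED with the matching message from err_msg.
 */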
150 void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct rte_flow_error *error)
151 {
152 	assert(msg < ERR_MSG_NO_MSG);
153 
154 	if (error) {
155 		error->message = err_msg[msg].message;
156 		error->type = (msg == ERR_SUCCESS) ? RTE_FLOW_ERROR_TYPE_NONE :
157 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
158 	}
159 }
160 
161 /*
162  * Resources
163  */
164 
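/*
 * Allocate one element of the given resource type. The free bitmap is
 * scanned in steps of "alignment"; the first unused index is marked used
 * with a reference count of 1 and returned, or -1 if none is free.
 */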
165 int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
166 	uint32_t alignment)
167 {
168 	for (unsigned int i = 0; i < ndev->res[res_type].resource_count; i += alignment) {
169 		if (!flow_nic_is_resource_used(ndev, res_type, i)) {
170 			flow_nic_mark_resource_used(ndev, res_type, i);
171 			ndev->res[res_type].ref[i] = 1;
172 			return i;
173 		}
174 	}
175 
176 	return -1;
177 }
178 
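/*
 * Allocate "num" contiguous elements of the given resource type, searching
 * only at indices that are multiples of "alignment". On success all "num"
 * elements are marked used with reference count 1 and the first index is
 * returned; -1 if no contiguous range is available.
 */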
179 int flow_nic_alloc_resource_config(struct flow_nic_dev *ndev, enum res_type_e res_type,
180 	unsigned int num, uint32_t alignment)
181 {
182 	unsigned int idx_offs;
183 
184 	for (unsigned int res_idx = 0; res_idx < ndev->res[res_type].resource_count - (num - 1);
185 		res_idx += alignment) {
186 		if (!flow_nic_is_resource_used(ndev, res_type, res_idx)) {
187 			for (idx_offs = 1; idx_offs < num; idx_offs++)
188 				if (flow_nic_is_resource_used(ndev, res_type, res_idx + idx_offs))
189 					break;
190 
191 			if (idx_offs < num)
192 				continue;
193 
194 			/* found a contiguous number of "num" res_type elements - allocate them */
195 			for (idx_offs = 0; idx_offs < num; idx_offs++) {
196 				flow_nic_mark_resource_used(ndev, res_type, res_idx + idx_offs);
197 				ndev->res[res_type].ref[res_idx + idx_offs] = 1;
198 			}
199 
200 			return res_idx;
201 		}
202 	}
203 
204 	return -1;
205 }
206 
207 void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int idx)
208 {
209 	flow_nic_mark_resource_unused(ndev, res_type, idx);
210 }
211 
212 int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int index)
213 {
214 	NT_LOG(DBG, FILTER, "Reference resource %s idx %i (before ref cnt %i)",
215 		dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
216 	assert(flow_nic_is_resource_used(ndev, res_type, index));
217 
218 	if (ndev->res[res_type].ref[index] == (uint32_t)-1)
219 		return -1;
220 
221 	ndev->res[res_type].ref[index]++;
222 	return 0;
223 }
224 
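/*
 * Drop one reference on a resource element and free it when the count
 * reaches zero. Returns 0 if the element was freed, non-zero if it is
 * still referenced.
 */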
225 int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int index)
226 {
227 	NT_LOG(DBG, FILTER, "De-reference resource %s idx %i (before ref cnt %i)",
228 		dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
229 	assert(flow_nic_is_resource_used(ndev, res_type, index));
230 	assert(ndev->res[res_type].ref[index]);
231 	/* deref */
232 	ndev->res[res_type].ref[index]--;
233 
234 	if (!ndev->res[res_type].ref[index])
235 		flow_nic_free_resource(ndev, res_type, index);
236 
237 	return !!ndev->res[res_type].ref[index];/* if 0 resource has been freed */
238 }
239 
240 /*
241  * Nic port/adapter lookup
242  */
243 
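/*
 * Look up the eth-port device registered for (adapter_no, port) by walking
 * the global NIC device list and the per-NIC eth device list; returns NULL
 * if not found. The global list is protected by base_mtx, which the caller
 * is assumed to hold.
 */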
244 static struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port)
245 {
246 	struct flow_nic_dev *nic_dev = dev_base;
247 
248 	while (nic_dev) {
249 		if (nic_dev->adapter_no == adapter_no)
250 			break;
251 
252 		nic_dev = nic_dev->next;
253 	}
254 
255 	if (!nic_dev)
256 		return NULL;
257 
258 	struct flow_eth_dev *dev = nic_dev->eth_base;
259 
260 	while (dev) {
261 		if (port == dev->port)
262 			return dev;
263 
264 		dev = dev->next;
265 	}
266 
267 	return NULL;
268 }
269 
270 static struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no)
271 {
272 	struct flow_nic_dev *ndev = dev_base;
273 
274 	while (ndev) {
275 		if (adapter_no == ndev->adapter_no)
276 			break;
277 
278 		ndev = ndev->next;
279 	}
280 
281 	return ndev;
282 }
283 /*
284  * Flow API
285  */
286 
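/*
 * The rte_flow entry points below are thin wrappers that dispatch to the
 * registered profile_inline implementation and only fail up front when the
 * profile_inline module has not been initialized.
 */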
287 static struct flow_handle *flow_create(struct flow_eth_dev *dev __rte_unused,
288 	const struct rte_flow_attr *attr __rte_unused,
289 	uint16_t forced_vlan_vid __rte_unused,
290 	uint16_t caller_id __rte_unused,
291 	const struct rte_flow_item item[] __rte_unused,
292 	const struct rte_flow_action action[] __rte_unused,
293 	struct rte_flow_error *error __rte_unused)
294 {
295 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
296 
297 	if (profile_inline_ops == NULL) {
298 		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
299 		return NULL;
300 	}
301 
302 	return profile_inline_ops->flow_create_profile_inline(dev, attr,
303 		forced_vlan_vid, caller_id, item, action, error);
304 }
305 
306 static int flow_destroy(struct flow_eth_dev *dev __rte_unused,
307 	struct flow_handle *flow __rte_unused,	struct rte_flow_error *error __rte_unused)
308 {
309 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
310 
311 	if (profile_inline_ops == NULL) {
312 		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
313 		return -1;
314 	}
315 
316 	return profile_inline_ops->flow_destroy_profile_inline(dev, flow, error);
317 }
318 
319 static int flow_flush(struct flow_eth_dev *dev, uint16_t caller_id, struct rte_flow_error *error)
320 {
321 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
322 
323 	if (profile_inline_ops == NULL) {
324 		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
325 		return -1;
326 	}
327 
328 	return profile_inline_ops->flow_flush_profile_inline(dev, caller_id, error);
329 }
330 
331 static int flow_actions_update(struct flow_eth_dev *dev,
332 	struct flow_handle *flow,
333 	const struct rte_flow_action action[],
334 	struct rte_flow_error *error)
335 {
336 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
337 
338 	if (profile_inline_ops == NULL) {
339 		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
340 		return -1;
341 	}
342 
343 	return profile_inline_ops->flow_actions_update_profile_inline(dev, flow, action, error);
344 }
345 
346 /*
347  * Device Management API
348  */
349 
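/*
 * Insert an eth-port device at the head of the NIC device's eth list.
 * The caller is assumed to hold ndev->mtx.
 */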
350 static void nic_insert_eth_port_dev(struct flow_nic_dev *ndev, struct flow_eth_dev *dev)
351 {
352 	dev->next = ndev->eth_base;
353 	ndev->eth_base = dev;
354 }
355 
356 static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev, struct flow_eth_dev *eth_dev)
357 {
358 	struct flow_eth_dev *dev = ndev->eth_base, *prev = NULL;
359 
360 	while (dev) {
361 		if (dev == eth_dev) {
362 			if (prev)
363 				prev->next = dev->next;
364 
365 			else
366 				ndev->eth_base = dev->next;
367 
368 			return 0;
369 		}
370 
371 		prev = dev;
372 		dev = dev->next;
373 	}
374 
375 	return -1;
376 }
377 
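/*
 * Reset a NIC device to its initial state: delete all eth-port devices,
 * destroy any flows that are still defined, tear down the inline-profile
 * flow management and the KM/KCC resource handles, and log any resource
 * elements that are unexpectedly still referenced or marked used.
 */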
378 static void flow_ndev_reset(struct flow_nic_dev *ndev)
379 {
380 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
381 
382 	if (profile_inline_ops == NULL) {
383 		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
384 		return;
385 	}
386 
387 	/* Delete all eth-port devices created on this NIC device */
388 	while (ndev->eth_base)
389 		flow_delete_eth_dev(ndev->eth_base);
390 
391 	/* Error check */
392 	while (ndev->flow_base) {
393 		NT_LOG(ERR, FILTER,
394 			"ERROR: Flows still defined but all eth-ports deleted. Flow %p",
395 			ndev->flow_base);
396 
397 		profile_inline_ops->flow_destroy_profile_inline(ndev->flow_base->dev,
398 			ndev->flow_base, NULL);
399 	}
400 
401 	profile_inline_ops->done_flow_management_of_ndev_profile_inline(ndev);
402 
403 	km_free_ndev_resource_management(&ndev->km_res_handle);
404 	kcc_free_ndev_resource_management(&ndev->kcc_res_handle);
405 
406 	ndev->flow_unique_id_counter = 0;
407 
408 	/*
409 	 * Free all resources that were allocated by default for this NIC dev.
410 	 * Not strictly needed, since the bitmaps are freed again shortly after;
411 	 * therefore this is only done in debug mode.
412 	 */
413 
414 	/* Check if all resources have been released */
415 	NT_LOG(DBG, FILTER, "Delete NIC DEV Adapter %i", ndev->adapter_no);
416 
417 	for (unsigned int i = 0; i < RES_COUNT; i++) {
418 		int err = 0;
419 		NT_LOG(DBG, FILTER, "RES state for: %s", dbg_res_descr[i]);
420 
421 		for (unsigned int ii = 0; ii < ndev->res[i].resource_count; ii++) {
422 			int ref = ndev->res[i].ref[ii];
423 			int used = flow_nic_is_resource_used(ndev, i, ii);
424 
425 			if (ref || used) {
426 				NT_LOG(DBG, FILTER, "  [%i]: ref cnt %i, used %i", ii, ref,
427 					used);
428 				err = 1;
429 			}
430 		}
431 
432 		if (err)
433 			NT_LOG(DBG, FILTER, "ERROR - some resources not freed");
434 	}
435 
436 }
437 
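/*
 * Delete an eth-port device: destroy all flows created on it, clear its
 * unmatched-packet (QSL UNMQ) setup and, for the inline profile, its RX
 * queue enables, then unlink it from the NIC device and free it.
 */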
438 int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
439 {
440 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
441 
442 	if (profile_inline_ops == NULL) {
443 		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
444 		return -1;
445 	}
446 
447 	struct flow_nic_dev *ndev = eth_dev->ndev;
448 
449 	if (!ndev) {
450 		/* Error invalid nic device */
451 		return -1;
452 	}
453 
454 	NT_LOG(DBG, FILTER, "Delete eth-port device %p, port %i", eth_dev, eth_dev->port);
455 
456 #ifdef FLOW_DEBUG
457 	ndev->be.iface->set_debug_mode(ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_WRITE);
458 #endif
459 
460 	/* delete all created flows from this device */
461 	rte_spinlock_lock(&ndev->mtx);
462 
463 	struct flow_handle *flow = ndev->flow_base;
464 
465 	while (flow) {
466 		if (flow->dev == eth_dev) {
467 			struct flow_handle *flow_next = flow->next;
468 			profile_inline_ops->flow_destroy_locked_profile_inline(eth_dev, flow,
469 				NULL);
470 			flow = flow_next;
471 
472 		} else {
473 			flow = flow->next;
474 		}
475 	}
476 
477 	/*
478 	 * remove unmatched queue if setup in QSL
479 	 * remove exception queue setting in QSL UNM
480 	 */
481 	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port, 0);
482 	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 0);
483 	hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1);
484 
485 	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
486 		for (int i = 0; i < eth_dev->num_queues; ++i) {
487 			uint32_t qen_value = 0;
488 			uint32_t queue_id = (uint32_t)eth_dev->rx_queue[i].hw_id;
489 
490 			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN, queue_id / 4, &qen_value);
491 			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN, queue_id / 4,
492 				qen_value & ~(1U << (queue_id % 4)));
493 			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
494 		}
495 	}
496 
497 #ifdef FLOW_DEBUG
498 	ndev->be.iface->set_debug_mode(ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
499 #endif
500 
501 	/* take eth_dev out of ndev list */
502 	if (nic_remove_eth_port_dev(ndev, eth_dev) != 0)
503 		NT_LOG(ERR, FILTER, "ERROR: eth_dev %p not found", eth_dev);
504 
505 	rte_spinlock_unlock(&ndev->mtx);
506 
507 	/* free eth_dev */
508 	free(eth_dev);
509 
510 	return 0;
511 }
512 
513 /*
514  * Flow API NIC Setup
515  * Flow backend creation function - register and initialize common backend API to FPA modules
516  */
517 
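/*
 * Allocate the free/used bitmap and the reference counter array for one
 * resource type in a single allocation; the ref array is placed right
 * after the byte-aligned bitmap.
 */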
518 static int init_resource_elements(struct flow_nic_dev *ndev, enum res_type_e res_type,
519 	uint32_t count)
520 {
521 	assert(ndev->res[res_type].alloc_bm == NULL);
522 	/* allocate bitmap and ref counter */
523 	ndev->res[res_type].alloc_bm =
524 		calloc(1, BIT_CONTAINER_8_ALIGN(count) + count * sizeof(uint32_t));
525 
526 	if (ndev->res[res_type].alloc_bm) {
527 		ndev->res[res_type].ref =
528 			(uint32_t *)&ndev->res[res_type].alloc_bm[BIT_CONTAINER_8_ALIGN(count)];
529 		ndev->res[res_type].resource_count = count;
530 		return 0;
531 	}
532 
533 	return -1;
534 }
535 
536 static void done_resource_elements(struct flow_nic_dev *ndev, enum res_type_e res_type)
537 {
538 	assert(ndev);
539 
540 	free(ndev->res[res_type].alloc_bm);
541 }
542 
543 static void list_insert_flow_nic(struct flow_nic_dev *ndev)
544 {
545 	rte_spinlock_lock(&base_mtx);
546 	ndev->next = dev_base;
547 	dev_base = ndev;
548 	rte_spinlock_unlock(&base_mtx);
549 }
550 
551 static int list_remove_flow_nic(struct flow_nic_dev *ndev)
552 {
553 	rte_spinlock_lock(&base_mtx);
554 	struct flow_nic_dev *nic_dev = dev_base, *prev = NULL;
555 
556 	while (nic_dev) {
557 		if (nic_dev == ndev) {
558 			if (prev)
559 				prev->next = nic_dev->next;
560 
561 			else
562 				dev_base = nic_dev->next;
563 
564 			rte_spinlock_unlock(&base_mtx);
565 			return 0;
566 		}
567 
568 		prev = nic_dev;
569 		nic_dev = nic_dev->next;
570 	}
571 
572 	rte_spinlock_unlock(&base_mtx);
573 	return -1;
574 }
575 
576 /*
577  * adapter_no       physical adapter no
578  * port_no          local port no
579  * alloc_rx_queues  number of rx-queues to allocate for this eth_dev
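 * queue_ids        hardware queue ids to use for the allocated rx-queues
 * rss_target_id    output: RSS target id for this device (-1 when none is set)
 * exception_path   for the inline profile, redirect otherwise discarded
 *                  (unmatched) packets on this port to rx-queue 0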
580  */
581 static struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no, uint32_t port_id,
582 	int alloc_rx_queues, struct flow_queue_id_s queue_ids[],
583 	int *rss_target_id, enum flow_eth_dev_profile flow_profile,
584 	uint32_t exception_path)
585 {
586 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
587 
588 	if (profile_inline_ops == NULL)
589 		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
590 
591 	int i;
592 	struct flow_eth_dev *eth_dev = NULL;
593 
594 	NT_LOG(DBG, FILTER,
595 		"Get eth-port adapter %i, port %i, port_id %u, rx queues %i, profile %i",
596 		adapter_no, port_no, port_id, alloc_rx_queues, flow_profile);
597 
598 	if (MAX_OUTPUT_DEST < FLOW_MAX_QUEUES) {
599 		assert(0);
600 		NT_LOG(ERR, FILTER,
601 			"ERROR: Internal array for multiple queues too small for API");
602 	}
603 
604 	rte_spinlock_lock(&base_mtx);
605 	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
606 
607 	if (!ndev) {
608 		/* Error - no flow api found on specified adapter */
609 		NT_LOG(ERR, FILTER, "ERROR: no flow interface registered for adapter %d",
610 			adapter_no);
611 		rte_spinlock_unlock(&base_mtx);
612 		return NULL;
613 	}
614 
615 	if (ndev->ports < ((uint16_t)port_no + 1)) {
616 		NT_LOG(ERR, FILTER, "ERROR: port exceeds supported port range for adapter");
617 		rte_spinlock_unlock(&base_mtx);
618 		return NULL;
619 	}
620 
621 	if ((alloc_rx_queues - 1) > FLOW_MAX_QUEUES) {	/* 0th is exception so +1 */
622 		NT_LOG(ERR, FILTER,
623 			"ERROR: Exceeds supported number of rx queues per eth device");
624 		rte_spinlock_unlock(&base_mtx);
625 		return NULL;
626 	}
627 
628 	/* don't accept multiple eth_dev's on same NIC and same port */
629 	eth_dev = nic_and_port_to_eth_dev(adapter_no, port_no);
630 
631 	if (eth_dev) {
632 		NT_LOG(DBG, FILTER, "Re-opening existing NIC port device: NIC DEV: %i Port %i",
633 			adapter_no, port_no);
634 		flow_delete_eth_dev(eth_dev);
635 		eth_dev = NULL;
636 	}
637 
638 	rte_spinlock_lock(&ndev->mtx);
639 
640 	eth_dev = calloc(1, sizeof(struct flow_eth_dev));
641 
642 	if (!eth_dev) {
643 		NT_LOG(ERR, FILTER, "ERROR: calloc failed");
644 		goto err_exit0;
645 	}
646 
647 	eth_dev->ndev = ndev;
648 	eth_dev->port = port_no;
649 	eth_dev->port_id = port_id;
650 
651 	/* First time: initialize flow management on this NIC */
652 	if (!ndev->flow_mgnt_prepared) {
653 		ndev->flow_profile = flow_profile;
654 
655 		/* Initialize modules if needed - recipe 0 is used as no-match and must be setup */
656 		if (profile_inline_ops != NULL &&
657 			profile_inline_ops->initialize_flow_management_of_ndev_profile_inline(ndev))
658 			goto err_exit0;
659 
660 	} else {
661 		/* check if same flow type is requested, otherwise fail */
662 		if (ndev->flow_profile != flow_profile) {
663 			NT_LOG(ERR, FILTER,
664 				"ERROR: Different flow types requested on same NIC device. Not supported.");
665 			goto err_exit0;
666 		}
667 	}
668 
669 	/* Allocate the requested queues in HW for this dev */
670 
671 	for (i = 0; i < alloc_rx_queues; i++) {
672 		eth_dev->rx_queue[i] = queue_ids[i];
673 
674 		if (i == 0 && (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE && exception_path)) {
675 			/*
676 			 * Init QSL UNM - unmatched - redirects otherwise discarded
677 			 * packets in QSL
678 			 */
679 			if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port,
680 				eth_dev->rx_queue[0].hw_id) < 0)
681 				goto err_exit0;
682 
683 			if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 1) < 0)
684 				goto err_exit0;
685 
686 			if (hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1) < 0)
687 				goto err_exit0;
688 		}
689 
690 		eth_dev->num_queues++;
691 	}
692 
693 	eth_dev->rss_target_id = -1;
694 
695 	if (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
696 		for (i = 0; i < eth_dev->num_queues; i++) {
697 			uint32_t qen_value = 0;
698 			uint32_t queue_id = (uint32_t)eth_dev->rx_queue[i].hw_id;
699 
700 			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN, queue_id / 4, &qen_value);
701 			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN, queue_id / 4,
702 				qen_value | (1 << (queue_id % 4)));
703 			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
704 		}
705 	}
706 
707 	*rss_target_id = eth_dev->rss_target_id;
708 
709 	nic_insert_eth_port_dev(ndev, eth_dev);
710 
711 	rte_spinlock_unlock(&ndev->mtx);
712 	rte_spinlock_unlock(&base_mtx);
713 	return eth_dev;
714 
715 err_exit0:
716 	rte_spinlock_unlock(&ndev->mtx);
717 	rte_spinlock_unlock(&base_mtx);
718 
719 	free(eth_dev);
720 
721 #ifdef FLOW_DEBUG
722 	ndev->be.iface->set_debug_mode(ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
723 #endif
724 
725 	NT_LOG(DBG, FILTER, "ERR in %s", __func__);
726 	return NULL;	/* Error exit */
727 }
728 
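/*
 * Create and register a flow_nic_dev for one adapter: bind the backend
 * interface, size the per-resource managers from the backend capabilities
 * and insert the device into the global NIC list.
 */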
729 struct flow_nic_dev *flow_api_create(uint8_t adapter_no, const struct flow_api_backend_ops *be_if,
730 	void *be_dev)
731 {
732 	(void)adapter_no;
733 
734 	if (!be_if || be_if->version != 1) {
735 		NT_LOG(DBG, FILTER, "ERR: %s", __func__);
736 		return NULL;
737 	}
738 
739 	struct flow_nic_dev *ndev = calloc(1, sizeof(struct flow_nic_dev));
740 
741 	if (!ndev) {
742 		NT_LOG(ERR, FILTER, "ERROR: calloc failed");
743 		return NULL;
744 	}
745 
746 	/*
747 	 * To dump module initialization writes use
748 	 * FLOW_BACKEND_DEBUG_MODE_WRITE
749 	 * then remember to set it ...NONE afterwards again
750 	 */
751 	be_if->set_debug_mode(be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
752 
753 	if (flow_api_backend_init(&ndev->be, be_if, be_dev) != 0)
754 		goto err_exit;
755 
756 	ndev->adapter_no = adapter_no;
757 
758 	ndev->ports = (uint16_t)((ndev->be.num_rx_ports > 256) ? 256 : ndev->be.num_rx_ports);
759 
760 	/*
761 	 * Free resources in NIC must be managed by this module
762 	 * Get resource sizes and create resource manager elements
763 	 */
764 	if (init_resource_elements(ndev, RES_QUEUE, ndev->be.max_queues))
765 		goto err_exit;
766 
767 	if (init_resource_elements(ndev, RES_CAT_CFN, ndev->be.cat.nb_cat_funcs))
768 		goto err_exit;
769 
770 	if (init_resource_elements(ndev, RES_CAT_COT, ndev->be.max_categories))
771 		goto err_exit;
772 
773 	if (init_resource_elements(ndev, RES_CAT_EXO, ndev->be.cat.nb_pm_ext))
774 		goto err_exit;
775 
776 	if (init_resource_elements(ndev, RES_CAT_LEN, ndev->be.cat.nb_len))
777 		goto err_exit;
778 
779 	if (init_resource_elements(ndev, RES_KM_FLOW_TYPE, ndev->be.cat.nb_flow_types))
780 		goto err_exit;
781 
782 	if (init_resource_elements(ndev, RES_KM_CATEGORY, ndev->be.km.nb_categories))
783 		goto err_exit;
784 
785 	if (init_resource_elements(ndev, RES_HSH_RCP, ndev->be.hsh.nb_rcp))
786 		goto err_exit;
787 
788 	if (init_resource_elements(ndev, RES_PDB_RCP, ndev->be.pdb.nb_pdb_rcp_categories))
789 		goto err_exit;
790 
791 	if (init_resource_elements(ndev, RES_QSL_RCP, ndev->be.qsl.nb_rcp_categories))
792 		goto err_exit;
793 
794 	if (init_resource_elements(ndev, RES_QSL_QST, ndev->be.qsl.nb_qst_entries))
795 		goto err_exit;
796 
797 	if (init_resource_elements(ndev, RES_SLC_LR_RCP, ndev->be.max_categories))
798 		goto err_exit;
799 
800 	if (init_resource_elements(ndev, RES_FLM_FLOW_TYPE, ndev->be.cat.nb_flow_types))
801 		goto err_exit;
802 
803 	if (init_resource_elements(ndev, RES_FLM_RCP, ndev->be.flm.nb_categories))
804 		goto err_exit;
805 
806 	if (init_resource_elements(ndev, RES_TPE_RCP, ndev->be.tpe.nb_rcp_categories))
807 		goto err_exit;
808 
809 	if (init_resource_elements(ndev, RES_TPE_EXT, ndev->be.tpe.nb_rpl_ext_categories))
810 		goto err_exit;
811 
812 	if (init_resource_elements(ndev, RES_TPE_RPL, ndev->be.tpe.nb_rpl_depth))
813 		goto err_exit;
814 
815 	if (init_resource_elements(ndev, RES_SCRUB_RCP, ndev->be.flm.nb_scrub_profiles))
816 		goto err_exit;
817 
818 	/* may need IPF, COR */
819 
820 	/* check that all defined resources have been initialized */
821 	for (int i = 0; i < RES_COUNT; i++)
822 		assert(ndev->res[i].alloc_bm);
823 
824 	rte_spinlock_init(&ndev->mtx);
825 	list_insert_flow_nic(ndev);
826 
827 	return ndev;
828 
829 err_exit:
830 
831 	if (ndev)
832 		flow_api_done(ndev);
833 
834 	NT_LOG(DBG, FILTER, "ERR: %s", __func__);
835 	return NULL;
836 }
837 
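/* Tear down a flow_nic_dev created by flow_api_create and free it. */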
838 int flow_api_done(struct flow_nic_dev *ndev)
839 {
840 	NT_LOG(DBG, FILTER, "FLOW API DONE");
841 
842 	if (ndev) {
843 		flow_ndev_reset(ndev);
844 
845 		/* delete resource management allocations for this ndev */
846 		for (int i = 0; i < RES_COUNT; i++)
847 			done_resource_elements(ndev, i);
848 
849 		flow_api_backend_done(&ndev->be);
850 		list_remove_flow_nic(ndev);
851 		free(ndev);
852 	}
853 
854 	return 0;
855 }
856 
857 void *flow_api_get_be_dev(struct flow_nic_dev *ndev)
858 {
859 	if (!ndev) {
860 		NT_LOG(DBG, FILTER, "ERR: %s", __func__);
861 		return NULL;
862 	}
863 
864 	return ndev->be.be_dev;
865 }
866 
867 /* Information for a given RSS type. */
868 struct rss_type_info {
869 	uint64_t rss_type;
870 	const char *str;
871 };
872 
873 static struct rss_type_info rss_to_string[] = {
874 	/* RTE_BIT64(2)   IPv4 dst + IPv4 src */
875 	RSS_TO_STRING(RTE_ETH_RSS_IPV4),
876 	/* RTE_BIT64(3)   IPv4 dst + IPv4 src + Identification of group of fragments  */
877 	RSS_TO_STRING(RTE_ETH_RSS_FRAG_IPV4),
878 	/* RTE_BIT64(4)   IPv4 dst + IPv4 src + L4 protocol */
879 	RSS_TO_STRING(RTE_ETH_RSS_NONFRAG_IPV4_TCP),
880 	/* RTE_BIT64(5)   IPv4 dst + IPv4 src + L4 protocol */
881 	RSS_TO_STRING(RTE_ETH_RSS_NONFRAG_IPV4_UDP),
882 	/* RTE_BIT64(6)   IPv4 dst + IPv4 src + L4 protocol */
883 	RSS_TO_STRING(RTE_ETH_RSS_NONFRAG_IPV4_SCTP),
884 	/* RTE_BIT64(7)   IPv4 dst + IPv4 src + L4 protocol */
885 	RSS_TO_STRING(RTE_ETH_RSS_NONFRAG_IPV4_OTHER),
886 	/*
887 	 * RTE_BIT64(14)  128-bits of L2 payload starting after src MAC, i.e. including optional
888 	 * VLAN tag and ethertype. Overrides all L3 and L4 flags at the same level, but inner
889 	 * L2 payload can be combined with outer S-VLAN and GTPU TEID flags.
890 	 */
891 	RSS_TO_STRING(RTE_ETH_RSS_L2_PAYLOAD),
892 	/* RTE_BIT64(18)  L4 dst + L4 src + L4 protocol - see comment of RTE_ETH_RSS_L4_CHKSUM */
893 	RSS_TO_STRING(RTE_ETH_RSS_PORT),
894 	/* RTE_BIT64(19)  Not supported */
895 	RSS_TO_STRING(RTE_ETH_RSS_VXLAN),
896 	/* RTE_BIT64(20)  Not supported */
897 	RSS_TO_STRING(RTE_ETH_RSS_GENEVE),
898 	/* RTE_BIT64(21)  Not supported */
899 	RSS_TO_STRING(RTE_ETH_RSS_NVGRE),
900 	/* RTE_BIT64(23)  GTP TEID - always from outer GTPU header */
901 	RSS_TO_STRING(RTE_ETH_RSS_GTPU),
902 	/* RTE_BIT64(24)  MAC dst + MAC src */
903 	RSS_TO_STRING(RTE_ETH_RSS_ETH),
904 	/* RTE_BIT64(25)  outermost VLAN ID + L4 protocol */
905 	RSS_TO_STRING(RTE_ETH_RSS_S_VLAN),
906 	/* RTE_BIT64(26)  innermost VLAN ID + L4 protocol */
907 	RSS_TO_STRING(RTE_ETH_RSS_C_VLAN),
908 	/* RTE_BIT64(27)  Not supported */
909 	RSS_TO_STRING(RTE_ETH_RSS_ESP),
910 	/* RTE_BIT64(28)  Not supported */
911 	RSS_TO_STRING(RTE_ETH_RSS_AH),
912 	/* RTE_BIT64(29)  Not supported */
913 	RSS_TO_STRING(RTE_ETH_RSS_L2TPV3),
914 	/* RTE_BIT64(30)  Not supported */
915 	RSS_TO_STRING(RTE_ETH_RSS_PFCP),
916 	/* RTE_BIT64(31)  Not supported */
917 	RSS_TO_STRING(RTE_ETH_RSS_PPPOE),
918 	/* RTE_BIT64(32)  Not supported */
919 	RSS_TO_STRING(RTE_ETH_RSS_ECPRI),
920 	/* RTE_BIT64(33)  Not supported */
921 	RSS_TO_STRING(RTE_ETH_RSS_MPLS),
922 	/* RTE_BIT64(34)  IPv4 Header checksum + L4 protocol */
923 	RSS_TO_STRING(RTE_ETH_RSS_IPV4_CHKSUM),
924 
925 	/*
926 	 * if combined with RTE_ETH_RSS_NONFRAG_IPV4_[TCP|UDP|SCTP] then
927 	 *   L4 protocol + chosen protocol header Checksum
928 	 * else
929 	 *   error
930 	 */
931 	/* RTE_BIT64(35) */
932 	RSS_TO_STRING(RTE_ETH_RSS_L4_CHKSUM),
933 #ifndef ANDROMEDA_DPDK_21_11
934 	/* RTE_BIT64(36)  Not supported */
935 	RSS_TO_STRING(RTE_ETH_RSS_L2TPV2),
936 #endif
937 
938 	{ RTE_BIT64(37), "unknown_RTE_BIT64(37)" },
939 	{ RTE_BIT64(38), "unknown_RTE_BIT64(38)" },
940 	{ RTE_BIT64(39), "unknown_RTE_BIT64(39)" },
941 	{ RTE_BIT64(40), "unknown_RTE_BIT64(40)" },
942 	{ RTE_BIT64(41), "unknown_RTE_BIT64(41)" },
943 	{ RTE_BIT64(42), "unknown_RTE_BIT64(42)" },
944 	{ RTE_BIT64(43), "unknown_RTE_BIT64(43)" },
945 	{ RTE_BIT64(44), "unknown_RTE_BIT64(44)" },
946 	{ RTE_BIT64(45), "unknown_RTE_BIT64(45)" },
947 	{ RTE_BIT64(46), "unknown_RTE_BIT64(46)" },
948 	{ RTE_BIT64(47), "unknown_RTE_BIT64(47)" },
949 	{ RTE_BIT64(48), "unknown_RTE_BIT64(48)" },
950 	{ RTE_BIT64(49), "unknown_RTE_BIT64(49)" },
951 
952 	/* RTE_BIT64(50)  outermost encapsulation */
953 	RSS_TO_STRING(RTE_ETH_RSS_LEVEL_OUTERMOST),
954 	/* RTE_BIT64(51)  innermost encapsulation */
955 	RSS_TO_STRING(RTE_ETH_RSS_LEVEL_INNERMOST),
956 
957 	/* RTE_BIT64(52)  Not supported */
958 	RSS_TO_STRING(RTE_ETH_RSS_L3_PRE96),
959 	/* RTE_BIT64(53)  Not supported */
960 	RSS_TO_STRING(RTE_ETH_RSS_L3_PRE64),
961 	/* RTE_BIT64(54)  Not supported */
962 	RSS_TO_STRING(RTE_ETH_RSS_L3_PRE56),
963 	/* RTE_BIT64(55)  Not supported */
964 	RSS_TO_STRING(RTE_ETH_RSS_L3_PRE48),
965 	/* RTE_BIT64(56)  Not supported */
966 	RSS_TO_STRING(RTE_ETH_RSS_L3_PRE40),
967 	/* RTE_BIT64(57)  Not supported */
968 	RSS_TO_STRING(RTE_ETH_RSS_L3_PRE32),
969 
970 	/* RTE_BIT64(58) */
971 	RSS_TO_STRING(RTE_ETH_RSS_L2_DST_ONLY),
972 	/* RTE_BIT64(59) */
973 	RSS_TO_STRING(RTE_ETH_RSS_L2_SRC_ONLY),
974 	/* RTE_BIT64(60) */
975 	RSS_TO_STRING(RTE_ETH_RSS_L4_DST_ONLY),
976 	/* RTE_BIT64(61) */
977 	RSS_TO_STRING(RTE_ETH_RSS_L4_SRC_ONLY),
978 	/* RTE_BIT64(62) */
979 	RSS_TO_STRING(RTE_ETH_RSS_L3_DST_ONLY),
980 	/* RTE_BIT64(63) */
981 	RSS_TO_STRING(RTE_ETH_RSS_L3_SRC_ONLY),
982 };
983 
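/*
 * Write the names of all RSS types present in hash_mask into str, each
 * preceded by "prefix". Returns 0 on success, -1 on invalid arguments or
 * if the buffer is too small. A minimal usage sketch (the buffer size is
 * an arbitrary example):
 *   char buf[256];
 *   sprint_nt_rss_mask(buf, sizeof(buf), " ", RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_PORT);
 */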
984 int sprint_nt_rss_mask(char *str, uint16_t str_len, const char *prefix, uint64_t hash_mask)
985 {
986 	if (str == NULL || str_len == 0)
987 		return -1;
988 
989 	memset(str, 0x0, str_len);
990 	uint16_t str_end = 0;
991 	const struct rss_type_info *start = rss_to_string;
992 
993 	for (const struct rss_type_info *p = start; p != start + ARRAY_SIZE(rss_to_string); ++p) {
994 		if (p->rss_type & hash_mask) {
995 			if (strlen(prefix) + strlen(p->str) < (size_t)(str_len - str_end)) {
996 				snprintf(str + str_end, str_len - str_end, "%s", prefix);
997 				str_end += strlen(prefix);
998 				snprintf(str + str_end, str_len - str_end, "%s", p->str);
999 				str_end += strlen(p->str);
1000 
1001 			} else {
1002 				return -1;
1003 			}
1004 		}
1005 	}
1006 
1007 	return 0;
1008 }
1009 
1010 /*
1011  * Hash
1012  */
1013 
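/*
 * Program HSH recipe "hsh_idx" with a predefined hashing scheme. Only
 * HASH_ALGO_5TUPLE sets up a real hash (IPv6 5-tuple with adaptive IPv4
 * masking); any other value leaves the recipe cleared, i.e. round-robin.
 */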
1014 int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx, enum flow_nic_hash_e algorithm)
1015 {
1016 	hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0);
1017 
1018 	switch (algorithm) {
1019 	case HASH_ALGO_5TUPLE:
1020 		/* create an IPv6 hash and enable the adaptive IPv4 mask bit */
1021 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE, hsh_idx, 0, 2);
1022 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0, DYN_FINAL_IP_DST);
1023 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0, -16);
1024 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0, DYN_FINAL_IP_DST);
1025 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0, 0);
1026 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx, 0, DYN_L4);
1027 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx, 0, 0);
1028 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_PE, hsh_idx, 0, 0);
1029 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_OFS, hsh_idx, 0, 0);
1030 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_P, hsh_idx, 0, 0);
1031 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0, 1);
1032 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0, 0xffffffff);
1033 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1, 0xffffffff);
1034 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2, 0xffffffff);
1035 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3, 0xffffffff);
1036 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4, 0xffffffff);
1037 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5, 0xffffffff);
1038 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6, 0xffffffff);
1039 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7, 0xffffffff);
1040 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 8, 0xffffffff);
1041 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 9, 0);
1042 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_SEED, hsh_idx, 0, 0xffffffff);
1043 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_VALID, hsh_idx, 0, 1);
1044 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0, HASH_5TUPLE);
1045 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK, hsh_idx, 0, 1);
1046 
1047 		NT_LOG(DBG, FILTER, "Set IPv6 5-tuple hasher with adaptive IPv4 hashing");
1048 		break;
1049 
1050 	default:
1051 	case HASH_ALGO_ROUND_ROBIN:
1052 		/* zero is round-robin */
1053 		break;
1054 	}
1055 
1056 	return 0;
1057 }
1058 
1059 static int flow_dev_dump(struct flow_eth_dev *dev,
1060 	struct flow_handle *flow,
1061 	uint16_t caller_id,
1062 	FILE *file,
1063 	struct rte_flow_error *error)
1064 {
1065 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
1066 
1067 	if (profile_inline_ops == NULL) {
1068 		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
1069 		return -1;
1070 	}
1071 
1072 	return profile_inline_ops->flow_dev_dump_profile_inline(dev, flow, caller_id, file, error);
1073 }
1074 
1075 int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
1076 	struct nt_eth_rss_conf rss_conf)
1077 {
1078 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
1079 
1080 	if (profile_inline_ops == NULL) {
1081 		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
1082 		return -1;
1083 	}
1084 
1085 	return profile_inline_ops->flow_nic_set_hasher_fields_inline(ndev, hsh_idx, rss_conf);
1086 }
1087 
1088 static int flow_get_aged_flows(struct flow_eth_dev *dev,
1089 	uint16_t caller_id,
1090 	void **context,
1091 	uint32_t nb_contexts,
1092 	struct rte_flow_error *error)
1093 {
1094 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
1095 
1096 	if (profile_inline_ops == NULL) {
1097 		NT_LOG_DBGX(ERR, FILTER, "profile_inline_ops uninitialized");
1098 		return -1;
1099 	}
1100 
1101 	if (nb_contexts > 0 && !context) {
1102 		error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
1103 		error->message = "rte_flow_get_aged_flows - empty context";
1104 		return -1;
1105 	}
1106 
1107 	return profile_inline_ops->flow_get_aged_flows_profile_inline(dev, caller_id, context,
1108 			nb_contexts, error);
1109 }
1110 
1111 static int flow_info_get(struct flow_eth_dev *dev, uint8_t caller_id,
1112 	struct rte_flow_port_info *port_info, struct rte_flow_queue_info *queue_info,
1113 	struct rte_flow_error *error)
1114 {
1115 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
1116 
1117 	if (profile_inline_ops == NULL) {
1118 		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
1119 		return -1;
1120 	}
1121 
1122 	return profile_inline_ops->flow_info_get_profile_inline(dev, caller_id, port_info,
1123 			queue_info, error);
1124 }
1125 
1126 static int flow_configure(struct flow_eth_dev *dev, uint8_t caller_id,
1127 	const struct rte_flow_port_attr *port_attr, uint16_t nb_queue,
1128 	const struct rte_flow_queue_attr *queue_attr[], struct rte_flow_error *error)
1129 {
1130 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
1131 
1132 	if (profile_inline_ops == NULL) {
1133 		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
1134 		return -1;
1135 	}
1136 
1137 	return profile_inline_ops->flow_configure_profile_inline(dev, caller_id, port_attr,
1138 			nb_queue, queue_attr, error);
1139 }
1140 
1141 /*
1142  * Flow Asynchronous operation API
1143  */
1144 
1145 static struct flow_pattern_template *
1146 flow_pattern_template_create(struct flow_eth_dev *dev,
1147 	const struct rte_flow_pattern_template_attr *template_attr, uint16_t caller_id,
1148 	const struct rte_flow_item pattern[], struct rte_flow_error *error)
1149 {
1150 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
1151 
1152 	if (profile_inline_ops == NULL) {
1153 		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
1154 		return NULL;
1155 	}
1156 
1157 	return profile_inline_ops->flow_pattern_template_create_profile_inline(dev, template_attr,
1158 		caller_id, pattern, error);
1159 }
1160 
1161 static int flow_pattern_template_destroy(struct flow_eth_dev *dev,
1162 	struct flow_pattern_template *pattern_template,
1163 	struct rte_flow_error *error)
1164 {
1165 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
1166 
1167 	if (profile_inline_ops == NULL) {
1168 		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
1169 		return -1;
1170 	}
1171 
1172 	return profile_inline_ops->flow_pattern_template_destroy_profile_inline(dev,
1173 			pattern_template,
1174 			error);
1175 }
1176 
1177 static struct flow_actions_template *
1178 flow_actions_template_create(struct flow_eth_dev *dev,
1179 	const struct rte_flow_actions_template_attr *template_attr, uint16_t caller_id,
1180 	const struct rte_flow_action actions[], const struct rte_flow_action masks[],
1181 	struct rte_flow_error *error)
1182 {
1183 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
1184 
1185 	if (profile_inline_ops == NULL) {
1186 		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
1187 		return NULL;
1188 	}
1189 
1190 	return profile_inline_ops->flow_actions_template_create_profile_inline(dev, template_attr,
1191 		caller_id, actions, masks, error);
1192 }
1193 
1194 static int flow_actions_template_destroy(struct flow_eth_dev *dev,
1195 	struct flow_actions_template *actions_template,
1196 	struct rte_flow_error *error)
1197 {
1198 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
1199 
1200 	if (profile_inline_ops == NULL) {
1201 		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
1202 		return -1;
1203 	}
1204 
1205 	return profile_inline_ops->flow_actions_template_destroy_profile_inline(dev,
1206 			actions_template,
1207 			error);
1208 }
1209 
1210 static struct flow_template_table *flow_template_table_create(struct flow_eth_dev *dev,
1211 	const struct rte_flow_template_table_attr *table_attr, uint16_t forced_vlan_vid,
1212 	uint16_t caller_id, struct flow_pattern_template *pattern_templates[],
1213 	uint8_t nb_pattern_templates, struct flow_actions_template *actions_templates[],
1214 	uint8_t nb_actions_templates, struct rte_flow_error *error)
1215 {
1216 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
1217 
1218 	if (profile_inline_ops == NULL) {
1219 		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
1220 		return NULL;
1221 	}
1222 
1223 	return profile_inline_ops->flow_template_table_create_profile_inline(dev, table_attr,
1224 		forced_vlan_vid, caller_id, pattern_templates, nb_pattern_templates,
1225 		actions_templates, nb_actions_templates, error);
1226 }
1227 
1228 static int flow_template_table_destroy(struct flow_eth_dev *dev,
1229 	struct flow_template_table *template_table,
1230 	struct rte_flow_error *error)
1231 {
1232 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
1233 
1234 	if (profile_inline_ops == NULL) {
1235 		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
1236 		return -1;
1237 	}
1238 
1239 	return profile_inline_ops->flow_template_table_destroy_profile_inline(dev, template_table,
1240 			error);
1241 }
1242 
1243 static struct flow_handle *
1244 flow_async_create(struct flow_eth_dev *dev, uint32_t queue_id,
1245 	const struct rte_flow_op_attr *op_attr, struct flow_template_table *template_table,
1246 	const struct rte_flow_item pattern[], uint8_t pattern_template_index,
1247 	const struct rte_flow_action actions[], uint8_t actions_template_index, void *user_data,
1248 	struct rte_flow_error *error)
1249 {
1250 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
1251 
1252 	if (profile_inline_ops == NULL) {
1253 		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
1254 		return NULL;
1255 	}
1256 
1257 	return profile_inline_ops->flow_async_create_profile_inline(dev, queue_id, op_attr,
1258 			template_table, pattern, pattern_template_index, actions,
1259 			actions_template_index, user_data, error);
1260 }
1261 
1262 static int flow_async_destroy(struct flow_eth_dev *dev, uint32_t queue_id,
1263 	const struct rte_flow_op_attr *op_attr, struct flow_handle *flow,
1264 	void *user_data, struct rte_flow_error *error)
1265 {
1266 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
1267 
1268 	if (profile_inline_ops == NULL) {
1269 		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
1270 		return -1;
1271 	}
1272 
1273 	return profile_inline_ops->flow_async_destroy_profile_inline(dev, queue_id, op_attr, flow,
1274 			user_data, error);
1275 }
1276 int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size)
1277 {
1278 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
1279 
1280 	if (profile_inline_ops == NULL)
1281 		return -1;
1282 
1283 	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE)
1284 		return profile_inline_ops->flow_get_flm_stats_profile_inline(ndev, data, size);
1285 
1286 	return -1;
1287 }
1288 
1289 static const struct flow_filter_ops ops = {
1290 	.flow_filter_init = flow_filter_init,
1291 	.flow_filter_done = flow_filter_done,
1292 	/*
1293 	 * Device Management API
1294 	 */
1295 	.flow_get_eth_dev = flow_get_eth_dev,
1296 	/*
1297 	 * NT Flow API
1298 	 */
1299 	.flow_create = flow_create,
1300 	.flow_destroy = flow_destroy,
1301 	.flow_flush = flow_flush,
1302 	.flow_actions_update = flow_actions_update,
1303 	.flow_dev_dump = flow_dev_dump,
1304 	.flow_get_flm_stats = flow_get_flm_stats,
1305 	.flow_get_aged_flows = flow_get_aged_flows,
1306 
1307 	/*
1308 	 * NT Flow asynchronous operations API
1309 	 */
1310 	.flow_info_get = flow_info_get,
1311 	.flow_configure = flow_configure,
1312 	.flow_pattern_template_create = flow_pattern_template_create,
1313 	.flow_pattern_template_destroy = flow_pattern_template_destroy,
1314 	.flow_actions_template_create = flow_actions_template_create,
1315 	.flow_actions_template_destroy = flow_actions_template_destroy,
1316 	.flow_template_table_create = flow_template_table_create,
1317 	.flow_template_table_destroy = flow_template_table_destroy,
1318 	.flow_async_create = flow_async_create,
1319 	.flow_async_destroy = flow_async_destroy,
1320 
1321 	/*
1322 	 * Other
1323 	 */
1324 	 .hw_mod_hsh_rcp_flush = hw_mod_hsh_rcp_flush,
1325 	 .flow_nic_set_hasher_fields = flow_nic_set_hasher_fields,
1326 };
1327 
1328 void init_flow_filter(void)
1329 {
1330 	register_flow_filter_ops(&ops);
1331 }
1332