xref: /dpdk/drivers/net/ntnic/nthw/flow_api/flow_api.c (revision b462f2737eb08b07b84da4204fbd1c9b9ba00b2d)
/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */
#include "rte_spinlock.h"
#include "ntlog.h"
#include "nt_util.h"

#include "flow_api_engine.h"
#include "flow_api_nic_setup.h"
#include "ntnic_mod_reg.h"

#include "flow_api.h"
#include "flow_filter.h"

#define RSS_TO_STRING(name) \
	{                \
		name, #name   \
	}

const char *dbg_res_descr[] = {
	/* RES_QUEUE */ "RES_QUEUE",
	/* RES_CAT_CFN */ "RES_CAT_CFN",
	/* RES_CAT_COT */ "RES_CAT_COT",
	/* RES_CAT_EXO */ "RES_CAT_EXO",
	/* RES_CAT_LEN */ "RES_CAT_LEN",
	/* RES_KM_FLOW_TYPE */ "RES_KM_FLOW_TYPE",
	/* RES_KM_CATEGORY */ "RES_KM_CATEGORY",
	/* RES_HSH_RCP */ "RES_HSH_RCP",
	/* RES_PDB_RCP */ "RES_PDB_RCP",
	/* RES_QSL_RCP */ "RES_QSL_RCP",
	/* RES_QSL_LTX */ "RES_QSL_LTX",
	/* RES_QSL_QST */ "RES_QSL_QST",
	/* RES_SLC_LR_RCP */ "RES_SLC_LR_RCP",
	/* RES_FLM_FLOW_TYPE */ "RES_FLM_FLOW_TYPE",
	/* RES_FLM_RCP */ "RES_FLM_RCP",
	/* RES_TPE_RCP */ "RES_TPE_RCP",
	/* RES_TPE_EXT */ "RES_TPE_EXT",
	/* RES_TPE_RPL */ "RES_TPE_RPL",
	/* RES_COUNT */ "RES_COUNT",
	/* RES_INVALID */ "RES_INVALID"
};

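/*
 * Global list of NIC devices registered with the flow API, one entry per
 * adapter; list manipulation and lookups are serialized by base_mtx.
 */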
static struct flow_nic_dev *dev_base;
static rte_spinlock_t base_mtx = RTE_SPINLOCK_INITIALIZER;

/*
 * Error handling
 */

static const struct {
	const char *message;
} err_msg[] = {
	/* 00 */ { "Operation successfully completed" },
	/* 01 */ { "Operation failed" },
	/* 02 */ { "Memory allocation failed" },
	/* 03 */ { "Too many output destinations" },
	/* 04 */ { "Too many output queues for RSS" },
	/* 05 */ { "The VLAN TPID specified is not supported" },
	/* 06 */ { "The VxLan Push header specified is not accepted" },
	/* 07 */ { "While interpreting VxLan Pop action, could not find a destination port" },
	/* 08 */ { "Failed in creating a HW-internal VTEP port" },
	/* 09 */ { "Too many VLAN tag matches" },
	/* 10 */ { "IPv6 invalid header specified" },
	/* 11 */ { "Too many tunnel ports. HW limit reached" },
	/* 12 */ { "Unknown or unsupported flow match element received" },
	/* 13 */ { "Match failed because of HW limitations" },
	/* 14 */ { "Match failed because of HW resource limitations" },
	/* 15 */ { "Match failed because of too complex element definitions" },
	/* 16 */ { "Action failed due to too many output destinations" },
	/* 17 */ { "Action Output failed, due to HW resource exhaustion" },
	/* 18 */ { "Push Tunnel Header action cannot output to multiple destination queues" },
	/* 19 */ { "Inline action HW resource exhaustion" },
	/* 20 */ { "Action retransmit/recirculate HW resource exhaustion" },
	/* 21 */ { "Flow counter HW resource exhaustion" },
	/* 22 */ { "Internal HW resource exhaustion to handle Actions" },
	/* 23 */ { "Internal HW QSL compare failed" },
	/* 24 */ { "Internal CAT CFN reuse failed" },
	/* 25 */ { "Match variations too complex" },
	/* 26 */ { "Match failed because of CAM/TCAM full" },
	/* 27 */ { "Internal creation of a tunnel end point port failed" },
	/* 28 */ { "Unknown or unsupported flow action received" },
	/* 29 */ { "Removing flow failed" },
};

void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct rte_flow_error *error)
{
	assert(msg < ERR_MSG_NO_MSG);

	if (error) {
		error->message = err_msg[msg].message;
		error->type = (msg == ERR_SUCCESS) ? RTE_FLOW_ERROR_TYPE_NONE :
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
	}
}

/*
 * Resources
 */

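/*
 * Allocate a single resource of the given type. The free/used bitmap is
 * scanned in steps of 'alignment'; the first free index found is marked used,
 * its reference count is set to 1 and the index is returned, or -1 when no
 * free element exists.
 */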
int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
	uint32_t alignment)
{
	for (unsigned int i = 0; i < ndev->res[res_type].resource_count; i += alignment) {
		if (!flow_nic_is_resource_used(ndev, res_type, i)) {
			flow_nic_mark_resource_used(ndev, res_type, i);
			ndev->res[res_type].ref[i] = 1;
			return i;
		}
	}

	return -1;
}

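/*
 * Allocate 'num' contiguous resources of the given type, scanning the bitmap
 * in steps of 'alignment'. On success all 'num' elements are marked used with
 * a reference count of 1 and the first index is returned, otherwise -1.
 */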
int flow_nic_alloc_resource_config(struct flow_nic_dev *ndev, enum res_type_e res_type,
	unsigned int num, uint32_t alignment)
{
	unsigned int idx_offs;

	for (unsigned int res_idx = 0; res_idx < ndev->res[res_type].resource_count - (num - 1);
		res_idx += alignment) {
		if (!flow_nic_is_resource_used(ndev, res_type, res_idx)) {
			for (idx_offs = 1; idx_offs < num; idx_offs++)
				if (flow_nic_is_resource_used(ndev, res_type, res_idx + idx_offs))
					break;

			if (idx_offs < num)
				continue;

			/* found a contiguous number of "num" res_type elements - allocate them */
			for (idx_offs = 0; idx_offs < num; idx_offs++) {
				flow_nic_mark_resource_used(ndev, res_type, res_idx + idx_offs);
				ndev->res[res_type].ref[res_idx + idx_offs] = 1;
			}

			return res_idx;
		}
	}

	return -1;
}

void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int idx)
{
	flow_nic_mark_resource_unused(ndev, res_type, idx);
}

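/*
 * Take an extra reference on an already allocated resource. Fails (-1) if the
 * reference counter would overflow; flow_nic_deref_resource() below drops a
 * reference and frees the element once the counter reaches zero.
 */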
int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int index)
{
	NT_LOG(DBG, FILTER, "Reference resource %s idx %i (before ref cnt %i)",
		dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
	assert(flow_nic_is_resource_used(ndev, res_type, index));

	if (ndev->res[res_type].ref[index] == (uint32_t)-1)
		return -1;

	ndev->res[res_type].ref[index]++;
	return 0;
}

int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int index)
{
	NT_LOG(DBG, FILTER, "De-reference resource %s idx %i (before ref cnt %i)",
		dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
	assert(flow_nic_is_resource_used(ndev, res_type, index));
	assert(ndev->res[res_type].ref[index]);
	/* deref */
	ndev->res[res_type].ref[index]--;

	if (!ndev->res[res_type].ref[index])
		flow_nic_free_resource(ndev, res_type, index);

	return !!ndev->res[res_type].ref[index];	/* if 0 resource has been freed */
}

/*
 * Nic port/adapter lookup
 */

static struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port)
{
	struct flow_nic_dev *nic_dev = dev_base;

	while (nic_dev) {
		if (nic_dev->adapter_no == adapter_no)
			break;

		nic_dev = nic_dev->next;
	}

	if (!nic_dev)
		return NULL;

	struct flow_eth_dev *dev = nic_dev->eth_base;

	while (dev) {
		if (port == dev->port)
			return dev;

		dev = dev->next;
	}

	return NULL;
}

static struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no)
{
	struct flow_nic_dev *ndev = dev_base;

	while (ndev) {
		if (adapter_no == ndev->adapter_no)
			break;

		ndev = ndev->next;
	}

	return ndev;
}

/*
 * Flow API
 */

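/*
 * The flow operations below all delegate to the profile_inline implementation
 * obtained through get_profile_inline_ops(). If that module has not been
 * registered, each operation fails gracefully after logging an error.
 */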
static struct flow_handle *flow_create(struct flow_eth_dev *dev __rte_unused,
	const struct rte_flow_attr *attr __rte_unused,
	uint16_t forced_vlan_vid __rte_unused,
	uint16_t caller_id __rte_unused,
	const struct rte_flow_item item[] __rte_unused,
	const struct rte_flow_action action[] __rte_unused,
	struct rte_flow_error *error __rte_unused)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL) {
		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
		return NULL;
	}

	return profile_inline_ops->flow_create_profile_inline(dev, attr,
		forced_vlan_vid, caller_id, item, action, error);
}

static int flow_destroy(struct flow_eth_dev *dev __rte_unused,
	struct flow_handle *flow __rte_unused, struct rte_flow_error *error __rte_unused)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL) {
		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
		return -1;
	}

	return profile_inline_ops->flow_destroy_profile_inline(dev, flow, error);
}

static int flow_flush(struct flow_eth_dev *dev, uint16_t caller_id, struct rte_flow_error *error)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL) {
		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
		return -1;
	}

	return profile_inline_ops->flow_flush_profile_inline(dev, caller_id, error);
}

static int flow_actions_update(struct flow_eth_dev *dev,
	struct flow_handle *flow,
	const struct rte_flow_action action[],
	struct rte_flow_error *error)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL) {
		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
		return -1;
	}

	return profile_inline_ops->flow_actions_update_profile_inline(dev, flow, action, error);
}

/*
 * Device Management API
 */

static void nic_insert_eth_port_dev(struct flow_nic_dev *ndev, struct flow_eth_dev *dev)
{
	dev->next = ndev->eth_base;
	ndev->eth_base = dev;
}

static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev, struct flow_eth_dev *eth_dev)
{
	struct flow_eth_dev *dev = ndev->eth_base, *prev = NULL;

	while (dev) {
		if (dev == eth_dev) {
			if (prev)
				prev->next = dev->next;

			else
				ndev->eth_base = dev->next;

			return 0;
		}

		prev = dev;
		dev = dev->next;
	}

	return -1;
}

static void flow_ndev_reset(struct flow_nic_dev *ndev)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL) {
		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
		return;
	}

	/* Delete all eth-port devices created on this NIC device */
	while (ndev->eth_base)
		flow_delete_eth_dev(ndev->eth_base);

	/* Error check */
	while (ndev->flow_base) {
		NT_LOG(ERR, FILTER,
			"ERROR : Flows still defined but all eth-ports deleted. Flow %p",
			ndev->flow_base);

		profile_inline_ops->flow_destroy_profile_inline(ndev->flow_base->dev,
			ndev->flow_base, NULL);
	}

	profile_inline_ops->done_flow_management_of_ndev_profile_inline(ndev);

	km_free_ndev_resource_management(&ndev->km_res_handle);
	kcc_free_ndev_resource_management(&ndev->kcc_res_handle);

	ndev->flow_unique_id_counter = 0;

	/*
	 * Check that all resources allocated by default for this NIC device have
	 * been released. Not strictly required, since the bitmaps are freed shortly
	 * afterwards in flow_api_done(), so this is mainly a debugging aid.
	 */
	NT_LOG(DBG, FILTER, "Delete NIC DEV Adaptor %i", ndev->adapter_no);

	for (unsigned int i = 0; i < RES_COUNT; i++) {
		int err = 0;
		NT_LOG(DBG, FILTER, "RES state for: %s", dbg_res_descr[i]);

		for (unsigned int ii = 0; ii < ndev->res[i].resource_count; ii++) {
			int ref = ndev->res[i].ref[ii];
			int used = flow_nic_is_resource_used(ndev, i, ii);

			if (ref || used) {
				NT_LOG(DBG, FILTER, "  [%i]: ref cnt %i, used %i", ii, ref,
					used);
				err = 1;
			}
		}

		if (err)
			NT_LOG(DBG, FILTER, "ERROR - some resources not freed");
	}
}

int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL) {
		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
		return -1;
	}

	struct flow_nic_dev *ndev = eth_dev->ndev;

	if (!ndev) {
		/* Error invalid nic device */
		return -1;
	}

	NT_LOG(DBG, FILTER, "Delete eth-port device %p, port %i", eth_dev, eth_dev->port);

#ifdef FLOW_DEBUG
	ndev->be.iface->set_debug_mode(ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_WRITE);
#endif

	/* delete all created flows from this device */
	rte_spinlock_lock(&ndev->mtx);

	struct flow_handle *flow = ndev->flow_base;

	while (flow) {
		if (flow->dev == eth_dev) {
			struct flow_handle *flow_next = flow->next;
			profile_inline_ops->flow_destroy_locked_profile_inline(eth_dev, flow,
				NULL);
			flow = flow_next;

		} else {
			flow = flow->next;
		}
	}

	/*
	 * remove unmatched queue if setup in QSL
	 * remove exception queue setting in QSL UNM
	 */
	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port, 0);
	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 0);
	hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1);

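	/*
	 * Disable this eth_dev's RX queues in QSL. Each QSL QEN entry holds
	 * enable bits for four queues, hence the queue_id / 4 indexing and the
	 * (queue_id % 4) bit manipulation below.
	 */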
	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
		for (int i = 0; i < eth_dev->num_queues; ++i) {
			uint32_t qen_value = 0;
			uint32_t queue_id = (uint32_t)eth_dev->rx_queue[i].hw_id;

			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN, queue_id / 4, &qen_value);
			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN, queue_id / 4,
				qen_value & ~(1U << (queue_id % 4)));
			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
		}
	}

#ifdef FLOW_DEBUG
	ndev->be.iface->set_debug_mode(ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
#endif

	/* take eth_dev out of ndev list */
	if (nic_remove_eth_port_dev(ndev, eth_dev) != 0)
		NT_LOG(ERR, FILTER, "ERROR : eth_dev %p not found", eth_dev);

	rte_spinlock_unlock(&ndev->mtx);

	/* free eth_dev */
	free(eth_dev);

	return 0;
}

/*
 * Flow API NIC Setup
 * Flow backend creation function - register and initialize common backend API to FPA modules
 */

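/*
 * Set up the book-keeping for one resource type: a used/free bitmap followed
 * by a uint32_t reference counter per element, carved out of a single
 * allocation (the ref array starts right after the bitmap bytes).
 */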
static int init_resource_elements(struct flow_nic_dev *ndev, enum res_type_e res_type,
	uint32_t count)
{
	assert(ndev->res[res_type].alloc_bm == NULL);
	/* allocate bitmap and ref counter */
	ndev->res[res_type].alloc_bm =
		calloc(1, BIT_CONTAINER_8_ALIGN(count) + count * sizeof(uint32_t));

	if (ndev->res[res_type].alloc_bm) {
		ndev->res[res_type].ref =
			(uint32_t *)&ndev->res[res_type].alloc_bm[BIT_CONTAINER_8_ALIGN(count)];
		ndev->res[res_type].resource_count = count;
		return 0;
	}

	return -1;
}

static void done_resource_elements(struct flow_nic_dev *ndev, enum res_type_e res_type)
{
	assert(ndev);

	if (ndev->res[res_type].alloc_bm)
		free(ndev->res[res_type].alloc_bm);
}

static void list_insert_flow_nic(struct flow_nic_dev *ndev)
{
	rte_spinlock_lock(&base_mtx);
	ndev->next = dev_base;
	dev_base = ndev;
	rte_spinlock_unlock(&base_mtx);
}

static int list_remove_flow_nic(struct flow_nic_dev *ndev)
{
	rte_spinlock_lock(&base_mtx);
	struct flow_nic_dev *nic_dev = dev_base, *prev = NULL;

	while (nic_dev) {
		if (nic_dev == ndev) {
			if (prev)
				prev->next = nic_dev->next;

			else
				dev_base = nic_dev->next;

			rte_spinlock_unlock(&base_mtx);
			return 0;
		}

		prev = nic_dev;
		nic_dev = nic_dev->next;
	}

	rte_spinlock_unlock(&base_mtx);
	return -1;
}

/*
 * adapter_no       physical adapter no
 * port_no          local port no
 * alloc_rx_queues  number of rx-queues to allocate for this eth_dev
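 * port_id          caller-supplied port id stored on the eth_dev
 * queue_ids        hw queue ids to assign; entry 0 doubles as the exception
 *                  (unmatched packet) queue when exception_path is set
 * rss_target_id    output: the eth_dev RSS target id (-1 when none is assigned)
 * flow_profile     requested profile; must match a profile already active on the NIC
 * exception_path   redirect otherwise discarded (unmatched) packets to queue_ids[0]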
 */
static struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no, uint32_t port_id,
	int alloc_rx_queues, struct flow_queue_id_s queue_ids[],
	int *rss_target_id, enum flow_eth_dev_profile flow_profile,
	uint32_t exception_path)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL)
		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);

	int i;
	struct flow_eth_dev *eth_dev = NULL;

	NT_LOG(DBG, FILTER,
		"Get eth-port adapter %i, port %i, port_id %u, rx queues %i, profile %i",
		adapter_no, port_no, port_id, alloc_rx_queues, flow_profile);

	if (MAX_OUTPUT_DEST < FLOW_MAX_QUEUES) {
		assert(0);
		NT_LOG(ERR, FILTER,
			"ERROR: Internal array for multiple queues too small for API");
	}

	rte_spinlock_lock(&base_mtx);
	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);

	if (!ndev) {
		/* Error - no flow api found on specified adapter */
		NT_LOG(ERR, FILTER, "ERROR: no flow interface registered for adapter %d",
			adapter_no);
		rte_spinlock_unlock(&base_mtx);
		return NULL;
	}

	if (ndev->ports < ((uint16_t)port_no + 1)) {
		NT_LOG(ERR, FILTER, "ERROR: port exceeds supported port range for adapter");
		rte_spinlock_unlock(&base_mtx);
		return NULL;
	}

	if ((alloc_rx_queues - 1) > FLOW_MAX_QUEUES) {	/* 0th is exception so +1 */
		NT_LOG(ERR, FILTER,
			"ERROR: Exceeds supported number of rx queues per eth device");
		rte_spinlock_unlock(&base_mtx);
		return NULL;
	}

	/* don't accept multiple eth_dev's on same NIC and same port */
	eth_dev = nic_and_port_to_eth_dev(adapter_no, port_no);

	if (eth_dev) {
		NT_LOG(DBG, FILTER, "Re-opening existing NIC port device: NIC DEV: %i Port %i",
			adapter_no, port_no);
		flow_delete_eth_dev(eth_dev);
		eth_dev = NULL;
	}

	rte_spinlock_lock(&ndev->mtx);

	eth_dev = calloc(1, sizeof(struct flow_eth_dev));

	if (!eth_dev) {
		NT_LOG(ERR, FILTER, "ERROR: calloc failed");
		goto err_exit0;
	}

	eth_dev->ndev = ndev;
	eth_dev->port = port_no;
	eth_dev->port_id = port_id;

	/* The first time this NIC device is opened, flow management must be initialized */
	if (!ndev->flow_mgnt_prepared) {
		ndev->flow_profile = flow_profile;

		/* Initialize modules if needed - recipe 0 is used as no-match and must be set up */
		if (profile_inline_ops != NULL &&
			profile_inline_ops->initialize_flow_management_of_ndev_profile_inline(ndev))
			goto err_exit0;

	} else {
		/* check if same flow type is requested, otherwise fail */
		if (ndev->flow_profile != flow_profile) {
			NT_LOG(ERR, FILTER,
				"ERROR: Different flow types requested on same NIC device. Not supported.");
			goto err_exit0;
		}
	}

	/* Allocate the requested queues in HW for this dev */

	for (i = 0; i < alloc_rx_queues; i++) {
		eth_dev->rx_queue[i] = queue_ids[i];

		if (i == 0 && (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE && exception_path)) {
			/*
			 * Init QSL UNM - unmatched - redirects otherwise discarded
			 * packets in QSL
			 */
			if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port,
				eth_dev->rx_queue[0].hw_id) < 0)
				goto err_exit0;

			if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 1) < 0)
				goto err_exit0;

			if (hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1) < 0)
				goto err_exit0;
		}

		eth_dev->num_queues++;
	}

	eth_dev->rss_target_id = -1;

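	/*
	 * For the inline profile, enable each allocated RX queue in QSL QEN
	 * (four enable bits per entry); flow_delete_eth_dev() clears these bits
	 * again when the port is torn down.
	 */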
	if (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
		for (i = 0; i < eth_dev->num_queues; i++) {
			uint32_t qen_value = 0;
			uint32_t queue_id = (uint32_t)eth_dev->rx_queue[i].hw_id;

			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN, queue_id / 4, &qen_value);
			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN, queue_id / 4,
				qen_value | (1 << (queue_id % 4)));
			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
		}
	}

	*rss_target_id = eth_dev->rss_target_id;

	nic_insert_eth_port_dev(ndev, eth_dev);

	rte_spinlock_unlock(&ndev->mtx);
	rte_spinlock_unlock(&base_mtx);
	return eth_dev;

err_exit0:
	rte_spinlock_unlock(&ndev->mtx);
	rte_spinlock_unlock(&base_mtx);

	if (eth_dev)
		free(eth_dev);

#ifdef FLOW_DEBUG
	ndev->be.iface->set_debug_mode(ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
#endif

	NT_LOG(DBG, FILTER, "ERR in %s", __func__);
	return NULL;	/* Error exit */
}

struct flow_nic_dev *flow_api_create(uint8_t adapter_no, const struct flow_api_backend_ops *be_if,
	void *be_dev)
{
	(void)adapter_no;

	if (!be_if || be_if->version != 1) {
		NT_LOG(DBG, FILTER, "ERR: %s", __func__);
		return NULL;
	}

	struct flow_nic_dev *ndev = calloc(1, sizeof(struct flow_nic_dev));

	if (!ndev) {
		NT_LOG(ERR, FILTER, "ERROR: calloc failed");
		return NULL;
	}

	/*
	 * To dump module initialization writes use
	 * FLOW_BACKEND_DEBUG_MODE_WRITE
	 * then remember to set it ...NONE afterwards again
	 */
	be_if->set_debug_mode(be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);

	if (flow_api_backend_init(&ndev->be, be_if, be_dev) != 0)
		goto err_exit;

	ndev->adapter_no = adapter_no;

	ndev->ports = (uint16_t)((ndev->be.num_rx_ports > 256) ? 256 : ndev->be.num_rx_ports);

	/*
	 * Free resources in NIC must be managed by this module
	 * Get resource sizes and create resource manager elements
	 */
	if (init_resource_elements(ndev, RES_QUEUE, ndev->be.max_queues))
		goto err_exit;

	if (init_resource_elements(ndev, RES_CAT_CFN, ndev->be.cat.nb_cat_funcs))
		goto err_exit;

	if (init_resource_elements(ndev, RES_CAT_COT, ndev->be.max_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_CAT_EXO, ndev->be.cat.nb_pm_ext))
		goto err_exit;

	if (init_resource_elements(ndev, RES_CAT_LEN, ndev->be.cat.nb_len))
		goto err_exit;

	if (init_resource_elements(ndev, RES_KM_FLOW_TYPE, ndev->be.cat.nb_flow_types))
		goto err_exit;

	if (init_resource_elements(ndev, RES_KM_CATEGORY, ndev->be.km.nb_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_HSH_RCP, ndev->be.hsh.nb_rcp))
		goto err_exit;

	if (init_resource_elements(ndev, RES_PDB_RCP, ndev->be.pdb.nb_pdb_rcp_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_QSL_RCP, ndev->be.qsl.nb_rcp_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_QSL_QST, ndev->be.qsl.nb_qst_entries))
		goto err_exit;

	if (init_resource_elements(ndev, RES_SLC_LR_RCP, ndev->be.max_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_FLM_FLOW_TYPE, ndev->be.cat.nb_flow_types))
		goto err_exit;

	if (init_resource_elements(ndev, RES_FLM_RCP, ndev->be.flm.nb_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_TPE_RCP, ndev->be.tpe.nb_rcp_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_TPE_EXT, ndev->be.tpe.nb_rpl_ext_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_TPE_RPL, ndev->be.tpe.nb_rpl_depth))
		goto err_exit;

	if (init_resource_elements(ndev, RES_SCRUB_RCP, ndev->be.flm.nb_scrub_profiles))
		goto err_exit;

	/* may need IPF, COR */

	/* check that all defined resources have been initialized */
	for (int i = 0; i < RES_COUNT; i++)
		assert(ndev->res[i].alloc_bm);

	rte_spinlock_init(&ndev->mtx);
	list_insert_flow_nic(ndev);

	return ndev;

err_exit:

	if (ndev)
		flow_api_done(ndev);

	NT_LOG(DBG, FILTER, "ERR: %s", __func__);
	return NULL;
}

int flow_api_done(struct flow_nic_dev *ndev)
{
	NT_LOG(DBG, FILTER, "FLOW API DONE");

	if (ndev) {
		flow_ndev_reset(ndev);

		/* delete resource management allocations for this ndev */
		for (int i = 0; i < RES_COUNT; i++)
			done_resource_elements(ndev, i);

		flow_api_backend_done(&ndev->be);
		list_remove_flow_nic(ndev);
		free(ndev);
	}

	return 0;
}

void *flow_api_get_be_dev(struct flow_nic_dev *ndev)
{
	if (!ndev) {
		NT_LOG(DBG, FILTER, "ERR: %s", __func__);
		return NULL;
	}

	return ndev->be.be_dev;
}

/* Information for a given RSS type. */
struct rss_type_info {
	uint64_t rss_type;
	const char *str;
};

static struct rss_type_info rss_to_string[] = {
	/* RTE_BIT64(2)   IPv4 dst + IPv4 src */
	RSS_TO_STRING(RTE_ETH_RSS_IPV4),
	/* RTE_BIT64(3)   IPv4 dst + IPv4 src + Identification of group of fragments */
	RSS_TO_STRING(RTE_ETH_RSS_FRAG_IPV4),
	/* RTE_BIT64(4)   IPv4 dst + IPv4 src + L4 protocol */
	RSS_TO_STRING(RTE_ETH_RSS_NONFRAG_IPV4_TCP),
	/* RTE_BIT64(5)   IPv4 dst + IPv4 src + L4 protocol */
	RSS_TO_STRING(RTE_ETH_RSS_NONFRAG_IPV4_UDP),
	/* RTE_BIT64(6)   IPv4 dst + IPv4 src + L4 protocol */
	RSS_TO_STRING(RTE_ETH_RSS_NONFRAG_IPV4_SCTP),
	/* RTE_BIT64(7)   IPv4 dst + IPv4 src + L4 protocol */
	RSS_TO_STRING(RTE_ETH_RSS_NONFRAG_IPV4_OTHER),
	/*
	 * RTE_BIT64(14)  128-bits of L2 payload starting after src MAC, i.e. including optional
	 * VLAN tag and ethertype. Overrides all L3 and L4 flags at the same level, but inner
	 * L2 payload can be combined with outer S-VLAN and GTPU TEID flags.
	 */
	RSS_TO_STRING(RTE_ETH_RSS_L2_PAYLOAD),
	/* RTE_BIT64(18)  L4 dst + L4 src + L4 protocol - see comment of RTE_ETH_RSS_L4_CHKSUM */
	RSS_TO_STRING(RTE_ETH_RSS_PORT),
	/* RTE_BIT64(19)  Not supported */
	RSS_TO_STRING(RTE_ETH_RSS_VXLAN),
	/* RTE_BIT64(20)  Not supported */
	RSS_TO_STRING(RTE_ETH_RSS_GENEVE),
	/* RTE_BIT64(21)  Not supported */
	RSS_TO_STRING(RTE_ETH_RSS_NVGRE),
	/* RTE_BIT64(23)  GTP TEID - always from outer GTPU header */
	RSS_TO_STRING(RTE_ETH_RSS_GTPU),
	/* RTE_BIT64(24)  MAC dst + MAC src */
	RSS_TO_STRING(RTE_ETH_RSS_ETH),
	/* RTE_BIT64(25)  outermost VLAN ID + L4 protocol */
	RSS_TO_STRING(RTE_ETH_RSS_S_VLAN),
	/* RTE_BIT64(26)  innermost VLAN ID + L4 protocol */
	RSS_TO_STRING(RTE_ETH_RSS_C_VLAN),
	/* RTE_BIT64(27)  Not supported */
	RSS_TO_STRING(RTE_ETH_RSS_ESP),
	/* RTE_BIT64(28)  Not supported */
	RSS_TO_STRING(RTE_ETH_RSS_AH),
	/* RTE_BIT64(29)  Not supported */
	RSS_TO_STRING(RTE_ETH_RSS_L2TPV3),
	/* RTE_BIT64(30)  Not supported */
	RSS_TO_STRING(RTE_ETH_RSS_PFCP),
	/* RTE_BIT64(31)  Not supported */
	RSS_TO_STRING(RTE_ETH_RSS_PPPOE),
	/* RTE_BIT64(32)  Not supported */
	RSS_TO_STRING(RTE_ETH_RSS_ECPRI),
	/* RTE_BIT64(33)  Not supported */
	RSS_TO_STRING(RTE_ETH_RSS_MPLS),
	/* RTE_BIT64(34)  IPv4 Header checksum + L4 protocol */
	RSS_TO_STRING(RTE_ETH_RSS_IPV4_CHKSUM),

	/*
	 * if combined with RTE_ETH_RSS_NONFRAG_IPV4_[TCP|UDP|SCTP] then
	 *   L4 protocol + chosen protocol header Checksum
	 * else
	 *   error
	 */
	/* RTE_BIT64(35) */
	RSS_TO_STRING(RTE_ETH_RSS_L4_CHKSUM),
#ifndef ANDROMEDA_DPDK_21_11
	/* RTE_BIT64(36)  Not supported */
	RSS_TO_STRING(RTE_ETH_RSS_L2TPV2),
#endif

	{ RTE_BIT64(37), "unknown_RTE_BIT64(37)" },
	{ RTE_BIT64(38), "unknown_RTE_BIT64(38)" },
	{ RTE_BIT64(39), "unknown_RTE_BIT64(39)" },
	{ RTE_BIT64(40), "unknown_RTE_BIT64(40)" },
	{ RTE_BIT64(41), "unknown_RTE_BIT64(41)" },
	{ RTE_BIT64(42), "unknown_RTE_BIT64(42)" },
	{ RTE_BIT64(43), "unknown_RTE_BIT64(43)" },
	{ RTE_BIT64(44), "unknown_RTE_BIT64(44)" },
	{ RTE_BIT64(45), "unknown_RTE_BIT64(45)" },
	{ RTE_BIT64(46), "unknown_RTE_BIT64(46)" },
	{ RTE_BIT64(47), "unknown_RTE_BIT64(47)" },
	{ RTE_BIT64(48), "unknown_RTE_BIT64(48)" },
	{ RTE_BIT64(49), "unknown_RTE_BIT64(49)" },

	/* RTE_BIT64(50)  outermost encapsulation */
	RSS_TO_STRING(RTE_ETH_RSS_LEVEL_OUTERMOST),
	/* RTE_BIT64(51)  innermost encapsulation */
	RSS_TO_STRING(RTE_ETH_RSS_LEVEL_INNERMOST),

	/* RTE_BIT64(52)  Not supported */
	RSS_TO_STRING(RTE_ETH_RSS_L3_PRE96),
	/* RTE_BIT64(53)  Not supported */
	RSS_TO_STRING(RTE_ETH_RSS_L3_PRE64),
	/* RTE_BIT64(54)  Not supported */
	RSS_TO_STRING(RTE_ETH_RSS_L3_PRE56),
	/* RTE_BIT64(55)  Not supported */
	RSS_TO_STRING(RTE_ETH_RSS_L3_PRE48),
	/* RTE_BIT64(56)  Not supported */
	RSS_TO_STRING(RTE_ETH_RSS_L3_PRE40),
	/* RTE_BIT64(57)  Not supported */
	RSS_TO_STRING(RTE_ETH_RSS_L3_PRE32),

	/* RTE_BIT64(58) */
	RSS_TO_STRING(RTE_ETH_RSS_L2_DST_ONLY),
	/* RTE_BIT64(59) */
	RSS_TO_STRING(RTE_ETH_RSS_L2_SRC_ONLY),
	/* RTE_BIT64(60) */
	RSS_TO_STRING(RTE_ETH_RSS_L4_DST_ONLY),
	/* RTE_BIT64(61) */
	RSS_TO_STRING(RTE_ETH_RSS_L4_SRC_ONLY),
	/* RTE_BIT64(62) */
	RSS_TO_STRING(RTE_ETH_RSS_L3_DST_ONLY),
	/* RTE_BIT64(63) */
	RSS_TO_STRING(RTE_ETH_RSS_L3_SRC_ONLY),
};

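/*
 * Render the bits set in 'hash_mask' as a list of RTE_ETH_RSS_* names, each
 * preceded by 'prefix', into 'str'. Returns 0 on success, or -1 when the
 * buffer is missing or too small.
 */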
int sprint_nt_rss_mask(char *str, uint16_t str_len, const char *prefix, uint64_t hash_mask)
{
	if (str == NULL || str_len == 0)
		return -1;

	memset(str, 0x0, str_len);
	uint16_t str_end = 0;
	const struct rss_type_info *start = rss_to_string;

	for (const struct rss_type_info *p = start; p != start + ARRAY_SIZE(rss_to_string); ++p) {
		if (p->rss_type & hash_mask) {
			if (strlen(prefix) + strlen(p->str) < (size_t)(str_len - str_end)) {
				snprintf(str + str_end, str_len - str_end, "%s", prefix);
				str_end += strlen(prefix);
				snprintf(str + str_end, str_len - str_end, "%s", p->str);
				str_end += strlen(p->str);

			} else {
				return -1;
			}
		}
	}

	return 0;
}

/*
 * Hash
 */

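/*
 * Program hash recipe 'hsh_idx'. HASH_ALGO_5TUPLE configures a 5-tuple hash
 * over the final IP source/destination and L4 words, with the adaptive IPv4
 * mask bit set so IPv4 and IPv6 flows are hashed consistently; any other
 * algorithm leaves the recipe at its reset value, i.e. round-robin.
 */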
int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx, enum flow_nic_hash_e algorithm)
{
	hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0);

	switch (algorithm) {
	case HASH_ALGO_5TUPLE:
		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE, hsh_idx, 0, 2);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0, DYN_FINAL_IP_DST);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0, -16);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0, DYN_FINAL_IP_DST);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0, 0);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx, 0, DYN_L4);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx, 0, 0);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_PE, hsh_idx, 0, 0);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_OFS, hsh_idx, 0, 0);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_P, hsh_idx, 0, 0);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0, 1);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0, 0xffffffff);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1, 0xffffffff);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2, 0xffffffff);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3, 0xffffffff);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4, 0xffffffff);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5, 0xffffffff);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6, 0xffffffff);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7, 0xffffffff);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 8, 0xffffffff);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 9, 0);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_SEED, hsh_idx, 0, 0xffffffff);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_VALID, hsh_idx, 0, 1);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0, HASH_5TUPLE);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK, hsh_idx, 0, 1);

		NT_LOG(DBG, FILTER, "Set IPv6 5-tuple hasher with adaptive IPv4 hashing");
		break;

	default:
	case HASH_ALGO_ROUND_ROBIN:
		/* zero is round-robin */
		break;
	}

	return 0;
}

static int flow_dev_dump(struct flow_eth_dev *dev,
	struct flow_handle *flow,
	uint16_t caller_id,
	FILE *file,
	struct rte_flow_error *error)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL) {
		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
		return -1;
	}

	return profile_inline_ops->flow_dev_dump_profile_inline(dev, flow, caller_id, file, error);
}

int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
	struct nt_eth_rss_conf rss_conf)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL) {
		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
		return -1;
	}

	return profile_inline_ops->flow_nic_set_hasher_fields_inline(ndev, hsh_idx, rss_conf);
}

static int flow_get_aged_flows(struct flow_eth_dev *dev,
	uint16_t caller_id,
	void **context,
	uint32_t nb_contexts,
	struct rte_flow_error *error)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL) {
		NT_LOG_DBGX(ERR, FILTER, "profile_inline_ops uninitialized");
		return -1;
	}

	if (nb_contexts > 0 && !context) {
		error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
		error->message = "rte_flow_get_aged_flows - empty context";
		return -1;
	}

	return profile_inline_ops->flow_get_aged_flows_profile_inline(dev, caller_id, context,
			nb_contexts, error);
}

static int flow_info_get(struct flow_eth_dev *dev, uint8_t caller_id,
	struct rte_flow_port_info *port_info, struct rte_flow_queue_info *queue_info,
	struct rte_flow_error *error)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL) {
		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
		return -1;
	}

	return profile_inline_ops->flow_info_get_profile_inline(dev, caller_id, port_info,
			queue_info, error);
}

static int flow_configure(struct flow_eth_dev *dev, uint8_t caller_id,
	const struct rte_flow_port_attr *port_attr, uint16_t nb_queue,
	const struct rte_flow_queue_attr *queue_attr[], struct rte_flow_error *error)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL) {
		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
		return -1;
	}

	return profile_inline_ops->flow_configure_profile_inline(dev, caller_id, port_attr,
			nb_queue, queue_attr, error);
}

/*
 * Flow Asynchronous operation API
 */

static struct flow_pattern_template *
flow_pattern_template_create(struct flow_eth_dev *dev,
	const struct rte_flow_pattern_template_attr *template_attr, uint16_t caller_id,
	const struct rte_flow_item pattern[], struct rte_flow_error *error)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL) {
		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
		return NULL;
	}

	return profile_inline_ops->flow_pattern_template_create_profile_inline(dev, template_attr,
		caller_id, pattern, error);
}

static int flow_pattern_template_destroy(struct flow_eth_dev *dev,
	struct flow_pattern_template *pattern_template,
	struct rte_flow_error *error)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL) {
		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
		return -1;
	}

	return profile_inline_ops->flow_pattern_template_destroy_profile_inline(dev,
			pattern_template,
			error);
}

static struct flow_actions_template *
flow_actions_template_create(struct flow_eth_dev *dev,
	const struct rte_flow_actions_template_attr *template_attr, uint16_t caller_id,
	const struct rte_flow_action actions[], const struct rte_flow_action masks[],
	struct rte_flow_error *error)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL) {
		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
		return NULL;
	}

	return profile_inline_ops->flow_actions_template_create_profile_inline(dev, template_attr,
		caller_id, actions, masks, error);
}

static int flow_actions_template_destroy(struct flow_eth_dev *dev,
	struct flow_actions_template *actions_template,
	struct rte_flow_error *error)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL) {
		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
		return -1;
	}

	return profile_inline_ops->flow_actions_template_destroy_profile_inline(dev,
			actions_template,
			error);
}

static struct flow_template_table *flow_template_table_create(struct flow_eth_dev *dev,
	const struct rte_flow_template_table_attr *table_attr, uint16_t forced_vlan_vid,
	uint16_t caller_id, struct flow_pattern_template *pattern_templates[],
	uint8_t nb_pattern_templates, struct flow_actions_template *actions_templates[],
	uint8_t nb_actions_templates, struct rte_flow_error *error)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL) {
		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
		return NULL;
	}

	return profile_inline_ops->flow_template_table_create_profile_inline(dev, table_attr,
		forced_vlan_vid, caller_id, pattern_templates, nb_pattern_templates,
		actions_templates, nb_actions_templates, error);
}

static int flow_template_table_destroy(struct flow_eth_dev *dev,
	struct flow_template_table *template_table,
	struct rte_flow_error *error)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL) {
		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
		return -1;
	}

	return profile_inline_ops->flow_template_table_destroy_profile_inline(dev, template_table,
			error);
}

static struct flow_handle *
flow_async_create(struct flow_eth_dev *dev, uint32_t queue_id,
	const struct rte_flow_op_attr *op_attr, struct flow_template_table *template_table,
	const struct rte_flow_item pattern[], uint8_t pattern_template_index,
	const struct rte_flow_action actions[], uint8_t actions_template_index, void *user_data,
	struct rte_flow_error *error)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL) {
		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
		return NULL;
	}

	return profile_inline_ops->flow_async_create_profile_inline(dev, queue_id, op_attr,
			template_table, pattern, pattern_template_index, actions,
			actions_template_index, user_data, error);
}

static int flow_async_destroy(struct flow_eth_dev *dev, uint32_t queue_id,
	const struct rte_flow_op_attr *op_attr, struct flow_handle *flow,
	void *user_data, struct rte_flow_error *error)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL) {
		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
		return -1;
	}

	return profile_inline_ops->flow_async_destroy_profile_inline(dev, queue_id, op_attr, flow,
			user_data, error);
}

int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL)
		return -1;

	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE)
		return profile_inline_ops->flow_get_flm_stats_profile_inline(ndev, data, size);

	return -1;
}

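/*
 * Operations table exposed to the PMD layer; init_flow_filter() registers it
 * through register_flow_filter_ops().
 */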
static const struct flow_filter_ops ops = {
	.flow_filter_init = flow_filter_init,
	.flow_filter_done = flow_filter_done,
	/*
	 * Device Management API
	 */
	.flow_get_eth_dev = flow_get_eth_dev,
	/*
	 * NT Flow API
	 */
	.flow_create = flow_create,
	.flow_destroy = flow_destroy,
	.flow_flush = flow_flush,
	.flow_actions_update = flow_actions_update,
	.flow_dev_dump = flow_dev_dump,
	.flow_get_flm_stats = flow_get_flm_stats,
	.flow_get_aged_flows = flow_get_aged_flows,

	/*
	 * NT Flow asynchronous operations API
	 */
	.flow_info_get = flow_info_get,
	.flow_configure = flow_configure,
	.flow_pattern_template_create = flow_pattern_template_create,
	.flow_pattern_template_destroy = flow_pattern_template_destroy,
	.flow_actions_template_create = flow_actions_template_create,
	.flow_actions_template_destroy = flow_actions_template_destroy,
	.flow_template_table_create = flow_template_table_create,
	.flow_template_table_destroy = flow_template_table_destroy,
	.flow_async_create = flow_async_create,
	.flow_async_destroy = flow_async_destroy,

	/*
	 * Other
	 */
	.hw_mod_hsh_rcp_flush = hw_mod_hsh_rcp_flush,
	.flow_nic_set_hasher_fields = flow_nic_set_hasher_fields,
};

void init_flow_filter(void)
{
	register_flow_filter_ops(&ops);
}