xref: /dpdk/drivers/net/ntnic/nthw/flow_api/flow_api.c (revision e7e01fd15ddee1eb92d68a3aabe800850a8c757a)
1 /*
2  * SPDX-License-Identifier: BSD-3-Clause
3  * Copyright(c) 2023 Napatech A/S
4  */
5 #include "ntlog.h"
6 #include "nt_util.h"
7 
8 #include "flow_api_engine.h"
9 #include "flow_api_nic_setup.h"
10 #include "ntnic_mod_reg.h"
11 
12 #include "flow_api.h"
13 #include "flow_filter.h"
14 
15 #define RSS_TO_STRING(name) \
16 	{                \
17 		name, #name   \
18 	}
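
/*
 * RSS_TO_STRING(name) expands to an initializer pairing an RSS flag with its
 * stringified name, e.g. RSS_TO_STRING(RTE_ETH_RSS_IPV4) becomes
 * { RTE_ETH_RSS_IPV4, "RTE_ETH_RSS_IPV4" }. It is used to build the
 * rss_to_string[] table further below.
 */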
19 
20 const char *dbg_res_descr[] = {
21 	/* RES_QUEUE */ "RES_QUEUE",
22 	/* RES_CAT_CFN */ "RES_CAT_CFN",
23 	/* RES_CAT_COT */ "RES_CAT_COT",
24 	/* RES_CAT_EXO */ "RES_CAT_EXO",
25 	/* RES_CAT_LEN */ "RES_CAT_LEN",
26 	/* RES_KM_FLOW_TYPE */ "RES_KM_FLOW_TYPE",
27 	/* RES_KM_CATEGORY */ "RES_KM_CATEGORY",
28 	/* RES_HSH_RCP */ "RES_HSH_RCP",
29 	/* RES_PDB_RCP */ "RES_PDB_RCP",
30 	/* RES_QSL_RCP */ "RES_QSL_RCP",
31 	/* RES_QSL_QST */ "RES_QSL_QST",
32 	/* RES_SLC_LR_RCP */ "RES_SLC_LR_RCP",
33 	/* RES_FLM_FLOW_TYPE */ "RES_FLM_FLOW_TYPE",
34 	/* RES_FLM_RCP */ "RES_FLM_RCP",
35 	/* RES_TPE_RCP */ "RES_TPE_RCP",
36 	/* RES_TPE_EXT */ "RES_TPE_EXT",
37 	/* RES_TPE_RPL */ "RES_TPE_RPL",
38 	/* RES_SCRUB_RCP */ "RES_SCRUB_RCP",
39 	/* RES_COUNT */ "RES_COUNT",
40 	/* RES_INVALID */ "RES_INVALID"
41 };
42 
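/* Global list of registered NIC devices; additions and removals are guarded by base_mtx. */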
43 static struct flow_nic_dev *dev_base;
44 static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER;
45 
46 /*
47  * Error handling
48  */
49 
50 static const struct {
51 	const char *message;
52 } err_msg[] = {
53 	/* 00 */ { "Operation successfully completed" },
54 	/* 01 */ { "Operation failed" },
55 	/* 02 */ { "Memory allocation failed" },
56 	/* 03 */ { "Too many output destinations" },
57 	/* 04 */ { "Too many output queues for RSS" },
58 	/* 05 */ { "The VLAN TPID specified is not supported" },
59 	/* 06 */ { "The VxLan Push header specified is not accepted" },
60 	/* 07 */ { "While interpreting VxLan Pop action, could not find a destination port" },
61 	/* 08 */ { "Failed in creating a HW-internal VTEP port" },
62 	/* 09 */ { "Too many VLAN tag matches" },
63 	/* 10 */ { "IPv6 invalid header specified" },
64 	/* 11 */ { "Too many tunnel ports. HW limit reached" },
65 	/* 12 */ { "Unknown or unsupported flow match element received" },
66 	/* 13 */ { "Match failed because of HW limitations" },
67 	/* 14 */ { "Match failed because of HW resource limitations" },
68 	/* 15 */ { "Match failed because of too complex element definitions" },
69 	/* 16 */ { "Action failed due to too many output destinations" },
70 	/* 17 */ { "Action output failed due to HW resource exhaustion" },
71 	/* 18 */ { "Push Tunnel Header action cannot output to multiple destination queues" },
72 	/* 19 */ { "Inline action HW resource exhaustion" },
73 	/* 20 */ { "Action retransmit/recirculate HW resource exhaustion" },
74 	/* 21 */ { "Flow counter HW resource exhaustion" },
75 	/* 22 */ { "Internal HW resource exhaustion to handle Actions" },
76 	/* 23 */ { "Internal HW QSL compare failed" },
77 	/* 24 */ { "Internal CAT CFN reuse failed" },
78 	/* 25 */ { "Match variations too complex" },
79 	/* 26 */ { "Match failed because of CAM/TCAM full" },
80 	/* 27 */ { "Internal creation of a tunnel end point port failed" },
81 	/* 28 */ { "Unknown or unsupported flow action received" },
82 	/* 29 */ { "Removing flow failed" },
83 };
84 
85 void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct rte_flow_error *error)
86 {
87 	assert(msg < ERR_MSG_NO_MSG);
88 
89 	if (error) {
90 		error->message = err_msg[msg].message;
91 		error->type = (msg == ERR_SUCCESS) ? RTE_FLOW_ERROR_TYPE_NONE :
92 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
93 	}
94 }
95 
96 /*
97  * Resources
98  */
99 
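/*
 * Allocate one element of the given resource type.
 * The allocation bitmap is scanned in steps of 'alignment'; the first unused
 * index is marked as used, its reference count is set to 1 and the index is
 * returned. Returns -1 when no free element is found.
 */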
100 int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
101 	uint32_t alignment)
102 {
103 	for (unsigned int i = 0; i < ndev->res[res_type].resource_count; i += alignment) {
104 		if (!flow_nic_is_resource_used(ndev, res_type, i)) {
105 			flow_nic_mark_resource_used(ndev, res_type, i);
106 			ndev->res[res_type].ref[i] = 1;
107 			return i;
108 		}
109 	}
110 
111 	return -1;
112 }
113 
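/*
 * Allocate 'num' contiguous elements of the given resource type.
 * Candidate start indices are scanned in steps of 'alignment'. On success all
 * 'num' elements are marked as used with a reference count of 1 and the first
 * index of the range is returned; -1 is returned if no such range exists.
 */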
114 int flow_nic_alloc_resource_config(struct flow_nic_dev *ndev, enum res_type_e res_type,
115 	unsigned int num, uint32_t alignment)
116 {
117 	unsigned int idx_offs;
118 
119 	for (unsigned int res_idx = 0; res_idx < ndev->res[res_type].resource_count - (num - 1);
120 		res_idx += alignment) {
121 		if (!flow_nic_is_resource_used(ndev, res_type, res_idx)) {
122 			for (idx_offs = 1; idx_offs < num; idx_offs++)
123 				if (flow_nic_is_resource_used(ndev, res_type, res_idx + idx_offs))
124 					break;
125 
126 			if (idx_offs < num)
127 				continue;
128 
129 			/* found a contiguous number of "num" res_type elements - allocate them */
130 			for (idx_offs = 0; idx_offs < num; idx_offs++) {
131 				flow_nic_mark_resource_used(ndev, res_type, res_idx + idx_offs);
132 				ndev->res[res_type].ref[res_idx + idx_offs] = 1;
133 			}
134 
135 			return res_idx;
136 		}
137 	}
138 
139 	return -1;
140 }
141 
142 void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int idx)
143 {
144 	flow_nic_mark_resource_unused(ndev, res_type, idx);
145 }
146 
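/*
 * Take an extra reference on an already allocated resource element.
 * Returns 0 on success or -1 if the reference counter would overflow.
 */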
147 int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int index)
148 {
149 	NT_LOG(DBG, FILTER, "Reference resource %s idx %i (before ref cnt %i)",
150 		dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
151 	assert(flow_nic_is_resource_used(ndev, res_type, index));
152 
153 	if (ndev->res[res_type].ref[index] == (uint32_t)-1)
154 		return -1;
155 
156 	ndev->res[res_type].ref[index]++;
157 	return 0;
158 }
159 
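/*
 * Drop a reference on a resource element and free it when the count reaches
 * zero. Returns 1 while the element is still referenced and 0 once it has
 * been freed.
 */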
160 int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int index)
161 {
162 	NT_LOG(DBG, FILTER, "De-reference resource %s idx %i (before ref cnt %i)",
163 		dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
164 	assert(flow_nic_is_resource_used(ndev, res_type, index));
165 	assert(ndev->res[res_type].ref[index]);
166 	/* deref */
167 	ndev->res[res_type].ref[index]--;
168 
169 	if (!ndev->res[res_type].ref[index])
170 		flow_nic_free_resource(ndev, res_type, index);
171 
172 	return !!ndev->res[res_type].ref[index];	/* if 0, the resource has been freed */
173 }
174 
175 /*
176  * Nic port/adapter lookup
177  */
178 
179 static struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port)
180 {
181 	struct flow_nic_dev *nic_dev = dev_base;
182 
183 	while (nic_dev) {
184 		if (nic_dev->adapter_no == adapter_no)
185 			break;
186 
187 		nic_dev = nic_dev->next;
188 	}
189 
190 	if (!nic_dev)
191 		return NULL;
192 
193 	struct flow_eth_dev *dev = nic_dev->eth_base;
194 
195 	while (dev) {
196 		if (port == dev->port)
197 			return dev;
198 
199 		dev = dev->next;
200 	}
201 
202 	return NULL;
203 }
204 
205 static struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no)
206 {
207 	struct flow_nic_dev *ndev = dev_base;
208 
209 	while (ndev) {
210 		if (adapter_no == ndev->adapter_no)
211 			break;
212 
213 		ndev = ndev->next;
214 	}
215 
216 	return ndev;
217 }
218 /*
219  * Flow API
220  */
221 
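/*
 * The functions below are thin dispatchers: each looks up the registered
 * profile_inline_ops and forwards the call to the corresponding
 * *_profile_inline() implementation, failing if that module is not available.
 */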
222 static struct flow_handle *flow_create(struct flow_eth_dev *dev __rte_unused,
223 	const struct rte_flow_attr *attr __rte_unused,
224 	uint16_t forced_vlan_vid __rte_unused,
225 	uint16_t caller_id __rte_unused,
226 	const struct rte_flow_item item[] __rte_unused,
227 	const struct rte_flow_action action[] __rte_unused,
228 	struct rte_flow_error *error __rte_unused)
229 {
230 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
231 
232 	if (profile_inline_ops == NULL) {
233 		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
234 		return NULL;
235 	}
236 
237 	return profile_inline_ops->flow_create_profile_inline(dev, attr,
238 		forced_vlan_vid, caller_id, item, action, error);
239 }
240 
241 static int flow_destroy(struct flow_eth_dev *dev __rte_unused,
242 	struct flow_handle *flow __rte_unused, struct rte_flow_error *error __rte_unused)
243 {
244 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
245 
246 	if (profile_inline_ops == NULL) {
247 		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
248 		return -1;
249 	}
250 
251 	return profile_inline_ops->flow_destroy_profile_inline(dev, flow, error);
252 }
253 
254 static int flow_flush(struct flow_eth_dev *dev, uint16_t caller_id, struct rte_flow_error *error)
255 {
256 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
257 
258 	if (profile_inline_ops == NULL) {
259 		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
260 		return -1;
261 	}
262 
263 	return profile_inline_ops->flow_flush_profile_inline(dev, caller_id, error);
264 }
265 
266 /*
267  * Device Management API
268  */
269 
270 static void nic_insert_eth_port_dev(struct flow_nic_dev *ndev, struct flow_eth_dev *dev)
271 {
272 	dev->next = ndev->eth_base;
273 	ndev->eth_base = dev;
274 }
275 
276 static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev, struct flow_eth_dev *eth_dev)
277 {
278 	struct flow_eth_dev *dev = ndev->eth_base, *prev = NULL;
279 
280 	while (dev) {
281 		if (dev == eth_dev) {
282 			if (prev)
283 				prev->next = dev->next;
284 
285 			else
286 				ndev->eth_base = dev->next;
287 
288 			return 0;
289 		}
290 
291 		prev = dev;
292 		dev = dev->next;
293 	}
294 
295 	return -1;
296 }
297 
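/*
 * Reset a NIC device to its post-initialization state: delete all of its
 * eth-port devices, destroy any flows that are still defined, shut down the
 * inline flow management and release the KM/KCC resource handles.
 */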
298 static void flow_ndev_reset(struct flow_nic_dev *ndev)
299 {
300 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
301 
302 	if (profile_inline_ops == NULL) {
303 		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
304 		return;
305 	}
306 
307 	/* Delete all eth-port devices created on this NIC device */
308 	while (ndev->eth_base)
309 		flow_delete_eth_dev(ndev->eth_base);
310 
311 	/* Error check */
312 	while (ndev->flow_base) {
313 		NT_LOG(ERR, FILTER,
314 			"ERROR: Flows still defined but all eth-ports deleted. Flow %p",
315 			ndev->flow_base);
316 
317 		profile_inline_ops->flow_destroy_profile_inline(ndev->flow_base->dev,
318 			ndev->flow_base, NULL);
319 	}
320 
321 	profile_inline_ops->done_flow_management_of_ndev_profile_inline(ndev);
322 
323 	km_free_ndev_resource_management(&ndev->km_res_handle);
324 	kcc_free_ndev_resource_management(&ndev->kcc_res_handle);
325 
326 	ndev->flow_unique_id_counter = 0;
327 
328 #ifdef FLOW_DEBUG
329 	/*
330 	 * Free all resources that were allocated by default for this NIC device.
331 	 * Not strictly needed, since the bitmaps are freed shortly after; therefore
332 	 * this is only done in debug mode.
333 	 */
334 
335 	/* Check that all resources have been released */
336 	NT_LOG(DBG, FILTER, "Delete NIC DEV Adaptor %i", ndev->adapter_no);
337 
338 	for (unsigned int i = 0; i < RES_COUNT; i++) {
339 		int err = 0;
341 		NT_LOG(DBG, FILTER, "RES state for: %s", dbg_res_descr[i]);
343 
344 		for (unsigned int ii = 0; ii < ndev->res[i].resource_count; ii++) {
345 			int ref = ndev->res[i].ref[ii];
346 			int used = flow_nic_is_resource_used(ndev, i, ii);
347 
348 			if (ref || used) {
349 				NT_LOG(DBG, FILTER, "  [%i]: ref cnt %i, used %i", ii, ref,
350 					used);
351 				err = 1;
352 			}
353 		}
354 
355 		if (err)
356 			NT_LOG(DBG, FILTER, "ERROR - some resources not freed");
357 	}
358 
359 #endif
360 }
361 
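/*
 * Delete one eth-port device: destroy every flow created on it, clear its
 * unmatched-packet (QSL UNM) mapping, disable its RX queues (inline profile),
 * unlink it from the owning NIC device and free it.
 */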
362 int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
363 {
364 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
365 
366 	if (profile_inline_ops == NULL) {
367 		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
368 		return -1;
369 	}
370 
371 	struct flow_nic_dev *ndev = eth_dev->ndev;
372 
373 	if (!ndev) {
374 		/* Error: invalid NIC device */
375 		return -1;
376 	}
377 
378 	NT_LOG(DBG, FILTER, "Delete eth-port device %p, port %i", eth_dev, eth_dev->port);
379 
380 #ifdef FLOW_DEBUG
381 	ndev->be.iface->set_debug_mode(ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_WRITE);
382 #endif
383 
384 	/* delete all created flows from this device */
385 	pthread_mutex_lock(&ndev->mtx);
386 
387 	struct flow_handle *flow = ndev->flow_base;
388 
389 	while (flow) {
390 		if (flow->dev == eth_dev) {
391 			struct flow_handle *flow_next = flow->next;
392 			profile_inline_ops->flow_destroy_locked_profile_inline(eth_dev, flow,
393 				NULL);
394 			flow = flow_next;
395 
396 		} else {
397 			flow = flow->next;
398 		}
399 	}
400 
401 	/*
402 	 * remove unmatched queue if setup in QSL
403 	 * remove exception queue setting in QSL UNM
404 	 */
405 	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port, 0);
406 	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 0);
407 	hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1);
408 
409 	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
410 		for (int i = 0; i < eth_dev->num_queues; ++i) {
411 			uint32_t qen_value = 0;
412 			uint32_t queue_id = (uint32_t)eth_dev->rx_queue[i].hw_id;
413 
414 			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN, queue_id / 4, &qen_value);
415 			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN, queue_id / 4,
416 				qen_value & ~(1U << (queue_id % 4)));
417 			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
418 		}
419 	}
420 
421 #ifdef FLOW_DEBUG
422 	ndev->be.iface->set_debug_mode(ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
423 #endif
424 
425 	/* take eth_dev out of ndev list */
426 	if (nic_remove_eth_port_dev(ndev, eth_dev) != 0)
427 		NT_LOG(ERR, FILTER, "ERROR: eth_dev %p not found", eth_dev);
428 
429 	pthread_mutex_unlock(&ndev->mtx);
430 
431 	/* free eth_dev */
432 	free(eth_dev);
433 
434 	return 0;
435 }
436 
437 /*
438  * Flow API NIC Setup
439  * Flow backend creation function - register and initialize common backend API to FPA modules
440  */
441 
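/*
 * Allocate the bookkeeping for one resource type: a used/free bitmap followed
 * by a per-element reference counter array, carved from a single calloc'ed
 * block. Returns 0 on success and -1 on allocation failure.
 */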
442 static int init_resource_elements(struct flow_nic_dev *ndev, enum res_type_e res_type,
443 	uint32_t count)
444 {
445 	assert(ndev->res[res_type].alloc_bm == NULL);
446 	/* allocate bitmap and ref counter */
447 	ndev->res[res_type].alloc_bm =
448 		calloc(1, BIT_CONTAINER_8_ALIGN(count) + count * sizeof(uint32_t));
449 
450 	if (ndev->res[res_type].alloc_bm) {
451 		ndev->res[res_type].ref =
452 			(uint32_t *)&ndev->res[res_type].alloc_bm[BIT_CONTAINER_8_ALIGN(count)];
453 		ndev->res[res_type].resource_count = count;
454 		return 0;
455 	}
456 
457 	return -1;
458 }
459 
460 static void done_resource_elements(struct flow_nic_dev *ndev, enum res_type_e res_type)
461 {
462 	assert(ndev);
463 
464 	if (ndev->res[res_type].alloc_bm)
465 		free(ndev->res[res_type].alloc_bm);
466 }
467 
468 static void list_insert_flow_nic(struct flow_nic_dev *ndev)
469 {
470 	pthread_mutex_lock(&base_mtx);
471 	ndev->next = dev_base;
472 	dev_base = ndev;
473 	pthread_mutex_unlock(&base_mtx);
474 }
475 
476 static int list_remove_flow_nic(struct flow_nic_dev *ndev)
477 {
478 	pthread_mutex_lock(&base_mtx);
479 	struct flow_nic_dev *nic_dev = dev_base, *prev = NULL;
480 
481 	while (nic_dev) {
482 		if (nic_dev == ndev) {
483 			if (prev)
484 				prev->next = nic_dev->next;
485 
486 			else
487 				dev_base = nic_dev->next;
488 
489 			pthread_mutex_unlock(&base_mtx);
490 			return 0;
491 		}
492 
493 		prev = nic_dev;
494 		nic_dev = nic_dev->next;
495 	}
496 
497 	pthread_mutex_unlock(&base_mtx);
498 	return -1;
499 }
500 
501 /*
502  * adapter_no       physical adapter no
503  * port_no          local port no
504  * alloc_rx_queues  number of rx-queues to allocate for this eth_dev
505  */
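/*
 * Illustrative call sketch (adapter/port numbers, queue count and queue ids
 * are examples only; they come from the adapter setup code):
 *
 *   struct flow_queue_id_s queue_ids[2];   // hw queue ids prepared by the caller
 *   int rss_target_id;
 *   struct flow_eth_dev *dev = flow_get_eth_dev(0, 0, port_id, 2, queue_ids,
 *       &rss_target_id, FLOW_ETH_DEV_PROFILE_INLINE, 1);
 */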
506 static struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no, uint32_t port_id,
507 	int alloc_rx_queues, struct flow_queue_id_s queue_ids[],
508 	int *rss_target_id, enum flow_eth_dev_profile flow_profile,
509 	uint32_t exception_path)
510 {
511 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
512 
513 	if (profile_inline_ops == NULL)
514 		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
515 
516 	int i;
517 	struct flow_eth_dev *eth_dev = NULL;
518 
519 	NT_LOG(DBG, FILTER,
520 		"Get eth-port adapter %i, port %i, port_id %u, rx queues %i, profile %i",
521 		adapter_no, port_no, port_id, alloc_rx_queues, flow_profile);
522 
523 	if (MAX_OUTPUT_DEST < FLOW_MAX_QUEUES) {
524 		assert(0);
525 		NT_LOG(ERR, FILTER,
526 			"ERROR: Internal array for multiple queues too small for API");
527 	}
528 
529 	pthread_mutex_lock(&base_mtx);
530 	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
531 
532 	if (!ndev) {
533 		/* Error - no flow api found on specified adapter */
534 		NT_LOG(ERR, FILTER, "ERROR: no flow interface registered for adapter %d",
535 			adapter_no);
536 		pthread_mutex_unlock(&base_mtx);
537 		return NULL;
538 	}
539 
540 	if (ndev->ports < ((uint16_t)port_no + 1)) {
541 		NT_LOG(ERR, FILTER, "ERROR: port exceeds supported port range for adapter");
542 		pthread_mutex_unlock(&base_mtx);
543 		return NULL;
544 	}
545 
546 	if ((alloc_rx_queues - 1) > FLOW_MAX_QUEUES) {	/* queue 0 is the exception queue, hence +1 */
547 		NT_LOG(ERR, FILTER,
548 			"ERROR: Exceeds supported number of rx queues per eth device");
549 		pthread_mutex_unlock(&base_mtx);
550 		return NULL;
551 	}
552 
553 	/* don't accept multiple eth_dev's on same NIC and same port */
554 	eth_dev = nic_and_port_to_eth_dev(adapter_no, port_no);
555 
556 	if (eth_dev) {
557 		NT_LOG(DBG, FILTER, "Re-opening existing NIC port device: NIC DEV: %i Port %i",
558 			adapter_no, port_no);
559 		pthread_mutex_unlock(&base_mtx);
560 		flow_delete_eth_dev(eth_dev);
		/* re-acquire the base lock; it is released again on all exit paths below */
		pthread_mutex_lock(&base_mtx);
561 		eth_dev = NULL;
562 	}
563 
564 	eth_dev = calloc(1, sizeof(struct flow_eth_dev));
565 
566 	if (!eth_dev) {
567 		NT_LOG(ERR, FILTER, "ERROR: calloc failed");
		/* err_exit1 does not release the base lock, so release it here */
		pthread_mutex_unlock(&base_mtx);
568 		goto err_exit1;
569 	}
570 
571 	pthread_mutex_lock(&ndev->mtx);
572 
573 	eth_dev->ndev = ndev;
574 	eth_dev->port = port_no;
575 	eth_dev->port_id = port_id;
576 
577 	/* First time this NIC device is opened - initialize flow management */
578 	if (!ndev->flow_mgnt_prepared) {
579 		ndev->flow_profile = flow_profile;
580 
581 		/* Initialize modules if needed - recipe 0 is used as no-match and must be set up */
582 		if (profile_inline_ops != NULL &&
583 			profile_inline_ops->initialize_flow_management_of_ndev_profile_inline(ndev))
584 			goto err_exit0;
585 
586 	} else {
587 		/* check if same flow type is requested, otherwise fail */
588 		if (ndev->flow_profile != flow_profile) {
589 			NT_LOG(ERR, FILTER,
590 				"ERROR: Different flow types requested on same NIC device. Not supported.");
591 			goto err_exit0;
592 		}
593 	}
594 
595 	/* Allocate the requested queues in HW for this dev */
596 
597 	for (i = 0; i < alloc_rx_queues; i++) {
598 		eth_dev->rx_queue[i] = queue_ids[i];
599 
600 		if (i == 0 && (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE && exception_path)) {
601 			/*
602 			 * Init QSL UNM - unmatched - redirects otherwise discarded
603 			 * packets in QSL
604 			 */
605 			if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port,
606 				eth_dev->rx_queue[0].hw_id) < 0)
607 				goto err_exit0;
608 
609 			if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 1) < 0)
610 				goto err_exit0;
611 
612 			if (hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1) < 0)
613 				goto err_exit0;
614 		}
615 
616 		eth_dev->num_queues++;
617 	}
618 
619 	eth_dev->rss_target_id = -1;
620 
621 	if (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
622 		for (i = 0; i < eth_dev->num_queues; i++) {
623 			uint32_t qen_value = 0;
624 			uint32_t queue_id = (uint32_t)eth_dev->rx_queue[i].hw_id;
625 
626 			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN, queue_id / 4, &qen_value);
627 			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN, queue_id / 4,
628 				qen_value | (1 << (queue_id % 4)));
629 			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
630 		}
631 	}
632 
633 	*rss_target_id = eth_dev->rss_target_id;
634 
635 	nic_insert_eth_port_dev(ndev, eth_dev);
636 
637 	pthread_mutex_unlock(&ndev->mtx);
638 	pthread_mutex_unlock(&base_mtx);
639 	return eth_dev;
640 
641 err_exit0:
642 	pthread_mutex_unlock(&ndev->mtx);
643 	pthread_mutex_unlock(&base_mtx);
644 
645 err_exit1:
646 	if (eth_dev)
647 		free(eth_dev);
648 
649 #ifdef FLOW_DEBUG
650 	ndev->be.iface->set_debug_mode(ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
651 #endif
652 
653 	NT_LOG(DBG, FILTER, "ERR in %s", __func__);
654 	return NULL;	/* Error exit */
655 }
656 
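/*
 * Create and register the flow_nic_dev for one adapter: bind the backend
 * interface, size the per-resource bitmaps and reference counters from the
 * backend capabilities and insert the device into the global device list.
 * Returns NULL on any failure.
 */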
657 struct flow_nic_dev *flow_api_create(uint8_t adapter_no, const struct flow_api_backend_ops *be_if,
658 	void *be_dev)
659 {
660 	(void)adapter_no;
661 
662 	if (!be_if || be_if->version != 1) {
663 		NT_LOG(DBG, FILTER, "ERR: %s", __func__);
664 		return NULL;
665 	}
666 
667 	struct flow_nic_dev *ndev = calloc(1, sizeof(struct flow_nic_dev));
668 
669 	if (!ndev) {
670 		NT_LOG(ERR, FILTER, "ERROR: calloc failed");
671 		return NULL;
672 	}
673 
674 	/*
675 	 * To dump module initialization writes, use
676 	 * FLOW_BACKEND_DEBUG_MODE_WRITE here,
677 	 * then remember to set it back to ..._NONE afterwards.
678 	 */
679 	be_if->set_debug_mode(be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
680 
681 	if (flow_api_backend_init(&ndev->be, be_if, be_dev) != 0)
682 		goto err_exit;
683 
684 	ndev->adapter_no = adapter_no;
685 
686 	ndev->ports = (uint16_t)((ndev->be.num_rx_ports > 256) ? 256 : ndev->be.num_rx_ports);
687 
688 	/*
689 	 * Free resources in NIC must be managed by this module
690 	 * Get resource sizes and create resource manager elements
691 	 */
692 	if (init_resource_elements(ndev, RES_QUEUE, ndev->be.max_queues))
693 		goto err_exit;
694 
695 	if (init_resource_elements(ndev, RES_CAT_CFN, ndev->be.cat.nb_cat_funcs))
696 		goto err_exit;
697 
698 	if (init_resource_elements(ndev, RES_CAT_COT, ndev->be.max_categories))
699 		goto err_exit;
700 
701 	if (init_resource_elements(ndev, RES_CAT_EXO, ndev->be.cat.nb_pm_ext))
702 		goto err_exit;
703 
704 	if (init_resource_elements(ndev, RES_CAT_LEN, ndev->be.cat.nb_len))
705 		goto err_exit;
706 
707 	if (init_resource_elements(ndev, RES_KM_FLOW_TYPE, ndev->be.cat.nb_flow_types))
708 		goto err_exit;
709 
710 	if (init_resource_elements(ndev, RES_KM_CATEGORY, ndev->be.km.nb_categories))
711 		goto err_exit;
712 
713 	if (init_resource_elements(ndev, RES_HSH_RCP, ndev->be.hsh.nb_rcp))
714 		goto err_exit;
715 
716 	if (init_resource_elements(ndev, RES_PDB_RCP, ndev->be.pdb.nb_pdb_rcp_categories))
717 		goto err_exit;
718 
719 	if (init_resource_elements(ndev, RES_QSL_RCP, ndev->be.qsl.nb_rcp_categories))
720 		goto err_exit;
721 
722 	if (init_resource_elements(ndev, RES_QSL_QST, ndev->be.qsl.nb_qst_entries))
723 		goto err_exit;
724 
725 	if (init_resource_elements(ndev, RES_SLC_LR_RCP, ndev->be.max_categories))
726 		goto err_exit;
727 
728 	if (init_resource_elements(ndev, RES_FLM_FLOW_TYPE, ndev->be.cat.nb_flow_types))
729 		goto err_exit;
730 
731 	if (init_resource_elements(ndev, RES_FLM_RCP, ndev->be.flm.nb_categories))
732 		goto err_exit;
733 
734 	if (init_resource_elements(ndev, RES_TPE_RCP, ndev->be.tpe.nb_rcp_categories))
735 		goto err_exit;
736 
737 	if (init_resource_elements(ndev, RES_TPE_EXT, ndev->be.tpe.nb_rpl_ext_categories))
738 		goto err_exit;
739 
740 	if (init_resource_elements(ndev, RES_TPE_RPL, ndev->be.tpe.nb_rpl_depth))
741 		goto err_exit;
742 
743 	if (init_resource_elements(ndev, RES_SCRUB_RCP, ndev->be.flm.nb_scrub_profiles))
744 		goto err_exit;
745 
746 	/* may need IPF, COR */
747 
748 	/* check that all defined resources have been initialized */
749 	for (int i = 0; i < RES_COUNT; i++)
750 		assert(ndev->res[i].alloc_bm);
751 
752 	pthread_mutex_init(&ndev->mtx, NULL);
753 	list_insert_flow_nic(ndev);
754 
755 	return ndev;
756 
757 err_exit:
758 
759 	if (ndev)
760 		flow_api_done(ndev);
761 
762 	NT_LOG(DBG, FILTER, "ERR: %s", __func__);
763 	return NULL;
764 }
765 
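/*
 * Tear down a flow_nic_dev created by flow_api_create(): reset the device
 * (removing remaining eth-port devices and flows), free the per-resource
 * bookkeeping, close the backend and take the device off the global list.
 */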
766 int flow_api_done(struct flow_nic_dev *ndev)
767 {
768 	NT_LOG(DBG, FILTER, "FLOW API DONE");
769 
770 	if (ndev) {
771 		flow_ndev_reset(ndev);
772 
773 		/* delete resource management allocations for this ndev */
774 		for (int i = 0; i < RES_COUNT; i++)
775 			done_resource_elements(ndev, i);
776 
777 		flow_api_backend_done(&ndev->be);
778 		list_remove_flow_nic(ndev);
779 		free(ndev);
780 	}
781 
782 	return 0;
783 }
784 
785 void *flow_api_get_be_dev(struct flow_nic_dev *ndev)
786 {
787 	if (!ndev) {
788 		NT_LOG(DBG, FILTER, "ERR: %s", __func__);
789 		return NULL;
790 	}
791 
792 	return ndev->be.be_dev;
793 }
794 
795 /* Information for a given RSS type. */
796 struct rss_type_info {
797 	uint64_t rss_type;
798 	const char *str;
799 };
800 
801 static struct rss_type_info rss_to_string[] = {
802 	/* RTE_BIT64(2)   IPv4 dst + IPv4 src */
803 	RSS_TO_STRING(RTE_ETH_RSS_IPV4),
804 	/* RTE_BIT64(3)   IPv4 dst + IPv4 src + Identification of group of fragments  */
805 	RSS_TO_STRING(RTE_ETH_RSS_FRAG_IPV4),
806 	/* RTE_BIT64(4)   IPv4 dst + IPv4 src + L4 protocol */
807 	RSS_TO_STRING(RTE_ETH_RSS_NONFRAG_IPV4_TCP),
808 	/* RTE_BIT64(5)   IPv4 dst + IPv4 src + L4 protocol */
809 	RSS_TO_STRING(RTE_ETH_RSS_NONFRAG_IPV4_UDP),
810 	/* RTE_BIT64(6)   IPv4 dst + IPv4 src + L4 protocol */
811 	RSS_TO_STRING(RTE_ETH_RSS_NONFRAG_IPV4_SCTP),
812 	/* RTE_BIT64(7)   IPv4 dst + IPv4 src + L4 protocol */
813 	RSS_TO_STRING(RTE_ETH_RSS_NONFRAG_IPV4_OTHER),
814 	/*
815 	 * RTE_BIT64(14)  128-bits of L2 payload starting after src MAC, i.e. including optional
816 	 * VLAN tag and ethertype. Overrides all L3 and L4 flags at the same level, but inner
817 	 * L2 payload can be combined with outer S-VLAN and GTPU TEID flags.
818 	 */
819 	RSS_TO_STRING(RTE_ETH_RSS_L2_PAYLOAD),
820 	/* RTE_BIT64(18)  L4 dst + L4 src + L4 protocol - see comment of RTE_ETH_RSS_L4_CHKSUM */
821 	RSS_TO_STRING(RTE_ETH_RSS_PORT),
822 	/* RTE_BIT64(19)  Not supported */
823 	RSS_TO_STRING(RTE_ETH_RSS_VXLAN),
824 	/* RTE_BIT64(20)  Not supported */
825 	RSS_TO_STRING(RTE_ETH_RSS_GENEVE),
826 	/* RTE_BIT64(21)  Not supported */
827 	RSS_TO_STRING(RTE_ETH_RSS_NVGRE),
828 	/* RTE_BIT64(23)  GTP TEID - always from outer GTPU header */
829 	RSS_TO_STRING(RTE_ETH_RSS_GTPU),
830 	/* RTE_BIT64(24)  MAC dst + MAC src */
831 	RSS_TO_STRING(RTE_ETH_RSS_ETH),
832 	/* RTE_BIT64(25)  outermost VLAN ID + L4 protocol */
833 	RSS_TO_STRING(RTE_ETH_RSS_S_VLAN),
834 	/* RTE_BIT64(26)  innermost VLAN ID + L4 protocol */
835 	RSS_TO_STRING(RTE_ETH_RSS_C_VLAN),
836 	/* RTE_BIT64(27)  Not supported */
837 	RSS_TO_STRING(RTE_ETH_RSS_ESP),
838 	/* RTE_BIT64(28)  Not supported */
839 	RSS_TO_STRING(RTE_ETH_RSS_AH),
840 	/* RTE_BIT64(29)  Not supported */
841 	RSS_TO_STRING(RTE_ETH_RSS_L2TPV3),
842 	/* RTE_BIT64(30)  Not supported */
843 	RSS_TO_STRING(RTE_ETH_RSS_PFCP),
844 	/* RTE_BIT64(31)  Not supported */
845 	RSS_TO_STRING(RTE_ETH_RSS_PPPOE),
846 	/* RTE_BIT64(32)  Not supported */
847 	RSS_TO_STRING(RTE_ETH_RSS_ECPRI),
848 	/* RTE_BIT64(33)  Not supported */
849 	RSS_TO_STRING(RTE_ETH_RSS_MPLS),
850 	/* RTE_BIT64(34)  IPv4 Header checksum + L4 protocol */
851 	RSS_TO_STRING(RTE_ETH_RSS_IPV4_CHKSUM),
852 
853 	/*
854 	 * if combined with RTE_ETH_RSS_NONFRAG_IPV4_[TCP|UDP|SCTP] then
855 	 *   L4 protocol + chosen protocol header Checksum
856 	 * else
857 	 *   error
858 	 */
859 	/* RTE_BIT64(35) */
860 	RSS_TO_STRING(RTE_ETH_RSS_L4_CHKSUM),
861 #ifndef ANDROMEDA_DPDK_21_11
862 	/* RTE_BIT64(36)  Not supported */
863 	RSS_TO_STRING(RTE_ETH_RSS_L2TPV2),
864 #endif
865 
866 	{ RTE_BIT64(37), "unknown_RTE_BIT64(37)" },
867 	{ RTE_BIT64(38), "unknown_RTE_BIT64(38)" },
868 	{ RTE_BIT64(39), "unknown_RTE_BIT64(39)" },
869 	{ RTE_BIT64(40), "unknown_RTE_BIT64(40)" },
870 	{ RTE_BIT64(41), "unknown_RTE_BIT64(41)" },
871 	{ RTE_BIT64(42), "unknown_RTE_BIT64(42)" },
872 	{ RTE_BIT64(43), "unknown_RTE_BIT64(43)" },
873 	{ RTE_BIT64(44), "unknown_RTE_BIT64(44)" },
874 	{ RTE_BIT64(45), "unknown_RTE_BIT64(45)" },
875 	{ RTE_BIT64(46), "unknown_RTE_BIT64(46)" },
876 	{ RTE_BIT64(47), "unknown_RTE_BIT64(47)" },
877 	{ RTE_BIT64(48), "unknown_RTE_BIT64(48)" },
878 	{ RTE_BIT64(49), "unknown_RTE_BIT64(49)" },
879 
880 	/* RTE_BIT64(50)  outermost encapsulation */
881 	RSS_TO_STRING(RTE_ETH_RSS_LEVEL_OUTERMOST),
882 	/* RTE_BIT64(51)  innermost encapsulation */
883 	RSS_TO_STRING(RTE_ETH_RSS_LEVEL_INNERMOST),
884 
885 	/* RTE_BIT64(52)  Not supported */
886 	RSS_TO_STRING(RTE_ETH_RSS_L3_PRE96),
887 	/* RTE_BIT64(53)  Not supported */
888 	RSS_TO_STRING(RTE_ETH_RSS_L3_PRE64),
889 	/* RTE_BIT64(54)  Not supported */
890 	RSS_TO_STRING(RTE_ETH_RSS_L3_PRE56),
891 	/* RTE_BIT64(55)  Not supported */
892 	RSS_TO_STRING(RTE_ETH_RSS_L3_PRE48),
893 	/* RTE_BIT64(56)  Not supported */
894 	RSS_TO_STRING(RTE_ETH_RSS_L3_PRE40),
895 	/* RTE_BIT64(57)  Not supported */
896 	RSS_TO_STRING(RTE_ETH_RSS_L3_PRE32),
897 
898 	/* RTE_BIT64(58) */
899 	RSS_TO_STRING(RTE_ETH_RSS_L2_DST_ONLY),
900 	/* RTE_BIT64(59) */
901 	RSS_TO_STRING(RTE_ETH_RSS_L2_SRC_ONLY),
902 	/* RTE_BIT64(60) */
903 	RSS_TO_STRING(RTE_ETH_RSS_L4_DST_ONLY),
904 	/* RTE_BIT64(61) */
905 	RSS_TO_STRING(RTE_ETH_RSS_L4_SRC_ONLY),
906 	/* RTE_BIT64(62) */
907 	RSS_TO_STRING(RTE_ETH_RSS_L3_DST_ONLY),
908 	/* RTE_BIT64(63) */
909 	RSS_TO_STRING(RTE_ETH_RSS_L3_SRC_ONLY),
910 };
911 
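/*
 * Write the names of all RSS bits set in 'hash_mask' into 'str', each name
 * preceded by 'prefix'. Returns 0 on success, -1 on invalid arguments or if
 * the buffer is too small. Illustrative use (buffer size is an example):
 *
 *   char buf[256];
 *   sprint_nt_rss_mask(buf, sizeof(buf), " ",
 *       RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP);
 */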
912 int sprint_nt_rss_mask(char *str, uint16_t str_len, const char *prefix, uint64_t hash_mask)
913 {
914 	if (str == NULL || str_len == 0)
915 		return -1;
916 
917 	memset(str, 0x0, str_len);
918 	uint16_t str_end = 0;
919 	const struct rss_type_info *start = rss_to_string;
920 
921 	for (const struct rss_type_info *p = start; p != start + ARRAY_SIZE(rss_to_string); ++p) {
922 		if (p->rss_type & hash_mask) {
923 			if (strlen(prefix) + strlen(p->str) < (size_t)(str_len - str_end)) {
924 				snprintf(str + str_end, str_len - str_end, "%s", prefix);
925 				str_end += strlen(prefix);
926 				snprintf(str + str_end, str_len - str_end, "%s", p->str);
927 				str_end += strlen(p->str);
928 
929 			} else {
930 				return -1;
931 			}
932 		}
933 	}
934 
935 	return 0;
936 }
937 
938 /*
939  * Hash
940  */
941 
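/*
 * Program HSH recipe 'hsh_idx' with one of the predefined hashing schemes.
 * HASH_ALGO_5TUPLE configures an IPv6 5-tuple hash (final IP addresses plus
 * the L4 word) with the adaptive IPv4 mask bit set, so IPv4 traffic is hashed
 * consistently as well; any other value leaves the recipe cleared, which
 * gives round-robin distribution.
 */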
942 int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx, enum flow_nic_hash_e algorithm)
943 {
944 	hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0);
945 
946 	switch (algorithm) {
947 	case HASH_ALGO_5TUPLE:
948 		/* set up IPv6 5-tuple hashing and enable the adaptive IPv4 mask bit */
949 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE, hsh_idx, 0, 2);
950 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0, DYN_FINAL_IP_DST);
951 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0, -16);
952 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0, DYN_FINAL_IP_DST);
953 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0, 0);
954 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx, 0, DYN_L4);
955 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx, 0, 0);
956 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_PE, hsh_idx, 0, 0);
957 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_OFS, hsh_idx, 0, 0);
958 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_P, hsh_idx, 0, 0);
959 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0, 1);
960 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0, 0xffffffff);
961 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1, 0xffffffff);
962 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2, 0xffffffff);
963 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3, 0xffffffff);
964 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4, 0xffffffff);
965 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5, 0xffffffff);
966 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6, 0xffffffff);
967 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7, 0xffffffff);
968 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 8, 0xffffffff);
969 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 9, 0);
970 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_SEED, hsh_idx, 0, 0xffffffff);
971 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_VALID, hsh_idx, 0, 1);
972 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0, HASH_5TUPLE);
973 		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK, hsh_idx, 0, 1);
974 
975 		NT_LOG(DBG, FILTER, "Set IPv6 5-tuple hasher with adaptive IPv4 hashing");
976 		break;
977 
978 	default:
979 	case HASH_ALGO_ROUND_ROBIN:
980 		/* zero is round-robin */
981 		break;
982 	}
983 
984 	return 0;
985 }
986 
987 static int flow_dev_dump(struct flow_eth_dev *dev,
988 	struct flow_handle *flow,
989 	uint16_t caller_id,
990 	FILE *file,
991 	struct rte_flow_error *error)
992 {
993 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
994 
995 	if (profile_inline_ops == NULL) {
996 		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
997 		return -1;
998 	}
999 
1000 	return profile_inline_ops->flow_dev_dump_profile_inline(dev, flow, caller_id, file, error);
1001 }
1002 
1003 int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
1004 	struct nt_eth_rss_conf rss_conf)
1005 {
1006 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
1007 
1008 	if (profile_inline_ops == NULL) {
1009 		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
1010 		return -1;
1011 	}
1012 
1013 	return profile_inline_ops->flow_nic_set_hasher_fields_inline(ndev, hsh_idx, rss_conf);
1014 }
1015 
1016 static int flow_get_aged_flows(struct flow_eth_dev *dev,
1017 	uint16_t caller_id,
1018 	void **context,
1019 	uint32_t nb_contexts,
1020 	struct rte_flow_error *error)
1021 {
1022 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
1023 
1024 	if (profile_inline_ops == NULL) {
1025 		NT_LOG_DBGX(ERR, FILTER, "profile_inline_ops uninitialized");
1026 		return -1;
1027 	}
1028 
1029 	if (nb_contexts > 0 && !context) {
1030 		error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
1031 		error->message = "rte_flow_get_aged_flows - empty context";
1032 		return -1;
1033 	}
1034 
1035 	return profile_inline_ops->flow_get_aged_flows_profile_inline(dev, caller_id, context,
1036 			nb_contexts, error);
1037 }
1038 
1039 static int flow_info_get(struct flow_eth_dev *dev, uint8_t caller_id,
1040 	struct rte_flow_port_info *port_info, struct rte_flow_queue_info *queue_info,
1041 	struct rte_flow_error *error)
1042 {
1043 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
1044 
1045 	if (profile_inline_ops == NULL) {
1046 		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
1047 		return -1;
1048 	}
1049 
1050 	return profile_inline_ops->flow_info_get_profile_inline(dev, caller_id, port_info,
1051 			queue_info, error);
1052 }
1053 
1054 static int flow_configure(struct flow_eth_dev *dev, uint8_t caller_id,
1055 	const struct rte_flow_port_attr *port_attr, uint16_t nb_queue,
1056 	const struct rte_flow_queue_attr *queue_attr[], struct rte_flow_error *error)
1057 {
1058 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
1059 
1060 	if (profile_inline_ops == NULL) {
1061 		NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized");
1062 		return -1;
1063 	}
1064 
1065 	return profile_inline_ops->flow_configure_profile_inline(dev, caller_id, port_attr,
1066 			nb_queue, queue_attr, error);
1067 }
1068 
1069 int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size)
1070 {
1071 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
1072 
1073 	if (profile_inline_ops == NULL)
1074 		return -1;
1075 
1076 	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE)
1077 		return profile_inline_ops->flow_get_flm_stats_profile_inline(ndev, data, size);
1078 
1079 	return -1;
1080 }
1081 
1082 static const struct flow_filter_ops ops = {
1083 	.flow_filter_init = flow_filter_init,
1084 	.flow_filter_done = flow_filter_done,
1085 	/*
1086 	 * Device Management API
1087 	 */
1088 	.flow_get_eth_dev = flow_get_eth_dev,
1089 	/*
1090 	 * NT Flow API
1091 	 */
1092 	.flow_create = flow_create,
1093 	.flow_destroy = flow_destroy,
1094 	.flow_flush = flow_flush,
1095 	.flow_dev_dump = flow_dev_dump,
1096 	.flow_get_flm_stats = flow_get_flm_stats,
1097 	.flow_get_aged_flows = flow_get_aged_flows,
1098 
1099 	/*
1100 	 * NT Flow asynchronous operations API
1101 	 */
1102 	.flow_info_get = flow_info_get,
1103 	.flow_configure = flow_configure,
1104 
1105 	/*
1106 	 * Other
1107 	 */
1108 	.hw_mod_hsh_rcp_flush = hw_mod_hsh_rcp_flush,
1109 	.flow_nic_set_hasher_fields = flow_nic_set_hasher_fields,
1110 };
1111 
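/* Register this flow filter implementation with the ntnic module registry. */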
1112 void init_flow_filter(void)
1113 {
1114 	register_flow_filter_ops(&ops);
1115 }
1116