/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#include "flow_api_engine.h"
#include "flow_api_nic_setup.h"
#include "ntnic_mod_reg.h"

#include "flow_api.h"
#include "flow_filter.h"

const char *dbg_res_descr[] = {
	[RES_QUEUE] = "RES_QUEUE",
	[RES_CAT_CFN] = "RES_CAT_CFN",
	[RES_CAT_COT] = "RES_CAT_COT",
	[RES_CAT_EXO] = "RES_CAT_EXO",
	[RES_CAT_LEN] = "RES_CAT_LEN",
	[RES_KM_FLOW_TYPE] = "RES_KM_FLOW_TYPE",
	[RES_KM_CATEGORY] = "RES_KM_CATEGORY",
	[RES_HSH_RCP] = "RES_HSH_RCP",
	[RES_PDB_RCP] = "RES_PDB_RCP",
	[RES_QSL_RCP] = "RES_QSL_RCP",
	[RES_QSL_QST] = "RES_QSL_QST",
	[RES_SLC_LR_RCP] = "RES_SLC_LR_RCP",
	[RES_FLM_FLOW_TYPE] = "RES_FLM_FLOW_TYPE",
	[RES_FLM_RCP] = "RES_FLM_RCP",
	[RES_TPE_RCP] = "RES_TPE_RCP",
	[RES_TPE_EXT] = "RES_TPE_EXT",
	[RES_TPE_RPL] = "RES_TPE_RPL",
	[RES_SCRUB_RCP] = "RES_SCRUB_RCP",
	[RES_COUNT] = "RES_COUNT",
	[RES_INVALID] = "RES_INVALID"
};

static struct flow_nic_dev *dev_base;
static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER;

/*
 * Error handling
 */

static const struct {
	const char *message;
} err_msg[] = {
	/* 00 */ { "Operation successfully completed" },
	/* 01 */ { "Operation failed" },
	/* 02 */ { "Memory allocation failed" },
	/* 03 */ { "Too many output destinations" },
	/* 04 */ { "Too many output queues for RSS" },
	/* 05 */ { "The VLAN TPID specified is not supported" },
	/* 06 */ { "The VxLan Push header specified is not accepted" },
	/* 07 */ { "While interpreting VxLan Pop action, could not find a destination port" },
	/* 08 */ { "Failed in creating a HW-internal VTEP port" },
	/* 09 */ { "Too many VLAN tag matches" },
	/* 10 */ { "IPv6 invalid header specified" },
	/* 11 */ { "Too many tunnel ports. HW limit reached" },
	/* 12 */ { "Unknown or unsupported flow match element received" },
	/* 13 */ { "Match failed because of HW limitations" },
	/* 14 */ { "Match failed because of HW resource limitations" },
	/* 15 */ { "Match failed because of too complex element definitions" },
	/* 16 */ { "Action failed due to too many output destinations" },
	/* 17 */ { "Action Output failed due to HW resource exhaustion" },
	/* 18 */ { "Push Tunnel Header action cannot output to multiple destination queues" },
	/* 19 */ { "Inline action HW resource exhaustion" },
	/* 20 */ { "Action retransmit/recirculate HW resource exhaustion" },
	/* 21 */ { "Flow counter HW resource exhaustion" },
	/* 22 */ { "Internal HW resource exhaustion to handle Actions" },
	/* 23 */ { "Internal HW QSL compare failed" },
	/* 24 */ { "Internal CAT CFN reuse failed" },
	/* 25 */ { "Match variations too complex" },
	/* 26 */ { "Match failed because of CAM/TCAM full" },
	/* 27 */ { "Internal creation of a tunnel end point port failed" },
	/* 28 */ { "Unknown or unsupported flow action received" },
	/* 29 */ { "Removing flow failed" },
};

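/*
 * Usage sketch (illustrative only, not called in this file): a hypothetical
 * action parser would report a failure back to the caller like this. The
 * enum constant ERR_TOO_MANY_OUTPUT_DESTINATIONS is assumed here to be the
 * flow_nic_err_msg_e value for message 03 above.
 *
 *	if (num_dest > MAX_OUTPUT_DEST) {
 *		flow_nic_set_error(ERR_TOO_MANY_OUTPUT_DESTINATIONS, error);
 *		return -1;
 *	}
 *
 * On success paths, ERR_SUCCESS sets error->type to RTE_FLOW_ERROR_TYPE_NONE.
 */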
void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct rte_flow_error *error)
{
	assert(msg < ERR_MSG_NO_MSG);

	if (error) {
		error->message = err_msg[msg].message;
		error->type = (msg == ERR_SUCCESS) ? RTE_FLOW_ERROR_TYPE_NONE :
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
	}
}

/*
 * Resources
 */

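/*
 * Allocate the first free element of a resource type, scanning the bitmap in
 * steps of "alignment". A sketch of a caller (assuming alignment 1, i.e. any
 * free index is acceptable):
 *
 *	int idx = flow_nic_alloc_resource(ndev, RES_QSL_RCP, 1);
 *	if (idx < 0)
 *		return -1;	// resource type exhausted
 *
 * The returned index has its reference count set to 1.
 */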
int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
	uint32_t alignment)
{
	for (unsigned int i = 0; i < ndev->res[res_type].resource_count; i += alignment) {
		if (!flow_nic_is_resource_used(ndev, res_type, i)) {
			flow_nic_mark_resource_used(ndev, res_type, i);
			ndev->res[res_type].ref[i] = 1;
			return i;
		}
	}

	return -1;
}

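/*
 * Allocate "num" contiguous elements of a resource type, first-fit from an
 * alignment-stepped start index. A sketch of a caller needing, e.g., four
 * consecutive RES_QSL_QST entries starting at a 4-aligned index (values
 * chosen for illustration only):
 *
 *	int base = flow_nic_alloc_resource_config(ndev, RES_QSL_QST, 4, 4);
 *	if (base < 0)
 *		return -1;	// no contiguous run of 4 free entries
 *
 * All "num" entries are marked used with a reference count of 1.
 */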
int flow_nic_alloc_resource_config(struct flow_nic_dev *ndev, enum res_type_e res_type,
	unsigned int num, uint32_t alignment)
{
	unsigned int idx_offs;

	for (unsigned int res_idx = 0; res_idx < ndev->res[res_type].resource_count - (num - 1);
		res_idx += alignment) {
		if (!flow_nic_is_resource_used(ndev, res_type, res_idx)) {
			for (idx_offs = 1; idx_offs < num; idx_offs++)
				if (flow_nic_is_resource_used(ndev, res_type, res_idx + idx_offs))
					break;

			if (idx_offs < num)
				continue;

			/* found a contiguous run of "num" res_type elements - allocate them */
			for (idx_offs = 0; idx_offs < num; idx_offs++) {
				flow_nic_mark_resource_used(ndev, res_type, res_idx + idx_offs);
				ndev->res[res_type].ref[res_idx + idx_offs] = 1;
			}

			return res_idx;
		}
	}

	return -1;
}

void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int idx)
{
	flow_nic_mark_resource_unused(ndev, res_type, idx);
}

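/*
 * Reference counting: flow_nic_ref_resource() adds a reference to an already
 * allocated index and returns -1 on counter saturation;
 * flow_nic_deref_resource() drops one reference and frees the index when the
 * count reaches zero. A sketch of the intended pairing (hypothetical caller):
 *
 *	if (flow_nic_ref_resource(ndev, RES_HSH_RCP, idx) == 0) {
 *		... share the recipe ...
 *		flow_nic_deref_resource(ndev, RES_HSH_RCP, idx);
 *	}
 */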
int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int index)
{
	NT_LOG(DBG, FILTER, "Reference resource %s idx %i (before ref cnt %i)",
		dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
	assert(flow_nic_is_resource_used(ndev, res_type, index));

	if (ndev->res[res_type].ref[index] == (uint32_t)-1)
		return -1;

	ndev->res[res_type].ref[index]++;
	return 0;
}

int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int index)
{
	NT_LOG(DBG, FILTER, "De-reference resource %s idx %i (before ref cnt %i)",
		dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
	assert(flow_nic_is_resource_used(ndev, res_type, index));
	assert(ndev->res[res_type].ref[index]);
	/* deref */
	ndev->res[res_type].ref[index]--;

	if (!ndev->res[res_type].ref[index])
		flow_nic_free_resource(ndev, res_type, index);

	return !!ndev->res[res_type].ref[index];	/* if 0, the resource has been freed */
}

/*
 * Nic port/adapter lookup
 */

static struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port)
{
	struct flow_nic_dev *nic_dev = dev_base;

	while (nic_dev) {
		if (nic_dev->adapter_no == adapter_no)
			break;

		nic_dev = nic_dev->next;
	}

	if (!nic_dev)
		return NULL;

	struct flow_eth_dev *dev = nic_dev->eth_base;

	while (dev) {
		if (port == dev->port)
			return dev;

		dev = dev->next;
	}

	return NULL;
}

static struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no)
{
	struct flow_nic_dev *ndev = dev_base;

	while (ndev) {
		if (adapter_no == ndev->adapter_no)
			break;

		ndev = ndev->next;
	}

	return ndev;
}

/*
 * Flow API
 */

static struct flow_handle *flow_create(struct flow_eth_dev *dev,
	const struct rte_flow_attr *attr,
	uint16_t forced_vlan_vid,
	uint16_t caller_id,
	const struct rte_flow_item item[],
	const struct rte_flow_action action[],
	struct rte_flow_error *error)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL) {
		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
		return NULL;
	}

	return profile_inline_ops->flow_create_profile_inline(dev, attr,
		forced_vlan_vid, caller_id, item, action, error);
}

static int flow_destroy(struct flow_eth_dev *dev,
	struct flow_handle *flow, struct rte_flow_error *error)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL) {
		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
		return -1;
	}

	return profile_inline_ops->flow_destroy_profile_inline(dev, flow, error);
}

/*
 * Device Management API
 */

static void nic_insert_eth_port_dev(struct flow_nic_dev *ndev, struct flow_eth_dev *dev)
{
	dev->next = ndev->eth_base;
	ndev->eth_base = dev;
}

static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev, struct flow_eth_dev *eth_dev)
{
	struct flow_eth_dev *dev = ndev->eth_base, *prev = NULL;

	while (dev) {
		if (dev == eth_dev) {
			if (prev)
				prev->next = dev->next;
			else
				ndev->eth_base = dev->next;

			return 0;
		}

		prev = dev;
		dev = dev->next;
	}

	return -1;
}

static void flow_ndev_reset(struct flow_nic_dev *ndev)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL) {
		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
		return;
	}

	/* Delete all eth-port devices created on this NIC device */
	while (ndev->eth_base)
		flow_delete_eth_dev(ndev->eth_base);

	/* Error check */
	while (ndev->flow_base) {
		NT_LOG(ERR, FILTER,
			"ERROR: Flows still defined but all eth-ports deleted. Flow %p",
			ndev->flow_base);

		profile_inline_ops->flow_destroy_profile_inline(ndev->flow_base->dev,
			ndev->flow_base, NULL);
	}

	profile_inline_ops->done_flow_management_of_ndev_profile_inline(ndev);

	km_free_ndev_resource_management(&ndev->km_res_handle);
	kcc_free_ndev_resource_management(&ndev->kcc_res_handle);

	ndev->flow_unique_id_counter = 0;

#ifdef FLOW_DEBUG
	/*
	 * Check that all resources default-allocated for this NIC device have
	 * been released. This is not strictly needed, since the bitmaps are
	 * freed shortly afterwards anyway; therefore the check runs only in
	 * debug mode.
	 */
	NT_LOG(DBG, FILTER, "Delete NIC DEV Adapter %i", ndev->adapter_no);

	for (unsigned int i = 0; i < RES_COUNT; i++) {
		int err = 0;
		NT_LOG(DBG, FILTER, "RES state for: %s", dbg_res_descr[i]);

		for (unsigned int ii = 0; ii < ndev->res[i].resource_count; ii++) {
			int ref = ndev->res[i].ref[ii];
			int used = flow_nic_is_resource_used(ndev, i, ii);

			if (ref || used) {
				NT_LOG(DBG, FILTER, "  [%i]: ref cnt %i, used %i", ii, ref,
					used);
				err = 1;
			}
		}

		if (err)
			NT_LOG(DBG, FILTER, "ERROR - some resources not freed");
	}
#endif
}

int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL) {
		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
		return -1;
	}

	struct flow_nic_dev *ndev = eth_dev->ndev;

	if (!ndev) {
		/* Error: invalid NIC device */
		return -1;
	}

	NT_LOG(DBG, FILTER, "Delete eth-port device %p, port %i", eth_dev, eth_dev->port);

#ifdef FLOW_DEBUG
	ndev->be.iface->set_debug_mode(ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_WRITE);
#endif

	/* delete all created flows from this device */
	pthread_mutex_lock(&ndev->mtx);

	struct flow_handle *flow = ndev->flow_base;

	while (flow) {
		if (flow->dev == eth_dev) {
			struct flow_handle *flow_next = flow->next;
			profile_inline_ops->flow_destroy_locked_profile_inline(eth_dev, flow,
				NULL);
			flow = flow_next;
		} else {
			flow = flow->next;
		}
	}

	/*
	 * Remove the unmatched-packet queue, if set up in QSL;
	 * clear the exception queue setting in QSL UNM.
	 */
	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port, 0);
	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 0);
	hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1);

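	/*
	 * Disable this eth_dev's RX queues in QSL QEN. The QEN registers pack
	 * four queue-enable bits per entry, hence the queue_id / 4 entry index
	 * and the (queue_id % 4) bit position below.
	 */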
	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
		for (int i = 0; i < eth_dev->num_queues; ++i) {
			uint32_t qen_value = 0;
			uint32_t queue_id = (uint32_t)eth_dev->rx_queue[i].hw_id;

			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN, queue_id / 4, &qen_value);
			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN, queue_id / 4,
				qen_value & ~(1U << (queue_id % 4)));
			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
		}
	}

#ifdef FLOW_DEBUG
	ndev->be.iface->set_debug_mode(ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
#endif

	/* take eth_dev out of ndev list */
	if (nic_remove_eth_port_dev(ndev, eth_dev) != 0)
		NT_LOG(ERR, FILTER, "ERROR: eth_dev %p not found", eth_dev);

	pthread_mutex_unlock(&ndev->mtx);

	/* free eth_dev */
	free(eth_dev);

	return 0;
}

/*
 * Flow API NIC Setup
 * Flow backend creation function - register and initialize common backend API to FPA modules
 */

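/*
 * A single allocation backs both bookkeeping structures of a resource type:
 * the allocation bitmap (BIT_CONTAINER_8_ALIGN(count) bytes) followed by one
 * uint32_t reference counter per element. Schematically:
 *
 *	alloc_bm: [ bitmap bytes ............ ][ ref[0] ref[1] ... ref[count-1] ]
 *	                                        ^-- ndev->res[res_type].ref
 */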
static int init_resource_elements(struct flow_nic_dev *ndev, enum res_type_e res_type,
	uint32_t count)
{
	assert(ndev->res[res_type].alloc_bm == NULL);
	/* allocate bitmap and ref counter */
	ndev->res[res_type].alloc_bm =
		calloc(1, BIT_CONTAINER_8_ALIGN(count) + count * sizeof(uint32_t));

	if (ndev->res[res_type].alloc_bm) {
		ndev->res[res_type].ref =
			(uint32_t *)&ndev->res[res_type].alloc_bm[BIT_CONTAINER_8_ALIGN(count)];
		ndev->res[res_type].resource_count = count;
		return 0;
	}

	return -1;
}

static void done_resource_elements(struct flow_nic_dev *ndev, enum res_type_e res_type)
{
	assert(ndev);

	if (ndev->res[res_type].alloc_bm)
		free(ndev->res[res_type].alloc_bm);
}

static void list_insert_flow_nic(struct flow_nic_dev *ndev)
{
	pthread_mutex_lock(&base_mtx);
	ndev->next = dev_base;
	dev_base = ndev;
	pthread_mutex_unlock(&base_mtx);
}

static int list_remove_flow_nic(struct flow_nic_dev *ndev)
{
	pthread_mutex_lock(&base_mtx);
	struct flow_nic_dev *nic_dev = dev_base, *prev = NULL;

	while (nic_dev) {
		if (nic_dev == ndev) {
			if (prev)
				prev->next = nic_dev->next;
			else
				dev_base = nic_dev->next;

			pthread_mutex_unlock(&base_mtx);
			return 0;
		}

		prev = nic_dev;
		nic_dev = nic_dev->next;
	}

	pthread_mutex_unlock(&base_mtx);
	return -1;
}

/*
 * adapter_no       physical adapter no
 * port_no          local port no
 * alloc_rx_queues  number of rx-queues to allocate for this eth_dev
 */
static struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no, uint32_t port_id,
	int alloc_rx_queues, struct flow_queue_id_s queue_ids[],
	int *rss_target_id, enum flow_eth_dev_profile flow_profile,
	uint32_t exception_path)
{
	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();

	if (profile_inline_ops == NULL)
		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);

	int i;
	struct flow_eth_dev *eth_dev = NULL;

	NT_LOG(DBG, FILTER,
		"Get eth-port adapter %i, port %i, port_id %u, rx queues %i, profile %i",
		adapter_no, port_no, port_id, alloc_rx_queues, flow_profile);

	if (MAX_OUTPUT_DEST < FLOW_MAX_QUEUES) {
		assert(0);
		NT_LOG(ERR, FILTER,
			"ERROR: Internal array for multiple queues too small for API");
	}

	pthread_mutex_lock(&base_mtx);
	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);

	if (!ndev) {
		/* Error - no flow API found on the specified adapter */
		NT_LOG(ERR, FILTER, "ERROR: no flow interface registered for adapter %d",
			adapter_no);
		pthread_mutex_unlock(&base_mtx);
		return NULL;
	}

	if (ndev->ports < ((uint16_t)port_no + 1)) {
		NT_LOG(ERR, FILTER, "ERROR: port exceeds supported port range for adapter");
		pthread_mutex_unlock(&base_mtx);
		return NULL;
	}

	/* queue 0 is the exception queue, so one extra queue is allowed */
	if ((alloc_rx_queues - 1) > FLOW_MAX_QUEUES) {
		NT_LOG(ERR, FILTER,
			"ERROR: Exceeds supported number of rx queues per eth device");
		pthread_mutex_unlock(&base_mtx);
		return NULL;
	}

	/* don't accept multiple eth_devs on the same NIC and the same port */
	eth_dev = nic_and_port_to_eth_dev(adapter_no, port_no);

	if (eth_dev) {
		NT_LOG(DBG, FILTER, "Re-opening existing NIC port device: NIC DEV: %i Port %i",
			adapter_no, port_no);
		pthread_mutex_unlock(&base_mtx);
		flow_delete_eth_dev(eth_dev);
		eth_dev = NULL;
	}

	eth_dev = calloc(1, sizeof(struct flow_eth_dev));

	if (!eth_dev) {
		NT_LOG(ERR, FILTER, "ERROR: calloc failed");
		goto err_exit1;
	}

	pthread_mutex_lock(&ndev->mtx);

	eth_dev->ndev = ndev;
	eth_dev->port = port_no;
	eth_dev->port_id = port_id;

	/* The first time, the NIC is initialized */
	if (!ndev->flow_mgnt_prepared) {
		ndev->flow_profile = flow_profile;

		/* Initialize modules if needed - recipe 0 is used as no-match and must be set up */
		if (profile_inline_ops != NULL &&
			profile_inline_ops->initialize_flow_management_of_ndev_profile_inline(ndev))
			goto err_exit0;

	} else {
		/* check that the same flow profile is requested, otherwise fail */
		if (ndev->flow_profile != flow_profile) {
			NT_LOG(ERR, FILTER,
				"ERROR: Different flow types requested on same NIC device. Not supported.");
			goto err_exit0;
		}
	}

	/* Allocate the requested queues in HW for this dev */

	for (i = 0; i < alloc_rx_queues; i++) {
		eth_dev->rx_queue[i] = queue_ids[i];

		if (i == 0 && (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE && exception_path)) {
			/*
			 * Init QSL UNM - unmatched - redirects otherwise discarded
			 * packets in QSL
			 */
			if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port,
				eth_dev->rx_queue[0].hw_id) < 0)
				goto err_exit0;

			if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 1) < 0)
				goto err_exit0;

			if (hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1) < 0)
				goto err_exit0;
		}

		eth_dev->num_queues++;
	}

	eth_dev->rss_target_id = -1;

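	/*
	 * Enable the allocated RX queues in QSL QEN; this is the mirror image
	 * of the disable sequence in flow_delete_eth_dev() (four enable bits
	 * are packed per QEN entry).
	 */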
	if (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
		for (i = 0; i < eth_dev->num_queues; i++) {
			uint32_t qen_value = 0;
			uint32_t queue_id = (uint32_t)eth_dev->rx_queue[i].hw_id;

			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN, queue_id / 4, &qen_value);
			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN, queue_id / 4,
				qen_value | (1U << (queue_id % 4)));
			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
		}
	}

	*rss_target_id = eth_dev->rss_target_id;

	nic_insert_eth_port_dev(ndev, eth_dev);

	pthread_mutex_unlock(&ndev->mtx);
	pthread_mutex_unlock(&base_mtx);
	return eth_dev;

err_exit0:
	pthread_mutex_unlock(&ndev->mtx);
	pthread_mutex_unlock(&base_mtx);

err_exit1:
	if (eth_dev)
		free(eth_dev);

#ifdef FLOW_DEBUG
	ndev->be.iface->set_debug_mode(ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
#endif

	NT_LOG(DBG, FILTER, "ERR in %s", __func__);
	return NULL;	/* Error exit */
}

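/*
 * Typical adapter bring-up order, as a sketch (callers and error handling
 * omitted): flow_api_create() registers the backend and builds the resource
 * managers, flow_get_eth_dev() then attaches eth-port devices with their RX
 * queues, and flow_api_done() tears everything down again:
 *
 *	struct flow_nic_dev *ndev = flow_api_create(adapter_no, be_if, be_dev);
 *	struct flow_eth_dev *edev = flow_get_eth_dev(adapter_no, port, port_id,
 *		nb_queues, queue_ids, &rss_target_id, FLOW_ETH_DEV_PROFILE_INLINE, 1);
 *	...
 *	flow_api_done(ndev);
 */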
struct flow_nic_dev *flow_api_create(uint8_t adapter_no, const struct flow_api_backend_ops *be_if,
	void *be_dev)
{
	(void)adapter_no;

	if (!be_if || be_if->version != 1) {
		NT_LOG(DBG, FILTER, "ERR: %s", __func__);
		return NULL;
	}

	struct flow_nic_dev *ndev = calloc(1, sizeof(struct flow_nic_dev));

	if (!ndev) {
		NT_LOG(ERR, FILTER, "ERROR: calloc failed");
		return NULL;
	}

	/*
	 * To dump module initialization writes, use
	 * FLOW_BACKEND_DEBUG_MODE_WRITE
	 * and remember to set it back to ...NONE afterwards
	 */
	be_if->set_debug_mode(be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);

	if (flow_api_backend_init(&ndev->be, be_if, be_dev) != 0)
		goto err_exit;

	ndev->adapter_no = adapter_no;

	ndev->ports = (uint16_t)((ndev->be.num_rx_ports > 256) ? 256 : ndev->be.num_rx_ports);

	/*
	 * Free resources in the NIC must be managed by this module.
	 * Get the resource sizes and create the resource manager elements.
	 */
	if (init_resource_elements(ndev, RES_QUEUE, ndev->be.max_queues))
		goto err_exit;

	if (init_resource_elements(ndev, RES_CAT_CFN, ndev->be.cat.nb_cat_funcs))
		goto err_exit;

	if (init_resource_elements(ndev, RES_CAT_COT, ndev->be.max_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_CAT_EXO, ndev->be.cat.nb_pm_ext))
		goto err_exit;

	if (init_resource_elements(ndev, RES_CAT_LEN, ndev->be.cat.nb_len))
		goto err_exit;

	if (init_resource_elements(ndev, RES_KM_FLOW_TYPE, ndev->be.cat.nb_flow_types))
		goto err_exit;

	if (init_resource_elements(ndev, RES_KM_CATEGORY, ndev->be.km.nb_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_HSH_RCP, ndev->be.hsh.nb_rcp))
		goto err_exit;

	if (init_resource_elements(ndev, RES_PDB_RCP, ndev->be.pdb.nb_pdb_rcp_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_QSL_RCP, ndev->be.qsl.nb_rcp_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_QSL_QST, ndev->be.qsl.nb_qst_entries))
		goto err_exit;

	if (init_resource_elements(ndev, RES_SLC_LR_RCP, ndev->be.max_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_FLM_FLOW_TYPE, ndev->be.cat.nb_flow_types))
		goto err_exit;

	if (init_resource_elements(ndev, RES_FLM_RCP, ndev->be.flm.nb_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_TPE_RCP, ndev->be.tpe.nb_rcp_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_TPE_EXT, ndev->be.tpe.nb_rpl_ext_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_TPE_RPL, ndev->be.tpe.nb_rpl_depth))
		goto err_exit;

	if (init_resource_elements(ndev, RES_SCRUB_RCP, ndev->be.flm.nb_scrub_profiles))
		goto err_exit;

	/* may need IPF, COR */

	/* check that all defined resource types have been initialized */
	for (int i = 0; i < RES_COUNT; i++)
		assert(ndev->res[i].alloc_bm);

	pthread_mutex_init(&ndev->mtx, NULL);
	list_insert_flow_nic(ndev);

	return ndev;

err_exit:

	if (ndev)
		flow_api_done(ndev);

	NT_LOG(DBG, FILTER, "ERR: %s", __func__);
	return NULL;
}

746 
747 int flow_api_done(struct flow_nic_dev *ndev)
748 {
749 	NT_LOG(DBG, FILTER, "FLOW API DONE");
750 
751 	if (ndev) {
752 		flow_ndev_reset(ndev);
753 
754 		/* delete resource management allocations for this ndev */
755 		for (int i = 0; i < RES_COUNT; i++)
756 			done_resource_elements(ndev, i);
757 
758 		flow_api_backend_done(&ndev->be);
759 		list_remove_flow_nic(ndev);
760 		free(ndev);
761 	}
762 
763 	return 0;
764 }
765 
766 void *flow_api_get_be_dev(struct flow_nic_dev *ndev)
767 {
768 	if (!ndev) {
769 		NT_LOG(DBG, FILTER, "ERR: %s", __func__);
770 		return NULL;
771 	}
772 
773 	return ndev->be.be_dev;
774 }
775 
776 static const struct flow_filter_ops ops = {
777 	.flow_filter_init = flow_filter_init,
778 	.flow_filter_done = flow_filter_done,
779 	/*
780 	 * Device Management API
781 	 */
782 	.flow_get_eth_dev = flow_get_eth_dev,
783 	/*
784 	 * NT Flow API
785 	 */
786 	.flow_create = flow_create,
787 	.flow_destroy = flow_destroy,
788 };
789 
790 void init_flow_filter(void)
791 {
792 	register_flow_filter_ops(&ops);
793 }
794