xref: /dpdk/drivers/net/bnxt/tf_ulp/bnxt_ulp.c (revision f8dbaebbf1c9efcbb2e2354b341ed62175466a57)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2021 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <rte_log.h>
7 #include <rte_malloc.h>
8 #include <rte_flow.h>
9 #include <rte_flow_driver.h>
10 #include <rte_tailq.h>
11 #include <rte_spinlock.h>
12 
13 #include "bnxt.h"
14 #include "bnxt_ulp.h"
15 #include "bnxt_tf_common.h"
16 #include "tf_core.h"
17 #include "tf_ext_flow_handle.h"
18 
19 #include "ulp_template_db_enum.h"
20 #include "ulp_template_struct.h"
21 #include "ulp_mark_mgr.h"
22 #include "ulp_fc_mgr.h"
23 #include "ulp_flow_db.h"
24 #include "ulp_mapper.h"
25 #include "ulp_port_db.h"
26 #include "ulp_tun.h"
27 #include "ulp_ha_mgr.h"
28 #include "bnxt_tf_pmd_shim.h"
29 
30 /* Linked list of all TF sessions. */
31 STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list =
32 			STAILQ_HEAD_INITIALIZER(bnxt_ulp_session_list);
33 
34 /* Mutex to synchronize bnxt_ulp_session_list operations. */
35 static pthread_mutex_t bnxt_ulp_global_mutex = PTHREAD_MUTEX_INITIALIZER;
36 
37 /* Spin lock to protect context global list */
38 uint32_t bnxt_ulp_ctxt_lock_created;
39 rte_spinlock_t bnxt_ulp_ctxt_lock;
40 TAILQ_HEAD(cntx_list_entry_list, ulp_context_list_entry);
41 static struct cntx_list_entry_list ulp_cntx_list =
42 	TAILQ_HEAD_INITIALIZER(ulp_cntx_list);
43 
44 /* Static function declarations */
45 static int32_t bnxt_ulp_cntxt_list_init(void);
46 static int32_t bnxt_ulp_cntxt_list_add(struct bnxt_ulp_context *ulp_ctx);
47 static void bnxt_ulp_cntxt_list_del(struct bnxt_ulp_context *ulp_ctx);
48 
49 /*
50  * Allow the deletion of context only for the bnxt device that
51  * created the session.
52  */
53 bool
54 ulp_ctx_deinit_allowed(struct bnxt_ulp_context *ulp_ctx)
55 {
56 	if (!ulp_ctx || !ulp_ctx->cfg_data)
57 		return false;
58 
59 	if (!ulp_ctx->cfg_data->ref_cnt) {
60 		BNXT_TF_DBG(DEBUG, "ulp ctx shall initiate deinit\n");
61 		return true;
62 	}
63 
64 	return false;
65 }
66 
67 static int32_t
68 bnxt_ulp_devid_get(struct bnxt *bp,
69 		   enum bnxt_ulp_device_id  *ulp_dev_id)
70 {
71 	if (BNXT_CHIP_P5(bp)) {
72 		*ulp_dev_id = BNXT_ULP_DEVICE_ID_THOR;
73 		return 0;
74 	}
75 
76 	if (BNXT_STINGRAY(bp))
77 		*ulp_dev_id = BNXT_ULP_DEVICE_ID_STINGRAY;
78 	else
79 		/* Assuming Whitney */
80 		*ulp_dev_id = BNXT_ULP_DEVICE_ID_WH_PLUS;
81 
82 	return 0;
83 }
84 
85 struct bnxt_ulp_app_capabilities_info *
86 bnxt_ulp_app_cap_list_get(uint32_t *num_entries)
87 {
88 	if (!num_entries)
89 		return NULL;
90 	*num_entries = BNXT_ULP_APP_CAP_TBL_MAX_SZ;
91 	return ulp_app_cap_info_list;
92 }
93 
94 static struct bnxt_ulp_resource_resv_info *
95 bnxt_ulp_app_resource_resv_list_get(uint32_t *num_entries)
96 {
97 	if (num_entries == NULL)
98 		return NULL;
99 	*num_entries = BNXT_ULP_APP_RESOURCE_RESV_LIST_MAX_SZ;
100 	return ulp_app_resource_resv_list;
101 }
102 
103 struct bnxt_ulp_resource_resv_info *
104 bnxt_ulp_resource_resv_list_get(uint32_t *num_entries)
105 {
106 	if (!num_entries)
107 		return NULL;
108 	*num_entries = BNXT_ULP_RESOURCE_RESV_LIST_MAX_SZ;
109 	return ulp_resource_resv_list;
110 }
111 
112 struct bnxt_ulp_glb_resource_info *
113 bnxt_ulp_app_glb_resource_info_list_get(uint32_t *num_entries)
114 {
115 	if (!num_entries)
116 		return NULL;
117 	*num_entries = BNXT_ULP_APP_GLB_RESOURCE_TBL_MAX_SZ;
118 	return ulp_app_glb_resource_tbl;
119 }
120 
121 static int32_t
122 bnxt_ulp_named_resources_calc(struct bnxt_ulp_context *ulp_ctx,
123 			      struct bnxt_ulp_glb_resource_info *info,
124 			      uint32_t num,
125 			      struct tf_session_resources *res)
126 {
127 	uint32_t dev_id = BNXT_ULP_DEVICE_ID_LAST, res_type, i;
128 	enum tf_dir dir;
129 	uint8_t app_id;
130 	int32_t rc = 0;
131 
132 	if (ulp_ctx == NULL || info == NULL || res == NULL || num == 0) {
133 		BNXT_TF_DBG(ERR, "Invalid parms to named resources calc.\n");
134 		return -EINVAL;
135 	}
136 
137 	rc = bnxt_ulp_cntxt_app_id_get(ulp_ctx, &app_id);
138 	if (rc) {
139 		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
140 		return -EINVAL;
141 	}
142 
143 	rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id);
144 	if (rc) {
145 		BNXT_TF_DBG(ERR, "Unable to get the dev id from ulp.\n");
146 		return -EINVAL;
147 	}
148 
149 	for (i = 0; i < num; i++) {
150 		if (dev_id != info[i].device_id || app_id != info[i].app_id)
151 			continue;
152 		dir = info[i].direction;
153 		res_type = info[i].resource_type;
154 
155 		switch (info[i].resource_func) {
156 		case BNXT_ULP_RESOURCE_FUNC_IDENTIFIER:
157 			res->ident_cnt[dir].cnt[res_type]++;
158 			break;
159 		case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE:
160 			res->tbl_cnt[dir].cnt[res_type]++;
161 			break;
162 		case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE:
163 			res->tcam_cnt[dir].cnt[res_type]++;
164 			break;
165 		case BNXT_ULP_RESOURCE_FUNC_EM_TABLE:
166 			res->em_cnt[dir].cnt[res_type]++;
167 			break;
168 		default:
169 			BNXT_TF_DBG(ERR, "Unknown resource func (0x%x)\n",
170 				    info[i].resource_func);
171 			continue;
172 		}
173 	}
174 
175 	return 0;
176 }
177 
178 static int32_t
179 bnxt_ulp_unnamed_resources_calc(struct bnxt_ulp_context *ulp_ctx,
180 				struct bnxt_ulp_resource_resv_info *info,
181 				uint32_t num,
182 				struct tf_session_resources *res)
183 {
184 	uint32_t dev_id, res_type, i;
185 	enum tf_dir dir;
186 	uint8_t app_id;
187 	int32_t rc = 0;
188 
189 	if (ulp_ctx == NULL || res == NULL || info == NULL || num == 0) {
190 		BNXT_TF_DBG(ERR, "Invalid arguments to get resources.\n");
191 		return -EINVAL;
192 	}
193 
194 	rc = bnxt_ulp_cntxt_app_id_get(ulp_ctx, &app_id);
195 	if (rc) {
196 		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
197 		return -EINVAL;
198 	}
199 
200 	rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id);
201 	if (rc) {
202 		BNXT_TF_DBG(ERR, "Unable to get the dev id from ulp.\n");
203 		return -EINVAL;
204 	}
205 
206 	for (i = 0; i < num; i++) {
207 		if (app_id != info[i].app_id || dev_id != info[i].device_id)
208 			continue;
209 		dir = info[i].direction;
210 		res_type = info[i].resource_type;
211 
212 		switch (info[i].resource_func) {
213 		case BNXT_ULP_RESOURCE_FUNC_IDENTIFIER:
214 			res->ident_cnt[dir].cnt[res_type] = info[i].count;
215 			break;
216 		case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE:
217 			res->tbl_cnt[dir].cnt[res_type] = info[i].count;
218 			break;
219 		case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE:
220 			res->tcam_cnt[dir].cnt[res_type] = info[i].count;
221 			break;
222 		case BNXT_ULP_RESOURCE_FUNC_EM_TABLE:
223 			res->em_cnt[dir].cnt[res_type] = info[i].count;
224 			break;
225 		default:
226 			break;
227 		}
228 	}
229 	return 0;
230 }
231 
232 static int32_t
233 bnxt_ulp_tf_resources_get(struct bnxt_ulp_context *ulp_ctx,
234 			  struct tf_session_resources *res)
235 {
236 	struct bnxt_ulp_resource_resv_info *unnamed = NULL;
237 	uint32_t unum;
238 	int32_t rc = 0;
239 
240 	if (ulp_ctx == NULL || res == NULL) {
241 		BNXT_TF_DBG(ERR, "Invalid arguments to get resources.\n");
242 		return -EINVAL;
243 	}
244 
245 	unnamed = bnxt_ulp_resource_resv_list_get(&unum);
246 	if (unnamed == NULL) {
247 		BNXT_TF_DBG(ERR, "Unable to get resource resv list.\n");
248 		return -EINVAL;
249 	}
250 
251 	rc = bnxt_ulp_unnamed_resources_calc(ulp_ctx, unnamed, unum, res);
252 	if (rc)
253 		BNXT_TF_DBG(ERR, "Unable to calc resources for session.\n");
254 
255 	return rc;
256 }
257 
258 static int32_t
259 bnxt_ulp_tf_shared_session_resources_get(struct bnxt_ulp_context *ulp_ctx,
260 					 struct tf_session_resources *res)
261 {
262 	struct bnxt_ulp_resource_resv_info *unnamed;
263 	struct bnxt_ulp_glb_resource_info *named;
264 	uint32_t unum, nnum;
265 	int32_t rc;
266 
267 	if (ulp_ctx == NULL || res == NULL) {
268 		BNXT_TF_DBG(ERR, "Invalid arguments to get resources.\n");
269 		return -EINVAL;
270 	}
271 
272 	/* Make sure the resources are zero before accumulating. */
273 	memset(res, 0, sizeof(struct tf_session_resources));
274 
275 	/*
276 	 * Shared resources are comprised of both named and unnamed resources.
277 	 * First get the unnamed counts, and then add the named to the result.
278 	 */
279 	/* Get the baseline counts */
280 	unnamed = bnxt_ulp_app_resource_resv_list_get(&unum);
281 	if (unnamed == NULL) {
282 		BNXT_TF_DBG(ERR, "Unable to get shared resource resv list.\n");
283 		return -EINVAL;
284 	}
285 	rc = bnxt_ulp_unnamed_resources_calc(ulp_ctx, unnamed, unum, res);
286 	if (rc) {
287 		BNXT_TF_DBG(ERR, "Unable to calc resources for shared session.\n");
288 		return -EINVAL;
289 	}
290 
291 	/* Get the named list and add the totals */
292 	named = bnxt_ulp_app_glb_resource_info_list_get(&nnum);
293 	if (named == NULL) {
294 		BNXT_TF_DBG(ERR, "Unable to get app global resource list\n");
295 		return -EINVAL;
296 	}
297 	rc = bnxt_ulp_named_resources_calc(ulp_ctx, named, nnum, res);
298 	if (rc)
299 		BNXT_TF_DBG(ERR, "Unable to calc named resources\n");
300 
301 	return rc;
302 }
303 
304 int32_t
305 bnxt_ulp_cntxt_app_caps_init(struct bnxt *bp,
306 			     uint8_t app_id, uint32_t dev_id)
307 {
308 	struct bnxt_ulp_app_capabilities_info *info;
309 	uint32_t num = 0;
310 	uint16_t i;
311 	bool found = false;
312 	struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx;
313 
314 	if (ULP_APP_DEV_UNSUPPORTED_ENABLED(ulp_ctx->cfg_data->ulp_flags)) {
315 		BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
316 			    app_id, dev_id);
317 		return -EINVAL;
318 	}
319 
320 	info = bnxt_ulp_app_cap_list_get(&num);
321 	if (!info || !num) {
322 		BNXT_TF_DBG(ERR, "Failed to get app capabilities.\n");
323 		return -EINVAL;
324 	}
325 
326 	for (i = 0; i < num; i++) {
327 		if (info[i].app_id != app_id || info[i].device_id != dev_id)
328 			continue;
329 		found = true;
330 		if (info[i].flags & BNXT_ULP_APP_CAP_SHARED_EN)
331 			ulp_ctx->cfg_data->ulp_flags |=
332 				BNXT_ULP_SHARED_SESSION_ENABLED;
333 		if (info[i].flags & BNXT_ULP_APP_CAP_HOT_UPGRADE_EN)
334 			ulp_ctx->cfg_data->ulp_flags |=
335 				BNXT_ULP_HIGH_AVAIL_ENABLED;
336 		if (info[i].flags & BNXT_ULP_APP_CAP_UNICAST_ONLY)
337 			ulp_ctx->cfg_data->ulp_flags |=
338 				BNXT_ULP_APP_UNICAST_ONLY;
339 		if (info[i].flags & BNXT_ULP_APP_CAP_SOCKET_DIRECT) {
340 			/* Enable socket direct only if MR is enabled in fw */
341 			if (BNXT_MULTIROOT_EN(bp)) {
342 				ulp_ctx->cfg_data->ulp_flags |=
343 					BNXT_ULP_APP_SOCKET_DIRECT;
344 				BNXT_TF_DBG(DEBUG,
345 					    "Socket Direct feature is enabled\n");
346 			}
347 		}
348 	}
349 	if (!found) {
350 		BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
351 			    app_id, dev_id);
352 		ulp_ctx->cfg_data->ulp_flags |= BNXT_ULP_APP_DEV_UNSUPPORTED;
353 		return -EINVAL;
354 	}
355 
356 	return 0;
357 }
358 
359 static void
360 ulp_ctx_shared_session_close(struct bnxt *bp,
361 			     struct bnxt_ulp_session_state *session)
362 {
363 	struct tf *tfp;
364 	int32_t rc;
365 
366 	if (!bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx))
367 		return;
368 
369 	tfp = bnxt_ulp_cntxt_shared_tfp_get(bp->ulp_ctx);
370 	if (!tfp) {
371 		/*
372 		 * Log it under debug since this is likely a case of the
373 		 * shared session not being created.  For example, a failed
374 		 * initialization.
375 		 */
376 		BNXT_TF_DBG(DEBUG, "Failed to get shared tfp on close.\n");
377 		return;
378 	}
379 	rc = tf_close_session(tfp);
380 	if (rc)
381 		BNXT_TF_DBG(ERR, "Failed to close the shared session rc=%d.\n",
382 			    rc);
383 	(void)bnxt_ulp_cntxt_shared_tfp_set(bp->ulp_ctx, NULL);
384 
385 	session->g_shared_tfp.session = NULL;
386 }
387 
388 static int32_t
389 ulp_ctx_shared_session_open(struct bnxt *bp,
390 			    struct bnxt_ulp_session_state *session)
391 {
392 	struct rte_eth_dev *ethdev = bp->eth_dev;
393 	struct tf_session_resources *resources;
394 	struct tf_open_session_parms parms;
395 	size_t copy_nbytes;
396 	uint32_t ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST;
397 	int32_t	rc = 0;
398 	uint8_t app_id;
399 
400 	/* only perform this if shared session is enabled. */
401 	if (!bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx))
402 		return 0;
403 
404 	memset(&parms, 0, sizeof(parms));
405 
406 	rc = rte_eth_dev_get_name_by_port(ethdev->data->port_id,
407 					  parms.ctrl_chan_name);
408 	if (rc) {
409 		BNXT_TF_DBG(ERR, "Invalid port %d, rc = %d\n",
410 			    ethdev->data->port_id, rc);
411 		return rc;
412 	}
413 	resources = &parms.resources;
414 
415 	/*
416 	 * Compute the space left in ctrl_chan_name, accounting for the bytes
417 	 * already used and one extra byte for the NUL terminator.
418 	 */
419 	copy_nbytes = sizeof(parms.ctrl_chan_name) -
420 		strlen(parms.ctrl_chan_name) - 1;
421 
422 	/*
423 	 * Build the ctrl_chan_name with shared token.
424 	 * When HA is enabled, the WC TCAM needs extra management by the core,
425 	 * so add the wc_tcam string to the control channel.
426 	 */
427 	if (bnxt_ulp_cntxt_ha_enabled(bp->ulp_ctx))
428 		strncat(parms.ctrl_chan_name, "-tf_shared-wc_tcam",
429 			copy_nbytes);
430 	else
431 		strncat(parms.ctrl_chan_name, "-tf_shared", copy_nbytes);
432 
433 	rc = bnxt_ulp_tf_shared_session_resources_get(bp->ulp_ctx, resources);
434 	if (rc)
435 		return rc;
436 
437 	rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);
438 	if (rc) {
439 		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
440 		return -EINVAL;
441 	}
442 
443 	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
444 	if (rc) {
445 		BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
446 		return rc;
447 	}
448 
449 	switch (ulp_dev_id) {
450 	case BNXT_ULP_DEVICE_ID_WH_PLUS:
451 		parms.device_type = TF_DEVICE_TYPE_WH;
452 		break;
453 	case BNXT_ULP_DEVICE_ID_STINGRAY:
454 		parms.device_type = TF_DEVICE_TYPE_SR;
455 		break;
456 	case BNXT_ULP_DEVICE_ID_THOR:
457 		parms.device_type = TF_DEVICE_TYPE_THOR;
458 		break;
459 	default:
460 		BNXT_TF_DBG(ERR, "Unable to determine dev for opening session.\n");
461 		return -EINVAL;
462 	}
463 
464 	parms.shadow_copy = true;
465 	parms.bp = bp;
466 	if (app_id == 0)
467 		parms.wc_num_slices = TF_WC_TCAM_2_SLICE_PER_ROW;
468 	else
469 		parms.wc_num_slices = TF_WC_TCAM_1_SLICE_PER_ROW;
470 
471 	/*
472 	 * Open the session here, but collect the resources during the
473 	 * mapper initialization.
474 	 */
475 	rc = tf_open_session(&bp->tfp_shared, &parms);
476 	if (rc)
477 		return rc;
478 
479 	if (parms.shared_session_creator)
480 		BNXT_TF_DBG(DEBUG, "Shared session creator.\n");
481 	else
482 		BNXT_TF_DBG(DEBUG, "Shared session attached.\n");
483 
484 	/* Save the shared session in global data */
485 	if (!session->g_shared_tfp.session)
486 		session->g_shared_tfp.session = bp->tfp_shared.session;
487 
488 	rc = bnxt_ulp_cntxt_shared_tfp_set(bp->ulp_ctx, &bp->tfp_shared);
489 	if (rc)
490 		BNXT_TF_DBG(ERR, "Failed to add shared tfp to ulp (%d)\n", rc);
491 
492 	return rc;
493 }
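
/*
 * Illustrative sketch only (not part of the driver): how the strncat()
 * bound used above is derived.  strncat() appends at most 'n' bytes plus a
 * terminating NUL, so the remaining room in ctrl_chan_name must be
 * sizeof(buf) - strlen(buf) - 1.  The buffer size and port name below are
 * hypothetical.
 */
#if 0	/* example only, never compiled */
static void
example_shared_ctrl_chan_name(void)
{
	char name[64] = "0000:01:00.0";			/* hypothetical port name */
	size_t room = sizeof(name) - strlen(name) - 1;	/* 64 - 12 - 1 = 51 bytes */

	/* Appends at most 'room' bytes and always NUL-terminates. */
	strncat(name, "-tf_shared", room);
}
#endif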
494 
495 static int32_t
496 ulp_ctx_shared_session_attach(struct bnxt *bp,
497 			      struct bnxt_ulp_session_state *session)
498 {
499 	int32_t rc = 0;
500 
501 	/* Simply return success if shared session not enabled */
502 	if (bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx)) {
503 		bp->tfp_shared.session = session->g_shared_tfp.session;
504 		rc = ulp_ctx_shared_session_open(bp, session);
505 	}
506 
507 	return rc;
508 }
509 
510 static void
511 ulp_ctx_shared_session_detach(struct bnxt *bp)
512 {
513 	if (bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx)) {
514 		if (bp->tfp_shared.session) {
515 			tf_close_session(&bp->tfp_shared);
516 			bp->tfp_shared.session = NULL;
517 		}
518 	}
519 }
520 
521 /*
522  * Initialize a ULP session.
523  * A ULP session contains all the resources needed to support rte_flow
524  * offloads. A session is initialized as part of rte_eth_device start.
525  * A single vswitch instance can have multiple uplinks, which means
526  * rte_eth_device start will be called for each of these devices.
527  * The ULP session manager ensures that a ULP session is initialized
528  * only once. Apart from this, it also initializes the MARK database,
529  * EEM table and flow database. The ULP session manager also maintains
530  * a list of all opened ULP sessions.
531  */
532 static int32_t
533 ulp_ctx_session_open(struct bnxt *bp,
534 		     struct bnxt_ulp_session_state *session)
535 {
536 	struct rte_eth_dev		*ethdev = bp->eth_dev;
537 	int32_t				rc = 0;
538 	struct tf_open_session_parms	params;
539 	struct tf_session_resources	*resources;
540 	uint32_t ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST;
541 	uint8_t app_id;
542 
543 	memset(&params, 0, sizeof(params));
544 
545 	rc = rte_eth_dev_get_name_by_port(ethdev->data->port_id,
546 					  params.ctrl_chan_name);
547 	if (rc) {
548 		BNXT_TF_DBG(ERR, "Invalid port %d, rc = %d\n",
549 			    ethdev->data->port_id, rc);
550 		return rc;
551 	}
552 
553 	params.shadow_copy = true;
554 
555 	rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);
556 	if (rc) {
557 		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
558 		return -EINVAL;
559 	}
560 
561 	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
562 	if (rc) {
563 		BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
564 		return rc;
565 	}
566 
567 	switch (ulp_dev_id) {
568 	case BNXT_ULP_DEVICE_ID_WH_PLUS:
569 		params.device_type = TF_DEVICE_TYPE_WH;
570 		break;
571 	case BNXT_ULP_DEVICE_ID_STINGRAY:
572 		params.device_type = TF_DEVICE_TYPE_SR;
573 		break;
574 	case BNXT_ULP_DEVICE_ID_THOR:
575 		params.device_type = TF_DEVICE_TYPE_THOR;
576 		break;
577 	default:
578 		BNXT_TF_DBG(ERR, "Unable to determine device for opening session.\n");
579 		return -EINVAL;
580 	}
581 
582 	resources = &params.resources;
583 	rc = bnxt_ulp_tf_resources_get(bp->ulp_ctx, resources);
584 	if (rc)
585 		return rc;
586 
587 	params.bp = bp;
588 	if (app_id == 0)
589 		params.wc_num_slices = TF_WC_TCAM_2_SLICE_PER_ROW;
590 	else
591 		params.wc_num_slices = TF_WC_TCAM_1_SLICE_PER_ROW;
592 
593 	rc = tf_open_session(&bp->tfp, &params);
594 	if (rc) {
595 		BNXT_TF_DBG(ERR, "Failed to open TF session - %s, rc = %d\n",
596 			    params.ctrl_chan_name, rc);
597 		return -EINVAL;
598 	}
599 	if (!session->session_opened) {
600 		session->session_opened = 1;
601 		session->g_tfp = rte_zmalloc("bnxt_ulp_session_tfp",
602 					     sizeof(struct tf), 0);
		if (session->g_tfp == NULL) {
			BNXT_TF_DBG(ERR, "Failed to allocate session tf data\n");
			tf_close_session(&bp->tfp);
			session->session_opened = 0;
			return -ENOMEM;
		}
603 		session->g_tfp->session = bp->tfp.session;
604 	}
605 	return rc;
606 }
607 
608 /*
609  * Close the ULP session.
610  * It takes the ulp context pointer.
611  */
612 static void
613 ulp_ctx_session_close(struct bnxt *bp,
614 		      struct bnxt_ulp_session_state *session)
615 {
616 	/* close the session in the hardware */
617 	if (session->session_opened)
618 		tf_close_session(&bp->tfp);
619 	session->session_opened = 0;
620 	rte_free(session->g_tfp);
621 	session->g_tfp = NULL;
622 }
623 
624 static void
625 bnxt_init_tbl_scope_parms(struct bnxt *bp,
626 			  struct tf_alloc_tbl_scope_parms *params)
627 {
628 	struct bnxt_ulp_device_params	*dparms;
629 	uint32_t dev_id;
630 	int rc;
631 
632 	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
633 	if (rc)
634 		/* TBD: For now, just use default. */
635 		dparms = NULL;
636 	else
637 		dparms = bnxt_ulp_device_params_get(dev_id);
638 
639 	/*
640 	 * Set the flush timer for EEM entries. The value is in 100ms intervals,
641 	 * so 100 is 10s.
642 	 */
643 	params->hw_flow_cache_flush_timer = 100;
644 
645 	if (!dparms) {
646 		params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
647 		params->rx_max_action_entry_sz_in_bits =
648 			BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
649 		params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
650 		params->rx_num_flows_in_k = BNXT_ULP_RX_NUM_FLOWS;
651 
652 		params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
653 		params->tx_max_action_entry_sz_in_bits =
654 			BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
655 		params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
656 		params->tx_num_flows_in_k = BNXT_ULP_TX_NUM_FLOWS;
657 	} else {
658 		params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
659 		params->rx_max_action_entry_sz_in_bits =
660 			BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
661 		params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
662 		params->rx_num_flows_in_k =
663 			dparms->ext_flow_db_num_entries / 1024;
664 
665 		params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
666 		params->tx_max_action_entry_sz_in_bits =
667 			BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
668 		params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
669 		params->tx_num_flows_in_k =
670 			dparms->ext_flow_db_num_entries / 1024;
671 	}
672 	BNXT_TF_DBG(INFO, "Table Scope initialized with %uK flows.\n",
673 		    params->rx_num_flows_in_k);
674 }
675 
676 /* Initialize Extended Exact Match host memory. */
677 static int32_t
678 ulp_eem_tbl_scope_init(struct bnxt *bp)
679 {
680 	struct tf_alloc_tbl_scope_parms params = {0};
681 	struct bnxt_ulp_device_params *dparms;
682 	enum bnxt_ulp_flow_mem_type mtype;
683 	uint32_t dev_id;
684 	int rc;
685 
686 	/* Get the device-specific number of flows that need to be supported. */
687 	if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
688 		BNXT_TF_DBG(ERR, "Invalid device id\n");
689 		return -EINVAL;
690 	}
691 
692 	dparms = bnxt_ulp_device_params_get(dev_id);
693 	if (!dparms) {
694 		BNXT_TF_DBG(ERR, "could not fetch the device params\n");
695 		return -ENODEV;
696 	}
697 
698 	if (bnxt_ulp_cntxt_mem_type_get(bp->ulp_ctx, &mtype))
699 		return -EINVAL;
700 	if (mtype != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
701 		BNXT_TF_DBG(INFO, "Table Scope alloc is not required\n");
702 		return 0;
703 	}
704 
705 	bnxt_init_tbl_scope_parms(bp, &params);
706 	rc = tf_alloc_tbl_scope(&bp->tfp, &params);
707 	if (rc) {
708 		BNXT_TF_DBG(ERR, "Unable to allocate eem table scope rc = %d\n",
709 			    rc);
710 		return rc;
711 	}
712 	rc = bnxt_ulp_cntxt_tbl_scope_id_set(bp->ulp_ctx, params.tbl_scope_id);
713 	if (rc) {
714 		BNXT_TF_DBG(ERR, "Unable to set table scope id\n");
715 		return rc;
716 	}
717 
718 	return 0;
719 }
720 
721 /* Free Extended Exact Match host memory */
722 static int32_t
723 ulp_eem_tbl_scope_deinit(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
724 {
725 	struct tf_free_tbl_scope_parms	params = {0};
726 	struct tf			*tfp;
727 	int32_t				rc = 0;
728 	struct bnxt_ulp_device_params *dparms;
729 	enum bnxt_ulp_flow_mem_type mtype;
730 	uint32_t dev_id;
731 
732 	if (!ulp_ctx || !ulp_ctx->cfg_data)
733 		return -EINVAL;
734 
735 	tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx, BNXT_ULP_SHARED_SESSION_NO);
736 	if (!tfp) {
737 		BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
738 		return -EINVAL;
739 	}
740 
741 	/* Get the device-specific number of flows that need to be supported. */
742 	if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
743 		BNXT_TF_DBG(ERR, "Invalid device id\n");
744 		return -EINVAL;
745 	}
746 
747 	dparms = bnxt_ulp_device_params_get(dev_id);
748 	if (!dparms) {
749 		BNXT_TF_DBG(ERR, "could not fetch the device params\n");
750 		return -ENODEV;
751 	}
752 
753 	if (bnxt_ulp_cntxt_mem_type_get(ulp_ctx, &mtype))
754 		return -EINVAL;
755 	if (mtype != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
756 		BNXT_TF_DBG(INFO, "Table Scope free is not required\n");
757 		return 0;
758 	}
759 
760 	rc = bnxt_ulp_cntxt_tbl_scope_id_get(ulp_ctx, &params.tbl_scope_id);
761 	if (rc) {
762 		BNXT_TF_DBG(ERR, "Failed to get the table scope id\n");
763 		return -EINVAL;
764 	}
765 
766 	rc = tf_free_tbl_scope(tfp, &params);
767 	if (rc) {
768 		BNXT_TF_DBG(ERR, "Unable to free table scope\n");
769 		return -EINVAL;
770 	}
771 	return rc;
772 }
773 
774 /* The function to free and deinit the ulp context data. */
775 static int32_t
776 ulp_ctx_deinit(struct bnxt *bp,
777 	       struct bnxt_ulp_session_state *session)
778 {
779 	/* close the tf session */
780 	ulp_ctx_session_close(bp, session);
781 
782 	/* The shared session must be closed last. */
783 	ulp_ctx_shared_session_close(bp, session);
784 
785 	/* Free the contents */
786 	if (session->cfg_data) {
787 		rte_free(session->cfg_data);
788 		bp->ulp_ctx->cfg_data = NULL;
789 		session->cfg_data = NULL;
790 	}
791 	return 0;
792 }
793 
794 /* The function to allocate and initialize the ulp context data. */
795 static int32_t
796 ulp_ctx_init(struct bnxt *bp,
797 	     struct bnxt_ulp_session_state *session)
798 {
799 	struct bnxt_ulp_data	*ulp_data;
800 	int32_t			rc = 0;
801 	enum bnxt_ulp_device_id devid;
802 
803 	/* Initialize the context entries list */
804 	bnxt_ulp_cntxt_list_init();
805 
806 	/* Add the context to the context entries list */
807 	rc = bnxt_ulp_cntxt_list_add(bp->ulp_ctx);
808 	if (rc) {
809 		BNXT_TF_DBG(ERR, "Failed to add the context list entry\n");
810 		return -ENOMEM;
811 	}
812 
813 	/* Allocate memory to hold ulp context data. */
814 	ulp_data = rte_zmalloc("bnxt_ulp_data",
815 			       sizeof(struct bnxt_ulp_data), 0);
816 	if (!ulp_data) {
817 		BNXT_TF_DBG(ERR, "Failed to allocate memory for ulp data\n");
818 		return -ENOMEM;
819 	}
820 
821 	/* Increment the ulp context data reference count. */
822 	bp->ulp_ctx->cfg_data = ulp_data;
823 	session->cfg_data = ulp_data;
824 	ulp_data->ref_cnt++;
825 	ulp_data->ulp_flags |= BNXT_ULP_VF_REP_ENABLED;
826 
827 	rc = bnxt_ulp_devid_get(bp, &devid);
828 	if (rc) {
829 		BNXT_TF_DBG(ERR, "Unable to determine device for ULP init.\n");
830 		goto error_deinit;
831 	}
832 
833 	rc = bnxt_ulp_cntxt_dev_id_set(bp->ulp_ctx, devid);
834 	if (rc) {
835 		BNXT_TF_DBG(ERR, "Unable to set device for ULP init.\n");
836 		goto error_deinit;
837 	}
838 
839 	rc = bnxt_ulp_cntxt_app_id_set(bp->ulp_ctx, bp->app_id);
840 	if (rc) {
841 		BNXT_TF_DBG(ERR, "Unable to set app_id for ULP init.\n");
842 		goto error_deinit;
843 	}
844 	BNXT_TF_DBG(DEBUG, "Ulp initialized with app id %d\n", bp->app_id);
845 
846 	rc = bnxt_ulp_cntxt_app_caps_init(bp, bp->app_id, devid);
847 	if (rc) {
848 		BNXT_TF_DBG(ERR, "Unable to set caps for app(%x)/dev(%x)\n",
849 			    bp->app_id, devid);
850 		goto error_deinit;
851 	}
852 
853 	/*
854 	 * The shared session must be created before the first regular session,
855 	 * but only after the ulp_ctx is valid.
856 	 */
857 	rc = ulp_ctx_shared_session_open(bp, session);
858 	if (rc) {
859 		BNXT_TF_DBG(ERR, "Unable to open shared session (%d)\n", rc);
860 		goto error_deinit;
861 	}
862 
863 	/* Open the ulp session. */
864 	rc = ulp_ctx_session_open(bp, session);
865 	if (rc)
866 		goto error_deinit;
867 
868 	bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
869 	return rc;
870 
871 error_deinit:
872 	session->session_opened = 1;
873 	(void)ulp_ctx_deinit(bp, session);
874 	return rc;
875 }
876 
877 /* The function to initialize ulp dparms with devargs */
878 static int32_t
879 ulp_dparms_init(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
880 {
881 	struct bnxt_ulp_device_params *dparms;
882 	uint32_t dev_id = BNXT_ULP_DEVICE_ID_LAST;
883 
884 	if (!bp->max_num_kflows) {
885 		/* Defaults to Internal */
886 		bnxt_ulp_cntxt_mem_type_set(ulp_ctx,
887 					    BNXT_ULP_FLOW_MEM_TYPE_INT);
888 		return 0;
889 	}
890 
891 	/* The max_num_kflows devarg was set, so move to external memory */
892 	if (bnxt_ulp_cntxt_mem_type_set(ulp_ctx, BNXT_ULP_FLOW_MEM_TYPE_EXT))
893 		return -EINVAL;
894 
895 	if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id)) {
896 		BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
897 		return -EINVAL;
898 	}
899 
900 	dparms = bnxt_ulp_device_params_get(dev_id);
901 	if (!dparms) {
902 		BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
903 		return -EINVAL;
904 	}
905 
906 	/* num_flows = max_num_kflows * 1024 */
907 	dparms->ext_flow_db_num_entries = bp->max_num_kflows * 1024;
908 	/* GFID =  2 * num_flows */
909 	dparms->mark_db_gfid_entries = dparms->ext_flow_db_num_entries * 2;
910 	BNXT_TF_DBG(DEBUG, "Set the number of flows = %" PRIu64 "\n",
911 		    dparms->ext_flow_db_num_entries);
912 
913 	return 0;
914 }
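
/*
 * Worked example of the devargs arithmetic above (illustrative only, with a
 * hypothetical max_num_kflows value): the devarg is expressed in units of
 * 1K flows, and the mark database keeps two GFID entries per flow.
 */
#if 0	/* example only, never compiled */
static void
example_kflows_arithmetic(void)
{
	uint32_t max_num_kflows = 32;			/* hypothetical devarg */
	uint64_t num_flows = max_num_kflows * 1024;	/* 32768 external flows */
	uint64_t num_gfids = num_flows * 2;		/* 65536 GFID mark entries */

	RTE_SET_USED(num_gfids);
}
#endif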
915 
916 /* The function to initialize bp flags with truflow features */
917 static int32_t
918 ulp_dparms_dev_port_intf_update(struct bnxt *bp,
919 				struct bnxt_ulp_context *ulp_ctx)
920 {
921 	enum bnxt_ulp_flow_mem_type mtype;
922 
923 	if (bnxt_ulp_cntxt_mem_type_get(ulp_ctx, &mtype))
924 		return -EINVAL;
925 	/* Update the bp flag with gfid flag */
926 	if (mtype == BNXT_ULP_FLOW_MEM_TYPE_EXT)
927 		bp->flags |= BNXT_FLAG_GFID_ENABLE;
928 
929 	return 0;
930 }
931 
932 static int32_t
933 ulp_ctx_attach(struct bnxt *bp,
934 	       struct bnxt_ulp_session_state *session)
935 {
936 	int32_t rc = 0;
937 	uint32_t flags, dev_id = BNXT_ULP_DEVICE_ID_LAST;
938 	uint8_t app_id;
939 
940 	/* Increment the ulp context data reference count. */
941 	bp->ulp_ctx->cfg_data = session->cfg_data;
942 	bp->ulp_ctx->cfg_data->ref_cnt++;
943 
944 	/* update the session details in bnxt tfp */
945 	bp->tfp.session = session->g_tfp->session;
946 
947 	/* Add the context to the context entries list */
948 	rc = bnxt_ulp_cntxt_list_add(bp->ulp_ctx);
949 	if (rc) {
950 		BNXT_TF_DBG(ERR, "Failed to add the context list entry\n");
951 		return -EINVAL;
952 	}
953 
954 	/*
955 	 * The supported flag will be set during the init. Use it now to
956 	 * know if we should go through the attach.
957 	 */
958 	rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);
959 	if (rc) {
960 		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
961 		return -EINVAL;
962 	}
963 
964 	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
965 	if (rc) {
966 		BNXT_TF_DBG(ERR, "Unable to get the dev_id.\n");
967 		return -EINVAL;
968 	}
969 
970 	flags = bp->ulp_ctx->cfg_data->ulp_flags;
971 	if (ULP_APP_DEV_UNSUPPORTED_ENABLED(flags)) {
972 		BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
973 			    app_id, dev_id);
974 		return -EINVAL;
975 	}
976 
977 	/* Create a TF Client */
978 	rc = ulp_ctx_session_open(bp, session);
979 	if (rc) {
980 		PMD_DRV_LOG(ERR, "Failed to open ctxt session, rc:%d\n", rc);
981 		bp->tfp.session = NULL;
982 		return rc;
983 	}
984 
985 	bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
986 	return rc;
987 }
988 
989 static void
990 ulp_ctx_detach(struct bnxt *bp)
991 {
992 	if (bp->tfp.session) {
993 		tf_close_session(&bp->tfp);
994 		bp->tfp.session = NULL;
995 	}
996 }
997 
998 /*
999  * Initialize the state of a ULP session.
1000  * If the state of a ULP session is not initialized, set its state to
1001  * initialized. If the state is already initialized, do nothing.
1002  */
1003 static void
1004 ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init)
1005 {
1006 	pthread_mutex_lock(&session->bnxt_ulp_mutex);
1007 
1008 	if (!session->bnxt_ulp_init) {
1009 		session->bnxt_ulp_init = true;
1010 		*init = false;
1011 	} else {
1012 		*init = true;
1013 	}
1014 
1015 	pthread_mutex_unlock(&session->bnxt_ulp_mutex);
1016 }
1017 
1018 /*
1019  * Check if a ULP session is already allocated for a specific PCI
1020  * domain and bus. If it is already allocated, simply return the session
1021  * pointer; otherwise return NULL so the caller can allocate a new one.
1022  */
1023 static struct bnxt_ulp_session_state *
1024 ulp_get_session(struct bnxt *bp, struct rte_pci_addr *pci_addr)
1025 {
1026 	struct bnxt_ulp_session_state *session;
1027 
1028 	/* if multi root capability is enabled, then ignore the pci bus id */
1029 	STAILQ_FOREACH(session, &bnxt_ulp_session_list, next) {
1030 		if (session->pci_info.domain == pci_addr->domain &&
1031 		    (BNXT_MULTIROOT_EN(bp) ||
1032 		    session->pci_info.bus == pci_addr->bus)) {
1033 			return session;
1034 		}
1035 	}
1036 	return NULL;
1037 }
1038 
1039 /*
1040  * Allocate and initialize a ULP session and set its state to INITIALIZED.
1041  * If it is already initialized, simply return the existing session.
1042  */
1043 static struct bnxt_ulp_session_state *
1044 ulp_session_init(struct bnxt *bp,
1045 		 bool *init)
1046 {
1047 	struct rte_pci_device		*pci_dev;
1048 	struct rte_pci_addr		*pci_addr;
1049 	struct bnxt_ulp_session_state	*session;
1050 	int rc = 0;
1051 
1052 	if (!bp)
1053 		return NULL;
1054 
1055 	pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
1056 	pci_addr = &pci_dev->addr;
1057 
1058 	pthread_mutex_lock(&bnxt_ulp_global_mutex);
1059 
1060 	session = ulp_get_session(bp, pci_addr);
1061 	if (!session) {
1062 		/* Session not found; allocate a new one */
1063 		session = rte_zmalloc("bnxt_ulp_session",
1064 				      sizeof(struct bnxt_ulp_session_state),
1065 				      0);
1066 		if (!session) {
1067 			BNXT_TF_DBG(ERR,
1068 				    "Allocation failed for bnxt_ulp_session\n");
1069 			pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1070 			return NULL;
1071 
1072 		} else {
1073 			/* Add it to the queue */
1074 			session->pci_info.domain = pci_addr->domain;
1075 			session->pci_info.bus = pci_addr->bus;
1076 			rc = pthread_mutex_init(&session->bnxt_ulp_mutex, NULL);
1077 			if (rc) {
1078 				BNXT_TF_DBG(ERR, "mutex create failed\n");
1079 				pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1080 				return NULL;
1081 			}
1082 			STAILQ_INSERT_TAIL(&bnxt_ulp_session_list,
1083 					   session, next);
1084 		}
1085 	}
1086 	ulp_context_initialized(session, init);
1087 	pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1088 	return session;
1089 }
1090 
1091 /*
1092  * When a device is closed, remove its associated session from the global
1093  * session list.
1094  */
1095 static void
1096 ulp_session_deinit(struct bnxt_ulp_session_state *session)
1097 {
1098 	if (!session)
1099 		return;
1100 
1101 	if (!session->cfg_data) {
1102 		pthread_mutex_lock(&bnxt_ulp_global_mutex);
1103 		STAILQ_REMOVE(&bnxt_ulp_session_list, session,
1104 			      bnxt_ulp_session_state, next);
1105 		pthread_mutex_destroy(&session->bnxt_ulp_mutex);
1106 		rte_free(session);
1107 		pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1108 	}
1109 }
1110 
1111 /*
1112  * Internal API to update a global configuration register; used here to
1113  * enable the NAT feature. Set set_flag to 1 to set the value bits or to
1114  * zero to clear them. Returns 0 on success.
1115  */
1116 static int32_t
1117 bnxt_ulp_global_cfg_update(struct bnxt *bp,
1118 			   enum tf_dir dir,
1119 			   enum tf_global_config_type type,
1120 			   uint32_t offset,
1121 			   uint32_t value,
1122 			   uint32_t set_flag)
1123 {
1124 	uint32_t global_cfg = 0;
1125 	int rc;
1126 	struct tf_global_cfg_parms parms = { 0 };
1127 
1128 	/* Initialize the params */
1129 	parms.dir = dir;
1130 	parms.type = type;
1131 	parms.offset = offset;
1132 	parms.config = (uint8_t *)&global_cfg;
1133 	parms.config_sz_in_bytes = sizeof(global_cfg);
1134 
1135 	rc = tf_get_global_cfg(&bp->tfp, &parms);
1136 	if (rc) {
1137 		BNXT_TF_DBG(ERR, "Failed to get global cfg 0x%x rc:%d\n",
1138 			    type, rc);
1139 		return rc;
1140 	}
1141 
1142 	if (set_flag)
1143 		global_cfg |= value;
1144 	else
1145 		global_cfg &= ~value;
1146 
1147 	/* SET the register RE_CFA_REG_ACT_TECT */
1148 	rc = tf_set_global_cfg(&bp->tfp, &parms);
1149 	if (rc) {
1150 		BNXT_TF_DBG(ERR, "Failed to set global cfg 0x%x rc:%d\n",
1151 			    type, rc);
1152 		return rc;
1153 	}
1154 	return rc;
1155 }
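
/*
 * Usage sketch only: bnxt_ulp_global_cfg_update() performs a read-modify-
 * write of the selected global configuration word.  The call below mirrors
 * the NAT enable done later in bnxt_ulp_init(); pass 0 as the last argument
 * to clear the same bits again.
 */
#if 0	/* example only, never compiled */
static int32_t
example_nat_global_cfg(struct bnxt *bp, uint32_t enable)
{
	return bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
					  TF_TUNNEL_ENCAP_NAT,
					  BNXT_ULP_NAT_OUTER_MOST_FLAGS,
					  enable);
}
#endif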
1156 
1157 /* Internal function to delete all the flows belonging to the given port */
1158 static void
1159 bnxt_ulp_flush_port_flows(struct bnxt *bp)
1160 {
1161 	uint16_t func_id;
1162 
1163 	/* it is assumed that port is either TVF or PF */
1164 	if (ulp_port_db_port_func_id_get(bp->ulp_ctx,
1165 					 bp->eth_dev->data->port_id,
1166 					 &func_id)) {
1167 		BNXT_TF_DBG(ERR, "Invalid argument\n");
1168 		return;
1169 	}
1170 	(void)ulp_flow_db_function_flow_flush(bp->ulp_ctx, func_id);
1171 }
1172 
1173 /* Internal function to delete the VFR default flows */
1174 static void
1175 bnxt_ulp_destroy_vfr_default_rules(struct bnxt *bp, bool global)
1176 {
1177 	struct bnxt_ulp_vfr_rule_info *info;
1178 	uint16_t port_id;
1179 	struct rte_eth_dev *vfr_eth_dev;
1180 	struct bnxt_representor *vfr_bp;
1181 
1182 	if (!BNXT_TRUFLOW_EN(bp) || BNXT_ETH_DEV_IS_REPRESENTOR(bp->eth_dev))
1183 		return;
1184 
1185 	if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
1186 		return;
1187 
1188 	/* Delete default rules for all ports */
1189 	for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
1190 		info = &bp->ulp_ctx->cfg_data->vfr_rule_info[port_id];
1191 		if (!info->valid)
1192 			continue;
1193 
1194 		if (!global && info->parent_port_id !=
1195 		    bp->eth_dev->data->port_id)
1196 			continue;
1197 
1198 		/* Destroy the flows */
1199 		ulp_default_flow_destroy(bp->eth_dev, info->vfr_flow_id);
1200 		/* Clean up the tx action pointer */
1201 		vfr_eth_dev = &rte_eth_devices[port_id];
1202 		if (vfr_eth_dev) {
1203 			vfr_bp = vfr_eth_dev->data->dev_private;
1204 			vfr_bp->vfr_tx_cfa_action = 0;
1205 		}
1206 		memset(info, 0, sizeof(struct bnxt_ulp_vfr_rule_info));
1207 	}
1208 }
1209 
1210 /*
1211  * Called when a port is de-initialized by DPDK.
1212  * This function clears the ULP context and the rest of the
1213  * infrastructure associated with it.
1214  */
1215 static void
1216 bnxt_ulp_deinit(struct bnxt *bp,
1217 		struct bnxt_ulp_session_state *session)
1218 {
1219 	bool ha_enabled;
1220 
1221 	if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
1222 		return;
1223 
1224 	ha_enabled = bnxt_ulp_cntxt_ha_enabled(bp->ulp_ctx);
1225 	if (ha_enabled && session->session_opened) {
1226 		int32_t rc = ulp_ha_mgr_close(bp->ulp_ctx);
1227 		if (rc)
1228 			BNXT_TF_DBG(ERR, "Failed to close HA (%d)\n", rc);
1229 	}
1230 
1231 	/* clean up default flows */
1232 	bnxt_ulp_destroy_df_rules(bp, true);
1233 
1234 	/* clean up default VFR flows */
1235 	bnxt_ulp_destroy_vfr_default_rules(bp, true);
1236 
1237 	/* clean up regular flows */
1238 	ulp_flow_db_flush_flows(bp->ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR);
1239 
1240 	/* cleanup the eem table scope */
1241 	ulp_eem_tbl_scope_deinit(bp, bp->ulp_ctx);
1242 
1243 	/* cleanup the flow database */
1244 	ulp_flow_db_deinit(bp->ulp_ctx);
1245 
1246 	/* Delete the Mark database */
1247 	ulp_mark_db_deinit(bp->ulp_ctx);
1248 
1249 	/* cleanup the ulp mapper */
1250 	ulp_mapper_deinit(bp->ulp_ctx);
1251 
1252 	/* Delete the Flow Counter Manager */
1253 	ulp_fc_mgr_deinit(bp->ulp_ctx);
1254 
1255 	/* Delete the Port database */
1256 	ulp_port_db_deinit(bp->ulp_ctx);
1257 
1258 	/* Disable NAT feature */
1259 	(void)bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
1260 					 TF_TUNNEL_ENCAP_NAT,
1261 					 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);
1262 
1263 	(void)bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
1264 					 TF_TUNNEL_ENCAP_NAT,
1265 					 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);
1266 
1267 	/* free the flow db lock */
1268 	pthread_mutex_destroy(&bp->ulp_ctx->cfg_data->flow_db_lock);
1269 
1270 	if (ha_enabled)
1271 		ulp_ha_mgr_deinit(bp->ulp_ctx);
1272 
1273 	/* Delete the ulp context and tf session and free the ulp context */
1274 	ulp_ctx_deinit(bp, session);
1275 	BNXT_TF_DBG(DEBUG, "ulp ctx has been deinitialized\n");
1276 }
1277 
1278 /*
1279  * Called when a port is initialized by DPDK.
1280  * This function initializes the ULP context and the rest of the
1281  * infrastructure associated with it.
1282  */
1283 static int32_t
1284 bnxt_ulp_init(struct bnxt *bp,
1285 	      struct bnxt_ulp_session_state *session)
1286 {
1287 	int rc;
1288 
1289 	/* Allocate and Initialize the ulp context. */
1290 	rc = ulp_ctx_init(bp, session);
1291 	if (rc) {
1292 		BNXT_TF_DBG(ERR, "Failed to create the ulp context\n");
1293 		goto jump_to_error;
1294 	}
1295 
1296 	rc = pthread_mutex_init(&bp->ulp_ctx->cfg_data->flow_db_lock, NULL);
1297 	if (rc) {
1298 		BNXT_TF_DBG(ERR, "Unable to initialize flow db lock\n");
1299 		goto jump_to_error;
1300 	}
1301 
1302 	/* Initialize ulp dparms with values devargs passed */
1303 	rc = ulp_dparms_init(bp, bp->ulp_ctx);
1304 	if (rc) {
1305 		BNXT_TF_DBG(ERR, "Failed to initialize the dparms\n");
1306 		goto jump_to_error;
1307 	}
1308 
1309 	/* create the port database */
1310 	rc = ulp_port_db_init(bp->ulp_ctx, bp->port_cnt);
1311 	if (rc) {
1312 		BNXT_TF_DBG(ERR, "Failed to create the port database\n");
1313 		goto jump_to_error;
1314 	}
1315 
1316 	/* Create the Mark database. */
1317 	rc = ulp_mark_db_init(bp->ulp_ctx);
1318 	if (rc) {
1319 		BNXT_TF_DBG(ERR, "Failed to create the mark database\n");
1320 		goto jump_to_error;
1321 	}
1322 
1323 	/* Create the flow database. */
1324 	rc = ulp_flow_db_init(bp->ulp_ctx);
1325 	if (rc) {
1326 		BNXT_TF_DBG(ERR, "Failed to create the flow database\n");
1327 		goto jump_to_error;
1328 	}
1329 
1330 	/* Create the eem table scope. */
1331 	rc = ulp_eem_tbl_scope_init(bp);
1332 	if (rc) {
1333 		BNXT_TF_DBG(ERR, "Failed to create the eem scope table\n");
1334 		goto jump_to_error;
1335 	}
1336 
1337 	rc = ulp_mapper_init(bp->ulp_ctx);
1338 	if (rc) {
1339 		BNXT_TF_DBG(ERR, "Failed to initialize ulp mapper\n");
1340 		goto jump_to_error;
1341 	}
1342 
1343 	rc = ulp_fc_mgr_init(bp->ulp_ctx);
1344 	if (rc) {
1345 		BNXT_TF_DBG(ERR, "Failed to initialize ulp flow counter mgr\n");
1346 		goto jump_to_error;
1347 	}
1348 
1349 	/*
1350 	 * Enable the NAT feature. Set the tunnel encap global configuration
1351 	 * register to enable NAT while reusing the existing inner L2 header
1352 	 * smac and dmac.
1353 	 */
1354 	rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
1355 					TF_TUNNEL_ENCAP_NAT,
1356 					BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
1357 	if (rc) {
1358 		BNXT_TF_DBG(ERR, "Failed to set rx global configuration\n");
1359 		goto jump_to_error;
1360 	}
1361 
1362 	rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
1363 					TF_TUNNEL_ENCAP_NAT,
1364 					BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
1365 	if (rc) {
1366 		BNXT_TF_DBG(ERR, "Failed to set tx global configuration\n");
1367 		goto jump_to_error;
1368 	}
1369 
1370 	if (bnxt_ulp_cntxt_ha_enabled(bp->ulp_ctx)) {
1371 		rc = ulp_ha_mgr_init(bp->ulp_ctx);
1372 		if (rc) {
1373 			BNXT_TF_DBG(ERR, "Failed to initialize HA %d\n", rc);
1374 			goto jump_to_error;
1375 		}
1376 		rc = ulp_ha_mgr_open(bp->ulp_ctx);
1377 		if (rc) {
1378 			BNXT_TF_DBG(ERR, "Failed to Process HA Open %d\n", rc);
1379 			goto jump_to_error;
1380 		}
1381 	}
1382 	BNXT_TF_DBG(DEBUG, "ulp ctx has been initialized\n");
1383 	return rc;
1384 
1385 jump_to_error:
1386 	bnxt_ulp_deinit(bp, session);
1387 	return rc;
1388 }
1389 
1390 /*
1391  * Called when a port is initialized by DPDK. This function sets up
1392  * the port-specific details.
1393  */
1394 int32_t
1395 bnxt_ulp_port_init(struct bnxt *bp)
1396 {
1397 	struct bnxt_ulp_session_state *session;
1398 	bool initialized;
1399 	enum bnxt_ulp_device_id devid = BNXT_ULP_DEVICE_ID_LAST;
1400 	uint32_t ulp_flags;
1401 	int32_t rc = 0;
1402 
1403 	if (!BNXT_TRUFLOW_EN(bp)) {
1404 		BNXT_TF_DBG(DEBUG,
1405 			    "Skip ulp init for port: %d, TF is not enabled\n",
1406 			    bp->eth_dev->data->port_id);
1407 		return rc;
1408 	}
1409 
1410 	if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1411 		BNXT_TF_DBG(DEBUG,
1412 			    "Skip ulp init for port: %d, not a TVF or PF\n",
1413 			    bp->eth_dev->data->port_id);
1414 		return rc;
1415 	}
1416 
1417 	if (bp->ulp_ctx) {
1418 		BNXT_TF_DBG(DEBUG, "ulp ctx already allocated\n");
1419 		return rc;
1420 	}
1421 
1422 	bp->ulp_ctx = rte_zmalloc("bnxt_ulp_ctx",
1423 				  sizeof(struct bnxt_ulp_context), 0);
1424 	if (!bp->ulp_ctx) {
1425 		BNXT_TF_DBG(ERR, "Failed to allocate ulp ctx\n");
1426 		return -ENOMEM;
1427 	}
1428 
1429 	/*
1430 	 * Multiple uplink ports can be associated with a single vswitch.
1431 	 * Make sure only the port that is started first will initialize
1432 	 * the TF session.
1433 	 */
1434 	session = ulp_session_init(bp, &initialized);
1435 	if (!session) {
1436 		BNXT_TF_DBG(ERR, "Failed to initialize the tf session\n");
1437 		rc = -EIO;
1438 		goto jump_to_error;
1439 	}
1440 
1441 	if (initialized) {
1442 		/*
1443 		 * If ULP is already initialized for a specific domain then
1444 		 * simply assign the ulp context to this rte_eth_dev.
1445 		 */
1446 		rc = ulp_ctx_attach(bp, session);
1447 		if (rc) {
1448 			BNXT_TF_DBG(ERR, "Failed to attach the ulp context\n");
1449 			goto jump_to_error;
1450 		}
1451 
1452 		/*
1453 		 * Attach to the shared session, must be called after the
1454 		 * ulp_ctx_attach in order to ensure that ulp data is available
1455 		 * for attaching.
1456 		 */
1457 		rc = ulp_ctx_shared_session_attach(bp, session);
1458 		if (rc) {
1459 			BNXT_TF_DBG(ERR,
1460 				    "Failed to attach to shared session (%d)\n", rc);
1461 			goto jump_to_error;
1462 		}
1463 	} else {
1464 		rc = bnxt_ulp_init(bp, session);
1465 		if (rc) {
1466 			BNXT_TF_DBG(ERR, "Failed to initialize the ulp\n");
1467 			goto jump_to_error;
1468 		}
1469 	}
1470 
1471 	/* Update bnxt driver flags */
1472 	rc = ulp_dparms_dev_port_intf_update(bp, bp->ulp_ctx);
1473 	if (rc) {
1474 		BNXT_TF_DBG(ERR, "Failed to update driver flags\n");
1475 		goto jump_to_error;
1476 	}
1477 
1478 	/* update the port database for the given interface */
1479 	rc = ulp_port_db_dev_port_intf_update(bp->ulp_ctx, bp->eth_dev);
1480 	if (rc) {
1481 		BNXT_TF_DBG(ERR, "Failed to update port database\n");
1482 		goto jump_to_error;
1483 	}
1484 	/* create the default rules */
1485 	rc = bnxt_ulp_create_df_rules(bp);
1486 	if (rc) {
1487 		BNXT_TF_DBG(ERR, "Failed to create default flow\n");
1488 		goto jump_to_error;
1489 	}
1490 
1491 	rc = bnxt_ulp_devid_get(bp, &devid);
1492 	if (rc) {
1493 		BNXT_TF_DBG(ERR, "Unable to determine device for ULP port init.\n");
1494 		goto jump_to_error;
1495 	}
1496 
1497 	/* set the unicast mode */
1498 	if (bnxt_ulp_cntxt_ptr2_ulp_flags_get(bp->ulp_ctx, &ulp_flags)) {
1499 		BNXT_TF_DBG(ERR, "Error in getting ULP context flags\n");
1500 		goto jump_to_error;
1501 	}
1502 	if (ulp_flags & BNXT_ULP_APP_UNICAST_ONLY) {
1503 		if (bnxt_pmd_set_unicast_rxmask(bp->eth_dev)) {
1504 			BNXT_TF_DBG(ERR, "Error in setting unicast rxmode\n");
1505 			goto jump_to_error;
1506 		}
1507 	}
1508 
1509 	return rc;
1510 
1511 jump_to_error:
1512 	bnxt_ulp_port_deinit(bp);
1513 	return rc;
1514 }
1515 
1516 /*
1517  * Called when a port is de-initialized by DPDK. This function cleans up
1518  * the port-specific details.
1519  */
1520 void
1521 bnxt_ulp_port_deinit(struct bnxt *bp)
1522 {
1523 	struct bnxt_ulp_session_state *session;
1524 	struct rte_pci_device *pci_dev;
1525 	struct rte_pci_addr *pci_addr;
1526 
1527 	if (!BNXT_TRUFLOW_EN(bp)) {
1528 		BNXT_TF_DBG(DEBUG,
1529 			    "Skip ULP deinit for port:%d, TF is not enabled\n",
1530 			    bp->eth_dev->data->port_id);
1531 		return;
1532 	}
1533 
1534 	if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1535 		BNXT_TF_DBG(DEBUG,
1536 			    "Skip ULP deinit port:%d, not a TVF or PF\n",
1537 			    bp->eth_dev->data->port_id);
1538 		return;
1539 	}
1540 
1541 	if (!bp->ulp_ctx) {
1542 		BNXT_TF_DBG(DEBUG, "ulp ctx already de-allocated\n");
1543 		return;
1544 	}
1545 
1546 	BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port deinit\n",
1547 		    bp->eth_dev->data->port_id);
1548 
1549 	/* Get the session details  */
1550 	pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
1551 	pci_addr = &pci_dev->addr;
1552 	pthread_mutex_lock(&bnxt_ulp_global_mutex);
1553 	session = ulp_get_session(bp, pci_addr);
1554 	pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1555 
1556 	/* If the session is not found, just exit */
1557 	if (!session) {
1558 		/* Free the ulp context */
1559 		rte_free(bp->ulp_ctx);
1560 		bp->ulp_ctx = NULL;
1561 		return;
1562 	}
1563 
1564 	/* Check the reference count to deinit or detach */
1565 	if (bp->ulp_ctx->cfg_data && bp->ulp_ctx->cfg_data->ref_cnt) {
1566 		bp->ulp_ctx->cfg_data->ref_cnt--;
1567 		if (bp->ulp_ctx->cfg_data->ref_cnt) {
1568 			/* free the port details */
1569 			/* Free the default flow rule associated to this port */
1570 			bnxt_ulp_destroy_df_rules(bp, false);
1571 			bnxt_ulp_destroy_vfr_default_rules(bp, false);
1572 
1573 			/* free flows associated with this port */
1574 			bnxt_ulp_flush_port_flows(bp);
1575 
1576 			/* close the session associated with this port */
1577 			ulp_ctx_detach(bp);
1578 
1579 			/* always detach/close shared after the session. */
1580 			ulp_ctx_shared_session_detach(bp);
1581 		} else {
1582 			/* Perform ulp ctx deinit */
1583 			bnxt_ulp_deinit(bp, session);
1584 		}
1585 	}
1586 
1587 	/* Free the ulp context in the context entry list */
1588 	bnxt_ulp_cntxt_list_del(bp->ulp_ctx);
1589 
1590 	/* clean up the session */
1591 	ulp_session_deinit(session);
1592 
1593 	/* Free the ulp context */
1594 	rte_free(bp->ulp_ctx);
1595 	bp->ulp_ctx = NULL;
1596 }
1597 
1598 /* Below are the accessor functions for the internal data of the ulp context. */
1599 /* Function to set the Mark DB into the context */
1600 int32_t
1601 bnxt_ulp_cntxt_ptr2_mark_db_set(struct bnxt_ulp_context *ulp_ctx,
1602 				struct bnxt_ulp_mark_tbl *mark_tbl)
1603 {
1604 	if (!ulp_ctx || !ulp_ctx->cfg_data) {
1605 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1606 		return -EINVAL;
1607 	}
1608 
1609 	ulp_ctx->cfg_data->mark_tbl = mark_tbl;
1610 
1611 	return 0;
1612 }
1613 
1614 /* Function to retrieve the Mark DB from the context. */
1615 struct bnxt_ulp_mark_tbl *
1616 bnxt_ulp_cntxt_ptr2_mark_db_get(struct bnxt_ulp_context *ulp_ctx)
1617 {
1618 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1619 		return NULL;
1620 
1621 	return ulp_ctx->cfg_data->mark_tbl;
1622 }
1623 
1624 bool
1625 bnxt_ulp_cntxt_shared_session_enabled(struct bnxt_ulp_context *ulp_ctx)
1626 {
1627 	return ULP_SHARED_SESSION_IS_ENABLED(ulp_ctx->cfg_data->ulp_flags);
1628 }
1629 
1630 int32_t
1631 bnxt_ulp_cntxt_app_id_set(struct bnxt_ulp_context *ulp_ctx, uint8_t app_id)
1632 {
1633 	if (!ulp_ctx)
1634 		return -EINVAL;
1635 	ulp_ctx->cfg_data->app_id = app_id;
1636 	return 0;
1637 }
1638 
1639 int32_t
1640 bnxt_ulp_cntxt_app_id_get(struct bnxt_ulp_context *ulp_ctx, uint8_t *app_id)
1641 {
1642 	/* Default APP id is zero */
1643 	if (!ulp_ctx || !app_id)
1644 		return -EINVAL;
1645 	*app_id = ulp_ctx->cfg_data->app_id;
1646 	return 0;
1647 }
1648 
1649 /* Function to set the device id of the hardware. */
1650 int32_t
1651 bnxt_ulp_cntxt_dev_id_set(struct bnxt_ulp_context *ulp_ctx,
1652 			  uint32_t dev_id)
1653 {
1654 	if (ulp_ctx && ulp_ctx->cfg_data) {
1655 		ulp_ctx->cfg_data->dev_id = dev_id;
1656 		return 0;
1657 	}
1658 
1659 	return -EINVAL;
1660 }
1661 
1662 /* Function to get the device id of the hardware. */
1663 int32_t
1664 bnxt_ulp_cntxt_dev_id_get(struct bnxt_ulp_context *ulp_ctx,
1665 			  uint32_t *dev_id)
1666 {
1667 	if (ulp_ctx && ulp_ctx->cfg_data) {
1668 		*dev_id = ulp_ctx->cfg_data->dev_id;
1669 		return 0;
1670 	}
1671 	*dev_id = BNXT_ULP_DEVICE_ID_LAST;
1672 	BNXT_TF_DBG(ERR, "Failed to read dev_id from ulp ctxt\n");
1673 	return -EINVAL;
1674 }
1675 
1676 int32_t
1677 bnxt_ulp_cntxt_mem_type_set(struct bnxt_ulp_context *ulp_ctx,
1678 			    enum bnxt_ulp_flow_mem_type mem_type)
1679 {
1680 	if (ulp_ctx && ulp_ctx->cfg_data) {
1681 		ulp_ctx->cfg_data->mem_type = mem_type;
1682 		return 0;
1683 	}
1684 	BNXT_TF_DBG(ERR, "Failed to write mem_type in ulp ctxt\n");
1685 	return -EINVAL;
1686 }
1687 
1688 int32_t
1689 bnxt_ulp_cntxt_mem_type_get(struct bnxt_ulp_context *ulp_ctx,
1690 			    enum bnxt_ulp_flow_mem_type *mem_type)
1691 {
1692 	if (ulp_ctx && ulp_ctx->cfg_data) {
1693 		*mem_type = ulp_ctx->cfg_data->mem_type;
1694 		return 0;
1695 	}
1696 	*mem_type = BNXT_ULP_FLOW_MEM_TYPE_LAST;
1697 	BNXT_TF_DBG(ERR, "Failed to read mem_type in ulp ctxt\n");
1698 	return -EINVAL;
1699 }
1700 
1701 /* Function to get the table scope id of the EEM table. */
1702 int32_t
1703 bnxt_ulp_cntxt_tbl_scope_id_get(struct bnxt_ulp_context *ulp_ctx,
1704 				uint32_t *tbl_scope_id)
1705 {
1706 	if (ulp_ctx && ulp_ctx->cfg_data) {
1707 		*tbl_scope_id = ulp_ctx->cfg_data->tbl_scope_id;
1708 		return 0;
1709 	}
1710 
1711 	return -EINVAL;
1712 }
1713 
1714 /* Function to set the table scope id of the EEM table. */
1715 int32_t
1716 bnxt_ulp_cntxt_tbl_scope_id_set(struct bnxt_ulp_context *ulp_ctx,
1717 				uint32_t tbl_scope_id)
1718 {
1719 	if (ulp_ctx && ulp_ctx->cfg_data) {
1720 		ulp_ctx->cfg_data->tbl_scope_id = tbl_scope_id;
1721 		return 0;
1722 	}
1723 
1724 	return -EINVAL;
1725 }
1726 
1727 /* Function to set the shared tfp session details in the ulp context. */
1728 int32_t
1729 bnxt_ulp_cntxt_shared_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
1730 {
1731 	if (!ulp) {
1732 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1733 		return -EINVAL;
1734 	}
1735 
1736 	if (tfp == NULL) {
1737 		if (ulp->cfg_data->num_shared_clients > 0)
1738 			ulp->cfg_data->num_shared_clients--;
1739 	} else {
1740 		ulp->cfg_data->num_shared_clients++;
1741 	}
1742 
1743 	ulp->g_shared_tfp = tfp;
1744 	return 0;
1745 }
1746 
1747 /* Function to get the shared tfp session details from the ulp context. */
1748 struct tf *
1749 bnxt_ulp_cntxt_shared_tfp_get(struct bnxt_ulp_context *ulp)
1750 {
1751 	if (!ulp) {
1752 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1753 		return NULL;
1754 	}
1755 	return ulp->g_shared_tfp;
1756 }
1757 
1758 /* Function to get the number of shared clients attached */
1759 uint8_t
1760 bnxt_ulp_cntxt_num_shared_clients_get(struct bnxt_ulp_context *ulp)
1761 {
1762 	if (ulp == NULL || ulp->cfg_data == NULL) {
1763 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1764 		return 0;
1765 	}
1766 	return ulp->cfg_data->num_shared_clients;
1767 }
1768 
1769 /* Function to set the tfp session details in the ulp context. */
1770 int32_t
1771 bnxt_ulp_cntxt_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
1772 {
1773 	if (!ulp) {
1774 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1775 		return -EINVAL;
1776 	}
1777 
1778 	ulp->g_tfp = tfp;
1779 	return 0;
1780 }
1781 
1782 /* Function to get the tfp session details from the ulp context. */
1783 struct tf *
1784 bnxt_ulp_cntxt_tfp_get(struct bnxt_ulp_context *ulp,
1785 		       enum bnxt_ulp_shared_session shared)
1786 {
1787 	if (!ulp) {
1788 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1789 		return NULL;
1790 	}
1791 	if (shared)
1792 		return ulp->g_shared_tfp;
1793 	else
1794 		return ulp->g_tfp;
1795 }
1796 
1797 /*
1798  * Get the device table entry based on the device id.
1799  *
1800  * dev_id [in] The device id of the hardware
1801  *
1802  * Returns the pointer to the device parameters.
1803  */
1804 struct bnxt_ulp_device_params *
1805 bnxt_ulp_device_params_get(uint32_t dev_id)
1806 {
1807 	if (dev_id < BNXT_ULP_MAX_NUM_DEVICES)
1808 		return &ulp_device_params[dev_id];
1809 	return NULL;
1810 }
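
/*
 * Usage sketch only: the device parameters are normally looked up with the
 * device id stored in the ulp context, as done in ulp_dparms_init() above.
 */
#if 0	/* example only, never compiled */
static struct bnxt_ulp_device_params *
example_dparms_lookup(struct bnxt_ulp_context *ulp_ctx)
{
	uint32_t dev_id;

	if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id))
		return NULL;

	return bnxt_ulp_device_params_get(dev_id);
}
#endif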
1811 
1812 /* Function to set the flow database to the ulp context. */
1813 int32_t
1814 bnxt_ulp_cntxt_ptr2_flow_db_set(struct bnxt_ulp_context	*ulp_ctx,
1815 				struct bnxt_ulp_flow_db	*flow_db)
1816 {
1817 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1818 		return -EINVAL;
1819 
1820 	ulp_ctx->cfg_data->flow_db = flow_db;
1821 	return 0;
1822 }
1823 
1824 /* Function to get the flow database from the ulp context. */
1825 struct bnxt_ulp_flow_db	*
1826 bnxt_ulp_cntxt_ptr2_flow_db_get(struct bnxt_ulp_context	*ulp_ctx)
1827 {
1828 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1829 		return NULL;
1830 
1831 	return ulp_ctx->cfg_data->flow_db;
1832 }
1833 
1834 /* Function to get the tunnel cache table info from the ulp context. */
1835 struct bnxt_tun_cache_entry *
1836 bnxt_ulp_cntxt_ptr2_tun_tbl_get(struct bnxt_ulp_context *ulp_ctx)
1837 {
1838 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1839 		return NULL;
1840 
1841 	return ulp_ctx->cfg_data->tun_tbl;
1842 }
1843 
1844 /* Function to get the ulp context from eth device. */
1845 struct bnxt_ulp_context	*
1846 bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev	*dev)
1847 {
1848 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1849 
1850 	if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) {
1851 		struct bnxt_representor *vfr = dev->data->dev_private;
1852 
1853 		bp = vfr->parent_dev->data->dev_private;
1854 	}
1855 
1856 	if (!bp) {
1857 		BNXT_TF_DBG(ERR, "Bnxt private data is not initialized\n");
1858 		return NULL;
1859 	}
1860 	return bp->ulp_ctx;
1861 }
1862 
1863 int32_t
1864 bnxt_ulp_cntxt_ptr2_mapper_data_set(struct bnxt_ulp_context *ulp_ctx,
1865 				    void *mapper_data)
1866 {
1867 	if (!ulp_ctx || !ulp_ctx->cfg_data) {
1868 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1869 		return -EINVAL;
1870 	}
1871 
1872 	ulp_ctx->cfg_data->mapper_data = mapper_data;
1873 	return 0;
1874 }
1875 
1876 void *
1877 bnxt_ulp_cntxt_ptr2_mapper_data_get(struct bnxt_ulp_context *ulp_ctx)
1878 {
1879 	if (!ulp_ctx || !ulp_ctx->cfg_data) {
1880 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1881 		return NULL;
1882 	}
1883 
1884 	return ulp_ctx->cfg_data->mapper_data;
1885 }
1886 
1887 /* Function to set the port database to the ulp context. */
1888 int32_t
1889 bnxt_ulp_cntxt_ptr2_port_db_set(struct bnxt_ulp_context	*ulp_ctx,
1890 				struct bnxt_ulp_port_db	*port_db)
1891 {
1892 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1893 		return -EINVAL;
1894 
1895 	ulp_ctx->cfg_data->port_db = port_db;
1896 	return 0;
1897 }
1898 
1899 /* Function to get the port database from the ulp context. */
1900 struct bnxt_ulp_port_db *
1901 bnxt_ulp_cntxt_ptr2_port_db_get(struct bnxt_ulp_context	*ulp_ctx)
1902 {
1903 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1904 		return NULL;
1905 
1906 	return ulp_ctx->cfg_data->port_db;
1907 }
1908 
1909 /* Function to set the flow counter info into the context */
1910 int32_t
1911 bnxt_ulp_cntxt_ptr2_fc_info_set(struct bnxt_ulp_context *ulp_ctx,
1912 				struct bnxt_ulp_fc_info *ulp_fc_info)
1913 {
1914 	if (!ulp_ctx || !ulp_ctx->cfg_data) {
1915 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1916 		return -EINVAL;
1917 	}
1918 
1919 	ulp_ctx->cfg_data->fc_info = ulp_fc_info;
1920 
1921 	return 0;
1922 }
1923 
1924 /* Function to retrieve the flow counter info from the context. */
1925 struct bnxt_ulp_fc_info *
1926 bnxt_ulp_cntxt_ptr2_fc_info_get(struct bnxt_ulp_context *ulp_ctx)
1927 {
1928 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1929 		return NULL;
1930 
1931 	return ulp_ctx->cfg_data->fc_info;
1932 }
1933 
1934 /* Function to get the ulp flags from the ulp context. */
1935 int32_t
1936 bnxt_ulp_cntxt_ptr2_ulp_flags_get(struct bnxt_ulp_context *ulp_ctx,
1937 				  uint32_t *flags)
1938 {
1939 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1940 		return -1;
1941 
1942 	*flags =  ulp_ctx->cfg_data->ulp_flags;
1943 	return 0;
1944 }
1945 
1946 /* Function to get the ulp vfr info from the ulp context. */
1947 struct bnxt_ulp_vfr_rule_info*
1948 bnxt_ulp_cntxt_ptr2_ulp_vfr_info_get(struct bnxt_ulp_context *ulp_ctx,
1949 				     uint32_t port_id)
1950 {
1951 	if (!ulp_ctx || !ulp_ctx->cfg_data || port_id >= RTE_MAX_ETHPORTS)
1952 		return NULL;
1953 
1954 	return &ulp_ctx->cfg_data->vfr_rule_info[port_id];
1955 }
1956 
1957 /* Function to acquire the flow database lock from the ulp context. */
1958 int32_t
1959 bnxt_ulp_cntxt_acquire_fdb_lock(struct bnxt_ulp_context	*ulp_ctx)
1960 {
1961 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1962 		return -1;
1963 
1964 	if (pthread_mutex_lock(&ulp_ctx->cfg_data->flow_db_lock)) {
1965 		BNXT_TF_DBG(ERR, "unable to acquire fdb lock\n");
1966 		return -1;
1967 	}
1968 	return 0;
1969 }
1970 
1971 /* Function to release the flow database lock from the ulp context. */
1972 void
1973 bnxt_ulp_cntxt_release_fdb_lock(struct bnxt_ulp_context	*ulp_ctx)
1974 {
1975 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1976 		return;
1977 
1978 	pthread_mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock);
1979 }
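
/*
 * Usage sketch only: the flow database lock brackets any flow database
 * update; callers must release it on every path once it has been acquired.
 */
#if 0	/* example only, never compiled */
static int32_t
example_fdb_locked_update(struct bnxt_ulp_context *ulp_ctx)
{
	if (bnxt_ulp_cntxt_acquire_fdb_lock(ulp_ctx))
		return -1;

	/* ... modify the flow database while the lock is held ... */

	bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx);
	return 0;
}
#endif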
1980 
1981 /* Function to set the ha info into the context */
1982 int32_t
1983 bnxt_ulp_cntxt_ptr2_ha_info_set(struct bnxt_ulp_context *ulp_ctx,
1984 				struct bnxt_ulp_ha_mgr_info *ulp_ha_info)
1985 {
1986 	if (ulp_ctx == NULL || ulp_ctx->cfg_data == NULL) {
1987 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1988 		return -EINVAL;
1989 	}
1990 	ulp_ctx->cfg_data->ha_info = ulp_ha_info;
1991 	return 0;
1992 }
1993 
1994 /* Function to retrieve the ha info from the context. */
1995 struct bnxt_ulp_ha_mgr_info *
1996 bnxt_ulp_cntxt_ptr2_ha_info_get(struct bnxt_ulp_context *ulp_ctx)
1997 {
1998 	if (ulp_ctx == NULL || ulp_ctx->cfg_data == NULL)
1999 		return NULL;
2000 	return ulp_ctx->cfg_data->ha_info;
2001 }
2002 
2003 bool
2004 bnxt_ulp_cntxt_ha_enabled(struct bnxt_ulp_context *ulp_ctx)
2005 {
2006 	if (ulp_ctx == NULL || ulp_ctx->cfg_data == NULL)
2007 		return false;
2008 	return !!ULP_HIGH_AVAIL_IS_ENABLED(ulp_ctx->cfg_data->ulp_flags);
2009 }
2010 
2011 static int32_t
2012 bnxt_ulp_cntxt_list_init(void)
2013 {
2014 	/* Create the cntxt spin lock only once */
2015 	if (!bnxt_ulp_ctxt_lock_created)
2016 		rte_spinlock_init(&bnxt_ulp_ctxt_lock);
2017 	bnxt_ulp_ctxt_lock_created = 1;
2018 	return 0;
2019 }
2020 
2021 static int32_t
2022 bnxt_ulp_cntxt_list_add(struct bnxt_ulp_context *ulp_ctx)
2023 {
2024 	struct ulp_context_list_entry	*entry;
2025 
2026 	entry = rte_zmalloc(NULL, sizeof(struct ulp_context_list_entry), 0);
2027 	if (entry == NULL) {
2028 		BNXT_TF_DBG(ERR, "unable to allocate memory\n");
2029 		return -ENOMEM;
2030 	}
2031 
2032 	rte_spinlock_lock(&bnxt_ulp_ctxt_lock);
2033 	entry->ulp_ctx = ulp_ctx;
2034 	TAILQ_INSERT_TAIL(&ulp_cntx_list, entry, next);
2035 	rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
2036 	return 0;
2037 }
2038 
2039 static void
2040 bnxt_ulp_cntxt_list_del(struct bnxt_ulp_context *ulp_ctx)
2041 {
2042 	struct ulp_context_list_entry	*entry, *temp;
2043 
2044 	rte_spinlock_lock(&bnxt_ulp_ctxt_lock);
2045 	RTE_TAILQ_FOREACH_SAFE(entry, &ulp_cntx_list, next, temp) {
2046 		if (entry->ulp_ctx == ulp_ctx) {
2047 			TAILQ_REMOVE(&ulp_cntx_list, entry, next);
2048 			rte_free(entry);
2049 			break;
2050 		}
2051 	}
2052 	rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
2053 }
2054 
2055 struct bnxt_ulp_context *
2056 bnxt_ulp_cntxt_entry_acquire(void *arg)
2057 {
2058 	struct ulp_context_list_entry	*entry;
2059 
2060 	/* Take the list lock and return the ulp context whose cfg_data matches */
2061 	if (rte_spinlock_trylock(&bnxt_ulp_ctxt_lock)) {
2062 		TAILQ_FOREACH(entry, &ulp_cntx_list, next)
2063 			if (entry->ulp_ctx->cfg_data == arg)
2064 				return entry->ulp_ctx;
2065 		rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
2066 	}
2067 	return NULL;
2068 }
2069 
2070 void
2071 bnxt_ulp_cntxt_entry_release(void)
2072 {
2073 	rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
2074 }
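
/*
 * Usage sketch only: bnxt_ulp_cntxt_entry_acquire() returns with the context
 * list spinlock held when a matching context is found, so every successful
 * acquire must be paired with bnxt_ulp_cntxt_entry_release().
 */
#if 0	/* example only, never compiled */
static void
example_cntxt_entry_usage(void *cfg_data)
{
	struct bnxt_ulp_context *ulp_ctx;

	ulp_ctx = bnxt_ulp_cntxt_entry_acquire(cfg_data);
	if (ulp_ctx == NULL)
		return;

	/* ... use ulp_ctx; the context list lock is held here ... */

	bnxt_ulp_cntxt_entry_release();
}
#endif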
2075 
2076 /* Function to get the app tunnel details from the ulp context. */
2077 struct bnxt_flow_app_tun_ent *
2078 bnxt_ulp_cntxt_ptr2_app_tun_list_get(struct bnxt_ulp_context *ulp)
2079 {
2080 	if (!ulp || !ulp->cfg_data)
2081 		return NULL;
2082 
2083 	return ulp->cfg_data->app_tun;
2084 }
2085