xref: /dpdk/drivers/net/bnxt/tf_ulp/bnxt_ulp.c (revision be3af03f5563adffcd56a26b84ac862c5874d0cc)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2021 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <rte_log.h>
7 #include <rte_malloc.h>
8 #include <rte_flow.h>
9 #include <rte_flow_driver.h>
10 #include <rte_tailq.h>
11 #include <rte_spinlock.h>
12 
13 #include "bnxt.h"
14 #include "bnxt_ulp.h"
15 #include "bnxt_tf_common.h"
16 #include "tf_core.h"
17 #include "tf_ext_flow_handle.h"
18 
19 #include "ulp_template_db_enum.h"
20 #include "ulp_template_struct.h"
21 #include "ulp_mark_mgr.h"
22 #include "ulp_fc_mgr.h"
23 #include "ulp_flow_db.h"
24 #include "ulp_mapper.h"
25 #include "ulp_port_db.h"
26 #include "ulp_tun.h"
27 #include "ulp_ha_mgr.h"
28 #include "bnxt_tf_pmd_shim.h"
29 
30 /* Linked list of all TF sessions. */
31 STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list =
32 			STAILQ_HEAD_INITIALIZER(bnxt_ulp_session_list);
33 
34 /* Mutex to synchronize bnxt_ulp_session_list operations. */
35 static pthread_mutex_t bnxt_ulp_global_mutex = PTHREAD_MUTEX_INITIALIZER;
36 
37 /* Spin lock to protect context global list */
38 rte_spinlock_t bnxt_ulp_ctxt_lock;
39 TAILQ_HEAD(cntx_list_entry_list, ulp_context_list_entry);
40 static struct cntx_list_entry_list ulp_cntx_list =
41 	TAILQ_HEAD_INITIALIZER(ulp_cntx_list);
42 
43 /* Static function declarations */
44 static int32_t bnxt_ulp_cntxt_list_init(void);
45 static int32_t bnxt_ulp_cntxt_list_add(struct bnxt_ulp_context *ulp_ctx);
46 static void bnxt_ulp_cntxt_list_del(struct bnxt_ulp_context *ulp_ctx);
47 
48 /*
49  * Allow the deletion of context only for the bnxt device that
50  * created the session.
51  */
52 bool
53 ulp_ctx_deinit_allowed(struct bnxt_ulp_context *ulp_ctx)
54 {
55 	if (!ulp_ctx || !ulp_ctx->cfg_data)
56 		return false;
57 
58 	if (!ulp_ctx->cfg_data->ref_cnt) {
59 		BNXT_TF_DBG(DEBUG, "ulp ctx shall initiate deinit\n");
60 		return true;
61 	}
62 
63 	return false;
64 }
65 
66 static int32_t
67 bnxt_ulp_devid_get(struct bnxt *bp,
68 		   enum bnxt_ulp_device_id  *ulp_dev_id)
69 {
70 	if (BNXT_CHIP_P5(bp)) {
71 		*ulp_dev_id = BNXT_ULP_DEVICE_ID_THOR;
72 		return 0;
73 	}
74 
75 	if (BNXT_STINGRAY(bp))
76 		*ulp_dev_id = BNXT_ULP_DEVICE_ID_STINGRAY;
77 	else
78 		/* Assuming Whitney */
79 		*ulp_dev_id = BNXT_ULP_DEVICE_ID_WH_PLUS;
80 
81 	return 0;
82 }
83 
84 struct bnxt_ulp_app_capabilities_info *
85 bnxt_ulp_app_cap_list_get(uint32_t *num_entries)
86 {
87 	if (!num_entries)
88 		return NULL;
89 	*num_entries = BNXT_ULP_APP_CAP_TBL_MAX_SZ;
90 	return ulp_app_cap_info_list;
91 }
92 
93 static struct bnxt_ulp_resource_resv_info *
94 bnxt_ulp_app_resource_resv_list_get(uint32_t *num_entries)
95 {
96 	if (num_entries == NULL)
97 		return NULL;
98 	*num_entries = BNXT_ULP_APP_RESOURCE_RESV_LIST_MAX_SZ;
99 	return ulp_app_resource_resv_list;
100 }
101 
102 struct bnxt_ulp_resource_resv_info *
103 bnxt_ulp_resource_resv_list_get(uint32_t *num_entries)
104 {
105 	if (!num_entries)
106 		return NULL;
107 	*num_entries = BNXT_ULP_RESOURCE_RESV_LIST_MAX_SZ;
108 	return ulp_resource_resv_list;
109 }
110 
111 struct bnxt_ulp_glb_resource_info *
112 bnxt_ulp_app_glb_resource_info_list_get(uint32_t *num_entries)
113 {
114 	if (!num_entries)
115 		return NULL;
116 	*num_entries = BNXT_ULP_APP_GLB_RESOURCE_TBL_MAX_SZ;
117 	return ulp_app_glb_resource_tbl;
118 }
119 
120 static int32_t
121 bnxt_ulp_named_resources_calc(struct bnxt_ulp_context *ulp_ctx,
122 			      struct bnxt_ulp_glb_resource_info *info,
123 			      uint32_t num,
124 			      struct tf_session_resources *res)
125 {
126 	uint32_t dev_id = BNXT_ULP_DEVICE_ID_LAST, res_type, i;
127 	enum tf_dir dir;
128 	uint8_t app_id;
129 	int32_t rc = 0;
130 
131 	if (ulp_ctx == NULL || info == NULL || res == NULL || num == 0) {
132 		BNXT_TF_DBG(ERR, "Invalid parms to named resources calc.\n");
133 		return -EINVAL;
134 	}
135 
136 	rc = bnxt_ulp_cntxt_app_id_get(ulp_ctx, &app_id);
137 	if (rc) {
138 		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
139 		return -EINVAL;
140 	}
141 
142 	rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id);
143 	if (rc) {
144 		BNXT_TF_DBG(ERR, "Unable to get the dev id from ulp.\n");
145 		return -EINVAL;
146 	}
147 
148 	for (i = 0; i < num; i++) {
149 		if (dev_id != info[i].device_id || app_id != info[i].app_id)
150 			continue;
151 		dir = info[i].direction;
152 		res_type = info[i].resource_type;
153 
154 		switch (info[i].resource_func) {
155 		case BNXT_ULP_RESOURCE_FUNC_IDENTIFIER:
156 			res->ident_cnt[dir].cnt[res_type]++;
157 			break;
158 		case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE:
159 			res->tbl_cnt[dir].cnt[res_type]++;
160 			break;
161 		case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE:
162 			res->tcam_cnt[dir].cnt[res_type]++;
163 			break;
164 		case BNXT_ULP_RESOURCE_FUNC_EM_TABLE:
165 			res->em_cnt[dir].cnt[res_type]++;
166 			break;
167 		default:
168 			BNXT_TF_DBG(ERR, "Unknown resource func (0x%x)\n",
169 				    info[i].resource_func);
170 			continue;
171 		}
172 	}
173 
174 	return 0;
175 }
176 
177 static int32_t
178 bnxt_ulp_unnamed_resources_calc(struct bnxt_ulp_context *ulp_ctx,
179 				struct bnxt_ulp_resource_resv_info *info,
180 				uint32_t num,
181 				struct tf_session_resources *res)
182 {
183 	uint32_t dev_id, res_type, i;
184 	enum tf_dir dir;
185 	uint8_t app_id;
186 	int32_t rc = 0;
187 
188 	if (ulp_ctx == NULL || res == NULL || info == NULL || num == 0) {
189 		BNXT_TF_DBG(ERR, "Invalid arguments to get resources.\n");
190 		return -EINVAL;
191 	}
192 
193 	rc = bnxt_ulp_cntxt_app_id_get(ulp_ctx, &app_id);
194 	if (rc) {
195 		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
196 		return -EINVAL;
197 	}
198 
199 	rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id);
200 	if (rc) {
201 		BNXT_TF_DBG(ERR, "Unable to get the dev id from ulp.\n");
202 		return -EINVAL;
203 	}
204 
205 	for (i = 0; i < num; i++) {
206 		if (app_id != info[i].app_id || dev_id != info[i].device_id)
207 			continue;
208 		dir = info[i].direction;
209 		res_type = info[i].resource_type;
210 
211 		switch (info[i].resource_func) {
212 		case BNXT_ULP_RESOURCE_FUNC_IDENTIFIER:
213 			res->ident_cnt[dir].cnt[res_type] = info[i].count;
214 			break;
215 		case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE:
216 			res->tbl_cnt[dir].cnt[res_type] = info[i].count;
217 			break;
218 		case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE:
219 			res->tcam_cnt[dir].cnt[res_type] = info[i].count;
220 			break;
221 		case BNXT_ULP_RESOURCE_FUNC_EM_TABLE:
222 			res->em_cnt[dir].cnt[res_type] = info[i].count;
223 			break;
224 		default:
225 			break;
226 		}
227 	}
228 	return 0;
229 }
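
/*
 * Illustrative example of how one reservation entry is consumed above
 * (the field values are hypothetical): an entry such as
 *	{ .app_id = 0, .device_id = BNXT_ULP_DEVICE_ID_THOR,
 *	  .direction = TF_DIR_RX,
 *	  .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE,
 *	  .resource_type = <tcam type>, .count = 128 }
 * ends up as res->tcam_cnt[TF_DIR_RX].cnt[<tcam type>] = 128, which
 * tf_open_session() later uses to reserve the hardware resources.
 */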
230 
231 static int32_t
232 bnxt_ulp_tf_resources_get(struct bnxt_ulp_context *ulp_ctx,
233 			  struct tf_session_resources *res)
234 {
235 	struct bnxt_ulp_resource_resv_info *unnamed = NULL;
236 	uint32_t unum;
237 	int32_t rc = 0;
238 
239 	if (ulp_ctx == NULL || res == NULL) {
240 		BNXT_TF_DBG(ERR, "Invalid arguments to get resources.\n");
241 		return -EINVAL;
242 	}
243 
244 	unnamed = bnxt_ulp_resource_resv_list_get(&unum);
245 	if (unnamed == NULL) {
246 		BNXT_TF_DBG(ERR, "Unable to get resource resv list.\n");
247 		return -EINVAL;
248 	}
249 
250 	rc = bnxt_ulp_unnamed_resources_calc(ulp_ctx, unnamed, unum, res);
251 	if (rc)
252 		BNXT_TF_DBG(ERR, "Unable to calc resources for session.\n");
253 
254 	return rc;
255 }
256 
257 static int32_t
258 bnxt_ulp_tf_shared_session_resources_get(struct bnxt_ulp_context *ulp_ctx,
259 					 struct tf_session_resources *res)
260 {
261 	struct bnxt_ulp_resource_resv_info *unnamed;
262 	struct bnxt_ulp_glb_resource_info *named;
263 	uint32_t unum, nnum;
264 	int32_t rc;
265 
266 	if (ulp_ctx == NULL || res == NULL) {
267 		BNXT_TF_DBG(ERR, "Invalid arguments to get resources.\n");
268 		return -EINVAL;
269 	}
270 
271 	/* Make sure the resources are zero before accumulating. */
272 	memset(res, 0, sizeof(struct tf_session_resources));
273 
274 	/*
275 	 * Shared resources comprise both named and unnamed resources.
276 	 * First get the unnamed counts, and then add the named to the result.
277 	 */
278 	/* Get the baseline counts */
279 	unnamed = bnxt_ulp_app_resource_resv_list_get(&unum);
280 	if (unnamed == NULL) {
281 		BNXT_TF_DBG(ERR, "Unable to get shared resource resv list.\n");
282 		return -EINVAL;
283 	}
284 	rc = bnxt_ulp_unnamed_resources_calc(ulp_ctx, unnamed, unum, res);
285 	if (rc) {
286 		BNXT_TF_DBG(ERR, "Unable to calc resources for shared session.\n");
287 		return -EINVAL;
288 	}
289 
290 	/* Get the named list and add the totals */
291 	named = bnxt_ulp_app_glb_resource_info_list_get(&nnum);
292 	if (named == NULL) {
293 		BNXT_TF_DBG(ERR, "Unable to get app global resource list\n");
294 		return -EINVAL;
295 	}
296 	rc = bnxt_ulp_named_resources_calc(ulp_ctx, named, nnum, res);
297 	if (rc)
298 		BNXT_TF_DBG(ERR, "Unable to calc named resources\n");
299 
300 	return rc;
301 }
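
/*
 * Note on the accumulation above: the unnamed (per-app) reservation list
 * sets the baseline counts with '=', while each matching named global
 * resource then adds one more unit of its type with '++', so the shared
 * session is sized for both kinds of resources.
 */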
302 
303 int32_t
304 bnxt_ulp_cntxt_app_caps_init(struct bnxt *bp,
305 			     uint8_t app_id, uint32_t dev_id)
306 {
307 	struct bnxt_ulp_app_capabilities_info *info;
308 	uint32_t num = 0;
309 	uint16_t i;
310 	bool found = false;
311 	struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx;
312 
313 	if (ULP_APP_DEV_UNSUPPORTED_ENABLED(ulp_ctx->cfg_data->ulp_flags)) {
314 		BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
315 			    app_id, dev_id);
316 		return -EINVAL;
317 	}
318 
319 	info = bnxt_ulp_app_cap_list_get(&num);
320 	if (!info || !num) {
321 		BNXT_TF_DBG(ERR, "Failed to get app capabilities.\n");
322 		return -EINVAL;
323 	}
324 
325 	for (i = 0; i < num; i++) {
326 		if (info[i].app_id != app_id || info[i].device_id != dev_id)
327 			continue;
328 		found = true;
329 		if (info[i].flags & BNXT_ULP_APP_CAP_SHARED_EN)
330 			ulp_ctx->cfg_data->ulp_flags |=
331 				BNXT_ULP_SHARED_SESSION_ENABLED;
332 		if (info[i].flags & BNXT_ULP_APP_CAP_HOT_UPGRADE_EN)
333 			ulp_ctx->cfg_data->ulp_flags |=
334 				BNXT_ULP_HIGH_AVAIL_ENABLED;
335 		if (info[i].flags & BNXT_ULP_APP_CAP_UNICAST_ONLY)
336 			ulp_ctx->cfg_data->ulp_flags |=
337 				BNXT_ULP_APP_UNICAST_ONLY;
338 		if (info[i].flags & BNXT_ULP_APP_CAP_SOCKET_DIRECT) {
339 			/* Enable socket direct only if MR is enabled in FW */
340 			if (BNXT_MULTIROOT_EN(bp))
341 				ulp_ctx->cfg_data->ulp_flags |=
342 					BNXT_ULP_APP_SOCKET_DIRECT;
343 		}
344 	}
345 	if (!found) {
346 		BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
347 			    app_id, dev_id);
348 		ulp_ctx->cfg_data->ulp_flags |= BNXT_ULP_APP_DEV_UNSUPPORTED;
349 		return -EINVAL;
350 	}
351 
352 	return 0;
353 }
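
/*
 * For reference, the capability-to-flag mapping applied above is:
 *	BNXT_ULP_APP_CAP_SHARED_EN      -> BNXT_ULP_SHARED_SESSION_ENABLED
 *	BNXT_ULP_APP_CAP_HOT_UPGRADE_EN -> BNXT_ULP_HIGH_AVAIL_ENABLED
 *	BNXT_ULP_APP_CAP_UNICAST_ONLY   -> BNXT_ULP_APP_UNICAST_ONLY
 *	BNXT_ULP_APP_CAP_SOCKET_DIRECT  -> BNXT_ULP_APP_SOCKET_DIRECT
 *					   (only when multi-root is enabled)
 */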
354 
355 static void
356 ulp_ctx_shared_session_close(struct bnxt *bp,
357 			     struct bnxt_ulp_session_state *session)
358 {
359 	struct tf *tfp;
360 	int32_t rc;
361 
362 	if (!bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx))
363 		return;
364 
365 	tfp = bnxt_ulp_cntxt_shared_tfp_get(bp->ulp_ctx);
366 	if (!tfp) {
367 		/*
368 		 * Log it under debug since this is likely a case of the
369 		 * shared session not being created.  For example, a failed
370 		 * initialization.
371 		 */
372 		BNXT_TF_DBG(DEBUG, "Failed to get shared tfp on close.\n");
373 		return;
374 	}
375 	rc = tf_close_session(tfp);
376 	if (rc)
377 		BNXT_TF_DBG(ERR, "Failed to close the shared session rc=%d.\n",
378 			    rc);
379 	(void)bnxt_ulp_cntxt_shared_tfp_set(bp->ulp_ctx, NULL);
380 
381 	session->g_shared_tfp.session = NULL;
382 }
383 
384 static int32_t
385 ulp_ctx_shared_session_open(struct bnxt *bp,
386 			    struct bnxt_ulp_session_state *session)
387 {
388 	struct rte_eth_dev *ethdev = bp->eth_dev;
389 	struct tf_session_resources *resources;
390 	struct tf_open_session_parms parms;
391 	size_t copy_nbytes;
392 	uint32_t ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST;
393 	int32_t	rc = 0;
394 	uint8_t app_id;
395 
396 	/* only perform this if shared session is enabled. */
397 	if (!bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx))
398 		return 0;
399 
400 	memset(&parms, 0, sizeof(parms));
401 
402 	rc = rte_eth_dev_get_name_by_port(ethdev->data->port_id,
403 					  parms.ctrl_chan_name);
404 	if (rc) {
405 		BNXT_TF_DBG(ERR, "Invalid port %d, rc = %d\n",
406 			    ethdev->data->port_id, rc);
407 		return rc;
408 	}
409 	resources = &parms.resources;
410 
411 	/*
412 	 * Compute the space left in ctrl_chan_name, accounting for the
413 	 * existing contents and one byte for the NUL terminator.
414 	 */
415 	copy_nbytes = sizeof(parms.ctrl_chan_name) -
416 		strlen(parms.ctrl_chan_name) - 1;
417 
418 	/*
419 	 * Build the ctrl_chan_name with shared token.
420 	 * When HA is enabled, the WC TCAM needs extra management by the core,
421 	 * so add the wc_tcam string to the control channel.
422 	 */
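	/*
	 * For example, assuming the port name resolves to the PCI address
	 * "0000:01:00.0" (hypothetical), the resulting control channel name
	 * would be "0000:01:00.0-tf_shared" or, with HA enabled,
	 * "0000:01:00.0-tf_shared-wc_tcam".
	 */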
423 	if (bnxt_ulp_cntxt_ha_enabled(bp->ulp_ctx))
424 		strncat(parms.ctrl_chan_name, "-tf_shared-wc_tcam",
425 			copy_nbytes);
426 	else
427 		strncat(parms.ctrl_chan_name, "-tf_shared", copy_nbytes);
428 
429 	rc = bnxt_ulp_tf_shared_session_resources_get(bp->ulp_ctx, resources);
430 	if (rc)
431 		return rc;
432 
433 	rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);
434 	if (rc) {
435 		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
436 		return -EINVAL;
437 	}
438 
439 	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
440 	if (rc) {
441 		BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
442 		return rc;
443 	}
444 
445 	switch (ulp_dev_id) {
446 	case BNXT_ULP_DEVICE_ID_WH_PLUS:
447 		parms.device_type = TF_DEVICE_TYPE_WH;
448 		break;
449 	case BNXT_ULP_DEVICE_ID_STINGRAY:
450 		parms.device_type = TF_DEVICE_TYPE_SR;
451 		break;
452 	case BNXT_ULP_DEVICE_ID_THOR:
453 		parms.device_type = TF_DEVICE_TYPE_THOR;
454 		break;
455 	default:
456 		BNXT_TF_DBG(ERR, "Unable to determine dev for opening session.\n");
457 		return -EINVAL;
458 	}
459 
460 	parms.shadow_copy = true;
461 	parms.bp = bp;
462 	if (app_id == 0 || app_id == 3)
463 		parms.wc_num_slices = TF_WC_TCAM_2_SLICE_PER_ROW;
464 	else
465 		parms.wc_num_slices = TF_WC_TCAM_1_SLICE_PER_ROW;
466 
467 	/*
468 	 * Open the session here, but collect the resources during the
469 	 * mapper initialization.
470 	 */
471 	rc = tf_open_session(&bp->tfp_shared, &parms);
472 	if (rc)
473 		return rc;
474 
475 	if (parms.shared_session_creator)
476 		BNXT_TF_DBG(DEBUG, "Shared session creator.\n");
477 	else
478 		BNXT_TF_DBG(DEBUG, "Shared session attached.\n");
479 
480 	/* Save the shared session in global data */
481 	if (!session->g_shared_tfp.session)
482 		session->g_shared_tfp.session = bp->tfp_shared.session;
483 
484 	rc = bnxt_ulp_cntxt_shared_tfp_set(bp->ulp_ctx, &bp->tfp_shared);
485 	if (rc)
486 		BNXT_TF_DBG(ERR, "Failed to add shared tfp to ulp (%d)\n", rc);
487 
488 	return rc;
489 }
490 
491 static int32_t
492 ulp_ctx_shared_session_attach(struct bnxt *bp,
493 			      struct bnxt_ulp_session_state *session)
494 {
495 	int32_t rc = 0;
496 
497 	/* Simply return success if shared session not enabled */
498 	if (bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx)) {
499 		bp->tfp_shared.session = session->g_shared_tfp.session;
500 		rc = ulp_ctx_shared_session_open(bp, session);
501 	}
502 
503 	return rc;
504 }
505 
506 static void
507 ulp_ctx_shared_session_detach(struct bnxt *bp)
508 {
509 	if (bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx)) {
510 		if (bp->tfp_shared.session) {
511 			tf_close_session(&bp->tfp_shared);
512 			bp->tfp_shared.session = NULL;
513 		}
514 	}
515 }
516 
517 /*
518  * Initialize a ULP session.
519  * A ULP session contains all the resources needed to support rte flow
520  * offloads. A session is initialized as part of rte_eth_device start.
521  * A single vswitch instance can have multiple uplinks, which means
522  * rte_eth_device start will be called for each of these devices.
523  * The ULP session manager makes sure that a single ULP session is only
524  * initialized once. Apart from this, it also initializes the MARK database,
525  * EEM table and flow database. The ULP session manager also maintains a
526  * list of all opened ULP sessions.
527  */
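/*
 * Rough per-port call flow (illustrative, see bnxt_ulp_port_init() below):
 *	session = ulp_session_init(bp, &initialized);
 *	if (initialized)
 *		ulp_ctx_attach(bp, session);	- later ports reuse the session
 *	else
 *		bnxt_ulp_init(bp, session);	- first port opens the TF session
 */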
528 static int32_t
529 ulp_ctx_session_open(struct bnxt *bp,
530 		     struct bnxt_ulp_session_state *session)
531 {
532 	struct rte_eth_dev		*ethdev = bp->eth_dev;
533 	int32_t				rc = 0;
534 	struct tf_open_session_parms	params;
535 	struct tf_session_resources	*resources;
536 	uint32_t ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST;
537 	uint8_t app_id;
538 
539 	memset(&params, 0, sizeof(params));
540 
541 	rc = rte_eth_dev_get_name_by_port(ethdev->data->port_id,
542 					  params.ctrl_chan_name);
543 	if (rc) {
544 		BNXT_TF_DBG(ERR, "Invalid port %d, rc = %d\n",
545 			    ethdev->data->port_id, rc);
546 		return rc;
547 	}
548 
549 	params.shadow_copy = true;
550 
551 	rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);
552 	if (rc) {
553 		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
554 		return -EINVAL;
555 	}
556 
557 	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
558 	if (rc) {
559 		BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
560 		return rc;
561 	}
562 
563 	switch (ulp_dev_id) {
564 	case BNXT_ULP_DEVICE_ID_WH_PLUS:
565 		params.device_type = TF_DEVICE_TYPE_WH;
566 		break;
567 	case BNXT_ULP_DEVICE_ID_STINGRAY:
568 		params.device_type = TF_DEVICE_TYPE_SR;
569 		break;
570 	case BNXT_ULP_DEVICE_ID_THOR:
571 		params.device_type = TF_DEVICE_TYPE_THOR;
572 		break;
573 	default:
574 		BNXT_TF_DBG(ERR, "Unable to determine device for opening session.\n");
575 		return -EINVAL;
576 	}
577 
578 	resources = &params.resources;
579 	rc = bnxt_ulp_tf_resources_get(bp->ulp_ctx, resources);
580 	if (rc)
581 		return rc;
582 
583 	params.bp = bp;
584 	if (app_id == 0 || app_id == 3)
585 		params.wc_num_slices = TF_WC_TCAM_2_SLICE_PER_ROW;
586 	else
587 		params.wc_num_slices = TF_WC_TCAM_1_SLICE_PER_ROW;
588 
589 	rc = tf_open_session(&bp->tfp, &params);
590 	if (rc) {
591 		BNXT_TF_DBG(ERR, "Failed to open TF session - %s, rc = %d\n",
592 			    params.ctrl_chan_name, rc);
593 		return -EINVAL;
594 	}
595 	if (!session->session_opened) {
596 		session->session_opened = 1;
597 		session->g_tfp = rte_zmalloc("bnxt_ulp_session_tfp",
598 					     sizeof(struct tf), 0);
		if (session->g_tfp == NULL) {
			/* Do not dereference a failed allocation */
			BNXT_TF_DBG(ERR, "Failed to allocate tf object\n");
			return -ENOMEM;
		}
599 		session->g_tfp->session = bp->tfp.session;
600 	}
601 	return rc;
602 }
603 
604 /*
605  * Close the ULP session.
606  * It takes the bnxt device and the ulp session state pointers.
607  */
608 static void
609 ulp_ctx_session_close(struct bnxt *bp,
610 		      struct bnxt_ulp_session_state *session)
611 {
612 	/* close the session in the hardware */
613 	if (session->session_opened)
614 		tf_close_session(&bp->tfp);
615 	session->session_opened = 0;
616 	rte_free(session->g_tfp);
617 	session->g_tfp = NULL;
618 }
619 
620 static void
621 bnxt_init_tbl_scope_parms(struct bnxt *bp,
622 			  struct tf_alloc_tbl_scope_parms *params)
623 {
624 	struct bnxt_ulp_device_params	*dparms;
625 	uint32_t dev_id;
626 	int rc;
627 
628 	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
629 	if (rc)
630 		/* TBD: For now, just use default. */
631 		dparms = NULL;
632 	else
633 		dparms = bnxt_ulp_device_params_get(dev_id);
634 
635 	/*
636 	 * Set the flush timer for EEM entries. The value is in 100ms intervals,
637 	 * so 100 is 10s.
638 	 */
639 	params->hw_flow_cache_flush_timer = 100;
640 
641 	if (!dparms) {
642 		params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
643 		params->rx_max_action_entry_sz_in_bits =
644 			BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
645 		params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
646 		params->rx_num_flows_in_k = BNXT_ULP_RX_NUM_FLOWS;
647 
648 		params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
649 		params->tx_max_action_entry_sz_in_bits =
650 			BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
651 		params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
652 		params->tx_num_flows_in_k = BNXT_ULP_TX_NUM_FLOWS;
653 	} else {
654 		params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
655 		params->rx_max_action_entry_sz_in_bits =
656 			BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
657 		params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
658 		params->rx_num_flows_in_k =
659 			dparms->ext_flow_db_num_entries / 1024;
660 
661 		params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
662 		params->tx_max_action_entry_sz_in_bits =
663 			BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
664 		params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
665 		params->tx_num_flows_in_k =
666 			dparms->ext_flow_db_num_entries / 1024;
667 	}
668 	BNXT_TF_DBG(INFO, "Table Scope initialized with %uK flows.\n",
669 		    params->rx_num_flows_in_k);
670 }
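
/*
 * Sizing note, derived from the code above: rx/tx_num_flows_in_k is
 * ext_flow_db_num_entries / 1024, so for example 32768 external flow
 * entries are reported to the table scope as 32 (i.e. 32K flows).
 */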
671 
672 /* Initialize Extended Exact Match host memory. */
673 static int32_t
674 ulp_eem_tbl_scope_init(struct bnxt *bp)
675 {
676 	struct tf_alloc_tbl_scope_parms params = {0};
677 	struct bnxt_ulp_device_params *dparms;
678 	enum bnxt_ulp_flow_mem_type mtype;
679 	uint32_t dev_id;
680 	int rc;
681 
682 	/* Get the dev specific number of flows that need to be supported. */
683 	if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
684 		BNXT_TF_DBG(ERR, "Invalid device id\n");
685 		return -EINVAL;
686 	}
687 
688 	dparms = bnxt_ulp_device_params_get(dev_id);
689 	if (!dparms) {
690 		BNXT_TF_DBG(ERR, "could not fetch the device params\n");
691 		return -ENODEV;
692 	}
693 
694 	if (bnxt_ulp_cntxt_mem_type_get(bp->ulp_ctx, &mtype))
695 		return -EINVAL;
696 	if (mtype != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
697 		BNXT_TF_DBG(INFO, "Table Scope alloc is not required\n");
698 		return 0;
699 	}
700 
701 	bnxt_init_tbl_scope_parms(bp, &params);
702 	rc = tf_alloc_tbl_scope(&bp->tfp, &params);
703 	if (rc) {
704 		BNXT_TF_DBG(ERR, "Unable to allocate eem table scope rc = %d\n",
705 			    rc);
706 		return rc;
707 	}
708 	rc = bnxt_ulp_cntxt_tbl_scope_id_set(bp->ulp_ctx, params.tbl_scope_id);
709 	if (rc) {
710 		BNXT_TF_DBG(ERR, "Unable to set table scope id\n");
711 		return rc;
712 	}
713 
714 	return 0;
715 }
716 
717 /* Free Extended Exact Match host memory */
718 static int32_t
719 ulp_eem_tbl_scope_deinit(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
720 {
721 	struct tf_free_tbl_scope_parms	params = {0};
722 	struct tf			*tfp;
723 	int32_t				rc = 0;
724 	struct bnxt_ulp_device_params *dparms;
725 	enum bnxt_ulp_flow_mem_type mtype;
726 	uint32_t dev_id;
727 
728 	if (!ulp_ctx || !ulp_ctx->cfg_data)
729 		return -EINVAL;
730 
731 	tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx, BNXT_ULP_SHARED_SESSION_NO);
732 	if (!tfp) {
733 		BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
734 		return -EINVAL;
735 	}
736 
737 	/* Get the dev specific number of flows that need to be supported. */
738 	if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
739 		BNXT_TF_DBG(ERR, "Invalid device id\n");
740 		return -EINVAL;
741 	}
742 
743 	dparms = bnxt_ulp_device_params_get(dev_id);
744 	if (!dparms) {
745 		BNXT_TF_DBG(ERR, "could not fetch the device params\n");
746 		return -ENODEV;
747 	}
748 
749 	if (bnxt_ulp_cntxt_mem_type_get(ulp_ctx, &mtype))
750 		return -EINVAL;
751 	if (mtype != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
752 		BNXT_TF_DBG(INFO, "Table Scope free is not required\n");
753 		return 0;
754 	}
755 
756 	rc = bnxt_ulp_cntxt_tbl_scope_id_get(ulp_ctx, &params.tbl_scope_id);
757 	if (rc) {
758 		BNXT_TF_DBG(ERR, "Failed to get the table scope id\n");
759 		return -EINVAL;
760 	}
761 
762 	rc = tf_free_tbl_scope(tfp, &params);
763 	if (rc) {
764 		BNXT_TF_DBG(ERR, "Unable to free table scope\n");
765 		return -EINVAL;
766 	}
767 	return rc;
768 }
769 
770 /* The function to free and deinit the ulp context data. */
771 static int32_t
772 ulp_ctx_deinit(struct bnxt *bp,
773 	       struct bnxt_ulp_session_state *session)
774 {
775 	/* close the tf session */
776 	ulp_ctx_session_close(bp, session);
777 
778 	/* The shared session must be closed last. */
779 	ulp_ctx_shared_session_close(bp, session);
780 
781 	/* Free the contents */
782 	if (session->cfg_data) {
783 		rte_free(session->cfg_data);
784 		bp->ulp_ctx->cfg_data = NULL;
785 		session->cfg_data = NULL;
786 	}
787 	return 0;
788 }
789 
790 /* The function to allocate and initialize the ulp context data. */
791 static int32_t
792 ulp_ctx_init(struct bnxt *bp,
793 	     struct bnxt_ulp_session_state *session)
794 {
795 	struct bnxt_ulp_data	*ulp_data;
796 	int32_t			rc = 0;
797 	enum bnxt_ulp_device_id devid;
798 
799 	/* Initialize the context entries list */
800 	bnxt_ulp_cntxt_list_init();
801 
802 	/* Add the context to the context entries list */
803 	rc = bnxt_ulp_cntxt_list_add(bp->ulp_ctx);
804 	if (rc) {
805 		BNXT_TF_DBG(ERR, "Failed to add the context list entry\n");
806 		return -ENOMEM;
807 	}
808 
809 	/* Allocate memory to hold ulp context data. */
810 	ulp_data = rte_zmalloc("bnxt_ulp_data",
811 			       sizeof(struct bnxt_ulp_data), 0);
812 	if (!ulp_data) {
813 		BNXT_TF_DBG(ERR, "Failed to allocate memory for ulp data\n");
814 		return -ENOMEM;
815 	}
816 
817 	/* Increment the ulp context data reference count usage. */
818 	bp->ulp_ctx->cfg_data = ulp_data;
819 	session->cfg_data = ulp_data;
820 	ulp_data->ref_cnt++;
821 	ulp_data->ulp_flags |= BNXT_ULP_VF_REP_ENABLED;
822 
823 	rc = bnxt_ulp_devid_get(bp, &devid);
824 	if (rc) {
825 		BNXT_TF_DBG(ERR, "Unable to determine device for ULP init.\n");
826 		goto error_deinit;
827 	}
828 
829 	rc = bnxt_ulp_cntxt_dev_id_set(bp->ulp_ctx, devid);
830 	if (rc) {
831 		BNXT_TF_DBG(ERR, "Unable to set device for ULP init.\n");
832 		goto error_deinit;
833 	}
834 
835 	rc = bnxt_ulp_cntxt_app_id_set(bp->ulp_ctx, bp->app_id);
836 	if (rc) {
837 		BNXT_TF_DBG(ERR, "Unable to set app_id for ULP init.\n");
838 		goto error_deinit;
839 	}
840 	BNXT_TF_DBG(DEBUG, "Ulp initialized with app id %d\n", bp->app_id);
841 
842 	rc = bnxt_ulp_cntxt_app_caps_init(bp, bp->app_id, devid);
843 	if (rc) {
844 		BNXT_TF_DBG(ERR, "Unable to set caps for app(%x)/dev(%x)\n",
845 			    bp->app_id, devid);
846 		goto error_deinit;
847 	}
848 
849 	/*
850 	 * Shared session must be created before first regular session but after
851 	 * the ulp_ctx is valid.
852 	 */
853 	rc = ulp_ctx_shared_session_open(bp, session);
854 	if (rc) {
855 		BNXT_TF_DBG(ERR, "Unable to open shared session (%d)\n", rc);
856 		goto error_deinit;
857 	}
858 
859 	/* Open the ulp session. */
860 	rc = ulp_ctx_session_open(bp, session);
861 	if (rc)
862 		goto error_deinit;
863 
864 	bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
865 	return rc;
866 
867 error_deinit:
868 	session->session_opened = 1;
869 	(void)ulp_ctx_deinit(bp, session);
870 	return rc;
871 }
872 
873 /* The function to initialize ulp dparms with devargs */
874 static int32_t
875 ulp_dparms_init(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
876 {
877 	struct bnxt_ulp_device_params *dparms;
878 	uint32_t dev_id = BNXT_ULP_DEVICE_ID_LAST;
879 
880 	if (!bp->max_num_kflows) {
881 		/* Defaults to Internal */
882 		bnxt_ulp_cntxt_mem_type_set(ulp_ctx,
883 					    BNXT_ULP_FLOW_MEM_TYPE_INT);
884 		return 0;
885 	}
886 
887 	/* The max_num_kflows was set, so move to external */
888 	if (bnxt_ulp_cntxt_mem_type_set(ulp_ctx, BNXT_ULP_FLOW_MEM_TYPE_EXT))
889 		return -EINVAL;
890 
891 	if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id)) {
892 		BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
893 		return -EINVAL;
894 	}
895 
896 	dparms = bnxt_ulp_device_params_get(dev_id);
897 	if (!dparms) {
898 		BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
899 		return -EINVAL;
900 	}
901 
902 	/* num_flows = max_num_kflows * 1024 */
903 	dparms->ext_flow_db_num_entries = bp->max_num_kflows * 1024;
904 	/* GFID =  2 * num_flows */
905 	dparms->mark_db_gfid_entries = dparms->ext_flow_db_num_entries * 2;
906 	BNXT_TF_DBG(DEBUG, "Set the number of flows = %" PRIu64 "\n",
907 		    dparms->ext_flow_db_num_entries);
908 
909 	return 0;
910 }
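
/*
 * Worked example (hypothetical devargs value): with max_num_kflows=32 the
 * code above switches to external flow memory and sets
 * ext_flow_db_num_entries = 32 * 1024 = 32768 and
 * mark_db_gfid_entries = 2 * 32768 = 65536.
 */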
911 
912 /* The function to initialize bp flags with truflow features */
913 static int32_t
914 ulp_dparms_dev_port_intf_update(struct bnxt *bp,
915 				struct bnxt_ulp_context *ulp_ctx)
916 {
917 	enum bnxt_ulp_flow_mem_type mtype;
918 
919 	if (bnxt_ulp_cntxt_mem_type_get(ulp_ctx, &mtype))
920 		return -EINVAL;
921 	/* Update the bp flag with gfid flag */
922 	if (mtype == BNXT_ULP_FLOW_MEM_TYPE_EXT)
923 		bp->flags |= BNXT_FLAG_GFID_ENABLE;
924 
925 	return 0;
926 }
927 
928 static int32_t
929 ulp_ctx_attach(struct bnxt *bp,
930 	       struct bnxt_ulp_session_state *session)
931 {
932 	int32_t rc = 0;
933 	uint32_t flags, dev_id = BNXT_ULP_DEVICE_ID_LAST;
934 	uint8_t app_id;
935 
936 	/* Increment the ulp context data reference count usage. */
937 	bp->ulp_ctx->cfg_data = session->cfg_data;
938 	bp->ulp_ctx->cfg_data->ref_cnt++;
939 
940 	/* update the session details in bnxt tfp */
941 	bp->tfp.session = session->g_tfp->session;
942 
943 	/* Add the context to the context entries list */
944 	rc = bnxt_ulp_cntxt_list_add(bp->ulp_ctx);
945 	if (rc) {
946 		BNXT_TF_DBG(ERR, "Failed to add the context list entry\n");
947 		return -EINVAL;
948 	}
949 
950 	/*
951 	 * The supported flag will be set during the init. Use it now to
952 	 * know if we should go through the attach.
953 	 */
954 	rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);
955 	if (rc) {
956 		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
957 		return -EINVAL;
958 	}
959 
960 	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
961 	if (rc) {
962 		BNXT_TF_DBG(ERR, "Unable to get the dev_id.\n");
963 		return -EINVAL;
964 	}
965 
966 	flags = bp->ulp_ctx->cfg_data->ulp_flags;
967 	if (ULP_APP_DEV_UNSUPPORTED_ENABLED(flags)) {
968 		BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
969 			    app_id, dev_id);
970 		return -EINVAL;
971 	}
972 
973 	/* Create a TF Client */
974 	rc = ulp_ctx_session_open(bp, session);
975 	if (rc) {
976 		PMD_DRV_LOG(ERR, "Failed to open ctxt session, rc:%d\n", rc);
977 		bp->tfp.session = NULL;
978 		return rc;
979 	}
980 
981 	bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
982 	return rc;
983 }
984 
985 static void
986 ulp_ctx_detach(struct bnxt *bp)
987 {
988 	if (bp->tfp.session) {
989 		tf_close_session(&bp->tfp);
990 		bp->tfp.session = NULL;
991 	}
992 }
993 
994 /*
995  * Initialize the state of a ULP session.
996  * If the state of a ULP session is not initialized, set its state to
997  * initialized. If the state is already initialized, do nothing.
998  */
999 static void
1000 ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init)
1001 {
1002 	pthread_mutex_lock(&session->bnxt_ulp_mutex);
1003 
1004 	if (!session->bnxt_ulp_init) {
1005 		session->bnxt_ulp_init = true;
1006 		*init = false;
1007 	} else {
1008 		*init = true;
1009 	}
1010 
1011 	pthread_mutex_unlock(&session->bnxt_ulp_mutex);
1012 }
1013 
1014 /*
1015  * Check if an ULP session is already allocated for a specific PCI
1016  * domain & bus. If it is already allocated simply return the session
1017  * pointer, otherwise allocate a new session.
1018  */
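/*
 * For example, two uplink ports at (hypothetical) PCI addresses
 * 0000:01:00.0 and 0000:01:00.1 share the same domain and bus and will
 * therefore resolve to the same session state.
 */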
1019 static struct bnxt_ulp_session_state *
1020 ulp_get_session(struct bnxt *bp, struct rte_pci_addr *pci_addr)
1021 {
1022 	struct bnxt_ulp_session_state *session;
1023 
1024 	/* if multi root capability is enabled, then ignore the pci bus id */
1025 	STAILQ_FOREACH(session, &bnxt_ulp_session_list, next) {
1026 		if (session->pci_info.domain == pci_addr->domain &&
1027 		    (BNXT_MULTIROOT_EN(bp) ||
1028 		    session->pci_info.bus == pci_addr->bus)) {
1029 			return session;
1030 		}
1031 	}
1032 	return NULL;
1033 }
1034 
1035 /*
1036  * Allocate and initialize a ULP session and set its state to INITIALIZED.
1037  * If it is already initialized, simply return the existing session.
1038  */
1039 static struct bnxt_ulp_session_state *
1040 ulp_session_init(struct bnxt *bp,
1041 		 bool *init)
1042 {
1043 	struct rte_pci_device		*pci_dev;
1044 	struct rte_pci_addr		*pci_addr;
1045 	struct bnxt_ulp_session_state	*session;
1046 	int rc = 0;
1047 
1048 	if (!bp)
1049 		return NULL;
1050 
1051 	pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
1052 	pci_addr = &pci_dev->addr;
1053 
1054 	pthread_mutex_lock(&bnxt_ulp_global_mutex);
1055 
1056 	session = ulp_get_session(bp, pci_addr);
1057 	if (!session) {
1058 		/* Session not found, allocate a new one */
1059 		session = rte_zmalloc("bnxt_ulp_session",
1060 				      sizeof(struct bnxt_ulp_session_state),
1061 				      0);
1062 		if (!session) {
1063 			BNXT_TF_DBG(ERR,
1064 				    "Allocation failed for bnxt_ulp_session\n");
1065 			pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1066 			return NULL;
1067 
1068 		} else {
1069 			/* Add it to the queue */
1070 			session->pci_info.domain = pci_addr->domain;
1071 			session->pci_info.bus = pci_addr->bus;
1072 			rc = pthread_mutex_init(&session->bnxt_ulp_mutex, NULL);
1073 			if (rc) {
1074 				BNXT_TF_DBG(ERR, "mutex create failed\n");
1075 				pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1076 				return NULL;
1077 			}
1078 			STAILQ_INSERT_TAIL(&bnxt_ulp_session_list,
1079 					   session, next);
1080 		}
1081 	}
1082 	ulp_context_initialized(session, init);
1083 	pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1084 	return session;
1085 }
1086 
1087 /*
1088  * When a device is closed, remove its associated session from the global
1089  * session list.
1090  */
1091 static void
1092 ulp_session_deinit(struct bnxt_ulp_session_state *session)
1093 {
1094 	if (!session)
1095 		return;
1096 
1097 	if (!session->cfg_data) {
1098 		pthread_mutex_lock(&bnxt_ulp_global_mutex);
1099 		STAILQ_REMOVE(&bnxt_ulp_session_list, session,
1100 			      bnxt_ulp_session_state, next);
1101 		pthread_mutex_destroy(&session->bnxt_ulp_mutex);
1102 		rte_free(session);
1103 		pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1104 	}
1105 }
1106 
1107 /*
1108  * Internal API to enable the NAT feature.
1109  * Set set_flag to 1 to set the value or to 0 to reset it.
1110  * Returns 0 on success.
1111  */
1112 static int32_t
1113 bnxt_ulp_global_cfg_update(struct bnxt *bp,
1114 			   enum tf_dir dir,
1115 			   enum tf_global_config_type type,
1116 			   uint32_t offset,
1117 			   uint32_t value,
1118 			   uint32_t set_flag)
1119 {
1120 	uint32_t global_cfg = 0;
1121 	int rc;
1122 	struct tf_global_cfg_parms parms = { 0 };
1123 
1124 	/* Initialize the params */
1125 	parms.dir = dir;
1126 	parms.type = type;
1127 	parms.offset = offset;
1128 	parms.config = (uint8_t *)&global_cfg;
1129 	parms.config_sz_in_bytes = sizeof(global_cfg);
1130 
1131 	rc = tf_get_global_cfg(&bp->tfp, &parms);
1132 	if (rc) {
1133 		BNXT_TF_DBG(ERR, "Failed to get global cfg 0x%x rc:%d\n",
1134 			    type, rc);
1135 		return rc;
1136 	}
1137 
1138 	if (set_flag)
1139 		global_cfg |= value;
1140 	else
1141 		global_cfg &= ~value;
1142 
1143 	/* SET the register RE_CFA_REG_ACT_TECT */
1144 	rc = tf_set_global_cfg(&bp->tfp, &parms);
1145 	if (rc) {
1146 		BNXT_TF_DBG(ERR, "Failed to set global cfg 0x%x rc:%d\n",
1147 			    type, rc);
1148 		return rc;
1149 	}
1150 	return rc;
1151 }
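
/*
 * Usage sketch, mirroring the calls made in bnxt_ulp_init() and
 * bnxt_ulp_deinit(): NAT is enabled with
 *	bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
 *				   TF_TUNNEL_ENCAP_NAT,
 *				   BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
 * and disabled again by passing 0 as the last (set_flag) argument.
 */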
1152 
1153 /* Internal function to delete all the flows belonging to the given port */
1154 static void
1155 bnxt_ulp_flush_port_flows(struct bnxt *bp)
1156 {
1157 	uint16_t func_id;
1158 
1159 	/* it is assumed that port is either TVF or PF */
1160 	if (ulp_port_db_port_func_id_get(bp->ulp_ctx,
1161 					 bp->eth_dev->data->port_id,
1162 					 &func_id)) {
1163 		BNXT_TF_DBG(ERR, "Invalid argument\n");
1164 		return;
1165 	}
1166 	(void)ulp_flow_db_function_flow_flush(bp->ulp_ctx, func_id);
1167 }
1168 
1169 /* Internal function to delete the VFR default flows */
1170 static void
1171 bnxt_ulp_destroy_vfr_default_rules(struct bnxt *bp, bool global)
1172 {
1173 	struct bnxt_ulp_vfr_rule_info *info;
1174 	uint16_t port_id;
1175 	struct rte_eth_dev *vfr_eth_dev;
1176 	struct bnxt_representor *vfr_bp;
1177 
1178 	if (!BNXT_TRUFLOW_EN(bp) || BNXT_ETH_DEV_IS_REPRESENTOR(bp->eth_dev))
1179 		return;
1180 
1181 	if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
1182 		return;
1183 
1184 	/* Delete default rules for all ports */
1185 	for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
1186 		info = &bp->ulp_ctx->cfg_data->vfr_rule_info[port_id];
1187 		if (!info->valid)
1188 			continue;
1189 
1190 		if (!global && info->parent_port_id !=
1191 		    bp->eth_dev->data->port_id)
1192 			continue;
1193 
1194 		/* Destroy the flows */
1195 		ulp_default_flow_destroy(bp->eth_dev, info->vfr_flow_id);
1196 		/* Clean up the tx action pointer */
1197 		vfr_eth_dev = &rte_eth_devices[port_id];
1198 		if (vfr_eth_dev) {
1199 			vfr_bp = vfr_eth_dev->data->dev_private;
1200 			vfr_bp->vfr_tx_cfa_action = 0;
1201 		}
1202 		memset(info, 0, sizeof(struct bnxt_ulp_vfr_rule_info));
1203 	}
1204 }
1205 
1206 /*
1207  * When a port is de-initialized by DPDK, this function is called
1208  * to clear the ULP context and the rest of the
1209  * infrastructure associated with it.
1210  */
1211 static void
1212 bnxt_ulp_deinit(struct bnxt *bp,
1213 		struct bnxt_ulp_session_state *session)
1214 {
1215 	bool ha_enabled;
1216 
1217 	if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
1218 		return;
1219 
1220 	ha_enabled = bnxt_ulp_cntxt_ha_enabled(bp->ulp_ctx);
1221 	if (ha_enabled && session->session_opened) {
1222 		int32_t rc = ulp_ha_mgr_close(bp->ulp_ctx);
1223 		if (rc)
1224 			BNXT_TF_DBG(ERR, "Failed to close HA (%d)\n", rc);
1225 	}
1226 
1227 	/* clean up default flows */
1228 	bnxt_ulp_destroy_df_rules(bp, true);
1229 
1230 	/* clean up default VFR flows */
1231 	bnxt_ulp_destroy_vfr_default_rules(bp, true);
1232 
1233 	/* clean up regular flows */
1234 	ulp_flow_db_flush_flows(bp->ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR);
1235 
1236 	/* cleanup the eem table scope */
1237 	ulp_eem_tbl_scope_deinit(bp, bp->ulp_ctx);
1238 
1239 	/* cleanup the flow database */
1240 	ulp_flow_db_deinit(bp->ulp_ctx);
1241 
1242 	/* Delete the Mark database */
1243 	ulp_mark_db_deinit(bp->ulp_ctx);
1244 
1245 	/* cleanup the ulp mapper */
1246 	ulp_mapper_deinit(bp->ulp_ctx);
1247 
1248 	/* Delete the Flow Counter Manager */
1249 	ulp_fc_mgr_deinit(bp->ulp_ctx);
1250 
1251 	/* Delete the Port database */
1252 	ulp_port_db_deinit(bp->ulp_ctx);
1253 
1254 	/* Disable NAT feature */
1255 	(void)bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
1256 					 TF_TUNNEL_ENCAP_NAT,
1257 					 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);
1258 
1259 	(void)bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
1260 					 TF_TUNNEL_ENCAP_NAT,
1261 					 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);
1262 
1263 	/* free the flow db lock */
1264 	pthread_mutex_destroy(&bp->ulp_ctx->cfg_data->flow_db_lock);
1265 
1266 	if (ha_enabled)
1267 		ulp_ha_mgr_deinit(bp->ulp_ctx);
1268 
1269 	/* Delete the ulp context and tf session and free the ulp context */
1270 	ulp_ctx_deinit(bp, session);
1271 	BNXT_TF_DBG(DEBUG, "ulp ctx has been deinitialized\n");
1272 }
1273 
1274 /*
1275  * When a port is initialized by DPDK, this function is called
1276  * to initialize the ULP context and the rest of the
1277  * infrastructure associated with it.
1278  */
1279 static int32_t
1280 bnxt_ulp_init(struct bnxt *bp,
1281 	      struct bnxt_ulp_session_state *session)
1282 {
1283 	int rc;
1284 
1285 	/* Allocate and Initialize the ulp context. */
1286 	rc = ulp_ctx_init(bp, session);
1287 	if (rc) {
1288 		BNXT_TF_DBG(ERR, "Failed to create the ulp context\n");
1289 		goto jump_to_error;
1290 	}
1291 
1292 	rc = pthread_mutex_init(&bp->ulp_ctx->cfg_data->flow_db_lock, NULL);
1293 	if (rc) {
1294 		BNXT_TF_DBG(ERR, "Unable to initialize flow db lock\n");
1295 		goto jump_to_error;
1296 	}
1297 
1298 	/* Initialize ulp dparms with values devargs passed */
1299 	rc = ulp_dparms_init(bp, bp->ulp_ctx);
1300 	if (rc) {
1301 		BNXT_TF_DBG(ERR, "Failed to initialize the dparms\n");
1302 		goto jump_to_error;
1303 	}
1304 
1305 	/* create the port database */
1306 	rc = ulp_port_db_init(bp->ulp_ctx, bp->port_cnt);
1307 	if (rc) {
1308 		BNXT_TF_DBG(ERR, "Failed to create the port database\n");
1309 		goto jump_to_error;
1310 	}
1311 
1312 	/* Create the Mark database. */
1313 	rc = ulp_mark_db_init(bp->ulp_ctx);
1314 	if (rc) {
1315 		BNXT_TF_DBG(ERR, "Failed to create the mark database\n");
1316 		goto jump_to_error;
1317 	}
1318 
1319 	/* Create the flow database. */
1320 	rc = ulp_flow_db_init(bp->ulp_ctx);
1321 	if (rc) {
1322 		BNXT_TF_DBG(ERR, "Failed to create the flow database\n");
1323 		goto jump_to_error;
1324 	}
1325 
1326 	/* Create the eem table scope. */
1327 	rc = ulp_eem_tbl_scope_init(bp);
1328 	if (rc) {
1329 		BNXT_TF_DBG(ERR, "Failed to create the eem scope table\n");
1330 		goto jump_to_error;
1331 	}
1332 
1333 	rc = ulp_mapper_init(bp->ulp_ctx);
1334 	if (rc) {
1335 		BNXT_TF_DBG(ERR, "Failed to initialize ulp mapper\n");
1336 		goto jump_to_error;
1337 	}
1338 
1339 	rc = ulp_fc_mgr_init(bp->ulp_ctx);
1340 	if (rc) {
1341 		BNXT_TF_DBG(ERR, "Failed to initialize ulp flow counter mgr\n");
1342 		goto jump_to_error;
1343 	}
1344 
1345 	/*
1346 	 * Enable the NAT feature. Set the global configuration register
1347 	 * tunnel encap to enable NAT, reusing the existing inner
1348 	 * L2 header smac and dmac.
1349 	 */
1350 	rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
1351 					TF_TUNNEL_ENCAP_NAT,
1352 					BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
1353 	if (rc) {
1354 		BNXT_TF_DBG(ERR, "Failed to set rx global configuration\n");
1355 		goto jump_to_error;
1356 	}
1357 
1358 	rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
1359 					TF_TUNNEL_ENCAP_NAT,
1360 					BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
1361 	if (rc) {
1362 		BNXT_TF_DBG(ERR, "Failed to set tx global configuration\n");
1363 		goto jump_to_error;
1364 	}
1365 
1366 	if (bnxt_ulp_cntxt_ha_enabled(bp->ulp_ctx)) {
1367 		rc = ulp_ha_mgr_init(bp->ulp_ctx);
1368 		if (rc) {
1369 			BNXT_TF_DBG(ERR, "Failed to initialize HA %d\n", rc);
1370 			goto jump_to_error;
1371 		}
1372 		rc = ulp_ha_mgr_open(bp->ulp_ctx);
1373 		if (rc) {
1374 			BNXT_TF_DBG(ERR, "Failed to Process HA Open %d\n", rc);
1375 			goto jump_to_error;
1376 		}
1377 	}
1378 	BNXT_TF_DBG(DEBUG, "ulp ctx has been initialized\n");
1379 	return rc;
1380 
1381 jump_to_error:
1382 	bnxt_ulp_deinit(bp, session);
1383 	return rc;
1384 }
1385 
1386 /*
1387  * When a port is initialized by DPDK, this function sets up
1388  * the port specific details.
1389  */
1390 int32_t
1391 bnxt_ulp_port_init(struct bnxt *bp)
1392 {
1393 	struct bnxt_ulp_session_state *session;
1394 	bool initialized;
1395 	enum bnxt_ulp_device_id devid = BNXT_ULP_DEVICE_ID_LAST;
1396 	uint32_t ulp_flags;
1397 	int32_t rc = 0;
1398 
1399 	if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1400 		BNXT_TF_DBG(ERR,
1401 			    "Skip ulp init for port: %d, not a TVF or PF\n",
1402 			    bp->eth_dev->data->port_id);
1403 		return rc;
1404 	}
1405 
1406 	if (!BNXT_TRUFLOW_EN(bp)) {
1407 		BNXT_TF_DBG(ERR,
1408 			    "Skip ulp init for port: %d, truflow is not enabled\n",
1409 			    bp->eth_dev->data->port_id);
1410 		return rc;
1411 	}
1412 
1413 	if (bp->ulp_ctx) {
1414 		BNXT_TF_DBG(DEBUG, "ulp ctx already allocated\n");
1415 		return rc;
1416 	}
1417 
1418 	bp->ulp_ctx = rte_zmalloc("bnxt_ulp_ctx",
1419 				  sizeof(struct bnxt_ulp_context), 0);
1420 	if (!bp->ulp_ctx) {
1421 		BNXT_TF_DBG(ERR, "Failed to allocate ulp ctx\n");
1422 		return -ENOMEM;
1423 	}
1424 
1425 	/*
1426 	 * Multiple uplink ports can be associated with a single vswitch.
1427 	 * Make sure only the port that is started first will initialize
1428 	 * the TF session.
1429 	 */
1430 	session = ulp_session_init(bp, &initialized);
1431 	if (!session) {
1432 		BNXT_TF_DBG(ERR, "Failed to initialize the tf session\n");
1433 		rc = -EIO;
1434 		goto jump_to_error;
1435 	}
1436 
1437 	if (initialized) {
1438 		/*
1439 		 * If ULP is already initialized for a specific domain then
1440 		 * simply assign the ulp context to this rte_eth_dev.
1441 		 */
1442 		rc = ulp_ctx_attach(bp, session);
1443 		if (rc) {
1444 			BNXT_TF_DBG(ERR, "Failed to attach the ulp context\n");
1445 			goto jump_to_error;
1446 		}
1447 
1448 		/*
1449 		 * Attach to the shared session, must be called after the
1450 		 * ulp_ctx_attach in order to ensure that ulp data is available
1451 		 * for attaching.
1452 		 */
1453 		rc = ulp_ctx_shared_session_attach(bp, session);
1454 		if (rc) {
1455 			BNXT_TF_DBG(ERR,
1456 				    "Failed to attach to shared session (%d)\n", rc);
1457 			goto jump_to_error;
1458 		}
1459 	} else {
1460 		rc = bnxt_ulp_init(bp, session);
1461 		if (rc) {
1462 			BNXT_TF_DBG(ERR, "Failed to initialize the ulp\n");
1463 			goto jump_to_error;
1464 		}
1465 	}
1466 
1467 	/* Update bnxt driver flags */
1468 	rc = ulp_dparms_dev_port_intf_update(bp, bp->ulp_ctx);
1469 	if (rc) {
1470 		BNXT_TF_DBG(ERR, "Failed to update driver flags\n");
1471 		goto jump_to_error;
1472 	}
1473 
1474 	/* update the port database for the given interface */
1475 	rc = ulp_port_db_dev_port_intf_update(bp->ulp_ctx, bp->eth_dev);
1476 	if (rc) {
1477 		BNXT_TF_DBG(ERR, "Failed to update port database\n");
1478 		goto jump_to_error;
1479 	}
1480 	/* create the default rules */
1481 	rc = bnxt_ulp_create_df_rules(bp);
1482 	if (rc) {
1483 		BNXT_TF_DBG(ERR, "Failed to create default flow\n");
1484 		goto jump_to_error;
1485 	}
1486 
1487 	rc = bnxt_ulp_devid_get(bp, &devid);
1488 	if (rc) {
1489 		BNXT_TF_DBG(ERR, "Unable to determine device for ULP port init.\n");
1490 		goto jump_to_error;
1491 	}
1492 
1493 	/* set the accumulation of the stats */
1494 	if (BNXT_ACCUM_STATS_EN(bp))
1495 		bp->ulp_ctx->cfg_data->accum_stats = true;
1496 
1497 	BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port init, accum_stats:%d\n",
1498 		    bp->eth_dev->data->port_id,
1499 		    bp->ulp_ctx->cfg_data->accum_stats);
1500 
1501 	/* set the unicast mode */
1502 	if (bnxt_ulp_cntxt_ptr2_ulp_flags_get(bp->ulp_ctx, &ulp_flags)) {
1503 		BNXT_TF_DBG(ERR, "Error in getting ULP context flags\n");
1504 		goto jump_to_error;
1505 	}
1506 	if (ulp_flags & BNXT_ULP_APP_UNICAST_ONLY) {
1507 		if (bnxt_pmd_set_unicast_rxmask(bp->eth_dev)) {
1508 			BNXT_TF_DBG(ERR, "Error in setting unicast rxmode\n");
1509 			goto jump_to_error;
1510 		}
1511 	}
1512 
1513 	return rc;
1514 
1515 jump_to_error:
1516 	bnxt_ulp_port_deinit(bp);
1517 	return rc;
1518 }
1519 
1520 /*
1521  * When a port is de-initialized by DPDK, this function clears up
1522  * the port specific details.
1523  */
1524 void
1525 bnxt_ulp_port_deinit(struct bnxt *bp)
1526 {
1527 	struct bnxt_ulp_session_state *session;
1528 	struct rte_pci_device *pci_dev;
1529 	struct rte_pci_addr *pci_addr;
1530 
1531 	if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1532 		BNXT_TF_DBG(ERR,
1533 			    "Skip ULP deinit port:%d, not a TVF or PF\n",
1534 			    bp->eth_dev->data->port_id);
1535 		return;
1536 	}
1537 
1538 	if (!BNXT_TRUFLOW_EN(bp)) {
1539 		BNXT_TF_DBG(ERR,
1540 			    "Skip ULP deinit for port:%d, truflow is not enabled\n",
1541 			    bp->eth_dev->data->port_id);
1542 		return;
1543 	}
1544 
1545 	if (!bp->ulp_ctx) {
1546 		BNXT_TF_DBG(DEBUG, "ulp ctx already de-allocated\n");
1547 		return;
1548 	}
1549 
1550 	BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port deinit\n",
1551 		    bp->eth_dev->data->port_id);
1552 
1553 	/* Free the ulp context in the context entry list */
1554 	bnxt_ulp_cntxt_list_del(bp->ulp_ctx);
1555 
1556 	/* Get the session details  */
1557 	pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
1558 	pci_addr = &pci_dev->addr;
1559 	pthread_mutex_lock(&bnxt_ulp_global_mutex);
1560 	session = ulp_get_session(bp, pci_addr);
1561 	pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1562 
1563 	/* If the session is not found, just exit */
1564 	if (!session) {
1565 		/* Free the ulp context */
1566 		rte_free(bp->ulp_ctx);
1567 		bp->ulp_ctx = NULL;
1568 		return;
1569 	}
1570 
1571 	/* Check the reference count to deinit or detach */
1572 	if (bp->ulp_ctx->cfg_data && bp->ulp_ctx->cfg_data->ref_cnt) {
1573 		bp->ulp_ctx->cfg_data->ref_cnt--;
1574 		if (bp->ulp_ctx->cfg_data->ref_cnt) {
1575 			/* free the port details */
1576 			/* Free the default flow rules associated with this port */
1577 			bnxt_ulp_destroy_df_rules(bp, false);
1578 			bnxt_ulp_destroy_vfr_default_rules(bp, false);
1579 
1580 			/* free flows associated with this port */
1581 			bnxt_ulp_flush_port_flows(bp);
1582 
1583 			/* close the session associated with this port */
1584 			ulp_ctx_detach(bp);
1585 
1586 			/* always detach/close shared after the session. */
1587 			ulp_ctx_shared_session_detach(bp);
1588 		} else {
1589 			/* Perform ulp ctx deinit */
1590 			bnxt_ulp_deinit(bp, session);
1591 		}
1592 	}
1593 
1594 	/* clean up the session */
1595 	ulp_session_deinit(session);
1596 
1597 	/* Free the ulp context */
1598 	rte_free(bp->ulp_ctx);
1599 	bp->ulp_ctx = NULL;
1600 }
1601 
1602 /* Below are the accessor functions for the internal data of the ulp context. */
1603 /* Function to set the Mark DB into the context */
1604 int32_t
1605 bnxt_ulp_cntxt_ptr2_mark_db_set(struct bnxt_ulp_context *ulp_ctx,
1606 				struct bnxt_ulp_mark_tbl *mark_tbl)
1607 {
1608 	if (!ulp_ctx || !ulp_ctx->cfg_data) {
1609 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1610 		return -EINVAL;
1611 	}
1612 
1613 	ulp_ctx->cfg_data->mark_tbl = mark_tbl;
1614 
1615 	return 0;
1616 }
1617 
1618 /* Function to retrieve the Mark DB from the context. */
1619 struct bnxt_ulp_mark_tbl *
1620 bnxt_ulp_cntxt_ptr2_mark_db_get(struct bnxt_ulp_context *ulp_ctx)
1621 {
1622 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1623 		return NULL;
1624 
1625 	return ulp_ctx->cfg_data->mark_tbl;
1626 }
1627 
1628 bool
1629 bnxt_ulp_cntxt_shared_session_enabled(struct bnxt_ulp_context *ulp_ctx)
1630 {
1631 	return ULP_SHARED_SESSION_IS_ENABLED(ulp_ctx->cfg_data->ulp_flags);
1632 }
1633 
1634 int32_t
1635 bnxt_ulp_cntxt_app_id_set(struct bnxt_ulp_context *ulp_ctx, uint8_t app_id)
1636 {
1637 	if (!ulp_ctx)
1638 		return -EINVAL;
1639 	ulp_ctx->cfg_data->app_id = app_id;
1640 	return 0;
1641 }
1642 
1643 int32_t
1644 bnxt_ulp_cntxt_app_id_get(struct bnxt_ulp_context *ulp_ctx, uint8_t *app_id)
1645 {
1646 	/* Default APP id is zero */
1647 	if (!ulp_ctx || !app_id)
1648 		return -EINVAL;
1649 	*app_id = ulp_ctx->cfg_data->app_id;
1650 	return 0;
1651 }
1652 
1653 /* Function to set the device id of the hardware. */
1654 int32_t
1655 bnxt_ulp_cntxt_dev_id_set(struct bnxt_ulp_context *ulp_ctx,
1656 			  uint32_t dev_id)
1657 {
1658 	if (ulp_ctx && ulp_ctx->cfg_data) {
1659 		ulp_ctx->cfg_data->dev_id = dev_id;
1660 		return 0;
1661 	}
1662 
1663 	return -EINVAL;
1664 }
1665 
1666 /* Function to get the device id of the hardware. */
1667 int32_t
1668 bnxt_ulp_cntxt_dev_id_get(struct bnxt_ulp_context *ulp_ctx,
1669 			  uint32_t *dev_id)
1670 {
1671 	if (ulp_ctx && ulp_ctx->cfg_data) {
1672 		*dev_id = ulp_ctx->cfg_data->dev_id;
1673 		return 0;
1674 	}
1675 	*dev_id = BNXT_ULP_DEVICE_ID_LAST;
1676 	BNXT_TF_DBG(ERR, "Failed to read dev_id from ulp ctxt\n");
1677 	return -EINVAL;
1678 }
1679 
1680 int32_t
1681 bnxt_ulp_cntxt_mem_type_set(struct bnxt_ulp_context *ulp_ctx,
1682 			    enum bnxt_ulp_flow_mem_type mem_type)
1683 {
1684 	if (ulp_ctx && ulp_ctx->cfg_data) {
1685 		ulp_ctx->cfg_data->mem_type = mem_type;
1686 		return 0;
1687 	}
1688 	BNXT_TF_DBG(ERR, "Failed to write mem_type in ulp ctxt\n");
1689 	return -EINVAL;
1690 }
1691 
1692 int32_t
1693 bnxt_ulp_cntxt_mem_type_get(struct bnxt_ulp_context *ulp_ctx,
1694 			    enum bnxt_ulp_flow_mem_type *mem_type)
1695 {
1696 	if (ulp_ctx && ulp_ctx->cfg_data) {
1697 		*mem_type = ulp_ctx->cfg_data->mem_type;
1698 		return 0;
1699 	}
1700 	*mem_type = BNXT_ULP_FLOW_MEM_TYPE_LAST;
1701 	BNXT_TF_DBG(ERR, "Failed to read mem_type in ulp ctxt\n");
1702 	return -EINVAL;
1703 }
1704 
1705 /* Function to get the table scope id of the EEM table. */
1706 int32_t
1707 bnxt_ulp_cntxt_tbl_scope_id_get(struct bnxt_ulp_context *ulp_ctx,
1708 				uint32_t *tbl_scope_id)
1709 {
1710 	if (ulp_ctx && ulp_ctx->cfg_data) {
1711 		*tbl_scope_id = ulp_ctx->cfg_data->tbl_scope_id;
1712 		return 0;
1713 	}
1714 
1715 	return -EINVAL;
1716 }
1717 
1718 /* Function to set the table scope id of the EEM table. */
1719 int32_t
1720 bnxt_ulp_cntxt_tbl_scope_id_set(struct bnxt_ulp_context *ulp_ctx,
1721 				uint32_t tbl_scope_id)
1722 {
1723 	if (ulp_ctx && ulp_ctx->cfg_data) {
1724 		ulp_ctx->cfg_data->tbl_scope_id = tbl_scope_id;
1725 		return 0;
1726 	}
1727 
1728 	return -EINVAL;
1729 }
1730 
1731 /* Function to set the shared tfp session details in the ulp context. */
1732 int32_t
1733 bnxt_ulp_cntxt_shared_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
1734 {
1735 	if (!ulp) {
1736 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1737 		return -EINVAL;
1738 	}
1739 
1740 	if (tfp == NULL) {
1741 		if (ulp->cfg_data->num_shared_clients > 0)
1742 			ulp->cfg_data->num_shared_clients--;
1743 	} else {
1744 		ulp->cfg_data->num_shared_clients++;
1745 	}
1746 
1747 	ulp->g_shared_tfp = tfp;
1748 	return 0;
1749 }
1750 
1751 /* Function to get the shared tfp session details from the ulp context. */
1752 struct tf *
1753 bnxt_ulp_cntxt_shared_tfp_get(struct bnxt_ulp_context *ulp)
1754 {
1755 	if (!ulp) {
1756 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1757 		return NULL;
1758 	}
1759 	return ulp->g_shared_tfp;
1760 }
1761 
1762 /* Function to get the number of shared clients attached */
1763 uint8_t
1764 bnxt_ulp_cntxt_num_shared_clients_get(struct bnxt_ulp_context *ulp)
1765 {
1766 	if (ulp == NULL || ulp->cfg_data == NULL) {
1767 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1768 		return 0;
1769 	}
1770 	return ulp->cfg_data->num_shared_clients;
1771 }
1772 
1773 /* Function to set the tfp session details in the ulp context. */
1774 int32_t
1775 bnxt_ulp_cntxt_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
1776 {
1777 	if (!ulp) {
1778 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1779 		return -EINVAL;
1780 	}
1781 
1782 	ulp->g_tfp = tfp;
1783 	return 0;
1784 }
1785 
1786 /* Function to get the tfp session details from the ulp context. */
1787 struct tf *
1788 bnxt_ulp_cntxt_tfp_get(struct bnxt_ulp_context *ulp,
1789 		       enum bnxt_ulp_shared_session shared)
1790 {
1791 	if (!ulp) {
1792 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1793 		return NULL;
1794 	}
1795 	if (shared)
1796 		return ulp->g_shared_tfp;
1797 	else
1798 		return ulp->g_tfp;
1799 }
1800 
1801 /*
1802  * Get the device table entry based on the device id.
1803  *
1804  * dev_id [in] The device id of the hardware
1805  *
1806  * Returns the pointer to the device parameters.
1807  */
1808 struct bnxt_ulp_device_params *
1809 bnxt_ulp_device_params_get(uint32_t dev_id)
1810 {
1811 	if (dev_id < BNXT_ULP_MAX_NUM_DEVICES)
1812 		return &ulp_device_params[dev_id];
1813 	return NULL;
1814 }
1815 
1816 /* Function to set the flow database to the ulp context. */
1817 int32_t
1818 bnxt_ulp_cntxt_ptr2_flow_db_set(struct bnxt_ulp_context	*ulp_ctx,
1819 				struct bnxt_ulp_flow_db	*flow_db)
1820 {
1821 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1822 		return -EINVAL;
1823 
1824 	ulp_ctx->cfg_data->flow_db = flow_db;
1825 	return 0;
1826 }
1827 
1828 /* Function to get the flow database from the ulp context. */
1829 struct bnxt_ulp_flow_db	*
1830 bnxt_ulp_cntxt_ptr2_flow_db_get(struct bnxt_ulp_context	*ulp_ctx)
1831 {
1832 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1833 		return NULL;
1834 
1835 	return ulp_ctx->cfg_data->flow_db;
1836 }
1837 
1838 /* Function to get the tunnel cache table info from the ulp context. */
1839 struct bnxt_tun_cache_entry *
1840 bnxt_ulp_cntxt_ptr2_tun_tbl_get(struct bnxt_ulp_context *ulp_ctx)
1841 {
1842 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1843 		return NULL;
1844 
1845 	return ulp_ctx->cfg_data->tun_tbl;
1846 }
1847 
1848 /* Function to get the ulp context from eth device. */
1849 struct bnxt_ulp_context	*
1850 bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev	*dev)
1851 {
1852 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1853 
1854 	if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) {
1855 		struct bnxt_representor *vfr = dev->data->dev_private;
1856 
1857 		bp = vfr->parent_dev->data->dev_private;
1858 	}
1859 
1860 	if (!bp) {
1861 		BNXT_TF_DBG(ERR, "Bnxt private data is not initialized\n");
1862 		return NULL;
1863 	}
1864 	return bp->ulp_ctx;
1865 }
1866 
1867 int32_t
1868 bnxt_ulp_cntxt_ptr2_mapper_data_set(struct bnxt_ulp_context *ulp_ctx,
1869 				    void *mapper_data)
1870 {
1871 	if (!ulp_ctx || !ulp_ctx->cfg_data) {
1872 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1873 		return -EINVAL;
1874 	}
1875 
1876 	ulp_ctx->cfg_data->mapper_data = mapper_data;
1877 	return 0;
1878 }
1879 
1880 void *
1881 bnxt_ulp_cntxt_ptr2_mapper_data_get(struct bnxt_ulp_context *ulp_ctx)
1882 {
1883 	if (!ulp_ctx || !ulp_ctx->cfg_data) {
1884 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1885 		return NULL;
1886 	}
1887 
1888 	return ulp_ctx->cfg_data->mapper_data;
1889 }
1890 
1891 /* Function to set the port database to the ulp context. */
1892 int32_t
1893 bnxt_ulp_cntxt_ptr2_port_db_set(struct bnxt_ulp_context	*ulp_ctx,
1894 				struct bnxt_ulp_port_db	*port_db)
1895 {
1896 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1897 		return -EINVAL;
1898 
1899 	ulp_ctx->cfg_data->port_db = port_db;
1900 	return 0;
1901 }
1902 
1903 /* Function to get the port database from the ulp context. */
1904 struct bnxt_ulp_port_db *
1905 bnxt_ulp_cntxt_ptr2_port_db_get(struct bnxt_ulp_context	*ulp_ctx)
1906 {
1907 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1908 		return NULL;
1909 
1910 	return ulp_ctx->cfg_data->port_db;
1911 }
1912 
1913 /* Function to set the flow counter info into the context */
1914 int32_t
1915 bnxt_ulp_cntxt_ptr2_fc_info_set(struct bnxt_ulp_context *ulp_ctx,
1916 				struct bnxt_ulp_fc_info *ulp_fc_info)
1917 {
1918 	if (!ulp_ctx || !ulp_ctx->cfg_data) {
1919 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1920 		return -EINVAL;
1921 	}
1922 
1923 	ulp_ctx->cfg_data->fc_info = ulp_fc_info;
1924 
1925 	return 0;
1926 }
1927 
1928 /* Function to retrieve the flow counter info from the context. */
1929 struct bnxt_ulp_fc_info *
1930 bnxt_ulp_cntxt_ptr2_fc_info_get(struct bnxt_ulp_context *ulp_ctx)
1931 {
1932 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1933 		return NULL;
1934 
1935 	return ulp_ctx->cfg_data->fc_info;
1936 }
1937 
1938 /* Function to get the ulp flags from the ulp context. */
1939 int32_t
1940 bnxt_ulp_cntxt_ptr2_ulp_flags_get(struct bnxt_ulp_context *ulp_ctx,
1941 				  uint32_t *flags)
1942 {
1943 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1944 		return -1;
1945 
1946 	*flags =  ulp_ctx->cfg_data->ulp_flags;
1947 	return 0;
1948 }
1949 
1950 /* Function to get the ulp vfr info from the ulp context. */
1951 struct bnxt_ulp_vfr_rule_info*
1952 bnxt_ulp_cntxt_ptr2_ulp_vfr_info_get(struct bnxt_ulp_context *ulp_ctx,
1953 				     uint32_t port_id)
1954 {
1955 	if (!ulp_ctx || !ulp_ctx->cfg_data || port_id >= RTE_MAX_ETHPORTS)
1956 		return NULL;
1957 
1958 	return &ulp_ctx->cfg_data->vfr_rule_info[port_id];
1959 }
1960 
1961 /* Function to acquire the flow database lock from the ulp context. */
1962 int32_t
1963 bnxt_ulp_cntxt_acquire_fdb_lock(struct bnxt_ulp_context	*ulp_ctx)
1964 {
1965 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1966 		return -1;
1967 
1968 	if (pthread_mutex_lock(&ulp_ctx->cfg_data->flow_db_lock)) {
1969 		BNXT_TF_DBG(ERR, "unable to acquire fdb lock\n");
1970 		return -1;
1971 	}
1972 	return 0;
1973 }
1974 
1975 /* Function to release the flow database lock from the ulp context. */
1976 void
1977 bnxt_ulp_cntxt_release_fdb_lock(struct bnxt_ulp_context	*ulp_ctx)
1978 {
1979 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1980 		return;
1981 
1982 	pthread_mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock);
1983 }
1984 
1985 /* Function to set the ha info into the context */
1986 int32_t
1987 bnxt_ulp_cntxt_ptr2_ha_info_set(struct bnxt_ulp_context *ulp_ctx,
1988 				struct bnxt_ulp_ha_mgr_info *ulp_ha_info)
1989 {
1990 	if (ulp_ctx == NULL || ulp_ctx->cfg_data == NULL) {
1991 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1992 		return -EINVAL;
1993 	}
1994 	ulp_ctx->cfg_data->ha_info = ulp_ha_info;
1995 	return 0;
1996 }
1997 
1998 /* Function to retrieve the ha info from the context. */
1999 struct bnxt_ulp_ha_mgr_info *
2000 bnxt_ulp_cntxt_ptr2_ha_info_get(struct bnxt_ulp_context *ulp_ctx)
2001 {
2002 	if (ulp_ctx == NULL || ulp_ctx->cfg_data == NULL)
2003 		return NULL;
2004 	return ulp_ctx->cfg_data->ha_info;
2005 }
2006 
2007 bool
2008 bnxt_ulp_cntxt_ha_enabled(struct bnxt_ulp_context *ulp_ctx)
2009 {
2010 	if (ulp_ctx == NULL || ulp_ctx->cfg_data == NULL)
2011 		return false;
2012 	return !!ULP_HIGH_AVAIL_IS_ENABLED(ulp_ctx->cfg_data->ulp_flags);
2013 }
2014 
2015 static int32_t
2016 bnxt_ulp_cntxt_list_init(void)
2017 {
2018 	/* Create the cntxt spin lock */
2019 	rte_spinlock_init(&bnxt_ulp_ctxt_lock);
2020 
2021 	return 0;
2022 }
2023 
2024 static int32_t
2025 bnxt_ulp_cntxt_list_add(struct bnxt_ulp_context *ulp_ctx)
2026 {
2027 	struct ulp_context_list_entry	*entry;
2028 
2029 	entry = rte_zmalloc(NULL, sizeof(struct ulp_context_list_entry), 0);
2030 	if (entry == NULL) {
2031 		BNXT_TF_DBG(ERR, "unable to allocate memory\n");
2032 		return -ENOMEM;
2033 	}
2034 
2035 	rte_spinlock_lock(&bnxt_ulp_ctxt_lock);
2036 	entry->ulp_ctx = ulp_ctx;
2037 	TAILQ_INSERT_TAIL(&ulp_cntx_list, entry, next);
2038 	rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
2039 	return 0;
2040 }
2041 
2042 static void
2043 bnxt_ulp_cntxt_list_del(struct bnxt_ulp_context *ulp_ctx)
2044 {
2045 	struct ulp_context_list_entry	*entry, *temp;
2046 
2047 	rte_spinlock_lock(&bnxt_ulp_ctxt_lock);
2048 	RTE_TAILQ_FOREACH_SAFE(entry, &ulp_cntx_list, next, temp) {
2049 		if (entry->ulp_ctx == ulp_ctx) {
2050 			TAILQ_REMOVE(&ulp_cntx_list, entry, next);
2051 			rte_free(entry);
2052 			break;
2053 		}
2054 	}
2055 	rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
2056 }
2057 
2058 struct bnxt_ulp_context *
2059 bnxt_ulp_cntxt_entry_acquire(void)
2060 {
2061 	struct ulp_context_list_entry	*entry;
2062 
2063 	/* take a lock and get the first ulp context available */
2064 	if (rte_spinlock_trylock(&bnxt_ulp_ctxt_lock)) {
2065 		TAILQ_FOREACH(entry, &ulp_cntx_list, next)
2066 			if (entry->ulp_ctx)
2067 				return entry->ulp_ctx;
		/* Nothing found, release the lock before returning */
		rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
2068 	}
2069 	return NULL;
2070 }
2071 
2072 void
2073 bnxt_ulp_cntxt_entry_release(void)
2074 {
2075 	rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
2076 }
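
/*
 * Usage sketch (illustrative): the acquire/release pair brackets access to
 * a ulp context, e.g.
 *	ulp_ctx = bnxt_ulp_cntxt_entry_acquire();
 *	if (ulp_ctx) {
 *		- use ulp_ctx while the context list lock is held -
 *		bnxt_ulp_cntxt_entry_release();
 *	}
 */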
2077 
2078 /* Function to get the app tunnel details from the ulp context. */
2079 struct bnxt_flow_app_tun_ent *
2080 bnxt_ulp_cntxt_ptr2_app_tun_list_get(struct bnxt_ulp_context *ulp)
2081 {
2082 	if (!ulp || !ulp->cfg_data)
2083 		return NULL;
2084 
2085 	return ulp->cfg_data->app_tun;
2086 }
2087