xref: /dpdk/drivers/net/bnxt/tf_ulp/bnxt_ulp.c (revision daa02b5cddbb8e11b31d41e2bf7bb1ae64dcae2f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2021 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <rte_log.h>
7 #include <rte_malloc.h>
8 #include <rte_flow.h>
9 #include <rte_flow_driver.h>
10 #include <rte_tailq.h>
11 #include <rte_spinlock.h>
12 
13 #include "bnxt.h"
14 #include "bnxt_ulp.h"
15 #include "bnxt_tf_common.h"
16 #include "tf_core.h"
17 #include "tf_ext_flow_handle.h"
18 
19 #include "ulp_template_db_enum.h"
20 #include "ulp_template_struct.h"
21 #include "ulp_mark_mgr.h"
22 #include "ulp_fc_mgr.h"
23 #include "ulp_flow_db.h"
24 #include "ulp_mapper.h"
25 #include "ulp_port_db.h"
26 #include "ulp_tun.h"
27 #include "ulp_ha_mgr.h"
28 #include "bnxt_tf_pmd_shim.h"
29 
30 /* Linked list of all TF sessions. */
31 STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list =
32 			STAILQ_HEAD_INITIALIZER(bnxt_ulp_session_list);
33 
34 /* Mutex to synchronize bnxt_ulp_session_list operations. */
35 static pthread_mutex_t bnxt_ulp_global_mutex = PTHREAD_MUTEX_INITIALIZER;
36 
37 /* Spin lock to protect context global list */
38 rte_spinlock_t bnxt_ulp_ctxt_lock;
39 TAILQ_HEAD(cntx_list_entry_list, ulp_context_list_entry);
40 static struct cntx_list_entry_list ulp_cntx_list =
41 	TAILQ_HEAD_INITIALIZER(ulp_cntx_list);
42 
43 /* Static function declarations */
44 static int32_t bnxt_ulp_cntxt_list_init(void);
45 static int32_t bnxt_ulp_cntxt_list_add(struct bnxt_ulp_context *ulp_ctx);
46 static void bnxt_ulp_cntxt_list_del(struct bnxt_ulp_context *ulp_ctx);
47 
48 /*
49  * Allow the deletion of context only for the bnxt device that
50  * created the session.
51  */
52 bool
53 ulp_ctx_deinit_allowed(struct bnxt_ulp_context *ulp_ctx)
54 {
55 	if (!ulp_ctx || !ulp_ctx->cfg_data)
56 		return false;
57 
58 	if (!ulp_ctx->cfg_data->ref_cnt) {
59 		BNXT_TF_DBG(DEBUG, "ulp ctx shall initiate deinit\n");
60 		return true;
61 	}
62 
63 	return false;
64 }
65 
66 static int32_t
67 bnxt_ulp_devid_get(struct bnxt *bp,
68 		   enum bnxt_ulp_device_id  *ulp_dev_id)
69 {
70 	if (BNXT_CHIP_P5(bp)) {
71 		*ulp_dev_id = BNXT_ULP_DEVICE_ID_THOR;
72 		return 0;
73 	}
74 
75 	if (BNXT_STINGRAY(bp))
76 		*ulp_dev_id = BNXT_ULP_DEVICE_ID_STINGRAY;
77 	else
78 		/* Default to Whitney+ */
79 		*ulp_dev_id = BNXT_ULP_DEVICE_ID_WH_PLUS;
80 
81 	return 0;
82 }
83 
84 struct bnxt_ulp_app_capabilities_info *
85 bnxt_ulp_app_cap_list_get(uint32_t *num_entries)
86 {
87 	if (!num_entries)
88 		return NULL;
89 	*num_entries = BNXT_ULP_APP_CAP_TBL_MAX_SZ;
90 	return ulp_app_cap_info_list;
91 }
92 
93 static struct bnxt_ulp_resource_resv_info *
94 bnxt_ulp_app_resource_resv_list_get(uint32_t *num_entries)
95 {
96 	if (num_entries == NULL)
97 		return NULL;
98 	*num_entries = BNXT_ULP_APP_RESOURCE_RESV_LIST_MAX_SZ;
99 	return ulp_app_resource_resv_list;
100 }
101 
102 struct bnxt_ulp_resource_resv_info *
103 bnxt_ulp_resource_resv_list_get(uint32_t *num_entries)
104 {
105 	if (!num_entries)
106 		return NULL;
107 	*num_entries = BNXT_ULP_RESOURCE_RESV_LIST_MAX_SZ;
108 	return ulp_resource_resv_list;
109 }
110 
111 struct bnxt_ulp_glb_resource_info *
112 bnxt_ulp_app_glb_resource_info_list_get(uint32_t *num_entries)
113 {
114 	if (!num_entries)
115 		return NULL;
116 	*num_entries = BNXT_ULP_APP_GLB_RESOURCE_TBL_MAX_SZ;
117 	return ulp_app_glb_resource_tbl;
118 }
119 
120 static int32_t
121 bnxt_ulp_named_resources_calc(struct bnxt_ulp_context *ulp_ctx,
122 			      struct bnxt_ulp_glb_resource_info *info,
123 			      uint32_t num,
124 			      struct tf_session_resources *res)
125 {
126 	uint32_t dev_id = BNXT_ULP_DEVICE_ID_LAST, res_type, i;
127 	enum tf_dir dir;
128 	uint8_t app_id;
129 	int32_t rc = 0;
130 
131 	if (ulp_ctx == NULL || info == NULL || res == NULL || num == 0) {
132 		BNXT_TF_DBG(ERR, "Invalid parms to named resources calc.\n");
133 		return -EINVAL;
134 	}
135 
136 	rc = bnxt_ulp_cntxt_app_id_get(ulp_ctx, &app_id);
137 	if (rc) {
138 		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
139 		return -EINVAL;
140 	}
141 
142 	rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id);
143 	if (rc) {
144 		BNXT_TF_DBG(ERR, "Unable to get the dev id from ulp.\n");
145 		return -EINVAL;
146 	}
147 
148 	for (i = 0; i < num; i++) {
149 		if (dev_id != info[i].device_id || app_id != info[i].app_id)
150 			continue;
151 		dir = info[i].direction;
152 		res_type = info[i].resource_type;
153 
154 		switch (info[i].resource_func) {
155 		case BNXT_ULP_RESOURCE_FUNC_IDENTIFIER:
156 			res->ident_cnt[dir].cnt[res_type]++;
157 			break;
158 		case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE:
159 			res->tbl_cnt[dir].cnt[res_type]++;
160 			break;
161 		case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE:
162 			res->tcam_cnt[dir].cnt[res_type]++;
163 			break;
164 		case BNXT_ULP_RESOURCE_FUNC_EM_TABLE:
165 			res->em_cnt[dir].cnt[res_type]++;
166 			break;
167 		default:
168 			BNXT_TF_DBG(ERR, "Unknown resource func (0x%x)\n",
169 				    info[i].resource_func);
170 			continue;
171 		}
172 	}
173 
174 	return 0;
175 }
176 
177 static int32_t
178 bnxt_ulp_unnamed_resources_calc(struct bnxt_ulp_context *ulp_ctx,
179 				struct bnxt_ulp_resource_resv_info *info,
180 				uint32_t num,
181 				struct tf_session_resources *res)
182 {
183 	uint32_t dev_id, res_type, i;
184 	enum tf_dir dir;
185 	uint8_t app_id;
186 	int32_t rc = 0;
187 
188 	if (ulp_ctx == NULL || res == NULL || info == NULL || num == 0) {
189 		BNXT_TF_DBG(ERR, "Invalid arguments to get resources.\n");
190 		return -EINVAL;
191 	}
192 
193 	rc = bnxt_ulp_cntxt_app_id_get(ulp_ctx, &app_id);
194 	if (rc) {
195 		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
196 		return -EINVAL;
197 	}
198 
199 	rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id);
200 	if (rc) {
201 		BNXT_TF_DBG(ERR, "Unable to get the dev id from ulp.\n");
202 		return -EINVAL;
203 	}
204 
205 	for (i = 0; i < num; i++) {
206 		if (app_id != info[i].app_id || dev_id != info[i].device_id)
207 			continue;
208 		dir = info[i].direction;
209 		res_type = info[i].resource_type;
210 
211 		switch (info[i].resource_func) {
212 		case BNXT_ULP_RESOURCE_FUNC_IDENTIFIER:
213 			res->ident_cnt[dir].cnt[res_type] = info[i].count;
214 			break;
215 		case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE:
216 			res->tbl_cnt[dir].cnt[res_type] = info[i].count;
217 			break;
218 		case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE:
219 			res->tcam_cnt[dir].cnt[res_type] = info[i].count;
220 			break;
221 		case BNXT_ULP_RESOURCE_FUNC_EM_TABLE:
222 			res->em_cnt[dir].cnt[res_type] = info[i].count;
223 			break;
224 		default:
225 			break;
226 		}
227 	}
228 	return 0;
229 }
230 
231 static int32_t
232 bnxt_ulp_tf_resources_get(struct bnxt_ulp_context *ulp_ctx,
233 			  struct tf_session_resources *res)
234 {
235 	struct bnxt_ulp_resource_resv_info *unnamed = NULL;
236 	uint32_t unum;
237 	int32_t rc = 0;
238 
239 	if (ulp_ctx == NULL || res == NULL) {
240 		BNXT_TF_DBG(ERR, "Invalid arguments to get resources.\n");
241 		return -EINVAL;
242 	}
243 
244 	unnamed = bnxt_ulp_resource_resv_list_get(&unum);
245 	if (unnamed == NULL) {
246 		BNXT_TF_DBG(ERR, "Unable to get resource resv list.\n");
247 		return -EINVAL;
248 	}
249 
250 	rc = bnxt_ulp_unnamed_resources_calc(ulp_ctx, unnamed, unum, res);
251 	if (rc)
252 		BNXT_TF_DBG(ERR, "Unable to calc resources for session.\n");
253 
254 	return rc;
255 }
256 
257 static int32_t
258 bnxt_ulp_tf_shared_session_resources_get(struct bnxt_ulp_context *ulp_ctx,
259 					 struct tf_session_resources *res)
260 {
261 	struct bnxt_ulp_resource_resv_info *unnamed;
262 	struct bnxt_ulp_glb_resource_info *named;
263 	uint32_t unum, nnum;
264 	int32_t rc;
265 
266 	if (ulp_ctx == NULL || res == NULL) {
267 		BNXT_TF_DBG(ERR, "Invalid arguments to get resources.\n");
268 		return -EINVAL;
269 	}
270 
271 	/* Make sure the resources are zero before accumulating. */
272 	memset(res, 0, sizeof(struct tf_session_resources));
273 
274 	/*
275 	 * Shared resources comprise both named and unnamed resources.
276 	 * First get the unnamed counts, and then add the named to the result.
277 	 */
278 	/* Get the baseline counts */
279 	unnamed = bnxt_ulp_app_resource_resv_list_get(&unum);
280 	if (unnamed == NULL) {
281 		BNXT_TF_DBG(ERR, "Unable to get shared resource resv list.\n");
282 		return -EINVAL;
283 	}
284 	rc = bnxt_ulp_unnamed_resources_calc(ulp_ctx, unnamed, unum, res);
285 	if (rc) {
286 		BNXT_TF_DBG(ERR, "Unable to calc resources for shared session.\n");
287 		return -EINVAL;
288 	}
289 
290 	/* Get the named list and add the totals */
291 	named = bnxt_ulp_app_glb_resource_info_list_get(&nnum);
292 	if (named == NULL) {
293 		BNXT_TF_DBG(ERR, "Unable to get app global resource list\n");
294 		return -EINVAL;
295 	}
296 	rc = bnxt_ulp_named_resources_calc(ulp_ctx, named, nnum, res);
297 	if (rc)
298 		BNXT_TF_DBG(ERR, "Unable to calc named resources\n");
299 
300 	return rc;
301 }
302 
303 int32_t
304 bnxt_ulp_cntxt_app_caps_init(struct bnxt_ulp_context *ulp_ctx,
305 			     uint8_t app_id, uint32_t dev_id)
306 {
307 	struct bnxt_ulp_app_capabilities_info *info;
308 	uint32_t num = 0;
309 	uint16_t i;
310 	bool found = false;
311 
312 	if (ULP_APP_DEV_UNSUPPORTED_ENABLED(ulp_ctx->cfg_data->ulp_flags)) {
313 		BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
314 			    app_id, dev_id);
315 		return -EINVAL;
316 	}
317 
318 	info = bnxt_ulp_app_cap_list_get(&num);
319 	if (!info || !num) {
320 		BNXT_TF_DBG(ERR, "Failed to get app capabilities.\n");
321 		return -EINVAL;
322 	}
323 
324 	for (i = 0; i < num; i++) {
325 		if (info[i].app_id != app_id || info[i].device_id != dev_id)
326 			continue;
327 		found = true;
328 		if (info[i].flags & BNXT_ULP_APP_CAP_SHARED_EN)
329 			ulp_ctx->cfg_data->ulp_flags |=
330 				BNXT_ULP_SHARED_SESSION_ENABLED;
331 		if (info[i].flags & BNXT_ULP_APP_CAP_HOT_UPGRADE_EN)
332 			ulp_ctx->cfg_data->ulp_flags |=
333 				BNXT_ULP_HIGH_AVAIL_ENABLED;
334 		if (info[i].flags & BNXT_ULP_APP_CAP_UNICAST_ONLY)
335 			ulp_ctx->cfg_data->ulp_flags |=
336 				BNXT_ULP_APP_UNICAST_ONLY;
337 	}
338 	if (!found) {
339 		BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
340 			    app_id, dev_id);
341 		ulp_ctx->cfg_data->ulp_flags |= BNXT_ULP_APP_DEV_UNSUPPORTED;
342 		return -EINVAL;
343 	}
344 
345 	return 0;
346 }
347 
348 static void
349 ulp_ctx_shared_session_close(struct bnxt *bp,
350 			     struct bnxt_ulp_session_state *session)
351 {
352 	struct tf *tfp;
353 	int32_t rc;
354 
355 	if (!bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx))
356 		return;
357 
358 	tfp = bnxt_ulp_cntxt_shared_tfp_get(bp->ulp_ctx);
359 	if (!tfp) {
360 		/*
361 		 * Log it under debug since this is likely a case of the
362 		 * shared session not being created.  For example, a failed
363 		 * initialization.
364 		 */
365 		BNXT_TF_DBG(DEBUG, "Failed to get shared tfp on close.\n");
366 		return;
367 	}
368 	rc = tf_close_session(tfp);
369 	if (rc)
370 		BNXT_TF_DBG(ERR, "Failed to close the shared session rc=%d.\n",
371 			    rc);
372 	(void)bnxt_ulp_cntxt_shared_tfp_set(bp->ulp_ctx, NULL);
373 
374 	session->g_shared_tfp.session = NULL;
375 }
376 
377 static int32_t
378 ulp_ctx_shared_session_open(struct bnxt *bp,
379 			    struct bnxt_ulp_session_state *session)
380 {
381 	struct rte_eth_dev *ethdev = bp->eth_dev;
382 	struct tf_session_resources *resources;
383 	struct tf_open_session_parms parms;
384 	size_t copy_nbytes;
385 	uint32_t ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST;
386 	int32_t	rc = 0;
387 	uint8_t app_id;
388 
389 	/* only perform this if shared session is enabled. */
390 	if (!bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx))
391 		return 0;
392 
393 	memset(&parms, 0, sizeof(parms));
394 
395 	rc = rte_eth_dev_get_name_by_port(ethdev->data->port_id,
396 					  parms.ctrl_chan_name);
397 	if (rc) {
398 		BNXT_TF_DBG(ERR, "Invalid port %d, rc = %d\n",
399 			    ethdev->data->port_id, rc);
400 		return rc;
401 	}
402 	resources = &parms.resources;
403 
404 	/*
405 	 * Compute the space remaining in ctrl_chan_name, reserving one byte
406 	 * for the NUL terminator.
407 	 */
408 	copy_nbytes = sizeof(parms.ctrl_chan_name) -
409 		strlen(parms.ctrl_chan_name) - 1;
410 
411 	/*
412 	 * Build the ctrl_chan_name with shared token.
413 	 * When HA is enabled, the WC TCAM needs extra management by the core,
414 	 * so add the wc_tcam string to the control channel.
415 	 */
416 	if (bnxt_ulp_cntxt_ha_enabled(bp->ulp_ctx))
417 		strncat(parms.ctrl_chan_name, "-tf_shared-wc_tcam",
418 			copy_nbytes);
419 	else
420 		strncat(parms.ctrl_chan_name, "-tf_shared", copy_nbytes);
421 
422 	rc = bnxt_ulp_tf_shared_session_resources_get(bp->ulp_ctx, resources);
423 	if (rc)
424 		return rc;
425 
426 	rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);
427 	if (rc) {
428 		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
429 		return -EINVAL;
430 	}
431 
432 	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
433 	if (rc) {
434 		BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
435 		return rc;
436 	}
437 
438 	switch (ulp_dev_id) {
439 	case BNXT_ULP_DEVICE_ID_WH_PLUS:
440 		parms.device_type = TF_DEVICE_TYPE_WH;
441 		break;
442 	case BNXT_ULP_DEVICE_ID_STINGRAY:
443 		parms.device_type = TF_DEVICE_TYPE_SR;
444 		break;
445 	case BNXT_ULP_DEVICE_ID_THOR:
446 		parms.device_type = TF_DEVICE_TYPE_THOR;
447 		break;
448 	default:
449 		BNXT_TF_DBG(ERR, "Unable to determine dev for opening session.\n");
450 		return -EINVAL;
451 	}
452 
453 	parms.shadow_copy = true;
454 	parms.bp = bp;
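	/*
	 * Application IDs 0 and 3 use two WC TCAM slices per row;
	 * all other applications use a single slice.
	 */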
455 	if (app_id == 0 || app_id == 3)
456 		parms.wc_num_slices = TF_WC_TCAM_2_SLICE_PER_ROW;
457 	else
458 		parms.wc_num_slices = TF_WC_TCAM_1_SLICE_PER_ROW;
459 
460 	/*
461 	 * Open the session here, but collect the resources during the
462 	 * mapper initialization.
463 	 */
464 	rc = tf_open_session(&bp->tfp_shared, &parms);
465 	if (rc)
466 		return rc;
467 
468 	if (parms.shared_session_creator)
469 		BNXT_TF_DBG(DEBUG, "Shared session creator.\n");
470 	else
471 		BNXT_TF_DBG(DEBUG, "Shared session attached.\n");
472 
473 	/* Save the shared session in global data */
474 	if (!session->g_shared_tfp.session)
475 		session->g_shared_tfp.session = bp->tfp_shared.session;
476 
477 	rc = bnxt_ulp_cntxt_shared_tfp_set(bp->ulp_ctx, &bp->tfp_shared);
478 	if (rc)
479 		BNXT_TF_DBG(ERR, "Failed to add shared tfp to ulp (%d)\n", rc);
480 
481 	return rc;
482 }
483 
484 static int32_t
485 ulp_ctx_shared_session_attach(struct bnxt *bp,
486 			      struct bnxt_ulp_session_state *session)
487 {
488 	int32_t rc = 0;
489 
490 	/* Simply return success if shared session not enabled */
491 	if (bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx)) {
492 		bp->tfp_shared.session = session->g_shared_tfp.session;
493 		rc = ulp_ctx_shared_session_open(bp, session);
494 	}
495 
496 	return rc;
497 }
498 
499 static void
500 ulp_ctx_shared_session_detach(struct bnxt *bp)
501 {
502 	if (bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx)) {
503 		if (bp->tfp_shared.session) {
504 			tf_close_session(&bp->tfp_shared);
505 			bp->tfp_shared.session = NULL;
506 		}
507 	}
508 }
509 
510 /*
511  * Initialize a ULP session.
512  * A ULP session contains all the resources needed to support rte_flow
513  * offloads. A session is initialized as part of rte_eth_device start.
514  * A single vswitch instance can have multiple uplinks, which means
515  * rte_eth_device start will be called for each of these devices.
516  * The ULP session manager ensures that a single ULP session is
517  * initialized only once. Apart from this, it also initializes the MARK
518  * database, EEM table and flow database. The ULP session manager also
519  * maintains a list of all opened ULP sessions.
520  */
521 static int32_t
522 ulp_ctx_session_open(struct bnxt *bp,
523 		     struct bnxt_ulp_session_state *session)
524 {
525 	struct rte_eth_dev		*ethdev = bp->eth_dev;
526 	int32_t				rc = 0;
527 	struct tf_open_session_parms	params;
528 	struct tf_session_resources	*resources;
529 	uint32_t ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST;
530 	uint8_t app_id;
531 
532 	memset(&params, 0, sizeof(params));
533 
534 	rc = rte_eth_dev_get_name_by_port(ethdev->data->port_id,
535 					  params.ctrl_chan_name);
536 	if (rc) {
537 		BNXT_TF_DBG(ERR, "Invalid port %d, rc = %d\n",
538 			    ethdev->data->port_id, rc);
539 		return rc;
540 	}
541 
542 	params.shadow_copy = true;
543 
544 	rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);
545 	if (rc) {
546 		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
547 		return -EINVAL;
548 	}
549 
550 	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
551 	if (rc) {
552 		BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
553 		return rc;
554 	}
555 
556 	switch (ulp_dev_id) {
557 	case BNXT_ULP_DEVICE_ID_WH_PLUS:
558 		params.device_type = TF_DEVICE_TYPE_WH;
559 		break;
560 	case BNXT_ULP_DEVICE_ID_STINGRAY:
561 		params.device_type = TF_DEVICE_TYPE_SR;
562 		break;
563 	case BNXT_ULP_DEVICE_ID_THOR:
564 		params.device_type = TF_DEVICE_TYPE_THOR;
565 		break;
566 	default:
567 		BNXT_TF_DBG(ERR, "Unable to determine device for opening session.\n");
568 		return -EINVAL;
569 	}
570 
571 	resources = &params.resources;
572 	rc = bnxt_ulp_tf_resources_get(bp->ulp_ctx, resources);
573 	if (rc)
574 		return rc;
575 
576 	params.bp = bp;
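	/* App IDs 0 and 3 use two WC TCAM slices per row; others use one. */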
577 	if (app_id == 0 || app_id == 3)
578 		params.wc_num_slices = TF_WC_TCAM_2_SLICE_PER_ROW;
579 	else
580 		params.wc_num_slices = TF_WC_TCAM_1_SLICE_PER_ROW;
581 
582 	rc = tf_open_session(&bp->tfp, &params);
583 	if (rc) {
584 		BNXT_TF_DBG(ERR, "Failed to open TF session - %s, rc = %d\n",
585 			    params.ctrl_chan_name, rc);
586 		return -EINVAL;
587 	}
588 	if (!session->session_opened) {
589 		session->session_opened = 1;
590 		session->g_tfp = rte_zmalloc("bnxt_ulp_session_tfp",
591 					     sizeof(struct tf), 0);
		if (session->g_tfp == NULL)
			return -ENOMEM;
592 		session->g_tfp->session = bp->tfp.session;
593 	}
594 	return rc;
595 }
596 
597 /*
598  * Close the ULP session.
599  * It closes the TF session and frees the per-session tfp copy.
600  */
601 static void
602 ulp_ctx_session_close(struct bnxt *bp,
603 		      struct bnxt_ulp_session_state *session)
604 {
605 	/* close the session in the hardware */
606 	if (session->session_opened)
607 		tf_close_session(&bp->tfp);
608 	session->session_opened = 0;
609 	rte_free(session->g_tfp);
610 	session->g_tfp = NULL;
611 }
612 
613 static void
614 bnxt_init_tbl_scope_parms(struct bnxt *bp,
615 			  struct tf_alloc_tbl_scope_parms *params)
616 {
617 	struct bnxt_ulp_device_params	*dparms;
618 	uint32_t dev_id;
619 	int rc;
620 
621 	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
622 	if (rc)
623 		/* TBD: For now, just use default. */
624 		dparms = 0;
625 	else
626 		dparms = bnxt_ulp_device_params_get(dev_id);
627 
628 	/*
629 	 * Set the flush timer for EEM entries. The value is in 100ms intervals,
630 	 * so 100 is 10s.
631 	 */
632 	params->hw_flow_cache_flush_timer = 100;
633 
634 	if (!dparms) {
635 		params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
636 		params->rx_max_action_entry_sz_in_bits =
637 			BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
638 		params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
639 		params->rx_num_flows_in_k = BNXT_ULP_RX_NUM_FLOWS;
640 
641 		params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
642 		params->tx_max_action_entry_sz_in_bits =
643 			BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
644 		params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
645 		params->tx_num_flows_in_k = BNXT_ULP_TX_NUM_FLOWS;
646 	} else {
647 		params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
648 		params->rx_max_action_entry_sz_in_bits =
649 			BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
650 		params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
651 		params->rx_num_flows_in_k =
652 			dparms->ext_flow_db_num_entries / 1024;
653 
654 		params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
655 		params->tx_max_action_entry_sz_in_bits =
656 			BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
657 		params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
658 		params->tx_num_flows_in_k =
659 			dparms->ext_flow_db_num_entries / 1024;
660 	}
661 	BNXT_TF_DBG(INFO, "Table Scope initialized with %uK flows.\n",
662 		    params->rx_num_flows_in_k);
663 }
664 
665 /* Initialize Extended Exact Match host memory. */
666 static int32_t
667 ulp_eem_tbl_scope_init(struct bnxt *bp)
668 {
669 	struct tf_alloc_tbl_scope_parms params = {0};
670 	struct bnxt_ulp_device_params *dparms;
671 	enum bnxt_ulp_flow_mem_type mtype;
672 	uint32_t dev_id;
673 	int rc;
674 
675 	/* Get the dev specific number of flows that need to be supported. */
676 	if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
677 		BNXT_TF_DBG(ERR, "Invalid device id\n");
678 		return -EINVAL;
679 	}
680 
681 	dparms = bnxt_ulp_device_params_get(dev_id);
682 	if (!dparms) {
683 		BNXT_TF_DBG(ERR, "could not fetch the device params\n");
684 		return -ENODEV;
685 	}
686 
687 	if (bnxt_ulp_cntxt_mem_type_get(bp->ulp_ctx, &mtype))
688 		return -EINVAL;
689 	if (mtype != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
690 		BNXT_TF_DBG(INFO, "Table Scope alloc is not required\n");
691 		return 0;
692 	}
693 
694 	bnxt_init_tbl_scope_parms(bp, &params);
695 	rc = tf_alloc_tbl_scope(&bp->tfp, &params);
696 	if (rc) {
697 		BNXT_TF_DBG(ERR, "Unable to allocate eem table scope rc = %d\n",
698 			    rc);
699 		return rc;
700 	}
701 	rc = bnxt_ulp_cntxt_tbl_scope_id_set(bp->ulp_ctx, params.tbl_scope_id);
702 	if (rc) {
703 		BNXT_TF_DBG(ERR, "Unable to set table scope id\n");
704 		return rc;
705 	}
706 
707 	return 0;
708 }
709 
710 /* Free Extended Exact Match host memory */
711 static int32_t
712 ulp_eem_tbl_scope_deinit(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
713 {
714 	struct tf_free_tbl_scope_parms	params = {0};
715 	struct tf			*tfp;
716 	int32_t				rc = 0;
717 	struct bnxt_ulp_device_params *dparms;
718 	enum bnxt_ulp_flow_mem_type mtype;
719 	uint32_t dev_id;
720 
721 	if (!ulp_ctx || !ulp_ctx->cfg_data)
722 		return -EINVAL;
723 
724 	tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx, BNXT_ULP_SHARED_SESSION_NO);
725 	if (!tfp) {
726 		BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
727 		return -EINVAL;
728 	}
729 
730 	/* Get the dev specific number of flows that need to be supported. */
731 	if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
732 		BNXT_TF_DBG(ERR, "Invalid device id\n");
733 		return -EINVAL;
734 	}
735 
736 	dparms = bnxt_ulp_device_params_get(dev_id);
737 	if (!dparms) {
738 		BNXT_TF_DBG(ERR, "could not fetch the device params\n");
739 		return -ENODEV;
740 	}
741 
742 	if (bnxt_ulp_cntxt_mem_type_get(ulp_ctx, &mtype))
743 		return -EINVAL;
744 	if (mtype != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
745 		BNXT_TF_DBG(INFO, "Table Scope free is not required\n");
746 		return 0;
747 	}
748 
749 	rc = bnxt_ulp_cntxt_tbl_scope_id_get(ulp_ctx, &params.tbl_scope_id);
750 	if (rc) {
751 		BNXT_TF_DBG(ERR, "Failed to get the table scope id\n");
752 		return -EINVAL;
753 	}
754 
755 	rc = tf_free_tbl_scope(tfp, &params);
756 	if (rc) {
757 		BNXT_TF_DBG(ERR, "Unable to free table scope\n");
758 		return -EINVAL;
759 	}
760 	return rc;
761 }
762 
763 /* The function to free and deinit the ulp context data. */
764 static int32_t
765 ulp_ctx_deinit(struct bnxt *bp,
766 	       struct bnxt_ulp_session_state *session)
767 {
768 	/* close the tf session */
769 	ulp_ctx_session_close(bp, session);
770 
771 	/* The shared session must be closed last. */
772 	ulp_ctx_shared_session_close(bp, session);
773 
774 	/* Free the contents */
775 	if (session->cfg_data) {
776 		rte_free(session->cfg_data);
777 		bp->ulp_ctx->cfg_data = NULL;
778 		session->cfg_data = NULL;
779 	}
780 	return 0;
781 }
782 
783 /* The function to allocate and initialize the ulp context data. */
784 static int32_t
785 ulp_ctx_init(struct bnxt *bp,
786 	     struct bnxt_ulp_session_state *session)
787 {
788 	struct bnxt_ulp_data	*ulp_data;
789 	int32_t			rc = 0;
790 	enum bnxt_ulp_device_id devid;
791 
792 	/* Initialize the context entries list */
793 	bnxt_ulp_cntxt_list_init();
794 
795 	/* Add the context to the context entries list */
796 	rc = bnxt_ulp_cntxt_list_add(bp->ulp_ctx);
797 	if (rc) {
798 		BNXT_TF_DBG(ERR, "Failed to add the context list entry\n");
799 		return -ENOMEM;
800 	}
801 
802 	/* Allocate memory to hold ulp context data. */
803 	ulp_data = rte_zmalloc("bnxt_ulp_data",
804 			       sizeof(struct bnxt_ulp_data), 0);
805 	if (!ulp_data) {
806 		BNXT_TF_DBG(ERR, "Failed to allocate memory for ulp data\n");
807 		return -ENOMEM;
808 	}
809 
810 	/* Increment the ulp context data reference count usage. */
811 	bp->ulp_ctx->cfg_data = ulp_data;
812 	session->cfg_data = ulp_data;
813 	ulp_data->ref_cnt++;
814 	ulp_data->ulp_flags |= BNXT_ULP_VF_REP_ENABLED;
815 
816 	rc = bnxt_ulp_devid_get(bp, &devid);
817 	if (rc) {
818 		BNXT_TF_DBG(ERR, "Unable to determine device for ULP init.\n");
819 		goto error_deinit;
820 	}
821 
822 	rc = bnxt_ulp_cntxt_dev_id_set(bp->ulp_ctx, devid);
823 	if (rc) {
824 		BNXT_TF_DBG(ERR, "Unable to set device for ULP init.\n");
825 		goto error_deinit;
826 	}
827 
828 	rc = bnxt_ulp_cntxt_app_id_set(bp->ulp_ctx, bp->app_id);
829 	if (rc) {
830 		BNXT_TF_DBG(ERR, "Unable to set app_id for ULP init.\n");
831 		goto error_deinit;
832 	}
833 	BNXT_TF_DBG(DEBUG, "Ulp initialized with app id %d\n", bp->app_id);
834 
835 	rc = bnxt_ulp_cntxt_app_caps_init(bp->ulp_ctx, bp->app_id, devid);
836 	if (rc) {
837 		BNXT_TF_DBG(ERR, "Unable to set caps for app(%x)/dev(%x)\n",
838 			    bp->app_id, devid);
839 		goto error_deinit;
840 	}
841 
842 	/*
843 	 * The shared session must be created before the first regular session,
844 	 * but only after the ulp_ctx is valid.
845 	 */
846 	rc = ulp_ctx_shared_session_open(bp, session);
847 	if (rc) {
848 		BNXT_TF_DBG(ERR, "Unable to open shared session (%d)\n", rc);
849 		goto error_deinit;
850 	}
851 
852 	/* Open the ulp session. */
853 	rc = ulp_ctx_session_open(bp, session);
854 	if (rc)
855 		goto error_deinit;
856 
857 	bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
858 	return rc;
859 
860 error_deinit:
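	/*
	 * Mark the session as opened so that ulp_ctx_deinit() goes through
	 * the session close path for whatever was partially created above.
	 */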
861 	session->session_opened = 1;
862 	(void)ulp_ctx_deinit(bp, session);
863 	return rc;
864 }
865 
866 /* The function to initialize ulp dparms with devargs */
867 static int32_t
868 ulp_dparms_init(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
869 {
870 	struct bnxt_ulp_device_params *dparms;
871 	uint32_t dev_id = BNXT_ULP_DEVICE_ID_LAST;
872 
873 	if (!bp->max_num_kflows) {
874 		/* Defaults to Internal */
875 		bnxt_ulp_cntxt_mem_type_set(ulp_ctx,
876 					    BNXT_ULP_FLOW_MEM_TYPE_INT);
877 		return 0;
878 	}
879 
880 	/* The max_num_kflows were set, so move to external */
881 	/* The max_num_kflows was set, so move to external */
882 		return -EINVAL;
883 
884 	if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id)) {
885 		BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
886 		return -EINVAL;
887 	}
888 
889 	dparms = bnxt_ulp_device_params_get(dev_id);
890 	if (!dparms) {
891 		BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
892 		return -EINVAL;
893 	}
894 
895 	/* num_flows = max_num_kflows * 1024 */
896 	dparms->ext_flow_db_num_entries = bp->max_num_kflows * 1024;
897 	/* GFID =  2 * num_flows */
898 	dparms->mark_db_gfid_entries = dparms->ext_flow_db_num_entries * 2;
899 	BNXT_TF_DBG(DEBUG, "Set the number of flows = %" PRIu64 "\n",
900 		    dparms->ext_flow_db_num_entries);
901 
902 	return 0;
903 }
904 
905 /* The function to initialize bp flags with truflow features */
906 static int32_t
907 ulp_dparms_dev_port_intf_update(struct bnxt *bp,
908 				struct bnxt_ulp_context *ulp_ctx)
909 {
910 	enum bnxt_ulp_flow_mem_type mtype;
911 
912 	if (bnxt_ulp_cntxt_mem_type_get(ulp_ctx, &mtype))
913 		return -EINVAL;
914 	/* Update the bp flag with gfid flag */
915 	if (mtype == BNXT_ULP_FLOW_MEM_TYPE_EXT)
916 		bp->flags |= BNXT_FLAG_GFID_ENABLE;
917 
918 	return 0;
919 }
920 
921 static int32_t
922 ulp_ctx_attach(struct bnxt *bp,
923 	       struct bnxt_ulp_session_state *session)
924 {
925 	int32_t rc = 0;
926 	uint32_t flags, dev_id = BNXT_ULP_DEVICE_ID_LAST;
927 	uint8_t app_id;
928 
929 	/* Increment the ulp context data reference count usage. */
930 	bp->ulp_ctx->cfg_data = session->cfg_data;
931 	bp->ulp_ctx->cfg_data->ref_cnt++;
932 
933 	/* update the session details in bnxt tfp */
934 	bp->tfp.session = session->g_tfp->session;
935 
936 	/* Add the context to the context entries list */
937 	rc = bnxt_ulp_cntxt_list_add(bp->ulp_ctx);
938 	if (rc) {
939 		BNXT_TF_DBG(ERR, "Failed to add the context list entry\n");
940 		return -EINVAL;
941 	}
942 
943 	/*
944 	 * The supported flag will be set during the init. Use it now to
945 	 * know if we should go through the attach.
946 	 */
947 	rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);
948 	if (rc) {
949 		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
950 		return -EINVAL;
951 	}
952 
953 	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
954 	if (rc) {
955 		BNXT_TF_DBG(ERR, "Unable to get the dev_id.\n");
956 		return -EINVAL;
957 	}
958 
959 	flags = bp->ulp_ctx->cfg_data->ulp_flags;
960 	if (ULP_APP_DEV_UNSUPPORTED_ENABLED(flags)) {
961 		BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
962 			    app_id, dev_id);
963 		return -EINVAL;
964 	}
965 
966 	/* Create a TF Client */
967 	rc = ulp_ctx_session_open(bp, session);
968 	if (rc) {
969 		PMD_DRV_LOG(ERR, "Failed to open ctxt session, rc:%d\n", rc);
970 		bp->tfp.session = NULL;
971 		return rc;
972 	}
973 
974 	bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
975 	return rc;
976 }
977 
978 static void
979 ulp_ctx_detach(struct bnxt *bp)
980 {
981 	if (bp->tfp.session) {
982 		tf_close_session(&bp->tfp);
983 		bp->tfp.session = NULL;
984 	}
985 }
986 
987 /*
988  * Initialize the state of a ULP session.
989  * If the session is not yet initialized, mark it initialized and set
990  * *init to false; if it is already initialized, set *init to true.
991  */
992 static void
993 ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init)
994 {
995 	pthread_mutex_lock(&session->bnxt_ulp_mutex);
996 
997 	if (!session->bnxt_ulp_init) {
998 		session->bnxt_ulp_init = true;
999 		*init = false;
1000 	} else {
1001 		*init = true;
1002 	}
1003 
1004 	pthread_mutex_unlock(&session->bnxt_ulp_mutex);
1005 }
1006 
1007 /*
1008  * Check if a ULP session is already allocated for a specific PCI
1009  * domain and bus. If it is already allocated, return the session
1010  * pointer; otherwise return NULL.
1011  */
1012 static struct bnxt_ulp_session_state *
1013 ulp_get_session(struct rte_pci_addr *pci_addr)
1014 {
1015 	struct bnxt_ulp_session_state *session;
1016 
1017 	STAILQ_FOREACH(session, &bnxt_ulp_session_list, next) {
1018 		if (session->pci_info.domain == pci_addr->domain &&
1019 		    session->pci_info.bus == pci_addr->bus) {
1020 			return session;
1021 		}
1022 	}
1023 	return NULL;
1024 }
1025 
1026 /*
1027  * Allocate and initialize a ULP session and set its state to INITIALIZED.
1028  * If it is already initialized, simply return the existing session.
1029  */
1030 static struct bnxt_ulp_session_state *
1031 ulp_session_init(struct bnxt *bp,
1032 		 bool *init)
1033 {
1034 	struct rte_pci_device		*pci_dev;
1035 	struct rte_pci_addr		*pci_addr;
1036 	struct bnxt_ulp_session_state	*session;
1037 	int rc = 0;
1038 
1039 	if (!bp)
1040 		return NULL;
1041 
1042 	pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
1043 	pci_addr = &pci_dev->addr;
1044 
1045 	pthread_mutex_lock(&bnxt_ulp_global_mutex);
1046 
1047 	session = ulp_get_session(pci_addr);
1048 	if (!session) {
1049 		/* Session not found; allocate a new one */
1050 		session = rte_zmalloc("bnxt_ulp_session",
1051 				      sizeof(struct bnxt_ulp_session_state),
1052 				      0);
1053 		if (!session) {
1054 			BNXT_TF_DBG(ERR,
1055 				    "Allocation failed for bnxt_ulp_session\n");
1056 			pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1057 			return NULL;
1058 
1059 		} else {
1060 			/* Add it to the queue */
1061 			session->pci_info.domain = pci_addr->domain;
1062 			session->pci_info.bus = pci_addr->bus;
1063 			rc = pthread_mutex_init(&session->bnxt_ulp_mutex, NULL);
1064 			if (rc) {
1065 				BNXT_TF_DBG(ERR, "mutex create failed\n");
1066 				pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1067 				return NULL;
1068 			}
1069 			STAILQ_INSERT_TAIL(&bnxt_ulp_session_list,
1070 					   session, next);
1071 		}
1072 	}
1073 	ulp_context_initialized(session, init);
1074 	pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1075 	return session;
1076 }
1077 
1078 /*
1079  * When a device is closed, remove its associated session from the global
1080  * session list.
1081  */
1082 static void
1083 ulp_session_deinit(struct bnxt_ulp_session_state *session)
1084 {
1085 	if (!session)
1086 		return;
1087 
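	/*
	 * The session is removed and freed only once its cfg_data has been
	 * released, i.e. after the last port reference ran ulp_ctx_deinit().
	 */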
1088 	if (!session->cfg_data) {
1089 		pthread_mutex_lock(&bnxt_ulp_global_mutex);
1090 		STAILQ_REMOVE(&bnxt_ulp_session_list, session,
1091 			      bnxt_ulp_session_state, next);
1092 		pthread_mutex_destroy(&session->bnxt_ulp_mutex);
1093 		rte_free(session);
1094 		pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1095 	}
1096 }
1097 
1098 /*
1099  * Internal API to enable the NAT feature.
1100  * Set set_flag to 1 to set the value, or to zero to reset it.
1101  * Returns 0 on success.
1102  */
1103 static int32_t
1104 bnxt_ulp_global_cfg_update(struct bnxt *bp,
1105 			   enum tf_dir dir,
1106 			   enum tf_global_config_type type,
1107 			   uint32_t offset,
1108 			   uint32_t value,
1109 			   uint32_t set_flag)
1110 {
1111 	uint32_t global_cfg = 0;
1112 	int rc;
1113 	struct tf_global_cfg_parms parms = { 0 };
1114 
1115 	/* Initialize the params */
1116 	parms.dir = dir,
1117 	parms.type = type,
1118 	parms.offset = offset,
1119 	parms.config = (uint8_t *)&global_cfg,
1120 	parms.config_sz_in_bytes = sizeof(global_cfg);
1121 
1122 	rc = tf_get_global_cfg(&bp->tfp, &parms);
1123 	if (rc) {
1124 		BNXT_TF_DBG(ERR, "Failed to get global cfg 0x%x rc:%d\n",
1125 			    type, rc);
1126 		return rc;
1127 	}
1128 
1129 	if (set_flag)
1130 		global_cfg |= value;
1131 	else
1132 		global_cfg &= ~value;
1133 
1134 	/* SET the register RE_CFA_REG_ACT_TECT */
1135 	rc = tf_set_global_cfg(&bp->tfp, &parms);
1136 	if (rc) {
1137 		BNXT_TF_DBG(ERR, "Failed to set global cfg 0x%x rc:%d\n",
1138 			    type, rc);
1139 		return rc;
1140 	}
1141 	return rc;
1142 }
1143 
1144 /* Internal function to delete all the flows belonging to the given port */
1145 static void
1146 bnxt_ulp_flush_port_flows(struct bnxt *bp)
1147 {
1148 	uint16_t func_id;
1149 
1150 	/* it is assumed that port is either TVF or PF */
1151 	if (ulp_port_db_port_func_id_get(bp->ulp_ctx,
1152 					 bp->eth_dev->data->port_id,
1153 					 &func_id)) {
1154 		BNXT_TF_DBG(ERR, "Invalid argument\n");
1155 		return;
1156 	}
1157 	(void)ulp_flow_db_function_flow_flush(bp->ulp_ctx, func_id);
1158 }
1159 
1160 /* Internal function to delete the VFR default flows */
1161 static void
1162 bnxt_ulp_destroy_vfr_default_rules(struct bnxt *bp, bool global)
1163 {
1164 	struct bnxt_ulp_vfr_rule_info *info;
1165 	uint16_t port_id;
1166 	struct rte_eth_dev *vfr_eth_dev;
1167 	struct bnxt_representor *vfr_bp;
1168 
1169 	if (!BNXT_TRUFLOW_EN(bp) || BNXT_ETH_DEV_IS_REPRESENTOR(bp->eth_dev))
1170 		return;
1171 
1172 	if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
1173 		return;
1174 
1175 	/* Delete default rules for all ports */
1176 	for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
1177 		info = &bp->ulp_ctx->cfg_data->vfr_rule_info[port_id];
1178 		if (!info->valid)
1179 			continue;
1180 
1181 		if (!global && info->parent_port_id !=
1182 		    bp->eth_dev->data->port_id)
1183 			continue;
1184 
1185 		/* Destroy the flows */
1186 		ulp_default_flow_destroy(bp->eth_dev, info->vfr_flow_id);
1187 		/* Clean up the tx action pointer */
1188 		vfr_eth_dev = &rte_eth_devices[port_id];
1189 		if (vfr_eth_dev) {
1190 			vfr_bp = vfr_eth_dev->data->dev_private;
1191 			vfr_bp->vfr_tx_cfa_action = 0;
1192 		}
1193 		memset(info, 0, sizeof(struct bnxt_ulp_vfr_rule_info));
1194 	}
1195 }
1196 
1197 /*
1198  * When a port is de-initialized by DPDK, this function is called
1199  * to clear the ULP context and the rest of the infrastructure
1200  * associated with it.
1201  */
1202 static void
1203 bnxt_ulp_deinit(struct bnxt *bp,
1204 		struct bnxt_ulp_session_state *session)
1205 {
1206 	bool ha_enabled;
1207 
1208 	if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
1209 		return;
1210 
1211 	ha_enabled = bnxt_ulp_cntxt_ha_enabled(bp->ulp_ctx);
1212 	if (ha_enabled && session->session_opened) {
1213 		int32_t rc = ulp_ha_mgr_close(bp->ulp_ctx);
1214 		if (rc)
1215 			BNXT_TF_DBG(ERR, "Failed to close HA (%d)\n", rc);
1216 	}
1217 
1218 	/* clean up default flows */
1219 	bnxt_ulp_destroy_df_rules(bp, true);
1220 
1221 	/* clean up default VFR flows */
1222 	bnxt_ulp_destroy_vfr_default_rules(bp, true);
1223 
1224 	/* clean up regular flows */
1225 	ulp_flow_db_flush_flows(bp->ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR);
1226 
1227 	/* cleanup the eem table scope */
1228 	ulp_eem_tbl_scope_deinit(bp, bp->ulp_ctx);
1229 
1230 	/* cleanup the flow database */
1231 	ulp_flow_db_deinit(bp->ulp_ctx);
1232 
1233 	/* Delete the Mark database */
1234 	ulp_mark_db_deinit(bp->ulp_ctx);
1235 
1236 	/* cleanup the ulp mapper */
1237 	ulp_mapper_deinit(bp->ulp_ctx);
1238 
1239 	/* Delete the Flow Counter Manager */
1240 	ulp_fc_mgr_deinit(bp->ulp_ctx);
1241 
1242 	/* Delete the Port database */
1243 	ulp_port_db_deinit(bp->ulp_ctx);
1244 
1245 	/* Disable NAT feature */
1246 	(void)bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
1247 					 TF_TUNNEL_ENCAP_NAT,
1248 					 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);
1249 
1250 	(void)bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
1251 					 TF_TUNNEL_ENCAP_NAT,
1252 					 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);
1253 
1254 	/* free the flow db lock */
1255 	pthread_mutex_destroy(&bp->ulp_ctx->cfg_data->flow_db_lock);
1256 
1257 	if (ha_enabled)
1258 		ulp_ha_mgr_deinit(bp->ulp_ctx);
1259 
1260 	/* Delete the ulp context and tf session and free the ulp context */
1261 	ulp_ctx_deinit(bp, session);
1262 	BNXT_TF_DBG(DEBUG, "ulp ctx has been deinitialized\n");
1263 }
1264 
1265 /*
1266  * When a port is initialized by DPDK, this function is called
1267  * to initialize the ULP context and the rest of the infrastructure
1268  * associated with it.
1269  */
1270 static int32_t
1271 bnxt_ulp_init(struct bnxt *bp,
1272 	      struct bnxt_ulp_session_state *session)
1273 {
1274 	int rc;
1275 
1276 	/* Allocate and Initialize the ulp context. */
1277 	rc = ulp_ctx_init(bp, session);
1278 	if (rc) {
1279 		BNXT_TF_DBG(ERR, "Failed to create the ulp context\n");
1280 		goto jump_to_error;
1281 	}
1282 
1283 	rc = pthread_mutex_init(&bp->ulp_ctx->cfg_data->flow_db_lock, NULL);
1284 	if (rc) {
1285 		BNXT_TF_DBG(ERR, "Unable to initialize flow db lock\n");
1286 		goto jump_to_error;
1287 	}
1288 
1289 	/* Initialize ulp dparms with values devargs passed */
1290 	rc = ulp_dparms_init(bp, bp->ulp_ctx);
1291 	if (rc) {
1292 		BNXT_TF_DBG(ERR, "Failed to initialize the dparms\n");
1293 		goto jump_to_error;
1294 	}
1295 
1296 	/* create the port database */
1297 	rc = ulp_port_db_init(bp->ulp_ctx, bp->port_cnt);
1298 	if (rc) {
1299 		BNXT_TF_DBG(ERR, "Failed to create the port database\n");
1300 		goto jump_to_error;
1301 	}
1302 
1303 	/* Create the Mark database. */
1304 	rc = ulp_mark_db_init(bp->ulp_ctx);
1305 	if (rc) {
1306 		BNXT_TF_DBG(ERR, "Failed to create the mark database\n");
1307 		goto jump_to_error;
1308 	}
1309 
1310 	/* Create the flow database. */
1311 	rc = ulp_flow_db_init(bp->ulp_ctx);
1312 	if (rc) {
1313 		BNXT_TF_DBG(ERR, "Failed to create the flow database\n");
1314 		goto jump_to_error;
1315 	}
1316 
1317 	/* Create the eem table scope. */
1318 	rc = ulp_eem_tbl_scope_init(bp);
1319 	if (rc) {
1320 		BNXT_TF_DBG(ERR, "Failed to create the eem scope table\n");
1321 		goto jump_to_error;
1322 	}
1323 
1324 	rc = ulp_mapper_init(bp->ulp_ctx);
1325 	if (rc) {
1326 		BNXT_TF_DBG(ERR, "Failed to initialize ulp mapper\n");
1327 		goto jump_to_error;
1328 	}
1329 
1330 	rc = ulp_fc_mgr_init(bp->ulp_ctx);
1331 	if (rc) {
1332 		BNXT_TF_DBG(ERR, "Failed to initialize ulp flow counter mgr\n");
1333 		goto jump_to_error;
1334 	}
1335 
1336 	/*
1337 	 * Enable the NAT feature. Set the tunnel encap global configuration
1338 	 * register to enable NAT, reusing the existing inner L2 header
1339 	 * smac and dmac.
1340 	 */
1341 	rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
1342 					TF_TUNNEL_ENCAP_NAT,
1343 					BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
1344 	if (rc) {
1345 		BNXT_TF_DBG(ERR, "Failed to set rx global configuration\n");
1346 		goto jump_to_error;
1347 	}
1348 
1349 	rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
1350 					TF_TUNNEL_ENCAP_NAT,
1351 					BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
1352 	if (rc) {
1353 		BNXT_TF_DBG(ERR, "Failed to set tx global configuration\n");
1354 		goto jump_to_error;
1355 	}
1356 
1357 	if (bnxt_ulp_cntxt_ha_enabled(bp->ulp_ctx)) {
1358 		rc = ulp_ha_mgr_init(bp->ulp_ctx);
1359 		if (rc) {
1360 			BNXT_TF_DBG(ERR, "Failed to initialize HA %d\n", rc);
1361 			goto jump_to_error;
1362 		}
1363 		rc = ulp_ha_mgr_open(bp->ulp_ctx);
1364 		if (rc) {
1365 			BNXT_TF_DBG(ERR, "Failed to Process HA Open %d\n", rc);
1366 			goto jump_to_error;
1367 		}
1368 	}
1369 	BNXT_TF_DBG(DEBUG, "ulp ctx has been initialized\n");
1370 	return rc;
1371 
1372 jump_to_error:
1373 	bnxt_ulp_deinit(bp, session);
1374 	return rc;
1375 }
1376 
1377 /*
1378  * When a port is initialized by DPDK, this function sets up
1379  * the port-specific details.
1380  */
1381 int32_t
1382 bnxt_ulp_port_init(struct bnxt *bp)
1383 {
1384 	struct bnxt_ulp_session_state *session;
1385 	bool initialized;
1386 	enum bnxt_ulp_device_id devid = BNXT_ULP_DEVICE_ID_LAST;
1387 	uint32_t ulp_flags;
1388 	int32_t rc = 0;
1389 
1390 	if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1391 		BNXT_TF_DBG(ERR,
1392 			    "Skip ulp init for port: %d, not a TVF or PF\n",
1393 			    bp->eth_dev->data->port_id);
1394 		return rc;
1395 	}
1396 
1397 	if (!BNXT_TRUFLOW_EN(bp)) {
1398 		BNXT_TF_DBG(ERR,
1399 			    "Skip ulp init for port: %d, truflow is not enabled\n",
1400 			    bp->eth_dev->data->port_id);
1401 		return rc;
1402 	}
1403 
1404 	if (bp->ulp_ctx) {
1405 		BNXT_TF_DBG(DEBUG, "ulp ctx already allocated\n");
1406 		return rc;
1407 	}
1408 
1409 	bp->ulp_ctx = rte_zmalloc("bnxt_ulp_ctx",
1410 				  sizeof(struct bnxt_ulp_context), 0);
1411 	if (!bp->ulp_ctx) {
1412 		BNXT_TF_DBG(ERR, "Failed to allocate ulp ctx\n");
1413 		return -ENOMEM;
1414 	}
1415 
1416 	/*
1417 	 * Multiple uplink ports can be associated with a single vswitch.
1418 	 * Make sure only the port that is started first will initialize
1419 	 * the TF session.
1420 	 */
1421 	session = ulp_session_init(bp, &initialized);
1422 	if (!session) {
1423 		BNXT_TF_DBG(ERR, "Failed to initialize the tf session\n");
1424 		rc = -EIO;
1425 		goto jump_to_error;
1426 	}
1427 
1428 	if (initialized) {
1429 		/*
1430 		 * If ULP is already initialized for a specific domain then
1431 		 * simply assign the ulp context to this rte_eth_dev.
1432 		 */
1433 		rc = ulp_ctx_attach(bp, session);
1434 		if (rc) {
1435 			BNXT_TF_DBG(ERR, "Failed to attach the ulp context\n");
1436 			goto jump_to_error;
1437 		}
1438 
1439 		/*
1440 		 * Attach to the shared session, must be called after the
1441 		 * ulp_ctx_attach in order to ensure that ulp data is available
1442 		 * for attaching.
1443 		 */
1444 		rc = ulp_ctx_shared_session_attach(bp, session);
1445 		if (rc) {
1446 			BNXT_TF_DBG(ERR,
1447 				    "Failed to attach to shared session (%d)\n", rc);
1448 			goto jump_to_error;
1449 		}
1450 	} else {
1451 		rc = bnxt_ulp_init(bp, session);
1452 		if (rc) {
1453 			BNXT_TF_DBG(ERR, "Failed to initialize ULP\n");
1454 			goto jump_to_error;
1455 		}
1456 	}
1457 
1458 	/* Update bnxt driver flags */
1459 	rc = ulp_dparms_dev_port_intf_update(bp, bp->ulp_ctx);
1460 	if (rc) {
1461 		BNXT_TF_DBG(ERR, "Failed to update driver flags\n");
1462 		goto jump_to_error;
1463 	}
1464 
1465 	/* update the port database for the given interface */
1466 	rc = ulp_port_db_dev_port_intf_update(bp->ulp_ctx, bp->eth_dev);
1467 	if (rc) {
1468 		BNXT_TF_DBG(ERR, "Failed to update port database\n");
1469 		goto jump_to_error;
1470 	}
1471 	/* create the default rules */
1472 	rc = bnxt_ulp_create_df_rules(bp);
1473 	if (rc) {
1474 		BNXT_TF_DBG(ERR, "Failed to create default flow\n");
1475 		goto jump_to_error;
1476 	}
1477 
1478 	rc = bnxt_ulp_devid_get(bp, &devid);
1479 	if (rc) {
1480 		BNXT_TF_DBG(ERR, "Unable to determine device for ULP port init.\n");
1481 		goto jump_to_error;
1482 	}
1483 
1484 	if (devid != BNXT_ULP_DEVICE_ID_THOR && BNXT_ACCUM_STATS_EN(bp))
1485 		bp->ulp_ctx->cfg_data->accum_stats = true;
1486 
1487 	BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port init, accum_stats:%d\n",
1488 		    bp->eth_dev->data->port_id,
1489 		    bp->ulp_ctx->cfg_data->accum_stats);
1490 
1491 	/* set the unicast mode */
1492 	if (bnxt_ulp_cntxt_ptr2_ulp_flags_get(bp->ulp_ctx, &ulp_flags)) {
1493 		BNXT_TF_DBG(ERR, "Error in getting ULP context flags\n");
1494 		goto jump_to_error;
1495 	}
1496 	if (ulp_flags & BNXT_ULP_APP_UNICAST_ONLY) {
1497 		if (bnxt_pmd_set_unicast_rxmask(bp->eth_dev)) {
1498 			BNXT_TF_DBG(ERR, "Error in setting unicast rxmode\n");
1499 			goto jump_to_error;
1500 		}
1501 	}
1502 
1503 	return rc;
1504 
1505 jump_to_error:
1506 	bnxt_ulp_port_deinit(bp);
1507 	return rc;
1508 }
1509 
1510 /*
1511  * When a port is de-initialized by DPDK, this function cleans up
1512  * the port-specific details.
1513  */
1514 void
1515 bnxt_ulp_port_deinit(struct bnxt *bp)
1516 {
1517 	struct bnxt_ulp_session_state *session;
1518 	struct rte_pci_device *pci_dev;
1519 	struct rte_pci_addr *pci_addr;
1520 
1521 	if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1522 		BNXT_TF_DBG(ERR,
1523 			    "Skip ULP deinit port:%d, not a TVF or PF\n",
1524 			    bp->eth_dev->data->port_id);
1525 		return;
1526 	}
1527 
1528 	if (!BNXT_TRUFLOW_EN(bp)) {
1529 		BNXT_TF_DBG(ERR,
1530 			    "Skip ULP deinit for port:%d, truflow is not enabled\n",
1531 			    bp->eth_dev->data->port_id);
1532 		return;
1533 	}
1534 
1535 	if (!bp->ulp_ctx) {
1536 		BNXT_TF_DBG(DEBUG, "ulp ctx already de-allocated\n");
1537 		return;
1538 	}
1539 
1540 	BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port deinit\n",
1541 		    bp->eth_dev->data->port_id);
1542 
1543 	/* Free the ulp context in the context entry list */
1544 	bnxt_ulp_cntxt_list_del(bp->ulp_ctx);
1545 
1546 	/* Get the session details  */
1547 	pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
1548 	pci_addr = &pci_dev->addr;
1549 	pthread_mutex_lock(&bnxt_ulp_global_mutex);
1550 	session = ulp_get_session(pci_addr);
1551 	pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1552 
1553 	/* If the session is not found, just exit */
1554 	if (!session) {
1555 		/* Free the ulp context */
1556 		rte_free(bp->ulp_ctx);
1557 		bp->ulp_ctx = NULL;
1558 		return;
1559 	}
1560 
1561 	/* Check the reference count to deinit or detach */
1562 	if (bp->ulp_ctx->cfg_data && bp->ulp_ctx->cfg_data->ref_cnt) {
1563 		bp->ulp_ctx->cfg_data->ref_cnt--;
1564 		if (bp->ulp_ctx->cfg_data->ref_cnt) {
1565 			/* free the port details */
1566 			/* Free the default flow rule associated to this port */
1567 			/* Free the default flow rules associated with this port */
1568 			bnxt_ulp_destroy_vfr_default_rules(bp, false);
1569 
1570 			/* free flows associated with this port */
1571 			bnxt_ulp_flush_port_flows(bp);
1572 
1573 			/* close the session associated with this port */
1574 			ulp_ctx_detach(bp);
1575 
1576 			/* always detach/close shared after the session. */
1577 			ulp_ctx_shared_session_detach(bp);
1578 		} else {
1579 			/* Perform ulp ctx deinit */
1580 			bnxt_ulp_deinit(bp, session);
1581 		}
1582 	}
1583 
1584 	/* clean up the session */
1585 	ulp_session_deinit(session);
1586 
1587 	/* Free the ulp context */
1588 	rte_free(bp->ulp_ctx);
1589 	bp->ulp_ctx = NULL;
1590 }
1591 
1592 /* Below are the accessor functions for the internal data of the ulp context. */
1593 /* Function to set the Mark DB into the context */
1594 int32_t
1595 bnxt_ulp_cntxt_ptr2_mark_db_set(struct bnxt_ulp_context *ulp_ctx,
1596 				struct bnxt_ulp_mark_tbl *mark_tbl)
1597 {
1598 	if (!ulp_ctx || !ulp_ctx->cfg_data) {
1599 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1600 		return -EINVAL;
1601 	}
1602 
1603 	ulp_ctx->cfg_data->mark_tbl = mark_tbl;
1604 
1605 	return 0;
1606 }
1607 
1608 /* Function to retrieve the Mark DB from the context. */
1609 struct bnxt_ulp_mark_tbl *
1610 bnxt_ulp_cntxt_ptr2_mark_db_get(struct bnxt_ulp_context *ulp_ctx)
1611 {
1612 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1613 		return NULL;
1614 
1615 	return ulp_ctx->cfg_data->mark_tbl;
1616 }
1617 
1618 bool
1619 bnxt_ulp_cntxt_shared_session_enabled(struct bnxt_ulp_context *ulp_ctx)
1620 {
1621 	return ULP_SHARED_SESSION_IS_ENABLED(ulp_ctx->cfg_data->ulp_flags);
1622 }
1623 
1624 int32_t
1625 bnxt_ulp_cntxt_app_id_set(struct bnxt_ulp_context *ulp_ctx, uint8_t app_id)
1626 {
1627 	if (!ulp_ctx)
1628 		return -EINVAL;
1629 	ulp_ctx->cfg_data->app_id = app_id;
1630 	return 0;
1631 }
1632 
1633 int32_t
1634 bnxt_ulp_cntxt_app_id_get(struct bnxt_ulp_context *ulp_ctx, uint8_t *app_id)
1635 {
1636 	/* Default APP id is zero */
1637 	if (!ulp_ctx || !app_id)
1638 		return -EINVAL;
1639 	*app_id = ulp_ctx->cfg_data->app_id;
1640 	return 0;
1641 }
1642 
1643 /* Function to set the device id of the hardware. */
1644 int32_t
1645 bnxt_ulp_cntxt_dev_id_set(struct bnxt_ulp_context *ulp_ctx,
1646 			  uint32_t dev_id)
1647 {
1648 	if (ulp_ctx && ulp_ctx->cfg_data) {
1649 		ulp_ctx->cfg_data->dev_id = dev_id;
1650 		return 0;
1651 	}
1652 
1653 	return -EINVAL;
1654 }
1655 
1656 /* Function to get the device id of the hardware. */
1657 int32_t
1658 bnxt_ulp_cntxt_dev_id_get(struct bnxt_ulp_context *ulp_ctx,
1659 			  uint32_t *dev_id)
1660 {
1661 	if (ulp_ctx && ulp_ctx->cfg_data) {
1662 		*dev_id = ulp_ctx->cfg_data->dev_id;
1663 		return 0;
1664 	}
1665 	*dev_id = BNXT_ULP_DEVICE_ID_LAST;
1666 	BNXT_TF_DBG(ERR, "Failed to read dev_id from ulp ctxt\n");
1667 	return -EINVAL;
1668 }
1669 
1670 int32_t
1671 bnxt_ulp_cntxt_mem_type_set(struct bnxt_ulp_context *ulp_ctx,
1672 			    enum bnxt_ulp_flow_mem_type mem_type)
1673 {
1674 	if (ulp_ctx && ulp_ctx->cfg_data) {
1675 		ulp_ctx->cfg_data->mem_type = mem_type;
1676 		return 0;
1677 	}
1678 	BNXT_TF_DBG(ERR, "Failed to write mem_type in ulp ctxt\n");
1679 	return -EINVAL;
1680 }
1681 
1682 int32_t
1683 bnxt_ulp_cntxt_mem_type_get(struct bnxt_ulp_context *ulp_ctx,
1684 			    enum bnxt_ulp_flow_mem_type *mem_type)
1685 {
1686 	if (ulp_ctx && ulp_ctx->cfg_data) {
1687 		*mem_type = ulp_ctx->cfg_data->mem_type;
1688 		return 0;
1689 	}
1690 	*mem_type = BNXT_ULP_FLOW_MEM_TYPE_LAST;
1691 	BNXT_TF_DBG(ERR, "Failed to read mem_type in ulp ctxt\n");
1692 	return -EINVAL;
1693 }
1694 
1695 /* Function to get the table scope id of the EEM table. */
1696 int32_t
1697 bnxt_ulp_cntxt_tbl_scope_id_get(struct bnxt_ulp_context *ulp_ctx,
1698 				uint32_t *tbl_scope_id)
1699 {
1700 	if (ulp_ctx && ulp_ctx->cfg_data) {
1701 		*tbl_scope_id = ulp_ctx->cfg_data->tbl_scope_id;
1702 		return 0;
1703 	}
1704 
1705 	return -EINVAL;
1706 }
1707 
1708 /* Function to set the table scope id of the EEM table. */
1709 int32_t
1710 bnxt_ulp_cntxt_tbl_scope_id_set(struct bnxt_ulp_context *ulp_ctx,
1711 				uint32_t tbl_scope_id)
1712 {
1713 	if (ulp_ctx && ulp_ctx->cfg_data) {
1714 		ulp_ctx->cfg_data->tbl_scope_id = tbl_scope_id;
1715 		return 0;
1716 	}
1717 
1718 	return -EINVAL;
1719 }
1720 
1721 /* Function to set the shared tfp session details in the ulp context. */
1722 int32_t
1723 bnxt_ulp_cntxt_shared_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
1724 {
1725 	if (!ulp) {
1726 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1727 		return -EINVAL;
1728 	}
1729 
1730 	if (tfp == NULL) {
1731 		if (ulp->cfg_data->num_shared_clients > 0)
1732 			ulp->cfg_data->num_shared_clients--;
1733 	} else {
1734 		ulp->cfg_data->num_shared_clients++;
1735 	}
1736 
1737 	ulp->g_shared_tfp = tfp;
1738 	return 0;
1739 }
1740 
1741 /* Function to get the shared tfp session details from the ulp context. */
1742 struct tf *
1743 bnxt_ulp_cntxt_shared_tfp_get(struct bnxt_ulp_context *ulp)
1744 {
1745 	if (!ulp) {
1746 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1747 		return NULL;
1748 	}
1749 	return ulp->g_shared_tfp;
1750 }
1751 
1752 /* Function to get the number of shared clients attached */
1753 uint8_t
1754 bnxt_ulp_cntxt_num_shared_clients_get(struct bnxt_ulp_context *ulp)
1755 {
1756 	if (ulp == NULL || ulp->cfg_data == NULL) {
1757 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1758 		return 0;
1759 	}
1760 	return ulp->cfg_data->num_shared_clients;
1761 }
1762 
1763 /* Function to set the tfp session details in the ulp context. */
1764 int32_t
1765 bnxt_ulp_cntxt_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
1766 {
1767 	if (!ulp) {
1768 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1769 		return -EINVAL;
1770 	}
1771 
1772 	ulp->g_tfp = tfp;
1773 	return 0;
1774 }
1775 
1776 /* Function to get the tfp session details from the ulp context. */
1777 struct tf *
1778 bnxt_ulp_cntxt_tfp_get(struct bnxt_ulp_context *ulp,
1779 		       enum bnxt_ulp_shared_session shared)
1780 {
1781 	if (!ulp) {
1782 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1783 		return NULL;
1784 	}
1785 	if (shared)
1786 		return ulp->g_shared_tfp;
1787 	else
1788 		return ulp->g_tfp;
1789 }
1790 
1791 /*
1792  * Get the device table entry based on the device id.
1793  *
1794  * dev_id [in] The device id of the hardware
1795  *
1796  * Returns the pointer to the device parameters.
1797  */
1798 struct bnxt_ulp_device_params *
1799 bnxt_ulp_device_params_get(uint32_t dev_id)
1800 {
1801 	if (dev_id < BNXT_ULP_MAX_NUM_DEVICES)
1802 		return &ulp_device_params[dev_id];
1803 	return NULL;
1804 }
1805 
1806 /* Function to set the flow database to the ulp context. */
1807 int32_t
1808 bnxt_ulp_cntxt_ptr2_flow_db_set(struct bnxt_ulp_context	*ulp_ctx,
1809 				struct bnxt_ulp_flow_db	*flow_db)
1810 {
1811 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1812 		return -EINVAL;
1813 
1814 	ulp_ctx->cfg_data->flow_db = flow_db;
1815 	return 0;
1816 }
1817 
1818 /* Function to get the flow database from the ulp context. */
1819 struct bnxt_ulp_flow_db	*
1820 bnxt_ulp_cntxt_ptr2_flow_db_get(struct bnxt_ulp_context	*ulp_ctx)
1821 {
1822 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1823 		return NULL;
1824 
1825 	return ulp_ctx->cfg_data->flow_db;
1826 }
1827 
1828 /* Function to get the tunnel cache table info from the ulp context. */
1829 struct bnxt_tun_cache_entry *
1830 bnxt_ulp_cntxt_ptr2_tun_tbl_get(struct bnxt_ulp_context *ulp_ctx)
1831 {
1832 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1833 		return NULL;
1834 
1835 	return ulp_ctx->cfg_data->tun_tbl;
1836 }
1837 
1838 /* Function to get the ulp context from eth device. */
1839 struct bnxt_ulp_context	*
1840 bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev	*dev)
1841 {
1842 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1843 
1844 	if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) {
1845 		struct bnxt_representor *vfr = dev->data->dev_private;
1846 
1847 		bp = vfr->parent_dev->data->dev_private;
1848 	}
1849 
1850 	if (!bp) {
1851 		BNXT_TF_DBG(ERR, "Bnxt private data is not initialized\n");
1852 		return NULL;
1853 	}
1854 	return bp->ulp_ctx;
1855 }
1856 
1857 int32_t
1858 bnxt_ulp_cntxt_ptr2_mapper_data_set(struct bnxt_ulp_context *ulp_ctx,
1859 				    void *mapper_data)
1860 {
1861 	if (!ulp_ctx || !ulp_ctx->cfg_data) {
1862 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1863 		return -EINVAL;
1864 	}
1865 
1866 	ulp_ctx->cfg_data->mapper_data = mapper_data;
1867 	return 0;
1868 }
1869 
1870 void *
1871 bnxt_ulp_cntxt_ptr2_mapper_data_get(struct bnxt_ulp_context *ulp_ctx)
1872 {
1873 	if (!ulp_ctx || !ulp_ctx->cfg_data) {
1874 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1875 		return NULL;
1876 	}
1877 
1878 	return ulp_ctx->cfg_data->mapper_data;
1879 }
1880 
1881 /* Function to set the port database to the ulp context. */
1882 int32_t
1883 bnxt_ulp_cntxt_ptr2_port_db_set(struct bnxt_ulp_context	*ulp_ctx,
1884 				struct bnxt_ulp_port_db	*port_db)
1885 {
1886 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1887 		return -EINVAL;
1888 
1889 	ulp_ctx->cfg_data->port_db = port_db;
1890 	return 0;
1891 }
1892 
1893 /* Function to get the port database from the ulp context. */
1894 struct bnxt_ulp_port_db *
1895 bnxt_ulp_cntxt_ptr2_port_db_get(struct bnxt_ulp_context	*ulp_ctx)
1896 {
1897 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1898 		return NULL;
1899 
1900 	return ulp_ctx->cfg_data->port_db;
1901 }
1902 
1903 /* Function to set the flow counter info into the context */
1904 int32_t
1905 bnxt_ulp_cntxt_ptr2_fc_info_set(struct bnxt_ulp_context *ulp_ctx,
1906 				struct bnxt_ulp_fc_info *ulp_fc_info)
1907 {
1908 	if (!ulp_ctx || !ulp_ctx->cfg_data) {
1909 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1910 		return -EINVAL;
1911 	}
1912 
1913 	ulp_ctx->cfg_data->fc_info = ulp_fc_info;
1914 
1915 	return 0;
1916 }
1917 
1918 /* Function to retrieve the flow counter info from the context. */
1919 struct bnxt_ulp_fc_info *
1920 bnxt_ulp_cntxt_ptr2_fc_info_get(struct bnxt_ulp_context *ulp_ctx)
1921 {
1922 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1923 		return NULL;
1924 
1925 	return ulp_ctx->cfg_data->fc_info;
1926 }
1927 
1928 /* Function to get the ulp flags from the ulp context. */
1929 int32_t
1930 bnxt_ulp_cntxt_ptr2_ulp_flags_get(struct bnxt_ulp_context *ulp_ctx,
1931 				  uint32_t *flags)
1932 {
1933 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1934 		return -1;
1935 
1936 	*flags =  ulp_ctx->cfg_data->ulp_flags;
1937 	return 0;
1938 }
1939 
1940 /* Function to get the ulp vfr info from the ulp context. */
1941 struct bnxt_ulp_vfr_rule_info*
1942 bnxt_ulp_cntxt_ptr2_ulp_vfr_info_get(struct bnxt_ulp_context *ulp_ctx,
1943 				     uint32_t port_id)
1944 {
1945 	if (!ulp_ctx || !ulp_ctx->cfg_data || port_id >= RTE_MAX_ETHPORTS)
1946 		return NULL;
1947 
1948 	return &ulp_ctx->cfg_data->vfr_rule_info[port_id];
1949 }
1950 
1951 /* Function to acquire the flow database lock from the ulp context. */
1952 int32_t
1953 bnxt_ulp_cntxt_acquire_fdb_lock(struct bnxt_ulp_context	*ulp_ctx)
1954 {
1955 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1956 		return -1;
1957 
1958 	if (pthread_mutex_lock(&ulp_ctx->cfg_data->flow_db_lock)) {
1959 		BNXT_TF_DBG(ERR, "unable to acquire fdb lock\n");
1960 		return -1;
1961 	}
1962 	return 0;
1963 }
1964 
1965 /* Function to release the flow database lock from the ulp context. */
1966 void
1967 bnxt_ulp_cntxt_release_fdb_lock(struct bnxt_ulp_context	*ulp_ctx)
1968 {
1969 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1970 		return;
1971 
1972 	pthread_mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock);
1973 }
1974 
1975 /* Function to set the ha info into the context */
1976 int32_t
1977 bnxt_ulp_cntxt_ptr2_ha_info_set(struct bnxt_ulp_context *ulp_ctx,
1978 				struct bnxt_ulp_ha_mgr_info *ulp_ha_info)
1979 {
1980 	if (ulp_ctx == NULL || ulp_ctx->cfg_data == NULL) {
1981 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1982 		return -EINVAL;
1983 	}
1984 	ulp_ctx->cfg_data->ha_info = ulp_ha_info;
1985 	return 0;
1986 }
1987 
1988 /* Function to retrieve the ha info from the context. */
1989 struct bnxt_ulp_ha_mgr_info *
1990 bnxt_ulp_cntxt_ptr2_ha_info_get(struct bnxt_ulp_context *ulp_ctx)
1991 {
1992 	if (ulp_ctx == NULL || ulp_ctx->cfg_data == NULL)
1993 		return NULL;
1994 	return ulp_ctx->cfg_data->ha_info;
1995 }
1996 
1997 bool
1998 bnxt_ulp_cntxt_ha_enabled(struct bnxt_ulp_context *ulp_ctx)
1999 {
2000 	if (ulp_ctx == NULL || ulp_ctx->cfg_data == NULL)
2001 		return false;
2002 	return !!ULP_HIGH_AVAIL_IS_ENABLED(ulp_ctx->cfg_data->ulp_flags);
2003 }
2004 
2005 static int32_t
2006 bnxt_ulp_cntxt_list_init(void)
2007 {
2008 	/* Create the cntxt spin lock */
2009 	rte_spinlock_init(&bnxt_ulp_ctxt_lock);
2010 
2011 	return 0;
2012 }
2013 
2014 static int32_t
2015 bnxt_ulp_cntxt_list_add(struct bnxt_ulp_context *ulp_ctx)
2016 {
2017 	struct ulp_context_list_entry	*entry;
2018 
2019 	entry = rte_zmalloc(NULL, sizeof(struct ulp_context_list_entry), 0);
2020 	if (entry == NULL) {
2021 		BNXT_TF_DBG(ERR, "unable to allocate memory\n");
2022 		return -ENOMEM;
2023 	}
2024 
2025 	rte_spinlock_lock(&bnxt_ulp_ctxt_lock);
2026 	entry->ulp_ctx = ulp_ctx;
2027 	TAILQ_INSERT_TAIL(&ulp_cntx_list, entry, next);
2028 	rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
2029 	return 0;
2030 }
2031 
2032 static void
2033 bnxt_ulp_cntxt_list_del(struct bnxt_ulp_context *ulp_ctx)
2034 {
2035 	struct ulp_context_list_entry	*entry, *temp;
2036 
2037 	rte_spinlock_lock(&bnxt_ulp_ctxt_lock);
2038 	RTE_TAILQ_FOREACH_SAFE(entry, &ulp_cntx_list, next, temp) {
2039 		if (entry->ulp_ctx == ulp_ctx) {
2040 			TAILQ_REMOVE(&ulp_cntx_list, entry, next);
2041 			rte_free(entry);
2042 			break;
2043 		}
2044 	}
2045 	rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
2046 }
2047 
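/*
 * Return the first available ULP context from the context list, or NULL if
 * the list lock cannot be taken or no context is available.
 * On success the context list spinlock is left held; the caller must call
 * bnxt_ulp_cntxt_entry_release() once it is done with the context.
 */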
2048 struct bnxt_ulp_context *
2049 bnxt_ulp_cntxt_entry_acquire(void)
2050 {
2051 	struct ulp_context_list_entry	*entry;
2052 
2053 	/* take a lock and get the first ulp context available */
2054 	if (rte_spinlock_trylock(&bnxt_ulp_ctxt_lock)) {
2055 		TAILQ_FOREACH(entry, &ulp_cntx_list, next)
2056 			if (entry->ulp_ctx)
2057 				return entry->ulp_ctx;
		rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
2058 	}
2059 	return NULL;
2060 }
2061 
2062 void
2063 bnxt_ulp_cntxt_entry_release(void)
2064 {
2065 	rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
2066 }
2067 
2068 /* Function to get the app tunnel details from the ulp context. */
2069 struct bnxt_flow_app_tun_ent *
2070 bnxt_ulp_cntxt_ptr2_app_tun_list_get(struct bnxt_ulp_context *ulp)
2071 {
2072 	if (!ulp || !ulp->cfg_data)
2073 		return NULL;
2074 
2075 	return ulp->cfg_data->app_tun;
2076 }
2077