xref: /dpdk/drivers/net/bnxt/tf_ulp/bnxt_ulp.c (revision 06d1a5d056b30dd18b1e4abc98c19cde57a8016c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2021 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <rte_log.h>
7 #include <rte_malloc.h>
8 #include <rte_flow.h>
9 #include <rte_flow_driver.h>
10 #include <rte_tailq.h>
11 #include <rte_spinlock.h>
12 
13 #include "bnxt.h"
14 #include "bnxt_ulp.h"
15 #include "bnxt_tf_common.h"
16 #include "tf_core.h"
17 #include "tf_ext_flow_handle.h"
18 
19 #include "ulp_template_db_enum.h"
20 #include "ulp_template_struct.h"
21 #include "ulp_mark_mgr.h"
22 #include "ulp_fc_mgr.h"
23 #include "ulp_flow_db.h"
24 #include "ulp_mapper.h"
25 #include "ulp_port_db.h"
26 #include "ulp_tun.h"
27 #include "ulp_ha_mgr.h"
28 #include "bnxt_tf_pmd_shim.h"
29 
30 /* Linked list of all TF sessions. */
31 STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list =
32 			STAILQ_HEAD_INITIALIZER(bnxt_ulp_session_list);
33 
34 /* Mutex to synchronize bnxt_ulp_session_list operations. */
35 static pthread_mutex_t bnxt_ulp_global_mutex = PTHREAD_MUTEX_INITIALIZER;
36 
37 /* Spin lock to protect context global list */
38 rte_spinlock_t bnxt_ulp_ctxt_lock;
39 TAILQ_HEAD(cntx_list_entry_list, ulp_context_list_entry);
40 static struct cntx_list_entry_list ulp_cntx_list =
41 	TAILQ_HEAD_INITIALIZER(ulp_cntx_list);
42 
43 /* Static function declarations */
44 static int32_t bnxt_ulp_cntxt_list_init(void);
45 static int32_t bnxt_ulp_cntxt_list_add(struct bnxt_ulp_context *ulp_ctx);
46 static void bnxt_ulp_cntxt_list_del(struct bnxt_ulp_context *ulp_ctx);
47 
48 /*
49  * Allow the deletion of context only for the bnxt device that
50  * created the session.
51  */
52 bool
53 ulp_ctx_deinit_allowed(struct bnxt_ulp_context *ulp_ctx)
54 {
55 	if (!ulp_ctx || !ulp_ctx->cfg_data)
56 		return false;
57 
58 	if (!ulp_ctx->cfg_data->ref_cnt) {
59 		BNXT_TF_DBG(DEBUG, "ulp ctx shall initiate deinit\n");
60 		return true;
61 	}
62 
63 	return false;
64 }
65 
66 static int32_t
67 bnxt_ulp_devid_get(struct bnxt *bp,
68 		   enum bnxt_ulp_device_id  *ulp_dev_id)
69 {
70 	if (BNXT_CHIP_P5(bp)) {
71 		*ulp_dev_id = BNXT_ULP_DEVICE_ID_THOR;
72 		return 0;
73 	}
74 
75 	if (BNXT_STINGRAY(bp))
76 		*ulp_dev_id = BNXT_ULP_DEVICE_ID_STINGRAY;
77 	else
78 		/* Assuming Whitney */
79 		*ulp_dev_id = BNXT_ULP_DEVICE_ID_WH_PLUS;
80 
81 	return 0;
82 }
83 
84 struct bnxt_ulp_app_capabilities_info *
85 bnxt_ulp_app_cap_list_get(uint32_t *num_entries)
86 {
87 	if (!num_entries)
88 		return NULL;
89 	*num_entries = BNXT_ULP_APP_CAP_TBL_MAX_SZ;
90 	return ulp_app_cap_info_list;
91 }
92 
93 static struct bnxt_ulp_resource_resv_info *
94 bnxt_ulp_app_resource_resv_list_get(uint32_t *num_entries)
95 {
96 	if (num_entries == NULL)
97 		return NULL;
98 	*num_entries = BNXT_ULP_APP_RESOURCE_RESV_LIST_MAX_SZ;
99 	return ulp_app_resource_resv_list;
100 }
101 
102 struct bnxt_ulp_resource_resv_info *
103 bnxt_ulp_resource_resv_list_get(uint32_t *num_entries)
104 {
105 	if (!num_entries)
106 		return NULL;
107 	*num_entries = BNXT_ULP_RESOURCE_RESV_LIST_MAX_SZ;
108 	return ulp_resource_resv_list;
109 }
110 
111 struct bnxt_ulp_glb_resource_info *
112 bnxt_ulp_app_glb_resource_info_list_get(uint32_t *num_entries)
113 {
114 	if (!num_entries)
115 		return NULL;
116 	*num_entries = BNXT_ULP_APP_GLB_RESOURCE_TBL_MAX_SZ;
117 	return ulp_app_glb_resource_tbl;
118 }
119 
120 static int32_t
121 bnxt_ulp_named_resources_calc(struct bnxt_ulp_context *ulp_ctx,
122 			      struct bnxt_ulp_glb_resource_info *info,
123 			      uint32_t num,
124 			      struct tf_session_resources *res)
125 {
126 	uint32_t dev_id = BNXT_ULP_DEVICE_ID_LAST, res_type, i;
127 	enum tf_dir dir;
128 	uint8_t app_id;
129 	int32_t rc = 0;
130 
131 	if (ulp_ctx == NULL || info == NULL || res == NULL || num == 0) {
132 		BNXT_TF_DBG(ERR, "Invalid parms to named resources calc.\n");
133 		return -EINVAL;
134 	}
135 
136 	rc = bnxt_ulp_cntxt_app_id_get(ulp_ctx, &app_id);
137 	if (rc) {
138 		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
139 		return -EINVAL;
140 	}
141 
142 	rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id);
143 	if (rc) {
144 		BNXT_TF_DBG(ERR, "Unable to get the dev id from ulp.\n");
145 		return -EINVAL;
146 	}
147 
148 	for (i = 0; i < num; i++) {
149 		if (dev_id != info[i].device_id || app_id != info[i].app_id)
150 			continue;
151 		dir = info[i].direction;
152 		res_type = info[i].resource_type;
153 
154 		switch (info[i].resource_func) {
155 		case BNXT_ULP_RESOURCE_FUNC_IDENTIFIER:
156 			res->ident_cnt[dir].cnt[res_type]++;
157 			break;
158 		case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE:
159 			res->tbl_cnt[dir].cnt[res_type]++;
160 			break;
161 		case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE:
162 			res->tcam_cnt[dir].cnt[res_type]++;
163 			break;
164 		case BNXT_ULP_RESOURCE_FUNC_EM_TABLE:
165 			res->em_cnt[dir].cnt[res_type]++;
166 			break;
167 		default:
168 			BNXT_TF_DBG(ERR, "Unknown resource func (0x%x)\n",
169 				    info[i].resource_func);
170 			continue;
171 		}
172 	}
173 
174 	return 0;
175 }
176 
177 static int32_t
178 bnxt_ulp_unnamed_resources_calc(struct bnxt_ulp_context *ulp_ctx,
179 				struct bnxt_ulp_resource_resv_info *info,
180 				uint32_t num,
181 				struct tf_session_resources *res)
182 {
183 	uint32_t dev_id, res_type, i;
184 	enum tf_dir dir;
185 	uint8_t app_id;
186 	int32_t rc = 0;
187 
188 	if (ulp_ctx == NULL || res == NULL || info == NULL || num == 0) {
189 		BNXT_TF_DBG(ERR, "Invalid arguments to get resources.\n");
190 		return -EINVAL;
191 	}
192 
193 	rc = bnxt_ulp_cntxt_app_id_get(ulp_ctx, &app_id);
194 	if (rc) {
195 		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
196 		return -EINVAL;
197 	}
198 
199 	rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id);
200 	if (rc) {
201 		BNXT_TF_DBG(ERR, "Unable to get the dev id from ulp.\n");
202 		return -EINVAL;
203 	}
204 
205 	for (i = 0; i < num; i++) {
206 		if (app_id != info[i].app_id || dev_id != info[i].device_id)
207 			continue;
208 		dir = info[i].direction;
209 		res_type = info[i].resource_type;
210 
211 		switch (info[i].resource_func) {
212 		case BNXT_ULP_RESOURCE_FUNC_IDENTIFIER:
213 			res->ident_cnt[dir].cnt[res_type] = info[i].count;
214 			break;
215 		case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE:
216 			res->tbl_cnt[dir].cnt[res_type] = info[i].count;
217 			break;
218 		case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE:
219 			res->tcam_cnt[dir].cnt[res_type] = info[i].count;
220 			break;
221 		case BNXT_ULP_RESOURCE_FUNC_EM_TABLE:
222 			res->em_cnt[dir].cnt[res_type] = info[i].count;
223 			break;
224 		default:
225 			break;
226 		}
227 	}
228 	return 0;
229 }
230 
231 static int32_t
232 bnxt_ulp_tf_resources_get(struct bnxt_ulp_context *ulp_ctx,
233 			  struct tf_session_resources *res)
234 {
235 	struct bnxt_ulp_resource_resv_info *unnamed = NULL;
236 	uint32_t unum;
237 	int32_t rc = 0;
238 
239 	if (ulp_ctx == NULL || res == NULL) {
240 		BNXT_TF_DBG(ERR, "Invalid arguments to get resources.\n");
241 		return -EINVAL;
242 	}
243 
244 	unnamed = bnxt_ulp_resource_resv_list_get(&unum);
245 	if (unnamed == NULL) {
246 		BNXT_TF_DBG(ERR, "Unable to get resource resv list.\n");
247 		return -EINVAL;
248 	}
249 
250 	rc = bnxt_ulp_unnamed_resources_calc(ulp_ctx, unnamed, unum, res);
251 	if (rc)
252 		BNXT_TF_DBG(ERR, "Unable to calc resources for session.\n");
253 
254 	return rc;
255 }
256 
257 static int32_t
258 bnxt_ulp_tf_shared_session_resources_get(struct bnxt_ulp_context *ulp_ctx,
259 					 struct tf_session_resources *res)
260 {
261 	struct bnxt_ulp_resource_resv_info *unnamed;
262 	struct bnxt_ulp_glb_resource_info *named;
263 	uint32_t unum, nnum;
264 	int32_t rc;
265 
266 	if (ulp_ctx == NULL || res == NULL) {
267 		BNXT_TF_DBG(ERR, "Invalid arguments to get resources.\n");
268 		return -EINVAL;
269 	}
270 
271 	/* Make sure the resources are zero before accumulating. */
272 	memset(res, 0, sizeof(struct tf_session_resources));
273 
274 	/*
275 	 * Shared resources comprise both named and unnamed resources.
276 	 * First get the unnamed baseline counts, then add the named counts on top.
277 	 */
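	/*
	 * Illustrative example (not taken from the driver tables): if the
	 * unnamed app list reserves 8 identifiers of a given type and the
	 * named list contains 2 more entries of that same type, the session
	 * request ends up asking for 10 of them.
	 */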
278 	/* Get the baseline counts */
279 	unnamed = bnxt_ulp_app_resource_resv_list_get(&unum);
280 	if (unnamed == NULL) {
281 		BNXT_TF_DBG(ERR, "Unable to get shared resource resv list.\n");
282 		return -EINVAL;
283 	}
284 	rc = bnxt_ulp_unnamed_resources_calc(ulp_ctx, unnamed, unum, res);
285 	if (rc) {
286 		BNXT_TF_DBG(ERR, "Unable to calc resources for shared session.\n");
287 		return -EINVAL;
288 	}
289 
290 	/* Get the named list and add the totals */
291 	named = bnxt_ulp_app_glb_resource_info_list_get(&nnum);
292 	if (named == NULL) {
293 		BNXT_TF_DBG(ERR, "Unable to get app global resource list\n");
294 		return -EINVAL;
295 	}
296 	rc = bnxt_ulp_named_resources_calc(ulp_ctx, named, nnum, res);
297 	if (rc)
298 		BNXT_TF_DBG(ERR, "Unable to calc named resources\n");
299 
300 	return rc;
301 }
302 
303 int32_t
304 bnxt_ulp_cntxt_app_caps_init(struct bnxt_ulp_context *ulp_ctx,
305 			     uint8_t app_id, uint32_t dev_id)
306 {
307 	struct bnxt_ulp_app_capabilities_info *info;
308 	uint32_t num = 0;
309 	uint16_t i;
310 	bool found = false;
311 
312 	if (ULP_APP_DEV_UNSUPPORTED_ENABLED(ulp_ctx->cfg_data->ulp_flags)) {
313 		BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
314 			    app_id, dev_id);
315 		return -EINVAL;
316 	}
317 
318 	info = bnxt_ulp_app_cap_list_get(&num);
319 	if (!info || !num) {
320 		BNXT_TF_DBG(ERR, "Failed to get app capabilities.\n");
321 		return -EINVAL;
322 	}
323 
324 	for (i = 0; i < num; i++) {
325 		if (info[i].app_id != app_id || info[i].device_id != dev_id)
326 			continue;
327 		found = true;
328 		if (info[i].flags & BNXT_ULP_APP_CAP_SHARED_EN)
329 			ulp_ctx->cfg_data->ulp_flags |=
330 				BNXT_ULP_SHARED_SESSION_ENABLED;
331 		if (info[i].flags & BNXT_ULP_APP_CAP_HOT_UPGRADE_EN)
332 			ulp_ctx->cfg_data->ulp_flags |=
333 				BNXT_ULP_HIGH_AVAIL_ENABLED;
334 		if (info[i].flags & BNXT_ULP_APP_CAP_UNICAST_ONLY)
335 			ulp_ctx->cfg_data->ulp_flags |=
336 				BNXT_ULP_APP_UNICAST_ONLY;
337 	}
338 	if (!found) {
339 		BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
340 			    app_id, dev_id);
341 		ulp_ctx->cfg_data->ulp_flags |= BNXT_ULP_APP_DEV_UNSUPPORTED;
342 		return -EINVAL;
343 	}
344 
345 	return 0;
346 }
347 
348 static void
349 ulp_ctx_shared_session_close(struct bnxt *bp,
350 			     struct bnxt_ulp_session_state *session)
351 {
352 	struct tf *tfp;
353 	int32_t rc;
354 
355 	if (!bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx))
356 		return;
357 
358 	tfp = bnxt_ulp_cntxt_shared_tfp_get(bp->ulp_ctx);
359 	if (!tfp) {
360 		/*
361 		 * Log it under debug since this is likely a case of the
362 		 * shared session not being created.  For example, a failed
363 		 * initialization.
364 		 */
365 		BNXT_TF_DBG(DEBUG, "Failed to get shared tfp on close.\n");
366 		return;
367 	}
368 	rc = tf_close_session(tfp);
369 	if (rc)
370 		BNXT_TF_DBG(ERR, "Failed to close the shared session rc=%d.\n",
371 			    rc);
372 	(void)bnxt_ulp_cntxt_shared_tfp_set(bp->ulp_ctx, NULL);
373 
374 	session->g_shared_tfp.session = NULL;
375 }
376 
377 static int32_t
378 ulp_ctx_shared_session_open(struct bnxt *bp,
379 			    struct bnxt_ulp_session_state *session)
380 {
381 	struct rte_eth_dev *ethdev = bp->eth_dev;
382 	struct tf_session_resources *resources;
383 	struct tf_open_session_parms parms;
384 	size_t copy_nbytes;
385 	uint32_t ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST;
386 	int32_t	rc = 0;
387 	uint8_t app_id;
388 
389 	/* only perform this if shared session is enabled. */
390 	if (!bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx))
391 		return 0;
392 
393 	memset(&parms, 0, sizeof(parms));
394 
395 	rc = rte_eth_dev_get_name_by_port(ethdev->data->port_id,
396 					  parms.ctrl_chan_name);
397 	if (rc) {
398 		BNXT_TF_DBG(ERR, "Invalid port %d, rc = %d\n",
399 			    ethdev->data->port_id, rc);
400 		return rc;
401 	}
402 	resources = &parms.resources;
403 
404 	/*
405 	 * Compute the remaining space in ctrl_chan_name, reserving one byte
406 	 * for the NULL terminator.
407 	 */
408 	copy_nbytes = sizeof(parms.ctrl_chan_name) -
409 		strlen(parms.ctrl_chan_name) - 1;
410 
411 	/*
412 	 * Build the ctrl_chan_name with shared token.
413 	 * When HA is enabled, the WC TCAM needs extra management by the core,
414 	 * so add the wc_tcam string to the control channel.
415 	 */
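	/*
	 * Illustrative example (assumed PCI naming, not taken from the code):
	 * a port named "0000:01:00.0" would yield a control channel name of
	 * "0000:01:00.0-tf_shared", or "0000:01:00.0-tf_shared-wc_tcam" when
	 * HA is enabled.
	 */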
416 	if (bnxt_ulp_cntxt_ha_enabled(bp->ulp_ctx))
417 		strncat(parms.ctrl_chan_name, "-tf_shared-wc_tcam",
418 			copy_nbytes);
419 	else
420 		strncat(parms.ctrl_chan_name, "-tf_shared", copy_nbytes);
421 
422 	rc = bnxt_ulp_tf_shared_session_resources_get(bp->ulp_ctx, resources);
423 	if (rc)
424 		return rc;
425 
426 	rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);
427 	if (rc) {
428 		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
429 		return -EINVAL;
430 	}
431 
432 	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
433 	if (rc) {
434 		BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
435 		return rc;
436 	}
437 
438 	switch (ulp_dev_id) {
439 	case BNXT_ULP_DEVICE_ID_WH_PLUS:
440 		parms.device_type = TF_DEVICE_TYPE_WH;
441 		break;
442 	case BNXT_ULP_DEVICE_ID_STINGRAY:
443 		parms.device_type = TF_DEVICE_TYPE_SR;
444 		break;
445 	case BNXT_ULP_DEVICE_ID_THOR:
446 		parms.device_type = TF_DEVICE_TYPE_THOR;
447 		break;
448 	default:
449 		BNXT_TF_DBG(ERR, "Unable to determine dev for opening session.\n");
450 		return -EINVAL;
451 	}
452 
453 	parms.shadow_copy = true;
454 	parms.bp = bp;
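	/*
	 * Note (inferred from the check below): app IDs 0 and 3 use two WC
	 * TCAM slices per row, all other app IDs use a single slice per row.
	 */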
455 	if (app_id == 0 || app_id == 3)
456 		parms.wc_num_slices = TF_WC_TCAM_2_SLICE_PER_ROW;
457 	else
458 		parms.wc_num_slices = TF_WC_TCAM_1_SLICE_PER_ROW;
459 
460 	/*
461 	 * Open the session here, but collect the resources during the
462 	 * mapper initialization.
463 	 */
464 	rc = tf_open_session(&bp->tfp_shared, &parms);
465 	if (rc)
466 		return rc;
467 
468 	if (parms.shared_session_creator)
469 		BNXT_TF_DBG(DEBUG, "Shared session creator.\n");
470 	else
471 		BNXT_TF_DBG(DEBUG, "Shared session attached.\n");
472 
473 	/* Save the shared session in global data */
474 	if (!session->g_shared_tfp.session)
475 		session->g_shared_tfp.session = bp->tfp_shared.session;
476 
477 	rc = bnxt_ulp_cntxt_shared_tfp_set(bp->ulp_ctx, &bp->tfp_shared);
478 	if (rc)
479 		BNXT_TF_DBG(ERR, "Failed to add shared tfp to ulp (%d)\n", rc);
480 
481 	return rc;
482 }
483 
484 static int32_t
485 ulp_ctx_shared_session_attach(struct bnxt *bp,
486 			      struct bnxt_ulp_session_state *session)
487 {
488 	int32_t rc = 0;
489 
490 	/* Simply return success if shared session not enabled */
491 	if (bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx)) {
492 		bp->tfp_shared.session = session->g_shared_tfp.session;
493 		rc = ulp_ctx_shared_session_open(bp, session);
494 	}
495 
496 	return rc;
497 }
498 
499 static void
500 ulp_ctx_shared_session_detach(struct bnxt *bp)
501 {
502 	if (bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx)) {
503 		if (bp->tfp_shared.session) {
504 			tf_close_session(&bp->tfp_shared);
505 			bp->tfp_shared.session = NULL;
506 		}
507 	}
508 }
509 
510 /*
511  * Initialize an ULP session.
512  * An ULP session will contain all the resources needed to support rte flow
513  * offloads. A session is initialized as part of rte_eth_device start.
514  * A single vswitch instance can have multiple uplinks which means
515  * rte_eth_device start will be called for each of these devices.
516  * The ULP session manager makes sure that a single ULP session is only
517  * initialized once. Apart from this, it also initializes the MARK database,
518  * the EEM table and the flow database. The ULP session manager also manages
519  * a list of all opened ULP sessions.
520  */
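/*
 * Illustrative scenario (assumption based on the flow described above): when
 * two uplink ports of the same device are started, the first rte_eth_device
 * start opens the TF session below and the second start simply attaches to
 * the session that is already open.
 */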
521 static int32_t
522 ulp_ctx_session_open(struct bnxt *bp,
523 		     struct bnxt_ulp_session_state *session)
524 {
525 	struct rte_eth_dev		*ethdev = bp->eth_dev;
526 	int32_t				rc = 0;
527 	struct tf_open_session_parms	params;
528 	struct tf_session_resources	*resources;
529 	uint32_t ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST;
530 	uint8_t app_id;
531 
532 	memset(&params, 0, sizeof(params));
533 
534 	rc = rte_eth_dev_get_name_by_port(ethdev->data->port_id,
535 					  params.ctrl_chan_name);
536 	if (rc) {
537 		BNXT_TF_DBG(ERR, "Invalid port %d, rc = %d\n",
538 			    ethdev->data->port_id, rc);
539 		return rc;
540 	}
541 
542 	params.shadow_copy = true;
543 
544 	rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);
545 	if (rc) {
546 		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
547 		return -EINVAL;
548 	}
549 
550 	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
551 	if (rc) {
552 		BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
553 		return rc;
554 	}
555 
556 	switch (ulp_dev_id) {
557 	case BNXT_ULP_DEVICE_ID_WH_PLUS:
558 		params.device_type = TF_DEVICE_TYPE_WH;
559 		break;
560 	case BNXT_ULP_DEVICE_ID_STINGRAY:
561 		params.device_type = TF_DEVICE_TYPE_SR;
562 		break;
563 	case BNXT_ULP_DEVICE_ID_THOR:
564 		params.device_type = TF_DEVICE_TYPE_THOR;
565 		break;
566 	default:
567 		BNXT_TF_DBG(ERR, "Unable to determine device for opening session.\n");
568 		return -EINVAL;
569 	}
570 
571 	resources = &params.resources;
572 	rc = bnxt_ulp_tf_resources_get(bp->ulp_ctx, resources);
573 	if (rc)
574 		return rc;
575 
576 	params.bp = bp;
577 	if (app_id == 0 || app_id == 3)
578 		params.wc_num_slices = TF_WC_TCAM_2_SLICE_PER_ROW;
579 	else
580 		params.wc_num_slices = TF_WC_TCAM_1_SLICE_PER_ROW;
581 
582 	rc = tf_open_session(&bp->tfp, &params);
583 	if (rc) {
584 		BNXT_TF_DBG(ERR, "Failed to open TF session - %s, rc = %d\n",
585 			    params.ctrl_chan_name, rc);
586 		return -EINVAL;
587 	}
588 	if (!session->session_opened) {
589 		session->session_opened = 1;
590 		session->g_tfp = rte_zmalloc("bnxt_ulp_session_tfp",
591 					     sizeof(struct tf), 0);
		/* Bail out if the session data could not be allocated. */
		if (session->g_tfp == NULL) {
			BNXT_TF_DBG(ERR, "Failed to allocate session tfp data\n");
			return -ENOMEM;
		}
592 		session->g_tfp->session = bp->tfp.session;
593 	}
594 	return rc;
595 }
596 
597 /*
598  * Close the ULP session.
599  * It takes the ulp context pointer.
600  */
601 static void
602 ulp_ctx_session_close(struct bnxt *bp,
603 		      struct bnxt_ulp_session_state *session)
604 {
605 	/* close the session in the hardware */
606 	if (session->session_opened)
607 		tf_close_session(&bp->tfp);
608 	session->session_opened = 0;
609 	rte_free(session->g_tfp);
610 	session->g_tfp = NULL;
611 }
612 
613 static void
614 bnxt_init_tbl_scope_parms(struct bnxt *bp,
615 			  struct tf_alloc_tbl_scope_parms *params)
616 {
617 	struct bnxt_ulp_device_params	*dparms;
618 	uint32_t dev_id;
619 	int rc;
620 
621 	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
622 	if (rc)
623 		/* TBD: For now, just use default. */
624 		dparms = 0;
625 	else
626 		dparms = bnxt_ulp_device_params_get(dev_id);
627 
628 	/*
629 	 * Set the flush timer for EEM entries. The value is in 100ms intervals,
630 	 * so 100 is 10s.
631 	 */
632 	params->hw_flow_cache_flush_timer = 100;
633 
634 	if (!dparms) {
635 		params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
636 		params->rx_max_action_entry_sz_in_bits =
637 			BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
638 		params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
639 		params->rx_num_flows_in_k = BNXT_ULP_RX_NUM_FLOWS;
640 
641 		params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
642 		params->tx_max_action_entry_sz_in_bits =
643 			BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
644 		params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
645 		params->tx_num_flows_in_k = BNXT_ULP_TX_NUM_FLOWS;
646 	} else {
647 		params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
648 		params->rx_max_action_entry_sz_in_bits =
649 			BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
650 		params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
651 		params->rx_num_flows_in_k =
652 			dparms->ext_flow_db_num_entries / 1024;
653 
654 		params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
655 		params->tx_max_action_entry_sz_in_bits =
656 			BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
657 		params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
658 		params->tx_num_flows_in_k =
659 			dparms->ext_flow_db_num_entries / 1024;
660 	}
661 	BNXT_TF_DBG(INFO, "Table Scope initialized with %uK flows.\n",
662 		    params->rx_num_flows_in_k);
663 }
664 
665 /* Initialize Extended Exact Match host memory. */
666 static int32_t
667 ulp_eem_tbl_scope_init(struct bnxt *bp)
668 {
669 	struct tf_alloc_tbl_scope_parms params = {0};
670 	struct bnxt_ulp_device_params *dparms;
671 	enum bnxt_ulp_flow_mem_type mtype;
672 	uint32_t dev_id;
673 	int rc;
674 
675 	/* Get the dev specific number of flows that need to be supported. */
676 	if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
677 		BNXT_TF_DBG(ERR, "Invalid device id\n");
678 		return -EINVAL;
679 	}
680 
681 	dparms = bnxt_ulp_device_params_get(dev_id);
682 	if (!dparms) {
683 		BNXT_TF_DBG(ERR, "could not fetch the device params\n");
684 		return -ENODEV;
685 	}
686 
687 	if (bnxt_ulp_cntxt_mem_type_get(bp->ulp_ctx, &mtype))
688 		return -EINVAL;
689 	if (mtype != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
690 		BNXT_TF_DBG(INFO, "Table Scope alloc is not required\n");
691 		return 0;
692 	}
693 
694 	bnxt_init_tbl_scope_parms(bp, &params);
695 	rc = tf_alloc_tbl_scope(&bp->tfp, &params);
696 	if (rc) {
697 		BNXT_TF_DBG(ERR, "Unable to allocate eem table scope rc = %d\n",
698 			    rc);
699 		return rc;
700 	}
701 	rc = bnxt_ulp_cntxt_tbl_scope_id_set(bp->ulp_ctx, params.tbl_scope_id);
702 	if (rc) {
703 		BNXT_TF_DBG(ERR, "Unable to set table scope id\n");
704 		return rc;
705 	}
706 
707 	return 0;
708 }
709 
710 /* Free Extended Exact Match host memory */
711 static int32_t
712 ulp_eem_tbl_scope_deinit(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
713 {
714 	struct tf_free_tbl_scope_parms	params = {0};
715 	struct tf			*tfp;
716 	int32_t				rc = 0;
717 	struct bnxt_ulp_device_params *dparms;
718 	enum bnxt_ulp_flow_mem_type mtype;
719 	uint32_t dev_id;
720 
721 	if (!ulp_ctx || !ulp_ctx->cfg_data)
722 		return -EINVAL;
723 
724 	tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx, BNXT_ULP_SHARED_SESSION_NO);
725 	if (!tfp) {
726 		BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
727 		return -EINVAL;
728 	}
729 
730 	/* Get the dev specific number of flows that need to be supported. */
731 	if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
732 		BNXT_TF_DBG(ERR, "Invalid device id\n");
733 		return -EINVAL;
734 	}
735 
736 	dparms = bnxt_ulp_device_params_get(dev_id);
737 	if (!dparms) {
738 		BNXT_TF_DBG(ERR, "could not fetch the device params\n");
739 		return -ENODEV;
740 	}
741 
742 	if (bnxt_ulp_cntxt_mem_type_get(ulp_ctx, &mtype))
743 		return -EINVAL;
744 	if (mtype != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
745 		BNXT_TF_DBG(INFO, "Table Scope free is not required\n");
746 		return 0;
747 	}
748 
749 	rc = bnxt_ulp_cntxt_tbl_scope_id_get(ulp_ctx, &params.tbl_scope_id);
750 	if (rc) {
751 		BNXT_TF_DBG(ERR, "Failed to get the table scope id\n");
752 		return -EINVAL;
753 	}
754 
755 	rc = tf_free_tbl_scope(tfp, &params);
756 	if (rc) {
757 		BNXT_TF_DBG(ERR, "Unable to free table scope\n");
758 		return -EINVAL;
759 	}
760 	return rc;
761 }
762 
763 /* The function to free and deinit the ulp context data. */
764 static int32_t
765 ulp_ctx_deinit(struct bnxt *bp,
766 	       struct bnxt_ulp_session_state *session)
767 {
768 	/* close the tf session */
769 	ulp_ctx_session_close(bp, session);
770 
771 	/* The shared session must be closed last. */
772 	ulp_ctx_shared_session_close(bp, session);
773 
774 	/* Free the contents */
775 	if (session->cfg_data) {
776 		rte_free(session->cfg_data);
777 		bp->ulp_ctx->cfg_data = NULL;
778 		session->cfg_data = NULL;
779 	}
780 	return 0;
781 }
782 
783 /* The function to allocate and initialize the ulp context data. */
784 static int32_t
785 ulp_ctx_init(struct bnxt *bp,
786 	     struct bnxt_ulp_session_state *session)
787 {
788 	struct bnxt_ulp_data	*ulp_data;
789 	int32_t			rc = 0;
790 	enum bnxt_ulp_device_id devid;
791 
792 	/* Initialize the context entries list */
793 	bnxt_ulp_cntxt_list_init();
794 
795 	/* Add the context to the context entries list */
796 	rc = bnxt_ulp_cntxt_list_add(bp->ulp_ctx);
797 	if (rc) {
798 		BNXT_TF_DBG(ERR, "Failed to add the context list entry\n");
799 		return -ENOMEM;
800 	}
801 
802 	/* Allocate memory to hold ulp context data. */
803 	ulp_data = rte_zmalloc("bnxt_ulp_data",
804 			       sizeof(struct bnxt_ulp_data), 0);
805 	if (!ulp_data) {
806 		BNXT_TF_DBG(ERR, "Failed to allocate memory for ulp data\n");
807 		return -ENOMEM;
808 	}
809 
810 	/* Increment the ulp context data reference count usage. */
811 	bp->ulp_ctx->cfg_data = ulp_data;
812 	session->cfg_data = ulp_data;
813 	ulp_data->ref_cnt++;
814 	ulp_data->ulp_flags |= BNXT_ULP_VF_REP_ENABLED;
815 
816 	rc = bnxt_ulp_devid_get(bp, &devid);
817 	if (rc) {
818 		BNXT_TF_DBG(ERR, "Unable to determine device for ULP init.\n");
819 		goto error_deinit;
820 	}
821 
822 	rc = bnxt_ulp_cntxt_dev_id_set(bp->ulp_ctx, devid);
823 	if (rc) {
824 		BNXT_TF_DBG(ERR, "Unable to set device for ULP init.\n");
825 		goto error_deinit;
826 	}
827 
828 	rc = bnxt_ulp_cntxt_app_id_set(bp->ulp_ctx, bp->app_id);
829 	if (rc) {
830 		BNXT_TF_DBG(ERR, "Unable to set app_id for ULP init.\n");
831 		goto error_deinit;
832 	}
833 	BNXT_TF_DBG(DEBUG, "Ulp initialized with app id %d\n", bp->app_id);
834 
835 	rc = bnxt_ulp_cntxt_app_caps_init(bp->ulp_ctx, bp->app_id, devid);
836 	if (rc) {
837 		BNXT_TF_DBG(ERR, "Unable to set caps for app(%x)/dev(%x)\n",
838 			    bp->app_id, devid);
839 		goto error_deinit;
840 	}
841 
842 	/*
843 	 * The shared session must be created before the first regular session,
844 	 * but after the ulp_ctx is valid.
845 	 */
846 	rc = ulp_ctx_shared_session_open(bp, session);
847 	if (rc) {
848 		BNXT_TF_DBG(ERR, "Unable to open shared session (%d)\n", rc);
849 		goto error_deinit;
850 	}
851 
852 	/* Open the ulp session. */
853 	rc = ulp_ctx_session_open(bp, session);
854 	if (rc)
855 		goto error_deinit;
856 
857 	bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
858 	return rc;
859 
860 error_deinit:
861 	session->session_opened = 1;
862 	(void)ulp_ctx_deinit(bp, session);
863 	return rc;
864 }
865 
866 /* The function to initialize ulp dparms with devargs */
867 static int32_t
868 ulp_dparms_init(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
869 {
870 	struct bnxt_ulp_device_params *dparms;
871 	uint32_t dev_id = BNXT_ULP_DEVICE_ID_LAST;
872 
873 	if (!bp->max_num_kflows) {
874 		/* Defaults to Internal */
875 		bnxt_ulp_cntxt_mem_type_set(ulp_ctx,
876 					    BNXT_ULP_FLOW_MEM_TYPE_INT);
877 		return 0;
878 	}
879 
880 	/* The max_num_kflows was set, so move to external */
881 	if (bnxt_ulp_cntxt_mem_type_set(ulp_ctx, BNXT_ULP_FLOW_MEM_TYPE_EXT))
882 		return -EINVAL;
883 
884 	if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id)) {
885 		BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
886 		return -EINVAL;
887 	}
888 
889 	dparms = bnxt_ulp_device_params_get(dev_id);
890 	if (!dparms) {
891 		BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
892 		return -EINVAL;
893 	}
894 
895 	/* num_flows = max_num_kflows * 1024 */
896 	dparms->ext_flow_db_num_entries = bp->max_num_kflows * 1024;
897 	/* GFID =  2 * num_flows */
898 	dparms->mark_db_gfid_entries = dparms->ext_flow_db_num_entries * 2;
899 	BNXT_TF_DBG(DEBUG, "Set the number of flows = %" PRIu64 "\n",
900 		    dparms->ext_flow_db_num_entries);
901 
902 	return 0;
903 }
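/*
 * Worked example (illustrative): with the max_num_kflows devarg set to 32,
 * ext_flow_db_num_entries becomes 32 * 1024 = 32768 flows and
 * mark_db_gfid_entries becomes 2 * 32768 = 65536 GFID entries.
 */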
904 
905 /* The function to initialize bp flags with truflow features */
906 static int32_t
907 ulp_dparms_dev_port_intf_update(struct bnxt *bp,
908 				struct bnxt_ulp_context *ulp_ctx)
909 {
910 	enum bnxt_ulp_flow_mem_type mtype;
911 
912 	if (bnxt_ulp_cntxt_mem_type_get(ulp_ctx, &mtype))
913 		return -EINVAL;
914 	/* Update the bp flag with gfid flag */
915 	if (mtype == BNXT_ULP_FLOW_MEM_TYPE_EXT)
916 		bp->flags |= BNXT_FLAG_GFID_ENABLE;
917 
918 	return 0;
919 }
920 
921 static int32_t
922 ulp_ctx_attach(struct bnxt *bp,
923 	       struct bnxt_ulp_session_state *session)
924 {
925 	int32_t rc = 0;
926 	uint32_t flags, dev_id = BNXT_ULP_DEVICE_ID_LAST;
927 	uint8_t app_id;
928 
929 	/* Increment the ulp context data reference count usage. */
930 	bp->ulp_ctx->cfg_data = session->cfg_data;
931 	bp->ulp_ctx->cfg_data->ref_cnt++;
932 
933 	/* update the session details in bnxt tfp */
934 	bp->tfp.session = session->g_tfp->session;
935 
936 	/* Add the context to the context entries list */
937 	rc = bnxt_ulp_cntxt_list_add(bp->ulp_ctx);
938 	if (rc) {
939 		BNXT_TF_DBG(ERR, "Failed to add the context list entry\n");
940 		return -EINVAL;
941 	}
942 
943 	/*
944 	 * The supported flag will be set during the init. Use it now to
945 	 * know if we should go through the attach.
946 	 */
947 	rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);
948 	if (rc) {
949 		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
950 		return -EINVAL;
951 	}
952 
953 	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
954 	if (rc) {
955 		BNXT_TF_DBG(ERR, "Unable to get the dev_id.\n");
956 		return -EINVAL;
957 	}
958 
959 	flags = bp->ulp_ctx->cfg_data->ulp_flags;
960 	if (ULP_APP_DEV_UNSUPPORTED_ENABLED(flags)) {
961 		BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
962 			    app_id, dev_id);
963 		return -EINVAL;
964 	}
965 
966 	/* Create a TF Client */
967 	rc = ulp_ctx_session_open(bp, session);
968 	if (rc) {
969 		PMD_DRV_LOG(ERR, "Failed to open ctxt session, rc:%d\n", rc);
970 		bp->tfp.session = NULL;
971 		return rc;
972 	}
973 
974 	bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
975 	return rc;
976 }
977 
978 static void
979 ulp_ctx_detach(struct bnxt *bp)
980 {
981 	if (bp->tfp.session) {
982 		tf_close_session(&bp->tfp);
983 		bp->tfp.session = NULL;
984 	}
985 }
986 
987 /*
988  * Initialize the state of an ULP session.
989  * If the state of an ULP session is not initialized, set its state to
990  * initialized. If the state is already initialized, do nothing.
991  */
992 static void
993 ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init)
994 {
995 	pthread_mutex_lock(&session->bnxt_ulp_mutex);
996 
997 	if (!session->bnxt_ulp_init) {
998 		session->bnxt_ulp_init = true;
999 		*init = false;
1000 	} else {
1001 		*init = true;
1002 	}
1003 
1004 	pthread_mutex_unlock(&session->bnxt_ulp_mutex);
1005 }
1006 
1007 /*
1008  * Check if an ULP session is already allocated for a specific PCI
1009  * domain & bus. If it is already allocated, simply return the session
1010  * pointer, otherwise return NULL.
1011  */
1012 static struct bnxt_ulp_session_state *
1013 ulp_get_session(struct bnxt *bp, struct rte_pci_addr *pci_addr)
1014 {
1015 	struct bnxt_ulp_session_state *session;
1016 
1017 	/* if multi root capability is enabled, then ignore the pci bus id */
1018 	STAILQ_FOREACH(session, &bnxt_ulp_session_list, next) {
1019 		if (session->pci_info.domain == pci_addr->domain &&
1020 		    (BNXT_MULTIROOT_EN(bp) ||
1021 		    session->pci_info.bus == pci_addr->bus)) {
1022 			return session;
1023 		}
1024 	}
1025 	return NULL;
1026 }
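/*
 * Illustrative example (assumed addresses): two PFs on the same PCI bus,
 * e.g. 0000:01:00.0 and 0000:01:00.1, match on domain and bus above and so
 * share a single bnxt_ulp_session_state. With multi-root enabled, only the
 * domain has to match.
 */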
1027 
1028 /*
1029  * Allocate and initialize an ULP session and set its state to INITIALIZED.
1030  * If it's already initialized, simply return the already existing session.
1031  */
1032 static struct bnxt_ulp_session_state *
1033 ulp_session_init(struct bnxt *bp,
1034 		 bool *init)
1035 {
1036 	struct rte_pci_device		*pci_dev;
1037 	struct rte_pci_addr		*pci_addr;
1038 	struct bnxt_ulp_session_state	*session;
1039 	int rc = 0;
1040 
1041 	if (!bp)
1042 		return NULL;
1043 
1044 	pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
1045 	pci_addr = &pci_dev->addr;
1046 
1047 	pthread_mutex_lock(&bnxt_ulp_global_mutex);
1048 
1049 	session = ulp_get_session(bp, pci_addr);
1050 	if (!session) {
1051 		/* Session not found; allocate a new one */
1052 		session = rte_zmalloc("bnxt_ulp_session",
1053 				      sizeof(struct bnxt_ulp_session_state),
1054 				      0);
1055 		if (!session) {
1056 			BNXT_TF_DBG(ERR,
1057 				    "Allocation failed for bnxt_ulp_session\n");
1058 			pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1059 			return NULL;
1060 
1061 		} else {
1062 			/* Add it to the queue */
1063 			session->pci_info.domain = pci_addr->domain;
1064 			session->pci_info.bus = pci_addr->bus;
1065 			rc = pthread_mutex_init(&session->bnxt_ulp_mutex, NULL);
1066 			if (rc) {
1067 				BNXT_TF_DBG(ERR, "mutex create failed\n");
1068 				pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1069 				return NULL;
1070 			}
1071 			STAILQ_INSERT_TAIL(&bnxt_ulp_session_list,
1072 					   session, next);
1073 		}
1074 	}
1075 	ulp_context_initialized(session, init);
1076 	pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1077 	return session;
1078 }
1079 
1080 /*
1081  * When a device is closed, remove its associated session from the global
1082  * session list.
1083  */
1084 static void
1085 ulp_session_deinit(struct bnxt_ulp_session_state *session)
1086 {
1087 	if (!session)
1088 		return;
1089 
1090 	if (!session->cfg_data) {
1091 		pthread_mutex_lock(&bnxt_ulp_global_mutex);
1092 		STAILQ_REMOVE(&bnxt_ulp_session_list, session,
1093 			      bnxt_ulp_session_state, next);
1094 		pthread_mutex_destroy(&session->bnxt_ulp_mutex);
1095 		rte_free(session);
1096 		pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1097 	}
1098 }
1099 
1100 /*
1101  * Internal API to enable the NAT feature.
1102  * Set set_flag to 1 to set the value or to 0 to reset the value.
1103  * Returns 0 on success.
1104  */
1105 static int32_t
1106 bnxt_ulp_global_cfg_update(struct bnxt *bp,
1107 			   enum tf_dir dir,
1108 			   enum tf_global_config_type type,
1109 			   uint32_t offset,
1110 			   uint32_t value,
1111 			   uint32_t set_flag)
1112 {
1113 	uint32_t global_cfg = 0;
1114 	int rc;
1115 	struct tf_global_cfg_parms parms = { 0 };
1116 
1117 	/* Initialize the params */
1118 	parms.dir = dir,
1119 	parms.type = type,
1120 	parms.offset = offset,
1121 	parms.config = (uint8_t *)&global_cfg,
1122 	parms.config_sz_in_bytes = sizeof(global_cfg);
1123 
1124 	rc = tf_get_global_cfg(&bp->tfp, &parms);
1125 	if (rc) {
1126 		BNXT_TF_DBG(ERR, "Failed to get global cfg 0x%x rc:%d\n",
1127 			    type, rc);
1128 		return rc;
1129 	}
1130 
1131 	if (set_flag)
1132 		global_cfg |= value;
1133 	else
1134 		global_cfg &= ~value;
1135 
1136 	/* SET the register RE_CFA_REG_ACT_TECT */
1137 	rc = tf_set_global_cfg(&bp->tfp, &parms);
1138 	if (rc) {
1139 		BNXT_TF_DBG(ERR, "Failed to set global cfg 0x%x rc:%d\n",
1140 			    type, rc);
1141 		return rc;
1142 	}
1143 	return rc;
1144 }
1145 
1146 /* Internal function to delete all the flows belonging to the given port */
1147 static void
1148 bnxt_ulp_flush_port_flows(struct bnxt *bp)
1149 {
1150 	uint16_t func_id;
1151 
1152 	/* it is assumed that port is either TVF or PF */
1153 	if (ulp_port_db_port_func_id_get(bp->ulp_ctx,
1154 					 bp->eth_dev->data->port_id,
1155 					 &func_id)) {
1156 		BNXT_TF_DBG(ERR, "Invalid argument\n");
1157 		return;
1158 	}
1159 	(void)ulp_flow_db_function_flow_flush(bp->ulp_ctx, func_id);
1160 }
1161 
1162 /* Internal function to delete the VFR default flows */
1163 static void
1164 bnxt_ulp_destroy_vfr_default_rules(struct bnxt *bp, bool global)
1165 {
1166 	struct bnxt_ulp_vfr_rule_info *info;
1167 	uint16_t port_id;
1168 	struct rte_eth_dev *vfr_eth_dev;
1169 	struct bnxt_representor *vfr_bp;
1170 
1171 	if (!BNXT_TRUFLOW_EN(bp) || BNXT_ETH_DEV_IS_REPRESENTOR(bp->eth_dev))
1172 		return;
1173 
1174 	if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
1175 		return;
1176 
1177 	/* Delete default rules for all ports */
1178 	for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
1179 		info = &bp->ulp_ctx->cfg_data->vfr_rule_info[port_id];
1180 		if (!info->valid)
1181 			continue;
1182 
1183 		if (!global && info->parent_port_id !=
1184 		    bp->eth_dev->data->port_id)
1185 			continue;
1186 
1187 		/* Destroy the flows */
1188 		ulp_default_flow_destroy(bp->eth_dev, info->vfr_flow_id);
1189 		/* Clean up the tx action pointer */
1190 		vfr_eth_dev = &rte_eth_devices[port_id];
1191 		if (vfr_eth_dev) {
1192 			vfr_bp = vfr_eth_dev->data->dev_private;
1193 			vfr_bp->vfr_tx_cfa_action = 0;
1194 		}
1195 		memset(info, 0, sizeof(struct bnxt_ulp_vfr_rule_info));
1196 	}
1197 }
1198 
1199 /*
1200  * When a port is de-initialized by DPDK, this function is called
1201  * to clear the ULP context and the rest of the infrastructure
1202  * associated with it.
1203  */
1204 static void
1205 bnxt_ulp_deinit(struct bnxt *bp,
1206 		struct bnxt_ulp_session_state *session)
1207 {
1208 	bool ha_enabled;
1209 
1210 	if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
1211 		return;
1212 
1213 	ha_enabled = bnxt_ulp_cntxt_ha_enabled(bp->ulp_ctx);
1214 	if (ha_enabled && session->session_opened) {
1215 		int32_t rc = ulp_ha_mgr_close(bp->ulp_ctx);
1216 		if (rc)
1217 			BNXT_TF_DBG(ERR, "Failed to close HA (%d)\n", rc);
1218 	}
1219 
1220 	/* clean up default flows */
1221 	bnxt_ulp_destroy_df_rules(bp, true);
1222 
1223 	/* clean up default VFR flows */
1224 	bnxt_ulp_destroy_vfr_default_rules(bp, true);
1225 
1226 	/* clean up regular flows */
1227 	ulp_flow_db_flush_flows(bp->ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR);
1228 
1229 	/* cleanup the eem table scope */
1230 	ulp_eem_tbl_scope_deinit(bp, bp->ulp_ctx);
1231 
1232 	/* cleanup the flow database */
1233 	ulp_flow_db_deinit(bp->ulp_ctx);
1234 
1235 	/* Delete the Mark database */
1236 	ulp_mark_db_deinit(bp->ulp_ctx);
1237 
1238 	/* cleanup the ulp mapper */
1239 	ulp_mapper_deinit(bp->ulp_ctx);
1240 
1241 	/* Delete the Flow Counter Manager */
1242 	ulp_fc_mgr_deinit(bp->ulp_ctx);
1243 
1244 	/* Delete the Port database */
1245 	ulp_port_db_deinit(bp->ulp_ctx);
1246 
1247 	/* Disable NAT feature */
1248 	(void)bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
1249 					 TF_TUNNEL_ENCAP_NAT,
1250 					 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);
1251 
1252 	(void)bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
1253 					 TF_TUNNEL_ENCAP_NAT,
1254 					 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);
1255 
1256 	/* free the flow db lock */
1257 	pthread_mutex_destroy(&bp->ulp_ctx->cfg_data->flow_db_lock);
1258 
1259 	if (ha_enabled)
1260 		ulp_ha_mgr_deinit(bp->ulp_ctx);
1261 
1262 	/* Delete the ulp context and tf session and free the ulp context */
1263 	ulp_ctx_deinit(bp, session);
1264 	BNXT_TF_DBG(DEBUG, "ulp ctx has been deinitialized\n");
1265 }
1266 
1267 /*
1268  * When a port is initialized by DPDK, this function is called
1269  * to initialize the ULP context and the rest of the infrastructure
1270  * associated with it.
1271  */
1272 static int32_t
1273 bnxt_ulp_init(struct bnxt *bp,
1274 	      struct bnxt_ulp_session_state *session)
1275 {
1276 	int rc;
1277 
1278 	/* Allocate and Initialize the ulp context. */
1279 	rc = ulp_ctx_init(bp, session);
1280 	if (rc) {
1281 		BNXT_TF_DBG(ERR, "Failed to create the ulp context\n");
1282 		goto jump_to_error;
1283 	}
1284 
1285 	rc = pthread_mutex_init(&bp->ulp_ctx->cfg_data->flow_db_lock, NULL);
1286 	if (rc) {
1287 		BNXT_TF_DBG(ERR, "Unable to initialize flow db lock\n");
1288 		goto jump_to_error;
1289 	}
1290 
1291 	/* Initialize ulp dparms with values devargs passed */
1292 	rc = ulp_dparms_init(bp, bp->ulp_ctx);
1293 	if (rc) {
1294 		BNXT_TF_DBG(ERR, "Failed to initialize the dparms\n");
1295 		goto jump_to_error;
1296 	}
1297 
1298 	/* create the port database */
1299 	rc = ulp_port_db_init(bp->ulp_ctx, bp->port_cnt);
1300 	if (rc) {
1301 		BNXT_TF_DBG(ERR, "Failed to create the port database\n");
1302 		goto jump_to_error;
1303 	}
1304 
1305 	/* Create the Mark database. */
1306 	rc = ulp_mark_db_init(bp->ulp_ctx);
1307 	if (rc) {
1308 		BNXT_TF_DBG(ERR, "Failed to create the mark database\n");
1309 		goto jump_to_error;
1310 	}
1311 
1312 	/* Create the flow database. */
1313 	rc = ulp_flow_db_init(bp->ulp_ctx);
1314 	if (rc) {
1315 		BNXT_TF_DBG(ERR, "Failed to create the flow database\n");
1316 		goto jump_to_error;
1317 	}
1318 
1319 	/* Create the eem table scope. */
1320 	rc = ulp_eem_tbl_scope_init(bp);
1321 	if (rc) {
1322 		BNXT_TF_DBG(ERR, "Failed to create the eem scope table\n");
1323 		goto jump_to_error;
1324 	}
1325 
1326 	rc = ulp_mapper_init(bp->ulp_ctx);
1327 	if (rc) {
1328 		BNXT_TF_DBG(ERR, "Failed to initialize ulp mapper\n");
1329 		goto jump_to_error;
1330 	}
1331 
1332 	rc = ulp_fc_mgr_init(bp->ulp_ctx);
1333 	if (rc) {
1334 		BNXT_TF_DBG(ERR, "Failed to initialize ulp flow counter mgr\n");
1335 		goto jump_to_error;
1336 	}
1337 
1338 	/*
1339 	 * Enable NAT feature. Set the global configuration register
1340 	 * Tunnel encap to enable NAT with the reuse of existing inner
1341 	 * L2 header smac and dmac
1342 	 */
1343 	rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
1344 					TF_TUNNEL_ENCAP_NAT,
1345 					BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
1346 	if (rc) {
1347 		BNXT_TF_DBG(ERR, "Failed to set rx global configuration\n");
1348 		goto jump_to_error;
1349 	}
1350 
1351 	rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
1352 					TF_TUNNEL_ENCAP_NAT,
1353 					BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
1354 	if (rc) {
1355 		BNXT_TF_DBG(ERR, "Failed to set tx global configuration\n");
1356 		goto jump_to_error;
1357 	}
1358 
1359 	if (bnxt_ulp_cntxt_ha_enabled(bp->ulp_ctx)) {
1360 		rc = ulp_ha_mgr_init(bp->ulp_ctx);
1361 		if (rc) {
1362 			BNXT_TF_DBG(ERR, "Failed to initialize HA %d\n", rc);
1363 			goto jump_to_error;
1364 		}
1365 		rc = ulp_ha_mgr_open(bp->ulp_ctx);
1366 		if (rc) {
1367 			BNXT_TF_DBG(ERR, "Failed to Process HA Open %d\n", rc);
1368 			goto jump_to_error;
1369 		}
1370 	}
1371 	BNXT_TF_DBG(DEBUG, "ulp ctx has been initialized\n");
1372 	return rc;
1373 
1374 jump_to_error:
1375 	bnxt_ulp_deinit(bp, session);
1376 	return rc;
1377 }
1378 
1379 /*
1380  * When a port is initialized by DPDK, this function sets up
1381  * the port-specific details.
1382  */
1383 int32_t
1384 bnxt_ulp_port_init(struct bnxt *bp)
1385 {
1386 	struct bnxt_ulp_session_state *session;
1387 	bool initialized;
1388 	enum bnxt_ulp_device_id devid = BNXT_ULP_DEVICE_ID_LAST;
1389 	uint32_t ulp_flags;
1390 	int32_t rc = 0;
1391 
1392 	if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1393 		BNXT_TF_DBG(ERR,
1394 			    "Skip ulp init for port: %d, not a TVF or PF\n",
1395 			    bp->eth_dev->data->port_id);
1396 		return rc;
1397 	}
1398 
1399 	if (!BNXT_TRUFLOW_EN(bp)) {
1400 		BNXT_TF_DBG(ERR,
1401 			    "Skip ulp init for port: %d, truflow is not enabled\n",
1402 			    bp->eth_dev->data->port_id);
1403 		return rc;
1404 	}
1405 
1406 	if (bp->ulp_ctx) {
1407 		BNXT_TF_DBG(DEBUG, "ulp ctx already allocated\n");
1408 		return rc;
1409 	}
1410 
1411 	bp->ulp_ctx = rte_zmalloc("bnxt_ulp_ctx",
1412 				  sizeof(struct bnxt_ulp_context), 0);
1413 	if (!bp->ulp_ctx) {
1414 		BNXT_TF_DBG(ERR, "Failed to allocate ulp ctx\n");
1415 		return -ENOMEM;
1416 	}
1417 
1418 	/*
1419 	 * Multiple uplink ports can be associated with a single vswitch.
1420 	 * Make sure only the port that is started first will initialize
1421 	 * the TF session.
1422 	 */
1423 	session = ulp_session_init(bp, &initialized);
1424 	if (!session) {
1425 		BNXT_TF_DBG(ERR, "Failed to initialize the tf session\n");
1426 		rc = -EIO;
1427 		goto jump_to_error;
1428 	}
1429 
1430 	if (initialized) {
1431 		/*
1432 		 * If ULP is already initialized for a specific domain then
1433 		 * simply assign the ulp context to this rte_eth_dev.
1434 		 */
1435 		rc = ulp_ctx_attach(bp, session);
1436 		if (rc) {
1437 			BNXT_TF_DBG(ERR, "Failed to attach the ulp context\n");
1438 			goto jump_to_error;
1439 		}
1440 
1441 		/*
1442 		 * Attach to the shared session, must be called after the
1443 		 * ulp_ctx_attach in order to ensure that ulp data is available
1444 		 * for attaching.
1445 		 */
1446 		rc = ulp_ctx_shared_session_attach(bp, session);
1447 		if (rc) {
1448 			BNXT_TF_DBG(ERR,
1449 				    "Failed to attach to shared session (%d)\n", rc);
1450 			goto jump_to_error;
1451 		}
1452 	} else {
1453 		rc = bnxt_ulp_init(bp, session);
1454 		if (rc) {
1455 			BNXT_TF_DBG(ERR, "Failed to initialize ULP\n");
1456 			goto jump_to_error;
1457 		}
1458 	}
1459 
1460 	/* Update bnxt driver flags */
1461 	rc = ulp_dparms_dev_port_intf_update(bp, bp->ulp_ctx);
1462 	if (rc) {
1463 		BNXT_TF_DBG(ERR, "Failed to update driver flags\n");
1464 		goto jump_to_error;
1465 	}
1466 
1467 	/* update the port database for the given interface */
1468 	rc = ulp_port_db_dev_port_intf_update(bp->ulp_ctx, bp->eth_dev);
1469 	if (rc) {
1470 		BNXT_TF_DBG(ERR, "Failed to update port database\n");
1471 		goto jump_to_error;
1472 	}
1473 	/* create the default rules */
1474 	rc = bnxt_ulp_create_df_rules(bp);
1475 	if (rc) {
1476 		BNXT_TF_DBG(ERR, "Failed to create default flow\n");
1477 		goto jump_to_error;
1478 	}
1479 
1480 	rc = bnxt_ulp_devid_get(bp, &devid);
1481 	if (rc) {
1482 		BNXT_TF_DBG(ERR, "Unable to determine device for ULP port init.\n");
1483 		goto jump_to_error;
1484 	}
1485 
1486 	/* set the accumulation of the stats */
1487 	if (BNXT_ACCUM_STATS_EN(bp))
1488 		bp->ulp_ctx->cfg_data->accum_stats = true;
1489 
1490 	BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port init, accum_stats:%d\n",
1491 		    bp->eth_dev->data->port_id,
1492 		    bp->ulp_ctx->cfg_data->accum_stats);
1493 
1494 	/* set the unicast mode */
1495 	if (bnxt_ulp_cntxt_ptr2_ulp_flags_get(bp->ulp_ctx, &ulp_flags)) {
1496 		BNXT_TF_DBG(ERR, "Error in getting ULP context flags\n");
1497 		goto jump_to_error;
1498 	}
1499 	if (ulp_flags & BNXT_ULP_APP_UNICAST_ONLY) {
1500 		if (bnxt_pmd_set_unicast_rxmask(bp->eth_dev)) {
1501 			BNXT_TF_DBG(ERR, "Error in setting unicast rxmode\n");
1502 			goto jump_to_error;
1503 		}
1504 	}
1505 
1506 	return rc;
1507 
1508 jump_to_error:
1509 	bnxt_ulp_port_deinit(bp);
1510 	return rc;
1511 }
1512 
1513 /*
1514  * When a port is de-initialized by DPDK, this function cleans up
1515  * the port-specific details.
1516  */
1517 void
1518 bnxt_ulp_port_deinit(struct bnxt *bp)
1519 {
1520 	struct bnxt_ulp_session_state *session;
1521 	struct rte_pci_device *pci_dev;
1522 	struct rte_pci_addr *pci_addr;
1523 
1524 	if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1525 		BNXT_TF_DBG(ERR,
1526 			    "Skip ULP deinit port:%d, not a TVF or PF\n",
1527 			    bp->eth_dev->data->port_id);
1528 		return;
1529 	}
1530 
1531 	if (!BNXT_TRUFLOW_EN(bp)) {
1532 		BNXT_TF_DBG(ERR,
1533 			    "Skip ULP deinit for port:%d, truflow is not enabled\n",
1534 			    bp->eth_dev->data->port_id);
1535 		return;
1536 	}
1537 
1538 	if (!bp->ulp_ctx) {
1539 		BNXT_TF_DBG(DEBUG, "ulp ctx already de-allocated\n");
1540 		return;
1541 	}
1542 
1543 	BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port deinit\n",
1544 		    bp->eth_dev->data->port_id);
1545 
1546 	/* Free the ulp context in the context entry list */
1547 	bnxt_ulp_cntxt_list_del(bp->ulp_ctx);
1548 
1549 	/* Get the session details  */
1550 	pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
1551 	pci_addr = &pci_dev->addr;
1552 	pthread_mutex_lock(&bnxt_ulp_global_mutex);
1553 	session = ulp_get_session(bp, pci_addr);
1554 	pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1555 
1556 	/* session not found then just exit */
1557 	if (!session) {
1558 		/* Free the ulp context */
1559 		rte_free(bp->ulp_ctx);
1560 		bp->ulp_ctx = NULL;
1561 		return;
1562 	}
1563 
1564 	/* Check the reference count to deinit or detach */
1565 	if (bp->ulp_ctx->cfg_data && bp->ulp_ctx->cfg_data->ref_cnt) {
1566 		bp->ulp_ctx->cfg_data->ref_cnt--;
1567 		if (bp->ulp_ctx->cfg_data->ref_cnt) {
1568 			/* free the port details */
1569 			/* Free the default flow rule associated to this port */
1570 			bnxt_ulp_destroy_df_rules(bp, false);
1571 			bnxt_ulp_destroy_vfr_default_rules(bp, false);
1572 
1573 			/* free flows associated with this port */
1574 			bnxt_ulp_flush_port_flows(bp);
1575 
1576 			/* close the session associated with this port */
1577 			ulp_ctx_detach(bp);
1578 
1579 			/* always detach/close shared after the session. */
1580 			ulp_ctx_shared_session_detach(bp);
1581 		} else {
1582 			/* Perform ulp ctx deinit */
1583 			bnxt_ulp_deinit(bp, session);
1584 		}
1585 	}
1586 
1587 	/* clean up the session */
1588 	ulp_session_deinit(session);
1589 
1590 	/* Free the ulp context */
1591 	rte_free(bp->ulp_ctx);
1592 	bp->ulp_ctx = NULL;
1593 }
1594 
1595 /* Below are the access functions for the internal data of the ulp context. */
1596 /* Function to set the Mark DB into the context */
1597 int32_t
1598 bnxt_ulp_cntxt_ptr2_mark_db_set(struct bnxt_ulp_context *ulp_ctx,
1599 				struct bnxt_ulp_mark_tbl *mark_tbl)
1600 {
1601 	if (!ulp_ctx || !ulp_ctx->cfg_data) {
1602 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1603 		return -EINVAL;
1604 	}
1605 
1606 	ulp_ctx->cfg_data->mark_tbl = mark_tbl;
1607 
1608 	return 0;
1609 }
1610 
1611 /* Function to retrieve the Mark DB from the context. */
1612 struct bnxt_ulp_mark_tbl *
1613 bnxt_ulp_cntxt_ptr2_mark_db_get(struct bnxt_ulp_context *ulp_ctx)
1614 {
1615 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1616 		return NULL;
1617 
1618 	return ulp_ctx->cfg_data->mark_tbl;
1619 }
1620 
1621 bool
1622 bnxt_ulp_cntxt_shared_session_enabled(struct bnxt_ulp_context *ulp_ctx)
1623 {
1624 	return ULP_SHARED_SESSION_IS_ENABLED(ulp_ctx->cfg_data->ulp_flags);
1625 }
1626 
1627 int32_t
1628 bnxt_ulp_cntxt_app_id_set(struct bnxt_ulp_context *ulp_ctx, uint8_t app_id)
1629 {
1630 	if (!ulp_ctx)
1631 		return -EINVAL;
1632 	ulp_ctx->cfg_data->app_id = app_id;
1633 	return 0;
1634 }
1635 
1636 int32_t
1637 bnxt_ulp_cntxt_app_id_get(struct bnxt_ulp_context *ulp_ctx, uint8_t *app_id)
1638 {
1639 	/* Default APP id is zero */
1640 	if (!ulp_ctx || !app_id)
1641 		return -EINVAL;
1642 	*app_id = ulp_ctx->cfg_data->app_id;
1643 	return 0;
1644 }
1645 
1646 /* Function to set the device id of the hardware. */
1647 int32_t
1648 bnxt_ulp_cntxt_dev_id_set(struct bnxt_ulp_context *ulp_ctx,
1649 			  uint32_t dev_id)
1650 {
1651 	if (ulp_ctx && ulp_ctx->cfg_data) {
1652 		ulp_ctx->cfg_data->dev_id = dev_id;
1653 		return 0;
1654 	}
1655 
1656 	return -EINVAL;
1657 }
1658 
1659 /* Function to get the device id of the hardware. */
1660 int32_t
1661 bnxt_ulp_cntxt_dev_id_get(struct bnxt_ulp_context *ulp_ctx,
1662 			  uint32_t *dev_id)
1663 {
1664 	if (ulp_ctx && ulp_ctx->cfg_data) {
1665 		*dev_id = ulp_ctx->cfg_data->dev_id;
1666 		return 0;
1667 	}
1668 	*dev_id = BNXT_ULP_DEVICE_ID_LAST;
1669 	BNXT_TF_DBG(ERR, "Failed to read dev_id from ulp ctxt\n");
1670 	return -EINVAL;
1671 }
1672 
1673 int32_t
1674 bnxt_ulp_cntxt_mem_type_set(struct bnxt_ulp_context *ulp_ctx,
1675 			    enum bnxt_ulp_flow_mem_type mem_type)
1676 {
1677 	if (ulp_ctx && ulp_ctx->cfg_data) {
1678 		ulp_ctx->cfg_data->mem_type = mem_type;
1679 		return 0;
1680 	}
1681 	BNXT_TF_DBG(ERR, "Failed to write mem_type in ulp ctxt\n");
1682 	return -EINVAL;
1683 }
1684 
1685 int32_t
1686 bnxt_ulp_cntxt_mem_type_get(struct bnxt_ulp_context *ulp_ctx,
1687 			    enum bnxt_ulp_flow_mem_type *mem_type)
1688 {
1689 	if (ulp_ctx && ulp_ctx->cfg_data) {
1690 		*mem_type = ulp_ctx->cfg_data->mem_type;
1691 		return 0;
1692 	}
1693 	*mem_type = BNXT_ULP_FLOW_MEM_TYPE_LAST;
1694 	BNXT_TF_DBG(ERR, "Failed to read mem_type in ulp ctxt\n");
1695 	return -EINVAL;
1696 }
1697 
1698 /* Function to get the table scope id of the EEM table. */
1699 int32_t
1700 bnxt_ulp_cntxt_tbl_scope_id_get(struct bnxt_ulp_context *ulp_ctx,
1701 				uint32_t *tbl_scope_id)
1702 {
1703 	if (ulp_ctx && ulp_ctx->cfg_data) {
1704 		*tbl_scope_id = ulp_ctx->cfg_data->tbl_scope_id;
1705 		return 0;
1706 	}
1707 
1708 	return -EINVAL;
1709 }
1710 
1711 /* Function to set the table scope id of the EEM table. */
1712 int32_t
1713 bnxt_ulp_cntxt_tbl_scope_id_set(struct bnxt_ulp_context *ulp_ctx,
1714 				uint32_t tbl_scope_id)
1715 {
1716 	if (ulp_ctx && ulp_ctx->cfg_data) {
1717 		ulp_ctx->cfg_data->tbl_scope_id = tbl_scope_id;
1718 		return 0;
1719 	}
1720 
1721 	return -EINVAL;
1722 }
1723 
1724 /* Function to set the shared tfp session details from the ulp context. */
1725 int32_t
1726 bnxt_ulp_cntxt_shared_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
1727 {
1728 	if (!ulp) {
1729 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1730 		return -EINVAL;
1731 	}
1732 
1733 	if (tfp == NULL) {
1734 		if (ulp->cfg_data->num_shared_clients > 0)
1735 			ulp->cfg_data->num_shared_clients--;
1736 	} else {
1737 		ulp->cfg_data->num_shared_clients++;
1738 	}
1739 
1740 	ulp->g_shared_tfp = tfp;
1741 	return 0;
1742 }
1743 
1744 /* Function to get the shared tfp session details from the ulp context. */
1745 struct tf *
1746 bnxt_ulp_cntxt_shared_tfp_get(struct bnxt_ulp_context *ulp)
1747 {
1748 	if (!ulp) {
1749 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1750 		return NULL;
1751 	}
1752 	return ulp->g_shared_tfp;
1753 }
1754 
1755 /* Function to get the number of shared clients attached */
1756 uint8_t
1757 bnxt_ulp_cntxt_num_shared_clients_get(struct bnxt_ulp_context *ulp)
1758 {
1759 	if (ulp == NULL || ulp->cfg_data == NULL) {
1760 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1761 		return 0;
1762 	}
1763 	return ulp->cfg_data->num_shared_clients;
1764 }
1765 
1766 /* Function to set the tfp session details from the ulp context. */
1767 int32_t
1768 bnxt_ulp_cntxt_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
1769 {
1770 	if (!ulp) {
1771 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1772 		return -EINVAL;
1773 	}
1774 
1775 	ulp->g_tfp = tfp;
1776 	return 0;
1777 }
1778 
1779 /* Function to get the tfp session details from the ulp context. */
1780 struct tf *
1781 bnxt_ulp_cntxt_tfp_get(struct bnxt_ulp_context *ulp,
1782 		       enum bnxt_ulp_shared_session shared)
1783 {
1784 	if (!ulp) {
1785 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1786 		return NULL;
1787 	}
1788 	if (shared)
1789 		return ulp->g_shared_tfp;
1790 	else
1791 		return ulp->g_tfp;
1792 }
1793 
1794 /*
1795  * Get the device table entry based on the device id.
1796  *
1797  * dev_id [in] The device id of the hardware
1798  *
1799  * Returns the pointer to the device parameters.
1800  */
1801 struct bnxt_ulp_device_params *
1802 bnxt_ulp_device_params_get(uint32_t dev_id)
1803 {
1804 	if (dev_id < BNXT_ULP_MAX_NUM_DEVICES)
1805 		return &ulp_device_params[dev_id];
1806 	return NULL;
1807 }
1808 
1809 /* Function to set the flow database to the ulp context. */
1810 int32_t
1811 bnxt_ulp_cntxt_ptr2_flow_db_set(struct bnxt_ulp_context	*ulp_ctx,
1812 				struct bnxt_ulp_flow_db	*flow_db)
1813 {
1814 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1815 		return -EINVAL;
1816 
1817 	ulp_ctx->cfg_data->flow_db = flow_db;
1818 	return 0;
1819 }
1820 
1821 /* Function to get the flow database from the ulp context. */
1822 struct bnxt_ulp_flow_db	*
1823 bnxt_ulp_cntxt_ptr2_flow_db_get(struct bnxt_ulp_context	*ulp_ctx)
1824 {
1825 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1826 		return NULL;
1827 
1828 	return ulp_ctx->cfg_data->flow_db;
1829 }
1830 
1831 /* Function to get the tunnel cache table info from the ulp context. */
1832 struct bnxt_tun_cache_entry *
1833 bnxt_ulp_cntxt_ptr2_tun_tbl_get(struct bnxt_ulp_context *ulp_ctx)
1834 {
1835 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1836 		return NULL;
1837 
1838 	return ulp_ctx->cfg_data->tun_tbl;
1839 }
1840 
1841 /* Function to get the ulp context from eth device. */
1842 struct bnxt_ulp_context	*
1843 bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev	*dev)
1844 {
1845 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1846 
1847 	if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) {
1848 		struct bnxt_representor *vfr = dev->data->dev_private;
1849 
1850 		bp = vfr->parent_dev->data->dev_private;
1851 	}
1852 
1853 	if (!bp) {
1854 		BNXT_TF_DBG(ERR, "Bnxt private data is not initialized\n");
1855 		return NULL;
1856 	}
1857 	return bp->ulp_ctx;
1858 }
1859 
1860 int32_t
1861 bnxt_ulp_cntxt_ptr2_mapper_data_set(struct bnxt_ulp_context *ulp_ctx,
1862 				    void *mapper_data)
1863 {
1864 	if (!ulp_ctx || !ulp_ctx->cfg_data) {
1865 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1866 		return -EINVAL;
1867 	}
1868 
1869 	ulp_ctx->cfg_data->mapper_data = mapper_data;
1870 	return 0;
1871 }
1872 
1873 void *
1874 bnxt_ulp_cntxt_ptr2_mapper_data_get(struct bnxt_ulp_context *ulp_ctx)
1875 {
1876 	if (!ulp_ctx || !ulp_ctx->cfg_data) {
1877 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1878 		return NULL;
1879 	}
1880 
1881 	return ulp_ctx->cfg_data->mapper_data;
1882 }
1883 
1884 /* Function to set the port database to the ulp context. */
1885 int32_t
1886 bnxt_ulp_cntxt_ptr2_port_db_set(struct bnxt_ulp_context	*ulp_ctx,
1887 				struct bnxt_ulp_port_db	*port_db)
1888 {
1889 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1890 		return -EINVAL;
1891 
1892 	ulp_ctx->cfg_data->port_db = port_db;
1893 	return 0;
1894 }
1895 
1896 /* Function to get the port database from the ulp context. */
1897 struct bnxt_ulp_port_db *
1898 bnxt_ulp_cntxt_ptr2_port_db_get(struct bnxt_ulp_context	*ulp_ctx)
1899 {
1900 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1901 		return NULL;
1902 
1903 	return ulp_ctx->cfg_data->port_db;
1904 }
1905 
1906 /* Function to set the flow counter info into the context */
1907 int32_t
1908 bnxt_ulp_cntxt_ptr2_fc_info_set(struct bnxt_ulp_context *ulp_ctx,
1909 				struct bnxt_ulp_fc_info *ulp_fc_info)
1910 {
1911 	if (!ulp_ctx || !ulp_ctx->cfg_data) {
1912 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1913 		return -EINVAL;
1914 	}
1915 
1916 	ulp_ctx->cfg_data->fc_info = ulp_fc_info;
1917 
1918 	return 0;
1919 }
1920 
1921 /* Function to retrieve the flow counter info from the context. */
1922 struct bnxt_ulp_fc_info *
1923 bnxt_ulp_cntxt_ptr2_fc_info_get(struct bnxt_ulp_context *ulp_ctx)
1924 {
1925 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1926 		return NULL;
1927 
1928 	return ulp_ctx->cfg_data->fc_info;
1929 }
1930 
1931 /* Function to get the ulp flags from the ulp context. */
1932 int32_t
1933 bnxt_ulp_cntxt_ptr2_ulp_flags_get(struct bnxt_ulp_context *ulp_ctx,
1934 				  uint32_t *flags)
1935 {
1936 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1937 		return -1;
1938 
1939 	*flags =  ulp_ctx->cfg_data->ulp_flags;
1940 	return 0;
1941 }
1942 
1943 /* Function to get the ulp vfr info from the ulp context. */
1944 struct bnxt_ulp_vfr_rule_info*
1945 bnxt_ulp_cntxt_ptr2_ulp_vfr_info_get(struct bnxt_ulp_context *ulp_ctx,
1946 				     uint32_t port_id)
1947 {
1948 	if (!ulp_ctx || !ulp_ctx->cfg_data || port_id >= RTE_MAX_ETHPORTS)
1949 		return NULL;
1950 
1951 	return &ulp_ctx->cfg_data->vfr_rule_info[port_id];
1952 }
1953 
1954 /* Function to acquire the flow database lock from the ulp context. */
1955 int32_t
1956 bnxt_ulp_cntxt_acquire_fdb_lock(struct bnxt_ulp_context	*ulp_ctx)
1957 {
1958 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1959 		return -1;
1960 
1961 	if (pthread_mutex_lock(&ulp_ctx->cfg_data->flow_db_lock)) {
1962 		BNXT_TF_DBG(ERR, "unable to acquire fdb lock\n");
1963 		return -1;
1964 	}
1965 	return 0;
1966 }
1967 
1968 /* Function to release the flow database lock from the ulp context. */
1969 void
1970 bnxt_ulp_cntxt_release_fdb_lock(struct bnxt_ulp_context	*ulp_ctx)
1971 {
1972 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1973 		return;
1974 
1975 	pthread_mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock);
1976 }
1977 
1978 /* Function to set the ha info into the context */
1979 int32_t
1980 bnxt_ulp_cntxt_ptr2_ha_info_set(struct bnxt_ulp_context *ulp_ctx,
1981 				struct bnxt_ulp_ha_mgr_info *ulp_ha_info)
1982 {
1983 	if (ulp_ctx == NULL || ulp_ctx->cfg_data == NULL) {
1984 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1985 		return -EINVAL;
1986 	}
1987 	ulp_ctx->cfg_data->ha_info = ulp_ha_info;
1988 	return 0;
1989 }
1990 
1991 /* Function to retrieve the ha info from the context. */
1992 struct bnxt_ulp_ha_mgr_info *
1993 bnxt_ulp_cntxt_ptr2_ha_info_get(struct bnxt_ulp_context *ulp_ctx)
1994 {
1995 	if (ulp_ctx == NULL || ulp_ctx->cfg_data == NULL)
1996 		return NULL;
1997 	return ulp_ctx->cfg_data->ha_info;
1998 }
1999 
2000 bool
2001 bnxt_ulp_cntxt_ha_enabled(struct bnxt_ulp_context *ulp_ctx)
2002 {
2003 	if (ulp_ctx == NULL || ulp_ctx->cfg_data == NULL)
2004 		return false;
2005 	return !!ULP_HIGH_AVAIL_IS_ENABLED(ulp_ctx->cfg_data->ulp_flags);
2006 }
2007 
2008 static int32_t
2009 bnxt_ulp_cntxt_list_init(void)
2010 {
2011 	/* Create the cntxt spin lock */
2012 	rte_spinlock_init(&bnxt_ulp_ctxt_lock);
2013 
2014 	return 0;
2015 }
2016 
2017 static int32_t
2018 bnxt_ulp_cntxt_list_add(struct bnxt_ulp_context *ulp_ctx)
2019 {
2020 	struct ulp_context_list_entry	*entry;
2021 
2022 	entry = rte_zmalloc(NULL, sizeof(struct ulp_context_list_entry), 0);
2023 	if (entry == NULL) {
2024 		BNXT_TF_DBG(ERR, "unable to allocate memory\n");
2025 		return -ENOMEM;
2026 	}
2027 
2028 	rte_spinlock_lock(&bnxt_ulp_ctxt_lock);
2029 	entry->ulp_ctx = ulp_ctx;
2030 	TAILQ_INSERT_TAIL(&ulp_cntx_list, entry, next);
2031 	rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
2032 	return 0;
2033 }
2034 
2035 static void
2036 bnxt_ulp_cntxt_list_del(struct bnxt_ulp_context *ulp_ctx)
2037 {
2038 	struct ulp_context_list_entry	*entry, *temp;
2039 
2040 	rte_spinlock_lock(&bnxt_ulp_ctxt_lock);
2041 	RTE_TAILQ_FOREACH_SAFE(entry, &ulp_cntx_list, next, temp) {
2042 		if (entry->ulp_ctx == ulp_ctx) {
2043 			TAILQ_REMOVE(&ulp_cntx_list, entry, next);
2044 			rte_free(entry);
2045 			break;
2046 		}
2047 	}
2048 	rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
2049 }
2050 
2051 struct bnxt_ulp_context *
2052 bnxt_ulp_cntxt_entry_acquire(void)
2053 {
2054 	struct ulp_context_list_entry	*entry;
2055 
2056 	/* take a lock and get the first ulp context available */
2057 	if (rte_spinlock_trylock(&bnxt_ulp_ctxt_lock)) {
2058 		TAILQ_FOREACH(entry, &ulp_cntx_list, next)
2059 			if (entry->ulp_ctx)
2060 				return entry->ulp_ctx;
		/* No matching context found, release the lock before returning. */
		rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
2061 	}
2062 	return NULL;
2063 }
2064 
2065 void
2066 bnxt_ulp_cntxt_entry_release(void)
2067 {
2068 	rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
2069 }
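/*
 * Minimal usage sketch (illustrative, not part of the driver): a successful
 * acquire returns with bnxt_ulp_ctxt_lock held, so it must be paired with a
 * release, for example:
 *
 *	struct bnxt_ulp_context *ulp_ctx = bnxt_ulp_cntxt_entry_acquire();
 *
 *	if (ulp_ctx) {
 *		... use ulp_ctx while the list lock is held ...
 *		bnxt_ulp_cntxt_entry_release();
 *	}
 */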
2070 
2071 /* Function to get the app tunnel details from the ulp context. */
2072 struct bnxt_flow_app_tun_ent *
2073 bnxt_ulp_cntxt_ptr2_app_tun_list_get(struct bnxt_ulp_context *ulp)
2074 {
2075 	if (!ulp || !ulp->cfg_data)
2076 		return NULL;
2077 
2078 	return ulp->cfg_data->app_tun;
2079 }
2080