xref: /dpdk/drivers/net/bnxt/tf_ulp/bnxt_ulp.c (revision 7d32c003ac175d7ac8669dc11684c75cc7eb56b8)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2023 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <rte_log.h>
7 #include <rte_malloc.h>
8 #include <rte_flow.h>
9 #include <rte_flow_driver.h>
10 #include <rte_tailq.h>
11 #include <rte_spinlock.h>
12 
13 #include "bnxt.h"
14 #include "bnxt_ulp.h"
15 #include "bnxt_ulp_utils.h"
16 #include "bnxt_tf_common.h"
17 #include "bnxt_hwrm.h"
18 #include "hsi_struct_def_dpdk.h"
19 #include "tf_core.h"
20 #include "tf_ext_flow_handle.h"
21 
22 #include "ulp_template_db_enum.h"
23 #include "ulp_template_struct.h"
24 #include "ulp_mark_mgr.h"
25 #include "ulp_fc_mgr.h"
26 #include "ulp_flow_db.h"
27 #include "ulp_mapper.h"
28 #include "ulp_matcher.h"
29 #include "ulp_port_db.h"
30 #include "ulp_tun.h"
31 #include "ulp_ha_mgr.h"
32 #include "bnxt_tf_pmd_shim.h"
33 #include "ulp_template_db_tbl.h"
34 
/* Linked list of all TF sessions, shared across all bnxt ports. */
STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list =
			STAILQ_HEAD_INITIALIZER(bnxt_ulp_session_list);

/* Mutex to synchronize bnxt_ulp_session_list operations. */
static pthread_mutex_t bnxt_ulp_global_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Spin lock to protect context global list */
uint32_t bnxt_ulp_ctxt_lock_created; /* latched once by bnxt_ulp_cntxt_list_init() */
rte_spinlock_t bnxt_ulp_ctxt_lock;
TAILQ_HEAD(cntx_list_entry_list, ulp_context_list_entry);
static struct cntx_list_entry_list ulp_cntx_list =
	TAILQ_HEAD_INITIALIZER(ulp_cntx_list);
48 
49 bool
50 ulp_is_default_session_active(struct bnxt_ulp_context *ulp_ctx)
51 {
52 	if (unlikely(ulp_ctx == NULL || ulp_ctx->g_tfp[0] == NULL))
53 		return false;
54 
55 	return true;
56 }
57 
58 /*
59  * Allow the deletion of context only for the bnxt device that
60  * created the session.
61  */
62 bool
63 ulp_ctx_deinit_allowed(struct bnxt_ulp_context *ulp_ctx)
64 {
65 	if (unlikely(!ulp_ctx || !ulp_ctx->cfg_data))
66 		return false;
67 
68 	if (!ulp_ctx->cfg_data->ref_cnt) {
69 		BNXT_DRV_DBG(DEBUG, "ulp ctx shall initiate deinit\n");
70 		return true;
71 	}
72 
73 	return false;
74 }
75 
76 /* The function to initialize bp flags with truflow features */
77 static int32_t
78 ulp_dparms_dev_port_intf_update(struct bnxt *bp,
79 				struct bnxt_ulp_context *ulp_ctx)
80 {
81 	enum bnxt_ulp_flow_mem_type mtype;
82 
83 	if (unlikely(bnxt_ulp_cntxt_mem_type_get(ulp_ctx, &mtype)))
84 		return -EINVAL;
85 	/* Update the bp flag with gfid flag */
86 	if (mtype == BNXT_ULP_FLOW_MEM_TYPE_EXT)
87 		bp->flags |= BNXT_FLAG_GFID_ENABLE;
88 
89 	return 0;
90 }
91 
92 int32_t
93 ulp_ctx_mh_get_session_name(struct bnxt *bp,
94 			    struct tf_open_session_parms *parms)
95 {
96 	int32_t	rc = 0;
97 	unsigned int domain = 0, bus = 0, slot = 0, device = 0;
98 	rc = sscanf(parms->ctrl_chan_name,
99 		    "%x:%x:%x.%u",
100 		    &domain,
101 		    &bus,
102 		    &slot,
103 		    &device);
104 	if (rc != 4) {
105 		/* PCI Domain not provided (optional in DPDK), thus we
106 		 * force domain to 0 and recheck.
107 		 */
108 		domain = 0;
109 		/* Check parsing of bus/slot/device */
110 		rc = sscanf(parms->ctrl_chan_name,
111 			    "%x:%x.%u",
112 			    &bus,
113 			    &slot,
114 			    &device);
115 		if (unlikely(rc != 3)) {
116 			BNXT_DRV_DBG(DEBUG,
117 				    "Failed to scan device ctrl_chan_name\n");
118 			return -EINVAL;
119 		}
120 	}
121 
122 	/* change domain name for multi-host system */
123 	domain = domain + (0xf & bp->multi_host_pf_pci_id);
124 	sprintf(parms->ctrl_chan_name,
125 		"%x:%x:%x.%u",
126 		domain,
127 		bus,
128 		slot,
129 		device);
130 	BNXT_DRV_DBG(DEBUG,
131 		    "Session name for Multi-Host: ctrl_chan_name:%s\n", parms->ctrl_chan_name);
132 	return 0;
133 }
134 
135 /*
136  * Initialize the state of an ULP session.
137  * If the state of an ULP session is not initialized, set it's state to
138  * initialized. If the state is already initialized, do nothing.
139  */
140 static void
141 ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init)
142 {
143 	pthread_mutex_lock(&session->bnxt_ulp_mutex);
144 
145 	if (!session->bnxt_ulp_init) {
146 		session->bnxt_ulp_init = true;
147 		*init = false;
148 	} else {
149 		*init = true;
150 	}
151 
152 	pthread_mutex_unlock(&session->bnxt_ulp_mutex);
153 }
154 
155 /*
156  * Check if an ULP session is already allocated for a specific PCI
157  * domain & bus. If it is already allocated simply return the session
158  * pointer, otherwise allocate a new session.
159  */
160 static struct bnxt_ulp_session_state *
161 ulp_get_session(struct bnxt *bp, struct rte_pci_addr *pci_addr)
162 {
163 	struct bnxt_ulp_session_state *session;
164 
165 	/* if multi root capability is enabled, then ignore the pci bus id */
166 	STAILQ_FOREACH(session, &bnxt_ulp_session_list, next) {
167 		if (BNXT_MULTIROOT_EN(bp)) {
168 			if (!memcmp(bp->dsn, session->dsn,
169 				    sizeof(session->dsn))) {
170 				return session;
171 			}
172 		} else if (session->pci_info.domain == pci_addr->domain &&
173 			   session->pci_info.bus == pci_addr->bus) {
174 			return session;
175 		}
176 	}
177 	return NULL;
178 }
179 
180 /*
181  * Allocate and Initialize an ULP session and set it's state to INITIALIZED.
182  * If it's already initialized simply return the already existing session.
183  */
184 static struct bnxt_ulp_session_state *
185 ulp_session_init(struct bnxt *bp,
186 		 bool *init)
187 {
188 	struct rte_pci_device		*pci_dev;
189 	struct rte_pci_addr		*pci_addr;
190 	struct bnxt_ulp_session_state	*session;
191 
192 	if (!bp)
193 		return NULL;
194 
195 	pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
196 	pci_addr = &pci_dev->addr;
197 
198 	pthread_mutex_lock(&bnxt_ulp_global_mutex);
199 
200 	session = ulp_get_session(bp, pci_addr);
201 	if (!session) {
202 		/* Not Found the session  Allocate a new one */
203 		session = rte_zmalloc("bnxt_ulp_session",
204 				      sizeof(struct bnxt_ulp_session_state),
205 				      0);
206 		if (!session) {
207 			BNXT_DRV_DBG(ERR,
208 				    "Allocation failed for bnxt_ulp_session\n");
209 			pthread_mutex_unlock(&bnxt_ulp_global_mutex);
210 			return NULL;
211 
212 		} else {
213 			/* Add it to the queue */
214 			session->pci_info.domain = pci_addr->domain;
215 			session->pci_info.bus = pci_addr->bus;
216 			memcpy(session->dsn, bp->dsn, sizeof(session->dsn));
217 			pthread_mutex_init(&session->bnxt_ulp_mutex, NULL);
218 			STAILQ_INSERT_TAIL(&bnxt_ulp_session_list,
219 					   session, next);
220 		}
221 	}
222 	ulp_context_initialized(session, init);
223 	pthread_mutex_unlock(&bnxt_ulp_global_mutex);
224 	return session;
225 }
226 
227 /*
228  * When a device is closed, remove it's associated session from the global
229  * session list.
230  */
231 static void
232 ulp_session_deinit(struct bnxt_ulp_session_state *session)
233 {
234 	if (!session)
235 		return;
236 
237 	if (!session->cfg_data) {
238 		pthread_mutex_lock(&bnxt_ulp_global_mutex);
239 		STAILQ_REMOVE(&bnxt_ulp_session_list, session,
240 			      bnxt_ulp_session_state, next);
241 		pthread_mutex_destroy(&session->bnxt_ulp_mutex);
242 		rte_free(session);
243 		pthread_mutex_unlock(&bnxt_ulp_global_mutex);
244 	}
245 }
246 
247 /* Internal function to delete all the flows belonging to the given port */
248 static void
249 bnxt_ulp_flush_port_flows(struct bnxt *bp)
250 {
251 	uint16_t func_id;
252 
253 	/* it is assumed that port is either TVF or PF */
254 	if (unlikely(ulp_port_db_port_func_id_get(bp->ulp_ctx,
255 						  bp->eth_dev->data->port_id,
256 						  &func_id))) {
257 		BNXT_DRV_DBG(ERR, "Invalid argument\n");
258 		return;
259 	}
260 	(void)ulp_flow_db_function_flow_flush(bp->ulp_ctx, func_id);
261 }
262 
263 /* Internal function to delete the VFR default flows */
264 void
265 bnxt_ulp_destroy_vfr_default_rules(struct bnxt *bp, bool global)
266 {
267 	struct bnxt_ulp_vfr_rule_info *info;
268 	uint16_t port_id;
269 	struct rte_eth_dev *vfr_eth_dev;
270 	struct bnxt_representor *vfr_bp;
271 
272 	if (unlikely(!BNXT_TRUFLOW_EN(bp) ||
273 		     BNXT_ETH_DEV_IS_REPRESENTOR(bp->eth_dev)))
274 		return;
275 
276 	if (unlikely(!bp->ulp_ctx || !bp->ulp_ctx->cfg_data))
277 		return;
278 
279 	/* Delete default rules for all ports */
280 	for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
281 		info = &bp->ulp_ctx->cfg_data->vfr_rule_info[port_id];
282 		if (!info->valid)
283 			continue;
284 
285 		if (!global && info->parent_port_id !=
286 		    bp->eth_dev->data->port_id)
287 			continue;
288 
289 		/* Destroy the flows */
290 		ulp_default_flow_destroy(bp->eth_dev, info->vfr_flow_id);
291 		/* Clean up the tx action pointer */
292 		vfr_eth_dev = &rte_eth_devices[port_id];
293 		if (vfr_eth_dev) {
294 			vfr_bp = vfr_eth_dev->data->dev_private;
295 			vfr_bp->vfr_tx_cfa_action = 0;
296 		}
297 		memset(info, 0, sizeof(struct bnxt_ulp_vfr_rule_info));
298 	}
299 }
300 
301 static int
302 ulp_l2_etype_tunnel_alloc(struct bnxt *bp)
303 {
304 	int rc = 0;
305 
306 	if (!ULP_APP_L2_ETYPE_SUPPORT(bp->ulp_ctx))
307 		return rc;
308 
309 	if (bp->l2_etype_tunnel_cnt) {
310 		BNXT_DRV_DBG(DEBUG, "L2 ETYPE Custom Tunnel already allocated\n");
311 		return rc;
312 	}
313 	rc = bnxt_tunnel_dst_port_alloc(bp,
314 					BNXT_L2_ETYPE_TUNNEL_ID,
315 					HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_L2_ETYPE);
316 	if (unlikely(rc))
317 		BNXT_DRV_DBG(ERR, "Failed to set global L2 ETYPE Custom Tunnel\n");
318 	else
319 		bp->l2_etype_tunnel_cnt++;
320 
321 	return rc;
322 }
323 
324 static const struct bnxt_ulp_core_ops *
325 bnxt_ulp_port_func_ops_get(struct bnxt *bp)
326 {
327 	int32_t rc;
328 	enum bnxt_ulp_device_id  dev_id;
329 	const struct bnxt_ulp_core_ops *func_ops;
330 
331 	rc = bnxt_ulp_devid_get(bp, &dev_id);
332 	if (unlikely(rc))
333 		return NULL;
334 
335 	switch (dev_id) {
336 	case BNXT_ULP_DEVICE_ID_THOR2:
337 		func_ops = &bnxt_ulp_tfc_core_ops;
338 		break;
339 	case BNXT_ULP_DEVICE_ID_THOR:
340 	case BNXT_ULP_DEVICE_ID_STINGRAY:
341 	case BNXT_ULP_DEVICE_ID_WH_PLUS:
342 		func_ops = &bnxt_ulp_tf_core_ops;
343 		break;
344 	default:
345 		func_ops = NULL;
346 		break;
347 	}
348 	return func_ops;
349 }
350 
351 /*
352  * When a port is initialized by dpdk. This functions sets up
353  * the port specific details.
354  */
355 int32_t
356 bnxt_ulp_port_init(struct bnxt *bp)
357 {
358 	struct bnxt_ulp_session_state *session;
359 	bool initialized;
360 	uint32_t ulp_flags;
361 	int32_t rc = 0;
362 	enum bnxt_ulp_device_id dev_id;
363 
364 	if (!BNXT_TRUFLOW_EN(bp)) {
365 		BNXT_DRV_DBG(DEBUG,
366 			     "Skip ulp init for port: %d, TF is not enabled\n",
367 			     bp->eth_dev->data->port_id);
368 		return rc;
369 	}
370 
371 	if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
372 		BNXT_DRV_DBG(DEBUG,
373 			     "Skip ulp init for port: %d, not a TVF or PF\n",
374 			     bp->eth_dev->data->port_id);
375 		return rc;
376 	}
377 
378 	rc = bnxt_ulp_devid_get(bp, &dev_id);
379 	if (unlikely(rc)) {
380 		BNXT_DRV_DBG(DEBUG, "Unsupported device %x\n", rc);
381 		return rc;
382 	}
383 
384 	if (unlikely(bp->ulp_ctx)) {
385 		BNXT_DRV_DBG(DEBUG, "ulp ctx already allocated\n");
386 		return rc;
387 	}
388 
389 	bp->ulp_ctx = rte_zmalloc("bnxt_ulp_ctx",
390 				  sizeof(struct bnxt_ulp_context), 0);
391 	if (unlikely(!bp->ulp_ctx)) {
392 		BNXT_DRV_DBG(ERR, "Failed to allocate ulp ctx\n");
393 		return -ENOMEM;
394 	}
395 
396 	rc = bnxt_ulp_cntxt_bp_set(bp->ulp_ctx, bp);
397 	if (unlikely(rc)) {
398 		BNXT_DRV_DBG(ERR, "Failed to set bp in ulp_ctx\n");
399 		rte_free(bp->ulp_ctx);
400 		return -EIO;
401 	}
402 
403 	/* This shouldn't fail, unless we have a unknown device */
404 	bp->ulp_ctx->ops = bnxt_ulp_port_func_ops_get(bp);
405 	if (unlikely(!bp->ulp_ctx->ops)) {
406 		BNXT_DRV_DBG(ERR, "Failed to get ulp ops\n");
407 		rte_free(bp->ulp_ctx);
408 		return -EIO;
409 	}
410 
411 	/*
412 	 * Multiple uplink ports can be associated with a single vswitch.
413 	 * Make sure only the port that is started first will initialize
414 	 * the TF session.
415 	 */
416 	session = ulp_session_init(bp, &initialized);
417 	if (unlikely(!session)) {
418 		BNXT_DRV_DBG(ERR, "Failed to initialize the tf session\n");
419 		rc = -EIO;
420 		goto jump_to_error;
421 	}
422 
423 	if (initialized) {
424 		/*
425 		 * If ULP is already initialized for a specific domain then
426 		 * simply assign the ulp context to this rte_eth_dev.
427 		 */
428 		rc = bp->ulp_ctx->ops->ulp_ctx_attach(bp, session);
429 		if (unlikely(rc)) {
430 			BNXT_DRV_DBG(ERR, "Failed to attach the ulp context\n");
431 			goto jump_to_error;
432 		}
433 	} else {
434 		rc = bp->ulp_ctx->ops->ulp_init(bp, session);
435 		if (unlikely(rc)) {
436 			BNXT_DRV_DBG(ERR, "Failed to initialize the ulp init\n");
437 			goto jump_to_error;
438 		}
439 	}
440 
441 	/* setup the l2 etype tunnel for custom l2 encap/decap */
442 	rc = ulp_l2_etype_tunnel_alloc(bp);
443 	if (unlikely(rc))
444 		goto jump_to_error;
445 
446 
447 	/* Update bnxt driver flags */
448 	rc = ulp_dparms_dev_port_intf_update(bp, bp->ulp_ctx);
449 	if (unlikely(rc)) {
450 		BNXT_DRV_DBG(ERR, "Failed to update driver flags\n");
451 		goto jump_to_error;
452 	}
453 
454 	/* update the port database for the given interface */
455 	rc = ulp_port_db_port_update(bp->ulp_ctx, bp->eth_dev);
456 	if (unlikely(rc)) {
457 		BNXT_DRV_DBG(ERR, "Failed to update port database\n");
458 		goto jump_to_error;
459 	}
460 
461 	/* create the default rules */
462 	rc = bnxt_ulp_create_df_rules(bp);
463 	if (unlikely(rc)) {
464 		BNXT_DRV_DBG(ERR, "Failed to create default flow\n");
465 		goto jump_to_error;
466 	}
467 
468 	/* set the unicast mode */
469 	if (unlikely(bnxt_ulp_cntxt_ptr2_ulp_flags_get(bp->ulp_ctx, &ulp_flags))) {
470 		BNXT_DRV_DBG(ERR, "Error in getting ULP context flags\n");
471 		goto jump_to_error;
472 	}
473 	if (ulp_flags & BNXT_ULP_APP_UNICAST_ONLY) {
474 		if (unlikely(bnxt_pmd_set_unicast_rxmask(bp->eth_dev))) {
475 			BNXT_DRV_DBG(ERR, "Error in setting unicast rxmode\n");
476 			goto jump_to_error;
477 		}
478 	}
479 
480 	/* Make sure that custom header data is selected */
481 	if (dev_id > BNXT_ULP_DEVICE_ID_WH_PLUS) {
482 		struct bnxt_vnic_info *vnic = bp->vnic_info;
483 		vnic->metadata_format = HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_3;
484 		rc = bnxt_hwrm_vnic_update(bp,
485 					vnic,
486 					HWRM_VNIC_UPDATE_INPUT_ENABLES_METADATA_FORMAT_TYPE_VALID);
487 		if (unlikely(rc)) {
488 			BNXT_DRV_DBG(ERR, "Failed to set metadata format\n");
489 			goto jump_to_error;
490 		}
491 	}
492 
493 	rc = ulp_l2_etype_tunnel_alloc(bp);
494 	if (unlikely(rc))
495 		goto jump_to_error;
496 
497 	return rc;
498 
499 jump_to_error:
500 	bnxt_ulp_port_deinit(bp);
501 	return rc;
502 }
503 
504 static void
505 ulp_l2_etype_tunnel_free(struct bnxt *bp)
506 {
507 	int rc;
508 
509 	if (!ULP_APP_L2_ETYPE_SUPPORT(bp->ulp_ctx))
510 		return;
511 
512 	if (unlikely(bp->l2_etype_tunnel_cnt == 0)) {
513 		BNXT_DRV_DBG(DEBUG, "L2 ETYPE Custom Tunnel already freed\n");
514 		return;
515 	}
516 	rc = bnxt_tunnel_dst_port_free(bp,
517 				       BNXT_L2_ETYPE_TUNNEL_ID,
518 				       HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_L2_ETYPE);
519 	if (unlikely(rc))
520 		BNXT_DRV_DBG(ERR, "Failed to clear L2 ETYPE Custom Tunnel\n");
521 
522 	bp->l2_etype_tunnel_cnt--;
523 }
524 
/*
 * When a port is de-initialized by dpdk. This functions clears up
 * the port specific details.
 *
 * Teardown order matters here: the session is looked up first, then the
 * reference count decides between a per-port detach and a full global
 * deinit, and only afterwards are the session and context freed.
 */
void
bnxt_ulp_port_deinit(struct bnxt *bp)
{
	struct bnxt_ulp_session_state *session;
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr *pci_addr;

	/* Nothing to undo if truflow was never enabled on this port. */
	if (unlikely(!BNXT_TRUFLOW_EN(bp))) {
		BNXT_DRV_DBG(DEBUG,
			     "Skip ULP deinit for port:%d, TF is not enabled\n",
			     bp->eth_dev->data->port_id);
		return;
	}

	/* Only PFs and trusted VFs ran the matching init path. */
	if (unlikely(!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp))) {
		BNXT_DRV_DBG(DEBUG,
			     "Skip ULP deinit port:%d, not a TVF or PF\n",
			     bp->eth_dev->data->port_id);
		return;
	}

	if (unlikely(!bp->ulp_ctx)) {
		BNXT_DRV_DBG(DEBUG, "ulp ctx already de-allocated\n");
		return;
	}

	BNXT_DRV_DBG(DEBUG, "BNXT Port:%d ULP port deinit\n",
		     bp->eth_dev->data->port_id);

	/* Get the session details  */
	pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
	pci_addr = &pci_dev->addr;
	pthread_mutex_lock(&bnxt_ulp_global_mutex);
	session = ulp_get_session(bp, pci_addr);
	pthread_mutex_unlock(&bnxt_ulp_global_mutex);

	/* session not found then just exit */
	if (unlikely(!session)) {
		/* Free the ulp context */
		rte_free(bp->ulp_ctx);
		bp->ulp_ctx = NULL;
		return;
	}

	/* Check the reference count to deinit or deattach*/
	if (bp->ulp_ctx->cfg_data && bp->ulp_ctx->cfg_data->ref_cnt) {
		bp->ulp_ctx->cfg_data->ref_cnt--;
		/* Free tunnels for each port */
		ulp_l2_etype_tunnel_free(bp);
		/* Non-zero count after the decrement: other ports still use
		 * this session, so only detach this port's resources.
		 */
		if (bp->ulp_ctx->cfg_data->ref_cnt) {
			/* Free the ulp context in the context entry list */
			bnxt_ulp_cntxt_list_del(bp->ulp_ctx);

			/* free the port details */
			/* Free the default flow rule associated to this port */
			bnxt_ulp_destroy_df_rules(bp, false);
			bnxt_ulp_destroy_vfr_default_rules(bp, false);

			/* free flows associated with this port */
			bnxt_ulp_flush_port_flows(bp);

			/* close the session associated with this port */
			bp->ulp_ctx->ops->ulp_ctx_detach(bp, session);
		} else {
			/* Last reference dropped: tear everything down. */
			/* Free the ulp context in the context entry list */
			bnxt_ulp_cntxt_list_del(bp->ulp_ctx);

			/* clean up default flows */
			bnxt_ulp_destroy_df_rules(bp, true);

			/* clean up default VFR flows */
			bnxt_ulp_destroy_vfr_default_rules(bp, true);

			/* clean up regular flows */
			ulp_flow_db_flush_flows(bp->ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR);

			/* Perform ulp ctx deinit */
			bp->ulp_ctx->ops->ulp_deinit(bp, session);
		}
	}

	/* clean up the session (only removed once cfg_data is cleared) */
	ulp_session_deinit(session);

	/* Free the ulp context */
	rte_free(bp->ulp_ctx);
	bp->ulp_ctx = NULL;
}
617 
618 int32_t
619 bnxt_ulp_cntxt_list_init(void)
620 {
621 	/* Create the cntxt spin lock only once*/
622 	if (!bnxt_ulp_ctxt_lock_created)
623 		rte_spinlock_init(&bnxt_ulp_ctxt_lock);
624 	bnxt_ulp_ctxt_lock_created = 1;
625 	return 0;
626 }
627 
628 int32_t
629 bnxt_ulp_cntxt_list_add(struct bnxt_ulp_context *ulp_ctx)
630 {
631 	struct ulp_context_list_entry	*entry;
632 
633 	entry = rte_zmalloc(NULL, sizeof(struct ulp_context_list_entry), 0);
634 	if (unlikely(entry == NULL)) {
635 		BNXT_DRV_DBG(ERR, "unable to allocate memory\n");
636 		return -ENOMEM;
637 	}
638 
639 	rte_spinlock_lock(&bnxt_ulp_ctxt_lock);
640 	entry->ulp_ctx = ulp_ctx;
641 	TAILQ_INSERT_TAIL(&ulp_cntx_list, entry, next);
642 	rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
643 	return 0;
644 }
645 
646 void
647 bnxt_ulp_cntxt_list_del(struct bnxt_ulp_context *ulp_ctx)
648 {
649 	struct ulp_context_list_entry	*entry, *temp;
650 
651 	rte_spinlock_lock(&bnxt_ulp_ctxt_lock);
652 	RTE_TAILQ_FOREACH_SAFE(entry, &ulp_cntx_list, next, temp) {
653 		if (entry->ulp_ctx == ulp_ctx) {
654 			TAILQ_REMOVE(&ulp_cntx_list, entry, next);
655 			rte_free(entry);
656 			break;
657 		}
658 	}
659 	rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
660 }
661 
662 int
663 bnxt_ulp_cntxt_list_count(void)
664 {
665 	struct ulp_context_list_entry *entry, *temp;
666 	int count_1 = 0;
667 
668 	rte_spinlock_lock(&bnxt_ulp_ctxt_lock);
669 	RTE_TAILQ_FOREACH_SAFE(entry, &ulp_cntx_list, next, temp) {
670 		count_1++;
671 	}
672 	rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
673 	return count_1;
674 }
675 
/*
 * Find the ulp context whose cfg_data matches @arg.
 *
 * On success the global context spinlock is intentionally LEFT HELD;
 * the caller must release it via bnxt_ulp_cntxt_entry_release().
 * Returns NULL (lock not held) when the lock is contended or no entry
 * matches.
 */
struct bnxt_ulp_context *
bnxt_ulp_cntxt_entry_acquire(void *arg)
{
	struct ulp_context_list_entry	*entry;

	/* take a lock and get the first ulp context available */
	if (rte_spinlock_trylock(&bnxt_ulp_ctxt_lock)) {
		TAILQ_FOREACH(entry, &ulp_cntx_list, next) {
			/* Return with the lock still held so the entry
			 * cannot be removed from under the caller.
			 */
			if (entry->ulp_ctx->cfg_data == arg)
				return entry->ulp_ctx;
		}
		rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
	}
	return NULL;
}
691 
/* Release the spinlock left held by a successful
 * bnxt_ulp_cntxt_entry_acquire(); must only be called in that pairing.
 */
void
bnxt_ulp_cntxt_entry_release(void)
{
	rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
}
697