xref: /dpdk/drivers/net/bnxt/tf_core/tf_rm.c (revision 68a03efeed657e6e05f281479b33b51102797e15)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2021 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <string.h>
7 
8 #include <rte_common.h>
9 
10 #include <cfa_resource_types.h>
11 
12 #include "tf_rm.h"
13 #include "tf_common.h"
14 #include "tf_util.h"
15 #include "tf_session.h"
16 #include "tf_device.h"
17 #include "tfp.h"
18 #include "tf_msg.h"
19 
20 /* Logging defines */
21 #define TF_RM_DEBUG  0
22 
/**
 * Generic RM Element data type that an RM DB is built upon.
 */
struct tf_rm_element {
	/**
	 * RM Element configuration type. If Private then the
	 * hcapi_type can be ignored. If Null then the element is not
	 * valid for the device.
	 */
	enum tf_rm_elem_cfg_type cfg_type;

	/**
	 * HCAPI RM Type for the element.
	 */
	uint16_t hcapi_type;

	/**
	 * HCAPI RM allocated range information (start/stride) for the
	 * element.
	 */
	struct tf_rm_alloc_info alloc;

	/**
	 * Bit allocator pool for the element. Pool size is controlled
	 * by the struct tf_session_resources at time of session creation.
	 * Null indicates that the element is not used for the device.
	 */
	struct bitalloc *pool;
};
51 
/**
 * TF RM DB definition. Opaque handle (void *) to callers; private to
 * this file.
 */
struct tf_rm_new_db {
	/**
	 * Number of elements in the DB
	 */
	uint16_t num_entries;

	/**
	 * Direction this DB controls.
	 */
	enum tf_dir dir;

	/**
	 * Module type, used for logging purposes.
	 */
	enum tf_device_module_type type;

	/**
	 * The DB consists of an array of elements, indexed by the
	 * module's subtype.
	 */
	struct tf_rm_element *db;
};
76 
/**
 * Counts the number of HCAPI controlled elements that have a non-zero
 * reservation request.
 *
 * Also logs any reservation request made against an element the
 * device does not support (cfg type Null), except for the EM module
 * which uses a split configuration array and would falsely trigger
 * the check.
 *
 * [in] dir
 *   Receive or transmit direction
 *
 * [in] type
 *   Module type, used for logging purposes
 *
 * [in] cfg
 *   Pointer to the DB configuration
 *
 * [in] reservations
 *   Pointer to the allocation values associated with the module
 *
 * [in] count
 *   Number of DB configuration elements
 *
 * [out] valid_count
 *   Number of HCAPI entries with a reservation value greater than 0
 */
static void
tf_rm_count_hcapi_reservations(enum tf_dir dir,
			       enum tf_device_module_type type,
			       struct tf_rm_element_cfg *cfg,
			       uint16_t *reservations,
			       uint16_t count,
			       uint16_t *valid_count)
{
	int i;
	uint16_t cnt = 0;

	for (i = 0; i < count; i++) {
		/* Count only device supported elements that were
		 * actually requested.
		 */
		if ((cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI ||
		     cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA) &&
		    reservations[i] > 0)
			cnt++;

		/* Only log msg if a type is attempted reserved and
		 * not supported. We ignore EM module as its using a
		 * split configuration array thus it would fail for
		 * this type of check.
		 */
		if (type != TF_DEVICE_MODULE_TYPE_EM &&
		    cfg[i].cfg_type == TF_RM_ELEM_CFG_NULL &&
		    reservations[i] > 0) {
			TFP_DRV_LOG(ERR,
				"%s, %s, %s allocation of %d not supported\n",
				tf_device_module_type_2_str(type),
				tf_dir_2_str(dir),
				tf_device_module_type_subtype_2_str(type, i),
				reservations[i]);
		}
	}

	*valid_count = cnt;
}
136 
/**
 * Resource Manager Adjust of base index definitions. Used by
 * tf_rm_adjust_index() to translate between the 0 based pool space
 * and the HCAPI allocated range.
 */
enum tf_rm_adjust_type {
	TF_RM_ADJUST_ADD_BASE, /**< Adds base to the index */
	TF_RM_ADJUST_RM_BASE   /**< Removes base from the index */
};
144 
145 /**
146  * Adjust an index according to the allocation information.
147  *
148  * All resources are controlled in a 0 based pool. Some resources, by
149  * design, are not 0 based, i.e. Full Action Records (SRAM) thus they
150  * need to be adjusted before they are handed out.
151  *
152  * [in] db
153  *   Pointer to the db, used for the lookup
154  *
155  * [in] action
156  *   Adjust action
157  *
158  * [in] db_index
159  *   DB index for the element type
160  *
161  * [in] index
162  *   Index to convert
163  *
164  * [out] adj_index
165  *   Adjusted index
166  *
167  * Returns:
168  *     0          - Success
169  *   - EOPNOTSUPP - Operation not supported
170  */
171 static int
172 tf_rm_adjust_index(struct tf_rm_element *db,
173 		   enum tf_rm_adjust_type action,
174 		   uint32_t db_index,
175 		   uint32_t index,
176 		   uint32_t *adj_index)
177 {
178 	int rc = 0;
179 	uint32_t base_index;
180 
181 	base_index = db[db_index].alloc.entry.start;
182 
183 	switch (action) {
184 	case TF_RM_ADJUST_RM_BASE:
185 		*adj_index = index - base_index;
186 		break;
187 	case TF_RM_ADJUST_ADD_BASE:
188 		*adj_index = index + base_index;
189 		break;
190 	default:
191 		return -EOPNOTSUPP;
192 	}
193 
194 	return rc;
195 }
196 
197 /**
198  * Logs an array of found residual entries to the console.
199  *
200  * [in] dir
201  *   Receive or transmit direction
202  *
203  * [in] type
204  *   Type of Device Module
205  *
206  * [in] count
207  *   Number of entries in the residual array
208  *
209  * [in] residuals
210  *   Pointer to an array of residual entries. Array is index same as
211  *   the DB in which this function is used. Each entry holds residual
212  *   value for that entry.
213  */
214 static void
215 tf_rm_log_residuals(enum tf_dir dir,
216 		    enum tf_device_module_type type,
217 		    uint16_t count,
218 		    uint16_t *residuals)
219 {
220 	int i;
221 
222 	/* Walk the residual array and log the types that wasn't
223 	 * cleaned up to the console.
224 	 */
225 	for (i = 0; i < count; i++) {
226 		if (residuals[i] != 0)
227 			TFP_DRV_LOG(ERR,
228 				"%s, %s was not cleaned up, %d outstanding\n",
229 				tf_dir_2_str(dir),
230 				tf_device_module_type_subtype_2_str(type, i),
231 				residuals[i]);
232 	}
233 }
234 
235 /**
236  * Performs a check of the passed in DB for any lingering elements. If
237  * a resource type was found to not have been cleaned up by the caller
238  * then its residual values are recorded, logged and passed back in an
239  * allocate reservation array that the caller can pass to the FW for
240  * cleanup.
241  *
242  * [in] db
243  *   Pointer to the db, used for the lookup
244  *
245  * [out] resv_size
246  *   Pointer to the reservation size of the generated reservation
247  *   array.
248  *
249  * [in/out] resv
250  *   Pointer Pointer to a reservation array. The reservation array is
251  *   allocated after the residual scan and holds any found residual
252  *   entries. Thus it can be smaller than the DB that the check was
253  *   performed on. Array must be freed by the caller.
254  *
255  * [out] residuals_present
256  *   Pointer to a bool flag indicating if residual was present in the
257  *   DB
258  *
259  * Returns:
260  *     0          - Success
261  *   - EOPNOTSUPP - Operation not supported
262  */
263 static int
264 tf_rm_check_residuals(struct tf_rm_new_db *rm_db,
265 		      uint16_t *resv_size,
266 		      struct tf_rm_resc_entry **resv,
267 		      bool *residuals_present)
268 {
269 	int rc;
270 	int i;
271 	int f;
272 	uint16_t count;
273 	uint16_t found;
274 	uint16_t *residuals = NULL;
275 	uint16_t hcapi_type;
276 	struct tf_rm_get_inuse_count_parms iparms;
277 	struct tf_rm_get_alloc_info_parms aparms;
278 	struct tf_rm_get_hcapi_parms hparms;
279 	struct tf_rm_alloc_info info;
280 	struct tfp_calloc_parms cparms;
281 	struct tf_rm_resc_entry *local_resv = NULL;
282 
283 	/* Create array to hold the entries that have residuals */
284 	cparms.nitems = rm_db->num_entries;
285 	cparms.size = sizeof(uint16_t);
286 	cparms.alignment = 0;
287 	rc = tfp_calloc(&cparms);
288 	if (rc)
289 		return rc;
290 
291 	residuals = (uint16_t *)cparms.mem_va;
292 
293 	/* Traverse the DB and collect any residual elements */
294 	iparms.rm_db = rm_db;
295 	iparms.count = &count;
296 	for (i = 0, found = 0; i < rm_db->num_entries; i++) {
297 		iparms.db_index = i;
298 		rc = tf_rm_get_inuse_count(&iparms);
299 		/* Not a device supported entry, just skip */
300 		if (rc == -ENOTSUP)
301 			continue;
302 		if (rc)
303 			goto cleanup_residuals;
304 
305 		if (count) {
306 			found++;
307 			residuals[i] = count;
308 			*residuals_present = true;
309 		}
310 	}
311 
312 	if (*residuals_present) {
313 		/* Populate a reduced resv array with only the entries
314 		 * that have residuals.
315 		 */
316 		cparms.nitems = found;
317 		cparms.size = sizeof(struct tf_rm_resc_entry);
318 		cparms.alignment = 0;
319 		rc = tfp_calloc(&cparms);
320 		if (rc)
321 			return rc;
322 
323 		local_resv = (struct tf_rm_resc_entry *)cparms.mem_va;
324 
325 		aparms.rm_db = rm_db;
326 		hparms.rm_db = rm_db;
327 		hparms.hcapi_type = &hcapi_type;
328 		for (i = 0, f = 0; i < rm_db->num_entries; i++) {
329 			if (residuals[i] == 0)
330 				continue;
331 			aparms.db_index = i;
332 			aparms.info = &info;
333 			rc = tf_rm_get_info(&aparms);
334 			if (rc)
335 				goto cleanup_all;
336 
337 			hparms.db_index = i;
338 			rc = tf_rm_get_hcapi_type(&hparms);
339 			if (rc)
340 				goto cleanup_all;
341 
342 			local_resv[f].type = hcapi_type;
343 			local_resv[f].start = info.entry.start;
344 			local_resv[f].stride = info.entry.stride;
345 			f++;
346 		}
347 		*resv_size = found;
348 	}
349 
350 	tf_rm_log_residuals(rm_db->dir,
351 			    rm_db->type,
352 			    rm_db->num_entries,
353 			    residuals);
354 
355 	tfp_free((void *)residuals);
356 	*resv = local_resv;
357 
358 	return 0;
359 
360  cleanup_all:
361 	tfp_free((void *)local_resv);
362 	*resv = NULL;
363  cleanup_residuals:
364 	tfp_free((void *)residuals);
365 
366 	return rc;
367 }
368 
369 int
370 tf_rm_create_db(struct tf *tfp,
371 		struct tf_rm_create_db_parms *parms)
372 {
373 	int rc;
374 	int i;
375 	int j;
376 	struct tf_session *tfs;
377 	struct tf_dev_info *dev;
378 	uint16_t max_types;
379 	struct tfp_calloc_parms cparms;
380 	struct tf_rm_resc_req_entry *query;
381 	enum tf_rm_resc_resv_strategy resv_strategy;
382 	struct tf_rm_resc_req_entry *req;
383 	struct tf_rm_resc_entry *resv;
384 	struct tf_rm_new_db *rm_db;
385 	struct tf_rm_element *db;
386 	uint32_t pool_size;
387 	uint16_t hcapi_items;
388 
389 	TF_CHECK_PARMS2(tfp, parms);
390 
391 	/* Retrieve the session information */
392 	rc = tf_session_get_session_internal(tfp, &tfs);
393 	if (rc)
394 		return rc;
395 
396 	/* Retrieve device information */
397 	rc = tf_session_get_device(tfs, &dev);
398 	if (rc)
399 		return rc;
400 
401 	/* Need device max number of elements for the RM QCAPS */
402 	rc = dev->ops->tf_dev_get_max_types(tfp, &max_types);
403 	if (rc)
404 		return rc;
405 
406 	cparms.nitems = max_types;
407 	cparms.size = sizeof(struct tf_rm_resc_req_entry);
408 	cparms.alignment = 0;
409 	rc = tfp_calloc(&cparms);
410 	if (rc)
411 		return rc;
412 
413 	query = (struct tf_rm_resc_req_entry *)cparms.mem_va;
414 
415 	/* Get Firmware Capabilities */
416 	rc = tf_msg_session_resc_qcaps(tfp,
417 				       parms->dir,
418 				       max_types,
419 				       query,
420 				       &resv_strategy);
421 	if (rc)
422 		return rc;
423 
424 	/* Process capabilities against DB requirements. However, as a
425 	 * DB can hold elements that are not HCAPI we can reduce the
426 	 * req msg content by removing those out of the request yet
427 	 * the DB holds them all as to give a fast lookup. We can also
428 	 * remove entries where there are no request for elements.
429 	 */
430 	tf_rm_count_hcapi_reservations(parms->dir,
431 				       parms->type,
432 				       parms->cfg,
433 				       parms->alloc_cnt,
434 				       parms->num_elements,
435 				       &hcapi_items);
436 
437 	/* Handle the case where a DB create request really ends up
438 	 * being empty. Unsupported (if not rare) case but possible
439 	 * that no resources are necessary for a 'direction'.
440 	 */
441 	if (hcapi_items == 0) {
442 		TFP_DRV_LOG(ERR,
443 			"%s: DB create request for Zero elements, DB Type:%s\n",
444 			tf_dir_2_str(parms->dir),
445 			tf_device_module_type_2_str(parms->type));
446 
447 		parms->rm_db = NULL;
448 		return -ENOMEM;
449 	}
450 
451 	/* Alloc request, alignment already set */
452 	cparms.nitems = (size_t)hcapi_items;
453 	cparms.size = sizeof(struct tf_rm_resc_req_entry);
454 	rc = tfp_calloc(&cparms);
455 	if (rc)
456 		return rc;
457 	req = (struct tf_rm_resc_req_entry *)cparms.mem_va;
458 
459 	/* Alloc reservation, alignment and nitems already set */
460 	cparms.size = sizeof(struct tf_rm_resc_entry);
461 	rc = tfp_calloc(&cparms);
462 	if (rc)
463 		return rc;
464 	resv = (struct tf_rm_resc_entry *)cparms.mem_va;
465 
466 	/* Build the request */
467 	for (i = 0, j = 0; i < parms->num_elements; i++) {
468 		/* Skip any non HCAPI cfg elements */
469 		if (parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI ||
470 		    parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA) {
471 			/* Only perform reservation for entries that
472 			 * has been requested
473 			 */
474 			if (parms->alloc_cnt[i] == 0)
475 				continue;
476 
477 			/* Verify that we can get the full amount
478 			 * allocated per the qcaps availability.
479 			 */
480 			if (parms->alloc_cnt[i] <=
481 			    query[parms->cfg[i].hcapi_type].max) {
482 				req[j].type = parms->cfg[i].hcapi_type;
483 				req[j].min = parms->alloc_cnt[i];
484 				req[j].max = parms->alloc_cnt[i];
485 				j++;
486 			} else {
487 				TFP_DRV_LOG(ERR,
488 					    "%s: Resource failure, type:%d\n",
489 					    tf_dir_2_str(parms->dir),
490 					    parms->cfg[i].hcapi_type);
491 				TFP_DRV_LOG(ERR,
492 					"req:%d, avail:%d\n",
493 					parms->alloc_cnt[i],
494 					query[parms->cfg[i].hcapi_type].max);
495 				return -EINVAL;
496 			}
497 		}
498 	}
499 
500 	rc = tf_msg_session_resc_alloc(tfp,
501 				       parms->dir,
502 				       hcapi_items,
503 				       req,
504 				       resv);
505 	if (rc)
506 		return rc;
507 
508 	/* Build the RM DB per the request */
509 	cparms.nitems = 1;
510 	cparms.size = sizeof(struct tf_rm_new_db);
511 	rc = tfp_calloc(&cparms);
512 	if (rc)
513 		return rc;
514 	rm_db = (void *)cparms.mem_va;
515 
516 	/* Build the DB within RM DB */
517 	cparms.nitems = parms->num_elements;
518 	cparms.size = sizeof(struct tf_rm_element);
519 	rc = tfp_calloc(&cparms);
520 	if (rc)
521 		return rc;
522 	rm_db->db = (struct tf_rm_element *)cparms.mem_va;
523 
524 	db = rm_db->db;
525 	for (i = 0, j = 0; i < parms->num_elements; i++) {
526 		db[i].cfg_type = parms->cfg[i].cfg_type;
527 		db[i].hcapi_type = parms->cfg[i].hcapi_type;
528 
529 		/* Skip any non HCAPI types as we didn't include them
530 		 * in the reservation request.
531 		 */
532 		if (parms->cfg[i].cfg_type != TF_RM_ELEM_CFG_HCAPI &&
533 		    parms->cfg[i].cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
534 			continue;
535 
536 		/* If the element didn't request an allocation no need
537 		 * to create a pool nor verify if we got a reservation.
538 		 */
539 		if (parms->alloc_cnt[i] == 0)
540 			continue;
541 
542 		/* If the element had requested an allocation and that
543 		 * allocation was a success (full amount) then
544 		 * allocate the pool.
545 		 */
546 		if (parms->alloc_cnt[i] == resv[j].stride) {
547 			db[i].alloc.entry.start = resv[j].start;
548 			db[i].alloc.entry.stride = resv[j].stride;
549 
550 			/* Only allocate BA pool if so requested */
551 			if (parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA) {
552 				/* Create pool */
553 				pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
554 					     sizeof(struct bitalloc));
555 				/* Alloc request, alignment already set */
556 				cparms.nitems = pool_size;
557 				cparms.size = sizeof(struct bitalloc);
558 				rc = tfp_calloc(&cparms);
559 				if (rc) {
560 					TFP_DRV_LOG(ERR,
561 					     "%s: Pool alloc failed, type:%d\n",
562 					     tf_dir_2_str(parms->dir),
563 					     db[i].cfg_type);
564 					goto fail;
565 				}
566 				db[i].pool = (struct bitalloc *)cparms.mem_va;
567 
568 				rc = ba_init(db[i].pool, resv[j].stride);
569 				if (rc) {
570 					TFP_DRV_LOG(ERR,
571 					     "%s: Pool init failed, type:%d\n",
572 					     tf_dir_2_str(parms->dir),
573 					     db[i].cfg_type);
574 					goto fail;
575 				}
576 			}
577 			j++;
578 		} else {
579 			/* Bail out as we want what we requested for
580 			 * all elements, not any less.
581 			 */
582 			TFP_DRV_LOG(ERR,
583 				    "%s: Alloc failed, type:%d\n",
584 				    tf_dir_2_str(parms->dir),
585 				    db[i].cfg_type);
586 			TFP_DRV_LOG(ERR,
587 				    "req:%d, alloc:%d\n",
588 				    parms->alloc_cnt[i],
589 				    resv[j].stride);
590 			goto fail;
591 		}
592 	}
593 
594 	rm_db->num_entries = parms->num_elements;
595 	rm_db->dir = parms->dir;
596 	rm_db->type = parms->type;
597 	*parms->rm_db = (void *)rm_db;
598 
599 #if (TF_RM_DEBUG == 1)
600 	printf("%s: type:%d num_entries:%d\n",
601 	       tf_dir_2_str(parms->dir),
602 	       parms->type,
603 	       i);
604 #endif /* (TF_RM_DEBUG == 1) */
605 
606 	tfp_free((void *)req);
607 	tfp_free((void *)resv);
608 
609 	return 0;
610 
611  fail:
612 	tfp_free((void *)req);
613 	tfp_free((void *)resv);
614 	tfp_free((void *)db->pool);
615 	tfp_free((void *)db);
616 	tfp_free((void *)rm_db);
617 	parms->rm_db = NULL;
618 
619 	return -EINVAL;
620 }
621 
622 int
623 tf_rm_free_db(struct tf *tfp,
624 	      struct tf_rm_free_db_parms *parms)
625 {
626 	int rc;
627 	int i;
628 	uint16_t resv_size = 0;
629 	struct tf_rm_new_db *rm_db;
630 	struct tf_rm_resc_entry *resv;
631 	bool residuals_found = false;
632 
633 	TF_CHECK_PARMS2(parms, parms->rm_db);
634 
635 	/* Device unbind happens when the TF Session is closed and the
636 	 * session ref count is 0. Device unbind will cleanup each of
637 	 * its support modules, i.e. Identifier, thus we're ending up
638 	 * here to close the DB.
639 	 *
640 	 * On TF Session close it is assumed that the session has already
641 	 * cleaned up all its resources, individually, while
642 	 * destroying its flows.
643 	 *
644 	 * To assist in the 'cleanup checking' the DB is checked for any
645 	 * remaining elements and logged if found to be the case.
646 	 *
647 	 * Any such elements will need to be 'cleared' ahead of
648 	 * returning the resources to the HCAPI RM.
649 	 *
650 	 * RM will signal FW to flush the DB resources. FW will
651 	 * perform the invalidation. TF Session close will return the
652 	 * previous allocated elements to the RM and then close the
653 	 * HCAPI RM registration. That then saves several 'free' msgs
654 	 * from being required.
655 	 */
656 
657 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
658 
659 	/* Check for residuals that the client didn't clean up */
660 	rc = tf_rm_check_residuals(rm_db,
661 				   &resv_size,
662 				   &resv,
663 				   &residuals_found);
664 	if (rc)
665 		return rc;
666 
667 	/* Invalidate any residuals followed by a DB traversal for
668 	 * pool cleanup.
669 	 */
670 	if (residuals_found) {
671 		rc = tf_msg_session_resc_flush(tfp,
672 					       parms->dir,
673 					       resv_size,
674 					       resv);
675 		tfp_free((void *)resv);
676 		/* On failure we still have to cleanup so we can only
677 		 * log that FW failed.
678 		 */
679 		if (rc)
680 			TFP_DRV_LOG(ERR,
681 				    "%s: Internal Flush error, module:%s\n",
682 				    tf_dir_2_str(parms->dir),
683 				    tf_device_module_type_2_str(rm_db->type));
684 	}
685 
686 	/* No need to check for configuration type, even if we do not
687 	 * have a BA pool we just delete on a null ptr, no harm
688 	 */
689 	for (i = 0; i < rm_db->num_entries; i++)
690 		tfp_free((void *)rm_db->db[i].pool);
691 
692 	tfp_free((void *)parms->rm_db);
693 
694 	return rc;
695 }
696 
697 int
698 tf_rm_allocate(struct tf_rm_allocate_parms *parms)
699 {
700 	int rc;
701 	int id;
702 	uint32_t index;
703 	struct tf_rm_new_db *rm_db;
704 	enum tf_rm_elem_cfg_type cfg_type;
705 
706 	TF_CHECK_PARMS2(parms, parms->rm_db);
707 
708 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
709 	if (!rm_db->db)
710 		return -EINVAL;
711 	cfg_type = rm_db->db[parms->db_index].cfg_type;
712 
713 	/* Bail out if not controlled by RM */
714 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
715 		return -ENOTSUP;
716 
717 	/* Bail out if the pool is not valid, should never happen */
718 	if (rm_db->db[parms->db_index].pool == NULL) {
719 		rc = -ENOTSUP;
720 		TFP_DRV_LOG(ERR,
721 			    "%s: Invalid pool for this type:%d, rc:%s\n",
722 			    tf_dir_2_str(rm_db->dir),
723 			    parms->db_index,
724 			    strerror(-rc));
725 		return rc;
726 	}
727 
728 	/*
729 	 * priority  0: allocate from top of the tcam i.e. high
730 	 * priority !0: allocate index from bottom i.e lowest
731 	 */
732 	if (parms->priority)
733 		id = ba_alloc_reverse(rm_db->db[parms->db_index].pool);
734 	else
735 		id = ba_alloc(rm_db->db[parms->db_index].pool);
736 	if (id == BA_FAIL) {
737 		rc = -ENOMEM;
738 		TFP_DRV_LOG(ERR,
739 			    "%s: Allocation failed, rc:%s\n",
740 			    tf_dir_2_str(rm_db->dir),
741 			    strerror(-rc));
742 		return rc;
743 	}
744 
745 	/* Adjust for any non zero start value */
746 	rc = tf_rm_adjust_index(rm_db->db,
747 				TF_RM_ADJUST_ADD_BASE,
748 				parms->db_index,
749 				id,
750 				&index);
751 	if (rc) {
752 		TFP_DRV_LOG(ERR,
753 			    "%s: Alloc adjust of base index failed, rc:%s\n",
754 			    tf_dir_2_str(rm_db->dir),
755 			    strerror(-rc));
756 		return -EINVAL;
757 	}
758 
759 	*parms->index = index;
760 	if (parms->base_index)
761 		*parms->base_index = id;
762 
763 	return rc;
764 }
765 
766 int
767 tf_rm_free(struct tf_rm_free_parms *parms)
768 {
769 	int rc;
770 	uint32_t adj_index;
771 	struct tf_rm_new_db *rm_db;
772 	enum tf_rm_elem_cfg_type cfg_type;
773 
774 	TF_CHECK_PARMS2(parms, parms->rm_db);
775 
776 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
777 	if (!rm_db->db)
778 		return -EINVAL;
779 	cfg_type = rm_db->db[parms->db_index].cfg_type;
780 
781 	/* Bail out if not controlled by RM */
782 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
783 		return -ENOTSUP;
784 
785 	/* Bail out if the pool is not valid, should never happen */
786 	if (rm_db->db[parms->db_index].pool == NULL) {
787 		rc = -ENOTSUP;
788 		TFP_DRV_LOG(ERR,
789 			    "%s: Invalid pool for this type:%d, rc:%s\n",
790 			    tf_dir_2_str(rm_db->dir),
791 			    parms->db_index,
792 			    strerror(-rc));
793 		return rc;
794 	}
795 
796 	/* Adjust for any non zero start value */
797 	rc = tf_rm_adjust_index(rm_db->db,
798 				TF_RM_ADJUST_RM_BASE,
799 				parms->db_index,
800 				parms->index,
801 				&adj_index);
802 	if (rc)
803 		return rc;
804 
805 	rc = ba_free(rm_db->db[parms->db_index].pool, adj_index);
806 	/* No logging direction matters and that is not available here */
807 	if (rc)
808 		return rc;
809 
810 	return rc;
811 }
812 
813 int
814 tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms)
815 {
816 	int rc;
817 	uint32_t adj_index;
818 	struct tf_rm_new_db *rm_db;
819 	enum tf_rm_elem_cfg_type cfg_type;
820 
821 	TF_CHECK_PARMS2(parms, parms->rm_db);
822 
823 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
824 	if (!rm_db->db)
825 		return -EINVAL;
826 	cfg_type = rm_db->db[parms->db_index].cfg_type;
827 
828 	/* Bail out if not controlled by RM */
829 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
830 		return -ENOTSUP;
831 
832 	/* Bail out if the pool is not valid, should never happen */
833 	if (rm_db->db[parms->db_index].pool == NULL) {
834 		rc = -ENOTSUP;
835 		TFP_DRV_LOG(ERR,
836 			    "%s: Invalid pool for this type:%d, rc:%s\n",
837 			    tf_dir_2_str(rm_db->dir),
838 			    parms->db_index,
839 			    strerror(-rc));
840 		return rc;
841 	}
842 
843 	/* Adjust for any non zero start value */
844 	rc = tf_rm_adjust_index(rm_db->db,
845 				TF_RM_ADJUST_RM_BASE,
846 				parms->db_index,
847 				parms->index,
848 				&adj_index);
849 	if (rc)
850 		return rc;
851 
852 	if (parms->base_index)
853 		*parms->base_index = adj_index;
854 	*parms->allocated = ba_inuse(rm_db->db[parms->db_index].pool,
855 				     adj_index);
856 
857 	return rc;
858 }
859 
860 int
861 tf_rm_get_info(struct tf_rm_get_alloc_info_parms *parms)
862 {
863 	struct tf_rm_new_db *rm_db;
864 	enum tf_rm_elem_cfg_type cfg_type;
865 
866 	TF_CHECK_PARMS2(parms, parms->rm_db);
867 
868 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
869 	if (!rm_db->db)
870 		return -EINVAL;
871 	cfg_type = rm_db->db[parms->db_index].cfg_type;
872 
873 	/* Bail out if not controlled by HCAPI */
874 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
875 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
876 		return -ENOTSUP;
877 
878 	memcpy(parms->info,
879 	       &rm_db->db[parms->db_index].alloc,
880 	       sizeof(struct tf_rm_alloc_info));
881 
882 	return 0;
883 }
884 
885 int
886 tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms)
887 {
888 	struct tf_rm_new_db *rm_db;
889 	enum tf_rm_elem_cfg_type cfg_type;
890 
891 	TF_CHECK_PARMS2(parms, parms->rm_db);
892 
893 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
894 	if (!rm_db->db)
895 		return -EINVAL;
896 	cfg_type = rm_db->db[parms->db_index].cfg_type;
897 
898 	/* Bail out if not controlled by HCAPI */
899 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
900 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
901 		return -ENOTSUP;
902 
903 	*parms->hcapi_type = rm_db->db[parms->db_index].hcapi_type;
904 
905 	return 0;
906 }
907 
908 int
909 tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms)
910 {
911 	int rc = 0;
912 	struct tf_rm_new_db *rm_db;
913 	enum tf_rm_elem_cfg_type cfg_type;
914 
915 	TF_CHECK_PARMS2(parms, parms->rm_db);
916 
917 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
918 	if (!rm_db->db)
919 		return -EINVAL;
920 	cfg_type = rm_db->db[parms->db_index].cfg_type;
921 
922 	/* Bail out if not controlled by RM */
923 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
924 		return -ENOTSUP;
925 
926 	/* Bail silently (no logging), if the pool is not valid there
927 	 * was no elements allocated for it.
928 	 */
929 	if (rm_db->db[parms->db_index].pool == NULL) {
930 		*parms->count = 0;
931 		return 0;
932 	}
933 
934 	*parms->count = ba_inuse_count(rm_db->db[parms->db_index].pool);
935 
936 	return rc;
937 
938 }
939 
940 int
941 tf_rm_check_indexes_in_range(struct tf_rm_check_indexes_in_range_parms *parms)
942 {
943 	struct tf_rm_new_db *rm_db;
944 	enum tf_rm_elem_cfg_type cfg_type;
945 	uint32_t base_index;
946 	uint32_t stride;
947 	int rc = 0;
948 
949 	TF_CHECK_PARMS2(parms, parms->rm_db);
950 
951 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
952 	if (!rm_db->db)
953 		return -EINVAL;
954 	cfg_type = rm_db->db[parms->db_index].cfg_type;
955 
956 	/* Bail out if not controlled by RM */
957 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
958 		return -ENOTSUP;
959 
960 	/* Bail out if the pool is not valid, should never happen */
961 	if (rm_db->db[parms->db_index].pool == NULL) {
962 		rc = -ENOTSUP;
963 		TFP_DRV_LOG(ERR,
964 			    "%s: Invalid pool for this type:%d, rc:%s\n",
965 			    tf_dir_2_str(rm_db->dir),
966 			    parms->db_index,
967 			    strerror(-rc));
968 		return rc;
969 	}
970 
971 	base_index = rm_db->db[parms->db_index].alloc.entry.start;
972 	stride = rm_db->db[parms->db_index].alloc.entry.stride;
973 
974 	if (parms->starting_index < base_index ||
975 	    parms->starting_index + parms->num_entries > base_index + stride)
976 		return -EINVAL;
977 
978 	return rc;
979 }
980