xref: /dpdk/drivers/net/bnxt/tf_core/tf_rm.c (revision f8dbaebbf1c9efcbb2e2354b341ed62175466a57)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2021 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <string.h>
7 
8 #include <rte_common.h>
9 #include <rte_debug.h>
10 
11 #include <cfa_resource_types.h>
12 
13 #include "tf_rm.h"
14 #include "tf_common.h"
15 #include "tf_util.h"
16 #include "tf_session.h"
17 #include "tf_device.h"
18 #include "tfp.h"
19 #include "tf_msg.h"
20 
21 /* Logging defines */
22 #define TF_RM_DEBUG  0
23 
24 /**
 * Generic RM Element data type that an RM DB is built upon.
26  */
27 struct tf_rm_element {
28 	/**
29 	 * RM Element configuration type. If Private then the
30 	 * hcapi_type can be ignored. If Null then the element is not
31 	 * valid for the device.
32 	 */
33 	enum tf_rm_elem_cfg_type cfg_type;
34 
35 	/**
36 	 * HCAPI RM Type for the element.
37 	 */
38 	uint16_t hcapi_type;
39 
40 	/**
41 	 * Resource slices.  How many slices will fit in the
42 	 * resource pool chunk size.
43 	 */
44 	uint8_t slices;
45 
46 	/**
47 	 * HCAPI RM allocated range information for the element.
48 	 */
49 	struct tf_rm_alloc_info alloc;
50 
51 	/**
52 	 * If cfg_type == HCAPI_BA_CHILD, this field indicates
53 	 * the parent module subtype for look up into the parent pool.
54 	 * An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
55 	 * module subtype of TF_MODULE_TYPE_TABLE.
56 	 */
57 	uint16_t parent_subtype;
58 
59 	/**
60 	 * Bit allocator pool for the element. Pool size is controlled
61 	 * by the struct tf_session_resources at time of session creation.
62 	 * Null indicates that the pool is not used for the element.
63 	 */
64 	struct bitalloc *pool;
65 };
66 
67 /**
68  * TF RM DB definition
69  */
70 struct tf_rm_new_db {
71 	/**
72 	 * Number of elements in the DB
73 	 */
74 	uint16_t num_entries;
75 
76 	/**
77 	 * Direction this DB controls.
78 	 */
79 	enum tf_dir dir;
80 
81 	/**
82 	 * Module type, used for logging purposes.
83 	 */
84 	enum tf_module_type module;
85 
86 	/**
87 	 * The DB consists of an array of elements
88 	 */
89 	struct tf_rm_element *db;
90 };
91 
/**
 * Count the HCAPI reservations requested for a module.
 *
 * Walks the configuration and counts the elements that are valid for
 * the device and have a reservation value greater than 0. An attempt
 * to reserve an unsupported type is logged, except for the EM module
 * which uses a split configuration array and would otherwise trigger
 * false warnings.
 *
 * [in] dir
 *   Receive or transmit direction
 *
 * [in] module
 *   Type of Device Module
 *
 * [in] cfg
 *   Pointer to the DB configuration
 *
 * [in] reservations
 *   Pointer to the allocation values associated with the module
 *
 * [in] count
 *   Number of DB configuration elements
 *
 * [out] valid_count
 *   Number of HCAPI entries with a reservation value greater than 0
 *
 * Returns:
 *     Nothing
 */
115 static void
116 tf_rm_count_hcapi_reservations(enum tf_dir dir,
117 			       enum tf_module_type module,
118 			       struct tf_rm_element_cfg *cfg,
119 			       uint16_t *reservations,
120 			       uint16_t count,
121 			       uint16_t *valid_count)
122 {
123 	int i;
124 	uint16_t cnt = 0;
125 
126 	for (i = 0; i < count; i++) {
127 		if (cfg[i].cfg_type != TF_RM_ELEM_CFG_NULL &&
128 		    reservations[i] > 0)
129 			cnt++;
130 
		/* Only log a message if a reservation is attempted
		 * for a type that is not supported. The EM module is
		 * ignored as it uses a split configuration array and
		 * would otherwise fail this check.
		 */
136 		if (module != TF_MODULE_TYPE_EM &&
137 		    cfg[i].cfg_type == TF_RM_ELEM_CFG_NULL &&
138 		    reservations[i] > 0) {
139 			TFP_DRV_LOG(ERR,
140 				"%s, %s, %s allocation of %d not supported\n",
141 				tf_module_2_str(module),
142 				tf_dir_2_str(dir),
143 				tf_module_subtype_2_str(module, i),
144 				reservations[i]);
145 		}
146 	}
147 
148 	*valid_count = cnt;
149 }
150 
151 /**
 * Resource Manager base index adjustment definitions.
153  */
154 enum tf_rm_adjust_type {
155 	TF_RM_ADJUST_ADD_BASE, /**< Adds base to the index */
156 	TF_RM_ADJUST_RM_BASE   /**< Removes base from the index */
157 };
158 
159 /**
160  * Adjust an index according to the allocation information.
161  *
162  * All resources are controlled in a 0 based pool. Some resources, by
 * design, are not 0 based, e.g. Full Action Records (SRAM), thus they
164  * need to be adjusted before they are handed out.
165  *
166  * [in] db
167  *   Pointer to the db, used for the lookup
168  *
169  * [in] action
170  *   Adjust action
171  *
172  * [in] subtype
173  *   TF module subtype used as an index into the database.
174  *   An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
175  *   module subtype of TF_MODULE_TYPE_TABLE.
176  *
177  * [in] index
178  *   Index to convert
179  *
180  * [out] adj_index
181  *   Adjusted index
182  *
183  * Returns:
184  *     0          - Success
185  *   - EOPNOTSUPP - Operation not supported
186  */
187 static int
188 tf_rm_adjust_index(struct tf_rm_element *db,
189 		   enum tf_rm_adjust_type action,
190 		   uint32_t subtype,
191 		   uint32_t index,
192 		   uint32_t *adj_index)
193 {
194 	int rc = 0;
195 	uint32_t base_index;
196 
197 	base_index = db[subtype].alloc.entry.start;
198 
199 	switch (action) {
200 	case TF_RM_ADJUST_RM_BASE:
201 		*adj_index = index - base_index;
202 		break;
203 	case TF_RM_ADJUST_ADD_BASE:
204 		*adj_index = index + base_index;
205 		break;
206 	default:
207 		return -EOPNOTSUPP;
208 	}
209 
210 	return rc;
211 }
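
/*
 * Illustrative sketch (not part of the driver): how the two adjust
 * actions are intended to be paired. A 0-based id returned by the bit
 * allocator is converted to a device index on allocate, and a device
 * index supplied by the caller is converted back on free. The local
 * variables below exist only for this example.
 *
 *   uint32_t dev_index, pool_id;
 *
 *   // 0-based pool id -> device index (adds alloc.entry.start)
 *   tf_rm_adjust_index(db, TF_RM_ADJUST_ADD_BASE, subtype, id, &dev_index);
 *
 *   // device index -> 0-based pool id (subtracts alloc.entry.start)
 *   tf_rm_adjust_index(db, TF_RM_ADJUST_RM_BASE, subtype, dev_index,
 *                      &pool_id);
 */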
212 
213 /**
214  * Logs an array of found residual entries to the console.
215  *
216  * [in] dir
217  *   Receive or transmit direction
218  *
219  * [in] module
220  *   Type of Device Module
221  *
222  * [in] count
223  *   Number of entries in the residual array
224  *
225  * [in] residuals
 *   Pointer to an array of residual entries. The array is indexed the
 *   same as the DB in which this function is used. Each entry holds
 *   the residual value for that entry.
229  */
230 #if (TF_RM_DEBUG == 1)
231 static void
232 tf_rm_log_residuals(enum tf_dir dir,
233 		    enum tf_module_type module,
234 		    uint16_t count,
235 		    uint16_t *residuals)
236 {
237 	int i;
238 
	/* Walk the residual array and log to the console the types
	 * that weren't cleaned up.
241 	 */
242 	for (i = 0; i < count; i++) {
243 		if (residuals[i] != 0)
244 			TFP_DRV_LOG(INFO,
245 				"%s, %s was not cleaned up, %d outstanding\n",
246 				tf_dir_2_str(dir),
247 				tf_module_subtype_2_str(module, i),
248 				residuals[i]);
249 	}
250 }
#endif /* TF_RM_DEBUG == 1 */

252 /**
253  * Performs a check of the passed in DB for any lingering elements. If
254  * a resource type was found to not have been cleaned up by the caller
255  * then its residual values are recorded, logged and passed back in an
 * allocated reservation array that the caller can pass to the FW for
257  * cleanup.
258  *
 * [in] rm_db
 *   Pointer to the RM DB, used for the lookup
261  *
262  * [out] resv_size
263  *   Pointer to the reservation size of the generated reservation
264  *   array.
265  *
266  * [in/out] resv
 *   Pointer to a pointer to a reservation array. The reservation array is
268  *   allocated after the residual scan and holds any found residual
269  *   entries. Thus it can be smaller than the DB that the check was
270  *   performed on. Array must be freed by the caller.
271  *
272  * [out] residuals_present
273  *   Pointer to a bool flag indicating if residual was present in the
274  *   DB
275  *
276  * Returns:
277  *     0          - Success
278  *   - EOPNOTSUPP - Operation not supported
279  */
280 static int
281 tf_rm_check_residuals(struct tf_rm_new_db *rm_db,
282 		      uint16_t *resv_size,
283 		      struct tf_rm_resc_entry **resv,
284 		      bool *residuals_present)
285 {
286 	int rc;
287 	int i;
288 	int f;
289 	uint16_t count;
290 	uint16_t found;
291 	uint16_t *residuals = NULL;
292 	uint16_t hcapi_type;
293 	struct tf_rm_get_inuse_count_parms iparms;
294 	struct tf_rm_get_alloc_info_parms aparms;
295 	struct tf_rm_get_hcapi_parms hparms;
296 	struct tf_rm_alloc_info info;
297 	struct tfp_calloc_parms cparms;
298 	struct tf_rm_resc_entry *local_resv = NULL;
299 
300 	/* Create array to hold the entries that have residuals */
301 	cparms.nitems = rm_db->num_entries;
302 	cparms.size = sizeof(uint16_t);
303 	cparms.alignment = 0;
304 	rc = tfp_calloc(&cparms);
305 	if (rc)
306 		return rc;
307 
308 	residuals = (uint16_t *)cparms.mem_va;
309 
310 	/* Traverse the DB and collect any residual elements */
311 	iparms.rm_db = rm_db;
312 	iparms.count = &count;
313 	for (i = 0, found = 0; i < rm_db->num_entries; i++) {
314 		iparms.subtype = i;
315 		rc = tf_rm_get_inuse_count(&iparms);
316 		/* Not a device supported entry, just skip */
317 		if (rc == -ENOTSUP)
318 			continue;
319 		if (rc)
320 			goto cleanup_residuals;
321 
322 		if (count) {
323 			found++;
324 			residuals[i] = count;
325 			*residuals_present = true;
326 		}
327 	}
328 
329 	if (*residuals_present) {
330 		/* Populate a reduced resv array with only the entries
331 		 * that have residuals.
332 		 */
333 		cparms.nitems = found;
334 		cparms.size = sizeof(struct tf_rm_resc_entry);
335 		cparms.alignment = 0;
336 		rc = tfp_calloc(&cparms);
337 		if (rc)
			goto cleanup_residuals;
339 
340 		local_resv = (struct tf_rm_resc_entry *)cparms.mem_va;
341 
342 		aparms.rm_db = rm_db;
343 		hparms.rm_db = rm_db;
344 		hparms.hcapi_type = &hcapi_type;
345 		for (i = 0, f = 0; i < rm_db->num_entries; i++) {
346 			if (residuals[i] == 0)
347 				continue;
348 			aparms.subtype = i;
349 			aparms.info = &info;
350 			rc = tf_rm_get_info(&aparms);
351 			if (rc)
352 				goto cleanup_all;
353 
354 			hparms.subtype = i;
355 			rc = tf_rm_get_hcapi_type(&hparms);
356 			if (rc)
357 				goto cleanup_all;
358 
359 			local_resv[f].type = hcapi_type;
360 			local_resv[f].start = info.entry.start;
361 			local_resv[f].stride = info.entry.stride;
362 			f++;
363 		}
364 		*resv_size = found;
365 	}
366 
367 #if (TF_RM_DEBUG == 1)
368 	tf_rm_log_residuals(rm_db->dir,
369 			    rm_db->module,
370 			    rm_db->num_entries,
371 			    residuals);
372 #endif
373 	tfp_free((void *)residuals);
374 	*resv = local_resv;
375 
376 	return 0;
377 
378  cleanup_all:
379 	tfp_free((void *)local_resv);
380 	*resv = NULL;
381  cleanup_residuals:
382 	tfp_free((void *)residuals);
383 
384 	return rc;
385 }
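
/*
 * Illustrative sketch (not part of the driver): typical use of the
 * residual check from a DB tear-down path, mirroring what
 * tf_rm_free_db() does below. The tfp, rm_db and dir values are
 * assumed to exist for the example.
 *
 *   uint16_t resv_size = 0;
 *   struct tf_rm_resc_entry *resv;
 *   bool residuals_found = false;
 *
 *   rc = tf_rm_check_residuals(rm_db, &resv_size, &resv, &residuals_found);
 *   if (!rc && residuals_found) {
 *           // Ask FW to invalidate whatever was left allocated
 *           rc = tf_msg_session_resc_flush(tfp, dir, resv_size, resv);
 *           tfp_free((void *)resv);
 *   }
 */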
386 
387 /**
388  * Some resources do not have a 1:1 mapping between the Truflow type and the cfa
389  * resource type (HCAPI RM).  These resources have multiple Truflow types which
390  * map to a single HCAPI RM type.  In order to support this, one Truflow type
391  * sharing the HCAPI resources is designated the parent.  All other Truflow
392  * types associated with that HCAPI RM type are designated the children.
393  *
394  * This function updates the resource counts of any HCAPI_BA_PARENT with the
395  * counts of the HCAPI_BA_CHILDREN.  These are read from the alloc_cnt and
396  * written back to the req_cnt.
397  *
398  * [in] cfg
399  *   Pointer to an array of module specific Truflow type indexed RM cfg items
400  *
401  * [in] alloc_cnt
402  *   Pointer to the tf_open_session() configured array of module specific
403  *   Truflow type indexed requested counts.
404  *
405  * [in/out] req_cnt
406  *   Pointer to the location to put the updated resource counts.
407  *
408  * Returns:
409  *     0          - Success
 *     Negative   - Failure
411  */
412 static int
413 tf_rm_update_parent_reservations(struct tf *tfp,
414 				 struct tf_dev_info *dev,
415 				 struct tf_rm_element_cfg *cfg,
416 				 uint16_t *alloc_cnt,
417 				 uint16_t num_elements,
418 				 uint16_t *req_cnt,
419 				 bool shared_session)
420 {
421 	int parent, child;
422 	const char *type_str;
423 
424 	/* Search through all the elements */
425 	for (parent = 0; parent < num_elements; parent++) {
426 		uint16_t combined_cnt = 0;
427 
428 		/* If I am a parent */
429 		if (cfg[parent].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
430 			uint8_t p_slices = 1;
431 
432 			/* Shared session doesn't support slices */
433 			if (!shared_session)
434 				p_slices = cfg[parent].slices;
435 
436 			RTE_ASSERT(p_slices);
437 
438 			combined_cnt = alloc_cnt[parent] / p_slices;
439 
440 			if (alloc_cnt[parent] % p_slices)
441 				combined_cnt++;
442 
443 			if (alloc_cnt[parent]) {
444 				dev->ops->tf_dev_get_resource_str(tfp,
445 							 cfg[parent].hcapi_type,
446 							 &type_str);
447 #if (TF_RM_DEBUG == 1)
448 				printf("%s:%s cnt(%d) slices(%d)\n",
449 				       type_str, tf_tbl_type_2_str(parent),
450 				       alloc_cnt[parent], p_slices);
451 #endif /* (TF_RM_DEBUG == 1) */
452 			}
453 
454 			/* Search again through all the elements */
455 			for (child = 0; child < num_elements; child++) {
456 				/* If this is one of my children */
457 				if (cfg[child].cfg_type ==
458 				    TF_RM_ELEM_CFG_HCAPI_BA_CHILD &&
459 				    cfg[child].parent_subtype == parent &&
460 				    alloc_cnt[child]) {
461 					uint8_t c_slices = 1;
462 					uint16_t cnt = 0;
463 
464 					if (!shared_session)
465 						c_slices = cfg[child].slices;
466 
467 					RTE_ASSERT(c_slices);
468 
469 					dev->ops->tf_dev_get_resource_str(tfp,
470 							  cfg[child].hcapi_type,
471 							   &type_str);
472 #if (TF_RM_DEBUG == 1)
473 					printf("%s:%s cnt(%d) slices(%d)\n",
474 					       type_str,
475 					       tf_tbl_type_2_str(child),
476 					       alloc_cnt[child],
477 					       c_slices);
478 #endif /* (TF_RM_DEBUG == 1) */
					/* Increment the parent's combined count
480 					 * with each child's count adjusted for
481 					 * number of slices per RM alloc item.
482 					 */
483 					cnt = alloc_cnt[child] / c_slices;
484 
485 					if (alloc_cnt[child] % c_slices)
486 						cnt++;
487 
488 					combined_cnt += cnt;
489 					/* Clear the requested child count */
490 					req_cnt[child] = 0;
491 				}
492 			}
493 			/* Save the parent count to be requested */
494 			req_cnt[parent] = combined_cnt;
495 #if (TF_RM_DEBUG == 1)
496 			printf("%s calculated total:%d\n\n",
497 			       type_str, req_cnt[parent]);
498 #endif /* (TF_RM_DEBUG == 1) */
499 		}
500 	}
501 	return 0;
502 }
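
/*
 * Worked example (illustrative numbers only): with a parent configured
 * for 2 slices and 511 requested entries, and one child configured for
 * 4 slices and 63 requested entries, the reservation becomes:
 *
 *   parent: 511 / 2 = 255, remainder  -> 256
 *   child :  63 / 4 = 15,  remainder  -> 16
 *   req_cnt[parent] = 256 + 16 = 272
 *   req_cnt[child]  = 0
 *
 * i.e. the children are folded into the parent's HCAPI RM request and
 * no longer request resources of their own.
 */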
503 
504 int
505 tf_rm_create_db(struct tf *tfp,
506 		struct tf_rm_create_db_parms *parms)
507 {
508 	int rc;
509 	struct tf_session *tfs;
510 	struct tf_dev_info *dev;
511 	int i, j;
512 	uint16_t max_types, hcapi_items, *req_cnt;
513 	struct tfp_calloc_parms cparms;
514 	struct tf_rm_resc_req_entry *query;
515 	enum tf_rm_resc_resv_strategy resv_strategy;
516 	struct tf_rm_resc_req_entry *req;
517 	struct tf_rm_resc_entry *resv;
518 	struct tf_rm_new_db *rm_db;
519 	struct tf_rm_element *db;
520 	uint32_t pool_size;
521 	bool shared_session = 0;
522 
523 	TF_CHECK_PARMS2(tfp, parms);
524 
525 	/* Retrieve the session information */
526 	rc = tf_session_get_session_internal(tfp, &tfs);
527 	if (rc)
528 		return rc;
529 
530 	/* Retrieve device information */
531 	rc = tf_session_get_device(tfs, &dev);
532 	if (rc)
533 		return rc;
534 
535 	/* Need device max number of elements for the RM QCAPS */
	rc = dev->ops->tf_dev_get_max_types(tfp, &max_types);
	if (rc)
		return rc;
537 
538 	/* Allocate memory for RM QCAPS request */
539 	cparms.nitems = max_types;
540 	cparms.size = sizeof(struct tf_rm_resc_req_entry);
541 	cparms.alignment = 0;
542 	rc = tfp_calloc(&cparms);
543 	if (rc)
544 		return rc;
545 
546 	query = (struct tf_rm_resc_req_entry *)cparms.mem_va;
547 
548 	/* Get Firmware Capabilities */
549 	rc = tf_msg_session_resc_qcaps(tfp,
550 				       dev,
551 				       parms->dir,
552 				       max_types,
553 				       query,
554 				       &resv_strategy,
555 				       NULL);
556 	if (rc)
557 		return rc;
558 
559 	/* Copy requested counts (alloc_cnt) from tf_open_session() to local
560 	 * copy (req_cnt) so that it can be updated if required.
561 	 */
562 
563 	cparms.nitems = parms->num_elements;
564 	cparms.size = sizeof(uint16_t);
565 	rc = tfp_calloc(&cparms);
566 	if (rc)
567 		return rc;
568 
569 	req_cnt = (uint16_t *)cparms.mem_va;
570 
571 	tfp_memcpy(req_cnt, parms->alloc_cnt,
572 		   parms->num_elements * sizeof(uint16_t));
573 
574 	shared_session = tf_session_is_shared_session(tfs);
575 
576 	/* Update the req_cnt based upon the element configuration
577 	 */
578 	tf_rm_update_parent_reservations(tfp, dev, parms->cfg,
579 					 parms->alloc_cnt,
580 					 parms->num_elements,
581 					 req_cnt,
582 					 shared_session);
583 
	/* Process capabilities against the DB requirements. A DB can
	 * hold elements that are not HCAPI, so those are removed from
	 * the request to reduce the message content, while the DB still
	 * holds them all to provide a fast lookup. Entries with no
	 * requested elements are also removed.
589 	 */
590 	tf_rm_count_hcapi_reservations(parms->dir,
591 				       parms->module,
592 				       parms->cfg,
593 				       req_cnt,
594 				       parms->num_elements,
595 				       &hcapi_items);
596 
597 	if (hcapi_items == 0) {
598 #if (TF_RM_DEBUG == 1)
599 		TFP_DRV_LOG(INFO,
600 			"%s: module: %s Empty RM DB create request\n",
601 			tf_dir_2_str(parms->dir),
602 			tf_module_2_str(parms->module));
603 #endif
604 		parms->rm_db = NULL;
605 		return -ENOMEM;
606 	}
607 
608 	/* Alloc request, alignment already set */
609 	cparms.nitems = (size_t)hcapi_items;
610 	cparms.size = sizeof(struct tf_rm_resc_req_entry);
611 	rc = tfp_calloc(&cparms);
612 	if (rc)
613 		return rc;
614 	req = (struct tf_rm_resc_req_entry *)cparms.mem_va;
615 
616 	/* Alloc reservation, alignment and nitems already set */
617 	cparms.size = sizeof(struct tf_rm_resc_entry);
618 	rc = tfp_calloc(&cparms);
619 	if (rc)
620 		return rc;
621 	resv = (struct tf_rm_resc_entry *)cparms.mem_va;
622 
623 	/* Build the request */
624 	for (i = 0, j = 0; i < parms->num_elements; i++) {
625 		struct tf_rm_element_cfg *cfg = &parms->cfg[i];
626 		uint16_t hcapi_type = cfg->hcapi_type;
627 
628 		/* Only perform reservation for requested entries
629 		 */
630 		if (req_cnt[i] == 0)
631 			continue;
632 
633 		/* Skip any children in the request */
634 		if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI ||
635 		    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
636 		    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
637 
638 			/* Verify that we can get the full amount per qcaps.
639 			 */
640 			if (req_cnt[i] <= query[hcapi_type].max) {
641 				req[j].type = hcapi_type;
642 				req[j].min = req_cnt[i];
643 				req[j].max = req_cnt[i];
644 				j++;
645 			} else {
646 				const char *type_str;
647 
648 				dev->ops->tf_dev_get_resource_str(tfp,
649 							      hcapi_type,
650 							      &type_str);
651 				TFP_DRV_LOG(ERR,
652 					"Failure, %s:%d:%s req:%d avail:%d\n",
653 					tf_dir_2_str(parms->dir),
654 					hcapi_type, type_str,
655 					req_cnt[i],
656 					query[hcapi_type].max);
657 				return -EINVAL;
658 			}
659 		}
660 	}
661 
662 	/* Allocate all resources for the module type
663 	 */
664 	rc = tf_msg_session_resc_alloc(tfp,
665 				       dev,
666 				       parms->dir,
667 				       hcapi_items,
668 				       req,
669 				       resv);
670 	if (rc)
671 		return rc;
672 
673 	/* Build the RM DB per the request */
674 	cparms.nitems = 1;
675 	cparms.size = sizeof(struct tf_rm_new_db);
676 	rc = tfp_calloc(&cparms);
677 	if (rc)
678 		return rc;
679 	rm_db = (void *)cparms.mem_va;
680 
681 	/* Build the DB within RM DB */
682 	cparms.nitems = parms->num_elements;
683 	cparms.size = sizeof(struct tf_rm_element);
684 	rc = tfp_calloc(&cparms);
685 	if (rc)
686 		return rc;
687 	rm_db->db = (struct tf_rm_element *)cparms.mem_va;
688 
689 	db = rm_db->db;
690 	for (i = 0, j = 0; i < parms->num_elements; i++) {
691 		struct tf_rm_element_cfg *cfg = &parms->cfg[i];
692 		const char *type_str;
693 
694 		dev->ops->tf_dev_get_resource_str(tfp,
695 						  cfg->hcapi_type,
696 						  &type_str);
697 
698 		db[i].cfg_type = cfg->cfg_type;
699 		db[i].hcapi_type = cfg->hcapi_type;
700 		db[i].slices = cfg->slices;
701 
702 		/* Save the parent subtype for later use to find the pool
703 		 */
704 		if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
705 			db[i].parent_subtype = cfg->parent_subtype;
706 
707 		/* If the element didn't request an allocation no need
708 		 * to create a pool nor verify if we got a reservation.
709 		 */
710 		if (req_cnt[i] == 0)
711 			continue;
712 
713 		/* Skip any children or invalid
714 		 */
715 		if (cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI &&
716 		    cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
717 		    cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT)
718 			continue;
719 
720 		/* If the element had requested an allocation and that
721 		 * allocation was a success (full amount) then
722 		 * allocate the pool.
723 		 */
724 		if (req_cnt[i] == resv[j].stride) {
725 			db[i].alloc.entry.start = resv[j].start;
726 			db[i].alloc.entry.stride = resv[j].stride;
727 
728 			/* Only allocate BA pool if a BA type not a child */
729 			if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
730 			    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
731 				/* Create pool */
732 				pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
733 					     sizeof(struct bitalloc));
734 				/* Alloc request, alignment already set */
735 				cparms.nitems = pool_size;
736 				cparms.size = sizeof(struct bitalloc);
737 				rc = tfp_calloc(&cparms);
738 				if (rc) {
739 					TFP_DRV_LOG(ERR,
740 					 "%s: Pool alloc failed, type:%d:%s\n",
741 					 tf_dir_2_str(parms->dir),
742 					 cfg->hcapi_type, type_str);
743 					goto fail;
744 				}
745 				db[i].pool = (struct bitalloc *)cparms.mem_va;
746 
747 				rc = ba_init(db[i].pool,
748 					     resv[j].stride,
749 					     !tf_session_is_shared_session(tfs));
750 				if (rc) {
751 					TFP_DRV_LOG(ERR,
752 					  "%s: Pool init failed, type:%d:%s\n",
753 					  tf_dir_2_str(parms->dir),
754 					  cfg->hcapi_type, type_str);
755 					goto fail;
756 				}
757 			}
758 			j++;
759 		} else {
760 			/* Bail out as we want what we requested for
761 			 * all elements, not any less.
762 			 */
763 			TFP_DRV_LOG(ERR,
764 				    "%s: Alloc failed %d:%s req:%d, alloc:%d\n",
765 				    tf_dir_2_str(parms->dir), cfg->hcapi_type,
766 				    type_str, req_cnt[i], resv[j].stride);
767 			goto fail;
768 		}
769 	}
770 
771 	rm_db->num_entries = parms->num_elements;
772 	rm_db->dir = parms->dir;
773 	rm_db->module = parms->module;
774 	*parms->rm_db = (void *)rm_db;
775 
776 #if (TF_RM_DEBUG == 1)
777 
778 	printf("%s: module:%s\n",
779 	       tf_dir_2_str(parms->dir),
780 	       tf_module_2_str(parms->module));
781 #endif /* (TF_RM_DEBUG == 1) */
782 
783 	tfp_free((void *)req);
784 	tfp_free((void *)resv);
785 	tfp_free((void *)req_cnt);
786 	return 0;
787 
788  fail:
789 	tfp_free((void *)req);
790 	tfp_free((void *)resv);
	/* Free any BA pools allocated before the failure */
	for (i = 0; i < parms->num_elements; i++)
		tfp_free((void *)db[i].pool);
792 	tfp_free((void *)db);
793 	tfp_free((void *)rm_db);
794 	tfp_free((void *)req_cnt);
795 	parms->rm_db = NULL;
796 
797 	return -EINVAL;
798 }
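
/*
 * Illustrative sketch (not part of the driver): how a module bind
 * routine is expected to request its DB. The cfg array, count and
 * requested-count array are assumptions for the example only.
 *
 *   struct tf_rm_create_db_parms db_cfg = { 0 };
 *   void *tbl_db;
 *
 *   db_cfg.module = TF_MODULE_TYPE_TABLE;
 *   db_cfg.dir = TF_DIR_RX;
 *   db_cfg.cfg = tbl_cfg;                  // per-subtype cfg array
 *   db_cfg.num_elements = num_tbl_types;   // entries in tbl_cfg
 *   db_cfg.alloc_cnt = tbl_cnt;            // requested count per subtype
 *   db_cfg.rm_db = &tbl_db;
 *   rc = tf_rm_create_db(tfp, &db_cfg);
 */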
799 
800 int
801 tf_rm_create_db_no_reservation(struct tf *tfp,
802 			       struct tf_rm_create_db_parms *parms)
803 {
804 	int rc;
805 	struct tf_session *tfs;
806 	struct tf_dev_info *dev;
807 	int i, j;
808 	uint16_t hcapi_items, *req_cnt;
809 	struct tfp_calloc_parms cparms;
810 	struct tf_rm_resc_req_entry *req;
811 	struct tf_rm_resc_entry *resv;
812 	struct tf_rm_new_db *rm_db;
813 	struct tf_rm_element *db;
814 	uint32_t pool_size;
815 
816 	TF_CHECK_PARMS2(tfp, parms);
817 
818 	/* Retrieve the session information */
819 	rc = tf_session_get_session_internal(tfp, &tfs);
820 	if (rc)
821 		return rc;
822 
823 	/* Retrieve device information */
824 	rc = tf_session_get_device(tfs, &dev);
825 	if (rc)
826 		return rc;
827 
828 	/* Copy requested counts (alloc_cnt) from tf_open_session() to local
829 	 * copy (req_cnt) so that it can be updated if required.
830 	 */
831 
832 	cparms.nitems = parms->num_elements;
833 	cparms.size = sizeof(uint16_t);
834 	cparms.alignment = 0;
835 	rc = tfp_calloc(&cparms);
836 	if (rc)
837 		return rc;
838 
839 	req_cnt = (uint16_t *)cparms.mem_va;
840 
841 	tfp_memcpy(req_cnt, parms->alloc_cnt,
842 		   parms->num_elements * sizeof(uint16_t));
843 
	/* Process capabilities against the DB requirements. A DB can
	 * hold elements that are not HCAPI, so those are removed from
	 * the request to reduce the message content, while the DB still
	 * holds them all to provide a fast lookup. Entries with no
	 * requested elements are also removed.
849 	 */
850 	tf_rm_count_hcapi_reservations(parms->dir,
851 				       parms->module,
852 				       parms->cfg,
853 				       req_cnt,
854 				       parms->num_elements,
855 				       &hcapi_items);
856 
857 	if (hcapi_items == 0) {
858 		TFP_DRV_LOG(ERR,
859 			"%s: module:%s Empty RM DB create request\n",
860 			tf_dir_2_str(parms->dir),
861 			tf_module_2_str(parms->module));
862 
863 		parms->rm_db = NULL;
864 		return -ENOMEM;
865 	}
866 
867 	/* Alloc request, alignment already set */
868 	cparms.nitems = (size_t)hcapi_items;
869 	cparms.size = sizeof(struct tf_rm_resc_req_entry);
870 	rc = tfp_calloc(&cparms);
871 	if (rc)
872 		return rc;
873 	req = (struct tf_rm_resc_req_entry *)cparms.mem_va;
874 
875 	/* Alloc reservation, alignment and nitems already set */
876 	cparms.size = sizeof(struct tf_rm_resc_entry);
877 	rc = tfp_calloc(&cparms);
878 	if (rc)
879 		return rc;
880 	resv = (struct tf_rm_resc_entry *)cparms.mem_va;
881 
882 	/* Build the request */
883 	for (i = 0, j = 0; i < parms->num_elements; i++) {
884 		struct tf_rm_element_cfg *cfg = &parms->cfg[i];
885 		uint16_t hcapi_type = cfg->hcapi_type;
886 
887 		/* Only perform reservation for requested entries
888 		 */
889 		if (req_cnt[i] == 0)
890 			continue;
891 
892 		/* Skip any children in the request */
893 		if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI ||
894 		    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
895 		    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
896 			req[j].type = hcapi_type;
897 			req[j].min = req_cnt[i];
898 			req[j].max = req_cnt[i];
899 			j++;
900 		}
901 	}
902 
903 	/* Get all resources info for the module type
904 	 */
905 	rc = tf_msg_session_resc_info(tfp,
906 				      dev,
907 				      parms->dir,
908 				      hcapi_items,
909 				      req,
910 				      resv);
911 	if (rc)
912 		return rc;
913 
914 	/* Build the RM DB per the request */
915 	cparms.nitems = 1;
916 	cparms.size = sizeof(struct tf_rm_new_db);
917 	rc = tfp_calloc(&cparms);
918 	if (rc)
919 		return rc;
920 	rm_db = (void *)cparms.mem_va;
921 
922 	/* Build the DB within RM DB */
923 	cparms.nitems = parms->num_elements;
924 	cparms.size = sizeof(struct tf_rm_element);
925 	rc = tfp_calloc(&cparms);
926 	if (rc)
927 		return rc;
928 	rm_db->db = (struct tf_rm_element *)cparms.mem_va;
929 
930 	db = rm_db->db;
931 	for (i = 0, j = 0; i < parms->num_elements; i++) {
932 		struct tf_rm_element_cfg *cfg = &parms->cfg[i];
933 		const char *type_str;
934 
935 		dev->ops->tf_dev_get_resource_str(tfp,
936 						  cfg->hcapi_type,
937 						  &type_str);
938 
939 		db[i].cfg_type = cfg->cfg_type;
940 		db[i].hcapi_type = cfg->hcapi_type;
941 
942 		/* Save the parent subtype for later use to find the pool
943 		 */
944 		if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
945 			db[i].parent_subtype = cfg->parent_subtype;
946 
947 		/* If the element didn't request an allocation no need
948 		 * to create a pool nor verify if we got a reservation.
949 		 */
950 		if (req_cnt[i] == 0)
951 			continue;
952 
953 		/* Skip any children or invalid
954 		 */
955 		if (cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI &&
956 		    cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
957 		    cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT)
958 			continue;
959 
960 		/* If the element had requested an allocation and that
961 		 * allocation was a success (full amount) then
962 		 * allocate the pool.
963 		 */
964 		if (req_cnt[i] == resv[j].stride) {
965 			db[i].alloc.entry.start = resv[j].start;
966 			db[i].alloc.entry.stride = resv[j].stride;
967 
968 			/* Only allocate BA pool if a BA type not a child */
969 			if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
970 			    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
971 				/* Create pool */
972 				pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
973 					     sizeof(struct bitalloc));
974 				/* Alloc request, alignment already set */
975 				cparms.nitems = pool_size;
976 				cparms.size = sizeof(struct bitalloc);
977 				rc = tfp_calloc(&cparms);
978 				if (rc) {
979 					TFP_DRV_LOG(ERR,
980 					 "%s: Pool alloc failed, type:%d:%s\n",
981 					 tf_dir_2_str(parms->dir),
982 					 cfg->hcapi_type, type_str);
983 					goto fail;
984 				}
985 				db[i].pool = (struct bitalloc *)cparms.mem_va;
986 
987 				rc = ba_init(db[i].pool,
988 					     resv[j].stride,
989 					     !tf_session_is_shared_session(tfs));
990 				if (rc) {
991 					TFP_DRV_LOG(ERR,
992 					  "%s: Pool init failed, type:%d:%s\n",
993 					  tf_dir_2_str(parms->dir),
994 					  cfg->hcapi_type, type_str);
995 					goto fail;
996 				}
997 			}
998 			j++;
999 		} else {
1000 			/* Bail out as we want what we requested for
1001 			 * all elements, not any less.
1002 			 */
1003 			TFP_DRV_LOG(ERR,
1004 				    "%s: Alloc failed %d:%s req:%d, alloc:%d\n",
1005 				    tf_dir_2_str(parms->dir), cfg->hcapi_type,
1006 				    type_str, req_cnt[i], resv[j].stride);
1007 			goto fail;
1008 		}
1009 	}
1010 
1011 	rm_db->num_entries = parms->num_elements;
1012 	rm_db->dir = parms->dir;
1013 	rm_db->module = parms->module;
1014 	*parms->rm_db = (void *)rm_db;
1015 
1016 #if (TF_RM_DEBUG == 1)
1017 
1018 	printf("%s: module:%s\n",
1019 	       tf_dir_2_str(parms->dir),
1020 	       tf_module_2_str(parms->module));
1021 #endif /* (TF_RM_DEBUG == 1) */
1022 
1023 	tfp_free((void *)req);
1024 	tfp_free((void *)resv);
1025 	tfp_free((void *)req_cnt);
1026 	return 0;
1027 
1028  fail:
1029 	tfp_free((void *)req);
1030 	tfp_free((void *)resv);
	/* Free any BA pools allocated before the failure */
	for (i = 0; i < parms->num_elements; i++)
		tfp_free((void *)db[i].pool);
1032 	tfp_free((void *)db);
1033 	tfp_free((void *)rm_db);
1034 	tfp_free((void *)req_cnt);
1035 	parms->rm_db = NULL;
1036 
1037 	return -EINVAL;
}

1039 int
1040 tf_rm_free_db(struct tf *tfp,
1041 	      struct tf_rm_free_db_parms *parms)
1042 {
1043 	int rc;
1044 	int i;
1045 	uint16_t resv_size = 0;
1046 	struct tf_rm_new_db *rm_db;
1047 	struct tf_rm_resc_entry *resv;
1048 	bool residuals_found = false;
1049 
1050 	TF_CHECK_PARMS2(parms, parms->rm_db);
1051 
1052 	/* Device unbind happens when the TF Session is closed and the
1053 	 * session ref count is 0. Device unbind will cleanup each of
1054 	 * its support modules, i.e. Identifier, thus we're ending up
1055 	 * here to close the DB.
1056 	 *
1057 	 * On TF Session close it is assumed that the session has already
1058 	 * cleaned up all its resources, individually, while
1059 	 * destroying its flows.
1060 	 *
1061 	 * To assist in the 'cleanup checking' the DB is checked for any
1062 	 * remaining elements and logged if found to be the case.
1063 	 *
1064 	 * Any such elements will need to be 'cleared' ahead of
1065 	 * returning the resources to the HCAPI RM.
1066 	 *
1067 	 * RM will signal FW to flush the DB resources. FW will
1068 	 * perform the invalidation. TF Session close will return the
1069 	 * previous allocated elements to the RM and then close the
1070 	 * HCAPI RM registration. That then saves several 'free' msgs
1071 	 * from being required.
1072 	 */
1073 
1074 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1075 
1076 	/* Check for residuals that the client didn't clean up */
1077 	rc = tf_rm_check_residuals(rm_db,
1078 				   &resv_size,
1079 				   &resv,
1080 				   &residuals_found);
1081 	if (rc)
1082 		return rc;
1083 
1084 	/* Invalidate any residuals followed by a DB traversal for
1085 	 * pool cleanup.
1086 	 */
1087 	if (residuals_found) {
1088 		rc = tf_msg_session_resc_flush(tfp,
1089 					       parms->dir,
1090 					       resv_size,
1091 					       resv);
1092 		tfp_free((void *)resv);
		/* On failure we still have to clean up, so we can only
		 * log that FW failed.
		 */
1096 		if (rc)
1097 			TFP_DRV_LOG(ERR,
1098 				    "%s: Internal Flush error, module:%s\n",
1099 				    tf_dir_2_str(parms->dir),
1100 				    tf_module_2_str(rm_db->module));
1101 	}
1102 
	/* No need to check the configuration type; even if an element
	 * has no BA pool, freeing a NULL pointer is harmless.
	 */
1106 	for (i = 0; i < rm_db->num_entries; i++)
1107 		tfp_free((void *)rm_db->db[i].pool);
1108 
1109 	tfp_free((void *)parms->rm_db);
1110 
1111 	return rc;
}

1113 /**
1114  * Get the bit allocator pool associated with the subtype and the db
1115  *
1116  * [in] rm_db
1117  *   Pointer to the DB
1118  *
1119  * [in] subtype
1120  *   Module subtype used to index into the module specific database.
1121  *   An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
1122  *   module subtype of TF_MODULE_TYPE_TABLE.
1123  *
 * [out] pool
 *   Pointer to the bit allocator pool used
 *
 * [out] new_subtype
 *   Pointer to the subtype of the actual pool used
 *
 * Returns:
1130  *     0          - Success
1131  *   - ENOTSUP    - Operation not supported
1132  */
1133 static int
1134 tf_rm_get_pool(struct tf_rm_new_db *rm_db,
1135 	       uint16_t subtype,
1136 	       struct bitalloc **pool,
1137 	       uint16_t *new_subtype)
1138 {
1139 	int rc = 0;
1140 	uint16_t tmp_subtype = subtype;
1141 
1142 	/* If we are a child, get the parent table index */
1143 	if (rm_db->db[subtype].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1144 		tmp_subtype = rm_db->db[subtype].parent_subtype;
1145 
1146 	*pool = rm_db->db[tmp_subtype].pool;
1147 
1148 	/* Bail out if the pool is not valid, should never happen */
1149 	if (rm_db->db[tmp_subtype].pool == NULL) {
1150 		rc = -ENOTSUP;
1151 		TFP_DRV_LOG(ERR,
1152 			    "%s: Invalid pool for this type:%d, rc:%s\n",
1153 			    tf_dir_2_str(rm_db->dir),
1154 			    tmp_subtype,
1155 			    strerror(-rc));
1156 		return rc;
1157 	}
1158 	*new_subtype = tmp_subtype;
1159 	return rc;
1160 }
1161 
1162 int
1163 tf_rm_allocate(struct tf_rm_allocate_parms *parms)
1164 {
1165 	int rc;
1166 	int id;
1167 	uint32_t index;
1168 	struct tf_rm_new_db *rm_db;
1169 	enum tf_rm_elem_cfg_type cfg_type;
1170 	struct bitalloc *pool;
1171 	uint16_t subtype;
1172 
1173 	TF_CHECK_PARMS2(parms, parms->rm_db);
1174 
1175 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1176 	TF_CHECK_PARMS1(rm_db->db);
1177 
1178 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1179 
1180 	/* Bail out if not controlled by RM */
1181 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1182 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1183 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1184 		return -ENOTSUP;
1185 
1186 	rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1187 	if (rc)
1188 		return rc;
	/*
	 * priority  0: allocate from the top of the TCAM, i.e. high
	 * priority !0: allocate from the bottom, i.e. lowest
	 */
1193 	if (parms->priority)
1194 		id = ba_alloc_reverse(pool);
1195 	else
1196 		id = ba_alloc(pool);
1197 	if (id == BA_FAIL) {
1198 		rc = -ENOMEM;
1199 		TFP_DRV_LOG(ERR,
1200 			    "%s: Allocation failed, rc:%s\n",
1201 			    tf_dir_2_str(rm_db->dir),
1202 			    strerror(-rc));
1203 		return rc;
1204 	}
1205 
1206 	/* Adjust for any non zero start value */
1207 	rc = tf_rm_adjust_index(rm_db->db,
1208 				TF_RM_ADJUST_ADD_BASE,
1209 				subtype,
1210 				id,
1211 				&index);
1212 	if (rc) {
1213 		TFP_DRV_LOG(ERR,
1214 			    "%s: Alloc adjust of base index failed, rc:%s\n",
1215 			    tf_dir_2_str(rm_db->dir),
1216 			    strerror(-rc));
1217 		return -EINVAL;
1218 	}
1219 
1220 	*parms->index = index;
1221 	if (parms->base_index)
1222 		*parms->base_index = id;
1223 
1224 	return rc;
1225 }
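
/*
 * Illustrative sketch (not part of the driver): allocating a single
 * entry from an RM controlled pool. The rm_db handle is assumed to
 * come from a prior tf_rm_create_db() call.
 *
 *   struct tf_rm_allocate_parms aparms = { 0 };
 *   uint32_t idx;
 *
 *   aparms.rm_db = tbl_db;
 *   aparms.subtype = TF_TBL_TYPE_FULL_ACT_RECORD;
 *   aparms.priority = 0;     // 0: forward allocation, !0: reverse
 *   aparms.index = &idx;     // returns the base adjusted device index
 *   rc = tf_rm_allocate(&aparms);
 */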
1226 
1227 int
1228 tf_rm_free(struct tf_rm_free_parms *parms)
1229 {
1230 	int rc;
1231 	uint32_t adj_index;
1232 	struct tf_rm_new_db *rm_db;
1233 	enum tf_rm_elem_cfg_type cfg_type;
1234 	struct bitalloc *pool;
1235 	uint16_t subtype;
1236 
1237 	TF_CHECK_PARMS2(parms, parms->rm_db);
1238 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1239 	TF_CHECK_PARMS1(rm_db->db);
1240 
1241 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1242 
1243 	/* Bail out if not controlled by RM */
1244 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1245 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1246 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1247 		return -ENOTSUP;
1248 
1249 	rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1250 	if (rc)
1251 		return rc;
1252 
1253 	/* Adjust for any non zero start value */
1254 	rc = tf_rm_adjust_index(rm_db->db,
1255 				TF_RM_ADJUST_RM_BASE,
1256 				subtype,
1257 				parms->index,
1258 				&adj_index);
1259 	if (rc)
1260 		return rc;
1261 
1262 	rc = ba_free(pool, adj_index);
	/* No logging here; the direction required for a log message is
	 * not available.
	 */
1264 	if (rc)
1265 		return rc;
1266 
1267 	return rc;
1268 }
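
/*
 * Illustrative sketch (not part of the driver): returning the entry
 * allocated in the sketch above. The device index handed out by
 * tf_rm_allocate() is passed back unmodified; the base adjustment is
 * done internally.
 *
 *   struct tf_rm_free_parms fparms = { 0 };
 *
 *   fparms.rm_db = tbl_db;
 *   fparms.subtype = TF_TBL_TYPE_FULL_ACT_RECORD;
 *   fparms.index = idx;
 *   rc = tf_rm_free(&fparms);
 */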
1269 
1270 int
1271 tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms)
1272 {
1273 	int rc;
1274 	uint32_t adj_index;
1275 	struct tf_rm_new_db *rm_db;
1276 	enum tf_rm_elem_cfg_type cfg_type;
1277 	struct bitalloc *pool;
1278 	uint16_t subtype;
1279 
1280 	TF_CHECK_PARMS2(parms, parms->rm_db);
1281 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1282 	TF_CHECK_PARMS1(rm_db->db);
1283 
1284 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1285 
1286 	/* Bail out if not controlled by RM */
1287 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1288 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1289 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1290 		return -ENOTSUP;
1291 
1292 	rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1293 	if (rc)
1294 		return rc;
1295 
1296 	/* Adjust for any non zero start value */
1297 	rc = tf_rm_adjust_index(rm_db->db,
1298 				TF_RM_ADJUST_RM_BASE,
1299 				subtype,
1300 				parms->index,
1301 				&adj_index);
1302 	if (rc)
1303 		return rc;
1304 
1305 	if (parms->base_index)
1306 		*parms->base_index = adj_index;
1307 	*parms->allocated = ba_inuse(pool, adj_index);
1308 
1309 	return rc;
1310 }
1311 
1312 int
1313 tf_rm_get_info(struct tf_rm_get_alloc_info_parms *parms)
1314 {
1315 	struct tf_rm_new_db *rm_db;
1316 	enum tf_rm_elem_cfg_type cfg_type;
1317 
1318 	TF_CHECK_PARMS2(parms, parms->rm_db);
1319 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1320 	TF_CHECK_PARMS1(rm_db->db);
1321 
1322 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1323 
1324 	/* Bail out if not controlled by HCAPI */
1325 	if (cfg_type == TF_RM_ELEM_CFG_NULL)
1326 		return -ENOTSUP;
1327 
1328 	memcpy(parms->info,
1329 	       &rm_db->db[parms->subtype].alloc,
1330 	       sizeof(struct tf_rm_alloc_info));
1331 
1332 	return 0;
1333 }
1334 
1335 int
1336 tf_rm_get_all_info(struct tf_rm_get_alloc_info_parms *parms, int size)
1337 {
1338 	struct tf_rm_new_db *rm_db;
1339 	enum tf_rm_elem_cfg_type cfg_type;
1340 	struct tf_rm_alloc_info *info = parms->info;
1341 	int i;
1342 
1343 	TF_CHECK_PARMS1(parms);
1344 
1345 	/* No rm info available for this module type
1346 	 */
1347 	if (!parms->rm_db)
1348 		return -ENOMEM;
1349 
1350 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1351 	TF_CHECK_PARMS1(rm_db->db);
1352 
1353 	for (i = 0; i < size; i++) {
1354 		cfg_type = rm_db->db[i].cfg_type;
1355 
1356 		/* Bail out if not controlled by HCAPI */
1357 		if (cfg_type == TF_RM_ELEM_CFG_NULL) {
1358 			info++;
1359 			continue;
1360 		}
1361 
1362 		memcpy(info,
1363 		       &rm_db->db[i].alloc,
1364 		       sizeof(struct tf_rm_alloc_info));
1365 		info++;
1366 	}
1367 
1368 	return 0;
1369 }
1370 
1371 int
1372 tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms)
1373 {
1374 	struct tf_rm_new_db *rm_db;
1375 	enum tf_rm_elem_cfg_type cfg_type;
1376 
1377 	TF_CHECK_PARMS2(parms, parms->rm_db);
1378 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1379 	TF_CHECK_PARMS1(rm_db->db);
1380 
1381 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1382 
1383 	/* Bail out if not controlled by HCAPI */
1384 	if (cfg_type == TF_RM_ELEM_CFG_NULL)
1385 		return -ENOTSUP;
1386 
1387 	*parms->hcapi_type = rm_db->db[parms->subtype].hcapi_type;
1388 
1389 	return 0;
}

1391 int
1392 tf_rm_get_slices(struct tf_rm_get_slices_parms *parms)
1393 {
1394 	struct tf_rm_new_db *rm_db;
1395 	enum tf_rm_elem_cfg_type cfg_type;
1396 
1397 	TF_CHECK_PARMS2(parms, parms->rm_db);
1398 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1399 	TF_CHECK_PARMS1(rm_db->db);
1400 
1401 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1402 
1403 	/* Bail out if not controlled by HCAPI */
1404 	if (cfg_type == TF_RM_ELEM_CFG_NULL)
1405 		return -ENOTSUP;
1406 
1407 	*parms->slices = rm_db->db[parms->subtype].slices;
1408 
1409 	return 0;
1410 }
1411 
1412 int
1413 tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms)
1414 {
1415 	int rc = 0;
1416 	struct tf_rm_new_db *rm_db;
1417 	enum tf_rm_elem_cfg_type cfg_type;
1418 
1419 	TF_CHECK_PARMS2(parms, parms->rm_db);
1420 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1421 	TF_CHECK_PARMS1(rm_db->db);
1422 
1423 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1424 
1425 	/* Bail out if not a BA pool */
1426 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1427 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1428 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1429 		return -ENOTSUP;
1430 
	/* Bail silently (no logging); if the pool is not valid, no
	 * elements were allocated for it.
	 */
1434 	if (rm_db->db[parms->subtype].pool == NULL) {
1435 		*parms->count = 0;
1436 		return 0;
1437 	}
1438 
1439 	*parms->count = ba_inuse_count(rm_db->db[parms->subtype].pool);
1440 
1441 	return rc;
}

1443 /* Only used for table bulk get at this time
1444  */
1445 int
1446 tf_rm_check_indexes_in_range(struct tf_rm_check_indexes_in_range_parms *parms)
1447 {
1448 	struct tf_rm_new_db *rm_db;
1449 	enum tf_rm_elem_cfg_type cfg_type;
1450 	uint32_t base_index;
1451 	uint32_t stride;
1452 	int rc = 0;
1453 	struct bitalloc *pool;
1454 	uint16_t subtype;
1455 
1456 	TF_CHECK_PARMS2(parms, parms->rm_db);
1457 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1458 	TF_CHECK_PARMS1(rm_db->db);
1459 
1460 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1461 
1462 	/* Bail out if not a BA pool */
1463 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1464 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1465 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1466 		return -ENOTSUP;
1467 
1468 	rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1469 	if (rc)
1470 		return rc;
1471 
1472 	base_index = rm_db->db[subtype].alloc.entry.start;
1473 	stride = rm_db->db[subtype].alloc.entry.stride;
1474 
1475 	if (parms->starting_index < base_index ||
1476 	    parms->starting_index + parms->num_entries > base_index + stride)
1477 		return -EINVAL;
1478 
1479 	return rc;
1480 }
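
/*
 * Worked example (illustrative numbers only): with alloc.entry.start =
 * 1000 and alloc.entry.stride = 64, valid device indexes are
 * 1000..1063. A bulk get with starting_index = 1056 and num_entries = 8
 * passes the check (1056 + 8 <= 1064), while num_entries = 9 fails
 * with -EINVAL.
 */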
1481