1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2021 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <string.h>
7 
8 #include <rte_common.h>
9 #include <rte_debug.h>
10 
11 #include <cfa_resource_types.h>
12 
13 #include "tf_rm.h"
14 #include "tf_common.h"
15 #include "tf_util.h"
16 #include "tf_session.h"
17 #include "tf_device.h"
18 #include "tfp.h"
19 #include "tf_msg.h"
20 
21 /* Logging defines */
22 #define TF_RM_DEBUG  0
23 
24 /**
 * Generic RM Element data type that an RM DB is built upon.
26  */
27 struct tf_rm_element {
28 	/**
29 	 * RM Element configuration type. If Private then the
30 	 * hcapi_type can be ignored. If Null then the element is not
31 	 * valid for the device.
32 	 */
33 	enum tf_rm_elem_cfg_type cfg_type;
34 
35 	/**
36 	 * HCAPI RM Type for the element.
37 	 */
38 	uint16_t hcapi_type;
39 
40 	/**
41 	 * Resource slices.  How many slices will fit in the
42 	 * resource pool chunk size.
43 	 */
44 	uint8_t slices;
45 
46 	/**
47 	 * HCAPI RM allocated range information for the element.
48 	 */
49 	struct tf_rm_alloc_info alloc;
50 
51 	/**
52 	 * If cfg_type == HCAPI_BA_CHILD, this field indicates
53 	 * the parent module subtype for look up into the parent pool.
54 	 * An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
55 	 * module subtype of TF_MODULE_TYPE_TABLE.
56 	 */
57 	uint16_t parent_subtype;
58 
59 	/**
60 	 * Bit allocator pool for the element. Pool size is controlled
61 	 * by the struct tf_session_resources at time of session creation.
62 	 * Null indicates that the pool is not used for the element.
63 	 */
64 	struct bitalloc *pool;
65 };
66 
67 /**
68  * TF RM DB definition
69  */
70 struct tf_rm_new_db {
71 	/**
72 	 * Number of elements in the DB
73 	 */
74 	uint16_t num_entries;
75 
76 	/**
77 	 * Direction this DB controls.
78 	 */
79 	enum tf_dir dir;
80 
81 	/**
82 	 * Module type, used for logging purposes.
83 	 */
84 	enum tf_module_type module;
85 
86 	/**
87 	 * The DB consists of an array of elements
88 	 */
89 	struct tf_rm_element *db;
90 };
91 
92 /**
 * Counts the number of HCAPI reservations, i.e. the number of
 * configuration elements that are not TF_RM_ELEM_CFG_NULL and have a
 * requested reservation count greater than 0. A message is logged for
 * any unsupported request, except for the EM module which uses a split
 * configuration array and would otherwise produce false warnings.
 *
 * [in] dir
 *   Receive or transmit direction
 *
 * [in] module
 *   Type of Device Module
 *
 * [in] cfg
 *   Pointer to the DB configuration
 *
 * [in] reservations
 *   Pointer to the allocation values associated with the module
 *
 * [in] count
 *   Number of DB configuration elements
 *
 * [out] valid_count
 *   Number of HCAPI entries with a reservation value greater than 0
114  */
115 static void
116 tf_rm_count_hcapi_reservations(enum tf_dir dir,
117 			       enum tf_module_type module,
118 			       struct tf_rm_element_cfg *cfg,
119 			       uint16_t *reservations,
120 			       uint16_t count,
121 			       uint16_t *valid_count)
122 {
123 	int i;
124 	uint16_t cnt = 0;
125 
126 	for (i = 0; i < count; i++) {
127 		if (cfg[i].cfg_type != TF_RM_ELEM_CFG_NULL &&
128 		    reservations[i] > 0)
129 			cnt++;
130 
		/* Only log a message if a type was requested but is not
		 * supported. The EM module is ignored since it uses a
		 * split configuration array and would otherwise fail
		 * this type of check.
135 		 */
136 		if (module != TF_MODULE_TYPE_EM &&
137 		    cfg[i].cfg_type == TF_RM_ELEM_CFG_NULL &&
138 		    reservations[i] > 0) {
139 			TFP_DRV_LOG(ERR,
140 				"%s, %s, %s allocation of %d not supported\n",
141 				tf_module_2_str(module),
142 				tf_dir_2_str(dir),
143 				tf_module_subtype_2_str(module, i),
144 				reservations[i]);
145 		}
146 	}
147 
148 	*valid_count = cnt;
149 }
150 
151 /**
152  * Resource Manager Adjust of base index definitions.
153  */
154 enum tf_rm_adjust_type {
155 	TF_RM_ADJUST_ADD_BASE, /**< Adds base to the index */
156 	TF_RM_ADJUST_RM_BASE   /**< Removes base from the index */
157 };
158 
159 /**
160  * Adjust an index according to the allocation information.
161  *
162  * All resources are controlled in a 0 based pool. Some resources, by
 * design, are not 0 based, e.g. Full Action Records (SRAM), thus they
 * need to be adjusted before they are handed out.
165  *
166  * [in] db
167  *   Pointer to the db, used for the lookup
168  *
169  * [in] action
170  *   Adjust action
171  *
172  * [in] subtype
173  *   TF module subtype used as an index into the database.
174  *   An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
175  *   module subtype of TF_MODULE_TYPE_TABLE.
176  *
177  * [in] index
178  *   Index to convert
179  *
180  * [out] adj_index
181  *   Adjusted index
182  *
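 * For example: if the allocated range for the subtype starts at 0x100,
 * TF_RM_ADJUST_ADD_BASE maps pool index 5 to 0x105 and
 * TF_RM_ADJUST_RM_BASE maps 0x105 back to pool index 5.
 *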
183  * Returns:
184  *     0          - Success
185  *   - EOPNOTSUPP - Operation not supported
186  */
187 static int
188 tf_rm_adjust_index(struct tf_rm_element *db,
189 		   enum tf_rm_adjust_type action,
190 		   uint32_t subtype,
191 		   uint32_t index,
192 		   uint32_t *adj_index)
193 {
194 	int rc = 0;
195 	uint32_t base_index;
196 
197 	base_index = db[subtype].alloc.entry.start;
198 
199 	switch (action) {
200 	case TF_RM_ADJUST_RM_BASE:
201 		*adj_index = index - base_index;
202 		break;
203 	case TF_RM_ADJUST_ADD_BASE:
204 		*adj_index = index + base_index;
205 		break;
206 	default:
207 		return -EOPNOTSUPP;
208 	}
209 
210 	return rc;
211 }
212 
213 /**
214  * Logs an array of found residual entries to the console.
215  *
216  * [in] dir
217  *   Receive or transmit direction
218  *
219  * [in] module
220  *   Type of Device Module
221  *
222  * [in] count
223  *   Number of entries in the residual array
224  *
225  * [in] residuals
 *   Pointer to an array of residual entries. The array is indexed the
 *   same as the DB in which this function is used. Each entry holds
 *   the residual value for that entry.
229  */
230 #if (TF_RM_DEBUG == 1)
231 static void
232 tf_rm_log_residuals(enum tf_dir dir,
233 		    enum tf_module_type module,
234 		    uint16_t count,
235 		    uint16_t *residuals)
236 {
237 	int i;
238 
	/* Walk the residual array and log the types that weren't
240 	 * cleaned up to the console.
241 	 */
242 	for (i = 0; i < count; i++) {
243 		if (residuals[i] != 0)
244 			TFP_DRV_LOG(INFO,
245 				"%s, %s was not cleaned up, %d outstanding\n",
246 				tf_dir_2_str(dir),
247 				tf_module_subtype_2_str(module, i),
248 				residuals[i]);
249 	}
250 }
251 #endif /* TF_RM_DEBUG == 1 */

/**
253  * Performs a check of the passed in DB for any lingering elements. If
 * a resource type is found not to have been cleaned up by the caller
 * then its residual values are recorded, logged and passed back in an
 * allocated reservation array that the caller can pass to the FW for
257  * cleanup.
258  *
 * [in] rm_db
 *   Pointer to the RM DB, used for the lookup
261  *
262  * [out] resv_size
263  *   Pointer to the reservation size of the generated reservation
264  *   array.
265  *
266  * [in/out] resv
 *   Pointer to a pointer to a reservation array. The reservation array is
268  *   allocated after the residual scan and holds any found residual
269  *   entries. Thus it can be smaller than the DB that the check was
270  *   performed on. Array must be freed by the caller.
271  *
272  * [out] residuals_present
273  *   Pointer to a bool flag indicating if residual was present in the
274  *   DB
275  *
276  * Returns:
277  *     0          - Success
278  *   - EOPNOTSUPP - Operation not supported
279  */
280 static int
281 tf_rm_check_residuals(struct tf_rm_new_db *rm_db,
282 		      uint16_t *resv_size,
283 		      struct tf_rm_resc_entry **resv,
284 		      bool *residuals_present)
285 {
286 	int rc;
287 	int i;
288 	int f;
289 	uint16_t count;
290 	uint16_t found;
291 	uint16_t *residuals = NULL;
292 	uint16_t hcapi_type;
293 	struct tf_rm_get_inuse_count_parms iparms;
294 	struct tf_rm_get_alloc_info_parms aparms;
295 	struct tf_rm_get_hcapi_parms hparms;
296 	struct tf_rm_alloc_info info;
297 	struct tfp_calloc_parms cparms;
298 	struct tf_rm_resc_entry *local_resv = NULL;
299 
300 	/* Create array to hold the entries that have residuals */
301 	cparms.nitems = rm_db->num_entries;
302 	cparms.size = sizeof(uint16_t);
303 	cparms.alignment = 0;
304 	rc = tfp_calloc(&cparms);
305 	if (rc)
306 		return rc;
307 
308 	residuals = (uint16_t *)cparms.mem_va;
309 
310 	/* Traverse the DB and collect any residual elements */
311 	iparms.rm_db = rm_db;
312 	iparms.count = &count;
313 	for (i = 0, found = 0; i < rm_db->num_entries; i++) {
314 		iparms.subtype = i;
315 		rc = tf_rm_get_inuse_count(&iparms);
316 		/* Not a device supported entry, just skip */
317 		if (rc == -ENOTSUP)
318 			continue;
319 		if (rc)
320 			goto cleanup_residuals;
321 
322 		if (count) {
323 			found++;
324 			residuals[i] = count;
325 			*residuals_present = true;
326 		}
327 	}
328 
329 	if (*residuals_present) {
330 		/* Populate a reduced resv array with only the entries
331 		 * that have residuals.
332 		 */
333 		cparms.nitems = found;
334 		cparms.size = sizeof(struct tf_rm_resc_entry);
335 		cparms.alignment = 0;
336 		rc = tfp_calloc(&cparms);
337 		if (rc)
			goto cleanup_residuals;
339 
340 		local_resv = (struct tf_rm_resc_entry *)cparms.mem_va;
341 
342 		aparms.rm_db = rm_db;
343 		hparms.rm_db = rm_db;
344 		hparms.hcapi_type = &hcapi_type;
345 		for (i = 0, f = 0; i < rm_db->num_entries; i++) {
346 			if (residuals[i] == 0)
347 				continue;
348 			aparms.subtype = i;
349 			aparms.info = &info;
350 			rc = tf_rm_get_info(&aparms);
351 			if (rc)
352 				goto cleanup_all;
353 
354 			hparms.subtype = i;
355 			rc = tf_rm_get_hcapi_type(&hparms);
356 			if (rc)
357 				goto cleanup_all;
358 
359 			local_resv[f].type = hcapi_type;
360 			local_resv[f].start = info.entry.start;
361 			local_resv[f].stride = info.entry.stride;
362 			f++;
363 		}
364 		*resv_size = found;
365 	}
366 
367 #if (TF_RM_DEBUG == 1)
368 	tf_rm_log_residuals(rm_db->dir,
369 			    rm_db->module,
370 			    rm_db->num_entries,
371 			    residuals);
372 #endif
373 	tfp_free((void *)residuals);
374 	*resv = local_resv;
375 
376 	return 0;
377 
378  cleanup_all:
379 	tfp_free((void *)local_resv);
380 	*resv = NULL;
381  cleanup_residuals:
382 	tfp_free((void *)residuals);
383 
384 	return rc;
385 }
386 
387 /**
388  * Some resources do not have a 1:1 mapping between the Truflow type and the cfa
389  * resource type (HCAPI RM).  These resources have multiple Truflow types which
390  * map to a single HCAPI RM type.  In order to support this, one Truflow type
391  * sharing the HCAPI resources is designated the parent.  All other Truflow
392  * types associated with that HCAPI RM type are designated the children.
393  *
394  * This function updates the resource counts of any HCAPI_BA_PARENT with the
395  * counts of the HCAPI_BA_CHILDREN.  These are read from the alloc_cnt and
396  * written back to the req_cnt.
397  *
 * [in] tfp
 *   Pointer to TF handle
 *
 * [in] dev
 *   Pointer to the device information
 *
 * [in] cfg
 *   Pointer to an array of module specific Truflow type indexed RM cfg items
 *
 * [in] alloc_cnt
 *   Pointer to the tf_open_session() configured array of module specific
 *   Truflow type indexed requested counts.
 *
 * [in] num_elements
 *   Number of elements in the cfg and alloc_cnt arrays
 *
 * [in/out] req_cnt
 *   Pointer to the location to put the updated resource counts.
 *
 * [in] shared_session
 *   Indicates whether this is a shared session, in which case slices
 *   are not applied.
 *
 * Returns:
 *     0          - Success
 *     (negative) - Failure
411  */
412 static int
413 tf_rm_update_parent_reservations(struct tf *tfp,
414 				 struct tf_dev_info *dev,
415 				 struct tf_rm_element_cfg *cfg,
416 				 uint16_t *alloc_cnt,
417 				 uint16_t num_elements,
418 				 uint16_t *req_cnt,
419 				 bool shared_session)
420 {
421 	int parent, child;
422 	const char *type_str;
423 
424 	/* Search through all the elements */
425 	for (parent = 0; parent < num_elements; parent++) {
426 		uint16_t combined_cnt = 0;
427 
428 		/* If I am a parent */
429 		if (cfg[parent].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
430 			uint8_t p_slices = 1;
431 
432 			/* Shared session doesn't support slices */
433 			if (!shared_session)
434 				p_slices = cfg[parent].slices;
435 
436 			RTE_ASSERT(p_slices);
437 
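			/* Round the parent's own request up to whole RM
			 * allocation units, e.g. 10 requested entries at
			 * 4 slices per unit become 3 units.
			 */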
438 			combined_cnt = alloc_cnt[parent] / p_slices;
439 
440 			if (alloc_cnt[parent] % p_slices)
441 				combined_cnt++;
442 
443 			if (alloc_cnt[parent]) {
444 				dev->ops->tf_dev_get_resource_str(tfp,
445 							 cfg[parent].hcapi_type,
446 							 &type_str);
447 #if (TF_RM_DEBUG == 1)
448 				printf("%s:%s cnt(%d) slices(%d)\n",
449 				       type_str, tf_tbl_type_2_str(parent),
450 				       alloc_cnt[parent], p_slices);
451 #endif /* (TF_RM_DEBUG == 1) */
452 			}
453 
454 			/* Search again through all the elements */
455 			for (child = 0; child < num_elements; child++) {
456 				/* If this is one of my children */
457 				if (cfg[child].cfg_type ==
458 				    TF_RM_ELEM_CFG_HCAPI_BA_CHILD &&
459 				    cfg[child].parent_subtype == parent &&
460 				    alloc_cnt[child]) {
461 					uint8_t c_slices = 1;
462 					uint16_t cnt = 0;
463 
464 					if (!shared_session)
465 						c_slices = cfg[child].slices;
466 
467 					RTE_ASSERT(c_slices);
468 
469 					dev->ops->tf_dev_get_resource_str(tfp,
470 							  cfg[child].hcapi_type,
471 							   &type_str);
472 #if (TF_RM_DEBUG == 1)
473 					printf("%s:%s cnt(%d) slices(%d)\n",
474 					       type_str,
475 					       tf_tbl_type_2_str(child),
476 					       alloc_cnt[child],
477 					       c_slices);
478 #endif /* (TF_RM_DEBUG == 1) */
479 					/* Increment the parents combined count
480 					 * with each child's count adjusted for
481 					 * number of slices per RM alloc item.
482 					 */
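					/* E.g. a child request of 7 entries
					 * at 2 slices per unit adds 4 units
					 * to the parent's combined count.
					 */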
483 					cnt = alloc_cnt[child] / c_slices;
484 
485 					if (alloc_cnt[child] % c_slices)
486 						cnt++;
487 
488 					combined_cnt += cnt;
489 					/* Clear the requested child count */
490 					req_cnt[child] = 0;
491 				}
492 			}
493 			/* Save the parent count to be requested */
494 			req_cnt[parent] = combined_cnt;
495 #if (TF_RM_DEBUG == 1)
496 			printf("%s calculated total:%d\n\n",
497 			       type_str, req_cnt[parent]);
498 #endif /* (TF_RM_DEBUG == 1) */
499 		}
500 	}
501 	return 0;
502 }
503 
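/*
 * Worked example with hypothetical counts: a parent configured with
 * 1 slice and one HCAPI_BA_CHILD configured with 8 slices per RM
 * allocation unit, requested as 64 parent entries and 128 child
 * entries, yields req_cnt[parent] = 64/1 + 128/8 = 80 and
 * req_cnt[child] = 0, so only the parent type is requested from the
 * HCAPI RM.
 */
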
504 int
505 tf_rm_create_db(struct tf *tfp,
506 		struct tf_rm_create_db_parms *parms)
507 {
508 	int rc;
509 	struct tf_session *tfs;
510 	struct tf_dev_info *dev;
511 	int i, j;
512 	uint16_t max_types, hcapi_items, *req_cnt;
513 	struct tfp_calloc_parms cparms;
514 	struct tf_rm_resc_req_entry *query;
515 	enum tf_rm_resc_resv_strategy resv_strategy;
516 	struct tf_rm_resc_req_entry *req;
517 	struct tf_rm_resc_entry *resv;
518 	struct tf_rm_new_db *rm_db;
519 	struct tf_rm_element *db;
520 	uint32_t pool_size;
521 	bool shared_session = 0;
522 
523 	TF_CHECK_PARMS2(tfp, parms);
524 
525 	/* Retrieve the session information */
526 	rc = tf_session_get_session_internal(tfp, &tfs);
527 	if (rc)
528 		return rc;
529 
530 	/* Retrieve device information */
531 	rc = tf_session_get_device(tfs, &dev);
532 	if (rc)
533 		return rc;
534 
535 	/* Need device max number of elements for the RM QCAPS */
	rc = dev->ops->tf_dev_get_max_types(tfp, &max_types);
	if (rc)
		return rc;
537 
538 	/* Allocate memory for RM QCAPS request */
539 	cparms.nitems = max_types;
540 	cparms.size = sizeof(struct tf_rm_resc_req_entry);
541 	cparms.alignment = 0;
542 	rc = tfp_calloc(&cparms);
543 	if (rc)
544 		return rc;
545 
546 	query = (struct tf_rm_resc_req_entry *)cparms.mem_va;
547 
548 	/* Get Firmware Capabilities */
549 	rc = tf_msg_session_resc_qcaps(tfp,
550 				       dev,
551 				       parms->dir,
552 				       max_types,
553 				       query,
554 				       &resv_strategy);
555 	if (rc)
556 		return rc;
557 
558 	/* Copy requested counts (alloc_cnt) from tf_open_session() to local
559 	 * copy (req_cnt) so that it can be updated if required.
560 	 */
561 
562 	cparms.nitems = parms->num_elements;
563 	cparms.size = sizeof(uint16_t);
564 	rc = tfp_calloc(&cparms);
565 	if (rc)
566 		return rc;
567 
568 	req_cnt = (uint16_t *)cparms.mem_va;
569 
570 	tfp_memcpy(req_cnt, parms->alloc_cnt,
571 		   parms->num_elements * sizeof(uint16_t));
572 
573 	shared_session = tf_session_is_shared_session(tfs);
574 
575 	/* Update the req_cnt based upon the element configuration
576 	 */
577 	tf_rm_update_parent_reservations(tfp, dev, parms->cfg,
578 					 parms->alloc_cnt,
579 					 parms->num_elements,
580 					 req_cnt,
581 					 shared_session);
582 
583 	/* Process capabilities against DB requirements. However, as a
584 	 * DB can hold elements that are not HCAPI we can reduce the
	 * req msg content by leaving those out of the request, while
	 * the DB still holds them all so as to give a fast lookup. We
	 * can also remove entries for which no elements are requested.
588 	 */
589 	tf_rm_count_hcapi_reservations(parms->dir,
590 				       parms->module,
591 				       parms->cfg,
592 				       req_cnt,
593 				       parms->num_elements,
594 				       &hcapi_items);
595 
596 	if (hcapi_items == 0) {
597 #if (TF_RM_DEBUG == 1)
598 		TFP_DRV_LOG(INFO,
599 			"%s: module: %s Empty RM DB create request\n",
600 			tf_dir_2_str(parms->dir),
601 			tf_module_2_str(parms->module));
602 #endif
603 		parms->rm_db = NULL;
604 		return -ENOMEM;
605 	}
606 
607 	/* Alloc request, alignment already set */
608 	cparms.nitems = (size_t)hcapi_items;
609 	cparms.size = sizeof(struct tf_rm_resc_req_entry);
610 	rc = tfp_calloc(&cparms);
611 	if (rc)
612 		return rc;
613 	req = (struct tf_rm_resc_req_entry *)cparms.mem_va;
614 
615 	/* Alloc reservation, alignment and nitems already set */
616 	cparms.size = sizeof(struct tf_rm_resc_entry);
617 	rc = tfp_calloc(&cparms);
618 	if (rc)
619 		return rc;
620 	resv = (struct tf_rm_resc_entry *)cparms.mem_va;
621 
622 	/* Build the request */
623 	for (i = 0, j = 0; i < parms->num_elements; i++) {
624 		struct tf_rm_element_cfg *cfg = &parms->cfg[i];
625 		uint16_t hcapi_type = cfg->hcapi_type;
626 
627 		/* Only perform reservation for requested entries
628 		 */
629 		if (req_cnt[i] == 0)
630 			continue;
631 
632 		/* Skip any children in the request */
633 		if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI ||
634 		    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
635 		    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
636 
637 			/* Verify that we can get the full amount per qcaps.
638 			 */
639 			if (req_cnt[i] <= query[hcapi_type].max) {
640 				req[j].type = hcapi_type;
641 				req[j].min = req_cnt[i];
642 				req[j].max = req_cnt[i];
643 				j++;
644 			} else {
645 				const char *type_str;
646 
647 				dev->ops->tf_dev_get_resource_str(tfp,
648 							      hcapi_type,
649 							      &type_str);
650 				TFP_DRV_LOG(ERR,
651 					"Failure, %s:%d:%s req:%d avail:%d\n",
652 					tf_dir_2_str(parms->dir),
653 					hcapi_type, type_str,
654 					req_cnt[i],
655 					query[hcapi_type].max);
656 				return -EINVAL;
657 			}
658 		}
659 	}
660 
661 	/* Allocate all resources for the module type
662 	 */
663 	rc = tf_msg_session_resc_alloc(tfp,
664 				       dev,
665 				       parms->dir,
666 				       hcapi_items,
667 				       req,
668 				       resv);
669 	if (rc)
670 		return rc;
671 
672 	/* Build the RM DB per the request */
673 	cparms.nitems = 1;
674 	cparms.size = sizeof(struct tf_rm_new_db);
675 	rc = tfp_calloc(&cparms);
676 	if (rc)
677 		return rc;
678 	rm_db = (void *)cparms.mem_va;
679 
680 	/* Build the DB within RM DB */
681 	cparms.nitems = parms->num_elements;
682 	cparms.size = sizeof(struct tf_rm_element);
683 	rc = tfp_calloc(&cparms);
684 	if (rc)
685 		return rc;
686 	rm_db->db = (struct tf_rm_element *)cparms.mem_va;
687 
688 	db = rm_db->db;
689 	for (i = 0, j = 0; i < parms->num_elements; i++) {
690 		struct tf_rm_element_cfg *cfg = &parms->cfg[i];
691 		const char *type_str;
692 
693 		dev->ops->tf_dev_get_resource_str(tfp,
694 						  cfg->hcapi_type,
695 						  &type_str);
696 
697 		db[i].cfg_type = cfg->cfg_type;
698 		db[i].hcapi_type = cfg->hcapi_type;
699 		db[i].slices = cfg->slices;
700 
701 		/* Save the parent subtype for later use to find the pool
702 		 */
703 		if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
704 			db[i].parent_subtype = cfg->parent_subtype;
705 
706 		/* If the element didn't request an allocation no need
707 		 * to create a pool nor verify if we got a reservation.
708 		 */
709 		if (req_cnt[i] == 0)
710 			continue;
711 
712 		/* Skip any children or invalid
713 		 */
714 		if (cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI &&
715 		    cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
716 		    cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT)
717 			continue;
718 
719 		/* If the element had requested an allocation and that
720 		 * allocation was a success (full amount) then
721 		 * allocate the pool.
722 		 */
723 		if (req_cnt[i] == resv[j].stride) {
724 			db[i].alloc.entry.start = resv[j].start;
725 			db[i].alloc.entry.stride = resv[j].stride;
726 
727 			/* Only allocate BA pool if a BA type not a child */
728 			if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
729 			    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
730 				/* Create pool */
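				/* Convert the bit allocator footprint for
				 * resv[j].stride entries into a count of
				 * struct bitalloc sized units for the
				 * tfp_calloc() below.
				 */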
731 				pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
732 					     sizeof(struct bitalloc));
733 				/* Alloc request, alignment already set */
734 				cparms.nitems = pool_size;
735 				cparms.size = sizeof(struct bitalloc);
736 				rc = tfp_calloc(&cparms);
737 				if (rc) {
738 					TFP_DRV_LOG(ERR,
739 					 "%s: Pool alloc failed, type:%d:%s\n",
740 					 tf_dir_2_str(parms->dir),
741 					 cfg->hcapi_type, type_str);
742 					goto fail;
743 				}
744 				db[i].pool = (struct bitalloc *)cparms.mem_va;
745 
746 				rc = ba_init(db[i].pool,
747 					     resv[j].stride,
748 					     !tf_session_is_shared_session(tfs));
749 				if (rc) {
750 					TFP_DRV_LOG(ERR,
751 					  "%s: Pool init failed, type:%d:%s\n",
752 					  tf_dir_2_str(parms->dir),
753 					  cfg->hcapi_type, type_str);
754 					goto fail;
755 				}
756 			}
757 			j++;
758 		} else {
759 			/* Bail out as we want what we requested for
760 			 * all elements, not any less.
761 			 */
762 			TFP_DRV_LOG(ERR,
763 				    "%s: Alloc failed %d:%s req:%d, alloc:%d\n",
764 				    tf_dir_2_str(parms->dir), cfg->hcapi_type,
765 				    type_str, req_cnt[i], resv[j].stride);
766 			goto fail;
767 		}
768 	}
769 
770 	rm_db->num_entries = parms->num_elements;
771 	rm_db->dir = parms->dir;
772 	rm_db->module = parms->module;
773 	*parms->rm_db = (void *)rm_db;
774 
775 #if (TF_RM_DEBUG == 1)
776 
777 	printf("%s: module:%s\n",
778 	       tf_dir_2_str(parms->dir),
779 	       tf_module_2_str(parms->module));
780 #endif /* (TF_RM_DEBUG == 1) */
781 
782 	tfp_free((void *)req);
783 	tfp_free((void *)resv);
784 	tfp_free((void *)req_cnt);
785 	return 0;
786 
787  fail:
788 	tfp_free((void *)req);
789 	tfp_free((void *)resv);
	for (i = 0; i < parms->num_elements; i++)
		tfp_free((void *)db[i].pool);
791 	tfp_free((void *)db);
792 	tfp_free((void *)rm_db);
793 	tfp_free((void *)req_cnt);
794 	parms->rm_db = NULL;
795 
796 	return -EINVAL;
797 }
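
/*
 * Illustrative caller sketch (hypothetical local names; the parms
 * fields are the ones dereferenced above):
 *
 *	struct tf_rm_create_db_parms db_cfg = { 0 };
 *
 *	db_cfg.module = TF_MODULE_TYPE_TABLE;
 *	db_cfg.dir = dir;
 *	db_cfg.num_elements = num_tbl_types;
 *	db_cfg.cfg = tbl_cfg;              (per subtype tf_rm_element_cfg[])
 *	db_cfg.alloc_cnt = requested_cnt;  (per subtype uint16_t counts)
 *	db_cfg.rm_db = &tbl_db;
 *	rc = tf_rm_create_db(tfp, &db_cfg);
 */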
798 
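/*
 * Note: unlike tf_rm_create_db() above, this variant does not reserve
 * new resources from the FW. It only queries information about
 * resources already reserved (tf_msg_session_resc_info() rather than
 * tf_msg_session_resc_alloc()), skips the parent/child reservation
 * update and the QCAPS check, and then builds the DB and BA pools from
 * the returned entries.
 */
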
799 int
800 tf_rm_create_db_no_reservation(struct tf *tfp,
801 			       struct tf_rm_create_db_parms *parms)
802 {
803 	int rc;
804 	struct tf_session *tfs;
805 	struct tf_dev_info *dev;
806 	int i, j;
807 	uint16_t hcapi_items, *req_cnt;
808 	struct tfp_calloc_parms cparms;
809 	struct tf_rm_resc_req_entry *req;
810 	struct tf_rm_resc_entry *resv;
811 	struct tf_rm_new_db *rm_db;
812 	struct tf_rm_element *db;
813 	uint32_t pool_size;
814 
815 	TF_CHECK_PARMS2(tfp, parms);
816 
817 	/* Retrieve the session information */
818 	rc = tf_session_get_session_internal(tfp, &tfs);
819 	if (rc)
820 		return rc;
821 
822 	/* Retrieve device information */
823 	rc = tf_session_get_device(tfs, &dev);
824 	if (rc)
825 		return rc;
826 
827 	/* Copy requested counts (alloc_cnt) from tf_open_session() to local
828 	 * copy (req_cnt) so that it can be updated if required.
829 	 */
830 
831 	cparms.nitems = parms->num_elements;
832 	cparms.size = sizeof(uint16_t);
833 	cparms.alignment = 0;
834 	rc = tfp_calloc(&cparms);
835 	if (rc)
836 		return rc;
837 
838 	req_cnt = (uint16_t *)cparms.mem_va;
839 
840 	tfp_memcpy(req_cnt, parms->alloc_cnt,
841 		   parms->num_elements * sizeof(uint16_t));
842 
843 	/* Process capabilities against DB requirements. However, as a
844 	 * DB can hold elements that are not HCAPI we can reduce the
	 * req msg content by leaving those out of the request, while
	 * the DB still holds them all so as to give a fast lookup. We
	 * can also remove entries for which no elements are requested.
848 	 */
849 	tf_rm_count_hcapi_reservations(parms->dir,
850 				       parms->module,
851 				       parms->cfg,
852 				       req_cnt,
853 				       parms->num_elements,
854 				       &hcapi_items);
855 
856 	if (hcapi_items == 0) {
857 		TFP_DRV_LOG(ERR,
858 			"%s: module:%s Empty RM DB create request\n",
859 			tf_dir_2_str(parms->dir),
860 			tf_module_2_str(parms->module));
861 
862 		parms->rm_db = NULL;
863 		return -ENOMEM;
864 	}
865 
866 	/* Alloc request, alignment already set */
867 	cparms.nitems = (size_t)hcapi_items;
868 	cparms.size = sizeof(struct tf_rm_resc_req_entry);
869 	rc = tfp_calloc(&cparms);
870 	if (rc)
871 		return rc;
872 	req = (struct tf_rm_resc_req_entry *)cparms.mem_va;
873 
874 	/* Alloc reservation, alignment and nitems already set */
875 	cparms.size = sizeof(struct tf_rm_resc_entry);
876 	rc = tfp_calloc(&cparms);
877 	if (rc)
878 		return rc;
879 	resv = (struct tf_rm_resc_entry *)cparms.mem_va;
880 
881 	/* Build the request */
882 	for (i = 0, j = 0; i < parms->num_elements; i++) {
883 		struct tf_rm_element_cfg *cfg = &parms->cfg[i];
884 		uint16_t hcapi_type = cfg->hcapi_type;
885 
886 		/* Only perform reservation for requested entries
887 		 */
888 		if (req_cnt[i] == 0)
889 			continue;
890 
891 		/* Skip any children in the request */
892 		if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI ||
893 		    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
894 		    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
895 			req[j].type = hcapi_type;
896 			req[j].min = req_cnt[i];
897 			req[j].max = req_cnt[i];
898 			j++;
899 		}
900 	}
901 
902 	/* Get all resources info for the module type
903 	 */
904 	rc = tf_msg_session_resc_info(tfp,
905 				      dev,
906 				      parms->dir,
907 				      hcapi_items,
908 				      req,
909 				      resv);
910 	if (rc)
911 		return rc;
912 
913 	/* Build the RM DB per the request */
914 	cparms.nitems = 1;
915 	cparms.size = sizeof(struct tf_rm_new_db);
916 	rc = tfp_calloc(&cparms);
917 	if (rc)
918 		return rc;
919 	rm_db = (void *)cparms.mem_va;
920 
921 	/* Build the DB within RM DB */
922 	cparms.nitems = parms->num_elements;
923 	cparms.size = sizeof(struct tf_rm_element);
924 	rc = tfp_calloc(&cparms);
925 	if (rc)
926 		return rc;
927 	rm_db->db = (struct tf_rm_element *)cparms.mem_va;
928 
929 	db = rm_db->db;
930 	for (i = 0, j = 0; i < parms->num_elements; i++) {
931 		struct tf_rm_element_cfg *cfg = &parms->cfg[i];
932 		const char *type_str;
933 
934 		dev->ops->tf_dev_get_resource_str(tfp,
935 						  cfg->hcapi_type,
936 						  &type_str);
937 
938 		db[i].cfg_type = cfg->cfg_type;
939 		db[i].hcapi_type = cfg->hcapi_type;
940 
941 		/* Save the parent subtype for later use to find the pool
942 		 */
943 		if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
944 			db[i].parent_subtype = cfg->parent_subtype;
945 
946 		/* If the element didn't request an allocation no need
947 		 * to create a pool nor verify if we got a reservation.
948 		 */
949 		if (req_cnt[i] == 0)
950 			continue;
951 
952 		/* Skip any children or invalid
953 		 */
954 		if (cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI &&
955 		    cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
956 		    cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT)
957 			continue;
958 
959 		/* If the element had requested an allocation and that
960 		 * allocation was a success (full amount) then
961 		 * allocate the pool.
962 		 */
963 		if (req_cnt[i] == resv[j].stride) {
964 			db[i].alloc.entry.start = resv[j].start;
965 			db[i].alloc.entry.stride = resv[j].stride;
966 
967 			/* Only allocate BA pool if a BA type not a child */
968 			if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
969 			    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
970 				/* Create pool */
971 				pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
972 					     sizeof(struct bitalloc));
973 				/* Alloc request, alignment already set */
974 				cparms.nitems = pool_size;
975 				cparms.size = sizeof(struct bitalloc);
976 				rc = tfp_calloc(&cparms);
977 				if (rc) {
978 					TFP_DRV_LOG(ERR,
979 					 "%s: Pool alloc failed, type:%d:%s\n",
980 					 tf_dir_2_str(parms->dir),
981 					 cfg->hcapi_type, type_str);
982 					goto fail;
983 				}
984 				db[i].pool = (struct bitalloc *)cparms.mem_va;
985 
986 				rc = ba_init(db[i].pool,
987 					     resv[j].stride,
988 					     !tf_session_is_shared_session(tfs));
989 				if (rc) {
990 					TFP_DRV_LOG(ERR,
991 					  "%s: Pool init failed, type:%d:%s\n",
992 					  tf_dir_2_str(parms->dir),
993 					  cfg->hcapi_type, type_str);
994 					goto fail;
995 				}
996 			}
997 			j++;
998 		} else {
999 			/* Bail out as we want what we requested for
1000 			 * all elements, not any less.
1001 			 */
1002 			TFP_DRV_LOG(ERR,
1003 				    "%s: Alloc failed %d:%s req:%d, alloc:%d\n",
1004 				    tf_dir_2_str(parms->dir), cfg->hcapi_type,
1005 				    type_str, req_cnt[i], resv[j].stride);
1006 			goto fail;
1007 		}
1008 	}
1009 
1010 	rm_db->num_entries = parms->num_elements;
1011 	rm_db->dir = parms->dir;
1012 	rm_db->module = parms->module;
1013 	*parms->rm_db = (void *)rm_db;
1014 
1015 #if (TF_RM_DEBUG == 1)
1016 
1017 	printf("%s: module:%s\n",
1018 	       tf_dir_2_str(parms->dir),
1019 	       tf_module_2_str(parms->module));
1020 #endif /* (TF_RM_DEBUG == 1) */
1021 
1022 	tfp_free((void *)req);
1023 	tfp_free((void *)resv);
1024 	tfp_free((void *)req_cnt);
1025 	return 0;
1026 
1027  fail:
1028 	tfp_free((void *)req);
1029 	tfp_free((void *)resv);
	for (i = 0; i < parms->num_elements; i++)
		tfp_free((void *)db[i].pool);
1031 	tfp_free((void *)db);
1032 	tfp_free((void *)rm_db);
1033 	tfp_free((void *)req_cnt);
1034 	parms->rm_db = NULL;
1035 
1036 	return -EINVAL;
1037 }

int
1039 tf_rm_free_db(struct tf *tfp,
1040 	      struct tf_rm_free_db_parms *parms)
1041 {
1042 	int rc;
1043 	int i;
1044 	uint16_t resv_size = 0;
1045 	struct tf_rm_new_db *rm_db;
1046 	struct tf_rm_resc_entry *resv;
1047 	bool residuals_found = false;
1048 
1049 	TF_CHECK_PARMS2(parms, parms->rm_db);
1050 
1051 	/* Device unbind happens when the TF Session is closed and the
1052 	 * session ref count is 0. Device unbind will cleanup each of
1053 	 * its support modules, i.e. Identifier, thus we're ending up
1054 	 * here to close the DB.
1055 	 *
1056 	 * On TF Session close it is assumed that the session has already
1057 	 * cleaned up all its resources, individually, while
1058 	 * destroying its flows.
1059 	 *
1060 	 * To assist in the 'cleanup checking' the DB is checked for any
1061 	 * remaining elements and logged if found to be the case.
1062 	 *
1063 	 * Any such elements will need to be 'cleared' ahead of
1064 	 * returning the resources to the HCAPI RM.
1065 	 *
1066 	 * RM will signal FW to flush the DB resources. FW will
1067 	 * perform the invalidation. TF Session close will return the
1068 	 * previous allocated elements to the RM and then close the
1069 	 * HCAPI RM registration. That then saves several 'free' msgs
1070 	 * from being required.
1071 	 */
1072 
1073 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1074 
1075 	/* Check for residuals that the client didn't clean up */
1076 	rc = tf_rm_check_residuals(rm_db,
1077 				   &resv_size,
1078 				   &resv,
1079 				   &residuals_found);
1080 	if (rc)
1081 		return rc;
1082 
1083 	/* Invalidate any residuals followed by a DB traversal for
1084 	 * pool cleanup.
1085 	 */
1086 	if (residuals_found) {
1087 		rc = tf_msg_session_resc_flush(tfp,
1088 					       parms->dir,
1089 					       resv_size,
1090 					       resv);
1091 		tfp_free((void *)resv);
1092 		/* On failure we still have to cleanup so we can only
1093 		 * log that FW failed.
1094 		 */
1095 		if (rc)
1096 			TFP_DRV_LOG(ERR,
1097 				    "%s: Internal Flush error, module:%s\n",
1098 				    tf_dir_2_str(parms->dir),
1099 				    tf_module_2_str(rm_db->module));
1100 	}
1101 
	/* No need to check the configuration type; even if we do not
	 * have a BA pool, freeing a NULL pointer is harmless.
1104 	 */
1105 	for (i = 0; i < rm_db->num_entries; i++)
1106 		tfp_free((void *)rm_db->db[i].pool);
1107 
1108 	tfp_free((void *)parms->rm_db);
1109 
1110 	return rc;
1111 }

/**
1113  * Get the bit allocator pool associated with the subtype and the db
1114  *
1115  * [in] rm_db
1116  *   Pointer to the DB
1117  *
1118  * [in] subtype
1119  *   Module subtype used to index into the module specific database.
1120  *   An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
1121  *   module subtype of TF_MODULE_TYPE_TABLE.
1122  *
1123  * [in/out] pool
1124  *   Pointer to the bit allocator pool used
1125  *
1126  * [in/out] new_subtype
1127  *   Pointer to the subtype of the actual pool used
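 *
 * If the subtype is configured as HCAPI_BA_CHILD the lookup is
 * redirected through parent_subtype, so the returned pool is the
 * parent's pool and new_subtype reports the parent subtype.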
1128  * Returns:
1129  *     0          - Success
1130  *   - ENOTSUP    - Operation not supported
1131  */
1132 static int
1133 tf_rm_get_pool(struct tf_rm_new_db *rm_db,
1134 	       uint16_t subtype,
1135 	       struct bitalloc **pool,
1136 	       uint16_t *new_subtype)
1137 {
1138 	int rc = 0;
1139 	uint16_t tmp_subtype = subtype;
1140 
1141 	/* If we are a child, get the parent table index */
1142 	if (rm_db->db[subtype].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1143 		tmp_subtype = rm_db->db[subtype].parent_subtype;
1144 
1145 	*pool = rm_db->db[tmp_subtype].pool;
1146 
1147 	/* Bail out if the pool is not valid, should never happen */
1148 	if (rm_db->db[tmp_subtype].pool == NULL) {
1149 		rc = -ENOTSUP;
1150 		TFP_DRV_LOG(ERR,
1151 			    "%s: Invalid pool for this type:%d, rc:%s\n",
1152 			    tf_dir_2_str(rm_db->dir),
1153 			    tmp_subtype,
1154 			    strerror(-rc));
1155 		return rc;
1156 	}
1157 	*new_subtype = tmp_subtype;
1158 	return rc;
1159 }
1160 
1161 int
1162 tf_rm_allocate(struct tf_rm_allocate_parms *parms)
1163 {
1164 	int rc;
1165 	int id;
1166 	uint32_t index;
1167 	struct tf_rm_new_db *rm_db;
1168 	enum tf_rm_elem_cfg_type cfg_type;
1169 	struct bitalloc *pool;
1170 	uint16_t subtype;
1171 
1172 	TF_CHECK_PARMS2(parms, parms->rm_db);
1173 
1174 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1175 	TF_CHECK_PARMS1(rm_db->db);
1176 
1177 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1178 
1179 	/* Bail out if not controlled by RM */
1180 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1181 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1182 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1183 		return -ENOTSUP;
1184 
1185 	rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1186 	if (rc)
1187 		return rc;
1188 	/*
	 * priority  0: allocate from the top of the TCAM, i.e. high
	 * priority !0: allocate the index from the bottom, i.e. lowest
1191 	 */
1192 	if (parms->priority)
1193 		id = ba_alloc_reverse(pool);
1194 	else
1195 		id = ba_alloc(pool);
1196 	if (id == BA_FAIL) {
1197 		rc = -ENOMEM;
1198 		TFP_DRV_LOG(ERR,
1199 			    "%s: Allocation failed, rc:%s\n",
1200 			    tf_dir_2_str(rm_db->dir),
1201 			    strerror(-rc));
1202 		return rc;
1203 	}
1204 
1205 	/* Adjust for any non zero start value */
1206 	rc = tf_rm_adjust_index(rm_db->db,
1207 				TF_RM_ADJUST_ADD_BASE,
1208 				subtype,
1209 				id,
1210 				&index);
1211 	if (rc) {
1212 		TFP_DRV_LOG(ERR,
1213 			    "%s: Alloc adjust of base index failed, rc:%s\n",
1214 			    tf_dir_2_str(rm_db->dir),
1215 			    strerror(-rc));
1216 		return -EINVAL;
1217 	}
1218 
1219 	*parms->index = index;
1220 	if (parms->base_index)
1221 		*parms->base_index = id;
1222 
1223 	return rc;
1224 }
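
/*
 * Example (informal): for a subtype whose reservation starts at 0x200,
 * tf_rm_allocate() hands out *index = 0x200 + the 0 based pool id (the
 * raw id is also returned in *base_index when requested), and
 * tf_rm_free() subtracts the base again before returning the id to the
 * BA pool.
 */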
1225 
1226 int
1227 tf_rm_free(struct tf_rm_free_parms *parms)
1228 {
1229 	int rc;
1230 	uint32_t adj_index;
1231 	struct tf_rm_new_db *rm_db;
1232 	enum tf_rm_elem_cfg_type cfg_type;
1233 	struct bitalloc *pool;
1234 	uint16_t subtype;
1235 
1236 	TF_CHECK_PARMS2(parms, parms->rm_db);
1237 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1238 	TF_CHECK_PARMS1(rm_db->db);
1239 
1240 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1241 
1242 	/* Bail out if not controlled by RM */
1243 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1244 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1245 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1246 		return -ENOTSUP;
1247 
1248 	rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1249 	if (rc)
1250 		return rc;
1251 
1252 	/* Adjust for any non zero start value */
1253 	rc = tf_rm_adjust_index(rm_db->db,
1254 				TF_RM_ADJUST_RM_BASE,
1255 				subtype,
1256 				parms->index,
1257 				&adj_index);
1258 	if (rc)
1259 		return rc;
1260 
1261 	rc = ba_free(pool, adj_index);
	/* No logging; the direction needed for the log message is not
	 * available here.
	 */
1263 	if (rc)
1264 		return rc;
1265 
1266 	return rc;
1267 }
1268 
1269 int
1270 tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms)
1271 {
1272 	int rc;
1273 	uint32_t adj_index;
1274 	struct tf_rm_new_db *rm_db;
1275 	enum tf_rm_elem_cfg_type cfg_type;
1276 	struct bitalloc *pool;
1277 	uint16_t subtype;
1278 
1279 	TF_CHECK_PARMS2(parms, parms->rm_db);
1280 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1281 	TF_CHECK_PARMS1(rm_db->db);
1282 
1283 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1284 
1285 	/* Bail out if not controlled by RM */
1286 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1287 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1288 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1289 		return -ENOTSUP;
1290 
1291 	rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1292 	if (rc)
1293 		return rc;
1294 
1295 	/* Adjust for any non zero start value */
1296 	rc = tf_rm_adjust_index(rm_db->db,
1297 				TF_RM_ADJUST_RM_BASE,
1298 				subtype,
1299 				parms->index,
1300 				&adj_index);
1301 	if (rc)
1302 		return rc;
1303 
1304 	if (parms->base_index)
1305 		*parms->base_index = adj_index;
1306 	*parms->allocated = ba_inuse(pool, adj_index);
1307 
1308 	return rc;
1309 }
1310 
1311 int
1312 tf_rm_get_info(struct tf_rm_get_alloc_info_parms *parms)
1313 {
1314 	struct tf_rm_new_db *rm_db;
1315 	enum tf_rm_elem_cfg_type cfg_type;
1316 
1317 	TF_CHECK_PARMS2(parms, parms->rm_db);
1318 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1319 	TF_CHECK_PARMS1(rm_db->db);
1320 
1321 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1322 
1323 	/* Bail out if not controlled by HCAPI */
1324 	if (cfg_type == TF_RM_ELEM_CFG_NULL)
1325 		return -ENOTSUP;
1326 
1327 	memcpy(parms->info,
1328 	       &rm_db->db[parms->subtype].alloc,
1329 	       sizeof(struct tf_rm_alloc_info));
1330 
1331 	return 0;
1332 }
1333 
1334 int
1335 tf_rm_get_all_info(struct tf_rm_get_alloc_info_parms *parms, int size)
1336 {
1337 	struct tf_rm_new_db *rm_db;
1338 	enum tf_rm_elem_cfg_type cfg_type;
1339 	struct tf_rm_alloc_info *info = parms->info;
1340 	int i;
1341 
1342 	TF_CHECK_PARMS1(parms);
1343 
1344 	/* No rm info available for this module type
1345 	 */
1346 	if (!parms->rm_db)
1347 		return -ENOMEM;
1348 
1349 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1350 	TF_CHECK_PARMS1(rm_db->db);
1351 
1352 	for (i = 0; i < size; i++) {
1353 		cfg_type = rm_db->db[i].cfg_type;
1354 
1355 		/* Bail out if not controlled by HCAPI */
1356 		if (cfg_type == TF_RM_ELEM_CFG_NULL) {
1357 			info++;
1358 			continue;
1359 		}
1360 
1361 		memcpy(info,
1362 		       &rm_db->db[i].alloc,
1363 		       sizeof(struct tf_rm_alloc_info));
1364 		info++;
1365 	}
1366 
1367 	return 0;
1368 }
1369 
1370 int
1371 tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms)
1372 {
1373 	struct tf_rm_new_db *rm_db;
1374 	enum tf_rm_elem_cfg_type cfg_type;
1375 
1376 	TF_CHECK_PARMS2(parms, parms->rm_db);
1377 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1378 	TF_CHECK_PARMS1(rm_db->db);
1379 
1380 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1381 
1382 	/* Bail out if not controlled by HCAPI */
1383 	if (cfg_type == TF_RM_ELEM_CFG_NULL)
1384 		return -ENOTSUP;
1385 
1386 	*parms->hcapi_type = rm_db->db[parms->subtype].hcapi_type;
1387 
1388 	return 0;
1389 }

int
1391 tf_rm_get_slices(struct tf_rm_get_slices_parms *parms)
1392 {
1393 	struct tf_rm_new_db *rm_db;
1394 	enum tf_rm_elem_cfg_type cfg_type;
1395 
1396 	TF_CHECK_PARMS2(parms, parms->rm_db);
1397 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1398 	TF_CHECK_PARMS1(rm_db->db);
1399 
1400 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1401 
1402 	/* Bail out if not controlled by HCAPI */
1403 	if (cfg_type == TF_RM_ELEM_CFG_NULL)
1404 		return -ENOTSUP;
1405 
1406 	*parms->slices = rm_db->db[parms->subtype].slices;
1407 
1408 	return 0;
1409 }
1410 
1411 int
1412 tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms)
1413 {
1414 	int rc = 0;
1415 	struct tf_rm_new_db *rm_db;
1416 	enum tf_rm_elem_cfg_type cfg_type;
1417 
1418 	TF_CHECK_PARMS2(parms, parms->rm_db);
1419 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1420 	TF_CHECK_PARMS1(rm_db->db);
1421 
1422 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1423 
1424 	/* Bail out if not a BA pool */
1425 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1426 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1427 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1428 		return -ENOTSUP;
1429 
	/* Bail silently (no logging); if the pool is not valid, no
	 * elements were allocated for it.
1432 	 */
1433 	if (rm_db->db[parms->subtype].pool == NULL) {
1434 		*parms->count = 0;
1435 		return 0;
1436 	}
1437 
1438 	*parms->count = ba_inuse_count(rm_db->db[parms->subtype].pool);
1439 
1440 	return rc;
1441 }

/* Only used for table bulk get at this time
1443  */
1444 int
1445 tf_rm_check_indexes_in_range(struct tf_rm_check_indexes_in_range_parms *parms)
1446 {
1447 	struct tf_rm_new_db *rm_db;
1448 	enum tf_rm_elem_cfg_type cfg_type;
1449 	uint32_t base_index;
1450 	uint32_t stride;
1451 	int rc = 0;
1452 	struct bitalloc *pool;
1453 	uint16_t subtype;
1454 
1455 	TF_CHECK_PARMS2(parms, parms->rm_db);
1456 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1457 	TF_CHECK_PARMS1(rm_db->db);
1458 
1459 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1460 
1461 	/* Bail out if not a BA pool */
1462 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1463 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1464 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1465 		return -ENOTSUP;
1466 
1467 	rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1468 	if (rc)
1469 		return rc;
1470 
1471 	base_index = rm_db->db[subtype].alloc.entry.start;
1472 	stride = rm_db->db[subtype].alloc.entry.stride;
1473 
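	/* E.g. base_index 0x100 with stride 0x40 accepts starting_index
	 * values 0x100..0x13f, provided starting_index + num_entries
	 * does not exceed 0x140.
	 */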
1474 	if (parms->starting_index < base_index ||
1475 	    parms->starting_index + parms->num_entries > base_index + stride)
1476 		return -EINVAL;
1477 
1478 	return rc;
1479 }
1480