xref: /dpdk/drivers/net/bnxt/tf_core/tf_rm.c (revision 7917b0d38e92e8b9ec5a870415b791420e10f11a)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2023 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <string.h>
7 #include <rte_common.h>
8 #include <rte_debug.h>
9 #include <cfa_resource_types.h>
10 #include "tf_rm.h"
11 #include "tf_common.h"
12 #include "tf_util.h"
13 #include "tf_session.h"
14 #include "tf_device.h"
15 #include "tfp.h"
16 #include "tf_msg.h"
17 
/**
 * Generic RM Element data type that an RM DB is built upon.
 */
struct tf_rm_element {
	/**
	 * RM Element configuration type. If Private then the
	 * hcapi_type can be ignored. If Null then the element is not
	 * valid for the device.
	 */
	enum tf_rm_elem_cfg_type cfg_type;

	/**
	 * HCAPI RM Type for the element, i.e. the firmware resource
	 * type this element maps to.
	 */
	uint16_t hcapi_type;

	/**
	 * Resource slices.  How many slices will fit in the
	 * resource pool chunk size.
	 */
	uint8_t slices;

	/**
	 * HCAPI RM allocated range information (start/stride) for the
	 * element.
	 */
	struct tf_rm_alloc_info alloc;

	/**
	 * If cfg_type == HCAPI_BA_CHILD, this field indicates
	 * the parent module subtype for look up into the parent pool.
	 * An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
	 * module subtype of TF_MODULE_TYPE_TABLE.
	 */
	uint16_t parent_subtype;

	/**
	 * Bit allocator pool for the element. Pool size is controlled
	 * by the struct tf_session_resources at time of session creation.
	 * Null indicates that the pool is not used for the element.
	 */
	struct bitalloc *pool;
};
60 
61 /**
62  * TF RM DB definition
63  */
struct tf_rm_new_db {
	/**
	 * Number of elements in the DB (i.e. entries in the db array).
	 */
	uint16_t num_entries;

	/**
	 * Direction (RX/TX) this DB controls.
	 */
	enum tf_dir dir;

	/**
	 * Module type, used for logging purposes.
	 */
	enum tf_module_type module;

	/**
	 * The DB consists of an array of elements, indexed by module
	 * subtype.
	 */
	struct tf_rm_element *db;
};
85 
/**
 * Counts the number of HCAPI reservation requests that are valid for
 * the device.
 *
 * A configuration element of type TF_RM_ELEM_CFG_NULL is not
 * supported by the device; a non-zero reservation against such an
 * element is logged (except for the EM module, which uses a split
 * configuration array and would always trip this check).
 *
 * [in] dir
 *   Receive or transmit direction, used for logging
 *
 * [in] module
 *   Module type the reservations belong to, used for logging
 *
 * [in] cfg
 *   Pointer to the DB configuration
 *
 * [in] reservations
 *   Pointer to the allocation values associated with the module
 *
 * [in] count
 *   Number of DB configuration elements
 *
 * [out] valid_count
 *   Number of supported HCAPI entries with a reservation value
 *   greater than 0
 */
109 static void
110 tf_rm_count_hcapi_reservations(enum tf_dir dir,
111 			       enum tf_module_type module,
112 			       struct tf_rm_element_cfg *cfg,
113 			       uint16_t *reservations,
114 			       uint16_t count,
115 			       uint16_t *valid_count)
116 {
117 	int i;
118 	uint16_t cnt = 0;
119 
120 	for (i = 0; i < count; i++) {
121 		if (cfg[i].cfg_type != TF_RM_ELEM_CFG_NULL &&
122 		    reservations[i] > 0)
123 			cnt++;
124 
125 		/* Only log msg if a type is attempted reserved and
126 		 * not supported. We ignore EM module as its using a
127 		 * split configuration array thus it would fail for
128 		 * this type of check.
129 		 */
130 		if (module != TF_MODULE_TYPE_EM &&
131 		    cfg[i].cfg_type == TF_RM_ELEM_CFG_NULL &&
132 		    reservations[i] > 0) {
133 			TFP_DRV_LOG(ERR,
134 				"%s, %s, %s allocation of %d not supported\n",
135 				tf_module_2_str(module),
136 				tf_dir_2_str(dir),
137 				tf_module_subtype_2_str(module, i),
138 				reservations[i]);
139 		}
140 	}
141 
142 	*valid_count = cnt;
143 }
144 
145 /**
146  * Resource Manager Adjust of base index definitions.
147  */
enum tf_rm_adjust_type {
	TF_RM_ADJUST_ADD_BASE, /**< Adds base to the index */
	TF_RM_ADJUST_RM_BASE   /**< Removes base from the index */
};
152 
153 /**
154  * Adjust an index according to the allocation information.
155  *
156  * All resources are controlled in a 0 based pool. Some resources, by
157  * design, are not 0 based, i.e. Full Action Records (SRAM) thus they
158  * need to be adjusted before they are handed out.
159  *
160  * [in] db
161  *   Pointer to the db, used for the lookup
162  *
163  * [in] action
164  *   Adjust action
165  *
166  * [in] subtype
167  *   TF module subtype used as an index into the database.
168  *   An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
169  *   module subtype of TF_MODULE_TYPE_TABLE.
170  *
171  * [in] index
172  *   Index to convert
173  *
174  * [out] adj_index
175  *   Adjusted index
176  *
177  * Returns:
178  *     0          - Success
179  *   - EOPNOTSUPP - Operation not supported
180  */
181 static int
182 tf_rm_adjust_index(struct tf_rm_element *db,
183 		   enum tf_rm_adjust_type action,
184 		   uint32_t subtype,
185 		   uint32_t index,
186 		   uint32_t *adj_index)
187 {
188 	int rc = 0;
189 	uint32_t base_index;
190 
191 	base_index = db[subtype].alloc.entry.start;
192 
193 	switch (action) {
194 	case TF_RM_ADJUST_RM_BASE:
195 		*adj_index = index - base_index;
196 		break;
197 	case TF_RM_ADJUST_ADD_BASE:
198 		*adj_index = index + base_index;
199 		break;
200 	default:
201 		return -EOPNOTSUPP;
202 	}
203 
204 	return rc;
205 }
206 
207 /**
208  * Performs a check of the passed in DB for any lingering elements. If
209  * a resource type was found to not have been cleaned up by the caller
210  * then its residual values are recorded, logged and passed back in an
211  * allocate reservation array that the caller can pass to the FW for
212  * cleanup.
213  *
214  * [in] db
215  *   Pointer to the db, used for the lookup
216  *
217  * [out] resv_size
218  *   Pointer to the reservation size of the generated reservation
219  *   array.
220  *
 * [in/out] resv
 *   Pointer to a pointer to a reservation array. The reservation
 *   array is allocated after the residual scan and holds any found
 *   residual entries. Thus it can be smaller than the DB that the
 *   check was performed on. Array must be freed by the caller.
226  *
227  * [out] residuals_present
228  *   Pointer to a bool flag indicating if residual was present in the
229  *   DB
230  *
231  * Returns:
232  *     0          - Success
233  *   - EOPNOTSUPP - Operation not supported
234  */
235 static int
236 tf_rm_check_residuals(struct tf_rm_new_db *rm_db,
237 		      uint16_t *resv_size,
238 		      struct tf_rm_resc_entry **resv,
239 		      bool *residuals_present)
240 {
241 	int rc;
242 	int i;
243 	int f;
244 	uint16_t count;
245 	uint16_t found;
246 	uint16_t *residuals = NULL;
247 	uint16_t hcapi_type;
248 	struct tf_rm_get_inuse_count_parms iparms;
249 	struct tf_rm_get_alloc_info_parms aparms;
250 	struct tf_rm_get_hcapi_parms hparms;
251 	struct tf_rm_alloc_info info;
252 	struct tfp_calloc_parms cparms;
253 	struct tf_rm_resc_entry *local_resv = NULL;
254 
255 	/* Create array to hold the entries that have residuals */
256 	cparms.nitems = rm_db->num_entries;
257 	cparms.size = sizeof(uint16_t);
258 	cparms.alignment = 0;
259 	rc = tfp_calloc(&cparms);
260 	if (rc)
261 		return rc;
262 
263 	residuals = (uint16_t *)cparms.mem_va;
264 
265 	/* Traverse the DB and collect any residual elements */
266 	iparms.rm_db = rm_db;
267 	iparms.count = &count;
268 	for (i = 0, found = 0; i < rm_db->num_entries; i++) {
269 		iparms.subtype = i;
270 		rc = tf_rm_get_inuse_count(&iparms);
271 		/* Not a device supported entry, just skip */
272 		if (rc == -ENOTSUP)
273 			continue;
274 		if (rc)
275 			goto cleanup_residuals;
276 
277 		if (count) {
278 			found++;
279 			residuals[i] = count;
280 			*residuals_present = true;
281 		}
282 	}
283 
284 	if (*residuals_present) {
285 		/* Populate a reduced resv array with only the entries
286 		 * that have residuals.
287 		 */
288 		cparms.nitems = found;
289 		cparms.size = sizeof(struct tf_rm_resc_entry);
290 		cparms.alignment = 0;
291 		rc = tfp_calloc(&cparms);
292 		if (rc)
293 			return rc;
294 
295 		local_resv = (struct tf_rm_resc_entry *)cparms.mem_va;
296 
297 		aparms.rm_db = rm_db;
298 		hparms.rm_db = rm_db;
299 		hparms.hcapi_type = &hcapi_type;
300 		for (i = 0, f = 0; i < rm_db->num_entries; i++) {
301 			if (residuals[i] == 0)
302 				continue;
303 			aparms.subtype = i;
304 			aparms.info = &info;
305 			rc = tf_rm_get_info(&aparms);
306 			if (rc)
307 				goto cleanup_all;
308 
309 			hparms.subtype = i;
310 			rc = tf_rm_get_hcapi_type(&hparms);
311 			if (rc)
312 				goto cleanup_all;
313 
314 			local_resv[f].type = hcapi_type;
315 			local_resv[f].start = info.entry.start;
316 			local_resv[f].stride = info.entry.stride;
317 			f++;
318 		}
319 		*resv_size = found;
320 	}
321 
322 	tfp_free((void *)residuals);
323 	*resv = local_resv;
324 
325 	return 0;
326 
327  cleanup_all:
328 	tfp_free((void *)local_resv);
329 	*resv = NULL;
330  cleanup_residuals:
331 	tfp_free((void *)residuals);
332 
333 	return rc;
334 }
335 
336 /**
337  * Some resources do not have a 1:1 mapping between the Truflow type and the cfa
338  * resource type (HCAPI RM).  These resources have multiple Truflow types which
339  * map to a single HCAPI RM type.  In order to support this, one Truflow type
340  * sharing the HCAPI resources is designated the parent.  All other Truflow
341  * types associated with that HCAPI RM type are designated the children.
342  *
343  * This function updates the resource counts of any HCAPI_BA_PARENT with the
344  * counts of the HCAPI_BA_CHILDREN.  These are read from the alloc_cnt and
345  * written back to the req_cnt.
346  *
347  * [in] cfg
348  *   Pointer to an array of module specific Truflow type indexed RM cfg items
349  *
350  * [in] alloc_cnt
351  *   Pointer to the tf_open_session() configured array of module specific
352  *   Truflow type indexed requested counts.
353  *
354  * [in/out] req_cnt
355  *   Pointer to the location to put the updated resource counts.
356  *
357  * Returns:
358  *     0          - Success
359  *     -          - Failure if negative
360  */
361 static int
362 tf_rm_update_parent_reservations(struct tf *tfp,
363 				 struct tf_dev_info *dev,
364 				 struct tf_rm_element_cfg *cfg,
365 				 uint16_t *alloc_cnt,
366 				 uint16_t num_elements,
367 				 uint16_t *req_cnt)
368 {
369 	int parent, child;
370 	const char *type_str = NULL;
371 
372 	/* Search through all the elements */
373 	for (parent = 0; parent < num_elements; parent++) {
374 		uint16_t combined_cnt = 0;
375 
376 		/* If I am a parent */
377 		if (cfg[parent].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
378 			uint8_t p_slices = cfg[parent].slices;
379 
380 			RTE_ASSERT(p_slices);
381 
382 			combined_cnt = alloc_cnt[parent] / p_slices;
383 
384 			if (alloc_cnt[parent] % p_slices)
385 				combined_cnt++;
386 
387 			if (alloc_cnt[parent]) {
388 				dev->ops->tf_dev_get_resource_str(tfp,
389 							 cfg[parent].hcapi_type,
390 							 &type_str);
391 			}
392 
393 			/* Search again through all the elements */
394 			for (child = 0; child < num_elements; child++) {
395 				/* If this is one of my children */
396 				if (cfg[child].cfg_type ==
397 				    TF_RM_ELEM_CFG_HCAPI_BA_CHILD &&
398 				    cfg[child].parent_subtype == parent &&
399 				    alloc_cnt[child]) {
400 					uint8_t c_slices = cfg[child].slices;
401 					uint16_t cnt = 0;
402 
403 					RTE_ASSERT(c_slices);
404 
405 					dev->ops->tf_dev_get_resource_str(tfp,
406 							  cfg[child].hcapi_type,
407 							   &type_str);
408 
409 					/* Increment the parents combined count
410 					 * with each child's count adjusted for
411 					 * number of slices per RM alloc item.
412 					 */
413 					cnt = alloc_cnt[child] / c_slices;
414 
415 					if (alloc_cnt[child] % c_slices)
416 						cnt++;
417 
418 					combined_cnt += cnt;
419 					/* Clear the requested child count */
420 					req_cnt[child] = 0;
421 				}
422 			}
423 			/* Save the parent count to be requested */
424 			req_cnt[parent] = combined_cnt * 2;
425 		}
426 	}
427 	return 0;
428 }
429 
/**
 * Creates and populates an RM DB for the module given in parms.
 *
 * Flow: query FW capabilities (RM QCAPS), fold child reservations
 * into their parents, build a per-HCAPI-type reservation request,
 * ask FW to allocate the resources, then construct the module DB
 * including a bit-allocator pool per BA-managed element.
 *
 * NOTE(review): several early-return error paths exit without
 * releasing earlier tfp_calloc() buffers (query, req_cnt, req, resv),
 * and query is not freed on the success path either — verify intended
 * lifetime against the tfp allocator semantics.
 */
int
tf_rm_create_db(struct tf *tfp,
		struct tf_rm_create_db_parms *parms)
{
	int rc;
	struct tf_session *tfs;
	struct tf_dev_info *dev;
	int i, j;
	uint16_t max_types, hcapi_items, *req_cnt;
	struct tfp_calloc_parms cparms;
	struct tf_rm_resc_req_entry *query;
	enum tf_rm_resc_resv_strategy resv_strategy;
	struct tf_rm_resc_req_entry *req;
	struct tf_rm_resc_entry *resv;
	struct tf_rm_new_db *rm_db;
	struct tf_rm_element *db;
	uint32_t pool_size;

	TF_CHECK_PARMS2(tfp, parms);

	/* Retrieve the session information */
	rc = tf_session_get_session_internal(tfp, &tfs);
	if (rc)
		return rc;

	/* Retrieve device information */
	rc = tf_session_get_device(tfs, &dev);
	if (rc)
		return rc;

	/* Need device max number of elements for the RM QCAPS */
	/* NOTE(review): rc from tf_dev_get_max_types is not checked;
	 * on failure max_types may be unset — confirm.
	 */
	rc = dev->ops->tf_dev_get_max_types(tfp, &max_types);

	/* Allocate memory for RM QCAPS request */
	cparms.nitems = max_types;
	cparms.size = sizeof(struct tf_rm_resc_req_entry);
	cparms.alignment = 0;
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;

	query = (struct tf_rm_resc_req_entry *)cparms.mem_va;

	/* Get Firmware Capabilities */
	rc = tf_msg_session_resc_qcaps(tfp,
				       dev,
				       parms->dir,
				       max_types,
				       query,
				       &resv_strategy,
				       NULL);
	if (rc)
		return rc;

	/* Copy requested counts (alloc_cnt) from tf_open_session() to local
	 * copy (req_cnt) so that it can be updated if required.
	 */

	cparms.nitems = parms->num_elements;
	cparms.size = sizeof(uint16_t);
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;

	req_cnt = (uint16_t *)cparms.mem_va;

	tfp_memcpy(req_cnt, parms->alloc_cnt,
		   parms->num_elements * sizeof(uint16_t));

	/* Update the req_cnt based upon the element configuration:
	 * child counts are folded into their HCAPI_BA_PARENT entry.
	 */
	tf_rm_update_parent_reservations(tfp, dev, parms->cfg,
					 parms->alloc_cnt,
					 parms->num_elements,
					 req_cnt);

	/* Process capabilities against DB requirements. However, as a
	 * DB can hold elements that are not HCAPI we can reduce the
	 * req msg content by removing those out of the request yet
	 * the DB holds them all as to give a fast lookup. We can also
	 * remove entries where there are no request for elements.
	 */
	tf_rm_count_hcapi_reservations(parms->dir,
				       parms->module,
				       parms->cfg,
				       req_cnt,
				       parms->num_elements,
				       &hcapi_items);

	/* Nothing to reserve for this module; no DB is created */
	if (hcapi_items == 0) {
		parms->rm_db = NULL;
		return -ENOMEM;
	}

	/* Alloc request, alignment already set */
	cparms.nitems = (size_t)hcapi_items;
	cparms.size = sizeof(struct tf_rm_resc_req_entry);
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;
	req = (struct tf_rm_resc_req_entry *)cparms.mem_va;

	/* Alloc reservation, alignment and nitems already set */
	cparms.size = sizeof(struct tf_rm_resc_entry);
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;
	resv = (struct tf_rm_resc_entry *)cparms.mem_va;

	/* Build the request */
	for (i = 0, j = 0; i < parms->num_elements; i++) {
		struct tf_rm_element_cfg *cfg = &parms->cfg[i];
		uint16_t hcapi_type = cfg->hcapi_type;

		/* Only perform reservation for requested entries
		 */
		if (req_cnt[i] == 0)
			continue;

		/* Skip any children in the request */
		if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI ||
		    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
		    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {

			/* Verify that we can get the full amount per qcaps.
			 */
			if (req_cnt[i] <= query[hcapi_type].max) {
				req[j].type = hcapi_type;
				req[j].min = req_cnt[i];
				req[j].max = req_cnt[i];
				j++;
			} else {
				const char *type_str;

				dev->ops->tf_dev_get_resource_str(tfp,
							      hcapi_type,
							      &type_str);
				TFP_DRV_LOG(ERR,
					"Failure, %s:%d:%s req:%d avail:%d\n",
					tf_dir_2_str(parms->dir),
					hcapi_type, type_str,
					req_cnt[i],
					query[hcapi_type].max);
				return -EINVAL;
			}
		}
	}

	/* Allocate all resources for the module type
	 */
	rc = tf_msg_session_resc_alloc(tfp,
				       dev,
				       parms->dir,
				       hcapi_items,
				       req,
				       resv);
	if (rc)
		return rc;

	/* Build the RM DB per the request */
	cparms.nitems = 1;
	cparms.size = sizeof(struct tf_rm_new_db);
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;
	rm_db = (void *)cparms.mem_va;

	/* Build the DB within RM DB */
	cparms.nitems = parms->num_elements;
	cparms.size = sizeof(struct tf_rm_element);
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;
	rm_db->db = (struct tf_rm_element *)cparms.mem_va;

	db = rm_db->db;
	for (i = 0, j = 0; i < parms->num_elements; i++) {
		struct tf_rm_element_cfg *cfg = &parms->cfg[i];
		const char *type_str;

		dev->ops->tf_dev_get_resource_str(tfp,
						  cfg->hcapi_type,
						  &type_str);

		db[i].cfg_type = cfg->cfg_type;
		db[i].hcapi_type = cfg->hcapi_type;
		db[i].slices = cfg->slices;

		/* Save the parent subtype for later use to find the pool
		 */
		if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
			db[i].parent_subtype = cfg->parent_subtype;

		/* If the element didn't request an allocation no need
		 * to create a pool nor verify if we got a reservation.
		 */
		if (req_cnt[i] == 0)
			continue;

		/* Skip any children or invalid
		 */
		if (cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI &&
		    cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
		    cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT)
			continue;

		/* If the element had requested an allocation and that
		 * allocation was a success (full amount) then
		 * allocate the pool.
		 * Note: resv[] is walked with j in the same order the
		 * request was built above, so entries line up.
		 */
		if (req_cnt[i] == resv[j].stride) {
			db[i].alloc.entry.start = resv[j].start;
			db[i].alloc.entry.stride = resv[j].stride;

			/* Only allocate BA pool if a BA type not a child */
			if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
			    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
				/* Create pool */
				pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
					     sizeof(struct bitalloc));
				/* Alloc request, alignment already set */
				cparms.nitems = pool_size;
				cparms.size = sizeof(struct bitalloc);
				rc = tfp_calloc(&cparms);
				if (rc) {
					TFP_DRV_LOG(ERR,
					 "%s: Pool alloc failed, type:%d:%s\n",
					 tf_dir_2_str(parms->dir),
					 cfg->hcapi_type, type_str);
					goto fail;
				}
				db[i].pool = (struct bitalloc *)cparms.mem_va;

				rc = ba_init(db[i].pool,
					     resv[j].stride,
					     true);
				if (rc) {
					TFP_DRV_LOG(ERR,
					  "%s: Pool init failed, type:%d:%s\n",
					  tf_dir_2_str(parms->dir),
					  cfg->hcapi_type, type_str);
					goto fail;
				}
			}
			j++;
		} else {
			/* Bail out as we want what we requested for
			 * all elements, not any less.
			 */
			TFP_DRV_LOG(ERR,
				    "%s: Alloc failed %d:%s req:%d, alloc:%d\n",
				    tf_dir_2_str(parms->dir), cfg->hcapi_type,
				    type_str, req_cnt[i], resv[j].stride);
			goto fail;
		}
	}

	rm_db->num_entries = parms->num_elements;
	rm_db->dir = parms->dir;
	rm_db->module = parms->module;
	*parms->rm_db = (void *)rm_db;

	tfp_free((void *)req);
	tfp_free((void *)resv);
	tfp_free((void *)req_cnt);
	return 0;

 fail:
	/* NOTE(review): only db->pool (element 0's pool) is freed here;
	 * pools created for later elements appear to leak. Also
	 * parms->rm_db is assigned directly here but dereferenced
	 * (*parms->rm_db) on success — confirm the intended contract.
	 */
	tfp_free((void *)req);
	tfp_free((void *)resv);
	tfp_free((void *)db->pool);
	tfp_free((void *)db);
	tfp_free((void *)rm_db);
	tfp_free((void *)req_cnt);
	parms->rm_db = NULL;

	return -EINVAL;
}
708 
/**
 * Creates and populates an RM DB for the module given in parms
 * without performing a new FW reservation.
 *
 * Same flow as tf_rm_create_db() except no QCAPS step: the request is
 * built directly from the (parent-folded) requested counts and
 * tf_msg_session_resc_info() retrieves the already-reserved resource
 * ranges from FW instead of allocating new ones.
 *
 * NOTE(review): early-return error paths exit without releasing
 * earlier tfp_calloc() buffers (req_cnt, req, resv) — verify intended
 * lifetime against the tfp allocator semantics.
 */
int
tf_rm_create_db_no_reservation(struct tf *tfp,
			       struct tf_rm_create_db_parms *parms)
{
	int rc;
	struct tf_session *tfs;
	struct tf_dev_info *dev;
	int i, j;
	uint16_t hcapi_items, *req_cnt;
	struct tfp_calloc_parms cparms;
	struct tf_rm_resc_req_entry *req;
	struct tf_rm_resc_entry *resv;
	struct tf_rm_new_db *rm_db;
	struct tf_rm_element *db;
	uint32_t pool_size;

	TF_CHECK_PARMS2(tfp, parms);

	/* Retrieve the session information */
	rc = tf_session_get_session_internal(tfp, &tfs);
	if (rc)
		return rc;

	/* Retrieve device information */
	rc = tf_session_get_device(tfs, &dev);
	if (rc)
		return rc;

	/* Copy requested counts (alloc_cnt) from tf_open_session() to local
	 * copy (req_cnt) so that it can be updated if required.
	 */

	cparms.nitems = parms->num_elements;
	cparms.size = sizeof(uint16_t);
	cparms.alignment = 0;
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;

	req_cnt = (uint16_t *)cparms.mem_va;

	tfp_memcpy(req_cnt, parms->alloc_cnt,
		   parms->num_elements * sizeof(uint16_t));

	/* Update the req_cnt based upon the element configuration:
	 * child counts are folded into their HCAPI_BA_PARENT entry.
	 */
	tf_rm_update_parent_reservations(tfp, dev, parms->cfg,
					 parms->alloc_cnt,
					 parms->num_elements,
					 req_cnt);

	/* Process capabilities against DB requirements. However, as a
	 * DB can hold elements that are not HCAPI we can reduce the
	 * req msg content by removing those out of the request yet
	 * the DB holds them all as to give a fast lookup. We can also
	 * remove entries where there are no request for elements.
	 */
	tf_rm_count_hcapi_reservations(parms->dir,
				       parms->module,
				       parms->cfg,
				       req_cnt,
				       parms->num_elements,
				       &hcapi_items);

	/* Nothing requested for this module; no DB is created */
	if (hcapi_items == 0) {
		parms->rm_db = NULL;
		return -ENOMEM;
	}

	/* Alloc request, alignment already set */
	cparms.nitems = (size_t)hcapi_items;
	cparms.size = sizeof(struct tf_rm_resc_req_entry);
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;
	req = (struct tf_rm_resc_req_entry *)cparms.mem_va;

	/* Alloc reservation, alignment and nitems already set */
	cparms.size = sizeof(struct tf_rm_resc_entry);
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;
	resv = (struct tf_rm_resc_entry *)cparms.mem_va;

	/* Build the request */
	for (i = 0, j = 0; i < parms->num_elements; i++) {
		struct tf_rm_element_cfg *cfg = &parms->cfg[i];
		uint16_t hcapi_type = cfg->hcapi_type;

		/* Only perform reservation for requested entries
		 */
		if (req_cnt[i] == 0)
			continue;

		/* Skip any children in the request */
		if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI ||
		    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
		    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
			req[j].type = hcapi_type;
			req[j].min = req_cnt[i];
			req[j].max = req_cnt[i];
			j++;
		}
	}

	/* Get all resources info for the module type
	 */
	rc = tf_msg_session_resc_info(tfp,
				      dev,
				      parms->dir,
				      hcapi_items,
				      req,
				      resv);
	if (rc)
		return rc;

	/* Build the RM DB per the request */
	cparms.nitems = 1;
	cparms.size = sizeof(struct tf_rm_new_db);
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;
	rm_db = (void *)cparms.mem_va;

	/* Build the DB within RM DB */
	cparms.nitems = parms->num_elements;
	cparms.size = sizeof(struct tf_rm_element);
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;
	rm_db->db = (struct tf_rm_element *)cparms.mem_va;

	db = rm_db->db;
	for (i = 0, j = 0; i < parms->num_elements; i++) {
		struct tf_rm_element_cfg *cfg = &parms->cfg[i];
		const char *type_str;

		dev->ops->tf_dev_get_resource_str(tfp,
						  cfg->hcapi_type,
						  &type_str);

		db[i].cfg_type = cfg->cfg_type;
		db[i].hcapi_type = cfg->hcapi_type;
		db[i].slices = cfg->slices;

		/* Save the parent subtype for later use to find the pool
		 */
		if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
			db[i].parent_subtype = cfg->parent_subtype;

		/* If the element didn't request an allocation no need
		 * to create a pool nor verify if we got a reservation.
		 */
		if (req_cnt[i] == 0)
			continue;

		/* Skip any children or invalid
		 */
		if (cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI &&
		    cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
		    cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT)
			continue;

		/* If the element had requested an allocation and that
		 * allocation was a success (full amount) then
		 * allocate the pool.
		 * Note: resv[] is walked with j in the same order the
		 * request was built above, so entries line up.
		 */
		if (req_cnt[i] == resv[j].stride) {
			db[i].alloc.entry.start = resv[j].start;
			db[i].alloc.entry.stride = resv[j].stride;

			/* Only allocate BA pool if a BA type not a child */
			if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
			    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
				/* Create pool */
				pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
					     sizeof(struct bitalloc));
				/* Alloc request, alignment already set */
				cparms.nitems = pool_size;
				cparms.size = sizeof(struct bitalloc);
				rc = tfp_calloc(&cparms);
				if (rc) {
					TFP_DRV_LOG(ERR,
					 "%s: Pool alloc failed, type:%d:%s\n",
					 tf_dir_2_str(parms->dir),
					 cfg->hcapi_type, type_str);
					goto fail;
				}
				db[i].pool = (struct bitalloc *)cparms.mem_va;

				rc = ba_init(db[i].pool,
					     resv[j].stride,
					     true);
				if (rc) {
					TFP_DRV_LOG(ERR,
					  "%s: Pool init failed, type:%d:%s\n",
					  tf_dir_2_str(parms->dir),
					  cfg->hcapi_type, type_str);
					goto fail;
				}
			}
			j++;
		} else {
			/* Bail out as we want what we requested for
			 * all elements, not any less.
			 */
			TFP_DRV_LOG(ERR,
				    "%s: Alloc failed %d:%s req:%d, alloc:%d\n",
				    tf_dir_2_str(parms->dir), cfg->hcapi_type,
				    type_str, req_cnt[i], resv[j].stride);
			goto fail;
		}
	}

	rm_db->num_entries = parms->num_elements;
	rm_db->dir = parms->dir;
	rm_db->module = parms->module;
	*parms->rm_db = (void *)rm_db;

	tfp_free((void *)req);
	tfp_free((void *)resv);
	tfp_free((void *)req_cnt);
	return 0;

 fail:
	/* NOTE(review): only db->pool (element 0's pool) is freed here;
	 * pools created for later elements appear to leak. Also
	 * parms->rm_db is assigned directly here but dereferenced
	 * (*parms->rm_db) on success — confirm the intended contract.
	 */
	tfp_free((void *)req);
	tfp_free((void *)resv);
	tfp_free((void *)db->pool);
	tfp_free((void *)db);
	tfp_free((void *)rm_db);
	tfp_free((void *)req_cnt);
	parms->rm_db = NULL;

	return -EINVAL;
}
944 
945 int
946 tf_rm_free_db(struct tf *tfp,
947 	      struct tf_rm_free_db_parms *parms)
948 {
949 	int rc;
950 	int i;
951 	uint16_t resv_size = 0;
952 	struct tf_rm_new_db *rm_db;
953 	struct tf_rm_resc_entry *resv;
954 	bool residuals_found = false;
955 
956 	TF_CHECK_PARMS2(parms, parms->rm_db);
957 
958 	/* Device unbind happens when the TF Session is closed and the
959 	 * session ref count is 0. Device unbind will cleanup each of
960 	 * its support modules, i.e. Identifier, thus we're ending up
961 	 * here to close the DB.
962 	 *
963 	 * On TF Session close it is assumed that the session has already
964 	 * cleaned up all its resources, individually, while
965 	 * destroying its flows.
966 	 *
967 	 * To assist in the 'cleanup checking' the DB is checked for any
968 	 * remaining elements and logged if found to be the case.
969 	 *
970 	 * Any such elements will need to be 'cleared' ahead of
971 	 * returning the resources to the HCAPI RM.
972 	 *
973 	 * RM will signal FW to flush the DB resources. FW will
974 	 * perform the invalidation. TF Session close will return the
975 	 * previous allocated elements to the RM and then close the
976 	 * HCAPI RM registration. That then saves several 'free' msgs
977 	 * from being required.
978 	 */
979 
980 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
981 
982 	/* Check for residuals that the client didn't clean up */
983 	rc = tf_rm_check_residuals(rm_db,
984 				   &resv_size,
985 				   &resv,
986 				   &residuals_found);
987 	if (rc)
988 		return rc;
989 
990 	/* Invalidate any residuals followed by a DB traversal for
991 	 * pool cleanup.
992 	 */
993 	if (residuals_found) {
994 		rc = tf_msg_session_resc_flush(tfp,
995 					       parms->dir,
996 					       resv_size,
997 					       resv);
998 		tfp_free((void *)resv);
999 		/* On failure we still have to cleanup so we can only
1000 		 * log that FW failed.
1001 		 */
1002 		if (rc)
1003 			TFP_DRV_LOG(ERR,
1004 				    "%s: Internal Flush error, module:%s\n",
1005 				    tf_dir_2_str(parms->dir),
1006 				    tf_module_2_str(rm_db->module));
1007 	}
1008 
1009 	/* No need to check for configuration type, even if we do not
1010 	 * have a BA pool we just delete on a null ptr, no harm
1011 	 */
1012 	for (i = 0; i < rm_db->num_entries; i++)
1013 		tfp_free((void *)rm_db->db[i].pool);
1014 
1015 	tfp_free((void *)parms->rm_db);
1016 
1017 	return rc;
1018 }
1019 
1020 /**
1021  * Get the bit allocator pool associated with the subtype and the db
1022  *
1023  * [in] rm_db
1024  *   Pointer to the DB
1025  *
1026  * [in] subtype
1027  *   Module subtype used to index into the module specific database.
1028  *   An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
1029  *   module subtype of TF_MODULE_TYPE_TABLE.
1030  *
1031  * [in/out] pool
1032  *   Pointer to the bit allocator pool used
1033  *
1034  * [in/out] new_subtype
1035  *   Pointer to the subtype of the actual pool used
1036  * Returns:
1037  *     0          - Success
1038  *   - ENOTSUP    - Operation not supported
1039  */
1040 static int
1041 tf_rm_get_pool(struct tf_rm_new_db *rm_db,
1042 	       uint16_t subtype,
1043 	       struct bitalloc **pool,
1044 	       uint16_t *new_subtype)
1045 {
1046 	int rc = 0;
1047 	uint16_t tmp_subtype = subtype;
1048 
1049 	/* If we are a child, get the parent table index */
1050 	if (rm_db->db[subtype].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1051 		tmp_subtype = rm_db->db[subtype].parent_subtype;
1052 
1053 	*pool = rm_db->db[tmp_subtype].pool;
1054 
1055 	/* Bail out if the pool is not valid, should never happen */
1056 	if (rm_db->db[tmp_subtype].pool == NULL) {
1057 		rc = -ENOTSUP;
1058 		TFP_DRV_LOG(ERR,
1059 			    "%s: Invalid pool for this type:%d, rc:%s\n",
1060 			    tf_dir_2_str(rm_db->dir),
1061 			    tmp_subtype,
1062 			    strerror(-rc));
1063 		return rc;
1064 	}
1065 	*new_subtype = tmp_subtype;
1066 	return rc;
1067 }
1068 
1069 int
1070 tf_rm_allocate(struct tf_rm_allocate_parms *parms)
1071 {
1072 	int rc;
1073 	int id;
1074 	uint32_t index;
1075 	struct tf_rm_new_db *rm_db;
1076 	enum tf_rm_elem_cfg_type cfg_type;
1077 	struct bitalloc *pool;
1078 	uint16_t subtype;
1079 
1080 	TF_CHECK_PARMS2(parms, parms->rm_db);
1081 
1082 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1083 	TF_CHECK_PARMS1(rm_db->db);
1084 
1085 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1086 
1087 	/* Bail out if not controlled by RM */
1088 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1089 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1090 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1091 		return -ENOTSUP;
1092 
1093 	rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1094 	if (rc)
1095 		return rc;
1096 	/*
1097 	 * priority  0: allocate from top of the tcam i.e. high
1098 	 * priority !0: allocate index from bottom i.e lowest
1099 	 */
1100 	if (parms->priority)
1101 		id = ba_alloc_reverse(pool);
1102 	else
1103 		id = ba_alloc(pool);
1104 	if (id == BA_FAIL) {
1105 		rc = -ENOMEM;
1106 		TFP_DRV_LOG(ERR,
1107 			    "%s: Allocation failed, rc:%s\n",
1108 			    tf_dir_2_str(rm_db->dir),
1109 			    strerror(-rc));
1110 		return rc;
1111 	}
1112 
1113 	/* Adjust for any non zero start value */
1114 	rc = tf_rm_adjust_index(rm_db->db,
1115 				TF_RM_ADJUST_ADD_BASE,
1116 				subtype,
1117 				id,
1118 				&index);
1119 	if (rc) {
1120 		TFP_DRV_LOG(ERR,
1121 			    "%s: Alloc adjust of base index failed, rc:%s\n",
1122 			    tf_dir_2_str(rm_db->dir),
1123 			    strerror(-rc));
1124 		return -EINVAL;
1125 	}
1126 
1127 	*parms->index = index;
1128 	if (parms->base_index)
1129 		*parms->base_index = id;
1130 
1131 	return rc;
1132 }
1133 
1134 int
1135 tf_rm_free(struct tf_rm_free_parms *parms)
1136 {
1137 	int rc;
1138 	uint32_t adj_index;
1139 	struct tf_rm_new_db *rm_db;
1140 	enum tf_rm_elem_cfg_type cfg_type;
1141 	struct bitalloc *pool;
1142 	uint16_t subtype;
1143 
1144 	TF_CHECK_PARMS2(parms, parms->rm_db);
1145 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1146 	TF_CHECK_PARMS1(rm_db->db);
1147 
1148 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1149 
1150 	/* Bail out if not controlled by RM */
1151 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1152 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1153 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1154 		return -ENOTSUP;
1155 
1156 	rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1157 	if (rc)
1158 		return rc;
1159 
1160 	/* Adjust for any non zero start value */
1161 	rc = tf_rm_adjust_index(rm_db->db,
1162 				TF_RM_ADJUST_RM_BASE,
1163 				subtype,
1164 				parms->index,
1165 				&adj_index);
1166 	if (rc)
1167 		return rc;
1168 
1169 	rc = ba_free(pool, adj_index);
1170 	/* No logging direction matters and that is not available here */
1171 	if (rc)
1172 		return rc;
1173 
1174 	return rc;
1175 }
1176 
1177 int
1178 tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms)
1179 {
1180 	int rc;
1181 	uint32_t adj_index;
1182 	struct tf_rm_new_db *rm_db;
1183 	enum tf_rm_elem_cfg_type cfg_type;
1184 	struct bitalloc *pool;
1185 	uint16_t subtype;
1186 
1187 	TF_CHECK_PARMS2(parms, parms->rm_db);
1188 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1189 	TF_CHECK_PARMS1(rm_db->db);
1190 
1191 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1192 
1193 	/* Bail out if not controlled by RM */
1194 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1195 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1196 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1197 		return -ENOTSUP;
1198 
1199 	rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1200 	if (rc)
1201 		return rc;
1202 
1203 	/* Adjust for any non zero start value */
1204 	rc = tf_rm_adjust_index(rm_db->db,
1205 				TF_RM_ADJUST_RM_BASE,
1206 				subtype,
1207 				parms->index,
1208 				&adj_index);
1209 	if (rc)
1210 		return rc;
1211 
1212 	if (parms->base_index)
1213 		*parms->base_index = adj_index;
1214 	*parms->allocated = ba_inuse(pool, adj_index);
1215 
1216 	return rc;
1217 }
1218 
1219 int
1220 tf_rm_get_info(struct tf_rm_get_alloc_info_parms *parms)
1221 {
1222 	struct tf_rm_new_db *rm_db;
1223 	enum tf_rm_elem_cfg_type cfg_type;
1224 
1225 	TF_CHECK_PARMS2(parms, parms->rm_db);
1226 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1227 	TF_CHECK_PARMS1(rm_db->db);
1228 
1229 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1230 
1231 	/* Bail out if not controlled by HCAPI */
1232 	if (cfg_type == TF_RM_ELEM_CFG_NULL)
1233 		return -ENOTSUP;
1234 
1235 	memcpy(parms->info,
1236 	       &rm_db->db[parms->subtype].alloc,
1237 	       sizeof(struct tf_rm_alloc_info));
1238 
1239 	return 0;
1240 }
1241 
1242 int
1243 tf_rm_get_all_info(struct tf_rm_get_alloc_info_parms *parms, int size)
1244 {
1245 	struct tf_rm_new_db *rm_db;
1246 	enum tf_rm_elem_cfg_type cfg_type;
1247 	struct tf_rm_alloc_info *info = parms->info;
1248 	int i;
1249 
1250 	TF_CHECK_PARMS1(parms);
1251 
1252 	/* No rm info available for this module type
1253 	 */
1254 	if (!parms->rm_db)
1255 		return -ENOMEM;
1256 
1257 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1258 	TF_CHECK_PARMS1(rm_db->db);
1259 
1260 	for (i = 0; i < size; i++) {
1261 		cfg_type = rm_db->db[i].cfg_type;
1262 
1263 		/* Bail out if not controlled by HCAPI */
1264 		if (cfg_type == TF_RM_ELEM_CFG_NULL) {
1265 			info++;
1266 			continue;
1267 		}
1268 
1269 		memcpy(info,
1270 		       &rm_db->db[i].alloc,
1271 		       sizeof(struct tf_rm_alloc_info));
1272 		info++;
1273 	}
1274 
1275 	return 0;
1276 }
1277 
1278 int
1279 tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms)
1280 {
1281 	struct tf_rm_new_db *rm_db;
1282 	enum tf_rm_elem_cfg_type cfg_type;
1283 
1284 	TF_CHECK_PARMS2(parms, parms->rm_db);
1285 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1286 	TF_CHECK_PARMS1(rm_db->db);
1287 
1288 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1289 
1290 	/* Bail out if not controlled by HCAPI */
1291 	if (cfg_type == TF_RM_ELEM_CFG_NULL)
1292 		return -ENOTSUP;
1293 
1294 	*parms->hcapi_type = rm_db->db[parms->subtype].hcapi_type;
1295 
1296 	return 0;
1297 }
1298 
1299 int
1300 tf_rm_get_slices(struct tf_rm_get_slices_parms *parms)
1301 {
1302 	struct tf_rm_new_db *rm_db;
1303 	enum tf_rm_elem_cfg_type cfg_type;
1304 
1305 	TF_CHECK_PARMS2(parms, parms->rm_db);
1306 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1307 	TF_CHECK_PARMS1(rm_db->db);
1308 
1309 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1310 
1311 	/* Bail out if not controlled by HCAPI */
1312 	if (cfg_type == TF_RM_ELEM_CFG_NULL)
1313 		return -ENOTSUP;
1314 
1315 	*parms->slices = rm_db->db[parms->subtype].slices;
1316 
1317 	return 0;
1318 }
1319 
1320 int
1321 tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms)
1322 {
1323 	int rc = 0;
1324 	struct tf_rm_new_db *rm_db;
1325 	enum tf_rm_elem_cfg_type cfg_type;
1326 
1327 	TF_CHECK_PARMS2(parms, parms->rm_db);
1328 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1329 	TF_CHECK_PARMS1(rm_db->db);
1330 
1331 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1332 
1333 	/* Bail out if not a BA pool */
1334 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1335 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1336 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1337 		return -ENOTSUP;
1338 
1339 	/* Bail silently (no logging), if the pool is not valid there
1340 	 * was no elements allocated for it.
1341 	 */
1342 	if (rm_db->db[parms->subtype].pool == NULL) {
1343 		*parms->count = 0;
1344 		return 0;
1345 	}
1346 
1347 	*parms->count = ba_inuse_count(rm_db->db[parms->subtype].pool);
1348 
1349 	return rc;
1350 }
1351 
1352 /* Only used for table bulk get at this time
1353  */
1354 int
1355 tf_rm_check_indexes_in_range(struct tf_rm_check_indexes_in_range_parms *parms)
1356 {
1357 	struct tf_rm_new_db *rm_db;
1358 	enum tf_rm_elem_cfg_type cfg_type;
1359 	uint32_t base_index;
1360 	uint32_t stride;
1361 	int rc = 0;
1362 	struct bitalloc *pool;
1363 	uint16_t subtype;
1364 
1365 	TF_CHECK_PARMS2(parms, parms->rm_db);
1366 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1367 	TF_CHECK_PARMS1(rm_db->db);
1368 
1369 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1370 
1371 	/* Bail out if not a BA pool */
1372 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1373 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1374 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1375 		return -ENOTSUP;
1376 
1377 	rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1378 	if (rc)
1379 		return rc;
1380 
1381 	base_index = rm_db->db[subtype].alloc.entry.start;
1382 	stride = rm_db->db[subtype].alloc.entry.stride;
1383 
1384 	if (parms->starting_index < base_index ||
1385 	    parms->starting_index + parms->num_entries > base_index + stride)
1386 		return -EINVAL;
1387 
1388 	return rc;
1389 }
1390