xref: /dpdk/drivers/net/bnxt/tf_core/tf_rm.c (revision d38febb08d57fec29fed27a2d12a507fc6fcdfa1)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2021 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <string.h>
7 
8 #include <rte_common.h>
9 #include <rte_debug.h>
10 
11 #include <cfa_resource_types.h>
12 
13 #include "tf_rm.h"
14 #include "tf_common.h"
15 #include "tf_util.h"
16 #include "tf_session.h"
17 #include "tf_device.h"
18 #include "tfp.h"
19 #include "tf_msg.h"
20 
/**
 * Generic RM Element data type that an RM DB is built upon.
 */
struct tf_rm_element {
	/**
	 * RM Element configuration type. If Private then the
	 * hcapi_type can be ignored. If Null then the element is not
	 * valid for the device.
	 */
	enum tf_rm_elem_cfg_type cfg_type;

	/**
	 * HCAPI RM Type for the element.
	 */
	uint16_t hcapi_type;

	/**
	 * HCAPI RM allocated range information (start/stride) for the
	 * element.
	 */
	struct tf_rm_alloc_info alloc;

	/**
	 * If cfg_type == HCAPI_BA_CHILD, this field indicates
	 * the parent module subtype for look up into the parent pool.
	 * An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
	 * module subtype of TF_MODULE_TYPE_TABLE.
	 */
	uint16_t parent_subtype;

	/**
	 * Bit allocator pool for the element. Pool size is controlled
	 * by the struct tf_session_resources at time of session creation.
	 * Null indicates that the pool is not used for the element.
	 */
	struct bitalloc *pool;
};
57 
/**
 * TF RM DB definition
 */
struct tf_rm_new_db {
	/**
	 * Number of elements in the DB
	 */
	uint16_t num_entries;

	/**
	 * Direction this DB controls.
	 */
	enum tf_dir dir;

	/**
	 * Module type, used for logging purposes.
	 */
	enum tf_module_type module;

	/**
	 * The DB consists of an array of num_entries elements,
	 * indexed by module subtype.
	 */
	struct tf_rm_element *db;
};
82 
/**
 * Counts the number of supported HCAPI reservation requests.
 *
 * Walks the module configuration array and counts the elements that
 * are valid for the device (cfg_type != TF_RM_ELEM_CFG_NULL) and have
 * a non-zero requested reservation. Also logs an error for any
 * reservation requested against an unsupported (NULL cfg) element,
 * except for the EM module, whose split configuration array would
 * falsely trip this check.
 *
 * [in] dir
 *   Receive or transmit direction, used for logging
 *
 * [in] module
 *   Module type, used for logging and to exempt EM from the
 *   unsupported-type check
 *
 * [in] cfg
 *   Pointer to the DB configuration
 *
 * [in] reservations
 *   Pointer to the allocation values associated with the module
 *
 * [in] count
 *   Number of DB configuration elements
 *
 * [out] valid_count
 *   Number of HCAPI entries with a reservation value greater than 0
 */
static void
tf_rm_count_hcapi_reservations(enum tf_dir dir,
			       enum tf_module_type module,
			       struct tf_rm_element_cfg *cfg,
			       uint16_t *reservations,
			       uint16_t count,
			       uint16_t *valid_count)
{
	int i;
	uint16_t cnt = 0;

	for (i = 0; i < count; i++) {
		if (cfg[i].cfg_type != TF_RM_ELEM_CFG_NULL &&
		    reservations[i] > 0)
			cnt++;

		/* Only log msg if a type is attempted reserved and
		 * not supported. We ignore EM module as its using a
		 * split configuration array thus it would fail for
		 * this type of check.
		 */
		if (module != TF_MODULE_TYPE_EM &&
		    cfg[i].cfg_type == TF_RM_ELEM_CFG_NULL &&
		    reservations[i] > 0) {
			TFP_DRV_LOG(ERR,
				"%s, %s, %s allocation of %d not supported\n",
				tf_module_2_str(module),
				tf_dir_2_str(dir),
				tf_module_subtype_2_str(module, i),
				reservations[i]);
		}
	}

	*valid_count = cnt;
}
141 
/**
 * Resource Manager adjust-of-base-index actions.
 *
 * Used by tf_rm_adjust_index() to translate between the 0 based
 * bitalloc pool space and the HCAPI RM allocated range, which may
 * start at a non-zero offset.
 */
enum tf_rm_adjust_type {
	TF_RM_ADJUST_ADD_BASE, /**< Adds base to the index */
	TF_RM_ADJUST_RM_BASE   /**< Removes base from the index */
};
149 
150 /**
151  * Adjust an index according to the allocation information.
152  *
153  * All resources are controlled in a 0 based pool. Some resources, by
154  * design, are not 0 based, i.e. Full Action Records (SRAM) thus they
155  * need to be adjusted before they are handed out.
156  *
157  * [in] db
158  *   Pointer to the db, used for the lookup
159  *
160  * [in] action
161  *   Adjust action
162  *
163  * [in] subtype
164  *   TF module subtype used as an index into the database.
165  *   An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
166  *   module subtype of TF_MODULE_TYPE_TABLE.
167  *
168  * [in] index
169  *   Index to convert
170  *
171  * [out] adj_index
172  *   Adjusted index
173  *
174  * Returns:
175  *     0          - Success
176  *   - EOPNOTSUPP - Operation not supported
177  */
178 static int
179 tf_rm_adjust_index(struct tf_rm_element *db,
180 		   enum tf_rm_adjust_type action,
181 		   uint32_t subtype,
182 		   uint32_t index,
183 		   uint32_t *adj_index)
184 {
185 	int rc = 0;
186 	uint32_t base_index;
187 
188 	base_index = db[subtype].alloc.entry.start;
189 
190 	switch (action) {
191 	case TF_RM_ADJUST_RM_BASE:
192 		*adj_index = index - base_index;
193 		break;
194 	case TF_RM_ADJUST_ADD_BASE:
195 		*adj_index = index + base_index;
196 		break;
197 	default:
198 		return -EOPNOTSUPP;
199 	}
200 
201 	return rc;
202 }
203 
204 /**
205  * Performs a check of the passed in DB for any lingering elements. If
206  * a resource type was found to not have been cleaned up by the caller
207  * then its residual values are recorded, logged and passed back in an
208  * allocate reservation array that the caller can pass to the FW for
209  * cleanup.
210  *
211  * [in] db
212  *   Pointer to the db, used for the lookup
213  *
214  * [out] resv_size
215  *   Pointer to the reservation size of the generated reservation
216  *   array.
217  *
218  * [in/out] resv
219  *   Pointer Pointer to a reservation array. The reservation array is
220  *   allocated after the residual scan and holds any found residual
221  *   entries. Thus it can be smaller than the DB that the check was
222  *   performed on. Array must be freed by the caller.
223  *
224  * [out] residuals_present
225  *   Pointer to a bool flag indicating if residual was present in the
226  *   DB
227  *
228  * Returns:
229  *     0          - Success
230  *   - EOPNOTSUPP - Operation not supported
231  */
232 static int
233 tf_rm_check_residuals(struct tf_rm_new_db *rm_db,
234 		      uint16_t *resv_size,
235 		      struct tf_rm_resc_entry **resv,
236 		      bool *residuals_present)
237 {
238 	int rc;
239 	int i;
240 	int f;
241 	uint16_t count;
242 	uint16_t found;
243 	uint16_t *residuals = NULL;
244 	uint16_t hcapi_type;
245 	struct tf_rm_get_inuse_count_parms iparms;
246 	struct tf_rm_get_alloc_info_parms aparms;
247 	struct tf_rm_get_hcapi_parms hparms;
248 	struct tf_rm_alloc_info info;
249 	struct tfp_calloc_parms cparms;
250 	struct tf_rm_resc_entry *local_resv = NULL;
251 
252 	/* Create array to hold the entries that have residuals */
253 	cparms.nitems = rm_db->num_entries;
254 	cparms.size = sizeof(uint16_t);
255 	cparms.alignment = 0;
256 	rc = tfp_calloc(&cparms);
257 	if (rc)
258 		return rc;
259 
260 	residuals = (uint16_t *)cparms.mem_va;
261 
262 	/* Traverse the DB and collect any residual elements */
263 	iparms.rm_db = rm_db;
264 	iparms.count = &count;
265 	for (i = 0, found = 0; i < rm_db->num_entries; i++) {
266 		iparms.subtype = i;
267 		rc = tf_rm_get_inuse_count(&iparms);
268 		/* Not a device supported entry, just skip */
269 		if (rc == -ENOTSUP)
270 			continue;
271 		if (rc)
272 			goto cleanup_residuals;
273 
274 		if (count) {
275 			found++;
276 			residuals[i] = count;
277 			*residuals_present = true;
278 		}
279 	}
280 
281 	if (*residuals_present) {
282 		/* Populate a reduced resv array with only the entries
283 		 * that have residuals.
284 		 */
285 		cparms.nitems = found;
286 		cparms.size = sizeof(struct tf_rm_resc_entry);
287 		cparms.alignment = 0;
288 		rc = tfp_calloc(&cparms);
289 		if (rc)
290 			return rc;
291 
292 		local_resv = (struct tf_rm_resc_entry *)cparms.mem_va;
293 
294 		aparms.rm_db = rm_db;
295 		hparms.rm_db = rm_db;
296 		hparms.hcapi_type = &hcapi_type;
297 		for (i = 0, f = 0; i < rm_db->num_entries; i++) {
298 			if (residuals[i] == 0)
299 				continue;
300 			aparms.subtype = i;
301 			aparms.info = &info;
302 			rc = tf_rm_get_info(&aparms);
303 			if (rc)
304 				goto cleanup_all;
305 
306 			hparms.subtype = i;
307 			rc = tf_rm_get_hcapi_type(&hparms);
308 			if (rc)
309 				goto cleanup_all;
310 
311 			local_resv[f].type = hcapi_type;
312 			local_resv[f].start = info.entry.start;
313 			local_resv[f].stride = info.entry.stride;
314 			f++;
315 		}
316 		*resv_size = found;
317 	}
318 
319 	tfp_free((void *)residuals);
320 	*resv = local_resv;
321 
322 	return 0;
323 
324  cleanup_all:
325 	tfp_free((void *)local_resv);
326 	*resv = NULL;
327  cleanup_residuals:
328 	tfp_free((void *)residuals);
329 
330 	return rc;
331 }
332 
333 /**
334  * Some resources do not have a 1:1 mapping between the Truflow type and the cfa
335  * resource type (HCAPI RM).  These resources have multiple Truflow types which
336  * map to a single HCAPI RM type.  In order to support this, one Truflow type
337  * sharing the HCAPI resources is designated the parent.  All other Truflow
338  * types associated with that HCAPI RM type are designated the children.
339  *
340  * This function updates the resource counts of any HCAPI_BA_PARENT with the
341  * counts of the HCAPI_BA_CHILDREN.  These are read from the alloc_cnt and
342  * written back to the req_cnt.
343  *
344  * [in] cfg
345  *   Pointer to an array of module specific Truflow type indexed RM cfg items
346  *
347  * [in] alloc_cnt
348  *   Pointer to the tf_open_session() configured array of module specific
349  *   Truflow type indexed requested counts.
350  *
351  * [in/out] req_cnt
352  *   Pointer to the location to put the updated resource counts.
353  *
354  * Returns:
355  *     0          - Success
356  *     -          - Failure if negative
357  */
358 static int
359 tf_rm_update_parent_reservations(struct tf_rm_element_cfg *cfg,
360 				 uint16_t *alloc_cnt,
361 				 uint16_t num_elements,
362 				 uint16_t *req_cnt)
363 {
364 	int parent, child;
365 
366 	/* Search through all the elements */
367 	for (parent = 0; parent < num_elements; parent++) {
368 		uint16_t combined_cnt = 0;
369 
370 		/* If I am a parent */
371 		if (cfg[parent].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
372 			/* start with my own count */
373 			RTE_ASSERT(cfg[parent].slices);
374 			combined_cnt =
375 				alloc_cnt[parent] / cfg[parent].slices;
376 
377 			if (alloc_cnt[parent] % cfg[parent].slices)
378 				combined_cnt++;
379 
380 			/* Search again through all the elements */
381 			for (child = 0; child < num_elements; child++) {
382 				/* If this is one of my children */
383 				if (cfg[child].cfg_type ==
384 				    TF_RM_ELEM_CFG_HCAPI_BA_CHILD &&
385 				    cfg[child].parent_subtype == parent) {
386 					uint16_t cnt = 0;
387 					RTE_ASSERT(cfg[child].slices);
388 
389 					/* Increment the parents combined count
390 					 * with each child's count adjusted for
391 					 * number of slices per RM allocated item.
392 					 */
393 					cnt =
394 					 alloc_cnt[child] / cfg[child].slices;
395 
396 					if (alloc_cnt[child] % cfg[child].slices)
397 						cnt++;
398 
399 					combined_cnt += cnt;
400 					/* Clear the requested child count */
401 					req_cnt[child] = 0;
402 				}
403 			}
404 			/* Save the parent count to be requested */
405 			req_cnt[parent] = combined_cnt;
406 		}
407 	}
408 	return 0;
409 }
410 
411 int
412 tf_rm_create_db(struct tf *tfp,
413 		struct tf_rm_create_db_parms *parms)
414 {
415 	int rc;
416 	struct tf_session *tfs;
417 	struct tf_dev_info *dev;
418 	int i, j;
419 	uint16_t max_types, hcapi_items, *req_cnt;
420 	struct tfp_calloc_parms cparms;
421 	struct tf_rm_resc_req_entry *query;
422 	enum tf_rm_resc_resv_strategy resv_strategy;
423 	struct tf_rm_resc_req_entry *req;
424 	struct tf_rm_resc_entry *resv;
425 	struct tf_rm_new_db *rm_db;
426 	struct tf_rm_element *db;
427 	uint32_t pool_size;
428 
429 	TF_CHECK_PARMS2(tfp, parms);
430 
431 	/* Retrieve the session information */
432 	rc = tf_session_get_session_internal(tfp, &tfs);
433 	if (rc)
434 		return rc;
435 
436 	/* Retrieve device information */
437 	rc = tf_session_get_device(tfs, &dev);
438 	if (rc)
439 		return rc;
440 
441 	/* Need device max number of elements for the RM QCAPS */
442 	rc = dev->ops->tf_dev_get_max_types(tfp, &max_types);
443 
444 
445 	/* Allocate memory for RM QCAPS request */
446 	cparms.nitems = max_types;
447 	cparms.size = sizeof(struct tf_rm_resc_req_entry);
448 	cparms.alignment = 0;
449 	rc = tfp_calloc(&cparms);
450 	if (rc)
451 		return rc;
452 
453 	query = (struct tf_rm_resc_req_entry *)cparms.mem_va;
454 
455 	/* Get Firmware Capabilities */
456 	rc = tf_msg_session_resc_qcaps(tfp,
457 				       dev,
458 				       parms->dir,
459 				       max_types,
460 				       query,
461 				       &resv_strategy);
462 	if (rc)
463 		return rc;
464 
465 	/* Copy requested counts (alloc_cnt) from tf_open_session() to local
466 	 * copy (req_cnt) so that it can be updated if required.
467 	 */
468 
469 	cparms.nitems = parms->num_elements;
470 	cparms.size = sizeof(uint16_t);
471 	rc = tfp_calloc(&cparms);
472 	if (rc)
473 		return rc;
474 
475 	req_cnt = (uint16_t *)cparms.mem_va;
476 
477 	tfp_memcpy(req_cnt, parms->alloc_cnt,
478 		   parms->num_elements * sizeof(uint16_t));
479 
480 	/* Update the req_cnt based upon the element configuration
481 	 */
482 	tf_rm_update_parent_reservations(parms->cfg,
483 					 parms->alloc_cnt,
484 					 parms->num_elements,
485 					 req_cnt);
486 
487 	/* Process capabilities against DB requirements. However, as a
488 	 * DB can hold elements that are not HCAPI we can reduce the
489 	 * req msg content by removing those out of the request yet
490 	 * the DB holds them all as to give a fast lookup. We can also
491 	 * remove entries where there are no request for elements.
492 	 */
493 	tf_rm_count_hcapi_reservations(parms->dir,
494 				       parms->module,
495 				       parms->cfg,
496 				       req_cnt,
497 				       parms->num_elements,
498 				       &hcapi_items);
499 
500 	if (hcapi_items == 0) {
501 		parms->rm_db = NULL;
502 		return -ENOMEM;
503 	}
504 
505 	/* Alloc request, alignment already set */
506 	cparms.nitems = (size_t)hcapi_items;
507 	cparms.size = sizeof(struct tf_rm_resc_req_entry);
508 	rc = tfp_calloc(&cparms);
509 	if (rc)
510 		return rc;
511 	req = (struct tf_rm_resc_req_entry *)cparms.mem_va;
512 
513 	/* Alloc reservation, alignment and nitems already set */
514 	cparms.size = sizeof(struct tf_rm_resc_entry);
515 	rc = tfp_calloc(&cparms);
516 	if (rc)
517 		return rc;
518 	resv = (struct tf_rm_resc_entry *)cparms.mem_va;
519 
520 	/* Build the request */
521 	for (i = 0, j = 0; i < parms->num_elements; i++) {
522 		struct tf_rm_element_cfg *cfg = &parms->cfg[i];
523 		uint16_t hcapi_type = cfg->hcapi_type;
524 
525 		/* Only perform reservation for requested entries
526 		 */
527 		if (req_cnt[i] == 0)
528 			continue;
529 
530 		/* Skip any children in the request */
531 		if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI ||
532 		    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
533 		    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
534 
535 			/* Verify that we can get the full amount per qcaps.
536 			 */
537 			if (req_cnt[i] <= query[hcapi_type].max) {
538 				req[j].type = hcapi_type;
539 				req[j].min = req_cnt[i];
540 				req[j].max = req_cnt[i];
541 				j++;
542 			} else {
543 				const char *type_str;
544 
545 				dev->ops->tf_dev_get_resource_str(tfp,
546 							      hcapi_type,
547 							      &type_str);
548 				TFP_DRV_LOG(ERR,
549 					    "Failure, %s:%d:%s req:%d avail:%d\n",
550 					    tf_dir_2_str(parms->dir),
551 					    hcapi_type, type_str,
552 					    req_cnt[i],
553 					    query[hcapi_type].max);
554 				return -EINVAL;
555 			}
556 		}
557 	}
558 
559 	/* Allocate all resources for the module type
560 	 */
561 	rc = tf_msg_session_resc_alloc(tfp,
562 				       dev,
563 				       parms->dir,
564 				       hcapi_items,
565 				       req,
566 				       resv);
567 	if (rc)
568 		return rc;
569 
570 	/* Build the RM DB per the request */
571 	cparms.nitems = 1;
572 	cparms.size = sizeof(struct tf_rm_new_db);
573 	rc = tfp_calloc(&cparms);
574 	if (rc)
575 		return rc;
576 	rm_db = (void *)cparms.mem_va;
577 
578 	/* Build the DB within RM DB */
579 	cparms.nitems = parms->num_elements;
580 	cparms.size = sizeof(struct tf_rm_element);
581 	rc = tfp_calloc(&cparms);
582 	if (rc)
583 		return rc;
584 	rm_db->db = (struct tf_rm_element *)cparms.mem_va;
585 
586 	db = rm_db->db;
587 	for (i = 0, j = 0; i < parms->num_elements; i++) {
588 		struct tf_rm_element_cfg *cfg = &parms->cfg[i];
589 		const char *type_str;
590 
591 		dev->ops->tf_dev_get_resource_str(tfp,
592 						  cfg->hcapi_type,
593 						  &type_str);
594 
595 		db[i].cfg_type = cfg->cfg_type;
596 		db[i].hcapi_type = cfg->hcapi_type;
597 
598 		/* Save the parent subtype for later use to find the pool
599 		 */
600 		if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
601 			db[i].parent_subtype = cfg->parent_subtype;
602 
603 		/* If the element didn't request an allocation no need
604 		 * to create a pool nor verify if we got a reservation.
605 		 */
606 		if (req_cnt[i] == 0)
607 			continue;
608 
609 		/* Skip any children or invalid
610 		 */
611 		if (cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI &&
612 		    cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
613 		    cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT)
614 			continue;
615 
616 		/* If the element had requested an allocation and that
617 		 * allocation was a success (full amount) then
618 		 * allocate the pool.
619 		 */
620 		if (req_cnt[i] == resv[j].stride) {
621 			db[i].alloc.entry.start = resv[j].start;
622 			db[i].alloc.entry.stride = resv[j].stride;
623 
624 			/* Only allocate BA pool if a BA type not a child */
625 			if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
626 			    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
627 				/* Create pool */
628 				pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
629 					     sizeof(struct bitalloc));
630 				/* Alloc request, alignment already set */
631 				cparms.nitems = pool_size;
632 				cparms.size = sizeof(struct bitalloc);
633 				rc = tfp_calloc(&cparms);
634 				if (rc) {
635 					TFP_DRV_LOG(ERR,
636 					 "%s: Pool alloc failed, type:%d:%s\n",
637 					 tf_dir_2_str(parms->dir),
638 					 cfg->hcapi_type, type_str);
639 					goto fail;
640 				}
641 				db[i].pool = (struct bitalloc *)cparms.mem_va;
642 
643 				rc = ba_init(db[i].pool,
644 					     resv[j].stride,
645 					     !tf_session_is_shared_session(tfs));
646 				if (rc) {
647 					TFP_DRV_LOG(ERR,
648 					  "%s: Pool init failed, type:%d:%s\n",
649 					  tf_dir_2_str(parms->dir),
650 					  cfg->hcapi_type, type_str);
651 					goto fail;
652 				}
653 			}
654 			j++;
655 		} else {
656 			/* Bail out as we want what we requested for
657 			 * all elements, not any less.
658 			 */
659 			TFP_DRV_LOG(ERR,
660 				    "%s: Alloc failed %d:%s req:%d, alloc:%d\n",
661 				    tf_dir_2_str(parms->dir), cfg->hcapi_type,
662 				    type_str, req_cnt[i], resv[j].stride);
663 			goto fail;
664 		}
665 	}
666 
667 	rm_db->num_entries = parms->num_elements;
668 	rm_db->dir = parms->dir;
669 	rm_db->module = parms->module;
670 	*parms->rm_db = (void *)rm_db;
671 
672 	tfp_free((void *)req);
673 	tfp_free((void *)resv);
674 	tfp_free((void *)req_cnt);
675 	return 0;
676 
677  fail:
678 	tfp_free((void *)req);
679 	tfp_free((void *)resv);
680 	tfp_free((void *)db->pool);
681 	tfp_free((void *)db);
682 	tfp_free((void *)rm_db);
683 	tfp_free((void *)req_cnt);
684 	parms->rm_db = NULL;
685 
686 	return -EINVAL;
687 }
688 
689 int
690 tf_rm_create_db_no_reservation(struct tf *tfp,
691 			       struct tf_rm_create_db_parms *parms)
692 {
693 	int rc;
694 	struct tf_session *tfs;
695 	struct tf_dev_info *dev;
696 	int i, j;
697 	uint16_t hcapi_items, *req_cnt;
698 	struct tfp_calloc_parms cparms;
699 	struct tf_rm_resc_req_entry *req;
700 	struct tf_rm_resc_entry *resv;
701 	struct tf_rm_new_db *rm_db;
702 	struct tf_rm_element *db;
703 	uint32_t pool_size;
704 
705 	TF_CHECK_PARMS2(tfp, parms);
706 
707 	/* Retrieve the session information */
708 	rc = tf_session_get_session_internal(tfp, &tfs);
709 	if (rc)
710 		return rc;
711 
712 	/* Retrieve device information */
713 	rc = tf_session_get_device(tfs, &dev);
714 	if (rc)
715 		return rc;
716 
717 	/* Copy requested counts (alloc_cnt) from tf_open_session() to local
718 	 * copy (req_cnt) so that it can be updated if required.
719 	 */
720 
721 	cparms.nitems = parms->num_elements;
722 	cparms.size = sizeof(uint16_t);
723 	cparms.alignment = 0;
724 	rc = tfp_calloc(&cparms);
725 	if (rc)
726 		return rc;
727 
728 	req_cnt = (uint16_t *)cparms.mem_va;
729 
730 	tfp_memcpy(req_cnt, parms->alloc_cnt,
731 		   parms->num_elements * sizeof(uint16_t));
732 
733 	/* Process capabilities against DB requirements. However, as a
734 	 * DB can hold elements that are not HCAPI we can reduce the
735 	 * req msg content by removing those out of the request yet
736 	 * the DB holds them all as to give a fast lookup. We can also
737 	 * remove entries where there are no request for elements.
738 	 */
739 	tf_rm_count_hcapi_reservations(parms->dir,
740 				       parms->module,
741 				       parms->cfg,
742 				       req_cnt,
743 				       parms->num_elements,
744 				       &hcapi_items);
745 
746 	if (hcapi_items == 0) {
747 		TFP_DRV_LOG(ERR,
748 			"%s: module:%s Empty RM DB create request\n",
749 			tf_dir_2_str(parms->dir),
750 			tf_module_2_str(parms->module));
751 
752 		parms->rm_db = NULL;
753 		return -ENOMEM;
754 	}
755 
756 	/* Alloc request, alignment already set */
757 	cparms.nitems = (size_t)hcapi_items;
758 	cparms.size = sizeof(struct tf_rm_resc_req_entry);
759 	rc = tfp_calloc(&cparms);
760 	if (rc)
761 		return rc;
762 	req = (struct tf_rm_resc_req_entry *)cparms.mem_va;
763 
764 	/* Alloc reservation, alignment and nitems already set */
765 	cparms.size = sizeof(struct tf_rm_resc_entry);
766 	rc = tfp_calloc(&cparms);
767 	if (rc)
768 		return rc;
769 	resv = (struct tf_rm_resc_entry *)cparms.mem_va;
770 
771 	/* Build the request */
772 	for (i = 0, j = 0; i < parms->num_elements; i++) {
773 		struct tf_rm_element_cfg *cfg = &parms->cfg[i];
774 		uint16_t hcapi_type = cfg->hcapi_type;
775 
776 		/* Only perform reservation for requested entries
777 		 */
778 		if (req_cnt[i] == 0)
779 			continue;
780 
781 		/* Skip any children in the request */
782 		if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI ||
783 		    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
784 		    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
785 			req[j].type = hcapi_type;
786 			req[j].min = req_cnt[i];
787 			req[j].max = req_cnt[i];
788 			j++;
789 		}
790 	}
791 
792 	/* Get all resources info for the module type
793 	 */
794 	rc = tf_msg_session_resc_info(tfp,
795 				      dev,
796 				      parms->dir,
797 				      hcapi_items,
798 				      req,
799 				      resv);
800 	if (rc)
801 		return rc;
802 
803 	/* Build the RM DB per the request */
804 	cparms.nitems = 1;
805 	cparms.size = sizeof(struct tf_rm_new_db);
806 	rc = tfp_calloc(&cparms);
807 	if (rc)
808 		return rc;
809 	rm_db = (void *)cparms.mem_va;
810 
811 	/* Build the DB within RM DB */
812 	cparms.nitems = parms->num_elements;
813 	cparms.size = sizeof(struct tf_rm_element);
814 	rc = tfp_calloc(&cparms);
815 	if (rc)
816 		return rc;
817 	rm_db->db = (struct tf_rm_element *)cparms.mem_va;
818 
819 	db = rm_db->db;
820 	for (i = 0, j = 0; i < parms->num_elements; i++) {
821 		struct tf_rm_element_cfg *cfg = &parms->cfg[i];
822 		const char *type_str;
823 
824 		dev->ops->tf_dev_get_resource_str(tfp,
825 						  cfg->hcapi_type,
826 						  &type_str);
827 
828 		db[i].cfg_type = cfg->cfg_type;
829 		db[i].hcapi_type = cfg->hcapi_type;
830 
831 		/* Save the parent subtype for later use to find the pool
832 		 */
833 		if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
834 			db[i].parent_subtype = cfg->parent_subtype;
835 
836 		/* If the element didn't request an allocation no need
837 		 * to create a pool nor verify if we got a reservation.
838 		 */
839 		if (req_cnt[i] == 0)
840 			continue;
841 
842 		/* Skip any children or invalid
843 		 */
844 		if (cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI &&
845 		    cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
846 		    cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT)
847 			continue;
848 
849 		/* If the element had requested an allocation and that
850 		 * allocation was a success (full amount) then
851 		 * allocate the pool.
852 		 */
853 		if (req_cnt[i] == resv[j].stride) {
854 			db[i].alloc.entry.start = resv[j].start;
855 			db[i].alloc.entry.stride = resv[j].stride;
856 
857 			/* Only allocate BA pool if a BA type not a child */
858 			if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
859 			    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
860 				/* Create pool */
861 				pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
862 					     sizeof(struct bitalloc));
863 				/* Alloc request, alignment already set */
864 				cparms.nitems = pool_size;
865 				cparms.size = sizeof(struct bitalloc);
866 				rc = tfp_calloc(&cparms);
867 				if (rc) {
868 					TFP_DRV_LOG(ERR,
869 					 "%s: Pool alloc failed, type:%d:%s\n",
870 					 tf_dir_2_str(parms->dir),
871 					 cfg->hcapi_type, type_str);
872 					goto fail;
873 				}
874 				db[i].pool = (struct bitalloc *)cparms.mem_va;
875 
876 				rc = ba_init(db[i].pool,
877 					     resv[j].stride,
878 					     !tf_session_is_shared_session(tfs));
879 				if (rc) {
880 					TFP_DRV_LOG(ERR,
881 					  "%s: Pool init failed, type:%d:%s\n",
882 					  tf_dir_2_str(parms->dir),
883 					  cfg->hcapi_type, type_str);
884 					goto fail;
885 				}
886 			}
887 			j++;
888 		} else {
889 			/* Bail out as we want what we requested for
890 			 * all elements, not any less.
891 			 */
892 			TFP_DRV_LOG(ERR,
893 				    "%s: Alloc failed %d:%s req:%d, alloc:%d\n",
894 				    tf_dir_2_str(parms->dir), cfg->hcapi_type,
895 				    type_str, req_cnt[i], resv[j].stride);
896 			goto fail;
897 		}
898 	}
899 
900 	rm_db->num_entries = parms->num_elements;
901 	rm_db->dir = parms->dir;
902 	rm_db->module = parms->module;
903 	*parms->rm_db = (void *)rm_db;
904 
905 	tfp_free((void *)req);
906 	tfp_free((void *)resv);
907 	tfp_free((void *)req_cnt);
908 	return 0;
909 
910  fail:
911 	tfp_free((void *)req);
912 	tfp_free((void *)resv);
913 	tfp_free((void *)db->pool);
914 	tfp_free((void *)db);
915 	tfp_free((void *)rm_db);
916 	tfp_free((void *)req_cnt);
917 	parms->rm_db = NULL;
918 
919 	return -EINVAL;
920 }
921 int
922 tf_rm_free_db(struct tf *tfp,
923 	      struct tf_rm_free_db_parms *parms)
924 {
925 	int rc;
926 	int i;
927 	uint16_t resv_size = 0;
928 	struct tf_rm_new_db *rm_db;
929 	struct tf_rm_resc_entry *resv;
930 	bool residuals_found = false;
931 
932 	TF_CHECK_PARMS2(parms, parms->rm_db);
933 
934 	/* Device unbind happens when the TF Session is closed and the
935 	 * session ref count is 0. Device unbind will cleanup each of
936 	 * its support modules, i.e. Identifier, thus we're ending up
937 	 * here to close the DB.
938 	 *
939 	 * On TF Session close it is assumed that the session has already
940 	 * cleaned up all its resources, individually, while
941 	 * destroying its flows.
942 	 *
943 	 * To assist in the 'cleanup checking' the DB is checked for any
944 	 * remaining elements and logged if found to be the case.
945 	 *
946 	 * Any such elements will need to be 'cleared' ahead of
947 	 * returning the resources to the HCAPI RM.
948 	 *
949 	 * RM will signal FW to flush the DB resources. FW will
950 	 * perform the invalidation. TF Session close will return the
951 	 * previous allocated elements to the RM and then close the
952 	 * HCAPI RM registration. That then saves several 'free' msgs
953 	 * from being required.
954 	 */
955 
956 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
957 
958 	/* Check for residuals that the client didn't clean up */
959 	rc = tf_rm_check_residuals(rm_db,
960 				   &resv_size,
961 				   &resv,
962 				   &residuals_found);
963 	if (rc)
964 		return rc;
965 
966 	/* Invalidate any residuals followed by a DB traversal for
967 	 * pool cleanup.
968 	 */
969 	if (residuals_found) {
970 		rc = tf_msg_session_resc_flush(tfp,
971 					       parms->dir,
972 					       resv_size,
973 					       resv);
974 		tfp_free((void *)resv);
975 		/* On failure we still have to cleanup so we can only
976 		 * log that FW failed.
977 		 */
978 		if (rc)
979 			TFP_DRV_LOG(ERR,
980 				    "%s: Internal Flush error, module:%s\n",
981 				    tf_dir_2_str(parms->dir),
982 				    tf_module_2_str(rm_db->module));
983 	}
984 
985 	/* No need to check for configuration type, even if we do not
986 	 * have a BA pool we just delete on a null ptr, no harm
987 	 */
988 	for (i = 0; i < rm_db->num_entries; i++)
989 		tfp_free((void *)rm_db->db[i].pool);
990 
991 	tfp_free((void *)parms->rm_db);
992 
993 	return rc;
994 }
995 /**
996  * Get the bit allocator pool associated with the subtype and the db
997  *
998  * [in] rm_db
999  *   Pointer to the DB
1000  *
1001  * [in] subtype
1002  *   Module subtype used to index into the module specific database.
1003  *   An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
1004  *   module subtype of TF_MODULE_TYPE_TABLE.
1005  *
1006  * [in/out] pool
1007  *   Pointer to the bit allocator pool used
1008  *
1009  * [in/out] new_subtype
1010  *   Pointer to the subtype of the actual pool used
1011  * Returns:
1012  *     0          - Success
1013  *   - ENOTSUP    - Operation not supported
1014  */
1015 static int
1016 tf_rm_get_pool(struct tf_rm_new_db *rm_db,
1017 	       uint16_t subtype,
1018 	       struct bitalloc **pool,
1019 	       uint16_t *new_subtype)
1020 {
1021 	int rc = 0;
1022 	uint16_t tmp_subtype = subtype;
1023 
1024 	/* If we are a child, get the parent table index */
1025 	if (rm_db->db[subtype].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1026 		tmp_subtype = rm_db->db[subtype].parent_subtype;
1027 
1028 	*pool = rm_db->db[tmp_subtype].pool;
1029 
1030 	/* Bail out if the pool is not valid, should never happen */
1031 	if (rm_db->db[tmp_subtype].pool == NULL) {
1032 		rc = -ENOTSUP;
1033 		TFP_DRV_LOG(ERR,
1034 			    "%s: Invalid pool for this type:%d, rc:%s\n",
1035 			    tf_dir_2_str(rm_db->dir),
1036 			    tmp_subtype,
1037 			    strerror(-rc));
1038 		return rc;
1039 	}
1040 	*new_subtype = tmp_subtype;
1041 	return rc;
1042 }
1043 
1044 int
1045 tf_rm_allocate(struct tf_rm_allocate_parms *parms)
1046 {
1047 	int rc;
1048 	int id;
1049 	uint32_t index;
1050 	struct tf_rm_new_db *rm_db;
1051 	enum tf_rm_elem_cfg_type cfg_type;
1052 	struct bitalloc *pool;
1053 	uint16_t subtype;
1054 
1055 	TF_CHECK_PARMS2(parms, parms->rm_db);
1056 
1057 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1058 	TF_CHECK_PARMS1(rm_db->db);
1059 
1060 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1061 
1062 	/* Bail out if not controlled by RM */
1063 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1064 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1065 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1066 		return -ENOTSUP;
1067 
1068 	rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1069 	if (rc)
1070 		return rc;
1071 	/*
1072 	 * priority  0: allocate from top of the tcam i.e. high
1073 	 * priority !0: allocate index from bottom i.e lowest
1074 	 */
1075 	if (parms->priority)
1076 		id = ba_alloc_reverse(pool);
1077 	else
1078 		id = ba_alloc(pool);
1079 	if (id == BA_FAIL) {
1080 		rc = -ENOMEM;
1081 		TFP_DRV_LOG(ERR,
1082 			    "%s: Allocation failed, rc:%s\n",
1083 			    tf_dir_2_str(rm_db->dir),
1084 			    strerror(-rc));
1085 		return rc;
1086 	}
1087 
1088 	/* Adjust for any non zero start value */
1089 	rc = tf_rm_adjust_index(rm_db->db,
1090 				TF_RM_ADJUST_ADD_BASE,
1091 				subtype,
1092 				id,
1093 				&index);
1094 	if (rc) {
1095 		TFP_DRV_LOG(ERR,
1096 			    "%s: Alloc adjust of base index failed, rc:%s\n",
1097 			    tf_dir_2_str(rm_db->dir),
1098 			    strerror(-rc));
1099 		return -EINVAL;
1100 	}
1101 
1102 	*parms->index = index;
1103 	if (parms->base_index)
1104 		*parms->base_index = id;
1105 
1106 	return rc;
1107 }
1108 
1109 int
1110 tf_rm_free(struct tf_rm_free_parms *parms)
1111 {
1112 	int rc;
1113 	uint32_t adj_index;
1114 	struct tf_rm_new_db *rm_db;
1115 	enum tf_rm_elem_cfg_type cfg_type;
1116 	struct bitalloc *pool;
1117 	uint16_t subtype;
1118 
1119 	TF_CHECK_PARMS2(parms, parms->rm_db);
1120 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1121 	TF_CHECK_PARMS1(rm_db->db);
1122 
1123 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1124 
1125 	/* Bail out if not controlled by RM */
1126 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1127 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1128 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1129 		return -ENOTSUP;
1130 
1131 	rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1132 	if (rc)
1133 		return rc;
1134 
1135 	/* Adjust for any non zero start value */
1136 	rc = tf_rm_adjust_index(rm_db->db,
1137 				TF_RM_ADJUST_RM_BASE,
1138 				subtype,
1139 				parms->index,
1140 				&adj_index);
1141 	if (rc)
1142 		return rc;
1143 
1144 	rc = ba_free(pool, adj_index);
1145 	/* No logging direction matters and that is not available here */
1146 	if (rc)
1147 		return rc;
1148 
1149 	return rc;
1150 }
1151 
1152 int
1153 tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms)
1154 {
1155 	int rc;
1156 	uint32_t adj_index;
1157 	struct tf_rm_new_db *rm_db;
1158 	enum tf_rm_elem_cfg_type cfg_type;
1159 	struct bitalloc *pool;
1160 	uint16_t subtype;
1161 
1162 	TF_CHECK_PARMS2(parms, parms->rm_db);
1163 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1164 	TF_CHECK_PARMS1(rm_db->db);
1165 
1166 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1167 
1168 
1169 	/* Bail out if not controlled by RM */
1170 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1171 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1172 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1173 		return -ENOTSUP;
1174 
1175 	rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1176 	if (rc)
1177 		return rc;
1178 
1179 	/* Adjust for any non zero start value */
1180 	rc = tf_rm_adjust_index(rm_db->db,
1181 				TF_RM_ADJUST_RM_BASE,
1182 				subtype,
1183 				parms->index,
1184 				&adj_index);
1185 	if (rc)
1186 		return rc;
1187 
1188 	if (parms->base_index)
1189 		*parms->base_index = adj_index;
1190 	*parms->allocated = ba_inuse(pool, adj_index);
1191 
1192 	return rc;
1193 }
1194 
1195 int
1196 tf_rm_get_info(struct tf_rm_get_alloc_info_parms *parms)
1197 {
1198 	struct tf_rm_new_db *rm_db;
1199 	enum tf_rm_elem_cfg_type cfg_type;
1200 
1201 	TF_CHECK_PARMS2(parms, parms->rm_db);
1202 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1203 	TF_CHECK_PARMS1(rm_db->db);
1204 
1205 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1206 
1207 	/* Bail out if not controlled by HCAPI */
1208 	if (cfg_type == TF_RM_ELEM_CFG_NULL)
1209 		return -ENOTSUP;
1210 
1211 	memcpy(parms->info,
1212 	       &rm_db->db[parms->subtype].alloc,
1213 	       sizeof(struct tf_rm_alloc_info));
1214 
1215 	return 0;
1216 }
1217 
1218 int
1219 tf_rm_get_all_info(struct tf_rm_get_alloc_info_parms *parms, int size)
1220 {
1221 	struct tf_rm_new_db *rm_db;
1222 	enum tf_rm_elem_cfg_type cfg_type;
1223 	struct tf_rm_alloc_info *info = parms->info;
1224 	int i;
1225 
1226 	TF_CHECK_PARMS1(parms);
1227 
1228 	/* No rm info available for this module type
1229 	 */
1230 	if (!parms->rm_db)
1231 		return -ENOMEM;
1232 
1233 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1234 	TF_CHECK_PARMS1(rm_db->db);
1235 
1236 	for (i = 0; i < size; i++) {
1237 		cfg_type = rm_db->db[i].cfg_type;
1238 
1239 		/* Bail out if not controlled by HCAPI */
1240 		if (cfg_type == TF_RM_ELEM_CFG_NULL) {
1241 			info++;
1242 			continue;
1243 		}
1244 
1245 		memcpy(info,
1246 		       &rm_db->db[i].alloc,
1247 		       sizeof(struct tf_rm_alloc_info));
1248 		info++;
1249 	}
1250 
1251 	return 0;
1252 }
1253 
1254 int
1255 tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms)
1256 {
1257 	struct tf_rm_new_db *rm_db;
1258 	enum tf_rm_elem_cfg_type cfg_type;
1259 
1260 	TF_CHECK_PARMS2(parms, parms->rm_db);
1261 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1262 	TF_CHECK_PARMS1(rm_db->db);
1263 
1264 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1265 
1266 	/* Bail out if not controlled by HCAPI */
1267 	if (cfg_type == TF_RM_ELEM_CFG_NULL)
1268 		return -ENOTSUP;
1269 
1270 	*parms->hcapi_type = rm_db->db[parms->subtype].hcapi_type;
1271 
1272 	return 0;
1273 }
1274 
1275 int
1276 tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms)
1277 {
1278 	int rc = 0;
1279 	struct tf_rm_new_db *rm_db;
1280 	enum tf_rm_elem_cfg_type cfg_type;
1281 
1282 	TF_CHECK_PARMS2(parms, parms->rm_db);
1283 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1284 	TF_CHECK_PARMS1(rm_db->db);
1285 
1286 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1287 
1288 	/* Bail out if not a BA pool */
1289 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1290 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1291 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1292 		return -ENOTSUP;
1293 
1294 	/* Bail silently (no logging), if the pool is not valid there
1295 	 * was no elements allocated for it.
1296 	 */
1297 	if (rm_db->db[parms->subtype].pool == NULL) {
1298 		*parms->count = 0;
1299 		return 0;
1300 	}
1301 
1302 	*parms->count = ba_inuse_count(rm_db->db[parms->subtype].pool);
1303 
1304 	return rc;
1305 }
1306 /* Only used for table bulk get at this time
1307  */
1308 int
1309 tf_rm_check_indexes_in_range(struct tf_rm_check_indexes_in_range_parms *parms)
1310 {
1311 	struct tf_rm_new_db *rm_db;
1312 	enum tf_rm_elem_cfg_type cfg_type;
1313 	uint32_t base_index;
1314 	uint32_t stride;
1315 	int rc = 0;
1316 	struct bitalloc *pool;
1317 	uint16_t subtype;
1318 
1319 	TF_CHECK_PARMS2(parms, parms->rm_db);
1320 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
1321 	TF_CHECK_PARMS1(rm_db->db);
1322 
1323 	cfg_type = rm_db->db[parms->subtype].cfg_type;
1324 
1325 	/* Bail out if not a BA pool */
1326 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1327 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1328 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1329 		return -ENOTSUP;
1330 
1331 	rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1332 	if (rc)
1333 		return rc;
1334 
1335 	base_index = rm_db->db[subtype].alloc.entry.start;
1336 	stride = rm_db->db[subtype].alloc.entry.stride;
1337 
1338 	if (parms->starting_index < base_index ||
1339 	    parms->starting_index + parms->num_entries > base_index + stride)
1340 		return -EINVAL;
1341 
1342 	return rc;
1343 }
1344