/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2024 Broadcom
 * All rights reserved.
 */

#include <string.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <cfa_resource_types.h>
#include "tf_rm.h"
#include "tf_common.h"
#include "tf_util.h"
#include "tf_session.h"
#include "tf_device.h"
#include "tfp.h"
#include "tf_msg.h"

/**
 * Generic RM Element data type that an RM DB is built upon.
 */
struct tf_rm_element {
	/**
	 * RM Element configuration type. If Private then the
	 * hcapi_type can be ignored. If Null then the element is not
	 * valid for the device.
	 */
	enum tf_rm_elem_cfg_type cfg_type;

	/**
	 * HCAPI RM Type for the element.
	 */
	uint16_t hcapi_type;

	/**
	 * Resource slices. How many slices will fit in the
	 * resource pool chunk size.
	 */
	uint8_t slices;

	/**
	 * HCAPI RM allocated range information for the element.
	 */
	struct tf_rm_alloc_info alloc;

	/**
	 * If cfg_type == HCAPI_BA_CHILD, this field indicates
	 * the parent module subtype for lookup into the parent pool.
	 * An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
	 * module subtype of TF_MODULE_TYPE_TABLE.
	 */
	uint16_t parent_subtype;

	/**
	 * Bit allocator pool for the element. Pool size is controlled
	 * by the struct tf_session_resources at time of session creation.
	 * Null indicates that the pool is not used for the element.
	 */
	struct bitalloc *pool;
};

/**
 * TF RM DB definition
 */
struct tf_rm_new_db {
	/**
	 * Number of elements in the DB
	 */
	uint16_t num_entries;

	/**
	 * Direction this DB controls.
	 */
	enum tf_dir dir;

	/**
	 * Module type, used for logging purposes.
	 */
	enum tf_module_type module;

	/**
	 * The DB consists of an array of elements
	 */
	struct tf_rm_element *db;
};

/**
 * Counts the number of HCAPI reservations requested in the DB
 * configuration and logs any requested element type that is not
 * supported by the device.
 *
 * [in] dir
 *   Receive or transmit direction
 *
 * [in] module
 *   Module type, used for logging purposes
 *
 * [in] cfg
 *   Pointer to the DB configuration
 *
 * [in] reservations
 *   Pointer to the allocation values associated with the module
 *
 * [in] count
 *   Number of DB configuration elements
 *
 * [out] valid_count
 *   Number of HCAPI entries with a reservation value greater than 0
 */
static void
tf_rm_count_hcapi_reservations(enum tf_dir dir,
			       enum tf_module_type module,
			       struct tf_rm_element_cfg *cfg,
			       uint16_t *reservations,
			       uint16_t count,
			       uint16_t *valid_count)
{
	int i;
	uint16_t cnt = 0;

	for (i = 0; i < count; i++) {
		if (cfg[i].cfg_type != TF_RM_ELEM_CFG_NULL &&
		    reservations[i] > 0)
			cnt++;

		/* Only log a msg if a type is requested but not
		 * supported. We ignore the EM module as it is using a
		 * split configuration array, thus it would fail this
		 * type of check.
		 */
		if (module != TF_MODULE_TYPE_EM &&
		    cfg[i].cfg_type == TF_RM_ELEM_CFG_NULL &&
		    reservations[i] > 0) {
			TFP_DRV_LOG(ERR,
				    "%s, %s, %s allocation of %d not supported\n",
				    tf_module_2_str(module),
				    tf_dir_2_str(dir),
				    tf_module_subtype_2_str(module, i),
				    reservations[i]);
		}
	}

	*valid_count = cnt;
}

/**
 * Resource Manager Adjust of base index definitions.
 */
enum tf_rm_adjust_type {
	TF_RM_ADJUST_ADD_BASE, /**< Adds base to the index */
	TF_RM_ADJUST_RM_BASE   /**< Removes base from the index */
};

/**
 * Adjust an index according to the allocation information.
 *
 * All resources are controlled in a 0 based pool. Some resources, by
 * design, are not 0 based, i.e. Full Action Records (SRAM) thus they
 * need to be adjusted before they are handed out.
 *
 * [in] db
 *   Pointer to the db, used for the lookup
 *
 * [in] action
 *   Adjust action
 *
 * [in] subtype
 *   TF module subtype used as an index into the database.
 *   An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
 *   module subtype of TF_MODULE_TYPE_TABLE.
 *
 * [in] index
 *   Index to convert
 *
 * [out] adj_index
 *   Adjusted index
 *
 * Returns:
 *     0          - Success
 *   - EOPNOTSUPP - Operation not supported
 */
static int
tf_rm_adjust_index(struct tf_rm_element *db,
		   enum tf_rm_adjust_type action,
		   uint32_t subtype,
		   uint32_t index,
		   uint32_t *adj_index)
{
	int rc = 0;
	uint32_t base_index;

	base_index = db[subtype].alloc.entry.start;

	switch (action) {
	case TF_RM_ADJUST_RM_BASE:
		*adj_index = index - base_index;
		break;
	case TF_RM_ADJUST_ADD_BASE:
		*adj_index = index + base_index;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return rc;
}
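/* Illustrative sketch (not compiled into the driver): if the RM had
 * reserved a Full Action Record range of [start = 1000, stride = 512],
 * converting between the 0 based pool index and the HW index with the
 * helper above would look as follows. The numbers are hypothetical.
 *
 *   uint32_t hw_index, pool_index;
 *
 *   // pool index 5 -> HW index 1005
 *   tf_rm_adjust_index(db, TF_RM_ADJUST_ADD_BASE, subtype, 5, &hw_index);
 *
 *   // HW index 1005 -> pool index 5
 *   tf_rm_adjust_index(db, TF_RM_ADJUST_RM_BASE, subtype, 1005, &pool_index);
 */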
/**
 * Performs a check of the passed in DB for any lingering elements. If
 * a resource type was found to not have been cleaned up by the caller
 * then its residual values are recorded, logged and passed back in an
 * allocated reservation array that the caller can pass to the FW for
 * cleanup.
 *
 * [in] rm_db
 *   Pointer to the db, used for the lookup
 *
 * [out] resv_size
 *   Pointer to the reservation size of the generated reservation
 *   array.
 *
 * [in/out] resv
 *   Pointer to a pointer to a reservation array. The reservation
 *   array is allocated after the residual scan and holds any found
 *   residual entries. Thus it can be smaller than the DB that the
 *   check was performed on. Array must be freed by the caller.
 *
 * [out] residuals_present
 *   Pointer to a bool flag indicating if residuals were present in
 *   the DB
 *
 * Returns:
 *     0          - Success
 *   - EOPNOTSUPP - Operation not supported
 */
static int
tf_rm_check_residuals(struct tf_rm_new_db *rm_db,
		      uint16_t *resv_size,
		      struct tf_rm_resc_entry **resv,
		      bool *residuals_present)
{
	int rc;
	int i;
	int f;
	uint16_t count;
	uint16_t found;
	uint16_t *residuals = NULL;
	uint16_t hcapi_type;
	struct tf_rm_get_inuse_count_parms iparms;
	struct tf_rm_get_alloc_info_parms aparms;
	struct tf_rm_get_hcapi_parms hparms;
	struct tf_rm_alloc_info info;
	struct tfp_calloc_parms cparms;
	struct tf_rm_resc_entry *local_resv = NULL;

	/* Create array to hold the entries that have residuals */
	cparms.nitems = rm_db->num_entries;
	cparms.size = sizeof(uint16_t);
	cparms.alignment = 0;
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;

	residuals = (uint16_t *)cparms.mem_va;

	/* Traverse the DB and collect any residual elements */
	iparms.rm_db = rm_db;
	iparms.count = &count;
	for (i = 0, found = 0; i < rm_db->num_entries; i++) {
		iparms.subtype = i;
		rc = tf_rm_get_inuse_count(&iparms);
		/* Not a device supported entry, just skip */
		if (rc == -ENOTSUP)
			continue;
		if (rc)
			goto cleanup_residuals;

		if (count) {
			found++;
			residuals[i] = count;
			*residuals_present = true;
		}
	}

	if (*residuals_present) {
		/* Populate a reduced resv array with only the entries
		 * that have residuals.
		 */
		cparms.nitems = found;
		cparms.size = sizeof(struct tf_rm_resc_entry);
		cparms.alignment = 0;
		rc = tfp_calloc(&cparms);
		if (rc)
			goto cleanup_residuals;

		local_resv = (struct tf_rm_resc_entry *)cparms.mem_va;

		aparms.rm_db = rm_db;
		hparms.rm_db = rm_db;
		hparms.hcapi_type = &hcapi_type;
		for (i = 0, f = 0; i < rm_db->num_entries; i++) {
			if (residuals[i] == 0)
				continue;
			aparms.subtype = i;
			aparms.info = &info;
			rc = tf_rm_get_info(&aparms);
			if (rc)
				goto cleanup_all;

			hparms.subtype = i;
			rc = tf_rm_get_hcapi_type(&hparms);
			if (rc)
				goto cleanup_all;

			local_resv[f].type = hcapi_type;
			local_resv[f].start = info.entry.start;
			local_resv[f].stride = info.entry.stride;
			f++;
		}
		*resv_size = found;
	}

	tfp_free((void *)residuals);
	*resv = local_resv;

	return 0;

cleanup_all:
	tfp_free((void *)local_resv);
	*resv = NULL;
cleanup_residuals:
	tfp_free((void *)residuals);

	return rc;
}
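/* Illustrative caller pattern, a sketch mirroring what tf_rm_free_db
 * below actually does: scan for residuals, hand any found entries to
 * the FW for invalidation, then release the reservation array.
 *
 *   uint16_t resv_size = 0;
 *   struct tf_rm_resc_entry *resv = NULL;
 *   bool residuals_found = false;
 *
 *   if (!tf_rm_check_residuals(rm_db, &resv_size, &resv,
 *                              &residuals_found) &&
 *       residuals_found) {
 *           tf_msg_session_resc_flush(tfp, dir, resv_size, resv);
 *           tfp_free((void *)resv);
 *   }
 */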
/**
 * Some resources do not have a 1:1 mapping between the Truflow type and the cfa
 * resource type (HCAPI RM). These resources have multiple Truflow types which
 * map to a single HCAPI RM type. In order to support this, one Truflow type
 * sharing the HCAPI resources is designated the parent. All other Truflow
 * types associated with that HCAPI RM type are designated the children.
 *
 * This function updates the resource counts of any HCAPI_BA_PARENT with the
 * counts of the HCAPI_BA_CHILDREN. These are read from the alloc_cnt and
 * written back to the req_cnt.
 *
 * [in] tfp
 *   Pointer to TF handle
 *
 * [in] dev
 *   Pointer to the device information
 *
 * [in] cfg
 *   Pointer to an array of module specific Truflow type indexed RM cfg items
 *
 * [in] alloc_cnt
 *   Pointer to the tf_open_session() configured array of module specific
 *   Truflow type indexed requested counts.
 *
 * [in] num_elements
 *   Number of elements in the cfg and alloc_cnt arrays
 *
 * [in/out] req_cnt
 *   Pointer to the location to put the updated resource counts.
 *
 * [in] dir
 *   Receive or transmit direction
 *
 * Returns:
 *     0 - Success
 *   < 0 - Failure
 */
static int
tf_rm_update_parent_reservations(struct tf *tfp,
				 struct tf_dev_info *dev,
				 struct tf_rm_element_cfg *cfg,
				 uint16_t *alloc_cnt,
				 uint16_t num_elements,
				 uint16_t *req_cnt,
				 __rte_unused enum tf_dir dir)
{
	int parent, child;
	const char *type_str = NULL;

	/* Search through all the elements */
	for (parent = 0; parent < num_elements; parent++) {
		uint16_t combined_cnt = 0;

		/* If I am a parent */
		if (cfg[parent].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
			uint8_t p_slices = cfg[parent].slices;

			RTE_ASSERT(p_slices);

			combined_cnt = alloc_cnt[parent] / p_slices;

			if (alloc_cnt[parent] % p_slices)
				combined_cnt++;

			if (alloc_cnt[parent]) {
				dev->ops->tf_dev_get_resource_str(tfp,
						cfg[parent].hcapi_type,
						&type_str);
#ifdef TF_FLOW_SCALE_QUERY
				/* Initialize the usage buffer for SRAM tables */
				tf_tbl_usage_init(tfp,
						  dir,
						  parent,
						  alloc_cnt[parent]);
#endif /* TF_FLOW_SCALE_QUERY */
			}

			/* Search again through all the elements */
			for (child = 0; child < num_elements; child++) {
				/* If this is one of my children */
				if (cfg[child].cfg_type ==
				    TF_RM_ELEM_CFG_HCAPI_BA_CHILD &&
				    cfg[child].parent_subtype == parent &&
				    alloc_cnt[child]) {
					uint8_t c_slices = cfg[child].slices;
					uint16_t cnt = 0;

					RTE_ASSERT(c_slices);

					dev->ops->tf_dev_get_resource_str(tfp,
							cfg[child].hcapi_type,
							&type_str);

					/* Increment the parent's combined
					 * count with each child's count,
					 * adjusted for the number of slices
					 * per RM allocated item.
					 */
					cnt = alloc_cnt[child] / c_slices;

					if (alloc_cnt[child] % c_slices)
						cnt++;

					combined_cnt += cnt;
					/* Clear the requested child count */
					req_cnt[child] = 0;
#ifdef TF_FLOW_SCALE_QUERY
					/* Initialize the usage buffer for SRAM tables */
					tf_tbl_usage_init(tfp,
							  dir,
							  child,
							  alloc_cnt[child]);
#endif /* TF_FLOW_SCALE_QUERY */
				}
			}
			/* Save the parent count to be requested */
			req_cnt[parent] = combined_cnt;
		}
	}
	return 0;
}
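/* Worked example (hypothetical counts): with a parent of slices = 1
 * and one child of slices = 8 drawing from the same HCAPI pool, a
 * request of alloc_cnt[parent] = 100 and alloc_cnt[child] = 20 yields
 *
 *   combined_cnt = ceil(100 / 1) + ceil(20 / 8) = 100 + 3 = 103
 *
 * so req_cnt[parent] becomes 103 RM items and req_cnt[child] is
 * cleared to 0, since the child allocates out of the parent's pool.
 */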
int
tf_rm_create_db(struct tf *tfp,
		struct tf_rm_create_db_parms *parms)
{
	int rc;
	struct tf_session *tfs;
	struct tf_dev_info *dev;
	int i, j;
	uint16_t max_types, hcapi_items, *req_cnt;
	struct tfp_calloc_parms cparms;
	struct tf_rm_resc_req_entry *query;
	enum tf_rm_resc_resv_strategy resv_strategy;
	struct tf_rm_resc_req_entry *req;
	struct tf_rm_resc_entry *resv;
	struct tf_rm_new_db *rm_db;
	struct tf_rm_element *db;
	uint32_t pool_size;

	TF_CHECK_PARMS2(tfp, parms);

	/* Retrieve the session information */
	rc = tf_session_get_session_internal(tfp, &tfs);
	if (rc)
		return rc;

	/* Retrieve device information */
	rc = tf_session_get_device(tfs, &dev);
	if (rc)
		return rc;

	/* Need device max number of elements for the RM QCAPS */
	rc = dev->ops->tf_dev_get_max_types(tfp, &max_types);
	if (rc)
		return rc;

	/* Allocate memory for RM QCAPS request */
	cparms.nitems = max_types;
	cparms.size = sizeof(struct tf_rm_resc_req_entry);
	cparms.alignment = 0;
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;

	query = (struct tf_rm_resc_req_entry *)cparms.mem_va;

	/* Get Firmware Capabilities */
	rc = tf_msg_session_resc_qcaps(tfp,
				       dev,
				       parms->dir,
				       max_types,
				       query,
				       &resv_strategy,
				       NULL);
	if (rc)
		return rc;

	/* Copy requested counts (alloc_cnt) from tf_open_session() to local
	 * copy (req_cnt) so that it can be updated if required.
	 */
	cparms.nitems = parms->num_elements;
	cparms.size = sizeof(uint16_t);
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;

	req_cnt = (uint16_t *)cparms.mem_va;

	tfp_memcpy(req_cnt, parms->alloc_cnt,
		   parms->num_elements * sizeof(uint16_t));

	/* Update the req_cnt based upon the element configuration */
	tf_rm_update_parent_reservations(tfp, dev, parms->cfg,
					 parms->alloc_cnt,
					 parms->num_elements,
					 req_cnt,
					 parms->dir);

	/* Process capabilities against the DB requirements. As a DB
	 * can hold elements that are not HCAPI, the request message
	 * content can be reduced by leaving those out, while the DB
	 * still holds them all so as to give fast lookup. Entries for
	 * which no elements are requested can also be removed.
	 */
	tf_rm_count_hcapi_reservations(parms->dir,
				       parms->module,
				       parms->cfg,
				       req_cnt,
				       parms->num_elements,
				       &hcapi_items);

	if (hcapi_items == 0) {
		parms->rm_db = NULL;
		return -ENOMEM;
	}

	/* Alloc request, alignment already set */
	cparms.nitems = (size_t)hcapi_items;
	cparms.size = sizeof(struct tf_rm_resc_req_entry);
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;
	req = (struct tf_rm_resc_req_entry *)cparms.mem_va;

	/* Alloc reservation, alignment and nitems already set */
	cparms.size = sizeof(struct tf_rm_resc_entry);
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;
	resv = (struct tf_rm_resc_entry *)cparms.mem_va;

	/* Build the request */
	for (i = 0, j = 0; i < parms->num_elements; i++) {
		struct tf_rm_element_cfg *cfg = &parms->cfg[i];
		uint16_t hcapi_type = cfg->hcapi_type;

		/* Only perform reservation for requested entries */
		if (req_cnt[i] == 0)
			continue;

		/* Skip any children in the request */
		if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI ||
		    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
		    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
			/* Verify that we can get the full amount per qcaps. */
			if (req_cnt[i] <= query[hcapi_type].max) {
				req[j].type = hcapi_type;
				req[j].min = req_cnt[i];
				req[j].max = req_cnt[i];
				j++;
			} else {
				const char *type_str;

				dev->ops->tf_dev_get_resource_str(tfp,
							hcapi_type,
							&type_str);
				TFP_DRV_LOG(ERR,
					    "Failure, %s:%d:%s req:%d avail:%d\n",
					    tf_dir_2_str(parms->dir),
					    hcapi_type, type_str,
					    req_cnt[i],
					    query[hcapi_type].max);
				return -EINVAL;
			}
		}
	}

	/* Allocate all resources for the module type */
	rc = tf_msg_session_resc_alloc(tfp,
				       dev,
				       parms->dir,
				       hcapi_items,
				       req,
				       resv);
	if (rc)
		return rc;

	/* Build the RM DB per the request */
	cparms.nitems = 1;
	cparms.size = sizeof(struct tf_rm_new_db);
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;
	rm_db = (void *)cparms.mem_va;

	/* Build the DB within RM DB */
	cparms.nitems = parms->num_elements;
	cparms.size = sizeof(struct tf_rm_element);
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;
	rm_db->db = (struct tf_rm_element *)cparms.mem_va;

	db = rm_db->db;
	for (i = 0, j = 0; i < parms->num_elements; i++) {
		struct tf_rm_element_cfg *cfg = &parms->cfg[i];
		const char *type_str;

		dev->ops->tf_dev_get_resource_str(tfp,
						  cfg->hcapi_type,
						  &type_str);

		db[i].cfg_type = cfg->cfg_type;
		db[i].hcapi_type = cfg->hcapi_type;
		db[i].slices = cfg->slices;

		/* Save the parent subtype for later use to find the pool */
		if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
			db[i].parent_subtype = cfg->parent_subtype;

		/* If the element didn't request an allocation no need
		 * to create a pool nor verify if we got a reservation.
		 */
		if (req_cnt[i] == 0)
			continue;

		/* Skip any children or invalid elements */
		if (cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI &&
		    cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
		    cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT)
			continue;

		/* If the element had requested an allocation and that
		 * allocation was a success (full amount) then
		 * allocate the pool.
		 */
		if (req_cnt[i] == resv[j].stride) {
			db[i].alloc.entry.start = resv[j].start;
			db[i].alloc.entry.stride = resv[j].stride;

			/* Only allocate a BA pool for BA types, not children */
			if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
			    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
				/* Create pool */
				pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
					     sizeof(struct bitalloc));
				/* Alloc request, alignment already set */
				cparms.nitems = pool_size;
				cparms.size = sizeof(struct bitalloc);
				rc = tfp_calloc(&cparms);
				if (rc) {
					TFP_DRV_LOG(ERR,
					    "%s: Pool alloc failed, type:%d:%s\n",
					    tf_dir_2_str(parms->dir),
					    cfg->hcapi_type, type_str);
					goto fail;
				}
				db[i].pool = (struct bitalloc *)cparms.mem_va;

				rc = ba_init(db[i].pool,
					     resv[j].stride,
					     true);
				if (rc) {
					TFP_DRV_LOG(ERR,
					    "%s: Pool init failed, type:%d:%s\n",
					    tf_dir_2_str(parms->dir),
					    cfg->hcapi_type, type_str);
					goto fail;
				}
			}
			j++;

#ifdef TF_FLOW_SCALE_QUERY
			/* Initialize the usage buffer for Meter tables */
			if (cfg->hcapi_type == CFA_RESOURCE_TYPE_P58_METER ||
			    cfg->hcapi_type == CFA_RESOURCE_TYPE_P58_METER_PROF) {
				uint32_t tbl_type;

				if (cfg->hcapi_type == CFA_RESOURCE_TYPE_P58_METER)
					tbl_type = TF_TBL_TYPE_METER_INST;
				else
					tbl_type = TF_TBL_TYPE_METER_PROF;
				tf_tbl_usage_init(tfp,
						  parms->dir,
						  tbl_type,
						  req_cnt[i]);
			}
#endif /* TF_FLOW_SCALE_QUERY */
		} else {
			/* Bail out as we want what we requested for
			 * all elements, not any less.
			 */
			TFP_DRV_LOG(ERR,
				    "%s: Alloc failed %d:%s req:%d, alloc:%d\n",
				    tf_dir_2_str(parms->dir), cfg->hcapi_type,
				    type_str, req_cnt[i], resv[j].stride);
			goto fail;
		}
	}

	rm_db->num_entries = parms->num_elements;
	rm_db->dir = parms->dir;
	rm_db->module = parms->module;
	*parms->rm_db = (void *)rm_db;

	tfp_free((void *)req);
	tfp_free((void *)resv);
	tfp_free((void *)req_cnt);
	return 0;

fail:
	tfp_free((void *)req);
	tfp_free((void *)resv);
	/* Release any pools created before the failure */
	for (i = 0; i < parms->num_elements; i++)
		tfp_free((void *)db[i].pool);
	tfp_free((void *)db);
	tfp_free((void *)rm_db);
	tfp_free((void *)req_cnt);
	parms->rm_db = NULL;

	return -EINVAL;
}
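/* Sketch of a typical module bind (hypothetical cfg and counts; the
 * real tables live in the per-module bind code, e.g. the identifier,
 * table and tcam modules):
 *
 *   struct tf_rm_create_db_parms parms = { 0 };
 *   struct tf_rm_element_cfg cfg[TF_IDENT_TYPE_MAX] = { ... };
 *   uint16_t alloc_cnt[TF_IDENT_TYPE_MAX] = { ... };
 *   void *rm_db;
 *   int rc;
 *
 *   parms.module = TF_MODULE_TYPE_IDENTIFIER;
 *   parms.dir = TF_DIR_RX;
 *   parms.num_elements = TF_IDENT_TYPE_MAX;
 *   parms.cfg = cfg;
 *   parms.alloc_cnt = alloc_cnt;
 *   parms.rm_db = &rm_db;
 *   rc = tf_rm_create_db(tfp, &parms);
 */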
int
tf_rm_create_db_no_reservation(struct tf *tfp,
			       struct tf_rm_create_db_parms *parms)
{
	int rc;
	struct tf_session *tfs;
	struct tf_dev_info *dev;
	int i, j;
	uint16_t hcapi_items, *req_cnt;
	struct tfp_calloc_parms cparms;
	struct tf_rm_resc_req_entry *req;
	struct tf_rm_resc_entry *resv;
	struct tf_rm_new_db *rm_db;
	struct tf_rm_element *db;
	uint32_t pool_size;

	TF_CHECK_PARMS2(tfp, parms);

	/* Retrieve the session information */
	rc = tf_session_get_session_internal(tfp, &tfs);
	if (rc)
		return rc;

	/* Retrieve device information */
	rc = tf_session_get_device(tfs, &dev);
	if (rc)
		return rc;

	/* Copy requested counts (alloc_cnt) from tf_open_session() to local
	 * copy (req_cnt) so that it can be updated if required.
	 */
	cparms.nitems = parms->num_elements;
	cparms.size = sizeof(uint16_t);
	cparms.alignment = 0;
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;

	req_cnt = (uint16_t *)cparms.mem_va;

	tfp_memcpy(req_cnt, parms->alloc_cnt,
		   parms->num_elements * sizeof(uint16_t));

	/* Update the req_cnt based upon the element configuration */
	tf_rm_update_parent_reservations(tfp, dev, parms->cfg,
					 parms->alloc_cnt,
					 parms->num_elements,
					 req_cnt,
					 parms->dir);

	/* Process capabilities against the DB requirements. As a DB
	 * can hold elements that are not HCAPI, the request message
	 * content can be reduced by leaving those out, while the DB
	 * still holds them all so as to give fast lookup. Entries for
	 * which no elements are requested can also be removed.
	 */
	tf_rm_count_hcapi_reservations(parms->dir,
				       parms->module,
				       parms->cfg,
				       req_cnt,
				       parms->num_elements,
				       &hcapi_items);

	if (hcapi_items == 0) {
		parms->rm_db = NULL;
		return -ENOMEM;
	}

	/* Alloc request, alignment already set */
	cparms.nitems = (size_t)hcapi_items;
	cparms.size = sizeof(struct tf_rm_resc_req_entry);
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;
	req = (struct tf_rm_resc_req_entry *)cparms.mem_va;

	/* Alloc reservation, alignment and nitems already set */
	cparms.size = sizeof(struct tf_rm_resc_entry);
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;
	resv = (struct tf_rm_resc_entry *)cparms.mem_va;

	/* Build the request */
	for (i = 0, j = 0; i < parms->num_elements; i++) {
		struct tf_rm_element_cfg *cfg = &parms->cfg[i];
		uint16_t hcapi_type = cfg->hcapi_type;

		/* Only perform reservation for requested entries */
		if (req_cnt[i] == 0)
			continue;

		/* Skip any children in the request */
		if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI ||
		    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
		    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
			req[j].type = hcapi_type;
			req[j].min = req_cnt[i];
			req[j].max = req_cnt[i];
			j++;
		}
	}

	/* Get all resources info for the module type */
	rc = tf_msg_session_resc_info(tfp,
				      dev,
				      parms->dir,
				      hcapi_items,
				      req,
				      resv);
	if (rc)
		return rc;

	/* Build the RM DB per the request */
	cparms.nitems = 1;
	cparms.size = sizeof(struct tf_rm_new_db);
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;
	rm_db = (void *)cparms.mem_va;

	/* Build the DB within RM DB */
	cparms.nitems = parms->num_elements;
	cparms.size = sizeof(struct tf_rm_element);
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;
	rm_db->db = (struct tf_rm_element *)cparms.mem_va;

	db = rm_db->db;
	for (i = 0, j = 0; i < parms->num_elements; i++) {
		struct tf_rm_element_cfg *cfg = &parms->cfg[i];
		const char *type_str;

		dev->ops->tf_dev_get_resource_str(tfp,
						  cfg->hcapi_type,
						  &type_str);

		db[i].cfg_type = cfg->cfg_type;
		db[i].hcapi_type = cfg->hcapi_type;
		db[i].slices = cfg->slices;

		/* Save the parent subtype for later use to find the pool */
		if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
			db[i].parent_subtype = cfg->parent_subtype;

		/* If the element didn't request an allocation no need
		 * to create a pool nor verify if we got a reservation.
		 */
		if (req_cnt[i] == 0)
			continue;

		/* Skip any children or invalid elements */
		if (cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI &&
		    cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
		    cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT)
			continue;

		/* If the element had requested an allocation and that
		 * allocation was a success (full amount) then
		 * allocate the pool.
		 */
		if (req_cnt[i] == resv[j].stride) {
			db[i].alloc.entry.start = resv[j].start;
			db[i].alloc.entry.stride = resv[j].stride;

			/* Only allocate a BA pool for BA types, not children */
			if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
			    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
				/* Create pool */
				pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
					     sizeof(struct bitalloc));
				/* Alloc request, alignment already set */
				cparms.nitems = pool_size;
				cparms.size = sizeof(struct bitalloc);
				rc = tfp_calloc(&cparms);
				if (rc) {
					TFP_DRV_LOG(ERR,
					    "%s: Pool alloc failed, type:%d:%s\n",
					    tf_dir_2_str(parms->dir),
					    cfg->hcapi_type, type_str);
					goto fail;
				}
				db[i].pool = (struct bitalloc *)cparms.mem_va;

				rc = ba_init(db[i].pool,
					     resv[j].stride,
					     true);
				if (rc) {
					TFP_DRV_LOG(ERR,
					    "%s: Pool init failed, type:%d:%s\n",
					    tf_dir_2_str(parms->dir),
					    cfg->hcapi_type, type_str);
					goto fail;
				}
			}
			j++;
		} else {
			/* Bail out as we want what we requested for
			 * all elements, not any less.
			 */
			TFP_DRV_LOG(ERR,
				    "%s: Alloc failed %d:%s req:%d, alloc:%d\n",
				    tf_dir_2_str(parms->dir), cfg->hcapi_type,
				    type_str, req_cnt[i], resv[j].stride);
			goto fail;
		}
	}

	rm_db->num_entries = parms->num_elements;
	rm_db->dir = parms->dir;
	rm_db->module = parms->module;
	*parms->rm_db = (void *)rm_db;

	tfp_free((void *)req);
	tfp_free((void *)resv);
	tfp_free((void *)req_cnt);
	return 0;

fail:
	tfp_free((void *)req);
	tfp_free((void *)resv);
	/* Release any pools created before the failure */
	for (i = 0; i < parms->num_elements; i++)
		tfp_free((void *)db[i].pool);
	tfp_free((void *)db);
	tfp_free((void *)rm_db);
	tfp_free((void *)req_cnt);
	parms->rm_db = NULL;

	return -EINVAL;
}

int
tf_rm_free_db(struct tf *tfp,
	      struct tf_rm_free_db_parms *parms)
{
	int rc;
	int i;
	uint16_t resv_size = 0;
	struct tf_rm_new_db *rm_db;
	struct tf_rm_resc_entry *resv;
	bool residuals_found = false;

	TF_CHECK_PARMS2(parms, parms->rm_db);

	/* Device unbind happens when the TF Session is closed and the
	 * session ref count is 0. Device unbind will clean up each of
	 * its support modules, i.e. Identifier, thus we're ending up
	 * here to close the DB.
	 *
	 * On TF Session close it is assumed that the session has already
	 * cleaned up all its resources, individually, while
	 * destroying its flows.
	 *
	 * To assist in the 'cleanup checking' the DB is checked for any
	 * remaining elements and logged if found to be the case.
	 *
	 * Any such elements will need to be 'cleared' ahead of
	 * returning the resources to the HCAPI RM.
	 *
	 * RM will signal FW to flush the DB resources. FW will
	 * perform the invalidation. TF Session close will return the
	 * previously allocated elements to the RM and then close the
	 * HCAPI RM registration. That then saves several 'free' msgs
	 * from being required.
	 */
	rm_db = (struct tf_rm_new_db *)parms->rm_db;

	/* Check for residuals that the client didn't clean up */
	rc = tf_rm_check_residuals(rm_db,
				   &resv_size,
				   &resv,
				   &residuals_found);
	if (rc)
		return rc;

	/* Invalidate any residuals followed by a DB traversal for
	 * pool cleanup.
	 */
	if (residuals_found) {
		rc = tf_msg_session_resc_flush(tfp,
					       parms->dir,
					       resv_size,
					       resv);
		tfp_free((void *)resv);
		/* On failure we still have to cleanup so we can only
		 * log that FW failed.
		 */
		if (rc)
			TFP_DRV_LOG(ERR,
				    "%s: Internal Flush error, module:%s\n",
				    tf_dir_2_str(parms->dir),
				    tf_module_2_str(rm_db->module));
	}

	/* No need to check the configuration type; even if an element
	 * has no BA pool, freeing a null ptr does no harm.
	 */
	for (i = 0; i < rm_db->num_entries; i++)
		tfp_free((void *)rm_db->db[i].pool);

	tfp_free((void *)rm_db->db);
	tfp_free((void *)parms->rm_db);

	return rc;
}

/**
 * Get the bit allocator pool associated with the subtype and the db
 *
 * [in] rm_db
 *   Pointer to the DB
 *
 * [in] subtype
 *   Module subtype used to index into the module specific database.
 *   An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
 *   module subtype of TF_MODULE_TYPE_TABLE.
 *
 * [out] pool
 *   Pointer to the bit allocator pool used
 *
 * [out] new_subtype
 *   Pointer to the subtype of the actual pool used
 *
 * Returns:
 *     0       - Success
 *   - ENOTSUP - Operation not supported
 */
static int
tf_rm_get_pool(struct tf_rm_new_db *rm_db,
	       uint16_t subtype,
	       struct bitalloc **pool,
	       uint16_t *new_subtype)
{
	int rc = 0;
	uint16_t tmp_subtype = subtype;

	/* If we are a child, get the parent table index */
	if (rm_db->db[subtype].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
		tmp_subtype = rm_db->db[subtype].parent_subtype;

	*pool = rm_db->db[tmp_subtype].pool;

	/* Bail out if the pool is not valid, should never happen */
	if (rm_db->db[tmp_subtype].pool == NULL) {
		rc = -ENOTSUP;
		TFP_DRV_LOG(ERR,
			    "%s: Invalid pool for this type:%d, rc:%s\n",
			    tf_dir_2_str(rm_db->dir),
			    tmp_subtype,
			    strerror(-rc));
		return rc;
	}
	*new_subtype = tmp_subtype;
	return rc;
}
int
tf_rm_allocate(struct tf_rm_allocate_parms *parms)
{
	int rc;
	int id;
	uint32_t index;
	struct tf_rm_new_db *rm_db;
	enum tf_rm_elem_cfg_type cfg_type;
	struct bitalloc *pool;
	uint16_t subtype;

	TF_CHECK_PARMS2(parms, parms->rm_db);

	rm_db = (struct tf_rm_new_db *)parms->rm_db;
	TF_CHECK_PARMS1(rm_db->db);

	cfg_type = rm_db->db[parms->subtype].cfg_type;

	/* Bail out if not controlled by RM */
	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
		return -ENOTSUP;

	rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
	if (rc)
		return rc;

	/*
	 * priority  0: allocate from the top of the TCAM, i.e. the
	 *              lowest index (highest priority)
	 * priority !0: allocate from the bottom, i.e. the highest
	 *              index (lowest priority)
	 */
	if (parms->priority)
		id = ba_alloc_reverse(pool);
	else
		id = ba_alloc(pool);
	if (id == BA_FAIL) {
		rc = -ENOMEM;
		TFP_DRV_LOG(ERR,
			    "%s: Allocation failed, rc:%s\n",
			    tf_dir_2_str(rm_db->dir),
			    strerror(-rc));
		return rc;
	}

	/* Adjust for any non zero start value */
	rc = tf_rm_adjust_index(rm_db->db,
				TF_RM_ADJUST_ADD_BASE,
				subtype,
				id,
				&index);
	if (rc) {
		TFP_DRV_LOG(ERR,
			    "%s: Alloc adjust of base index failed, rc:%s\n",
			    tf_dir_2_str(rm_db->dir),
			    strerror(-rc));
		return -EINVAL;
	}

	*parms->index = index;
	if (parms->base_index)
		*parms->base_index = id;

	return rc;
}
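/* Usage sketch (hypothetical caller state; rm_db is the handle that
 * tf_rm_create_db returned): allocate one entry from a module DB and
 * hand the base adjusted index to HW, then release it again.
 *
 *   struct tf_rm_allocate_parms aparms = { 0 };
 *   struct tf_rm_free_parms fparms = { 0 };
 *   uint32_t index;
 *   int rc;
 *
 *   aparms.rm_db = rm_db;
 *   aparms.subtype = TF_TBL_TYPE_FULL_ACT_RECORD;
 *   aparms.priority = 0;          // lowest free index
 *   aparms.index = &index;
 *   rc = tf_rm_allocate(&aparms);
 *
 *   fparms.rm_db = rm_db;
 *   fparms.subtype = TF_TBL_TYPE_FULL_ACT_RECORD;
 *   fparms.index = index;         // tf_rm_free removes the base again
 *   rc = tf_rm_free(&fparms);
 */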
int
tf_rm_free(struct tf_rm_free_parms *parms)
{
	int rc;
	uint32_t adj_index;
	struct tf_rm_new_db *rm_db;
	enum tf_rm_elem_cfg_type cfg_type;
	struct bitalloc *pool;
	uint16_t subtype;

	TF_CHECK_PARMS2(parms, parms->rm_db);
	rm_db = (struct tf_rm_new_db *)parms->rm_db;
	TF_CHECK_PARMS1(rm_db->db);

	cfg_type = rm_db->db[parms->subtype].cfg_type;

	/* Bail out if not controlled by RM */
	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
		return -ENOTSUP;

	rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
	if (rc)
		return rc;

	/* Adjust for any non zero start value */
	rc = tf_rm_adjust_index(rm_db->db,
				TF_RM_ADJUST_RM_BASE,
				subtype,
				parms->index,
				&adj_index);
	if (rc)
		return rc;

	rc = ba_free(pool, adj_index);
	/* No logging; direction matters for the log and is not
	 * available here.
	 */
	if (rc)
		return rc;

	return rc;
}

int
tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms)
{
	int rc;
	uint32_t adj_index;
	struct tf_rm_new_db *rm_db;
	enum tf_rm_elem_cfg_type cfg_type;
	struct bitalloc *pool;
	uint16_t subtype;

	TF_CHECK_PARMS2(parms, parms->rm_db);
	rm_db = (struct tf_rm_new_db *)parms->rm_db;
	TF_CHECK_PARMS1(rm_db->db);

	cfg_type = rm_db->db[parms->subtype].cfg_type;

	/* Bail out if not controlled by RM */
	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
		return -ENOTSUP;

	rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
	if (rc)
		return rc;

	/* Adjust for any non zero start value */
	rc = tf_rm_adjust_index(rm_db->db,
				TF_RM_ADJUST_RM_BASE,
				subtype,
				parms->index,
				&adj_index);
	if (rc)
		return rc;

	if (parms->base_index)
		*parms->base_index = adj_index;
	*parms->allocated = ba_inuse(pool, adj_index);

	return rc;
}

int
tf_rm_get_info(struct tf_rm_get_alloc_info_parms *parms)
{
	struct tf_rm_new_db *rm_db;
	enum tf_rm_elem_cfg_type cfg_type;

	TF_CHECK_PARMS2(parms, parms->rm_db);
	rm_db = (struct tf_rm_new_db *)parms->rm_db;
	TF_CHECK_PARMS1(rm_db->db);

	cfg_type = rm_db->db[parms->subtype].cfg_type;

	/* Bail out if not controlled by HCAPI */
	if (cfg_type == TF_RM_ELEM_CFG_NULL)
		return -ENOTSUP;

	memcpy(parms->info,
	       &rm_db->db[parms->subtype].alloc,
	       sizeof(struct tf_rm_alloc_info));

	return 0;
}

int
tf_rm_get_all_info(struct tf_rm_get_alloc_info_parms *parms, int size)
{
	struct tf_rm_new_db *rm_db;
	enum tf_rm_elem_cfg_type cfg_type;
	struct tf_rm_alloc_info *info = parms->info;
	int i;

	TF_CHECK_PARMS1(parms);

	/* No rm info available for this module type */
	if (!parms->rm_db)
		return -ENOMEM;

	rm_db = (struct tf_rm_new_db *)parms->rm_db;
	TF_CHECK_PARMS1(rm_db->db);

	for (i = 0; i < size; i++) {
		cfg_type = rm_db->db[i].cfg_type;

		/* Bail out if not controlled by HCAPI */
		if (cfg_type == TF_RM_ELEM_CFG_NULL) {
			info++;
			continue;
		}

		memcpy(info,
		       &rm_db->db[i].alloc,
		       sizeof(struct tf_rm_alloc_info));
		info++;
	}

	return 0;
}

int
tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms)
{
	struct tf_rm_new_db *rm_db;
	enum tf_rm_elem_cfg_type cfg_type;

	TF_CHECK_PARMS2(parms, parms->rm_db);
	rm_db = (struct tf_rm_new_db *)parms->rm_db;
	TF_CHECK_PARMS1(rm_db->db);

	cfg_type = rm_db->db[parms->subtype].cfg_type;

	/* Bail out if not controlled by HCAPI */
	if (cfg_type == TF_RM_ELEM_CFG_NULL)
		return -ENOTSUP;

	*parms->hcapi_type = rm_db->db[parms->subtype].hcapi_type;

	return 0;
}

int
tf_rm_get_slices(struct tf_rm_get_slices_parms *parms)
{
	struct tf_rm_new_db *rm_db;
	enum tf_rm_elem_cfg_type cfg_type;

	TF_CHECK_PARMS2(parms, parms->rm_db);
	rm_db = (struct tf_rm_new_db *)parms->rm_db;
	TF_CHECK_PARMS1(rm_db->db);

	cfg_type = rm_db->db[parms->subtype].cfg_type;

	/* Bail out if not controlled by HCAPI */
	if (cfg_type == TF_RM_ELEM_CFG_NULL)
		return -ENOTSUP;

	*parms->slices = rm_db->db[parms->subtype].slices;

	return 0;
}
int
tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms)
{
	int rc = 0;
	struct tf_rm_new_db *rm_db;
	enum tf_rm_elem_cfg_type cfg_type;

	TF_CHECK_PARMS2(parms, parms->rm_db);
	rm_db = (struct tf_rm_new_db *)parms->rm_db;
	TF_CHECK_PARMS1(rm_db->db);

	cfg_type = rm_db->db[parms->subtype].cfg_type;

	/* Bail out if not a BA pool */
	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
		return -ENOTSUP;

	/* Bail silently (no logging); if the pool is not valid there
	 * were no elements allocated for it.
	 */
	if (rm_db->db[parms->subtype].pool == NULL) {
		*parms->count = 0;
		return 0;
	}

	*parms->count = ba_inuse_count(rm_db->db[parms->subtype].pool);

	return rc;
}

/* Only used for table bulk get at this time */
int
tf_rm_check_indexes_in_range(struct tf_rm_check_indexes_in_range_parms *parms)
{
	struct tf_rm_new_db *rm_db;
	enum tf_rm_elem_cfg_type cfg_type;
	uint32_t base_index;
	uint32_t stride;
	int rc = 0;
	struct bitalloc *pool;
	uint16_t subtype;

	TF_CHECK_PARMS2(parms, parms->rm_db);
	rm_db = (struct tf_rm_new_db *)parms->rm_db;
	TF_CHECK_PARMS1(rm_db->db);

	cfg_type = rm_db->db[parms->subtype].cfg_type;

	/* Bail out if not a BA pool */
	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
		return -ENOTSUP;

	rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
	if (rc)
		return rc;

	base_index = rm_db->db[subtype].alloc.entry.start;
	stride = rm_db->db[subtype].alloc.entry.stride;

	if (parms->starting_index < base_index ||
	    parms->starting_index + parms->num_entries > base_index + stride)
		return -EINVAL;

	return rc;
}
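/* Illustrative numbers: with a reservation of [start = 1000,
 * stride = 512] the valid HW index window is [1000, 1511]. A bulk get
 * of num_entries = 100 at starting_index = 1450 fails the check above
 * because 1450 + 100 > 1000 + 512.
 */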