/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2023 Broadcom
 * All rights reserved.
 */

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_alarm.h>
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_ulp_utils.h"
#include "bnxt_ulp_tf.h"
#include "bnxt_tf_common.h"
#include "ulp_fc_mgr.h"
#include "ulp_flow_db.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
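
/*
 * Editorial overview (summary added for readability, not authoritative
 * documentation): per direction, the Flow Counter manager keeps a SW
 * accumulation table (sw_acc_tbl) indexed by (HW counter ID - start_idx)
 * and a page-aligned, page-locked shadow table (shadow_hw_tbl) intended
 * as a DMA-able buffer for bulk counter reads. When SW aggregation is
 * enabled, an rte_eal_alarm callback (ulp_fc_mgr_alarm_cb) periodically
 * accumulates the HW counts, and rte_flow_query() requests are served
 * from the accumulated values by ulp_fc_mgr_query_count_get().
 */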

static const struct bnxt_ulp_fc_core_ops *
bnxt_ulp_fc_ops_get(struct bnxt_ulp_context *ctxt)
{
        int32_t rc;
        enum bnxt_ulp_device_id dev_id;
        const struct bnxt_ulp_fc_core_ops *func_ops;

        rc = bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id);
        if (rc)
                return NULL;

        switch (dev_id) {
        case BNXT_ULP_DEVICE_ID_THOR2:
                func_ops = &ulp_fc_tfc_core_ops;
                break;
        case BNXT_ULP_DEVICE_ID_THOR:
        case BNXT_ULP_DEVICE_ID_STINGRAY:
        case BNXT_ULP_DEVICE_ID_WH_PLUS:
                func_ops = &ulp_fc_tf_core_ops;
                break;
        default:
                func_ops = NULL;
                break;
        }
        return func_ops;
}

static int
ulp_fc_mgr_shadow_mem_alloc(struct hw_fc_mem_info *parms, int size)
{
        /* Allocate page-aligned memory and resolve its physical address */
        if (!parms)
                return -EINVAL;

        parms->mem_va = rte_zmalloc("ulp_fc_info",
                                    RTE_CACHE_LINE_ROUNDUP(size),
                                    4096);
        if (!parms->mem_va) {
                BNXT_DRV_DBG(ERR, "Failed to allocate mem_va\n");
                return -ENOMEM;
        }

        rte_mem_lock_page(parms->mem_va);

        parms->mem_pa = (void *)(uintptr_t)rte_mem_virt2phy(parms->mem_va);
        if (parms->mem_pa == (void *)RTE_BAD_IOVA) {
                BNXT_DRV_DBG(ERR, "Failed to get physical address (mem_pa)\n");
                return -ENOMEM;
        }

        return 0;
}

static void
ulp_fc_mgr_shadow_mem_free(struct hw_fc_mem_info *parms)
{
        rte_free(parms->mem_va);
}

/*
 * Allocate and initialize all Flow Counter Manager resources for this ulp
 * context.
 *
 * ctxt [in] The ulp context for the Flow Counter manager.
 *
 */
int32_t
ulp_fc_mgr_init(struct bnxt_ulp_context *ctxt)
{
        struct bnxt_ulp_device_params *dparms;
        uint32_t dev_id, sw_acc_cntr_tbl_sz, hw_fc_mem_info_sz;
        struct bnxt_ulp_fc_info *ulp_fc_info;
        const struct bnxt_ulp_fc_core_ops *fc_ops;
        uint32_t flags = 0;
        int i, rc;

        if (!ctxt) {
                BNXT_DRV_DBG(DEBUG, "Invalid ULP CTXT\n");
                return -EINVAL;
        }

        if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
                BNXT_DRV_DBG(DEBUG, "Failed to get device id\n");
                return -EINVAL;
        }

        dparms = bnxt_ulp_device_params_get(dev_id);
        if (!dparms) {
                BNXT_DRV_DBG(DEBUG, "Failed to get device parms\n");
                return -EINVAL;
        }

        /* update the features list */
        if (dparms->dev_features & BNXT_ULP_DEV_FT_STAT_SW_AGG)
                flags = ULP_FLAG_FC_SW_AGG_EN;
        if (dparms->dev_features & BNXT_ULP_DEV_FT_STAT_PARENT_AGG)
                flags |= ULP_FLAG_FC_PARENT_AGG_EN;

        fc_ops = bnxt_ulp_fc_ops_get(ctxt);
        if (fc_ops == NULL) {
                BNXT_DRV_DBG(DEBUG, "Failed to get the counter ops\n");
                return -EINVAL;
        }

        ulp_fc_info = rte_zmalloc("ulp_fc_info", sizeof(*ulp_fc_info), 0);
        if (!ulp_fc_info)
                goto error;

        ulp_fc_info->fc_ops = fc_ops;
        ulp_fc_info->flags = flags;

        rc = pthread_mutex_init(&ulp_fc_info->fc_lock, NULL);
        if (rc) {
                BNXT_DRV_DBG(ERR, "Failed to initialize fc mutex\n");
                goto error;
        }

        /* Add the FC info tbl to the ulp context. */
        bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, ulp_fc_info);

        ulp_fc_info->num_counters = dparms->flow_count_db_entries;
        if (!ulp_fc_info->num_counters) {
                /* No need for software counters, call fw directly */
                BNXT_DRV_DBG(DEBUG, "Sw flow counter support not enabled\n");
                return 0;
        }

        /* no need to allocate sw aggregation memory if agg is disabled */
        if (!(ulp_fc_info->flags & ULP_FLAG_FC_SW_AGG_EN))
                return 0;

        sw_acc_cntr_tbl_sz = sizeof(struct sw_acc_counter) *
                             dparms->flow_count_db_entries;

        for (i = 0; i < TF_DIR_MAX; i++) {
                ulp_fc_info->sw_acc_tbl[i] = rte_zmalloc("ulp_sw_acc_cntr_tbl",
                                                         sw_acc_cntr_tbl_sz, 0);
                if (!ulp_fc_info->sw_acc_tbl[i])
                        goto error;
        }

        hw_fc_mem_info_sz = sizeof(uint64_t) * dparms->flow_count_db_entries;

        for (i = 0; i < TF_DIR_MAX; i++) {
                rc = ulp_fc_mgr_shadow_mem_alloc(&ulp_fc_info->shadow_hw_tbl[i],
                                                 hw_fc_mem_info_sz);
                if (rc)
                        goto error;
        }

        return 0;

error:
        ulp_fc_mgr_deinit(ctxt);
        BNXT_DRV_DBG(DEBUG, "Failed to allocate memory for fc mgr\n");

        return -ENOMEM;
}
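
/*
 * Illustrative sizing example (the entry count is hypothetical): with
 * flow_count_db_entries = 1024, ulp_fc_mgr_init() allocates, for each
 * direction, 1024 * sizeof(struct sw_acc_counter) bytes for the SW
 * accumulation table and 1024 * sizeof(uint64_t) = 8192 bytes of
 * page-aligned, page-locked memory for the shadow HW counter table.
 */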

/*
 * Release all resources in the Flow Counter Manager for this ulp context
 *
 * ctxt [in] The ulp context for the Flow Counter manager
 *
 */
int32_t
ulp_fc_mgr_deinit(struct bnxt_ulp_context *ctxt)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;
        struct hw_fc_mem_info *shd_info;
        int i;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

        if (!ulp_fc_info)
                return -EINVAL;

        if (ulp_fc_info->flags & ULP_FLAG_FC_SW_AGG_EN)
                ulp_fc_mgr_thread_cancel(ctxt);

        pthread_mutex_destroy(&ulp_fc_info->fc_lock);

        if (ulp_fc_info->flags & ULP_FLAG_FC_SW_AGG_EN) {
                for (i = 0; i < TF_DIR_MAX; i++)
                        rte_free(ulp_fc_info->sw_acc_tbl[i]);

                for (i = 0; i < TF_DIR_MAX; i++) {
                        shd_info = &ulp_fc_info->shadow_hw_tbl[i];
                        ulp_fc_mgr_shadow_mem_free(shd_info);
                }
        }

        rte_free(ulp_fc_info);

        /* Safe to ignore on deinit */
        (void)bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, NULL);

        return 0;
}

/*
 * Check if the alarm thread that walks through the flows is started
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
bool ulp_fc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

        if (ulp_fc_info)
                return !!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD);

        return false;
}

/*
 * Set up the flow counter timer thread that will fetch/accumulate raw counter
 * data from the chip's internal flow counters
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
int32_t
ulp_fc_mgr_thread_start(struct bnxt_ulp_context *ctxt)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

        if (ulp_fc_info && !(ulp_fc_info->flags & ULP_FLAG_FC_THREAD)) {
                rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
                                  ulp_fc_mgr_alarm_cb, (void *)ctxt->cfg_data);
                ulp_fc_info->flags |= ULP_FLAG_FC_THREAD;
        }

        return 0;
}

/*
 * Cancel the alarm handler
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
void ulp_fc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return;

        ulp_fc_info->flags &= ~ULP_FLAG_FC_THREAD;
        rte_eal_alarm_cancel(ulp_fc_mgr_alarm_cb, ctxt->cfg_data);
}
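
/*
 * Note on the polling lifecycle (editorial): rte_eal_alarm_set() arms a
 * one-shot alarm, so ulp_fc_mgr_alarm_cb() re-arms itself at the end of
 * each pass. ulp_fc_mgr_thread_cancel() clears ULP_FLAG_FC_THREAD and
 * cancels the pending alarm; the callback also stops re-arming when
 * num_entries drops to zero or the accumulation update fails.
 */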

/*
 * Alarm handler that will issue the TF-Core API to fetch
 * data from the chip's internal flow counters
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
void
ulp_fc_mgr_alarm_cb(void *arg)
{
        const struct bnxt_ulp_fc_core_ops *fc_ops;
        struct bnxt_ulp_device_params *dparms;
        struct bnxt_ulp_fc_info *ulp_fc_info;
        struct bnxt_ulp_context *ctxt;
        uint32_t dev_id;
        int rc = 0;

        ctxt = bnxt_ulp_cntxt_entry_acquire(arg);
        if (ctxt == NULL) {
                BNXT_DRV_DBG(INFO, "could not get the ulp context lock\n");
                rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
                                  ulp_fc_mgr_alarm_cb, arg);
                return;
        }

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info) {
                bnxt_ulp_cntxt_entry_release();
                return;
        }

        fc_ops = ulp_fc_info->fc_ops;

        if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
                BNXT_DRV_DBG(DEBUG, "Failed to get device id\n");
                bnxt_ulp_cntxt_entry_release();
                return;
        }

        dparms = bnxt_ulp_device_params_get(dev_id);
        if (!dparms) {
                BNXT_DRV_DBG(DEBUG, "Failed to get device parms\n");
                bnxt_ulp_cntxt_entry_release();
                return;
        }

        /*
         * Take the fc_lock to ensure no flow is destroyed
         * during the bulk get
         */
        if (pthread_mutex_trylock(&ulp_fc_info->fc_lock))
                goto out;

        if (!ulp_fc_info->num_entries) {
                pthread_mutex_unlock(&ulp_fc_info->fc_lock);
                ulp_fc_mgr_thread_cancel(ctxt);
                bnxt_ulp_cntxt_entry_release();
                return;
        }

        /*
         * Disabled until GET_BULK is resolved; the per-flow accumulation
         * update below is used instead.
         *
         * for (i = 0; i < TF_DIR_MAX; i++) {
         *         rc = ulp_bulk_get_flow_stats(tfp, ulp_fc_info, i,
         *                                      dparms->flow_count_db_entries);
         *         if (rc)
         *                 break;
         * }
         */

        /* reset the parent accumulation counters before accumulation if any */
        ulp_flow_db_parent_flow_count_reset(ctxt);

        rc = fc_ops->ulp_flow_stats_accum_update(ctxt, ulp_fc_info, dparms);

        pthread_mutex_unlock(&ulp_fc_info->fc_lock);

        /*
         * If the command fails once, there is no need to invoke it again
         * every second.
         */
        if (rc) {
                ulp_fc_mgr_thread_cancel(ctxt);
                bnxt_ulp_cntxt_entry_release();
                return;
        }
out:
        bnxt_ulp_cntxt_entry_release();
        rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
                          ulp_fc_mgr_alarm_cb, arg);
}

/*
 * Check whether the starting index that indicates the first HW flow
 * counter ID has been set
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 */
bool ulp_fc_mgr_start_idx_isset(struct bnxt_ulp_context *ctxt, uint8_t dir)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

        if (ulp_fc_info)
                return ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set;

        return false;
}

/*
 * Set the starting index that indicates the first HW flow
 * counter ID
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * start_idx [in] The HW flow counter ID
 *
 */
int32_t ulp_fc_mgr_start_idx_set(struct bnxt_ulp_context *ctxt, uint8_t dir,
                                 uint32_t start_idx)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

        if (!ulp_fc_info)
                return -EIO;

        if (!ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set) {
                ulp_fc_info->shadow_hw_tbl[dir].start_idx = start_idx;
                ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set = true;
        }

        return 0;
}
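
/*
 * Index mapping example (values are hypothetical): if the first HW
 * counter ID allocated for a direction is 0x2000,
 * ulp_fc_mgr_start_idx_set() records start_idx = 0x2000, and a flow
 * using hw_cntr_id 0x2007 maps to sw_cntr_idx = 0x2007 - 0x2000 = 7,
 * i.e. sw_acc_tbl[dir][7], in the set/reset helpers below.
 */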

/*
 * Set the corresponding SW accumulator table entry based on
 * the difference between this counter ID and the starting
 * counter ID. Also, keep track of num of active counter enabled
 * flows.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * hw_cntr_id [in] The HW flow counter ID
 *
 */
int32_t ulp_fc_mgr_cntr_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
                            uint32_t hw_cntr_id,
                            enum bnxt_ulp_session_type session_type)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;
        uint32_t sw_cntr_idx;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return -EIO;

        if (!ulp_fc_info->num_counters)
                return 0;

        pthread_mutex_lock(&ulp_fc_info->fc_lock);
        sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = true;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = hw_cntr_id;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].session_type = session_type;
        ulp_fc_info->num_entries++;
        pthread_mutex_unlock(&ulp_fc_info->fc_lock);

        return 0;
}

/*
 * Reset the corresponding SW accumulator table entry based on
 * the difference between this counter ID and the starting
 * counter ID.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * hw_cntr_id [in] The HW flow counter ID
 *
 */
int32_t ulp_fc_mgr_cntr_reset(struct bnxt_ulp_context *ctxt, uint8_t dir,
                              uint32_t hw_cntr_id)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;
        uint32_t sw_cntr_idx;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return -EIO;

        if (!ulp_fc_info->num_counters)
                return 0;

        pthread_mutex_lock(&ulp_fc_info->fc_lock);
        sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = false;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = 0;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].session_type = 0;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pkt_count = 0;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].byte_count = 0;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pc_flow_idx = 0;
        ulp_fc_info->num_entries--;
        pthread_mutex_unlock(&ulp_fc_info->fc_lock);

        return 0;
}
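
/*
 * Application-side usage sketch (illustrative only; port_id and flow are
 * hypothetical). A COUNT query issued through the generic rte_flow API is
 * what ends up filling the rte_flow_query_count data below:
 *
 *      struct rte_flow_query_count count = { .reset = 1 };
 *      struct rte_flow_action action = {
 *              .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *      };
 *      struct rte_flow_error error;
 *
 *      if (!rte_flow_query(port_id, flow, &action, &count, &error) &&
 *          count.hits_set)
 *              printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *                     count.hits, count.bytes);
 */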

/*
 * Fill the rte_flow_query_count 'data' argument passed
 * in the rte_flow_query() with the values obtained and
 * accumulated locally.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * flow_id [in] The HW flow ID
 *
 * count [out] The rte_flow_query_count 'data' that is set
 *
 */
int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
                               uint32_t flow_id,
                               struct rte_flow_query_count *count)
{
        int rc = 0;
        uint32_t nxt_resource_index = 0;
        struct bnxt_ulp_fc_info *ulp_fc_info;
        const struct bnxt_ulp_fc_core_ops *fc_ops;
        struct ulp_flow_db_res_params params;
        uint32_t hw_cntr_id = 0, sw_cntr_idx = 0;
        struct sw_acc_counter *sw_acc_tbl_entry;
        bool found_cntr_resource = false;
        bool found_parent_flow = false;
        uint32_t pc_idx = 0;
        uint32_t session_type = 0;
        uint8_t dir;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return -ENODEV;

        fc_ops = ulp_fc_info->fc_ops;

        if (bnxt_ulp_cntxt_acquire_fdb_lock(ctxt))
                return -EIO;

        do {
                rc = ulp_flow_db_resource_get(ctxt,
                                              BNXT_ULP_FDB_TYPE_REGULAR,
                                              flow_id,
                                              &nxt_resource_index,
                                              &params);
                if (params.resource_func ==
                    BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE &&
                    (params.resource_sub_type ==
                     BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT ||
                     params.resource_sub_type ==
                     BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_EXT_COUNT)) {
                        found_cntr_resource = true;
                        break;
                }
                if (params.resource_func == BNXT_ULP_RESOURCE_FUNC_CMM_STAT) {
                        found_cntr_resource = true;
                        break;
                }
                if (params.resource_func ==
                    BNXT_ULP_RESOURCE_FUNC_PARENT_FLOW) {
                        found_parent_flow = true;
                        pc_idx = params.resource_hndl;
                }
        } while (!rc && nxt_resource_index);

        if (rc || !found_cntr_resource) {
                bnxt_ulp_cntxt_release_fdb_lock(ctxt);
                return rc;
        }

        dir = params.direction;
        session_type = ulp_flow_db_shared_session_get(&params);
        if (!(ulp_fc_info->flags & ULP_FLAG_FC_SW_AGG_EN)) {
                rc = fc_ops->ulp_flow_stat_get(ctxt, dir, session_type,
                                               params.resource_hndl, count);
                bnxt_ulp_cntxt_release_fdb_lock(ctxt);
                return rc;
        }

        if (!found_parent_flow &&
            params.resource_sub_type ==
            BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT) {
                hw_cntr_id = params.resource_hndl;
                if (!ulp_fc_info->num_counters) {
                        rc = fc_ops->ulp_flow_stat_get(ctxt, dir, session_type,
                                                       hw_cntr_id, count);
                        bnxt_ulp_cntxt_release_fdb_lock(ctxt);
                        return rc;
                }

                /* TODO:
                 * Think about optimizing with try_lock later
                 */
                pthread_mutex_lock(&ulp_fc_info->fc_lock);
                sw_cntr_idx = hw_cntr_id -
                        ulp_fc_info->shadow_hw_tbl[dir].start_idx;
                sw_acc_tbl_entry = &ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx];
                if (sw_acc_tbl_entry->pkt_count) {
                        count->hits_set = 1;
                        count->bytes_set = 1;
                        count->hits = sw_acc_tbl_entry->pkt_count;
                        count->bytes = sw_acc_tbl_entry->byte_count;
                }
                if (count->reset) {
                        sw_acc_tbl_entry->pkt_count = 0;
                        sw_acc_tbl_entry->byte_count = 0;
                }
                pthread_mutex_unlock(&ulp_fc_info->fc_lock);
        } else if (found_parent_flow &&
                   params.resource_sub_type ==
                   BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT) {
                /* Get stats from the parent child table */
                if (ulp_flow_db_parent_flow_count_get(ctxt, flow_id,
                                                      pc_idx,
                                                      &count->hits,
                                                      &count->bytes,
                                                      count->reset)) {
                        bnxt_ulp_cntxt_release_fdb_lock(ctxt);
                        return -EIO;
                }
                if (count->hits)
                        count->hits_set = 1;
                if (count->bytes)
                        count->bytes_set = 1;
        } else {
                rc = -EINVAL;
        }

        bnxt_ulp_cntxt_release_fdb_lock(ctxt);
        return rc;
}

/*
 * Set the parent flow if the entry is a SW accumulation counter entry.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * hw_cntr_id [in] The HW flow counter ID
 *
 * pc_idx [in] parent child db index
 *
 */
int32_t ulp_fc_mgr_cntr_parent_flow_set(struct bnxt_ulp_context *ctxt,
                                        uint8_t dir,
                                        uint32_t hw_cntr_id,
                                        uint32_t pc_idx)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;
        uint32_t sw_cntr_idx;
        int32_t rc = 0;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return -EIO;

        pthread_mutex_lock(&ulp_fc_info->fc_lock);
        sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
        if (ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid) {
                pc_idx |= FLOW_CNTR_PC_FLOW_VALID;
                ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pc_flow_idx = pc_idx;
        } else {
                BNXT_DRV_DBG(ERR, "Failed to set parent flow id %x:%x\n",
                             hw_cntr_id, pc_idx);
                rc = -ENOENT;
        }
        pthread_mutex_unlock(&ulp_fc_info->fc_lock);

        return rc;
}