/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Broadcom
 * All rights reserved.
 */

#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_tf_common.h"
#include "tf_core.h"
#include "tf_ext_flow_handle.h"

#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "ulp_mark_mgr.h"
#include "ulp_fc_mgr.h"
#include "ulp_flow_db.h"
#include "ulp_mapper.h"
#include "ulp_port_db.h"

/* Linked list of all TF sessions. */
STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list =
			STAILQ_HEAD_INITIALIZER(bnxt_ulp_session_list);

/* Mutex to synchronize bnxt_ulp_session_list operations. */
static pthread_mutex_t bnxt_ulp_global_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Allow the deletion of context only for the bnxt device that
 * created the session.
 */
bool
ulp_ctx_deinit_allowed(struct bnxt_ulp_context *ulp_ctx)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data)
		return false;

	if (!ulp_ctx->cfg_data->ref_cnt) {
		BNXT_TF_DBG(DEBUG, "ulp ctx shall initiate deinit\n");
		return true;
	}

	return false;
}

/*
 * Initialize a ULP session.
 * A ULP session contains all the resources needed to support rte flow
 * offloads. A session is initialized as part of rte_eth_device start.
 * A single vswitch instance can have multiple uplinks, which means
 * rte_eth_device start will be called for each of these devices.
 * The ULP session manager ensures that a single ULP session is
 * initialized only once. Apart from this, it also initializes the MARK
 * database, EEM table and flow database. The ULP session manager also
 * maintains a list of all opened ULP sessions.
 */
static int32_t
ulp_ctx_session_open(struct bnxt *bp,
		     struct bnxt_ulp_session_state *session)
{
	struct rte_eth_dev *ethdev = bp->eth_dev;
	int32_t rc = 0;
	struct tf_open_session_parms params;
	struct tf_session_resources *resources;

	memset(&params, 0, sizeof(params));

	rc = rte_eth_dev_get_name_by_port(ethdev->data->port_id,
					  params.ctrl_chan_name);
	if (rc) {
		BNXT_TF_DBG(ERR, "Invalid port %d, rc = %d\n",
			    ethdev->data->port_id, rc);
		return rc;
	}

	params.shadow_copy = true;
	params.device_type = TF_DEVICE_TYPE_WH;
	resources = &params.resources;
	/** RX **/
	/* Identifiers */
	resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 422;
	resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 6;
	resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_WC_PROF] = 192;
	resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 64;
	resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_EM_PROF] = 192;

	/* Table Types */
	resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
	resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 16384;
	resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;

	/* ENCAP */
	resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 511;
	resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 63;

	/* TCAMs */
	resources->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
		422;
	resources->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
		6;
	resources->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 960;
	resources->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 88;

	/* EM */
	resources->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 13168;

	/* EEM */
	resources->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;

	/* SP */
	resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_SP_SMAC] = 255;

	/** TX **/
	/* Identifiers */
	resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 292;
	resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 148;
	resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_WC_PROF] = 192;
	resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 64;
	resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_EM_PROF] = 192;

	/* Table Types */
	resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
	resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 16384;
	resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;

	/* ENCAP */
	resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_64B] = 511;
	resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 223;
	resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 255;

	/* TCAMs */
	resources->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
		292;
	resources->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
		144;
	resources->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 960;
	resources->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 928;

	/* EM */
	resources->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 15232;

	/* EEM */
	resources->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;

	/* SP */
	resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = 488;
	resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV6] =
		511;

	rc = tf_open_session(&bp->tfp, &params);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to open TF session - %s, rc = %d\n",
			    params.ctrl_chan_name, rc);
		return -EINVAL;
	}
	if (!session->session_opened) {
		session->session_opened = 1;
		session->g_tfp = &bp->tfp;
	}
	return rc;
}

/*
 * Close the ULP session.
 * It takes the ulp context pointer.
 */
static void
ulp_ctx_session_close(struct bnxt *bp,
		      struct bnxt_ulp_session_state *session)
{
	/* close the session in the hardware */
	if (session->session_opened)
		tf_close_session(&bp->tfp);
	session->session_opened = 0;
	session->g_tfp = NULL;
}

static void
bnxt_init_tbl_scope_parms(struct bnxt *bp,
			  struct tf_alloc_tbl_scope_parms *params)
{
	struct bnxt_ulp_device_params *dparms;
	uint32_t dev_id;
	int rc;

	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
	if (rc)
		/* TBD: For now, just use the defaults. */
		dparms = NULL;
	else
		dparms = bnxt_ulp_device_params_get(dev_id);

	/*
	 * Set the flush timer for EEM entries. The value is in 100ms
	 * intervals, so 100 is 10s.
	 */
	params->hw_flow_cache_flush_timer = 100;

	if (!dparms) {
		params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
		params->rx_max_action_entry_sz_in_bits =
			BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
		params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
		params->rx_num_flows_in_k = BNXT_ULP_RX_NUM_FLOWS;
		params->rx_tbl_if_id = BNXT_ULP_RX_TBL_IF_ID;

		params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
		params->tx_max_action_entry_sz_in_bits =
			BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
		params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
		params->tx_num_flows_in_k = BNXT_ULP_TX_NUM_FLOWS;
		params->tx_tbl_if_id = BNXT_ULP_TX_TBL_IF_ID;
	} else {
		params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
		params->rx_max_action_entry_sz_in_bits =
			BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
		params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
		params->rx_num_flows_in_k = dparms->flow_db_num_entries / 1024;
		params->rx_tbl_if_id = BNXT_ULP_RX_TBL_IF_ID;

		params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
		params->tx_max_action_entry_sz_in_bits =
			BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
		params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
		params->tx_num_flows_in_k = dparms->flow_db_num_entries / 1024;
		params->tx_tbl_if_id = BNXT_ULP_TX_TBL_IF_ID;
	}
}
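
/*
 * Sizing note (illustrative only, not additional driver logic): when device
 * parameters are available, the per-direction flow count handed to the table
 * scope is dparms->flow_db_num_entries / 1024, i.e. in units of 1K flows.
 * For example, a flow database sized for 32768 entries yields
 * rx_num_flows_in_k = tx_num_flows_in_k = 32. The flush timer value of 100
 * above is in 100ms units, giving a 10 second EEM cache flush interval.
 */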

/* Initialize Extended Exact Match host memory. */
static int32_t
ulp_eem_tbl_scope_init(struct bnxt *bp)
{
	struct tf_alloc_tbl_scope_parms params = {0};
	uint32_t dev_id;
	struct bnxt_ulp_device_params *dparms;
	int rc;

	/* Get the dev specific number of flows that need to be supported. */
	if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
		BNXT_TF_DBG(ERR, "Invalid device id\n");
		return -EINVAL;
	}

	dparms = bnxt_ulp_device_params_get(dev_id);
	if (!dparms) {
		BNXT_TF_DBG(ERR, "could not fetch the device params\n");
		return -ENODEV;
	}

	if (dparms->flow_mem_type != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
		BNXT_TF_DBG(INFO, "Table Scope alloc is not required\n");
		return 0;
	}

	bnxt_init_tbl_scope_parms(bp, &params);

	rc = tf_alloc_tbl_scope(&bp->tfp, &params);
	if (rc) {
		BNXT_TF_DBG(ERR, "Unable to allocate eem table scope rc = %d\n",
			    rc);
		return rc;
	}

	rc = bnxt_ulp_cntxt_tbl_scope_id_set(bp->ulp_ctx, params.tbl_scope_id);
	if (rc) {
		BNXT_TF_DBG(ERR, "Unable to set table scope id\n");
		return rc;
	}

	return 0;
}

/* Free Extended Exact Match host memory. */
static int32_t
ulp_eem_tbl_scope_deinit(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
{
	struct tf_free_tbl_scope_parms params = {0};
	struct tf *tfp;
	int32_t rc = 0;
	struct bnxt_ulp_device_params *dparms;
	uint32_t dev_id;

	if (!ulp_ctx || !ulp_ctx->cfg_data)
		return -EINVAL;

	tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx);
	if (!tfp) {
		BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
		return -EINVAL;
	}

	/* Get the dev specific number of flows that need to be supported. */
	if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
		BNXT_TF_DBG(ERR, "Invalid device id\n");
		return -EINVAL;
	}

	dparms = bnxt_ulp_device_params_get(dev_id);
	if (!dparms) {
		BNXT_TF_DBG(ERR, "could not fetch the device params\n");
		return -ENODEV;
	}

	if (dparms->flow_mem_type != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
		BNXT_TF_DBG(INFO, "Table Scope free is not required\n");
		return 0;
	}

	rc = bnxt_ulp_cntxt_tbl_scope_id_get(ulp_ctx, &params.tbl_scope_id);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to get the table scope id\n");
		return -EINVAL;
	}

	rc = tf_free_tbl_scope(tfp, &params);
	if (rc) {
		BNXT_TF_DBG(ERR, "Unable to free table scope\n");
		return -EINVAL;
	}
	return rc;
}

/* The function to free and deinit the ulp context data. */
static int32_t
ulp_ctx_deinit(struct bnxt *bp,
	       struct bnxt_ulp_session_state *session)
{
	/* close the tf session */
	ulp_ctx_session_close(bp, session);

	/* Free the contents */
	if (session->cfg_data) {
		rte_free(session->cfg_data);
		bp->ulp_ctx->cfg_data = NULL;
		session->cfg_data = NULL;
	}
	return 0;
}

/* The function to allocate and initialize the ulp context data. */
static int32_t
ulp_ctx_init(struct bnxt *bp,
	     struct bnxt_ulp_session_state *session)
{
	struct bnxt_ulp_data *ulp_data;
	int32_t rc = 0;

	/* Allocate memory to hold ulp context data. */
	ulp_data = rte_zmalloc("bnxt_ulp_data",
			       sizeof(struct bnxt_ulp_data), 0);
	if (!ulp_data) {
		BNXT_TF_DBG(ERR, "Failed to allocate memory for ulp data\n");
		return -ENOMEM;
	}

	/* Increment the ulp context data reference count usage. */
	bp->ulp_ctx->cfg_data = ulp_data;
	session->cfg_data = ulp_data;
	ulp_data->ref_cnt++;
	ulp_data->ulp_flags |= BNXT_ULP_VF_REP_ENABLED;

	/* Open the ulp session. */
	rc = ulp_ctx_session_open(bp, session);
	if (rc) {
		session->session_opened = 1;
		(void)ulp_ctx_deinit(bp, session);
		return rc;
	}

	bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
	return rc;
}

/* The function to initialize ulp dparms with the values passed via devargs. */
static int32_t
ulp_dparms_init(struct bnxt *bp,
		struct bnxt_ulp_context *ulp_ctx)
{
	struct bnxt_ulp_device_params *dparms;
	uint32_t dev_id;

	if (!bp->max_num_kflows)
		return 0;

	if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id)) {
		BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
		return -EINVAL;
	}

	dparms = bnxt_ulp_device_params_get(dev_id);
	if (!dparms) {
		BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
		return -EINVAL;
	}

	/* num_flows = max_num_kflows * 1024 */
	dparms->flow_db_num_entries = bp->max_num_kflows * 1024;
	/* GFID = 2 * num_flows */
	dparms->mark_db_gfid_entries = dparms->flow_db_num_entries * 2;
	BNXT_TF_DBG(DEBUG, "Set the number of flows = %"PRIu64"\n",
		    dparms->flow_db_num_entries);

	return 0;
}
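
/*
 * Worked example (illustrative only; the devargs value is an assumption,
 * not taken from this file): starting the port with max_num_kflows=32
 * results in
 *	flow_db_num_entries  = 32 * 1024   = 32768
 *	mark_db_gfid_entries = 32768 * 2   = 65536
 * which in turn drives the EEM table scope sizing done above.
 */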

/* The function to initialize bp flags with truflow features. */
static int32_t
ulp_dparms_dev_port_intf_update(struct bnxt *bp,
				struct bnxt_ulp_context *ulp_ctx)
{
	struct bnxt_ulp_device_params *dparms;
	uint32_t dev_id;

	if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id)) {
		BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
		return -EINVAL;
	}

	dparms = bnxt_ulp_device_params_get(dev_id);
	if (!dparms) {
		BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
		return -EINVAL;
	}

	/* Update the bp flag with the gfid flag */
	if (dparms->flow_mem_type == BNXT_ULP_FLOW_MEM_TYPE_EXT)
		bp->flags |= BNXT_FLAG_GFID_ENABLE;

	return 0;
}

static int32_t
ulp_ctx_attach(struct bnxt *bp,
	       struct bnxt_ulp_session_state *session)
{
	int32_t rc = 0;

	/* Increment the ulp context data reference count usage. */
	bp->ulp_ctx->cfg_data = session->cfg_data;
	bp->ulp_ctx->cfg_data->ref_cnt++;

	/* update the session details in bnxt tfp */
	bp->tfp.session = session->g_tfp->session;

	/* Create a TF Client */
	rc = ulp_ctx_session_open(bp, session);
	if (rc) {
		PMD_DRV_LOG(ERR, "Failed to open ctxt session, rc:%d\n", rc);
		bp->tfp.session = NULL;
		return rc;
	}

	bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
	return rc;
}

static void
ulp_ctx_detach(struct bnxt *bp)
{
	if (bp->tfp.session) {
		tf_close_session(&bp->tfp);
		bp->tfp.session = NULL;
	}
}

/*
 * Initialize the state of a ULP session.
 * If the state of a ULP session is not initialized, set its state to
 * initialized. If the state is already initialized, do nothing.
 */
static void
ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init)
{
	pthread_mutex_lock(&session->bnxt_ulp_mutex);

	if (!session->bnxt_ulp_init) {
		session->bnxt_ulp_init = true;
		*init = false;
	} else {
		*init = true;
	}

	pthread_mutex_unlock(&session->bnxt_ulp_mutex);
}

/*
 * Check if a ULP session is already allocated for a specific PCI
 * domain and bus. If it is already allocated, simply return the session
 * pointer; otherwise allocate a new session.
 */
static struct bnxt_ulp_session_state *
ulp_get_session(struct rte_pci_addr *pci_addr)
{
	struct bnxt_ulp_session_state *session;

	STAILQ_FOREACH(session, &bnxt_ulp_session_list, next) {
		if (session->pci_info.domain == pci_addr->domain &&
		    session->pci_info.bus == pci_addr->bus) {
			return session;
		}
	}
	return NULL;
}

/*
 * Allocate and initialize a ULP session and set its state to INITIALIZED.
 * If it is already initialized, simply return the existing session.
 */
static struct bnxt_ulp_session_state *
ulp_session_init(struct bnxt *bp,
		 bool *init)
{
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr *pci_addr;
	struct bnxt_ulp_session_state *session;
	int rc = 0;

	if (!bp)
		return NULL;

	pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
	pci_addr = &pci_dev->addr;

	pthread_mutex_lock(&bnxt_ulp_global_mutex);

	session = ulp_get_session(pci_addr);
	if (!session) {
		/* Session not found; allocate a new one. */
		session = rte_zmalloc("bnxt_ulp_session",
				      sizeof(struct bnxt_ulp_session_state),
				      0);
		if (!session) {
			BNXT_TF_DBG(ERR,
				    "Allocation failed for bnxt_ulp_session\n");
			pthread_mutex_unlock(&bnxt_ulp_global_mutex);
			return NULL;
		} else {
			/* Add it to the queue */
			session->pci_info.domain = pci_addr->domain;
			session->pci_info.bus = pci_addr->bus;
			rc = pthread_mutex_init(&session->bnxt_ulp_mutex, NULL);
			if (rc) {
				BNXT_TF_DBG(ERR, "mutex create failed\n");
				pthread_mutex_unlock(&bnxt_ulp_global_mutex);
				return NULL;
			}
			STAILQ_INSERT_TAIL(&bnxt_ulp_session_list,
					   session, next);
		}
	}
	ulp_context_initialized(session, init);
	pthread_mutex_unlock(&bnxt_ulp_global_mutex);
	return session;
}

/*
 * When a device is closed, remove its associated session from the global
 * session list.
 */
static void
ulp_session_deinit(struct bnxt_ulp_session_state *session)
{
	if (!session)
		return;

	if (!session->cfg_data) {
		pthread_mutex_lock(&bnxt_ulp_global_mutex);
		STAILQ_REMOVE(&bnxt_ulp_session_list, session,
			      bnxt_ulp_session_state, next);
		pthread_mutex_destroy(&session->bnxt_ulp_mutex);
		rte_free(session);
		pthread_mutex_unlock(&bnxt_ulp_global_mutex);
	}
}

/*
 * Internal API to enable the NAT feature.
 * Set set_flag to 1 to set the value or to 0 to reset it.
 * Returns 0 on success.
 */
static int32_t
bnxt_ulp_global_cfg_update(struct bnxt *bp,
			   enum tf_dir dir,
			   enum tf_global_config_type type,
			   uint32_t offset,
			   uint32_t value,
			   uint32_t set_flag)
{
	uint32_t global_cfg = 0;
	int rc;
	struct tf_global_cfg_parms parms = { 0 };

	/* Initialize the params */
	parms.dir = dir;
	parms.type = type;
	parms.offset = offset;
	parms.config = (uint8_t *)&global_cfg;
	parms.config_sz_in_bytes = sizeof(global_cfg);

	rc = tf_get_global_cfg(&bp->tfp, &parms);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to get global cfg 0x%x rc:%d\n",
			    type, rc);
		return rc;
	}

	if (set_flag)
		global_cfg |= value;
	else
		global_cfg &= ~value;

	/* SET the register RE_CFA_REG_ACT_TECT */
	rc = tf_set_global_cfg(&bp->tfp, &parms);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to set global cfg 0x%x rc:%d\n",
			    type, rc);
		return rc;
	}
	return rc;
}
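
/*
 * Usage sketch (mirrors the calls made from bnxt_ulp_init() and
 * bnxt_ulp_deinit() below, shown here only for illustration): the NAT bits
 * are enabled per direction with
 *
 *	bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
 *				   TF_TUNNEL_ENCAP_NAT,
 *				   BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
 *
 * and cleared again by passing set_flag = 0; the helper performs a
 * read-modify-write of the global configuration word.
 */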

/* Internal function to delete all the flows belonging to the given port. */
static void
bnxt_ulp_flush_port_flows(struct bnxt *bp)
{
	uint16_t func_id;

	/* it is assumed that the port is either a TVF or a PF */
	if (ulp_port_db_port_func_id_get(bp->ulp_ctx,
					 bp->eth_dev->data->port_id,
					 &func_id)) {
		BNXT_TF_DBG(ERR, "Invalid argument\n");
		return;
	}
	(void)ulp_flow_db_function_flow_flush(bp->ulp_ctx, func_id);
}

/* Internal function to delete the VFR default flows. */
static void
bnxt_ulp_destroy_vfr_default_rules(struct bnxt *bp, bool global)
{
	struct bnxt_ulp_vfr_rule_info *info;
	uint16_t port_id;
	struct rte_eth_dev *vfr_eth_dev;
	struct bnxt_representor *vfr_bp;

	if (!BNXT_TRUFLOW_EN(bp) || BNXT_ETH_DEV_IS_REPRESENTOR(bp->eth_dev))
		return;

	if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
		return;

	/* Delete default rules for all ports */
	for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
		info = &bp->ulp_ctx->cfg_data->vfr_rule_info[port_id];
		if (!info->valid)
			continue;

		if (!global && info->parent_port_id !=
		    bp->eth_dev->data->port_id)
			continue;

		/* Destroy the flows */
		ulp_default_flow_destroy(bp->eth_dev, info->rep2vf_flow_id);
		ulp_default_flow_destroy(bp->eth_dev, info->vf2rep_flow_id);
		/* Clean up the tx action pointer */
		vfr_eth_dev = &rte_eth_devices[port_id];
		if (vfr_eth_dev) {
			vfr_bp = vfr_eth_dev->data->dev_private;
			vfr_bp->vfr_tx_cfa_action = 0;
		}
		memset(info, 0, sizeof(struct bnxt_ulp_vfr_rule_info));
	}
}

/*
 * When a port is de-initialized by DPDK, this function is called to clear
 * the ULP context and the rest of the infrastructure associated with it.
 */
static void
bnxt_ulp_deinit(struct bnxt *bp,
		struct bnxt_ulp_session_state *session)
{
	if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
		return;

	/* clean up default flows */
	bnxt_ulp_destroy_df_rules(bp, true);

	/* clean up default VFR flows */
	bnxt_ulp_destroy_vfr_default_rules(bp, true);

	/* clean up regular flows */
	ulp_flow_db_flush_flows(bp->ulp_ctx, BNXT_ULP_REGULAR_FLOW_TABLE);

	/* cleanup the eem table scope */
	ulp_eem_tbl_scope_deinit(bp, bp->ulp_ctx);

	/* cleanup the flow database */
	ulp_flow_db_deinit(bp->ulp_ctx);

	/* Delete the Mark database */
	ulp_mark_db_deinit(bp->ulp_ctx);

	/* cleanup the ulp mapper */
	ulp_mapper_deinit(bp->ulp_ctx);

	/* Delete the Flow Counter Manager */
	ulp_fc_mgr_deinit(bp->ulp_ctx);

	/* Delete the Port database */
	ulp_port_db_deinit(bp->ulp_ctx);

	/* Disable NAT feature */
	(void)bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
					 TF_TUNNEL_ENCAP_NAT,
					 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);

	(void)bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
					 TF_TUNNEL_ENCAP_NAT,
					 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);

	/* free the flow db lock */
	pthread_mutex_destroy(&bp->ulp_ctx->cfg_data->flow_db_lock);

	/* Delete the ulp context and tf session and free the ulp context */
	ulp_ctx_deinit(bp, session);
	BNXT_TF_DBG(DEBUG, "ulp ctx has been deinitialized\n");
}

/*
 * When a port is initialized by DPDK, this function is called to initialize
 * the ULP context and the rest of the infrastructure associated with it.
 */
static int32_t
bnxt_ulp_init(struct bnxt *bp,
	      struct bnxt_ulp_session_state *session)
{
	int rc;

	/* Allocate and initialize the ulp context. */
	rc = ulp_ctx_init(bp, session);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to create the ulp context\n");
		goto jump_to_error;
	}

	rc = pthread_mutex_init(&bp->ulp_ctx->cfg_data->flow_db_lock, NULL);
	if (rc) {
		BNXT_TF_DBG(ERR, "Unable to initialize flow db lock\n");
		goto jump_to_error;
	}

	/* Initialize ulp dparms with the values passed via devargs */
	rc = ulp_dparms_init(bp, bp->ulp_ctx);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to initialize the dparms\n");
		goto jump_to_error;
	}

	/* create the port database */
	rc = ulp_port_db_init(bp->ulp_ctx, bp->port_cnt);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to create the port database\n");
		goto jump_to_error;
	}

	/* Create the Mark database. */
	rc = ulp_mark_db_init(bp->ulp_ctx);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to create the mark database\n");
		goto jump_to_error;
	}

	/* Create the flow database. */
	rc = ulp_flow_db_init(bp->ulp_ctx);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to create the flow database\n");
		goto jump_to_error;
	}

	/* Create the eem table scope. */
	rc = ulp_eem_tbl_scope_init(bp);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to create the eem scope table\n");
		goto jump_to_error;
	}

	rc = ulp_mapper_init(bp->ulp_ctx);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to initialize ulp mapper\n");
		goto jump_to_error;
	}

	rc = ulp_fc_mgr_init(bp->ulp_ctx);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to initialize ulp flow counter mgr\n");
		goto jump_to_error;
	}

	/*
	 * Enable the NAT feature. Set the global configuration register
	 * for tunnel encap to enable NAT with reuse of the existing inner
	 * L2 header smac and dmac.
	 */
	rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
					TF_TUNNEL_ENCAP_NAT,
					BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to set rx global configuration\n");
		goto jump_to_error;
	}

	rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
					TF_TUNNEL_ENCAP_NAT,
					BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to set tx global configuration\n");
		goto jump_to_error;
	}
	BNXT_TF_DBG(DEBUG, "ulp ctx has been initialized\n");
	return rc;

jump_to_error:
	bnxt_ulp_deinit(bp, session);
	return rc;
}

/*
 * When a port is initialized by DPDK, this function sets up
 * the port specific details.
 */
int32_t
bnxt_ulp_port_init(struct bnxt *bp)
{
	struct bnxt_ulp_session_state *session;
	bool initialized;
	int32_t rc = 0;

	if (!bp || !BNXT_TRUFLOW_EN(bp))
		return rc;

	if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
		BNXT_TF_DBG(ERR,
			    "Skip ulp init for port: %d, not a TVF or PF\n",
			    bp->eth_dev->data->port_id);
		return rc;
	}

	if (bp->ulp_ctx) {
		BNXT_TF_DBG(DEBUG, "ulp ctx already allocated\n");
		return rc;
	}

	bp->ulp_ctx = rte_zmalloc("bnxt_ulp_ctx",
				  sizeof(struct bnxt_ulp_context), 0);
	if (!bp->ulp_ctx) {
		BNXT_TF_DBG(ERR, "Failed to allocate ulp ctx\n");
		return -ENOMEM;
	}

	/*
	 * Multiple uplink ports can be associated with a single vswitch.
	 * Make sure only the port that is started first will initialize
	 * the TF session.
	 */
	session = ulp_session_init(bp, &initialized);
	if (!session) {
		BNXT_TF_DBG(ERR, "Failed to initialize the tf session\n");
		rc = -EIO;
		goto jump_to_error;
	}

	if (initialized) {
		/*
		 * If ULP is already initialized for a specific domain then
		 * simply assign the ulp context to this rte_eth_dev.
		 */
		rc = ulp_ctx_attach(bp, session);
		if (rc) {
			BNXT_TF_DBG(ERR, "Failed to attach the ulp context\n");
			goto jump_to_error;
		}
	} else {
		rc = bnxt_ulp_init(bp, session);
		if (rc) {
			BNXT_TF_DBG(ERR, "Failed to initialize the ulp init\n");
			goto jump_to_error;
		}
	}

	/* Update bnxt driver flags */
	rc = ulp_dparms_dev_port_intf_update(bp, bp->ulp_ctx);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to update driver flags\n");
		goto jump_to_error;
	}

	/* update the port database for the given interface */
	rc = ulp_port_db_dev_port_intf_update(bp->ulp_ctx, bp->eth_dev);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to update port database\n");
		goto jump_to_error;
	}
	/* create the default rules */
	bnxt_ulp_create_df_rules(bp);
	BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port init\n",
		    bp->eth_dev->data->port_id);
	return rc;

jump_to_error:
	bnxt_ulp_port_deinit(bp);
	return rc;
}
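
/*
 * Reference-count trace (summary of the logic above, not additional
 * behavior): when several uplink ports share one PCI domain/bus, the first
 * started port takes the bnxt_ulp_init() path and ulp_ctx_init() sets
 * ref_cnt to 1; every later port finds the session already marked
 * initialized and goes through ulp_ctx_attach(), bumping ref_cnt to 2, 3,
 * and so on. Each bnxt_ulp_port_deinit() decrements ref_cnt and only the
 * last port (ref_cnt reaching 0) performs the full bnxt_ulp_deinit().
 */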

/*
 * When a port is de-initialized by DPDK, this function clears up
 * the port specific details.
 */
void
bnxt_ulp_port_deinit(struct bnxt *bp)
{
	struct bnxt_ulp_session_state *session;
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr *pci_addr;

	if (!BNXT_TRUFLOW_EN(bp))
		return;

	if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
		BNXT_TF_DBG(ERR,
			    "Skip ULP deinit port:%d, not a TVF or PF\n",
			    bp->eth_dev->data->port_id);
		return;
	}

	if (!bp->ulp_ctx) {
		BNXT_TF_DBG(DEBUG, "ulp ctx already de-allocated\n");
		return;
	}

	BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port deinit\n",
		    bp->eth_dev->data->port_id);

	/* Get the session details */
	pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
	pci_addr = &pci_dev->addr;
	pthread_mutex_lock(&bnxt_ulp_global_mutex);
	session = ulp_get_session(pci_addr);
	pthread_mutex_unlock(&bnxt_ulp_global_mutex);

	/* If the session is not found, just exit. */
	if (!session) {
		/* Free the ulp context */
		rte_free(bp->ulp_ctx);
		bp->ulp_ctx = NULL;
		return;
	}

	/* Check the reference count to deinit or detach. */
	if (bp->ulp_ctx->cfg_data && bp->ulp_ctx->cfg_data->ref_cnt) {
		bp->ulp_ctx->cfg_data->ref_cnt--;
		if (bp->ulp_ctx->cfg_data->ref_cnt) {
			/* free the port details */
			/* Free the default flow rules for this port */
			bnxt_ulp_destroy_df_rules(bp, false);
			bnxt_ulp_destroy_vfr_default_rules(bp, false);

			/* free flows associated with this port */
			bnxt_ulp_flush_port_flows(bp);

			/* close the session associated with this port */
			ulp_ctx_detach(bp);
		} else {
			/* Perform ulp ctx deinit */
			bnxt_ulp_deinit(bp, session);
		}
	}

	/* clean up the session */
	ulp_session_deinit(session);

	/* Free the ulp context */
	rte_free(bp->ulp_ctx);
	bp->ulp_ctx = NULL;
}

/* Below are the access functions to access internal data of ulp context. */
/* Function to set the Mark DB into the context. */
int32_t
bnxt_ulp_cntxt_ptr2_mark_db_set(struct bnxt_ulp_context *ulp_ctx,
				struct bnxt_ulp_mark_tbl *mark_tbl)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data) {
		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
		return -EINVAL;
	}

	ulp_ctx->cfg_data->mark_tbl = mark_tbl;

	return 0;
}

/* Function to retrieve the Mark DB from the context. */
struct bnxt_ulp_mark_tbl *
bnxt_ulp_cntxt_ptr2_mark_db_get(struct bnxt_ulp_context *ulp_ctx)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data)
		return NULL;

	return ulp_ctx->cfg_data->mark_tbl;
}

/* Function to set the device id of the hardware. */
int32_t
bnxt_ulp_cntxt_dev_id_set(struct bnxt_ulp_context *ulp_ctx,
			  uint32_t dev_id)
{
	if (ulp_ctx && ulp_ctx->cfg_data) {
		ulp_ctx->cfg_data->dev_id = dev_id;
		return 0;
	}

	return -EINVAL;
}

/* Function to get the device id of the hardware. */
int32_t
bnxt_ulp_cntxt_dev_id_get(struct bnxt_ulp_context *ulp_ctx,
			  uint32_t *dev_id)
{
	if (ulp_ctx && ulp_ctx->cfg_data) {
		*dev_id = ulp_ctx->cfg_data->dev_id;
		return 0;
	}

	return -EINVAL;
}

/* Function to get the table scope id of the EEM table. */
int32_t
bnxt_ulp_cntxt_tbl_scope_id_get(struct bnxt_ulp_context *ulp_ctx,
				uint32_t *tbl_scope_id)
{
	if (ulp_ctx && ulp_ctx->cfg_data) {
		*tbl_scope_id = ulp_ctx->cfg_data->tbl_scope_id;
		return 0;
	}

	return -EINVAL;
}

/* Function to set the table scope id of the EEM table. */
int32_t
bnxt_ulp_cntxt_tbl_scope_id_set(struct bnxt_ulp_context *ulp_ctx,
				uint32_t tbl_scope_id)
{
	if (ulp_ctx && ulp_ctx->cfg_data) {
		ulp_ctx->cfg_data->tbl_scope_id = tbl_scope_id;
		return 0;
	}

	return -EINVAL;
}

/* Function to set the tfp session details in the ulp context. */
int32_t
bnxt_ulp_cntxt_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
{
	if (!ulp) {
		BNXT_TF_DBG(ERR, "Invalid arguments\n");
		return -EINVAL;
	}

	ulp->g_tfp = tfp;
	return 0;
}

/* Function to get the tfp session details from the ulp context. */
struct tf *
bnxt_ulp_cntxt_tfp_get(struct bnxt_ulp_context *ulp)
{
	if (!ulp) {
		BNXT_TF_DBG(ERR, "Invalid arguments\n");
		return NULL;
	}
	return ulp->g_tfp;
}

/*
 * Get the device table entry based on the device id.
 *
 * dev_id [in] The device id of the hardware
 *
 * Returns the pointer to the device parameters.
 */
struct bnxt_ulp_device_params *
bnxt_ulp_device_params_get(uint32_t dev_id)
{
	if (dev_id < BNXT_ULP_MAX_NUM_DEVICES)
		return &ulp_device_params[dev_id];
	return NULL;
}

/* Function to set the flow database to the ulp context. */
int32_t
bnxt_ulp_cntxt_ptr2_flow_db_set(struct bnxt_ulp_context *ulp_ctx,
				struct bnxt_ulp_flow_db *flow_db)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data)
		return -EINVAL;

	ulp_ctx->cfg_data->flow_db = flow_db;
	return 0;
}

/* Function to get the flow database from the ulp context. */
struct bnxt_ulp_flow_db *
bnxt_ulp_cntxt_ptr2_flow_db_get(struct bnxt_ulp_context *ulp_ctx)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data)
		return NULL;

	return ulp_ctx->cfg_data->flow_db;
}

/* Function to get the ulp context from the eth device. */
struct bnxt_ulp_context *
bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev *dev)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

	if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) {
		struct bnxt_representor *vfr = dev->data->dev_private;

		bp = vfr->parent_dev->data->dev_private;
	}

	if (!bp) {
		BNXT_TF_DBG(ERR, "Bnxt private data is not initialized\n");
		return NULL;
	}
	return bp->ulp_ctx;
}

int32_t
bnxt_ulp_cntxt_ptr2_mapper_data_set(struct bnxt_ulp_context *ulp_ctx,
				    void *mapper_data)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data) {
		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
		return -EINVAL;
	}

	ulp_ctx->cfg_data->mapper_data = mapper_data;
	return 0;
}

void *
bnxt_ulp_cntxt_ptr2_mapper_data_get(struct bnxt_ulp_context *ulp_ctx)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data) {
		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
		return NULL;
	}

	return ulp_ctx->cfg_data->mapper_data;
}

/* Function to set the port database to the ulp context. */
int32_t
bnxt_ulp_cntxt_ptr2_port_db_set(struct bnxt_ulp_context *ulp_ctx,
				struct bnxt_ulp_port_db *port_db)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data)
		return -EINVAL;

	ulp_ctx->cfg_data->port_db = port_db;
	return 0;
}

/* Function to get the port database from the ulp context. */
struct bnxt_ulp_port_db *
bnxt_ulp_cntxt_ptr2_port_db_get(struct bnxt_ulp_context *ulp_ctx)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data)
		return NULL;

	return ulp_ctx->cfg_data->port_db;
}

/* Function to set the flow counter info into the context. */
int32_t
bnxt_ulp_cntxt_ptr2_fc_info_set(struct bnxt_ulp_context *ulp_ctx,
				struct bnxt_ulp_fc_info *ulp_fc_info)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data) {
		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
		return -EINVAL;
	}

	ulp_ctx->cfg_data->fc_info = ulp_fc_info;

	return 0;
}

/* Function to retrieve the flow counter info from the context. */
struct bnxt_ulp_fc_info *
bnxt_ulp_cntxt_ptr2_fc_info_get(struct bnxt_ulp_context *ulp_ctx)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data)
		return NULL;

	return ulp_ctx->cfg_data->fc_info;
}

/* Function to get the ulp flags from the ulp context. */
int32_t
bnxt_ulp_cntxt_ptr2_ulp_flags_get(struct bnxt_ulp_context *ulp_ctx,
				  uint32_t *flags)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data)
		return -1;

	*flags = ulp_ctx->cfg_data->ulp_flags;
	return 0;
}

/* Function to get the ulp vfr info from the ulp context. */
struct bnxt_ulp_vfr_rule_info *
bnxt_ulp_cntxt_ptr2_ulp_vfr_info_get(struct bnxt_ulp_context *ulp_ctx,
				     uint32_t port_id)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data || port_id >= RTE_MAX_ETHPORTS)
		return NULL;

	return &ulp_ctx->cfg_data->vfr_rule_info[port_id];
}

/* Function to acquire the flow database lock from the ulp context. */
int32_t
bnxt_ulp_cntxt_acquire_fdb_lock(struct bnxt_ulp_context *ulp_ctx)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data)
		return -1;

	if (pthread_mutex_lock(&ulp_ctx->cfg_data->flow_db_lock)) {
		BNXT_TF_DBG(ERR, "unable to acquire fdb lock\n");
		return -1;
	}
	return 0;
}

/* Function to release the flow database lock from the ulp context. */
void
bnxt_ulp_cntxt_release_fdb_lock(struct bnxt_ulp_context *ulp_ctx)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data)
		return;

	pthread_mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock);
}