/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Broadcom
 * All rights reserved.
 */

#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_tf_common.h"
#include "tf_core.h"
#include "tf_ext_flow_handle.h"

#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "ulp_mark_mgr.h"
#include "ulp_fc_mgr.h"
#include "ulp_flow_db.h"
#include "ulp_mapper.h"
#include "ulp_port_db.h"

/* Linked list of all TF sessions. */
STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list =
			STAILQ_HEAD_INITIALIZER(bnxt_ulp_session_list);

/* Mutex to synchronize bnxt_ulp_session_list operations. */
static pthread_mutex_t bnxt_ulp_global_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Allow the deletion of context only for the bnxt device that
 * created the session.
 * TBD - The implementation of the function should change to
 * using the reference count once tf_session_attach functionality
 * is fixed.
 */
bool
ulp_ctx_deinit_allowed(void *ptr)
{
	struct bnxt *bp = (struct bnxt *)ptr;

	if (!bp)
		return 0;

	if (&bp->tfp == bp->ulp_ctx->g_tfp)
		return 1;

	return 0;
}

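/*
 * Illustrative sketch of how the check above is used further below
 * (see ulp_ctx_detach() and ulp_eem_tbl_scope_deinit()): shared ULP
 * state is only torn down by the port that owns the TF session.
 *
 *	if (ulp_ctx_deinit_allowed(bp))
 *		ulp_ctx_deinit(bp, session);
 */
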
/*
 * Initialize an ULP session.
 * An ULP session will contain all the resources needed to support rte flow
 * offloads. A session is initialized as part of rte_eth_device start.
 * A single vswitch instance can have multiple uplinks which means
 * rte_eth_device start will be called for each of these devices.
 * ULP session manager will make sure that a single ULP session is only
 * initialized once. Apart from this, it also initializes the MARK database,
 * EEM table & flow database. ULP session manager also manages a list of
 * all opened ULP sessions.
 */
static int32_t
ulp_ctx_session_open(struct bnxt *bp,
		     struct bnxt_ulp_session_state *session)
{
	struct rte_eth_dev *ethdev = bp->eth_dev;
	int32_t rc = 0;
	struct tf_open_session_parms params;
	struct tf_session_resources *resources;

	memset(&params, 0, sizeof(params));

	rc = rte_eth_dev_get_name_by_port(ethdev->data->port_id,
					  params.ctrl_chan_name);
	if (rc) {
		BNXT_TF_DBG(ERR, "Invalid port %d, rc = %d\n",
			    ethdev->data->port_id, rc);
		return rc;
	}

	params.shadow_copy = true;
	params.device_type = TF_DEVICE_TYPE_WH;
	resources = &params.resources;
	/** RX **/
	/* Identifiers */
	resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 422;
	resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 6;
	resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_WC_PROF] = 8;
	resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 8;
	resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_EM_PROF] = 8;

	/* Table Types */
	resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
	resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 8192;
	resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;

	/* ENCAP */
	resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 16;
	resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 63;

	/* TCAMs */
	resources->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
		422;
	resources->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
		6;
	resources->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 8;
	resources->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 88;

	/* EM */
	resources->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 13176;

	/* EEM */
	resources->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;

	/** TX **/
	/* Identifiers */
	resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 292;
	resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 144;
	resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_WC_PROF] = 8;
	resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 8;
	resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_EM_PROF] = 8;

	/* Table Types */
	resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
	resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 8192;
	resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;

	/* ENCAP */
	resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_64B] = 511;
	resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 200;

	/* TCAMs */
	resources->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
		292;
	resources->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
		144;
	resources->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 8;
	resources->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 8;

	/* EM */
	resources->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 15232;

	/* EEM */
	resources->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;

	/* SP */
	resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = 488;

	rc = tf_open_session(&bp->tfp, &params);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to open TF session - %s, rc = %d\n",
			    params.ctrl_chan_name, rc);
		return -EINVAL;
	}
	session->session_opened = 1;
	session->g_tfp = &bp->tfp;
	return rc;
}

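/*
 * Note on sharing: only the first port of the vswitch reaches
 * ulp_ctx_session_open(). Ports started later take the
 * ulp_ctx_attach() path in bnxt_ulp_init() and simply reuse the
 * handles recorded here, roughly:
 *
 *	ulp_ctx->cfg_data = session->cfg_data;
 *	ulp_ctx->g_tfp = session->g_tfp;
 */
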
/*
 * Close the ULP session.
 * It takes the bnxt device and the session state as arguments.
 */
static void
ulp_ctx_session_close(struct bnxt *bp,
		      struct bnxt_ulp_session_state *session)
{
	/* close the session in the hardware */
	if (session->session_opened)
		tf_close_session(&bp->tfp);
	session->session_opened = 0;
	session->g_tfp = NULL;
	bp->ulp_ctx->g_tfp = NULL;
}

static void
bnxt_init_tbl_scope_parms(struct bnxt *bp,
			  struct tf_alloc_tbl_scope_parms *params)
{
	struct bnxt_ulp_device_params *dparms;
	uint32_t dev_id;
	int rc;

	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
	if (rc)
		/* TBD: For now, just use the defaults. */
		dparms = NULL;
	else
		dparms = bnxt_ulp_device_params_get(dev_id);

	/*
	 * Set the flush timer for EEM entries. The value is in 100ms
	 * intervals, so 100 is 10s.
	 */
	params->hw_flow_cache_flush_timer = 100;

	if (!dparms) {
		params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
		params->rx_max_action_entry_sz_in_bits =
			BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
		params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
		params->rx_num_flows_in_k = BNXT_ULP_RX_NUM_FLOWS;
		params->rx_tbl_if_id = BNXT_ULP_RX_TBL_IF_ID;

		params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
		params->tx_max_action_entry_sz_in_bits =
			BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
		params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
		params->tx_num_flows_in_k = BNXT_ULP_TX_NUM_FLOWS;
		params->tx_tbl_if_id = BNXT_ULP_TX_TBL_IF_ID;
	} else {
		params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
		params->rx_max_action_entry_sz_in_bits =
			BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
		params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
		params->rx_num_flows_in_k = dparms->flow_db_num_entries / 1024;
		params->rx_tbl_if_id = BNXT_ULP_RX_TBL_IF_ID;

		params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
		params->tx_max_action_entry_sz_in_bits =
			BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
		params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
		params->tx_num_flows_in_k = dparms->flow_db_num_entries / 1024;
		params->tx_tbl_if_id = BNXT_ULP_TX_TBL_IF_ID;
	}
}

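/*
 * Sizing example (assuming the max_num_kflows devarg was supplied):
 * with max_num_kflows = 32, ulp_dparms_init() below sets
 * flow_db_num_entries to 32 * 1024 = 32768, so rx_num_flows_in_k and
 * tx_num_flows_in_k both come out as 32768 / 1024 = 32.
 */
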
/* Initialize Extended Exact Match host memory. */
static int32_t
ulp_eem_tbl_scope_init(struct bnxt *bp)
{
	struct tf_alloc_tbl_scope_parms params = {0};
	uint32_t dev_id;
	struct bnxt_ulp_device_params *dparms;
	int rc;

	/* Get the dev specific number of flows that need to be supported. */
	if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
		BNXT_TF_DBG(ERR, "Invalid device id\n");
		return -EINVAL;
	}

	dparms = bnxt_ulp_device_params_get(dev_id);
	if (!dparms) {
		BNXT_TF_DBG(ERR, "could not fetch the device params\n");
		return -ENODEV;
	}

	if (dparms->flow_mem_type != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
		BNXT_TF_DBG(INFO, "Table Scope alloc is not required\n");
		return 0;
	}

	bnxt_init_tbl_scope_parms(bp, &params);

	rc = tf_alloc_tbl_scope(&bp->tfp, &params);
	if (rc) {
		BNXT_TF_DBG(ERR, "Unable to allocate eem table scope rc = %d\n",
			    rc);
		return rc;
	}

	rc = bnxt_ulp_cntxt_tbl_scope_id_set(bp->ulp_ctx, params.tbl_scope_id);
	if (rc) {
		BNXT_TF_DBG(ERR, "Unable to set table scope id\n");
		return rc;
	}

	return 0;
}

/* Free Extended Exact Match host memory. */
static int32_t
ulp_eem_tbl_scope_deinit(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
{
	struct tf_free_tbl_scope_parms params = {0};
	struct tf *tfp;
	int32_t rc = 0;
	struct bnxt_ulp_device_params *dparms;
	uint32_t dev_id;

	if (!ulp_ctx || !ulp_ctx->cfg_data)
		return -EINVAL;

	/* Free the resources for the last device */
	if (!ulp_ctx_deinit_allowed(bp))
		return rc;

	tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx);
	if (!tfp) {
		BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
		return -EINVAL;
	}

	/* Get the dev specific number of flows that need to be supported. */
	if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
		BNXT_TF_DBG(ERR, "Invalid device id\n");
		return -EINVAL;
	}

	dparms = bnxt_ulp_device_params_get(dev_id);
	if (!dparms) {
		BNXT_TF_DBG(ERR, "could not fetch the device params\n");
		return -ENODEV;
	}

	if (dparms->flow_mem_type != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
		BNXT_TF_DBG(INFO, "Table Scope free is not required\n");
		return 0;
	}

	rc = bnxt_ulp_cntxt_tbl_scope_id_get(ulp_ctx, &params.tbl_scope_id);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to get the table scope id\n");
		return -EINVAL;
	}

	rc = tf_free_tbl_scope(tfp, &params);
	if (rc) {
		BNXT_TF_DBG(ERR, "Unable to free table scope\n");
		return -EINVAL;
	}
	return rc;
}

/* The function to free and deinit the ulp context data. */
static int32_t
ulp_ctx_deinit(struct bnxt *bp,
	       struct bnxt_ulp_session_state *session)
{
	if (!session || !bp) {
		BNXT_TF_DBG(ERR, "Invalid Arguments\n");
		return -EINVAL;
	}

	/* close the tf session */
	ulp_ctx_session_close(bp, session);

	/* Free the contents */
	if (session->cfg_data) {
		rte_free(session->cfg_data);
		bp->ulp_ctx->cfg_data = NULL;
		session->cfg_data = NULL;
	}
	return 0;
}

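/*
 * Note: ulp_ctx_deinit() is reached from two places in this file: the
 * error path of ulp_ctx_init() below, and ulp_ctx_detach() when the
 * detaching port is the session owner (ulp_ctx_deinit_allowed()), so
 * the TF session and the shared cfg_data are released together.
 */
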
/* The function to allocate and initialize the ulp context data. */
static int32_t
ulp_ctx_init(struct bnxt *bp,
	     struct bnxt_ulp_session_state *session)
{
	struct bnxt_ulp_data *ulp_data;
	int32_t rc = 0;

	if (!session || !bp) {
		BNXT_TF_DBG(ERR, "Invalid Arguments\n");
		return -EINVAL;
	}

	/* Allocate memory to hold ulp context data. */
	ulp_data = rte_zmalloc("bnxt_ulp_data",
			       sizeof(struct bnxt_ulp_data), 0);
	if (!ulp_data) {
		BNXT_TF_DBG(ERR, "Failed to allocate memory for ulp data\n");
		return -ENOMEM;
	}

	/* Increment the ulp context data reference count usage. */
	bp->ulp_ctx->cfg_data = ulp_data;
	session->cfg_data = ulp_data;
	ulp_data->ref_cnt++;
	ulp_data->ulp_flags |= BNXT_ULP_VF_REP_ENABLED;

	/* Open the ulp session. */
	rc = ulp_ctx_session_open(bp, session);
	if (rc) {
		(void)ulp_ctx_deinit(bp, session);
		return rc;
	}

	bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, session->g_tfp);
	return rc;
}

/* The function to initialize ulp dparms with devargs */
static int32_t
ulp_dparms_init(struct bnxt *bp,
		struct bnxt_ulp_context *ulp_ctx)
{
	struct bnxt_ulp_device_params *dparms;
	uint32_t dev_id;

	if (!bp->max_num_kflows)
		return -EINVAL;

	if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id)) {
		BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
		return -EINVAL;
	}

	dparms = bnxt_ulp_device_params_get(dev_id);
	if (!dparms) {
		BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
		return -EINVAL;
	}

	/* num_flows = max_num_kflows * 1024 */
	dparms->flow_db_num_entries = bp->max_num_kflows * 1024;
	/* GFID = 2 * num_flows */
	dparms->mark_db_gfid_entries = dparms->flow_db_num_entries * 2;
	BNXT_TF_DBG(DEBUG, "Set the number of flows = %"PRIu64"\n",
		    dparms->flow_db_num_entries);

	return 0;
}

/* The function to initialize bp flags with truflow features */
static int32_t
ulp_dparms_dev_port_intf_update(struct bnxt *bp,
				struct bnxt_ulp_context *ulp_ctx)
{
	struct bnxt_ulp_device_params *dparms;
	uint32_t dev_id;

	if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id)) {
		BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
		return -EINVAL;
	}

	dparms = bnxt_ulp_device_params_get(dev_id);
	if (!dparms) {
		BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
		return -EINVAL;
	}

	/* Update the bp flag with gfid flag */
	if (dparms->flow_mem_type == BNXT_ULP_FLOW_MEM_TYPE_EXT)
		bp->flags |= BNXT_FLAG_GFID_ENABLE;

	return 0;
}

static int32_t
ulp_ctx_attach(struct bnxt_ulp_context *ulp_ctx,
	       struct bnxt_ulp_session_state *session)
{
	if (!ulp_ctx || !session) {
		BNXT_TF_DBG(ERR, "Invalid Arguments\n");
		return -EINVAL;
	}

	/* Increment the ulp context data reference count usage. */
	ulp_ctx->cfg_data = session->cfg_data;
	ulp_ctx->cfg_data->ref_cnt++;

	/* TBD call TF_session_attach. */
	ulp_ctx->g_tfp = session->g_tfp;
	return 0;
}

static int32_t
ulp_ctx_detach(struct bnxt *bp,
	       struct bnxt_ulp_session_state *session)
{
	struct bnxt_ulp_context *ulp_ctx;

	if (!bp || !session) {
		BNXT_TF_DBG(ERR, "Invalid Arguments\n");
		return -EINVAL;
	}
	ulp_ctx = bp->ulp_ctx;

	if (!ulp_ctx->cfg_data)
		return 0;

	/* TBD call TF_session_detach */

	/* Decrement the ulp context data reference count usage. */
	if (ulp_ctx->cfg_data->ref_cnt >= 1) {
		ulp_ctx->cfg_data->ref_cnt--;
		if (ulp_ctx_deinit_allowed(bp))
			ulp_ctx_deinit(bp, session);
		ulp_ctx->cfg_data = NULL;
		ulp_ctx->g_tfp = NULL;
		return 0;
	}
	BNXT_TF_DBG(ERR, "context detach on invalid data\n");
	return 0;
}

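/*
 * Reference counting summary (derived from the functions above):
 * ulp_ctx_init() and ulp_ctx_attach() each take a reference on the
 * shared session->cfg_data, ulp_ctx_detach() drops one, and only the
 * port that opened the TF session (ulp_ctx_deinit_allowed()) frees
 * the shared data via ulp_ctx_deinit().
 */
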
/*
 * Initialize the state of an ULP session.
 * If the state of an ULP session is not initialized, set its state to
 * initialized. If the state is already initialized, do nothing.
 */
static void
ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init)
{
	pthread_mutex_lock(&session->bnxt_ulp_mutex);

	if (!session->bnxt_ulp_init) {
		session->bnxt_ulp_init = true;
		*init = false;
	} else {
		*init = true;
	}

	pthread_mutex_unlock(&session->bnxt_ulp_mutex);
}

/*
 * Check if an ULP session is already allocated for a specific PCI
 * domain & bus. If it is already allocated, simply return the session
 * pointer; otherwise return NULL so the caller can allocate a new one.
 */
static struct bnxt_ulp_session_state *
ulp_get_session(struct rte_pci_addr *pci_addr)
{
	struct bnxt_ulp_session_state *session;

	STAILQ_FOREACH(session, &bnxt_ulp_session_list, next) {
		if (session->pci_info.domain == pci_addr->domain &&
		    session->pci_info.bus == pci_addr->bus) {
			return session;
		}
	}
	return NULL;
}

/*
 * Allocate and initialize an ULP session and set its state to INITIALIZED.
 * If it is already initialized, simply return the existing session.
 */
static struct bnxt_ulp_session_state *
ulp_session_init(struct bnxt *bp,
		 bool *init)
{
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr *pci_addr;
	struct bnxt_ulp_session_state *session;

	if (!bp)
		return NULL;

	pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
	pci_addr = &pci_dev->addr;

	pthread_mutex_lock(&bnxt_ulp_global_mutex);

	session = ulp_get_session(pci_addr);
	if (!session) {
		/* Session not found, allocate a new one */
		session = rte_zmalloc("bnxt_ulp_session",
				      sizeof(struct bnxt_ulp_session_state),
				      0);
		if (!session) {
			BNXT_TF_DBG(ERR,
				    "Allocation failed for bnxt_ulp_session\n");
			pthread_mutex_unlock(&bnxt_ulp_global_mutex);
			return NULL;

		} else {
			/* Add it to the queue */
			session->pci_info.domain = pci_addr->domain;
			session->pci_info.bus = pci_addr->bus;
			pthread_mutex_init(&session->bnxt_ulp_mutex, NULL);
			STAILQ_INSERT_TAIL(&bnxt_ulp_session_list,
					   session, next);
		}
	}
	ulp_context_initialized(session, init);
	pthread_mutex_unlock(&bnxt_ulp_global_mutex);
	return session;
}

/*
 * When a device is closed, remove its associated session from the global
 * session list.
 */
static void
ulp_session_deinit(struct bnxt_ulp_session_state *session)
{
	if (!session)
		return;

	if (!session->cfg_data) {
		pthread_mutex_lock(&bnxt_ulp_global_mutex);
		STAILQ_REMOVE(&bnxt_ulp_session_list, session,
			      bnxt_ulp_session_state, next);
		pthread_mutex_destroy(&session->bnxt_ulp_mutex);
		rte_free(session);
		pthread_mutex_unlock(&bnxt_ulp_global_mutex);
	}
}

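/*
 * Example of the sharing key used by ulp_get_session(): only the PCI
 * domain and bus are compared, so (hypothetical addresses) ports
 * 0000:3b:00.0 and 0000:3b:00.1 on the same NIC resolve to the same
 * bnxt_ulp_session_state, while a port on another bus gets its own.
 */
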
/*
 * Internal API to enable the NAT feature.
 * Set set_flag to 1 to set the value or zero to reset the value.
 * Returns 0 on success.
 */
static int32_t
bnxt_ulp_global_cfg_update(struct bnxt *bp,
			   enum tf_dir dir,
			   enum tf_global_config_type type,
			   uint32_t offset,
			   uint32_t value,
			   uint32_t set_flag)
{
	uint32_t global_cfg = 0;
	int rc;
	struct tf_global_cfg_parms parms;

	/* Initialize the params */
	parms.dir = dir;
	parms.type = type;
	parms.offset = offset;
	parms.config = (uint8_t *)&global_cfg;
	parms.config_sz_in_bytes = sizeof(global_cfg);

	rc = tf_get_global_cfg(&bp->tfp, &parms);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to get global cfg 0x%x rc:%d\n",
			    type, rc);
		return rc;
	}

	if (set_flag)
		global_cfg |= value;
	else
		global_cfg &= ~value;

	/* SET the register RE_CFA_REG_ACT_TECT */
	rc = tf_set_global_cfg(&bp->tfp, &parms);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to set global cfg 0x%x rc:%d\n",
			    type, rc);
		return rc;
	}
	return rc;
}

/*
 * When a port is initialized by dpdk, this function is called and it
 * initializes the ULP context and the rest of the infrastructure
 * associated with it.
 */
int32_t
bnxt_ulp_init(struct bnxt *bp)
{
	struct bnxt_ulp_session_state *session;
	bool init;
	int rc;

	if (!BNXT_TRUFLOW_EN(bp))
		return 0;

	if (bp->ulp_ctx) {
		BNXT_TF_DBG(DEBUG, "ulp ctx already allocated\n");
		return -EINVAL;
	}

	/*
	 * Multiple uplink ports can be associated with a single vswitch.
	 * Make sure only the port that is started first will initialize
	 * the TF session.
	 */
	session = ulp_session_init(bp, &init);
	if (!session) {
		BNXT_TF_DBG(ERR, "Failed to initialize the tf session\n");
		return -EINVAL;
	}

	bp->ulp_ctx = rte_zmalloc("bnxt_ulp_ctx",
				  sizeof(struct bnxt_ulp_context), 0);
	if (!bp->ulp_ctx) {
		BNXT_TF_DBG(ERR, "Failed to allocate ulp ctx\n");
		ulp_session_deinit(session);
		return -ENOMEM;
	}

	/*
	 * If ULP is already initialized for a specific domain then simply
	 * assign the ulp context to this rte_eth_dev.
	 */
	if (init) {
		rc = ulp_ctx_attach(bp->ulp_ctx, session);
		if (rc) {
			BNXT_TF_DBG(ERR,
				    "Failed to attach the ulp context\n");
			ulp_session_deinit(session);
			rte_free(bp->ulp_ctx);
			return rc;
		}

		/* Update bnxt driver flags */
		rc = ulp_dparms_dev_port_intf_update(bp, bp->ulp_ctx);
		if (rc) {
			BNXT_TF_DBG(ERR, "Failed to update driver flags\n");
			ulp_ctx_detach(bp, session);
			ulp_session_deinit(session);
			rte_free(bp->ulp_ctx);
			return rc;
		}

		/* update the port database */
		rc = ulp_port_db_dev_port_intf_update(bp->ulp_ctx, bp->eth_dev);
		if (rc) {
			BNXT_TF_DBG(ERR,
				    "Failed to update port database\n");
			ulp_ctx_detach(bp, session);
			ulp_session_deinit(session);
			rte_free(bp->ulp_ctx);
		}
		return rc;
	}

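	/*
	 * First port on this vswitch: the blocks below create the TF
	 * session, the port/mark/flow databases, the mapper and the flow
	 * counter manager, in that order.
	 */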
	/* Allocate and initialize the ulp context. */
	rc = ulp_ctx_init(bp, session);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to create the ulp context\n");
		goto jump_to_error;
	}

	/* Initialize ulp dparms with the values passed via devargs. */
	rc = ulp_dparms_init(bp, bp->ulp_ctx);

	/* create the port database */
	rc = ulp_port_db_init(bp->ulp_ctx, bp->port_cnt);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to create the port database\n");
		goto jump_to_error;
	}

	/* Update bnxt driver flags */
	rc = ulp_dparms_dev_port_intf_update(bp, bp->ulp_ctx);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to update driver flags\n");
		goto jump_to_error;
	}

	/* update the port database */
	rc = ulp_port_db_dev_port_intf_update(bp->ulp_ctx, bp->eth_dev);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to update port database\n");
		goto jump_to_error;
	}

	/* Create the Mark database. */
	rc = ulp_mark_db_init(bp->ulp_ctx);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to create the mark database\n");
		goto jump_to_error;
	}

	/* Create the flow database. */
	rc = ulp_flow_db_init(bp->ulp_ctx);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to create the flow database\n");
		goto jump_to_error;
	}

	/* Create the eem table scope. */
	rc = ulp_eem_tbl_scope_init(bp);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to create the eem scope table\n");
		goto jump_to_error;
	}

	rc = ulp_mapper_init(bp->ulp_ctx);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to initialize ulp mapper\n");
		goto jump_to_error;
	}

	rc = ulp_fc_mgr_init(bp->ulp_ctx);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to initialize ulp flow counter mgr\n");
		goto jump_to_error;
	}

	/*
	 * Enable the NAT feature. Set the global configuration register for
	 * tunnel encap to enable NAT with the reuse of the existing inner
	 * L2 header smac and dmac.
	 */
	rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
					TF_TUNNEL_ENCAP_NAT,
					(BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
					 BNXT_ULP_NAT_INNER_L2_HEADER_DMAC), 1);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to set rx global configuration\n");
		goto jump_to_error;
	}

	rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
					TF_TUNNEL_ENCAP_NAT,
					(BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
					 BNXT_ULP_NAT_INNER_L2_HEADER_DMAC), 1);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to set tx global configuration\n");
		goto jump_to_error;
	}

	return rc;

jump_to_error:
	bnxt_ulp_deinit(bp);
	return -ENOMEM;
}

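/*
 * A minimal sketch of the expected pairing from the bnxt ethdev
 * start/close paths (not part of this file):
 *
 *	rc = bnxt_ulp_init(bp);
 *	if (rc)
 *		return rc;
 *	...
 *	bnxt_ulp_deinit(bp);
 */
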
/* Below are the access functions to access internal data of ulp context. */

/*
 * When a port is deinitialized by dpdk, this function is called and it
 * clears the ULP context and the rest of the infrastructure associated
 * with it.
 */
void
bnxt_ulp_deinit(struct bnxt *bp)
{
	struct bnxt_ulp_session_state *session;
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr *pci_addr;

	if (!BNXT_TRUFLOW_EN(bp))
		return;

	/* Get the session first */
	pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
	pci_addr = &pci_dev->addr;
	pthread_mutex_lock(&bnxt_ulp_global_mutex);
	session = ulp_get_session(pci_addr);
	pthread_mutex_unlock(&bnxt_ulp_global_mutex);

	/* If the session is not found, just exit */
	if (!session)
		return;

	/* clean up default flows */
	bnxt_ulp_destroy_df_rules(bp, true);

	/* clean up regular flows */
	ulp_flow_db_flush_flows(bp->ulp_ctx, BNXT_ULP_REGULAR_FLOW_TABLE);

	/* cleanup the eem table scope */
	ulp_eem_tbl_scope_deinit(bp, bp->ulp_ctx);

	/* cleanup the flow database */
	ulp_flow_db_deinit(bp->ulp_ctx);

	/* Delete the Mark database */
	ulp_mark_db_deinit(bp->ulp_ctx);

	/* cleanup the ulp mapper */
	ulp_mapper_deinit(bp->ulp_ctx);

	/* Delete the Flow Counter Manager */
	ulp_fc_mgr_deinit(bp->ulp_ctx);

	/* Delete the Port database */
	ulp_port_db_deinit(bp->ulp_ctx);

	/* Disable NAT feature */
	(void)bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
					 TF_TUNNEL_ENCAP_NAT,
					 (BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
					  BNXT_ULP_NAT_INNER_L2_HEADER_DMAC),
					 0);

	(void)bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
					 TF_TUNNEL_ENCAP_NAT,
					 (BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
					  BNXT_ULP_NAT_INNER_L2_HEADER_DMAC),
					 0);

	/* Delete the ulp context and tf session */
	ulp_ctx_detach(bp, session);

	/* Finally delete the bnxt session */
	ulp_session_deinit(session);

	rte_free(bp->ulp_ctx);
}

/* Function to set the Mark DB into the context */
int32_t
bnxt_ulp_cntxt_ptr2_mark_db_set(struct bnxt_ulp_context *ulp_ctx,
				struct bnxt_ulp_mark_tbl *mark_tbl)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data) {
		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
		return -EINVAL;
	}

	ulp_ctx->cfg_data->mark_tbl = mark_tbl;

	return 0;
}

/* Function to retrieve the Mark DB from the context. */
struct bnxt_ulp_mark_tbl *
bnxt_ulp_cntxt_ptr2_mark_db_get(struct bnxt_ulp_context *ulp_ctx)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data)
		return NULL;

	return ulp_ctx->cfg_data->mark_tbl;
}

/* Function to set the device id of the hardware. */
int32_t
bnxt_ulp_cntxt_dev_id_set(struct bnxt_ulp_context *ulp_ctx,
			  uint32_t dev_id)
{
	if (ulp_ctx && ulp_ctx->cfg_data) {
		ulp_ctx->cfg_data->dev_id = dev_id;
		return 0;
	}

	return -EINVAL;
}

/* Function to get the device id of the hardware. */
int32_t
bnxt_ulp_cntxt_dev_id_get(struct bnxt_ulp_context *ulp_ctx,
			  uint32_t *dev_id)
{
	if (ulp_ctx && ulp_ctx->cfg_data) {
		*dev_id = ulp_ctx->cfg_data->dev_id;
		return 0;
	}

	return -EINVAL;
}

/* Function to get the table scope id of the EEM table. */
int32_t
bnxt_ulp_cntxt_tbl_scope_id_get(struct bnxt_ulp_context *ulp_ctx,
				uint32_t *tbl_scope_id)
{
	if (ulp_ctx && ulp_ctx->cfg_data) {
		*tbl_scope_id = ulp_ctx->cfg_data->tbl_scope_id;
		return 0;
	}

	return -EINVAL;
}

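/*
 * Typical caller pattern for the accessors above and below (a sketch
 * of what this file itself does in ulp_eem_tbl_scope_init()):
 *
 *	uint32_t dev_id;
 *
 *	if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id))
 *		return -EINVAL;
 */
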
/* Function to set the table scope id of the EEM table. */
int32_t
bnxt_ulp_cntxt_tbl_scope_id_set(struct bnxt_ulp_context *ulp_ctx,
				uint32_t tbl_scope_id)
{
	if (ulp_ctx && ulp_ctx->cfg_data) {
		ulp_ctx->cfg_data->tbl_scope_id = tbl_scope_id;
		return 0;
	}

	return -EINVAL;
}

/* Function to set the tfp session details in the ulp context. */
int32_t
bnxt_ulp_cntxt_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
{
	if (!ulp) {
		BNXT_TF_DBG(ERR, "Invalid arguments\n");
		return -EINVAL;
	}

	/* TBD The tfp should be removed once tf_attach is implemented. */
	ulp->g_tfp = tfp;
	return 0;
}

/* Function to get the tfp session details from the ulp context. */
struct tf *
bnxt_ulp_cntxt_tfp_get(struct bnxt_ulp_context *ulp)
{
	if (!ulp) {
		BNXT_TF_DBG(ERR, "Invalid arguments\n");
		return NULL;
	}
	/* TBD The tfp should be removed once tf_attach is implemented. */
	return ulp->g_tfp;
}

/*
 * Get the device table entry based on the device id.
 *
 * dev_id [in] The device id of the hardware
 *
 * Returns the pointer to the device parameters.
 */
struct bnxt_ulp_device_params *
bnxt_ulp_device_params_get(uint32_t dev_id)
{
	if (dev_id < BNXT_ULP_MAX_NUM_DEVICES)
		return &ulp_device_params[dev_id];
	return NULL;
}

/* Function to set the flow database to the ulp context. */
int32_t
bnxt_ulp_cntxt_ptr2_flow_db_set(struct bnxt_ulp_context *ulp_ctx,
				struct bnxt_ulp_flow_db *flow_db)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data)
		return -EINVAL;

	ulp_ctx->cfg_data->flow_db = flow_db;
	return 0;
}

/* Function to get the flow database from the ulp context. */
struct bnxt_ulp_flow_db *
bnxt_ulp_cntxt_ptr2_flow_db_get(struct bnxt_ulp_context *ulp_ctx)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data)
		return NULL;

	return ulp_ctx->cfg_data->flow_db;
}

/* Function to get the ulp context from the eth device. */
struct bnxt_ulp_context *
bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev *dev)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

	if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) {
		struct bnxt_vf_representor *vfr = dev->data->dev_private;

		bp = vfr->parent_dev->data->dev_private;
	}

	if (!bp) {
		BNXT_TF_DBG(ERR, "Bnxt private data is not initialized\n");
		return NULL;
	}
	return bp->ulp_ctx;
}

/* Function to set the mapper data into the ulp context. */
int32_t
bnxt_ulp_cntxt_ptr2_mapper_data_set(struct bnxt_ulp_context *ulp_ctx,
				    void *mapper_data)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data) {
		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
		return -EINVAL;
	}

	ulp_ctx->cfg_data->mapper_data = mapper_data;
	return 0;
}

/* Function to get the mapper data from the ulp context. */
void *
bnxt_ulp_cntxt_ptr2_mapper_data_get(struct bnxt_ulp_context *ulp_ctx)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data) {
		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
		return NULL;
	}

	return ulp_ctx->cfg_data->mapper_data;
}

/* Function to set the port database to the ulp context. */
int32_t
bnxt_ulp_cntxt_ptr2_port_db_set(struct bnxt_ulp_context *ulp_ctx,
				struct bnxt_ulp_port_db *port_db)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data)
		return -EINVAL;

	ulp_ctx->cfg_data->port_db = port_db;
	return 0;
}

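/*
 * The *_set()/*_get() pairs above follow one pattern: a sub-module
 * (mark manager, flow db, mapper, port db, ...) stores its handle in
 * cfg_data at init time and looks it up again at runtime. A sketch of
 * how the port database is expected to use this pair:
 *
 *	rc = bnxt_ulp_cntxt_ptr2_port_db_set(ulp_ctx, port_db);
 *	...
 *	port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctx);
 */
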
/* Function to get the port database from the ulp context. */
struct bnxt_ulp_port_db *
bnxt_ulp_cntxt_ptr2_port_db_get(struct bnxt_ulp_context *ulp_ctx)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data)
		return NULL;

	return ulp_ctx->cfg_data->port_db;
}

/* Function to set the flow counter info into the context */
int32_t
bnxt_ulp_cntxt_ptr2_fc_info_set(struct bnxt_ulp_context *ulp_ctx,
				struct bnxt_ulp_fc_info *ulp_fc_info)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data) {
		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
		return -EINVAL;
	}

	ulp_ctx->cfg_data->fc_info = ulp_fc_info;

	return 0;
}

/* Function to retrieve the flow counter info from the context. */
struct bnxt_ulp_fc_info *
bnxt_ulp_cntxt_ptr2_fc_info_get(struct bnxt_ulp_context *ulp_ctx)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data)
		return NULL;

	return ulp_ctx->cfg_data->fc_info;
}

/* Function to get the ulp flags from the ulp context. */
int32_t
bnxt_ulp_cntxt_ptr2_ulp_flags_get(struct bnxt_ulp_context *ulp_ctx,
				  uint32_t *flags)
{
	if (!ulp_ctx || !ulp_ctx->cfg_data)
		return -1;

	*flags = ulp_ctx->cfg_data->ulp_flags;
	return 0;
}

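/*
 * Example flag check (illustrative sketch): callers test individual
 * bits of ulp_flags, such as BNXT_ULP_VF_REP_ENABLED which
 * ulp_ctx_init() sets by default:
 *
 *	uint32_t flags;
 *
 *	if (!bnxt_ulp_cntxt_ptr2_ulp_flags_get(ulp_ctx, &flags) &&
 *	    (flags & BNXT_ULP_VF_REP_ENABLED))
 *		; // VF representor support is enabled
 */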