/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2023 Broadcom
 * All rights reserved.
 */

#include "bnxt.h"
#include "bnxt_tf_common.h"
#include "bnxt_ulp_utils.h"
#include "ulp_rte_parser.h"
#include "ulp_matcher.h"
#include "ulp_flow_db.h"
#include "ulp_mapper.h"
#include "ulp_fc_mgr.h"
#include "ulp_port_db.h"
#include "ulp_ha_mgr.h"
#include "ulp_tun.h"
#include <rte_malloc.h>
#include "ulp_template_db_tbl.h"
#include "tfp.h"

/*
 * Sanity-check the pointers handed to the rte_flow entry points.
 *
 * 'error' must be non-NULL (there is nowhere to report otherwise);
 * pattern/actions/attr are rejected when NULL, and a flow may not be
 * marked both ingress and egress.  On failure 'error' is filled in.
 *
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
static int32_t
bnxt_ulp_flow_validate_args(const struct rte_flow_attr *attr,
			    const struct rte_flow_item pattern[],
			    const struct rte_flow_action actions[],
			    struct rte_flow_error *error)
{
	/* Perform the validation of the arguments for null */
	if (unlikely(!error))
		return BNXT_TF_RC_ERROR;

	if (unlikely(!pattern)) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL,
				   "NULL pattern.");
		return BNXT_TF_RC_ERROR;
	}

	if (unlikely(!actions)) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL,
				   "NULL action.");
		return BNXT_TF_RC_ERROR;
	}

	if (unlikely(!attr)) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL,
				   "NULL attribute.");
		return BNXT_TF_RC_ERROR;
	}

	if (unlikely(attr->egress && attr->ingress)) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   attr,
				   "EGRESS AND INGRESS UNSUPPORTED");
		return BNXT_TF_RC_ERROR;
	}
	return BNXT_TF_RC_SUCCESS;
}

/*
 * Copy the direction bits (ingress/egress/transfer) from the rte_flow
 * attributes into the parser params, and record a non-zero group id in
 * the computed fields together with its presence bit.
 */
void
bnxt_ulp_set_dir_attributes(struct ulp_rte_parser_params *params,
			    const struct rte_flow_attr *attr)
{
	/* Set the flow attributes */
	if (attr->egress)
		params->dir_attr |= BNXT_ULP_FLOW_ATTR_EGRESS;
	if (attr->ingress)
		params->dir_attr |= BNXT_ULP_FLOW_ATTR_INGRESS;
#if RTE_VERSION_NUM(17, 11, 10, 16) < RTE_VERSION
	if (attr->transfer)
		params->dir_attr |= BNXT_ULP_FLOW_ATTR_TRANSFER;
#endif
	if (attr->group) {
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_GROUP_ID,
				    rte_cpu_to_le_32(attr->group));
		ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_GROUP_ID);
	}
}

/*
 * Validate the flow priority against the device range and store the
 * internal priority in 'params'.
 *
 * Depending on the device family the "max" priority accessor may return
 * a numerically smaller value than "min"; that ordering selects whether
 * the rte_flow priority is stored directly or inverted (max_p - prio).
 * Flows whose priority falls in the default window are flagged with
 * BNXT_ULP_CF_BIT_DEF_PRIO.
 *
 * Returns 0 on success, -EINVAL when the priority is out of range.
 */
int32_t
bnxt_ulp_set_prio_attribute(struct ulp_rte_parser_params *params,
			    const struct rte_flow_attr *attr)
{
	uint32_t max_p = bnxt_ulp_max_flow_priority_get(params->ulp_ctx);
	uint32_t min_p = bnxt_ulp_min_flow_priority_get(params->ulp_ctx);

	if (max_p < min_p) {
		/* Numerically lower value means higher priority here. */
		if (unlikely(attr->priority > min_p || attr->priority < max_p)) {
			BNXT_DRV_DBG(ERR, "invalid prio, not in range %u:%u\n",
				     max_p, min_p);
			return -EINVAL;
		}
		params->priority = attr->priority;
	} else {
		if (unlikely(attr->priority > max_p || attr->priority < min_p)) {
			BNXT_DRV_DBG(ERR, "invalid prio, not in range %u:%u\n",
				     min_p, max_p);
			return -EINVAL;
		}
		/* Invert so a higher rte_flow priority maps downwards. */
		params->priority = max_p - attr->priority;
	}
	/* flows with priority zero is considered as highest and put in EM */
	if (attr->priority >=
	    bnxt_ulp_default_app_priority_get(params->ulp_ctx) &&
	    attr->priority <= bnxt_ulp_max_def_priority_get(params->ulp_ctx)) {
		ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_DEF_PRIO);
	}
	return 0;
}

/*
 * Seed the computed-field array with per-port defaults: the incoming
 * interface, the device port id, and an invalid SVIF marker (the real
 * SVIF is filled in later by the parser).
 */
void
bnxt_ulp_init_parser_cf_defaults(struct ulp_rte_parser_params *params,
				 uint16_t port_id)
{
	/* Set up defaults for Comp field */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_INCOMING_IF, port_id);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DEV_PORT_ID, port_id);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
			    BNXT_ULP_INVALID_SVIF_VAL);
}

/*
 * Derive the computed-field header bitmaps from the parsed header bits:
 * strip internal tunnel bits, add the UNTAGGED/NON_TUNNEL/L2_ONLY
 * pseudo-bits, publish the profile bitmap, then collapse TCP/UDP into
 * the generic L4_FLOW bits for the header bitmap.
 */
static void
bnxt_ulp_init_cf_header_bitmap(struct bnxt_ulp_mapper_parms *params)
{
	uint64_t hdr_bits = 0;

	/* Remove the internal tunnel bits */
	hdr_bits = params->hdr_bitmap->bits;
	ULP_BITMAP_RESET(hdr_bits, BNXT_ULP_HDR_BIT_F2);

	/* Add untag bits */
	if (!ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_OO_VLAN))
		ULP_BITMAP_SET(hdr_bits, BNXT_ULP_HDR_BIT_OO_UNTAGGED);
	if (!ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_OI_VLAN))
		ULP_BITMAP_SET(hdr_bits, BNXT_ULP_HDR_BIT_OI_UNTAGGED);
	if (!ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_IO_VLAN))
		ULP_BITMAP_SET(hdr_bits, BNXT_ULP_HDR_BIT_IO_UNTAGGED);
	if (!ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_II_VLAN))
		ULP_BITMAP_SET(hdr_bits, BNXT_ULP_HDR_BIT_II_UNTAGGED);

	/* Add non-tunnel bit */
	if (!ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL))
		ULP_BITMAP_SET(hdr_bits, BNXT_ULP_HDR_BIT_NON_TUNNEL);

	/* Add l2 only bit: no L3 on the relevant (outer or inner) layer. */
	if ((!ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL) &&
	     !ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_O_IPV4) &&
	     !ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_O_IPV6)) ||
	    (ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL) &&
	     !ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_I_IPV4) &&
	     !ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_I_IPV6))) {
		ULP_BITMAP_SET(hdr_bits, BNXT_ULP_HDR_BIT_L2_ONLY);
		ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_L2_ONLY);
	}

	/* Profile bitmap keeps the distinct TCP/UDP bits. */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PROFILE_BITMAP, hdr_bits);

	/* Update the l4 protocol bits */
	if ((ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_O_TCP) ||
	     ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_O_UDP))) {
		ULP_BITMAP_RESET(hdr_bits, BNXT_ULP_HDR_BIT_O_TCP);
		ULP_BITMAP_RESET(hdr_bits, BNXT_ULP_HDR_BIT_O_UDP);
		ULP_BITMAP_SET(hdr_bits, BNXT_ULP_HDR_BIT_O_L4_FLOW);
	}

	if ((ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_I_TCP) ||
	     ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_I_UDP))) {
		ULP_BITMAP_RESET(hdr_bits, BNXT_ULP_HDR_BIT_I_TCP);
		ULP_BITMAP_RESET(hdr_bits, BNXT_ULP_HDR_BIT_I_UDP);
		ULP_BITMAP_SET(hdr_bits, BNXT_ULP_HDR_BIT_I_L4_FLOW);
	}
	/*update the comp field header bits */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_HDR_BITMAP, hdr_bits);
}

/*
 * Populate the mapper parameters from the parser output before handing
 * the flow to the ulp mapper.  Copies the template ids, bitmaps and
 * field arrays, publishes the signature/function computed fields, then
 * records HA-region, socket-direct vport and socket-direct svif state
 * where the respective features are active.
 */
void
bnxt_ulp_init_mapper_params(struct bnxt_ulp_mapper_parms *mparms,
			    struct ulp_rte_parser_params *params,
			    enum bnxt_ulp_fdb_type flow_type)
{
	uint32_t ulp_flags = 0;

	mparms->flow_type = flow_type;
	mparms->app_priority = params->priority;
	mparms->class_tid = params->class_id;
	mparms->act_tid = params->act_tmpl;
	mparms->func_id = params->func_id;
	mparms->hdr_bitmap = &params->hdr_bitmap;
	mparms->enc_hdr_bitmap = &params->enc_hdr_bitmap;
	mparms->hdr_field = params->hdr_field;
	mparms->enc_field = params->enc_field;
	mparms->comp_fld = params->comp_fld;
	mparms->act_bitmap = &params->act_bitmap;
	mparms->act_prop = &params->act_prop;
	mparms->parent_flow = params->parent_flow;
	mparms->child_flow = params->child_flow;
	mparms->fld_bitmap = &params->fld_bitmap;
	mparms->flow_pattern_id = params->flow_pattern_id;
	mparms->act_pattern_id = params->act_pattern_id;
	mparms->wc_field_bitmap = params->wc_field_bitmap;
	mparms->app_id = params->app_id;
	mparms->tun_idx = params->tun_idx;
	mparms->cf_bitmap = params->cf_bitmap;
	mparms->exclude_field_bitmap = params->exclude_field_bitmap;

	/* update the signature fields into the computed field list */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_HDR_SIG_ID,
			    params->class_info_idx);

	/* update the header bitmap */
	bnxt_ulp_init_cf_header_bitmap(mparms);

	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FLOW_SIG_ID,
			    params->flow_sig_id);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FUNCTION_ID,
			    params->func_id);

	if (bnxt_ulp_cntxt_ptr2_ulp_flags_get(params->ulp_ctx, &ulp_flags))
		return;

	/* update the WC Priority flag */
	if (ULP_HIGH_AVAIL_IS_ENABLED(ulp_flags)) {
		enum ulp_ha_mgr_region region = ULP_HA_REGION_LOW;
		int32_t rc;

		rc = ulp_ha_mgr_region_get(params->ulp_ctx, &region);
		/* On failure fall through with the LOW default region. */
		if (rc)
			BNXT_DRV_DBG(ERR, "Unable to get WC region\n");
		if (region == ULP_HA_REGION_HI)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_WC_IS_HA_HIGH_REG,
					    1);
	} else {
		ULP_COMP_FLD_IDX_WR(params,
				    BNXT_ULP_CF_IDX_HA_SUPPORT_DISABLED,
				    1);
	}

	/* Update the socket direct flag */
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_SVIF_IGNORE)) {
		uint32_t ifindex;
		uint16_t vport;

		/* Get the port db ifindex */
		if (unlikely(ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
							       params->port_id,
							       &ifindex))) {
			BNXT_DRV_DBG(ERR, "Invalid port id %u\n",
				     params->port_id);
			return;
		}
		/* Update the phy port of the other interface */
		if (unlikely(ulp_port_db_vport_get(params->ulp_ctx, ifindex, &vport))) {
			BNXT_DRV_DBG(ERR, "Invalid port if index %u\n",
				     ifindex);
			return;
		}
		/* The peer of vport 1 is 2 and vice versa. */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SOCKET_DIRECT_VPORT,
				    (vport == 1) ? 2 : 1);
	}

	/* Update the socket direct svif when socket_direct feature enabled. */
	if (ULP_BITMAP_ISSET(bnxt_ulp_feature_bits_get(params->ulp_ctx),
			     BNXT_ULP_FEATURE_BIT_SOCKET_DIRECT)) {
		enum bnxt_ulp_intf_type intf_type;
		/* For ingress flow on trusted_vf port */
		intf_type = bnxt_pmd_get_interface_type(params->port_id);
		if (intf_type == BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
			uint16_t svif;
			/* Get the socket direct svif of the given dev port */
			if (unlikely(ulp_port_db_dev_port_socket_direct_svif_get(params->ulp_ctx,
										 params->port_id,
										 &svif))) {
				BNXT_DRV_DBG(ERR, "Invalid port id %u\n",
					     params->port_id);
				return;
			}
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SOCKET_DIRECT_SVIF, svif);
		}
	}
}

/* Function to create the rte flow.
 */
static struct rte_flow *
bnxt_ulp_flow_create(struct rte_eth_dev *dev,
		     const struct rte_flow_attr *attr,
		     const struct rte_flow_item pattern[],
		     const struct rte_flow_action actions[],
		     struct rte_flow_error *error)
{
	struct bnxt_ulp_mapper_parms mparms = { 0 };
	struct ulp_rte_parser_params params;
	struct bnxt_ulp_context *ulp_ctx;
	int rc, ret = BNXT_TF_RC_ERROR;
	struct rte_flow *flow_id;
	uint16_t func_id;
	uint32_t fid;

	/* Mark 'error' untouched so the exit path can tell whether a more
	 * specific error was already recorded further down.
	 */
	if (error != NULL)
		error->type = RTE_FLOW_ERROR_TYPE_NONE;

	if (unlikely(bnxt_ulp_flow_validate_args(attr,
						 pattern, actions,
						 error) == BNXT_TF_RC_ERROR)) {
		BNXT_DRV_DBG(ERR, "Invalid arguments being passed\n");
		goto flow_error;
	}

	ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev);
	if (unlikely(!ulp_ctx)) {
		BNXT_DRV_DBG(ERR, "ULP context is not initialized\n");
		goto flow_error;
	}

	/* Initialize the parser params */
	memset(&params, 0, sizeof(struct ulp_rte_parser_params));
	params.ulp_ctx = ulp_ctx;
	params.port_id = dev->data->port_id;

	if (unlikely(bnxt_ulp_cntxt_app_id_get(params.ulp_ctx, &params.app_id))) {
		BNXT_DRV_DBG(ERR, "failed to get the app id\n");
		goto flow_error;
	}

	/* Set the flow attributes */
	bnxt_ulp_set_dir_attributes(&params, attr);

	if (unlikely(bnxt_ulp_set_prio_attribute(&params, attr)))
		goto flow_error;

	bnxt_ulp_init_parser_cf_defaults(&params, dev->data->port_id);

	/* Get the function id */
	if (unlikely(ulp_port_db_port_func_id_get(ulp_ctx,
						  dev->data->port_id,
						  &func_id))) {
		BNXT_DRV_DBG(ERR, "conversion of port to func id failed\n");
		goto flow_error;
	}

	/* Protect flow creation */
	if (unlikely(bnxt_ulp_cntxt_acquire_fdb_lock(ulp_ctx))) {
		BNXT_DRV_DBG(ERR, "Flow db lock acquire failed\n");
		goto flow_error;
	}

	/* Allocate a Flow ID for attaching all resources for the flow to.
	 * Once allocated, all errors have to walk the list of resources and
	 * free each of them.
	 */
	rc = ulp_flow_db_fid_alloc(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR,
				   func_id, &fid);
	if (unlikely(rc)) {
		BNXT_DRV_DBG(ERR, "Unable to allocate flow table entry\n");
		goto release_lock;
	}

	/* Parse the rte flow pattern */
	ret = bnxt_ulp_rte_parser_hdr_parse(pattern, &params);
	if (unlikely(ret != BNXT_TF_RC_SUCCESS))
		goto free_fid;

	/* Parse the rte flow action */
	ret = bnxt_ulp_rte_parser_act_parse(actions, &params);
	if (unlikely(ret != BNXT_TF_RC_SUCCESS))
		goto free_fid;

	mparms.flow_id = fid;
	mparms.func_id = func_id;
	mparms.port_id = dev->data->port_id;

	/* Perform the rte flow post process */
	bnxt_ulp_rte_parser_post_process(&params);

	/* do the tunnel offload process if any */
	ret = ulp_tunnel_offload_process(&params);
	if (unlikely(ret == BNXT_TF_RC_ERROR))
		goto free_fid;

	/* Select the class and action templates for this pattern/action set */
	ret = ulp_matcher_pattern_match(&params, &params.class_id);
	if (unlikely(ret != BNXT_TF_RC_SUCCESS))
		goto free_fid;

	ret = ulp_matcher_action_match(&params, &params.act_tmpl);
	if (unlikely(ret != BNXT_TF_RC_SUCCESS))
		goto free_fid;

	bnxt_ulp_init_mapper_params(&mparms, &params,
				    BNXT_ULP_FDB_TYPE_REGULAR);
	/* Call the ulp mapper to create the flow in the hardware.
	 */
	ret = ulp_mapper_flow_create(ulp_ctx, &mparms,
				     (void *)error);
	if (unlikely(ret))
		goto free_fid;

	bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx);

	/* The flow handle returned to the caller is the fid itself. */
	flow_id = (struct rte_flow *)((uintptr_t)fid);
	return flow_id;

free_fid:
	ulp_flow_db_fid_free(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR, fid);
release_lock:
	bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx);
flow_error:
	/* Only set a generic error if no one recorded a specific one. */
	if (unlikely(error != NULL &&
		     error->type == RTE_FLOW_ERROR_TYPE_NONE))
		rte_flow_error_set(error, ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow.");
	return NULL;
}

/* Function to validate the rte flow. */
static int
bnxt_ulp_flow_validate(struct rte_eth_dev *dev,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct rte_flow_error *error)
{
	struct ulp_rte_parser_params params;
	struct bnxt_ulp_context *ulp_ctx;
	uint32_t class_id, act_tmpl;
	int ret = BNXT_TF_RC_ERROR;

	if (unlikely(bnxt_ulp_flow_validate_args(attr,
						 pattern, actions,
						 error) == BNXT_TF_RC_ERROR)) {
		BNXT_DRV_DBG(ERR, "Invalid arguments being passed\n");
		goto parse_error;
	}

	ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev);
	if (unlikely(!ulp_ctx)) {
		BNXT_DRV_DBG(ERR, "ULP context is not initialized\n");
		goto parse_error;
	}

	/* Initialize the parser params */
	memset(&params, 0, sizeof(struct ulp_rte_parser_params));
	params.ulp_ctx = ulp_ctx;

	if (unlikely(bnxt_ulp_cntxt_app_id_get(params.ulp_ctx, &params.app_id))) {
		BNXT_DRV_DBG(ERR, "failed to get the app id\n");
		goto parse_error;
	}

	/* Set the flow attributes */
	bnxt_ulp_set_dir_attributes(&params, attr);

	if (unlikely(bnxt_ulp_set_prio_attribute(&params, attr)))
		goto parse_error;

	bnxt_ulp_init_parser_cf_defaults(&params, dev->data->port_id);

	/* Parse the rte flow pattern */
	ret = bnxt_ulp_rte_parser_hdr_parse(pattern, &params);
	if (unlikely(ret != BNXT_TF_RC_SUCCESS))
		goto parse_error;

	/* Parse the rte flow action */
	ret = bnxt_ulp_rte_parser_act_parse(actions, &params);
	if (unlikely(ret != BNXT_TF_RC_SUCCESS))
		goto parse_error;

	/* Perform the rte flow post process */
	bnxt_ulp_rte_parser_post_process(&params);

	/* do the tunnel offload process if any */
	ret = ulp_tunnel_offload_process(&params);
	if (unlikely(ret == BNXT_TF_RC_ERROR))
		goto parse_error;

	/* Validation only: the matched template ids are discarded. */
	ret = ulp_matcher_pattern_match(&params, &class_id);

	if (unlikely(ret != BNXT_TF_RC_SUCCESS))
		goto parse_error;

	ret = ulp_matcher_action_match(&params, &act_tmpl);
	if (unlikely(ret != BNXT_TF_RC_SUCCESS))
		goto parse_error;

	/* all good return success */
	return ret;

parse_error:
	rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to validate flow.");
	return -EINVAL;
}

/* Function to destroy the rte flow.
 */
int
bnxt_ulp_flow_destroy(struct rte_eth_dev *dev,
		      struct rte_flow *flow,
		      struct rte_flow_error *error)
{
	struct bnxt_ulp_context *ulp_ctx;
	uint32_t flow_id;
	uint16_t func_id;
	int ret;

	/* Mark 'error' untouched so a later specific error is not clobbered. */
	if (error != NULL)
		error->type = RTE_FLOW_ERROR_TYPE_NONE;

	ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev);
	if (unlikely(!ulp_ctx)) {
		BNXT_DRV_DBG(ERR, "ULP context is not initialized\n");
		if (error)
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Failed to destroy flow.");
		return -EINVAL;
	}

	/* The rte_flow handle is the flow db fid encoded as a pointer. */
	flow_id = (uint32_t)(uintptr_t)flow;

	if (unlikely(ulp_port_db_port_func_id_get(ulp_ctx,
						  dev->data->port_id,
						  &func_id))) {
		BNXT_DRV_DBG(ERR, "conversion of port to func id failed\n");
		if (error)
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Failed to destroy flow.");
		return -EINVAL;
	}

	/* Reject handles that do not belong to this function. */
	if (unlikely(ulp_flow_db_validate_flow_func(ulp_ctx, flow_id, func_id) ==
		     false)) {
		BNXT_DRV_DBG(ERR, "Incorrect device params\n");
		if (error)
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Failed to destroy flow.");
		return -EINVAL;
	}

	if (unlikely(bnxt_ulp_cntxt_acquire_fdb_lock(ulp_ctx))) {
		BNXT_DRV_DBG(ERR, "Flow db lock acquire failed\n");
		return -EINVAL;
	}
	ret = ulp_mapper_flow_destroy(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR,
				      flow_id, (void *)error);
	if (unlikely(ret)) {
		BNXT_DRV_DBG(ERR, "Failed to destroy flow.\n");
		/* Only set a generic error if none was recorded deeper down. */
		if (error != NULL &&
		    error->type == RTE_FLOW_ERROR_TYPE_NONE)
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Failed to destroy flow.");
	}
	bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx);

	return ret;
}

/* Function to destroy the rte flows.
 */
static int32_t
bnxt_ulp_flow_flush(struct rte_eth_dev *eth_dev,
		    struct rte_flow_error *error)
{
	struct bnxt_ulp_context *ulp_ctx;
	int32_t ret = 0;
	uint16_t func_id;

	ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
	/* Nothing to flush when the context was never set up. */
	if (unlikely(!ulp_ctx))
		return ret;

	/* Free the resources for the last device */
	if (ulp_ctx_deinit_allowed(ulp_ctx)) {
		ret = ulp_flow_db_session_flow_flush(ulp_ctx);
	} else if (bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctx)) {
		/* Otherwise only flush the flows owned by this function. */
		ret = ulp_port_db_port_func_id_get(ulp_ctx,
						   eth_dev->data->port_id,
						   &func_id);
		if (!ret)
			ret = ulp_flow_db_function_flow_flush(ulp_ctx, func_id);
		else
			BNXT_DRV_DBG(ERR, "convert port to func id failed\n");
	}
	if (unlikely(ret))
		rte_flow_error_set(error, ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to flush flow.");
	return ret;
}

/*
 * Fill the rte_flow_query_rss 'rss_conf' argument passed
 * in the rte_flow_query() with the values obtained and
 * accumulated locally.
605 * 606 * ctxt [in] The ulp context for the flow counter manager 607 * 608 * flow_id [in] The HW flow ID 609 * 610 * rss_conf [out] The rte_flow_query_count 'data' that is set 611 * 612 */ 613 static int ulp_flow_query_rss_get(struct bnxt_ulp_context *ctxt, 614 uint32_t flow_id, 615 struct rte_flow_action_rss *rss_conf) 616 { 617 struct ulp_flow_db_res_params params; 618 uint32_t nxt_resource_index = 0; 619 bool found_cntr_resource = false; 620 struct bnxt *bp; 621 uint16_t vnic_id = 0; 622 int rc = 0; 623 624 bp = bnxt_ulp_cntxt_bp_get(ctxt); 625 if (unlikely(!bp)) { 626 BNXT_DRV_DBG(ERR, "Failed to get bp from ulp cntxt\n"); 627 return -EINVAL; 628 } 629 630 if (unlikely(bnxt_ulp_cntxt_acquire_fdb_lock(ctxt))) { 631 BNXT_DRV_DBG(ERR, "Flow db lock acquire failed\n"); 632 return -EINVAL; 633 } 634 do { 635 rc = ulp_flow_db_resource_get(ctxt, 636 BNXT_ULP_FDB_TYPE_REGULAR, 637 flow_id, 638 &nxt_resource_index, 639 ¶ms); 640 if (params.resource_func == 641 BNXT_ULP_RESOURCE_FUNC_VNIC_TABLE && 642 (params.resource_sub_type == 643 BNXT_ULP_RESOURCE_SUB_TYPE_VNIC_TABLE_RSS || 644 params.resource_sub_type == 645 BNXT_ULP_RESOURCE_SUB_TYPE_VNIC_TABLE_QUEUE)) { 646 vnic_id = params.resource_hndl; 647 found_cntr_resource = true; 648 break; 649 } 650 651 } while (!rc && nxt_resource_index); 652 653 if (found_cntr_resource) 654 bnxt_vnic_rss_query_info_fill(bp, rss_conf, vnic_id); 655 656 bnxt_ulp_cntxt_release_fdb_lock(ctxt); 657 658 return rc; 659 } 660 661 /* Function to query the rte flows. 
*/ 662 static int32_t 663 bnxt_ulp_flow_query(struct rte_eth_dev *eth_dev, 664 struct rte_flow *flow, 665 const struct rte_flow_action *action, 666 void *data, 667 struct rte_flow_error *error) 668 { 669 int rc = 0; 670 struct bnxt_ulp_context *ulp_ctx; 671 struct rte_flow_action_rss *rss_conf; 672 struct rte_flow_query_count *count; 673 enum bnxt_ulp_device_id dev_id; 674 uint32_t flow_id; 675 676 ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev); 677 if (unlikely(!ulp_ctx)) { 678 BNXT_DRV_DBG(ERR, "ULP context is not initialized\n"); 679 rte_flow_error_set(error, EINVAL, 680 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 681 "Failed to query flow."); 682 return -EINVAL; 683 } 684 685 rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id); 686 if (rc) { 687 BNXT_DRV_DBG(ERR, "Can't identify the device\n"); 688 rte_flow_error_set(error, EINVAL, 689 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 690 "Failed to query flow."); 691 return -EINVAL; 692 } 693 694 flow_id = (uint32_t)(uintptr_t)flow; 695 696 switch (action->type) { 697 case RTE_FLOW_ACTION_TYPE_RSS: 698 rss_conf = (struct rte_flow_action_rss *)data; 699 rc = ulp_flow_query_rss_get(ulp_ctx, flow_id, rss_conf); 700 if (unlikely(rc)) { 701 rte_flow_error_set(error, EINVAL, 702 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 703 "Failed to query RSS info."); 704 } 705 706 break; 707 case RTE_FLOW_ACTION_TYPE_COUNT: 708 count = data; 709 if (dev_id == BNXT_ULP_DEVICE_ID_THOR2) 710 rc = ulp_sc_mgr_query_count_get(ulp_ctx, flow_id, count); 711 else 712 rc = ulp_fc_mgr_query_count_get(ulp_ctx, flow_id, count); 713 714 if (unlikely(rc)) { 715 rte_flow_error_set(error, EINVAL, 716 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 717 "Failed to query flow."); 718 } 719 break; 720 default: 721 rte_flow_error_set(error, -rc, RTE_FLOW_ERROR_TYPE_ACTION_NUM, 722 NULL, "Unsupported action item"); 723 } 724 725 return rc; 726 } 727 728 static int32_t 729 bnxt_ulp_action_handle_chk_args(const struct rte_flow_action *action, 730 const struct rte_flow_indir_action_conf *conf) 731 { 
732 if (!action || !conf) 733 return BNXT_TF_RC_ERROR; 734 /* shared action only allowed to have one direction */ 735 if (conf->ingress == 1 && conf->egress == 1) 736 return BNXT_TF_RC_ERROR; 737 /* shared action must have at least one direction */ 738 if (conf->ingress == 0 && conf->egress == 0) 739 return BNXT_TF_RC_ERROR; 740 return BNXT_TF_RC_SUCCESS; 741 } 742 743 static inline void 744 bnxt_ulp_set_action_handle_dir_attr(struct ulp_rte_parser_params *params, 745 const struct rte_flow_indir_action_conf *conf) 746 { 747 if (conf->ingress == 1) 748 params->dir_attr |= BNXT_ULP_FLOW_ATTR_INGRESS; 749 else if (conf->egress == 1) 750 params->dir_attr |= BNXT_ULP_FLOW_ATTR_EGRESS; 751 } 752 753 static struct rte_flow_action_handle * 754 bnxt_ulp_action_handle_create(struct rte_eth_dev *dev, 755 const struct rte_flow_indir_action_conf *conf, 756 const struct rte_flow_action *action, 757 struct rte_flow_error *error) 758 { 759 enum bnxt_ulp_intf_type port_type = BNXT_ULP_INTF_TYPE_INVALID; 760 struct bnxt_ulp_mapper_parms mparms = { 0 }; 761 struct ulp_rte_parser_params params; 762 struct bnxt_ulp_context *ulp_ctx; 763 uint32_t act_tid; 764 uint16_t func_id; 765 uint32_t ifindex; 766 int ret = BNXT_TF_RC_ERROR; 767 const struct rte_flow_action actions[2] = { 768 { 769 .type = action->type, 770 .conf = action->conf 771 }, 772 { 773 .type = RTE_FLOW_ACTION_TYPE_END 774 } 775 }; 776 777 if (error != NULL) 778 error->type = RTE_FLOW_ERROR_TYPE_NONE; 779 780 if (unlikely(bnxt_ulp_action_handle_chk_args(action, conf) != BNXT_TF_RC_SUCCESS)) 781 goto parse_error; 782 783 ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev); 784 if (unlikely(!ulp_ctx)) { 785 BNXT_DRV_DBG(ERR, "ULP context is not initialized\n"); 786 goto parse_error; 787 } 788 789 /* Initialize the parser params */ 790 memset(¶ms, 0, sizeof(struct ulp_rte_parser_params)); 791 params.ulp_ctx = ulp_ctx; 792 793 ULP_BITMAP_SET(params.act_bitmap.bits, BNXT_ULP_ACT_BIT_SHARED); 794 795 /* Set the shared action direction 
attribute */ 796 bnxt_ulp_set_action_handle_dir_attr(¶ms, conf); 797 798 /* perform the conversion from dpdk port to bnxt ifindex */ 799 if (unlikely(ulp_port_db_dev_port_to_ulp_index(ulp_ctx, 800 dev->data->port_id, 801 &ifindex))) { 802 BNXT_DRV_DBG(ERR, "Port id is not valid\n"); 803 goto parse_error; 804 } 805 port_type = ulp_port_db_port_type_get(ulp_ctx, ifindex); 806 if (unlikely(port_type == BNXT_ULP_INTF_TYPE_INVALID)) { 807 BNXT_DRV_DBG(ERR, "Port type is not valid\n"); 808 goto parse_error; 809 } 810 811 bnxt_ulp_init_parser_cf_defaults(¶ms, dev->data->port_id); 812 813 /* Emulating the match port for direction processing */ 814 ULP_COMP_FLD_IDX_WR(¶ms, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, 815 port_type); 816 817 if ((params.dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) && 818 port_type == BNXT_ULP_INTF_TYPE_VF_REP) { 819 ULP_COMP_FLD_IDX_WR(¶ms, BNXT_ULP_CF_IDX_DIRECTION, 820 BNXT_ULP_DIR_EGRESS); 821 } else { 822 /* Assign the input direction */ 823 if (params.dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) 824 ULP_COMP_FLD_IDX_WR(¶ms, BNXT_ULP_CF_IDX_DIRECTION, 825 BNXT_ULP_DIR_INGRESS); 826 else 827 ULP_COMP_FLD_IDX_WR(¶ms, BNXT_ULP_CF_IDX_DIRECTION, 828 BNXT_ULP_DIR_EGRESS); 829 } 830 831 /* perform the conversion from dpdk port to bnxt ifindex */ 832 if (unlikely(ulp_port_db_dev_port_to_ulp_index(ulp_ctx, 833 dev->data->port_id, 834 &ifindex))) { 835 BNXT_DRV_DBG(ERR, "Port id is not valid\n"); 836 goto parse_error; 837 } 838 port_type = ulp_port_db_port_type_get(ulp_ctx, ifindex); 839 if (unlikely(port_type == BNXT_ULP_INTF_TYPE_INVALID)) { 840 BNXT_DRV_DBG(ERR, "Port type is not valid\n"); 841 goto parse_error; 842 } 843 844 bnxt_ulp_init_parser_cf_defaults(¶ms, dev->data->port_id); 845 846 /* Emulating the match port for direction processing */ 847 ULP_COMP_FLD_IDX_WR(¶ms, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, 848 port_type); 849 850 if ((params.dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) && 851 port_type == BNXT_ULP_INTF_TYPE_VF_REP) { 852 ULP_COMP_FLD_IDX_WR(¶ms, 
BNXT_ULP_CF_IDX_DIRECTION, 853 BNXT_ULP_DIR_EGRESS); 854 } else { 855 /* Assign the input direction */ 856 if (params.dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) 857 ULP_COMP_FLD_IDX_WR(¶ms, BNXT_ULP_CF_IDX_DIRECTION, 858 BNXT_ULP_DIR_INGRESS); 859 else 860 ULP_COMP_FLD_IDX_WR(¶ms, BNXT_ULP_CF_IDX_DIRECTION, 861 BNXT_ULP_DIR_EGRESS); 862 } 863 864 /* Parse the shared action */ 865 ret = bnxt_ulp_rte_parser_act_parse(actions, ¶ms); 866 if (unlikely(ret != BNXT_TF_RC_SUCCESS)) 867 goto parse_error; 868 869 /* Perform the rte flow post process */ 870 bnxt_ulp_rte_parser_post_process(¶ms); 871 872 /* do the tunnel offload process if any */ 873 ret = ulp_tunnel_offload_process(¶ms); 874 if (unlikely(ret == BNXT_TF_RC_ERROR)) 875 goto parse_error; 876 877 ret = ulp_matcher_action_match(¶ms, &act_tid); 878 if (unlikely(ret != BNXT_TF_RC_SUCCESS)) 879 goto parse_error; 880 881 bnxt_ulp_init_mapper_params(&mparms, ¶ms, 882 BNXT_ULP_FDB_TYPE_REGULAR); 883 mparms.act_tid = act_tid; 884 885 /* Get the function id */ 886 if (unlikely(ulp_port_db_port_func_id_get(ulp_ctx, 887 dev->data->port_id, 888 &func_id))) { 889 BNXT_DRV_DBG(ERR, "conversion of port to func id failed\n"); 890 goto parse_error; 891 } 892 893 /* Protect flow creation */ 894 if (unlikely(bnxt_ulp_cntxt_acquire_fdb_lock(ulp_ctx))) { 895 BNXT_DRV_DBG(ERR, "Flow db lock acquire failed\n"); 896 goto parse_error; 897 } 898 899 ret = ulp_mapper_flow_create(params.ulp_ctx, &mparms, 900 (void *)error); 901 bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx); 902 903 if (unlikely(ret)) 904 goto parse_error; 905 906 return (struct rte_flow_action_handle *)((uintptr_t)mparms.shared_hndl); 907 908 parse_error: 909 if (error != NULL && 910 error->type == RTE_FLOW_ERROR_TYPE_NONE) 911 rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 912 "Failed to create shared action."); 913 return NULL; 914 } 915 916 static int 917 bnxt_ulp_action_handle_destroy(struct rte_eth_dev *dev, 918 struct rte_flow_action_handle *shared_hndl, 919 
			       struct rte_flow_error *error)
{
	struct bnxt_ulp_mapper_parms mparms = { 0 };
	struct bnxt_ulp_shared_act_info *act_info;
	struct ulp_rte_parser_params params;
	struct ulp_rte_act_prop *act_prop;
	struct bnxt_ulp_context *ulp_ctx;
	enum bnxt_ulp_direction_type dir;
	uint32_t act_tid, act_info_entries;
	int ret = BNXT_TF_RC_ERROR;
	uint32_t shared_action_type;
	uint64_t tmp64;

	/* Mark the error as unset so parse_error can tell whether a
	 * callee already reported a more specific error.
	 */
	if (error != NULL)
		error->type = RTE_FLOW_ERROR_TYPE_NONE;

	ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev);
	if (unlikely(!ulp_ctx)) {
		BNXT_DRV_DBG(ERR, "ULP context is not initialized\n");
		goto parse_error;
	}

	if (unlikely(!shared_hndl)) {
		BNXT_DRV_DBG(ERR, "Invalid argument of shared handle\n");
		goto parse_error;
	}

	/* Taking the address before the memset is fine: memset only
	 * clears the pointed-to storage, the pointer stays valid.
	 */
	act_prop = &params.act_prop;
	memset(&params, 0, sizeof(struct ulp_rte_parser_params));
	params.ulp_ctx = ulp_ctx;

	if (unlikely(bnxt_ulp_cntxt_app_id_get(ulp_ctx, &params.app_id))) {
		BNXT_DRV_DBG(ERR, "failed to get the app id\n");
		goto parse_error;
	}
	/* The template will delete the entry if there are no references */
	if (unlikely(bnxt_get_action_handle_type(shared_hndl, &shared_action_type))) {
		BNXT_DRV_DBG(ERR, "Invalid shared handle\n");
		goto parse_error;
	}

	/* Validate the decoded action type against the shared action table */
	act_info_entries = 0;
	act_info = bnxt_ulp_shared_act_info_get(&act_info_entries);
	if (unlikely(shared_action_type >= act_info_entries || !act_info)) {
		BNXT_DRV_DBG(ERR, "Invalid shared handle\n");
		goto parse_error;
	}

	/* Build the action bitmap: the action's own bit plus DELETE so the
	 * mapper templates perform a destroy rather than a create.
	 */
	ULP_BITMAP_SET(params.act_bitmap.bits,
		       act_info[shared_action_type].act_bitmask);
	ULP_BITMAP_SET(params.act_bitmap.bits, BNXT_ULP_ACT_BIT_DELETE);

	ret = bnxt_get_action_handle_direction(shared_hndl, &dir);
	if (unlikely(ret)) {
		BNXT_DRV_DBG(ERR, "Invalid shared handle dir\n");
		goto parse_error;
	}

	if (dir == BNXT_ULP_DIR_EGRESS) {
		params.dir_attr = BNXT_ULP_FLOW_ATTR_EGRESS;
		ULP_BITMAP_SET(params.act_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
	} else {
		params.dir_attr = BNXT_ULP_FLOW_ATTR_INGRESS;
		ULP_BITMAP_SET(params.act_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_ING);
	}

	/* Store the handle index big-endian in the action property blob
	 * so the mapper can locate the shared action entry.
	 */
	tmp64 = tfp_cpu_to_be_64((uint64_t)
				 bnxt_get_action_handle_index(shared_hndl));

	memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE],
	       &tmp64, BNXT_ULP_ACT_PROP_SZ_SHARED_HANDLE);

	/* Match against the same templates used at create time; the
	 * DELETE bit selects the destroy behavior.
	 */
	ret = ulp_matcher_action_match(&params, &act_tid);
	if (unlikely(ret != BNXT_TF_RC_SUCCESS))
		goto parse_error;

	bnxt_ulp_init_mapper_params(&mparms, &params,
				    BNXT_ULP_FDB_TYPE_REGULAR);
	mparms.act_tid = act_tid;

	/* Serialize flow db access with other create/destroy paths */
	if (unlikely(bnxt_ulp_cntxt_acquire_fdb_lock(ulp_ctx))) {
		BNXT_DRV_DBG(ERR, "Flow db lock acquire failed\n");
		goto parse_error;
	}

	ret = ulp_mapper_flow_create(ulp_ctx, &mparms,
				     (void *)error);
	bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx);
	if (unlikely(ret))
		goto parse_error;

	return 0;

parse_error:
	/* Only set the error if no earlier callee already filled it in */
	if (error != NULL &&
	    error->type == RTE_FLOW_ERROR_TYPE_NONE)
		rte_flow_error_set(error, BNXT_TF_RC_ERROR,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy shared action.");
	return -EINVAL;
}

/* Tunnel offload Apis */
#define BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS	1

/* rte_flow tunnel_decap_set callback: hand the application the PMD
 * action used to decap the given tunnel (VXLAN only).
 */
static int
bnxt_ulp_tunnel_decap_set(struct rte_eth_dev *eth_dev,
			  struct rte_flow_tunnel *tunnel,
			  struct rte_flow_action **pmd_actions,
			  uint32_t *num_of_actions,
			  struct rte_flow_error *error)
{
	struct bnxt_ulp_context *ulp_ctx;
	struct bnxt_flow_app_tun_ent *tun_entry;
	int32_t rc = 0;

	ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
	if (unlikely(ulp_ctx == NULL)) {
		BNXT_DRV_DBG(ERR, "ULP context is not initialized\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "ULP context uninitialized");
		return -EINVAL;
	}

	if (unlikely(tunnel == NULL)) {
		BNXT_DRV_DBG(ERR, "No tunnel specified\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "no tunnel specified");
		return -EINVAL;
	}

	/* Only VXLAN tunnel offload is supported */
	if (unlikely(tunnel->type != RTE_FLOW_ITEM_TYPE_VXLAN)) {
		BNXT_DRV_DBG(ERR, "Tunnel type unsupported\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "tunnel type unsupported");
		return -EINVAL;
	}

	/* Find (or track) the tunnel entry for this tunnel descriptor */
	rc = ulp_app_tun_search_entry(ulp_ctx, tunnel, &tun_entry);
	if (unlikely(rc < 0)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "tunnel decap set failed");
		return -EINVAL;
	}

	rc = ulp_app_tun_entry_set_decap_action(tun_entry);
	if (unlikely(rc < 0)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "tunnel decap set failed");
		return -EINVAL;
	}

	/* Return the single PMD-private decap action kept in the entry */
	*pmd_actions = &tun_entry->action;
	*num_of_actions = BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS;
	return 0;
}

/* rte_flow tunnel_match callback: hand the application the PMD match
 * item that identifies traffic of the given tunnel (VXLAN only).
 */
static int
bnxt_ulp_tunnel_match(struct rte_eth_dev *eth_dev,
		      struct rte_flow_tunnel *tunnel,
		      struct rte_flow_item **pmd_items,
		      uint32_t *num_of_items,
		      struct rte_flow_error *error)
{
	struct bnxt_ulp_context *ulp_ctx;
	struct bnxt_flow_app_tun_ent *tun_entry;
	int32_t rc = 0;

	ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
	if (unlikely(ulp_ctx == NULL)) {
		BNXT_DRV_DBG(ERR, "ULP context is not initialized\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "ULP context uninitialized");
		return -EINVAL;
	}

	if (unlikely(tunnel == NULL)) {
		BNXT_DRV_DBG(ERR, "No tunnel specified\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "no tunnel specified");
		return -EINVAL;
	}

	/* Only VXLAN tunnel offload is supported */
	if (unlikely(tunnel->type != RTE_FLOW_ITEM_TYPE_VXLAN)) {
		BNXT_DRV_DBG(ERR, "Tunnel type unsupported\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "tunnel type unsupported");
		return -EINVAL;
	}

	/* Find (or track) the tunnel entry for this tunnel descriptor */
	rc = ulp_app_tun_search_entry(ulp_ctx, tunnel, &tun_entry);
	if (unlikely(rc < 0)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "tunnel match set failed");
		return -EINVAL;
	}

	rc = ulp_app_tun_entry_set_decap_item(tun_entry);
	if (unlikely(rc < 0)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "tunnel match set failed");
		return -EINVAL;
	}

	/* Return the single PMD-private match item kept in the entry */
	*pmd_items = &tun_entry->item;
	*num_of_items = BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS;
	return 0;
}

/* rte_flow tunnel_action_decap_release callback: drop the reference
 * taken by tunnel_decap_set for each PMD decap action in the list.
 */
static int
bnxt_ulp_tunnel_decap_release(struct rte_eth_dev *eth_dev,
			      struct rte_flow_action *pmd_actions,
			      uint32_t num_actions,
			      struct rte_flow_error *error)
{
	struct bnxt_ulp_context *ulp_ctx;
	struct bnxt_flow_app_tun_ent *tun_entry;
	const struct rte_flow_action *action_item = pmd_actions;

	ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
	if (unlikely(ulp_ctx == NULL)) {
		BNXT_DRV_DBG(ERR, "ULP context is not initialized\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "ULP context uninitialized");
		return -EINVAL;
	}
	if (unlikely(num_actions != BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS)) {
		BNXT_DRV_DBG(ERR, "num actions is invalid\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "num actions is invalid");
		return -EINVAL;
	}
	/* Walk the END-terminated action list and release every PMD
	 * VXLAN-decap action found.  The cast silences the signedness
	 * mismatch between the PMD-private enum value and the rte type.
	 */
	while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
		if (unlikely(action_item->type == (typeof(tun_entry->action.type))
			     BNXT_RTE_FLOW_ACTION_TYPE_VXLAN_DECAP)) {
			tun_entry = ulp_app_tun_match_entry(ulp_ctx,
							    action_item->conf);
			ulp_app_tun_entry_delete(tun_entry);
		}

		action_item++;
	}
	return 0;
}

/* rte_flow tunnel_item_release callback: drop the reference taken by
 * tunnel_match for the PMD-provided tunnel match item.
 */
static int
bnxt_ulp_tunnel_item_release(struct rte_eth_dev *eth_dev,
			     struct rte_flow_item *pmd_items,
			     uint32_t num_items,
			     struct rte_flow_error *error)
{
	struct bnxt_ulp_context *ulp_ctx;
	struct bnxt_flow_app_tun_ent *tun_entry;

	ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
	if (unlikely(ulp_ctx == NULL)) {
		BNXT_DRV_DBG(ERR, "ULP context is not initialized\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "ULP context uninitialized");
		return -EINVAL;
	}
	if (unlikely(num_items != BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS)) {
		BNXT_DRV_DBG(ERR, "num items is invalid\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "num items is invalid");
		return -EINVAL;
	}

	/* NOTE(review): pmd_items is dereferenced without a NULL check;
	 * presumably callers only pass the item handed out by
	 * bnxt_ulp_tunnel_match — confirm.
	 */
	tun_entry = ulp_app_tun_match_entry(ulp_ctx, pmd_items->spec);
	ulp_app_tun_entry_delete(tun_entry);
	return 0;
}

/* rte_flow operations vector exported by the bnxt ULP driver.
 * Unimplemented callbacks (isolate, get_restore_info) are left NULL.
 */
const struct rte_flow_ops bnxt_ulp_rte_flow_ops = {
	.validate = bnxt_ulp_flow_validate,
	.create = bnxt_ulp_flow_create,
	.destroy = bnxt_ulp_flow_destroy,
	.flush = bnxt_ulp_flow_flush,
	.query = bnxt_ulp_flow_query,
	.isolate = NULL,
	.action_handle_create = bnxt_ulp_action_handle_create,
	.action_handle_destroy = bnxt_ulp_action_handle_destroy,
	/* Tunnel offload callbacks */
	.tunnel_decap_set = bnxt_ulp_tunnel_decap_set,
	.tunnel_match = bnxt_ulp_tunnel_match,
	.tunnel_action_decap_release = bnxt_ulp_tunnel_decap_release,
	.tunnel_item_release = bnxt_ulp_tunnel_item_release,
	.get_restore_info = NULL
};