/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2020 Broadcom
 * All rights reserved.
 */

#include "bnxt.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "bnxt_tf_common.h"
#include "ulp_rte_parser.h"
#include "ulp_utils.h"
#include "tfp.h"
#include "ulp_port_db.h"

/* Local defines for the parsing functions */
#define ULP_VLAN_PRIORITY_SHIFT		13 /* First 3 bits */
#define ULP_VLAN_PRIORITY_MASK		0x700
#define ULP_VLAN_TAG_MASK		0xFFF /* Last 12 bits */
#define ULP_UDP_PORT_VXLAN		4789

/* Utility function to skip the void items. */
static inline int32_t
ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
{
	if (!*item)
		return 0;
	if (increment)
		(*item)++;
	while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
		(*item)++;
	if (*item)
		return 1;
	return 0;
}

/* Utility function to update the field_bitmap */
static void
ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
				   uint32_t idx)
{
	struct ulp_rte_hdr_field *field;

	field = &params->hdr_field[idx];
	if (ulp_bitmap_notzero(field->mask, field->size)) {
		ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
		/* Not an exact match; flag the flow for wildcard matching */
		if (!ulp_bitmap_is_ones(field->mask, field->size))
			ULP_BITMAP_SET(params->fld_bitmap.bits,
				       BNXT_ULP_MATCH_TYPE_BITMASK_WM);
	} else {
		ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
	}
}

/* Utility function to copy field spec items */
static struct ulp_rte_hdr_field *
ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
			const void *buffer,
			uint32_t size)
{
	field->size = size;
	memcpy(field->spec, buffer, field->size);
	field++;
	return field;
}

/* Utility function to copy field mask items */
static void
ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
		       uint32_t *idx,
		       const void *buffer,
		       uint32_t size)
{
	struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];

	memcpy(field->mask, buffer, size);
	ulp_rte_parser_field_bitmap_update(params, *idx);
	*idx = *idx + 1;
}
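/*
 * Parser entry points. Both walkers below are table driven: the RTE
 * item/action type is used as a direct index into the template-generated
 * ulp_hdr_info[]/ulp_act_info[] tables, and the registered callback for
 * each supported type copies the spec/mask bytes into params->hdr_field[]
 * and sets the corresponding bits in params->hdr_bitmap/act_bitmap.
 */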
/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow items into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item *item = pattern;
	struct bnxt_ulp_rte_hdr_info *hdr_info;

	params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;

	/* Set the computed flags for no vlan tags before parsing */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);

	/* Parse all the items in the pattern */
	while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* get the header information from the ulp_hdr_info table */
		hdr_info = &ulp_hdr_info[item->type];
		if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
			BNXT_TF_DBG(ERR,
				    "Truflow parser does not support type %d\n",
				    item->type);
			return BNXT_TF_RC_PARSE_ERR;
		} else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_hdr_func) {
				if (hdr_info->proto_hdr_func(item, params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		item++;
	}
	/* update the implied SVIF */
	return ulp_rte_parser_implicit_match_port_process(params);
}

/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow actions into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action *action_item = actions;
	struct bnxt_ulp_rte_act_info *hdr_info;

	/* Parse all the actions in the list */
	while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
		/* get the action information from the ulp_act_info table */
		hdr_info = &ulp_act_info[action_item->type];
		if (hdr_info->act_type ==
		    BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
			BNXT_TF_DBG(ERR,
				    "Truflow parser does not support act %u\n",
				    action_item->type);
			return BNXT_TF_RC_ERROR;
		} else if (hdr_info->act_type ==
			   BNXT_ULP_ACT_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_act_func(action_item,
						     params) !=
			    BNXT_TF_RC_SUCCESS && hdr_info->proto_act_func) {
				return BNXT_TF_RC_ERROR;
			}
		}
		action_item++;
	}
	/* update the implied port details */
	ulp_rte_parser_implicit_act_port_process(params);
	return BNXT_TF_RC_SUCCESS;
}
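/*
 * PARIF selection summary for the function below: ingress flows use the
 * physical port PARIF; egress flows use the VF function PARIF when the
 * match port is a VF representor and the driver function PARIF otherwise.
 */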
/*
 * Function to handle the post processing of the computed
 * fields for the interface.
 */
static void
bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
{
	uint32_t ifindex;
	uint16_t port_id, parif;
	uint32_t mtype;
	enum bnxt_ulp_direction_type dir;

	/* get the direction details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);

	/* read the port id details */
	port_id = ULP_COMP_FLD_IDX_RD(params,
				      BNXT_ULP_CF_IDX_INCOMING_IF);
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return;
	}

	if (dir == BNXT_ULP_DIR_INGRESS) {
		/* Set port PARIF */
		if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
					  BNXT_ULP_PHY_PORT_PARIF, &parif)) {
			BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
			return;
		}
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
				    parif);
	} else {
		/* Get the match port type */
		mtype = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
		if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
					    1);
			/* Set VF func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_VF_FUNC_PARIF,
						  &parif)) {
				BNXT_TF_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
					    parif);
		} else {
			/* Set DRV func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_DRV_FUNC_PARIF,
						  &parif)) {
				BNXT_TF_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
					    parif);
		}
	}
}

/*
 * Function to handle the post processing of the parsing details
 */
int32_t
bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_direction_type dir;
	enum bnxt_ulp_intf_type match_port_type, act_port_type;
	uint32_t act_port_set;

	/* Get the computed details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
	act_port_type = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
	act_port_set = ULP_COMP_FLD_IDX_RD(params,
					   BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);

	/* set the flow direction in the proto and action header */
	if (dir == BNXT_ULP_DIR_EGRESS) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
	}

	/* calculate the VF to VF flag */
	if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);

	/* Update the decrement ttl computational fields */
	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
			     BNXT_ULP_ACTION_BIT_DEC_TTL)) {
		/*
		 * If the VXLAN header is present and the VXLAN decap
		 * action is not set, decrement the tunnel TTL instead.
		 * GRE and NVGRE can be added here similarly in the future.
		 */
		if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
				      BNXT_ULP_HDR_BIT_T_VXLAN) &&
		    !ULP_BITMAP_ISSET(params->act_bitmap.bits,
				      BNXT_ULP_ACTION_BIT_VXLAN_DECAP))) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
		} else {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
		}
	}

	/* Merge the hdr_fp_bit into the proto header bit */
	params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;

	/* Update the computed interface parameters */
	bnxt_ulp_comp_fld_intf_update(params);

	/* TBD: Handle the flow rejection scenarios */
	return 0;
}
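/*
 * Direction resolution performed below:
 *   ingress attribute + VF-rep match port -> BNXT_ULP_DIR_EGRESS
 *   ingress attribute, any other port     -> BNXT_ULP_DIR_INGRESS
 *   egress attribute                      -> BNXT_ULP_DIR_EGRESS
 */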
/*
 * Function to compute the flow direction based on the match port details
 */
static void
bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_intf_type match_port_type;

	/* Get the match port type */
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);

	/* If an ingress flow matches a VF rep port, the direction is egress */
	if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
				    BNXT_ULP_DIR_EGRESS);
	} else {
		/* Assign the input direction */
		if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
					    BNXT_ULP_DIR_INGRESS);
		else
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
					    BNXT_ULP_DIR_EGRESS);
	}
}

/* Function to compute and set the SVIF match field for a given port. */
static int32_t
ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
			uint32_t ifindex,
			uint16_t mask)
{
	uint16_t svif;
	enum bnxt_ulp_direction_type dir;
	struct ulp_rte_hdr_field *hdr_field;
	enum bnxt_ulp_svif_type svif_type;
	enum bnxt_ulp_intf_type port_type;

	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
	    BNXT_ULP_INVALID_SVIF_VAL) {
		BNXT_TF_DBG(ERR,
			    "SVIF already set, multiple sources not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get port type details */
	port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
		BNXT_TF_DBG(ERR, "Invalid port type\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the match port type */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);

	/* compute the direction */
	bnxt_ulp_rte_parser_direction_compute(params);

	/* Get the computed direction */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_INGRESS) {
		svif_type = BNXT_ULP_PHY_PORT_SVIF;
	} else {
		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
			svif_type = BNXT_ULP_VF_FUNC_SVIF;
		else
			svif_type = BNXT_ULP_DRV_FUNC_SVIF;
	}
	ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
			     &svif);
	svif = rte_cpu_to_be_16(svif);
	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
	memcpy(hdr_field->spec, &svif, sizeof(svif));
	memcpy(hdr_field->mask, &mask, sizeof(mask));
	hdr_field->size = sizeof(svif);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
			    rte_be_to_cpu_16(svif));
	return BNXT_TF_RC_SUCCESS;
}
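/*
 * Every flow carries an implicit match on the source interface: if no
 * pattern item (PF/VF/port_id/phy_port) has set the SVIF, the incoming
 * dpdk port id is converted to an ifindex and used to fill the SVIF
 * spec with an exact (0xFFFF) mask.
 */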
/* Function to handle the implicit match of the incoming port id */
int32_t
ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
{
	uint16_t port_id = 0;
	uint16_t svif_mask = 0xFFFF;
	uint32_t ifindex;
	int32_t rc = BNXT_TF_RC_ERROR;

	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
	    BNXT_ULP_INVALID_SVIF_VAL)
		return BNXT_TF_RC_SUCCESS;

	/* SVIF not set. So get the port id */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}

	/* Update the SVIF details */
	rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
	return rc;
}

/* Function to handle the implicit action port id */
int32_t
ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
{
	struct rte_flow_action action_item = {0};
	struct rte_flow_action_port_id port_id = {0};

	/* Read the action port set bit */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
		/* Already set, so just exit */
		return BNXT_TF_RC_SUCCESS;
	}
	port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
	action_item.conf = &port_id;

	/* Update the action port based on incoming port */
	ulp_rte_port_id_act_handler(&action_item, params);

	/* Reset the action port set bit */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item PF Header. */
int32_t
ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
		       struct ulp_rte_parser_params *params)
{
	uint16_t port_id = 0;
	uint16_t svif_mask = 0xFFFF;
	uint32_t ifindex;

	/* Get the implicit port id */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

	/* perform the conversion from dpdk port to bnxt ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
}

/* Function to handle the parsing of RTE Flow item VF Header. */
int32_t
ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
		       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vf *vf_spec = item->spec;
	const struct rte_flow_item_vf *vf_mask = item->mask;
	uint16_t mask = 0;
	uint32_t ifindex;
	int32_t rc = BNXT_TF_RC_PARSE_ERR;

	/* Get VF rte_flow_item for Port details */
	if (!vf_spec) {
		BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
		return rc;
	}
	if (!vf_mask) {
		BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
		return rc;
	}
	mask = vf_mask->id;

	/* perform the conversion from VF Func id to bnxt ifindex */
	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
						 vf_spec->id,
						 &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}
	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, mask);
}
/* Function to handle the parsing of RTE Flow item port id Header. */
int32_t
ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
			    struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_port_id *port_spec = item->spec;
	const struct rte_flow_item_port_id *port_mask = item->mask;
	uint16_t mask = 0;
	int32_t rc = BNXT_TF_RC_PARSE_ERR;
	uint32_t ifindex;

	if (!port_spec) {
		BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
		return rc;
	}
	if (!port_mask) {
		BNXT_TF_DBG(ERR, "ParseErr:Port id mask is not valid\n");
		return rc;
	}
	mask = port_mask->id;

	/* perform the conversion from dpdk port to bnxt ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_spec->id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}
	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, mask);
}

/* Function to handle the parsing of RTE Flow item phy port Header. */
int32_t
ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
			     struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_phy_port *port_spec = item->spec;
	const struct rte_flow_item_phy_port *port_mask = item->mask;
	uint16_t mask = 0;
	int32_t rc = BNXT_TF_RC_ERROR;
	uint16_t svif;
	enum bnxt_ulp_direction_type dir;
	struct ulp_rte_hdr_field *hdr_field;

	/* Copy the rte_flow_item for phy port into hdr_field */
	if (!port_spec) {
		BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
		return rc;
	}
	if (!port_mask) {
		BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
		return rc;
	}
	mask = port_mask->index;

	/* Update the match port type */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
			    BNXT_ULP_INTF_TYPE_PHY_PORT);

	/* Compute the Hw direction */
	bnxt_ulp_rte_parser_direction_compute(params);

	/* Direction validation */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_EGRESS) {
		BNXT_TF_DBG(ERR,
			    "Parse Err:Phy ports are valid only for ingress\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* Get the physical port details from port db */
	rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
					   &svif);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to get port details\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* Update the SVIF details */
	svif = rte_cpu_to_be_16(svif);
	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
	memcpy(hdr_field->spec, &svif, sizeof(svif));
	memcpy(hdr_field->mask, &mask, sizeof(mask));
	hdr_field->size = sizeof(svif);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
			    rte_be_to_cpu_16(svif));
	return BNXT_TF_RC_SUCCESS;
}
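/*
 * The l2/l3/l4 proto_type_update helpers below propagate next-protocol
 * hints (ethertype, IP protocol, UDP destination port) into hdr_fp_bit,
 * so a template can match the follow-on header even when the flow
 * pattern does not carry an explicit item for it.
 */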
/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
			     uint16_t type, uint32_t in_flag)
{
	if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_IPV4);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_IPV4);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
		}
	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_IPV6);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_IPV6);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
		}
	}
}

/* Function to handle the parsing of RTE Flow item Ethernet Header. */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_eth *eth_spec = item->spec;
	const struct rte_flow_item_eth *eth_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;

	/*
	 * Copy the rte_flow_item for eth into hdr_field using ethernet
	 * header fields
	 */
	if (eth_spec) {
		size = sizeof(eth_spec->dst.addr_bytes);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						eth_spec->dst.addr_bytes,
						size);
		size = sizeof(eth_spec->src.addr_bytes);
		field = ulp_rte_parser_fld_copy(field,
						eth_spec->src.addr_bytes,
						size);
		field = ulp_rte_parser_fld_copy(field,
						&eth_spec->type,
						sizeof(eth_spec->type));
		eth_type = eth_spec->type;
	}
	if (eth_mask) {
		ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
				       sizeof(eth_mask->dst.addr_bytes));
		ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
				       sizeof(eth_mask->src.addr_bytes));
		ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
				       sizeof(eth_mask->type));
	}
	/* Add the number of ethernet header elements and reserve vlan slots */
	params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
	params->vlan_idx = params->field_idx;
	params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;

	/* Update the protocol hdr bitmap */
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH)) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);

	return BNXT_TF_RC_SUCCESS;
}
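/*
 * hdr_field layout used by the ETH handler above (one entry per field):
 * dst mac (6), src mac (6), ethertype (2). field_idx then advances past
 * BNXT_ULP_PROTO_HDR_ETH_NUM and additionally reserves
 * BNXT_ULP_PROTO_HDR_VLAN_NUM slots at vlan_idx, so any VLAN tags that
 * follow land at a fixed offset.
 */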
/* Function to handle the parsing of RTE Flow item Vlan Header. */
int32_t
ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vlan *vlan_spec = item->spec;
	const struct rte_flow_item_vlan *vlan_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bit;
	uint32_t idx = params->vlan_idx;
	uint16_t vlan_tag, priority;
	uint32_t outer_vtag_num;
	uint32_t inner_vtag_num;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;

	/*
	 * Copy the rte_flow_item for vlan into hdr_field using Vlan
	 * header fields
	 */
	if (vlan_spec) {
		vlan_tag = ntohs(vlan_spec->tci);
		priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag &= ULP_VLAN_TAG_MASK;
		vlan_tag = htons(vlan_tag);

		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&priority,
						sizeof(priority));
		field = ulp_rte_parser_fld_copy(field,
						&vlan_tag,
						sizeof(vlan_tag));
		field = ulp_rte_parser_fld_copy(field,
						&vlan_spec->inner_type,
						sizeof(vlan_spec->inner_type));
		eth_type = vlan_spec->inner_type;
	}

	if (vlan_mask) {
		vlan_tag = ntohs(vlan_mask->tci);
		priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag &= 0xfff;

		/*
		 * The storage for the priority and the VLAN tag is two bytes
		 * each. If the 3-bit priority mask is all ones, set the
		 * remaining 13 bits to ones as well so the field is treated
		 * as an exact match; do the same for the 12-bit tag mask.
		 */
		if (priority == ULP_VLAN_PRIORITY_MASK)
			priority |= ~ULP_VLAN_PRIORITY_MASK;
		if (vlan_tag == ULP_VLAN_TAG_MASK)
			vlan_tag |= ~ULP_VLAN_TAG_MASK;
		vlan_tag = htons(vlan_tag);

		/*
		 * The priority field mask is ignored since OVS sets it as a
		 * wildcard match, which is not supported. This is a
		 * workaround and shall be addressed in the future.
		 */
		idx += 1;

		ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
				       sizeof(vlan_tag));
		ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
				       sizeof(vlan_mask->inner_type));
	}
	/* Set the vlan index to the new incremented value */
	params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;

	/* Get the outer tag and inner tag counts */
	outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_O_VTAG_NUM);
	inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_I_VTAG_NUM);

	/* Update the hdr_bitmap of the vlans */
	hdr_bit = &params->hdr_bitmap;
	if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
	    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
	    !outer_vtag_num) {
		/* Update the vlan tag num */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OO_VLAN);
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   outer_vtag_num == 1) {
		/* update the vlan tag num */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OI_VLAN);
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   !inner_vtag_num) {
		/* update the vlan tag num */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_IO_VLAN);
		inner_flag = 1;
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   inner_vtag_num == 1) {
		/* update the vlan tag num */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_II_VLAN);
		inner_flag = 1;
	} else {
		BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
	return BNXT_TF_RC_SUCCESS;
}
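/*
 * VLAN accounting above: the first tag behind the outer (inner) ETH
 * header sets OO_VLAN (IO_VLAN) and the second sets OI_VLAN (II_VLAN);
 * a third tag, or a VLAN item without a preceding ETH item, is rejected.
 */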
/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
			     uint8_t proto, uint32_t in_flag)
{
	if (proto == IPPROTO_UDP) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_UDP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_UDP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
		}
	} else if (proto == IPPROTO_TCP) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_TCP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_TCP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
		}
	}
}

/* Function to handle the parsing of RTE Flow item IPV4 Header. */
int32_t
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
	const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint8_t proto = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate that there is no third L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
	 * header fields
	 */
	if (ipv4_spec) {
		size = sizeof(ipv4_spec->hdr.version_ihl);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&ipv4_spec->hdr.version_ihl,
						size);
		size = sizeof(ipv4_spec->hdr.type_of_service);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.type_of_service,
						size);
		size = sizeof(ipv4_spec->hdr.total_length);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.total_length,
						size);
		size = sizeof(ipv4_spec->hdr.packet_id);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.packet_id,
						size);
		size = sizeof(ipv4_spec->hdr.fragment_offset);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.fragment_offset,
						size);
		size = sizeof(ipv4_spec->hdr.time_to_live);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.time_to_live,
						size);
		size = sizeof(ipv4_spec->hdr.next_proto_id);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.next_proto_id,
						size);
		proto = ipv4_spec->hdr.next_proto_id;
		size = sizeof(ipv4_spec->hdr.hdr_checksum);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.hdr_checksum,
						size);
		size = sizeof(ipv4_spec->hdr.src_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.src_addr,
						size);
		size = sizeof(ipv4_spec->hdr.dst_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.dst_addr,
						size);
	}
	if (ipv4_mask) {
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.version_ihl,
				       sizeof(ipv4_mask->hdr.version_ihl));
		/*
		 * The TOS field mask is ignored since OVS sets it as a
		 * wildcard match, which is not supported. This is a
		 * workaround and shall be addressed in the future.
		 */
		idx += 1;

		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.total_length,
				       sizeof(ipv4_mask->hdr.total_length));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.packet_id,
				       sizeof(ipv4_mask->hdr.packet_id));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.fragment_offset,
				       sizeof(ipv4_mask->hdr.fragment_offset));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.time_to_live,
				       sizeof(ipv4_mask->hdr.time_to_live));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.next_proto_id,
				       sizeof(ipv4_mask->hdr.next_proto_id));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.hdr_checksum,
				       sizeof(ipv4_mask->hdr.hdr_checksum));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.src_addr,
				       sizeof(ipv4_mask->hdr.src_addr));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.dst_addr,
				       sizeof(ipv4_mask->hdr.dst_addr));
	}
	/* Add the number of ipv4 header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;

	/* Set the ipv4 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
	}

	/* Update the field protocol hdr bitmap */
	ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
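/*
 * At most two L3 headers (outer plus inner, e.g. for VXLAN tunnels) are
 * supported; the L3_HDR_CNT check above and in the IPv6 handler below
 * rejects a third.
 */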
/* Function to handle the parsing of RTE Flow item IPV6 Header */
int32_t
ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
	const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint32_t vtcf, vtcf_mask;
	uint8_t proto = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate that there is no third L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
	 * header fields
	 */
	if (ipv6_spec) {
		size = sizeof(ipv6_spec->hdr.vtc_flow);

		vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&vtcf,
						size);

		vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
		field = ulp_rte_parser_fld_copy(field,
						&vtcf,
						size);

		vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
		field = ulp_rte_parser_fld_copy(field,
						&vtcf,
						size);

		size = sizeof(ipv6_spec->hdr.payload_len);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.payload_len,
						size);
		size = sizeof(ipv6_spec->hdr.proto);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.proto,
						size);
		proto = ipv6_spec->hdr.proto;
		size = sizeof(ipv6_spec->hdr.hop_limits);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.hop_limits,
						size);
		size = sizeof(ipv6_spec->hdr.src_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.src_addr,
						size);
		size = sizeof(ipv6_spec->hdr.dst_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.dst_addr,
						size);
	}
	if (ipv6_mask) {
		size = sizeof(ipv6_mask->hdr.vtc_flow);

		vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vtcf_mask,
				       size);

		vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vtcf_mask,
				       size);

		vtcf_mask =
			BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vtcf_mask,
				       size);

		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.payload_len,
				       sizeof(ipv6_mask->hdr.payload_len));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.proto,
				       sizeof(ipv6_mask->hdr.proto));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.hop_limits,
				       sizeof(ipv6_mask->hdr.hop_limits));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.src_addr,
				       sizeof(ipv6_mask->hdr.src_addr));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.dst_addr,
				       sizeof(ipv6_mask->hdr.dst_addr));
	}
	/* add the number of ipv6 header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;

	/* Set the ipv6 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
	}

	/* Update the field protocol hdr bitmap */
	ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);

	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
			     uint16_t dst_port)
{
	if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN))
		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
			       BNXT_ULP_HDR_BIT_T_VXLAN);
}
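/*
 * Illustrative example (not part of the driver): a pattern such as
 *   eth / ipv4 / udp dst is 4789 / vxlan / ...
 * sets BNXT_ULP_HDR_BIT_T_VXLAN from the UDP destination port above,
 * before the vxlan item itself is parsed.
 */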
/* Function to handle the parsing of RTE Flow item UDP Header. */
int32_t
ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_udp *udp_spec = item->spec;
	const struct rte_flow_item_udp *udp_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint16_t dst_port = 0;
	uint32_t cnt;

	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for udp into hdr_field using udp
	 * header fields
	 */
	if (udp_spec) {
		size = sizeof(udp_spec->hdr.src_port);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&udp_spec->hdr.src_port,
						size);
		size = sizeof(udp_spec->hdr.dst_port);
		field = ulp_rte_parser_fld_copy(field,
						&udp_spec->hdr.dst_port,
						size);
		dst_port = udp_spec->hdr.dst_port;
		size = sizeof(udp_spec->hdr.dgram_len);
		field = ulp_rte_parser_fld_copy(field,
						&udp_spec->hdr.dgram_len,
						size);
		size = sizeof(udp_spec->hdr.dgram_cksum);
		field = ulp_rte_parser_fld_copy(field,
						&udp_spec->hdr.dgram_cksum,
						size);
	}
	if (udp_mask) {
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.src_port,
				       sizeof(udp_mask->hdr.src_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.dst_port,
				       sizeof(udp_mask->hdr.dst_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.dgram_len,
				       sizeof(udp_mask->hdr.dgram_len));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.dgram_cksum,
				       sizeof(udp_mask->hdr.dgram_cksum));
	}

	/* Add the number of UDP header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;

	/* Set the udp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
		/* Update the field protocol hdr bitmap */
		ulp_rte_l4_proto_type_update(params, dst_port);
	}
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
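/*
 * L4 classification mirrors L3: a second UDP/TCP header is marked as
 * inner (I_UDP/I_TCP) when an outer L4 bit is already set; tunnel port
 * detection applies only to the outer UDP header.
 */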
/* Function to handle the parsing of RTE Flow item TCP Header. */
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_tcp *tcp_spec = item->spec;
	const struct rte_flow_item_tcp *tcp_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint32_t cnt;

	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for tcp into hdr_field using tcp
	 * header fields
	 */
	if (tcp_spec) {
		size = sizeof(tcp_spec->hdr.src_port);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&tcp_spec->hdr.src_port,
						size);
		size = sizeof(tcp_spec->hdr.dst_port);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.dst_port,
						size);
		size = sizeof(tcp_spec->hdr.sent_seq);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.sent_seq,
						size);
		size = sizeof(tcp_spec->hdr.recv_ack);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.recv_ack,
						size);
		size = sizeof(tcp_spec->hdr.data_off);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.data_off,
						size);
		size = sizeof(tcp_spec->hdr.tcp_flags);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.tcp_flags,
						size);
		size = sizeof(tcp_spec->hdr.rx_win);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.rx_win,
						size);
		size = sizeof(tcp_spec->hdr.cksum);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.cksum,
						size);
		size = sizeof(tcp_spec->hdr.tcp_urp);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.tcp_urp,
						size);
	} else {
		idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
	}

	if (tcp_mask) {
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.src_port,
				       sizeof(tcp_mask->hdr.src_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.dst_port,
				       sizeof(tcp_mask->hdr.dst_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.sent_seq,
				       sizeof(tcp_mask->hdr.sent_seq));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.recv_ack,
				       sizeof(tcp_mask->hdr.recv_ack));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.data_off,
				       sizeof(tcp_mask->hdr.data_off));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.tcp_flags,
				       sizeof(tcp_mask->hdr.tcp_flags));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.rx_win,
				       sizeof(tcp_mask->hdr.rx_win));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.cksum,
				       sizeof(tcp_mask->hdr.cksum));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.tcp_urp,
				       sizeof(tcp_mask->hdr.tcp_urp));
	}
	/* add the number of TCP header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;

	/* Set the tcp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
	}
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item Vxlan Header. */
int32_t
ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
	const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;

	/*
	 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
	 * header fields
	 */
	if (vxlan_spec) {
		size = sizeof(vxlan_spec->flags);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&vxlan_spec->flags,
						size);
		size = sizeof(vxlan_spec->rsvd0);
		field = ulp_rte_parser_fld_copy(field,
						&vxlan_spec->rsvd0,
						size);
		size = sizeof(vxlan_spec->vni);
		field = ulp_rte_parser_fld_copy(field,
						&vxlan_spec->vni,
						size);
		size = sizeof(vxlan_spec->rsvd1);
		field = ulp_rte_parser_fld_copy(field,
						&vxlan_spec->rsvd1,
						size);
	}
	if (vxlan_mask) {
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vxlan_mask->flags,
				       sizeof(vxlan_mask->flags));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vxlan_mask->rsvd0,
				       sizeof(vxlan_mask->rsvd0));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vxlan_mask->vni,
				       sizeof(vxlan_mask->vni));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vxlan_mask->rsvd1,
				       sizeof(vxlan_mask->rsvd1));
	}
	/* Add the number of vxlan header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;

	/* Update the hdr_bitmap with vxlan */
	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item void Header */
int32_t
ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action void Header. */
int32_t
ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action Mark Header. */
int32_t
ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
			 struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_mark *mark;
	struct ulp_rte_act_bitmap *act = &param->act_bitmap;
	uint32_t mark_id;

	mark = action_item->conf;
	if (mark) {
		mark_id = tfp_cpu_to_be_32(mark->id);
		memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
		       &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);

		/* Update the act_bitmap with mark */
		ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
/* Function to handle the parsing of RTE Flow action RSS Header. */
int32_t
ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
			struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_rss *rss = action_item->conf;

	if (rss) {
		/* Update the act_bitmap with rss */
		ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
int32_t
ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
				struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_vxlan_encap *vxlan_encap;
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	struct rte_flow_item_vxlan vxlan_spec;
	uint32_t vlan_num = 0, vlan_size = 0;
	uint32_t ip_size = 0, ip_type = 0;
	uint32_t vxlan_size = 0;
	uint8_t *buff;
	/* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
	const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
					0x00, 0x40, 0x11};
	struct ulp_rte_act_bitmap *act = &params->act_bitmap;
	struct ulp_rte_act_prop *ap = &params->act_prop;

	vxlan_encap = action_item->conf;
	if (!vxlan_encap) {
		BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
		return BNXT_TF_RC_ERROR;
	}

	item = vxlan_encap->definition;
	if (!item) {
		BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
		return BNXT_TF_RC_ERROR;
	}

	if (!ulp_rte_item_skip_void(&item, 0))
		return BNXT_TF_RC_ERROR;

	/* must have ethernet header */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
		return BNXT_TF_RC_ERROR;
	}
	eth_spec = item->spec;
	buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
	ulp_encap_buffer_copy(buff,
			      eth_spec->dst.addr_bytes,
			      BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);

	buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
	ulp_encap_buffer_copy(buff,
			      eth_spec->src.addr_bytes,
			      BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC);

	/* Goto the next item */
	if (!ulp_rte_item_skip_void(&item, 1))
		return BNXT_TF_RC_ERROR;

	/* May have a vlan header */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_num++;
		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
		ulp_encap_buffer_copy(buff,
				      item->spec,
				      sizeof(struct rte_flow_item_vlan));

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	}

	/* may have two vlan headers */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_num++;
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
					sizeof(struct rte_flow_item_vlan)],
		       item->spec,
		       sizeof(struct rte_flow_item_vlan));
		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	}
	/* Update the vlan count and size if one or more tags were present */
	if (vlan_num) {
		vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
		vlan_num = tfp_cpu_to_be_32(vlan_num);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
		       &vlan_num,
		       sizeof(uint32_t));
		vlan_size = tfp_cpu_to_be_32(vlan_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
		       &vlan_size,
		       sizeof(uint32_t));
	}

	/* L3 must be IPv4 or IPv6 */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		ipv4_spec = item->spec;
		ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;

		/* copy the ipv4 details */
		if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
					BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
			ulp_encap_buffer_copy(buff,
					      def_ipv4_hdr,
					      BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
					      BNXT_ULP_ENCAP_IPV4_ID_PROTO);
		} else {
			const uint8_t *tmp_buff;

			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
			tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
			ulp_encap_buffer_copy(buff,
					      tmp_buff,
					      BNXT_ULP_ENCAP_IPV4_ID_PROTO);
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
						BNXT_ULP_ENCAP_IPV4_ID_PROTO];
			ulp_encap_buffer_copy(buff,
					      &ipv4_spec->hdr.version_ihl,
					      BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
		}
		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
					BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
					BNXT_ULP_ENCAP_IPV4_ID_PROTO];
		ulp_encap_buffer_copy(buff,
				      (const uint8_t *)&ipv4_spec->hdr.dst_addr,
				      BNXT_ULP_ENCAP_IPV4_DEST_IP);

		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
		ulp_encap_buffer_copy(buff,
				      (const uint8_t *)&ipv4_spec->hdr.src_addr,
				      BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC);

		/* Update the ip size details */
		ip_size = tfp_cpu_to_be_32(ip_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
		       &ip_size, sizeof(uint32_t));

		/* update the ip type */
		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
		       &ip_type, sizeof(uint32_t));

		/* update the computed field to notify it is an ipv4 header */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
				    1);

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		ipv6_spec = item->spec;
		ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;

		/* copy the ipv6 details */
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
		       ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);

		/* Update the ip size details */
		ip_size = tfp_cpu_to_be_32(ip_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
		       &ip_size, sizeof(uint32_t));

		/* update the ip type */
		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
		       &ip_type, sizeof(uint32_t));

		/* update the computed field to notify it is an ipv6 header */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
				    1);

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	} else {
		BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
		return BNXT_TF_RC_ERROR;
	}

	/* L4 is UDP */
	if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
		BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
		return BNXT_TF_RC_ERROR;
	}
	/* copy the udp details */
	ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
			      item->spec, BNXT_ULP_ENCAP_UDP_SIZE);

	if (!ulp_rte_item_skip_void(&item, 1))
		return BNXT_TF_RC_ERROR;
	/* Finally VXLAN */
	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
		BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
		return BNXT_TF_RC_ERROR;
	}
	vxlan_size = sizeof(struct rte_flow_item_vxlan);
	/* copy the vxlan details */
	memcpy(&vxlan_spec, item->spec, vxlan_size);
	vxlan_spec.flags = 0x08;
	ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
			      (const uint8_t *)&vxlan_spec,
			      vxlan_size);
	vxlan_size = tfp_cpu_to_be_32(vxlan_size);
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
	       &vxlan_size, sizeof(uint32_t));

	/* update the act_bitmap with vxlan encap */
	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action vxlan_decap Header */
int32_t
ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
				__rte_unused,
				struct ulp_rte_parser_params *params)
{
	/* update the act_bitmap with vxlan decap */
	ULP_BITMAP_SET(params->act_bitmap.bits,
		       BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action drop Header. */
int32_t
ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with drop */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action count. */
int32_t
ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_count *act_count;
	struct ulp_rte_act_prop *act_prop = &params->act_prop;

	act_count = action_item->conf;
	if (act_count) {
		if (act_count->shared) {
			BNXT_TF_DBG(ERR,
				    "Parse Error:Shared count not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
		       &act_count->id,
		       BNXT_ULP_ACT_PROP_SZ_COUNT);
	}

	/* Update the act_bitmap with count */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
	return BNXT_TF_RC_SUCCESS;
}
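/*
 * Forwarding-port resolution used by the action handlers below: egress
 * actions program the physical VPORT, while ingress actions program the
 * default VNIC of the destination function (the VF function VNIC for VF
 * representors, the driver function VNIC otherwise).
 */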
/* Function to handle the parsing of action ports. */
static int32_t
ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
			    uint32_t ifindex)
{
	enum bnxt_ulp_direction_type dir;
	uint16_t pid_s;
	uint32_t pid;
	struct ulp_rte_act_prop *act = &param->act_prop;
	enum bnxt_ulp_intf_type port_type;
	uint32_t vnic_type;

	/* Get the direction */
	dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_EGRESS) {
		/* For egress direction, fill vport */
		if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
			return BNXT_TF_RC_ERROR;

		pid = pid_s;
		pid = rte_cpu_to_be_32(pid);
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
	} else {
		/* For ingress direction, fill vnic */
		port_type = ULP_COMP_FLD_IDX_RD(param,
						BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
			vnic_type = BNXT_ULP_VF_FUNC_VNIC;
		else
			vnic_type = BNXT_ULP_DRV_FUNC_VNIC;

		if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
						 vnic_type, &pid_s))
			return BNXT_TF_RC_ERROR;

		pid = pid_s;
		pid = rte_cpu_to_be_32(pid);
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
	}

	/* Update the action port set bit */
	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action PF. */
int32_t
ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
		       struct ulp_rte_parser_params *params)
{
	uint32_t port_id;
	uint32_t ifindex;
	enum bnxt_ulp_intf_type intf_type;

	/* Get the port id of the current device */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

	/* Get the port db ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "Invalid port id\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Check that the port is a PF port */
	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
		BNXT_TF_DBG(ERR, "Port is not a PF port\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Update the action properties */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
	return ulp_rte_parser_act_port_set(params, ifindex);
}

/* Function to handle the parsing of RTE Flow action VF. */
int32_t
ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
		       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_vf *vf_action;
	uint32_t ifindex;
	enum bnxt_ulp_intf_type intf_type;

	vf_action = action_item->conf;
	if (!vf_action) {
		BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	if (vf_action->original) {
		BNXT_TF_DBG(ERR, "ParseErr: VF Original not supported\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* Check that the port is a VF port */
	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
						 &ifindex)) {
		BNXT_TF_DBG(ERR, "VF is not a valid interface\n");
		return BNXT_TF_RC_ERROR;
	}
	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
	    intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
		BNXT_TF_DBG(ERR, "Port is not a VF port\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the action properties */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
	return ulp_rte_parser_act_port_set(params, ifindex);
}

/* Function to handle the parsing of RTE Flow action port_id. */
int32_t
ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
			    struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_port_id *port_id = act_item->conf;
	uint32_t ifindex;
	enum bnxt_ulp_intf_type intf_type;

	if (!port_id) {
		BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	if (port_id->original) {
		BNXT_TF_DBG(ERR, "ParseErr: Portid Original not supported\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* Get the port db ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "Invalid port id\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get the intf type */
	intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
	if (!intf_type) {
		BNXT_TF_DBG(ERR, "Invalid port type\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Set the action port */
	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
	return ulp_rte_parser_act_port_set(param, ifindex);
}
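
/*
 * Illustrative sketch only: redirecting to another DPDK port via the
 * port_id action parsed above. Port 2 is hypothetical; 'original'
 * must stay 0 since the handler rejects it.
 */
static void __rte_unused
ulp_example_port_id_action(void)
{
	struct rte_flow_action_port_id pid_conf = { .original = 0, .id = 2 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &pid_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL },
	};

	(void)actions;
}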

/* Function to handle the parsing of RTE Flow action phy_port. */
int32_t
ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
			     struct ulp_rte_parser_params *prm)
{
	const struct rte_flow_action_phy_port *phy_port;
	uint32_t pid;
	int32_t rc;
	uint16_t pid_s;
	enum bnxt_ulp_direction_type dir;

	phy_port = action_item->conf;
	if (!phy_port) {
		BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	if (phy_port->original) {
		BNXT_TF_DBG(ERR, "Parse Err: Port Original not supported\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir != BNXT_ULP_DIR_EGRESS) {
		BNXT_TF_DBG(ERR,
			    "Parse Err: Phy ports are valid only for egress\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	/* Get the physical port details from port db */
	rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
					    &pid_s);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to get port details\n");
		return BNXT_TF_RC_ERROR;
	}

	pid = pid_s;
	pid = rte_cpu_to_be_32(pid);
	memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
	       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);

	/* Update the action port set bit */
	ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
	ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
			    BNXT_ULP_INTF_TYPE_PHY_PORT);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action pop vlan. */
int32_t
ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
				struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with pop */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action push vlan. */
int32_t
ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
				 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_of_push_vlan *push_vlan;
	uint16_t ethertype;
	struct ulp_rte_act_prop *act = &params->act_prop;

	push_vlan = action_item->conf;
	if (push_vlan) {
		ethertype = push_vlan->ethertype;
		if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
			BNXT_TF_DBG(ERR,
				    "Parse Err: Ethertype not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
		       &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
		/* Update the hdr_bitmap with push vlan */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACTION_BIT_PUSH_VLAN);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
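
/*
 * Illustrative sketch only: a push-vlan action as accepted by the
 * handler above. The ethertype is supplied in network byte order and
 * must be 0x8100 (RTE_ETHER_TYPE_VLAN); anything else is rejected.
 * The VID/PCP values would come from the setters parsed below.
 */
static void __rte_unused
ulp_example_push_vlan_action(void)
{
	struct rte_flow_action_of_push_vlan push_conf = {
		.ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN),
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
		  .conf = &push_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL },
	};

	(void)actions;
}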

/* Function to handle the parsing of RTE Flow action set vlan id. */
int32_t
ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
				    struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
	uint32_t vid;
	struct ulp_rte_act_prop *act = &params->act_prop;

	vlan_vid = action_item->conf;
	if (vlan_vid && vlan_vid->vlan_vid) {
		vid = vlan_vid->vlan_vid;
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
		       &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
		/* Update the hdr_bitmap with vlan vid */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set vlan pcp. */
int32_t
ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
				    struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
	uint8_t pcp;
	struct ulp_rte_act_prop *act = &params->act_prop;

	vlan_pcp = action_item->conf;
	if (vlan_pcp) {
		pcp = vlan_pcp->vlan_pcp;
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
		       &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
		/* Update the hdr_bitmap with vlan pcp */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set ipv4 src. */
int32_t
ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
				 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_ipv4 *set_ipv4;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_ipv4 = action_item->conf;
	if (set_ipv4) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
		       &set_ipv4->ipv4_addr,
		       BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
		/* Update the hdr_bitmap with set ipv4 src */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACTION_BIT_SET_IPV4_SRC);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set ipv4 dst. */
int32_t
ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
				 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_ipv4 *set_ipv4;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_ipv4 = action_item->conf;
	if (set_ipv4) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
		       &set_ipv4->ipv4_addr,
		       BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
		/* Update the hdr_bitmap with set ipv4 dst */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACTION_BIT_SET_IPV4_DST);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
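
/*
 * Illustrative sketch only: a source-NAT style rewrite combining the
 * ipv4 setters above with the transport-port setters below. The
 * address and port values are hypothetical; both are supplied in
 * network byte order per the rte_flow API.
 */
static void __rte_unused
ulp_example_snat_actions(void)
{
	struct rte_flow_action_set_ipv4 new_src = {
		.ipv4_addr = rte_cpu_to_be_32(0x0a000001), /* 10.0.0.1 */
	};
	struct rte_flow_action_set_tp new_sport = {
		.port = rte_cpu_to_be_16(4000),
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC,
		  .conf = &new_src },
		{ .type = RTE_FLOW_ACTION_TYPE_SET_TP_SRC,
		  .conf = &new_sport },
		{ .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL },
	};

	(void)actions;
}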

/* Function to handle the parsing of RTE Flow action set tp src. */
int32_t
ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
			       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_tp *set_tp;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_tp = action_item->conf;
	if (set_tp) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
		/* Update the hdr_bitmap with set tp src */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACTION_BIT_SET_TP_SRC);
		return BNXT_TF_RC_SUCCESS;
	}

	BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set tp dst. */
int32_t
ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
			       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_tp *set_tp;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_tp = action_item->conf;
	if (set_tp) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
		/* Update the hdr_bitmap with set tp dst */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACTION_BIT_SET_TP_DST);
		return BNXT_TF_RC_SUCCESS;
	}

	BNXT_TF_DBG(ERR, "Parse Error: set tp dst arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action dec ttl. */
int32_t
ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
			    struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with dec ttl */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DEC_TTL);
	return BNXT_TF_RC_SUCCESS;
}
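
/*
 * Illustrative sketch only: decap-and-forward, combining the
 * configuration-less vxlan_decap and dec_ttl actions parsed above.
 */
static void __rte_unused
ulp_example_decap_dec_ttl_actions(void)
{
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP, .conf = NULL },
		{ .type = RTE_FLOW_ACTION_TYPE_DEC_TTL, .conf = NULL },
		{ .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL },
	};

	(void)actions;
}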