/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2020 Broadcom
 * All rights reserved.
 */

#include "bnxt.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "bnxt_tf_common.h"
#include "ulp_rte_parser.h"
#include "ulp_utils.h"
#include "tfp.h"
#include "ulp_port_db.h"

/* Local defines for the parsing functions */
#define ULP_VLAN_PRIORITY_SHIFT		13 /* First 3 bits */
#define ULP_VLAN_PRIORITY_MASK		0x700
#define ULP_VLAN_TAG_MASK		0xFFF /* Last 12 bits */
#define ULP_UDP_PORT_VXLAN		4789

/* Utility function to skip the void items. */
static inline int32_t
ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
{
	if (!*item)
		return 0;
	if (increment)
		(*item)++;
	while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
		(*item)++;
	if (*item)
		return 1;
	return 0;
}

/* Utility function to update the field_bitmap */
static void
ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
				   uint32_t idx)
{
	struct ulp_rte_hdr_field *field;

	field = &params->hdr_field[idx];
	if (ulp_bitmap_notzero(field->mask, field->size)) {
		ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
		/* Not exact match */
		if (!ulp_bitmap_is_ones(field->mask, field->size))
			ULP_BITMAP_SET(params->fld_bitmap.bits,
				       BNXT_ULP_MATCH_TYPE_BITMASK_WM);
	} else {
		ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
	}
}

/* Utility function to copy field spec items */
static struct ulp_rte_hdr_field *
ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
			const void *buffer,
			uint32_t size)
{
	field->size = size;
	memcpy(field->spec, buffer, field->size);
	field++;
	return field;
}

/* Utility function to copy field masks items */
static void
ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
		       uint32_t *idx,
		       const void *buffer,
		       uint32_t size)
{
	struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];

	memcpy(field->mask, buffer, size);
	ulp_rte_parser_field_bitmap_update(params, *idx);
	*idx = *idx + 1;
}

/* Utility function to ignore field masks items */
static void
ulp_rte_prsr_mask_ignore(struct ulp_rte_parser_params *params __rte_unused,
			 uint32_t *idx,
			 const void *buffer __rte_unused,
			 uint32_t size __rte_unused)
{
	*idx = *idx + 1;
}
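/*
 * Illustrative example (not part of the build): how the mask utilities
 * above classify a match.  For a two-byte field such as the VLAN TCI:
 *
 *	mask = 0xFFFF -> ulp_bitmap_is_ones() is true, so only the
 *	                 field's index bit is set, i.e. an exact match.
 *	mask = 0x0FFF -> the index bit is set and, since the mask is not
 *	                 all ones, BNXT_ULP_MATCH_TYPE_BITMASK_WM is also
 *	                 set, selecting a wildcard-match template.
 *	mask = 0x0000 -> the index bit is cleared; the field is unused.
 */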
/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow items into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item *item = pattern;
	struct bnxt_ulp_rte_hdr_info *hdr_info;

	params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;

	/* Set the computed flags for no vlan tags before parsing */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);

	/* Parse all the items in the pattern */
	while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* get the header information from the flow_hdr_info table */
		hdr_info = &ulp_hdr_info[item->type];
		if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
			BNXT_TF_DBG(ERR,
				    "Truflow parser does not support type %d\n",
				    item->type);
			return BNXT_TF_RC_PARSE_ERR;
		} else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_hdr_func) {
				if (hdr_info->proto_hdr_func(item, params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		item++;
	}
	/* update the implied SVIF */
	return ulp_rte_parser_implicit_match_port_process(params);
}

/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow actions into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action *action_item = actions;
	struct bnxt_ulp_rte_act_info *hdr_info;

	/* Parse all the actions in the list */
	while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
		/* get the action information from the ulp_act_info table */
		hdr_info = &ulp_act_info[action_item->type];
		if (hdr_info->act_type ==
		    BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
			BNXT_TF_DBG(ERR,
				    "Truflow parser does not support act %u\n",
				    action_item->type);
			return BNXT_TF_RC_ERROR;
		} else if (hdr_info->act_type ==
		    BNXT_ULP_ACT_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_act_func) {
				if (hdr_info->proto_act_func(action_item,
							     params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		action_item++;
	}
	/* update the implied port details */
	ulp_rte_parser_implicit_act_port_process(params);
	return BNXT_TF_RC_SUCCESS;
}
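/*
 * Illustrative usage (not part of the build): both entry points walk an
 * END-terminated array and dispatch on the item/action type.  A minimal
 * pattern and action list exercising this dispatch could look as
 * follows; the spec/mask objects are placeholders.
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ipv4_spec, .mask = &ipv4_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	rc = bnxt_ulp_rte_parser_hdr_parse(pattern, params);
 *	if (rc == BNXT_TF_RC_SUCCESS)
 *		rc = bnxt_ulp_rte_parser_act_parse(actions, params);
 */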
/*
 * Function to handle the post processing of the computed
 * fields for the interface.
 */
static void
bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
{
	uint32_t ifindex;
	uint16_t port_id, parif;
	uint32_t mtype;
	enum bnxt_ulp_direction_type dir;

	/* get the direction details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);

	/* read the port id details */
	port_id = ULP_COMP_FLD_IDX_RD(params,
				      BNXT_ULP_CF_IDX_INCOMING_IF);
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return;
	}

	if (dir == BNXT_ULP_DIR_INGRESS) {
		/* Set port PARIF */
		if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
					  BNXT_ULP_PHY_PORT_PARIF, &parif)) {
			BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
			return;
		}
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
				    parif);
	} else {
		/* Get the match port type */
		mtype = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
		if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
					    1);
			/* Set VF func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_VF_FUNC_PARIF,
						  &parif)) {
				BNXT_TF_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
					    parif);

			/* populate the loopback parif */
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_LOOPBACK_PARIF,
					    BNXT_ULP_SYM_VF_FUNC_PARIF);

		} else {
			/* Set DRV func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_DRV_FUNC_PARIF,
						  &parif)) {
				BNXT_TF_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
					    parif);
		}
	}
}

/*
 * Function to handle the post processing of the parsing details
 */
int32_t
bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_direction_type dir;
	enum bnxt_ulp_intf_type match_port_type, act_port_type;
	uint32_t act_port_set;

	/* Get the computed details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
	act_port_type = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
	act_port_set = ULP_COMP_FLD_IDX_RD(params,
					   BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);

	/* set the flow direction in the proto and action header */
	if (dir == BNXT_ULP_DIR_EGRESS) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
	}

	/* calculate the VF to VF flag */
	if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);

	/* Update the decrement ttl computational fields */
	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
			     BNXT_ULP_ACTION_BIT_DEC_TTL)) {
		/*
		 * If the vxlan header is included and the vxlan decap
		 * action is not set, then decrement the tunnel ttl.
		 * Similarly, add GRE and NVGRE in the future.
		 */
		if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
				      BNXT_ULP_HDR_BIT_T_VXLAN) &&
		     !ULP_BITMAP_ISSET(params->act_bitmap.bits,
				       BNXT_ULP_ACTION_BIT_VXLAN_DECAP))) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
		} else {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
		}
	}

	/* Merge the hdr_fp_bit into the proto header bit */
	params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;

	/* Update the computed interface parameters */
	bnxt_ulp_comp_fld_intf_update(params);

	/* TBD: Handle the flow rejection scenarios */
	return 0;
}
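/*
 * Worked example (illustrative): a flow that matched a VXLAN tunnel
 * header (BNXT_ULP_HDR_BIT_T_VXLAN set) and carries DEC_TTL but no
 * VXLAN_DECAP action gets BNXT_ULP_CF_IDX_ACT_T_DEC_TTL = 1, i.e. the
 * tunnel ttl is decremented; any other DEC_TTL flow instead sets
 * BNXT_ULP_CF_IDX_ACT_DEC_TTL = 1 for the plain ttl.
 */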
/*
 * Function to compute the flow direction based on the match port details
 */
static void
bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_intf_type match_port_type;

	/* Get the match port type */
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);

	/* If ingress flow and the match port is a vf rep then dir is egress */
	if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
				    BNXT_ULP_DIR_EGRESS);
	} else {
		/* Assign the input direction */
		if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
					    BNXT_ULP_DIR_INGRESS);
		else
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
					    BNXT_ULP_DIR_EGRESS);
	}
}

/* Function to set the SVIF match field from the computed port details */
static int32_t
ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
			uint32_t ifindex,
			uint16_t mask)
{
	uint16_t svif;
	enum bnxt_ulp_direction_type dir;
	struct ulp_rte_hdr_field *hdr_field;
	enum bnxt_ulp_svif_type svif_type;
	enum bnxt_ulp_intf_type port_type;

	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
	    BNXT_ULP_INVALID_SVIF_VAL) {
		BNXT_TF_DBG(ERR,
			    "SVIF already set, multiple sources not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get port type details */
	port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
		BNXT_TF_DBG(ERR, "Invalid port type\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the match port type */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);

	/* compute the direction */
	bnxt_ulp_rte_parser_direction_compute(params);

	/* Get the computed direction */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_INGRESS) {
		svif_type = BNXT_ULP_PHY_PORT_SVIF;
	} else {
		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
			svif_type = BNXT_ULP_VF_FUNC_SVIF;
		else
			svif_type = BNXT_ULP_DRV_FUNC_SVIF;
	}
	ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
			     &svif);
	svif = rte_cpu_to_be_16(svif);
	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
	memcpy(hdr_field->spec, &svif, sizeof(svif));
	memcpy(hdr_field->mask, &mask, sizeof(mask));
	hdr_field->size = sizeof(svif);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
			    rte_be_to_cpu_16(svif));
	return BNXT_TF_RC_SUCCESS;
}
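/*
 * Worked example (illustrative): direction and SVIF-type selection.
 * A flow created with the ingress attribute whose match port is a VF
 * representor is computed as BNXT_ULP_DIR_EGRESS, so the SVIF is read
 * with BNXT_ULP_VF_FUNC_SVIF; an ingress flow on any other port type
 * stays BNXT_ULP_DIR_INGRESS and uses BNXT_ULP_PHY_PORT_SVIF.
 */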
/* Function to handle the parsing of the RTE port id */
int32_t
ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
{
	uint16_t port_id = 0;
	uint16_t svif_mask = 0xFFFF;
	uint32_t ifindex;
	int32_t rc = BNXT_TF_RC_ERROR;

	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
	    BNXT_ULP_INVALID_SVIF_VAL)
		return BNXT_TF_RC_SUCCESS;

	/* SVIF not set. So get the port id */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}

	/* Update the SVIF details */
	rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
	return rc;
}

/* Function to handle the implicit action port id */
int32_t
ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
{
	struct rte_flow_action action_item = {0};
	struct rte_flow_action_port_id port_id = {0};

	/* Read the action port set bit */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
		/* Already set, so just exit */
		return BNXT_TF_RC_SUCCESS;
	}
	port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
	action_item.conf = &port_id;

	/* Update the action port based on incoming port */
	ulp_rte_port_id_act_handler(&action_item, params);

	/* Reset the action port set bit */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item PF Header. */
int32_t
ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
		       struct ulp_rte_parser_params *params)
{
	uint16_t port_id = 0;
	uint16_t svif_mask = 0xFFFF;
	uint32_t ifindex;

	/* Get the implicit port id */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

	/* perform the conversion from dpdk port to bnxt ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
}

/* Function to handle the parsing of RTE Flow item VF Header. */
int32_t
ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
		       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vf *vf_spec = item->spec;
	const struct rte_flow_item_vf *vf_mask = item->mask;
	uint16_t mask = 0;
	uint32_t ifindex;
	int32_t rc = BNXT_TF_RC_PARSE_ERR;

	/* Get VF rte_flow_item for Port details */
	if (!vf_spec) {
		BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
		return rc;
	}
	if (!vf_mask) {
		BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
		return rc;
	}
	mask = vf_mask->id;

	/* perform the conversion from VF Func id to bnxt ifindex */
	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
						 vf_spec->id,
						 &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}
	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, mask);
}
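/*
 * Illustrative usage (not part of the build): the VF item handler above
 * requires both spec and mask.  A caller matching traffic from VF 2
 * could pass:
 *
 *	struct rte_flow_item_vf vf_spec = { .id = 2 };
 *	struct rte_flow_item_vf vf_mask = { .id = 0xffffffff };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_VF,
 *		.spec = &vf_spec,
 *		.mask = &vf_mask,
 *	};
 */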
/* Function to handle the parsing of RTE Flow item port id Header. */
int32_t
ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
			    struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_port_id *port_spec = item->spec;
	const struct rte_flow_item_port_id *port_mask = item->mask;
	uint16_t mask = 0;
	int32_t rc = BNXT_TF_RC_PARSE_ERR;
	uint32_t ifindex;

	if (!port_spec) {
		BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
		return rc;
	}
	if (!port_mask) {
		BNXT_TF_DBG(ERR, "ParseErr:Port id mask is not valid\n");
		return rc;
	}
	mask = port_mask->id;

	/* perform the conversion from dpdk port to bnxt ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_spec->id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}
	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, mask);
}

/* Function to handle the parsing of RTE Flow item phy port Header. */
int32_t
ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
			     struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_phy_port *port_spec = item->spec;
	const struct rte_flow_item_phy_port *port_mask = item->mask;
	uint16_t mask = 0;
	int32_t rc = BNXT_TF_RC_ERROR;
	uint16_t svif;
	enum bnxt_ulp_direction_type dir;
	struct ulp_rte_hdr_field *hdr_field;

	/* Copy the rte_flow_item for phy port into hdr_field */
	if (!port_spec) {
		BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
		return rc;
	}
	if (!port_mask) {
		BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
		return rc;
	}
	mask = port_mask->index;

	/* Update the match port type */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
			    BNXT_ULP_INTF_TYPE_PHY_PORT);

	/* Compute the Hw direction */
	bnxt_ulp_rte_parser_direction_compute(params);

	/* Direction validation */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_EGRESS) {
		BNXT_TF_DBG(ERR,
			    "Parse Err:Phy ports are valid only for ingress\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* Get the physical port details from port db */
	rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
					   &svif);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to get port details\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* Update the SVIF details */
	svif = rte_cpu_to_be_16(svif);
	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
	memcpy(hdr_field->spec, &svif, sizeof(svif));
	memcpy(hdr_field->mask, &mask, sizeof(mask));
	hdr_field->size = sizeof(svif);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
			    rte_be_to_cpu_16(svif));
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
			     uint16_t type, uint32_t in_flag)
{
	if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_IPV4);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_IPV4);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
		}
	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_IPV6);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_IPV6);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
		}
	}
}

/* Internal Function to identify broadcast or multicast packets */
static int32_t
ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
{
	if (rte_is_multicast_ether_addr(eth_addr) ||
	    rte_is_broadcast_ether_addr(eth_addr)) {
		BNXT_TF_DBG(DEBUG,
			    "No support for bcast or mcast addr offload\n");
		return 1;
	}
	return 0;
}

/* Function to handle the parsing of RTE Flow item Ethernet Header. */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_eth *eth_spec = item->spec;
	const struct rte_flow_item_eth *eth_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;

	/*
	 * Copy the rte_flow_item for eth into hdr_field using ethernet
	 * header fields
	 */
	if (eth_spec) {
		size = sizeof(eth_spec->dst.addr_bytes);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						eth_spec->dst.addr_bytes,
						size);
		/* Todo: work around to avoid multicast and broadcast addr */
		if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
			return BNXT_TF_RC_PARSE_ERR;

		size = sizeof(eth_spec->src.addr_bytes);
		field = ulp_rte_parser_fld_copy(field,
						eth_spec->src.addr_bytes,
						size);
		/* Todo: work around to avoid multicast and broadcast addr */
		if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
			return BNXT_TF_RC_PARSE_ERR;

		field = ulp_rte_parser_fld_copy(field,
						&eth_spec->type,
						sizeof(eth_spec->type));
		eth_type = eth_spec->type;
	}
	if (eth_mask) {
		ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
				       sizeof(eth_mask->dst.addr_bytes));
		ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
				       sizeof(eth_mask->src.addr_bytes));
		ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
				       sizeof(eth_mask->type));
	}
	/* Add number of vlan header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
	params->vlan_idx = params->field_idx;
	params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;

	/* Update the protocol hdr bitmap */
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH)) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);

	return BNXT_TF_RC_SUCCESS;
}
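/*
 * Illustrative usage (not part of the build): the ETH handler above
 * copies three fields (dst, src, type) into consecutive hdr_field
 * slots.  Matching IPv4 over a known unicast destination MAC could look
 * as follows; a unicast dst is required since bcast/mcast is rejected.
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *		.type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *		.type = RTE_BE16(0xffff),
 *	};
 */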
/* Function to handle the parsing of RTE Flow item Vlan Header. */
int32_t
ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vlan *vlan_spec = item->spec;
	const struct rte_flow_item_vlan *vlan_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bit;
	uint32_t idx = params->vlan_idx;
	uint16_t vlan_tag, priority;
	uint32_t outer_vtag_num;
	uint32_t inner_vtag_num;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;

	/*
	 * Copy the rte_flow_item for vlan into hdr_field using Vlan
	 * header fields
	 */
	if (vlan_spec) {
		vlan_tag = ntohs(vlan_spec->tci);
		priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag &= ULP_VLAN_TAG_MASK;
		vlan_tag = htons(vlan_tag);

		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&priority,
						sizeof(priority));
		field = ulp_rte_parser_fld_copy(field,
						&vlan_tag,
						sizeof(vlan_tag));
		field = ulp_rte_parser_fld_copy(field,
						&vlan_spec->inner_type,
						sizeof(vlan_spec->inner_type));
		eth_type = vlan_spec->inner_type;
	}

	if (vlan_mask) {
		vlan_tag = ntohs(vlan_mask->tci);
		priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag &= 0xfff;

		/*
		 * The storage for the priority and the vlan tag is two
		 * bytes each.  If the 3-bit priority mask is all ones,
		 * extend it to the remaining 13 bits so that the field is
		 * treated as an exact match; likewise for the 12-bit vlan
		 * tag mask.
		 */
		if (priority == ULP_VLAN_PRIORITY_MASK)
			priority |= ~ULP_VLAN_PRIORITY_MASK;
		if (vlan_tag == ULP_VLAN_TAG_MASK)
			vlan_tag |= ~ULP_VLAN_TAG_MASK;
		vlan_tag = htons(vlan_tag);

		/*
		 * The priority field is currently ignored since OVS sets
		 * it as a wildcard match, which is not supported.  This is
		 * a workaround and shall be addressed in the future.
		 */
753 */ 754 ulp_rte_prsr_mask_ignore(params, &idx, &priority, 755 sizeof(priority)); 756 757 ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag, 758 sizeof(vlan_tag)); 759 ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type, 760 sizeof(vlan_mask->inner_type)); 761 } 762 /* Set the vlan index to new incremented value */ 763 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM; 764 765 /* Get the outer tag and inner tag counts */ 766 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params, 767 BNXT_ULP_CF_IDX_O_VTAG_NUM); 768 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params, 769 BNXT_ULP_CF_IDX_I_VTAG_NUM); 770 771 /* Update the hdr_bitmap of the vlans */ 772 hdr_bit = ¶ms->hdr_bitmap; 773 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) && 774 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) && 775 !outer_vtag_num) { 776 /* Update the vlan tag num */ 777 outer_vtag_num++; 778 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM, 779 outer_vtag_num); 780 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0); 781 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1); 782 ULP_BITMAP_SET(params->hdr_bitmap.bits, 783 BNXT_ULP_HDR_BIT_OO_VLAN); 784 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) && 785 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) && 786 outer_vtag_num == 1) { 787 /* update the vlan tag num */ 788 outer_vtag_num++; 789 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM, 790 outer_vtag_num); 791 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1); 792 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0); 793 ULP_BITMAP_SET(params->hdr_bitmap.bits, 794 BNXT_ULP_HDR_BIT_OI_VLAN); 795 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) && 796 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) && 797 !inner_vtag_num) { 798 /* update the vlan tag num */ 799 inner_vtag_num++; 800 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM, 801 inner_vtag_num); 802 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0); 803 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1); 804 ULP_BITMAP_SET(params->hdr_bitmap.bits, 805 BNXT_ULP_HDR_BIT_IO_VLAN); 806 inner_flag = 1; 807 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) && 808 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) && 809 inner_vtag_num == 1) { 810 /* update the vlan tag num */ 811 inner_vtag_num++; 812 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM, 813 inner_vtag_num); 814 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1); 815 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0); 816 ULP_BITMAP_SET(params->hdr_bitmap.bits, 817 BNXT_ULP_HDR_BIT_II_VLAN); 818 inner_flag = 1; 819 } else { 820 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n"); 821 return BNXT_TF_RC_ERROR; 822 } 823 /* Update the field protocol hdr bitmap */ 824 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag); 825 return BNXT_TF_RC_SUCCESS; 826 } 827 828 /* Function to handle the update of proto header based on field values */ 829 static void 830 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param, 831 uint8_t proto, uint32_t in_flag) 832 { 833 if (proto == IPPROTO_UDP) { 834 if (in_flag) { 835 ULP_BITMAP_SET(param->hdr_fp_bit.bits, 836 BNXT_ULP_HDR_BIT_I_UDP); 837 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1); 838 } else { 839 ULP_BITMAP_SET(param->hdr_fp_bit.bits, 840 BNXT_ULP_HDR_BIT_O_UDP); 841 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1); 842 } 843 } else if (proto == 
/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
			     uint8_t proto, uint32_t in_flag)
{
	if (proto == IPPROTO_UDP) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_UDP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_UDP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
		}
	} else if (proto == IPPROTO_TCP) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_TCP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_TCP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
		}
	}
}

/* Function to handle the parsing of RTE Flow item IPV4 Header. */
int32_t
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
	const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint8_t proto = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate that there is no third L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
	 * header fields
	 */
	if (ipv4_spec) {
		size = sizeof(ipv4_spec->hdr.version_ihl);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&ipv4_spec->hdr.version_ihl,
						size);
		size = sizeof(ipv4_spec->hdr.type_of_service);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.type_of_service,
						size);
		size = sizeof(ipv4_spec->hdr.total_length);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.total_length,
						size);
		size = sizeof(ipv4_spec->hdr.packet_id);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.packet_id,
						size);
		size = sizeof(ipv4_spec->hdr.fragment_offset);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.fragment_offset,
						size);
		size = sizeof(ipv4_spec->hdr.time_to_live);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.time_to_live,
						size);
		size = sizeof(ipv4_spec->hdr.next_proto_id);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.next_proto_id,
						size);
		proto = ipv4_spec->hdr.next_proto_id;
		size = sizeof(ipv4_spec->hdr.hdr_checksum);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.hdr_checksum,
						size);
		size = sizeof(ipv4_spec->hdr.src_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.src_addr,
						size);
		size = sizeof(ipv4_spec->hdr.dst_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.dst_addr,
						size);
	}
	if (ipv4_mask) {
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.version_ihl,
				       sizeof(ipv4_mask->hdr.version_ihl));
		/*
		 * The tos field is currently ignored since OVS sets it as
		 * a wildcard match, which is not supported.  This is a
		 * workaround and shall be addressed in the future.
		 */
933 */ 934 ulp_rte_prsr_mask_ignore(params, &idx, 935 &ipv4_mask->hdr.type_of_service, 936 sizeof(ipv4_mask->hdr.type_of_service) 937 ); 938 939 ulp_rte_prsr_mask_copy(params, &idx, 940 &ipv4_mask->hdr.total_length, 941 sizeof(ipv4_mask->hdr.total_length)); 942 ulp_rte_prsr_mask_copy(params, &idx, 943 &ipv4_mask->hdr.packet_id, 944 sizeof(ipv4_mask->hdr.packet_id)); 945 ulp_rte_prsr_mask_copy(params, &idx, 946 &ipv4_mask->hdr.fragment_offset, 947 sizeof(ipv4_mask->hdr.fragment_offset)); 948 ulp_rte_prsr_mask_copy(params, &idx, 949 &ipv4_mask->hdr.time_to_live, 950 sizeof(ipv4_mask->hdr.time_to_live)); 951 ulp_rte_prsr_mask_copy(params, &idx, 952 &ipv4_mask->hdr.next_proto_id, 953 sizeof(ipv4_mask->hdr.next_proto_id)); 954 ulp_rte_prsr_mask_copy(params, &idx, 955 &ipv4_mask->hdr.hdr_checksum, 956 sizeof(ipv4_mask->hdr.hdr_checksum)); 957 ulp_rte_prsr_mask_copy(params, &idx, 958 &ipv4_mask->hdr.src_addr, 959 sizeof(ipv4_mask->hdr.src_addr)); 960 ulp_rte_prsr_mask_copy(params, &idx, 961 &ipv4_mask->hdr.dst_addr, 962 sizeof(ipv4_mask->hdr.dst_addr)); 963 } 964 /* Add the number of ipv4 header elements */ 965 params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM; 966 967 /* Set the ipv4 header bitmap and computed l3 header bitmaps */ 968 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) || 969 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) { 970 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4); 971 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1); 972 inner_flag = 1; 973 } else { 974 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4); 975 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1); 976 } 977 978 /* Update the field protocol hdr bitmap */ 979 ulp_rte_l3_proto_type_update(params, proto, inner_flag); 980 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt); 981 return BNXT_TF_RC_SUCCESS; 982 } 983 984 /* Function to handle the parsing of RTE Flow item IPV6 Header */ 985 int32_t 986 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item, 987 struct ulp_rte_parser_params *params) 988 { 989 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec; 990 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask; 991 struct ulp_rte_hdr_field *field; 992 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; 993 uint32_t idx = params->field_idx; 994 uint32_t size; 995 uint32_t vtcf, vtcf_mask; 996 uint8_t proto = 0; 997 uint32_t inner_flag = 0; 998 uint32_t cnt; 999 1000 /* validate there are no 3rd L3 header */ 1001 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT); 1002 if (cnt == 2) { 1003 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n"); 1004 return BNXT_TF_RC_ERROR; 1005 } 1006 1007 /* 1008 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6 1009 * header fields 1010 */ 1011 if (ipv6_spec) { 1012 size = sizeof(ipv6_spec->hdr.vtc_flow); 1013 1014 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow); 1015 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx], 1016 &vtcf, 1017 size); 1018 1019 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow); 1020 field = ulp_rte_parser_fld_copy(field, 1021 &vtcf, 1022 size); 1023 1024 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow); 1025 field = ulp_rte_parser_fld_copy(field, 1026 &vtcf, 1027 size); 1028 1029 size = sizeof(ipv6_spec->hdr.payload_len); 1030 field = ulp_rte_parser_fld_copy(field, 1031 &ipv6_spec->hdr.payload_len, 1032 size); 1033 size = sizeof(ipv6_spec->hdr.proto); 1034 field = ulp_rte_parser_fld_copy(field, 1035 &ipv6_spec->hdr.proto, 1036 
		proto = ipv6_spec->hdr.proto;
		size = sizeof(ipv6_spec->hdr.hop_limits);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.hop_limits,
						size);
		size = sizeof(ipv6_spec->hdr.src_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.src_addr,
						size);
		size = sizeof(ipv6_spec->hdr.dst_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.dst_addr,
						size);
	}
	if (ipv6_mask) {
		size = sizeof(ipv6_mask->hdr.vtc_flow);

		vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vtcf_mask,
				       size);
		/*
		 * The TC and flow label fields are currently ignored since
		 * OVS sets them as wildcard matches, which is not
		 * supported.  This is a workaround and shall be addressed
		 * in the future.
		 */
		vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
		ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
		vtcf_mask =
			BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
		ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);

		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.payload_len,
				       sizeof(ipv6_mask->hdr.payload_len));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.proto,
				       sizeof(ipv6_mask->hdr.proto));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.hop_limits,
				       sizeof(ipv6_mask->hdr.hop_limits));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.src_addr,
				       sizeof(ipv6_mask->hdr.src_addr));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.dst_addr,
				       sizeof(ipv6_mask->hdr.dst_addr));
	}
	/* add number of ipv6 header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;

	/* Set the ipv6 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
	}

	/* Update the field protocol hdr bitmap */
	ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);

	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
			     uint16_t dst_port)
{
	if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN))
		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
			       BNXT_ULP_HDR_BIT_T_VXLAN);
}
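/*
 * Worked example (illustrative): the UDP destination port arrives in
 * network byte order, so the VXLAN check above compares big-endian
 * values.  Port 4789 (ULP_UDP_PORT_VXLAN) is 0x12B5, i.e. the bytes
 * {0x12, 0xB5} on the wire:
 *
 *	udp_spec.hdr.dst_port = rte_cpu_to_be_16(4789);
 *	// dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)
 *	// -> BNXT_ULP_HDR_BIT_T_VXLAN is set in hdr_fp_bit
 */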
/* Function to handle the parsing of RTE Flow item UDP Header. */
int32_t
ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_udp *udp_spec = item->spec;
	const struct rte_flow_item_udp *udp_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint16_t dst_port = 0;
	uint32_t cnt;

	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for udp into hdr_field using udp
	 * header fields
	 */
	if (udp_spec) {
		size = sizeof(udp_spec->hdr.src_port);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&udp_spec->hdr.src_port,
						size);
		size = sizeof(udp_spec->hdr.dst_port);
		field = ulp_rte_parser_fld_copy(field,
						&udp_spec->hdr.dst_port,
						size);
		dst_port = udp_spec->hdr.dst_port;
		size = sizeof(udp_spec->hdr.dgram_len);
		field = ulp_rte_parser_fld_copy(field,
						&udp_spec->hdr.dgram_len,
						size);
		size = sizeof(udp_spec->hdr.dgram_cksum);
		field = ulp_rte_parser_fld_copy(field,
						&udp_spec->hdr.dgram_cksum,
						size);
	}
	if (udp_mask) {
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.src_port,
				       sizeof(udp_mask->hdr.src_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.dst_port,
				       sizeof(udp_mask->hdr.dst_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.dgram_len,
				       sizeof(udp_mask->hdr.dgram_len));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.dgram_cksum,
				       sizeof(udp_mask->hdr.dgram_cksum));
	}

	/* Add number of UDP header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;

	/* Set the udp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
		/* Update the field protocol hdr bitmap */
		ulp_rte_l4_proto_type_update(params, dst_port);
	}
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow item TCP Header. */
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_tcp *tcp_spec = item->spec;
	const struct rte_flow_item_tcp *tcp_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint32_t cnt;

	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for tcp into hdr_field using tcp
	 * header fields
	 */
	if (tcp_spec) {
		size = sizeof(tcp_spec->hdr.src_port);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&tcp_spec->hdr.src_port,
						size);
		size = sizeof(tcp_spec->hdr.dst_port);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.dst_port,
						size);
		size = sizeof(tcp_spec->hdr.sent_seq);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.sent_seq,
						size);
		size = sizeof(tcp_spec->hdr.recv_ack);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.recv_ack,
						size);
		size = sizeof(tcp_spec->hdr.data_off);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.data_off,
						size);
		size = sizeof(tcp_spec->hdr.tcp_flags);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.tcp_flags,
						size);
		size = sizeof(tcp_spec->hdr.rx_win);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.rx_win,
						size);
		size = sizeof(tcp_spec->hdr.cksum);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.cksum,
						size);
		size = sizeof(tcp_spec->hdr.tcp_urp);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.tcp_urp,
						size);
	} else {
		idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
	}

	if (tcp_mask) {
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.src_port,
				       sizeof(tcp_mask->hdr.src_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.dst_port,
				       sizeof(tcp_mask->hdr.dst_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.sent_seq,
				       sizeof(tcp_mask->hdr.sent_seq));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.recv_ack,
				       sizeof(tcp_mask->hdr.recv_ack));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.data_off,
				       sizeof(tcp_mask->hdr.data_off));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.tcp_flags,
				       sizeof(tcp_mask->hdr.tcp_flags));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.rx_win,
				       sizeof(tcp_mask->hdr.rx_win));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.cksum,
				       sizeof(tcp_mask->hdr.cksum));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.tcp_urp,
				       sizeof(tcp_mask->hdr.tcp_urp));
	}
	/* add number of TCP header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;

	/* Set the tcp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
	}
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow item Vxlan Header. */
int32_t
ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
	const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;

	/*
	 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
	 * header fields
	 */
	if (vxlan_spec) {
		size = sizeof(vxlan_spec->flags);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&vxlan_spec->flags,
						size);
		size = sizeof(vxlan_spec->rsvd0);
		field = ulp_rte_parser_fld_copy(field,
						&vxlan_spec->rsvd0,
						size);
		size = sizeof(vxlan_spec->vni);
		field = ulp_rte_parser_fld_copy(field,
						&vxlan_spec->vni,
						size);
		size = sizeof(vxlan_spec->rsvd1);
		field = ulp_rte_parser_fld_copy(field,
						&vxlan_spec->rsvd1,
						size);
	}
	if (vxlan_mask) {
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vxlan_mask->flags,
				       sizeof(vxlan_mask->flags));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vxlan_mask->rsvd0,
				       sizeof(vxlan_mask->rsvd0));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vxlan_mask->vni,
				       sizeof(vxlan_mask->vni));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vxlan_mask->rsvd1,
				       sizeof(vxlan_mask->rsvd1));
	}
	/* Add number of vxlan header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;

	/* Update the hdr_bitmap with vxlan */
	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item void Header */
int32_t
ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action void Header. */
int32_t
ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action Mark Header. */
int32_t
ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
			 struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_mark *mark;
	struct ulp_rte_act_bitmap *act = &param->act_bitmap;
	uint32_t mark_id;

	mark = action_item->conf;
	if (mark) {
		mark_id = tfp_cpu_to_be_32(mark->id);
		memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
		       &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);

		/* Update the act_bitmap with mark */
		ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
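/*
 * Illustrative usage (not part of the build): the mark handler above
 * stores the 32-bit id big-endian in the action properties, so a caller
 * tagging matched packets with id 0x1234 would pass:
 *
 *	struct rte_flow_action_mark mark = { .id = 0x1234 };
 *	struct rte_flow_action act = {
 *		.type = RTE_FLOW_ACTION_TYPE_MARK,
 *		.conf = &mark,
 *	};
 */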
/* Function to handle the parsing of RTE Flow action RSS Header. */
int32_t
ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
			struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_rss *rss = action_item->conf;

	if (rss) {
		/* Update the act_bitmap with rss */
		ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
int32_t
ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
				struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_vxlan_encap *vxlan_encap;
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	struct rte_flow_item_vxlan vxlan_spec;
	uint32_t vlan_num = 0, vlan_size = 0;
	uint32_t ip_size = 0, ip_type = 0;
	uint32_t vxlan_size = 0;
	uint8_t *buff;
	/* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
	const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
					0x00, 0x40, 0x11};
	/* IPv6 header per byte - vtc-flow,flow,zero,nexthdr-ttl */
	const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
					0x00, 0x11, 0xf6};
	struct ulp_rte_act_bitmap *act = &params->act_bitmap;
	struct ulp_rte_act_prop *ap = &params->act_prop;
	const uint8_t *tmp_buff;

	vxlan_encap = action_item->conf;
	if (!vxlan_encap) {
		BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
		return BNXT_TF_RC_ERROR;
	}

	item = vxlan_encap->definition;
	if (!item) {
		BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
		return BNXT_TF_RC_ERROR;
	}

	if (!ulp_rte_item_skip_void(&item, 0))
		return BNXT_TF_RC_ERROR;

	/* must have ethernet header */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
		return BNXT_TF_RC_ERROR;
	}
	eth_spec = item->spec;
	buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
	ulp_encap_buffer_copy(buff,
			      eth_spec->dst.addr_bytes,
			      BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
			      ULP_BUFFER_ALIGN_8_BYTE);

	buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
	ulp_encap_buffer_copy(buff,
			      eth_spec->src.addr_bytes,
			      BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
			      ULP_BUFFER_ALIGN_8_BYTE);

	/* Goto the next item */
	if (!ulp_rte_item_skip_void(&item, 1))
		return BNXT_TF_RC_ERROR;

	/* May have vlan header */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_num++;
		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
		ulp_encap_buffer_copy(buff,
				      item->spec,
				      sizeof(struct rte_flow_item_vlan),
				      ULP_BUFFER_ALIGN_8_BYTE);

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	}

	/* may have two vlan headers */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_num++;
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
		       sizeof(struct rte_flow_item_vlan)],
		       item->spec,
		       sizeof(struct rte_flow_item_vlan));
		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	}
	/* Update the vlan count and size if one or more were present */
	if (vlan_num) {
		vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
		vlan_num = tfp_cpu_to_be_32(vlan_num);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
		       &vlan_num,
		       sizeof(uint32_t));
		vlan_size = tfp_cpu_to_be_32(vlan_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
		       &vlan_size,
		       sizeof(uint32_t));
	}

	/* L3 must be IPv4 or IPv6 */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		ipv4_spec = item->spec;
		ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;

		/* copy the ipv4 details */
		if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
					BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
			ulp_encap_buffer_copy(buff,
					      def_ipv4_hdr,
					      BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
					      BNXT_ULP_ENCAP_IPV4_ID_PROTO,
					      ULP_BUFFER_ALIGN_8_BYTE);
		} else {
			/* The total length is ignored in the ip hdr. */
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
			tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
			ulp_encap_buffer_copy(buff,
					      tmp_buff,
					      BNXT_ULP_ENCAP_IPV4_ID_PROTO,
					      ULP_BUFFER_ALIGN_8_BYTE);
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
						BNXT_ULP_ENCAP_IPV4_ID_PROTO];
			ulp_encap_buffer_copy(buff,
					      &ipv4_spec->hdr.version_ihl,
					      BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
					      ULP_BUFFER_ALIGN_8_BYTE);
		}

		/* Update the dst ip address in ip encap buffer */
		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
					BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
					BNXT_ULP_ENCAP_IPV4_ID_PROTO];
		ulp_encap_buffer_copy(buff,
				      (const uint8_t *)&ipv4_spec->hdr.dst_addr,
				      sizeof(ipv4_spec->hdr.dst_addr),
				      ULP_BUFFER_ALIGN_8_BYTE);

		/* Update the src ip address */
		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
					BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
					sizeof(ipv4_spec->hdr.src_addr)];
		ulp_encap_buffer_copy(buff,
				      (const uint8_t *)&ipv4_spec->hdr.src_addr,
				      sizeof(ipv4_spec->hdr.src_addr),
				      ULP_BUFFER_ALIGN_8_BYTE);

		/* Update the ip size details */
		ip_size = tfp_cpu_to_be_32(ip_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
		       &ip_size, sizeof(uint32_t));

		/* update the ip type */
		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
		       &ip_type, sizeof(uint32_t));

		/* update the computed field to notify it is ipv4 header */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
				    1);

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		ipv6_spec = item->spec;
		ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;

		/* copy the ipv6 details */
		tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
		if (ulp_buffer_is_empty(tmp_buff,
					BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
			ulp_encap_buffer_copy(buff,
					      def_ipv6_hdr,
					      sizeof(def_ipv6_hdr),
					      ULP_BUFFER_ALIGN_8_BYTE);
		} else {
			/* The payload length is ignored in the ip hdr. */
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
			tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
			ulp_encap_buffer_copy(buff,
					      tmp_buff,
					      BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
					      ULP_BUFFER_ALIGN_8_BYTE);
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
						BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
						BNXT_ULP_ENCAP_IPV6_DO];
			tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
			ulp_encap_buffer_copy(buff,
					      tmp_buff,
					      BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
					      ULP_BUFFER_ALIGN_8_BYTE);
		}
		/* Update the dst ip address in ip encap buffer */
		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
					sizeof(def_ipv6_hdr)];
		ulp_encap_buffer_copy(buff,
				      (const uint8_t *)ipv6_spec->hdr.dst_addr,
				      sizeof(ipv6_spec->hdr.dst_addr),
				      ULP_BUFFER_ALIGN_8_BYTE);

		/* Update the src ip address */
		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
		ulp_encap_buffer_copy(buff,
				      (const uint8_t *)ipv6_spec->hdr.src_addr,
				      sizeof(ipv6_spec->hdr.src_addr),
				      ULP_BUFFER_ALIGN_16_BYTE);

		/* Update the ip size details */
		ip_size = tfp_cpu_to_be_32(ip_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
		       &ip_size, sizeof(uint32_t));

		/* update the ip type */
		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
		       &ip_type, sizeof(uint32_t));

		/* update the computed field to notify it is ipv6 header */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
				    1);

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	} else {
		BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
		return BNXT_TF_RC_ERROR;
	}

	/* L4 is UDP */
	if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
		BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
		return BNXT_TF_RC_ERROR;
	}
	/* copy the udp details */
	ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
			      item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
			      ULP_BUFFER_ALIGN_8_BYTE);

	if (!ulp_rte_item_skip_void(&item, 1))
		return BNXT_TF_RC_ERROR;

	/* Finally VXLAN */
	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
		BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
		return BNXT_TF_RC_ERROR;
	}
	vxlan_size = sizeof(struct rte_flow_item_vxlan);
	/* copy the vxlan details */
	memcpy(&vxlan_spec, item->spec, vxlan_size);
	vxlan_spec.flags = 0x08;
	buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
	if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
		ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
				      vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
	} else {
		ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
				      vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
		ulp_encap_buffer_copy(buff + (vxlan_size / 2),
				      (const uint8_t *)&vxlan_spec.vni,
				      vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
	}
	vxlan_size = tfp_cpu_to_be_32(vxlan_size);
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
	       &vxlan_size, sizeof(uint32_t));

	/* update the act_bitmap with vxlan encap */
	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
	return BNXT_TF_RC_SUCCESS;
}
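/*
 * Illustrative usage (not part of the build): the encap handler above
 * walks the definition list and insists on the order
 * ETH -> [VLAN [VLAN]] -> IPV4|IPV6 -> UDP -> VXLAN, with VOID items
 * skipped.  A minimal IPv4 template could therefore be (spec objects
 * are placeholders):
 *
 *	struct rte_flow_item encap_def[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,   .spec = &enc_eth },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,  .spec = &enc_ipv4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,   .spec = &enc_udp },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &enc_vxlan },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_vxlan_encap conf = {
 *		.definition = encap_def,
 *	};
 */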
/* Function to handle the parsing of RTE Flow action vxlan_decap Header. */
int32_t
ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
				__rte_unused,
				struct ulp_rte_parser_params *params)
{
	/* update the act_bitmap with vxlan decap */
	ULP_BITMAP_SET(params->act_bitmap.bits,
		       BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action drop Header. */
int32_t
ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with drop */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action count. */
int32_t
ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_count *act_count;
	struct ulp_rte_act_prop *act_prop = &params->act_prop;

	act_count = action_item->conf;
	if (act_count) {
		if (act_count->shared) {
			BNXT_TF_DBG(ERR,
				    "Parse Error:Shared count not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
		       &act_count->id,
		       BNXT_ULP_ACT_PROP_SZ_COUNT);
	}

	/* Update the act_bitmap with count */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of action ports. */
static int32_t
ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
			    uint32_t ifindex)
{
	enum bnxt_ulp_direction_type dir;
	uint16_t pid_s;
	uint32_t pid;
	struct ulp_rte_act_prop *act = &param->act_prop;
	enum bnxt_ulp_intf_type port_type;
	uint32_t vnic_type;

	/* Get the direction */
	dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_EGRESS) {
		/* For egress direction, fill vport */
		if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
			return BNXT_TF_RC_ERROR;

		pid = pid_s;
		pid = rte_cpu_to_be_32(pid);
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
	} else {
		/* For ingress direction, fill vnic */
		port_type = ULP_COMP_FLD_IDX_RD(param,
						BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
			vnic_type = BNXT_ULP_VF_FUNC_VNIC;
		else
			vnic_type = BNXT_ULP_DRV_FUNC_VNIC;

		if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
						 vnic_type, &pid_s))
			return BNXT_TF_RC_ERROR;

		pid = pid_s;
		pid = rte_cpu_to_be_32(pid);
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
	}

	/* Update the action port set bit */
	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action PF. */
int32_t
ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
		       struct ulp_rte_parser_params *params)
{
	uint32_t port_id;
	uint32_t ifindex;
	enum bnxt_ulp_intf_type intf_type;

	/* Get the port id of the current device */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

	/* Get the port db ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "Invalid port id\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Check that the port is a PF port */
	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
		BNXT_TF_DBG(ERR, "Port is not a PF port\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Update the action properties */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
	return ulp_rte_parser_act_port_set(params, ifindex);
}

/* Function to handle the parsing of RTE Flow action VF. */
int32_t
ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
		       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_vf *vf_action;
	uint32_t ifindex;
	enum bnxt_ulp_intf_type intf_type;

	vf_action = action_item->conf;
	if (!vf_action) {
		BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	if (vf_action->original) {
		BNXT_TF_DBG(ERR, "ParseErr: VF Original not supported\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* Check that the VF id maps to a valid interface */
	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
						 &ifindex)) {
		BNXT_TF_DBG(ERR, "VF is not a valid interface\n");
		return BNXT_TF_RC_ERROR;
	}
	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
	    intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
		BNXT_TF_DBG(ERR, "Port is not a VF port\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the action properties */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
	return ulp_rte_parser_act_port_set(params, ifindex);
}
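
/*
 * Illustrative sketch (not part of this file): a redirect-to-VF action
 * as accepted by ulp_rte_vf_act_handler above; "original" must be zero
 * and the VF id below is a hypothetical placeholder.
 *
 *	struct rte_flow_action_vf vf = { .original = 0, .id = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */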

/* Function to handle the parsing of RTE Flow action port_id. */
int32_t
ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
			    struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_port_id *port_id = act_item->conf;
	uint32_t ifindex;
	enum bnxt_ulp_intf_type intf_type;

	if (!port_id) {
		BNXT_TF_DBG(ERR,
			    "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	if (port_id->original) {
		BNXT_TF_DBG(ERR,
			    "ParseErr: Portid Original not supported\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* Get the port db ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "Invalid port id\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get the intf type */
	intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
	if (!intf_type) {
		BNXT_TF_DBG(ERR, "Invalid port type\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Set the action port */
	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
	return ulp_rte_parser_act_port_set(param, ifindex);
}

/* Function to handle the parsing of RTE Flow action phy_port. */
int32_t
ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
			     struct ulp_rte_parser_params *prm)
{
	const struct rte_flow_action_phy_port *phy_port;
	uint32_t pid;
	int32_t rc;
	uint16_t pid_s;
	enum bnxt_ulp_direction_type dir;

	phy_port = action_item->conf;
	if (!phy_port) {
		BNXT_TF_DBG(ERR,
			    "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	if (phy_port->original) {
		BNXT_TF_DBG(ERR,
			    "Parse Err: Port Original not supported\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir != BNXT_ULP_DIR_EGRESS) {
		BNXT_TF_DBG(ERR,
			    "Parse Err: Phy ports are valid only for egress\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	/* Get the physical port details from port db */
	rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
					    &pid_s);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to get port details\n");
		return -EINVAL;
	}

	pid = pid_s;
	pid = rte_cpu_to_be_32(pid);
	memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
	       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);

	/* Update the action port set bit */
	ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
	ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
			    BNXT_ULP_INTF_TYPE_PHY_PORT);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action pop vlan. */
int32_t
ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
				struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with pop */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
	return BNXT_TF_RC_SUCCESS;
}
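
/*
 * Illustrative sketch (not part of this file): a phy_port redirect as
 * accepted by ulp_rte_phy_port_act_handler above; it is only valid on
 * egress flows and the port index below is a hypothetical placeholder.
 *
 *	struct rte_flow_action_phy_port phy = { .original = 0, .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_PHY_PORT, .conf = &phy },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */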

/* Function to handle the parsing of RTE Flow action push vlan. */
int32_t
ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
				 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_of_push_vlan *push_vlan;
	uint16_t ethertype;
	struct ulp_rte_act_prop *act = &params->act_prop;

	push_vlan = action_item->conf;
	if (push_vlan) {
		ethertype = push_vlan->ethertype;
		if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
			BNXT_TF_DBG(ERR,
				    "Parse Err: Ethertype not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
		       &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
		/* Update the hdr_bitmap with push vlan */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACTION_BIT_PUSH_VLAN);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set vlan id. */
int32_t
ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
				    struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
	uint32_t vid;
	struct ulp_rte_act_prop *act = &params->act_prop;

	vlan_vid = action_item->conf;
	if (vlan_vid && vlan_vid->vlan_vid) {
		vid = vlan_vid->vlan_vid;
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
		       &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
		/* Update the hdr_bitmap with vlan vid */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
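
/*
 * Illustrative sketch (not part of this file): pushing a VLAN tag and
 * setting its vid, as accepted by the two handlers above; only the
 * 0x8100 ethertype passes the check and the vid is a hypothetical
 * placeholder.
 *
 *	struct rte_flow_action_of_push_vlan push = {
 *		.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
 *	};
 *	struct rte_flow_action_of_set_vlan_vid vid = {
 *		.vlan_vid = RTE_BE16(100),
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push },
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */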

/* Function to handle the parsing of RTE Flow action set vlan pcp. */
int32_t
ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
				    struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
	uint8_t pcp;
	struct ulp_rte_act_prop *act = &params->act_prop;

	vlan_pcp = action_item->conf;
	if (vlan_pcp) {
		pcp = vlan_pcp->vlan_pcp;
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
		       &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
		/* Update the hdr_bitmap with vlan pcp */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set ipv4 src. */
int32_t
ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
				 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_ipv4 *set_ipv4;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_ipv4 = action_item->conf;
	if (set_ipv4) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
		/* Update the hdr_bitmap with set ipv4 src */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACTION_BIT_SET_IPV4_SRC);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set ipv4 dst. */
int32_t
ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
				 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_ipv4 *set_ipv4;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_ipv4 = action_item->conf;
	if (set_ipv4) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
		/* Update the hdr_bitmap with set ipv4 dst */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACTION_BIT_SET_IPV4_DST);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set tp src. */
int32_t
ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
			       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_tp *set_tp;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_tp = action_item->conf;
	if (set_tp) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
		/* Update the hdr_bitmap with set tp src */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACTION_BIT_SET_TP_SRC);
		return BNXT_TF_RC_SUCCESS;
	}

	BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
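
/*
 * Illustrative sketch (not part of this file): a NAT-style rewrite using
 * the set ipv4/tp handlers in this section; the address and port values
 * below are hypothetical placeholders.
 *
 *	struct rte_flow_action_set_ipv4 new_src = {
 *		.ipv4_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *	};
 *	struct rte_flow_action_set_tp new_sport = { .port = RTE_BE16(4096) };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC, .conf = &new_src },
 *		{ .type = RTE_FLOW_ACTION_TYPE_SET_TP_SRC, .conf = &new_sport },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */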

/* Function to handle the parsing of RTE Flow action set tp dst. */
int32_t
ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
			       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_tp *set_tp;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_tp = action_item->conf;
	if (set_tp) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
		/* Update the hdr_bitmap with set tp dst */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACTION_BIT_SET_TP_DST);
		return BNXT_TF_RC_SUCCESS;
	}

	BNXT_TF_DBG(ERR, "Parse Error: set tp dst arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action dec ttl. */
int32_t
ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
			    struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with dec ttl */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DEC_TTL);
	return BNXT_TF_RC_SUCCESS;
}
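
/*
 * Illustrative sketch (not part of this file): DEC_TTL carries no
 * configuration, so a bare action entry is enough for the handler above.
 *
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DEC_TTL },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */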