/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include "bnxt.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "bnxt_ulp.h"
#include "bnxt_tf_common.h"
#include "bnxt_tf_pmd_shim.h"
#include "ulp_rte_parser.h"
#include "ulp_matcher.h"
#include "ulp_utils.h"
#include "tfp.h"
#include "ulp_port_db.h"
#include "ulp_flow_db.h"
#include "ulp_mapper.h"
#include "ulp_tun.h"
#include "ulp_template_db_tbl.h"

/* Local defines for the parsing functions */
#define ULP_VLAN_PRIORITY_SHIFT		13 /* First 3 bits */
#define ULP_VLAN_PRIORITY_MASK		0x700
#define ULP_VLAN_TAG_MASK		0xFFF /* Last 12 bits */
#define ULP_UDP_PORT_VXLAN		4789

/* Utility function to skip the void items. */
static inline int32_t
ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
{
	if (!*item)
		return 0;
	if (increment)
		(*item)++;
	while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
		(*item)++;
	if (*item)
		return 1;
	return 0;
}

/* Utility function to copy field spec items */
static struct ulp_rte_hdr_field *
ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
			const void *buffer,
			uint32_t size)
{
	field->size = size;
	memcpy(field->spec, buffer, field->size);
	field++;
	return field;
}

/* Utility function to update the field_bitmap */
static void
ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
				   uint32_t idx,
				   enum bnxt_ulp_prsr_action prsr_act)
{
	struct ulp_rte_hdr_field *field;

	field = &params->hdr_field[idx];
	if (ulp_bitmap_notzero(field->mask, field->size)) {
		ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
		if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
			ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
		/* Not an exact match if the mask is not all ones */
		if (!ulp_bitmap_is_ones(field->mask, field->size))
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_WC_MATCH, 1);
	} else {
		ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
	}
}

#define ulp_deference_struct(x, y) ((x) ? &((x)->y) : NULL)
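/*
 * Illustrative note: ulp_deference_struct() guards the spec/mask pointers,
 * which rte_flow allows to be NULL. For example, with a NULL eth_mask,
 *	ulp_deference_struct(eth_mask, dst.addr_bytes)
 * evaluates to NULL, and ulp_rte_prsr_fld_mask() below then skips the
 * mask copy instead of dereferencing a NULL pointer.
 */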
/* Utility function to copy field spec and mask items */
static void
ulp_rte_prsr_fld_mask(struct ulp_rte_parser_params *params,
		      uint32_t *idx,
		      uint32_t size,
		      const void *spec_buff,
		      const void *mask_buff,
		      enum bnxt_ulp_prsr_action prsr_act)
{
	struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];

	/* update the field size */
	field->size = size;

	/* copy the mask specifications only if mask is not null */
	if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff) {
		memcpy(field->mask, mask_buff, size);
		ulp_rte_parser_field_bitmap_update(params, *idx, prsr_act);
	}

	/* copy the spec only if both spec and a non-zero mask are provided */
	if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size))
		memcpy(field->spec, spec_buff, size);

	/* Increment the index */
	*idx = *idx + 1;
}

/* Utility function to validate the field size against the header field array */
static int32_t
ulp_rte_prsr_fld_size_validate(struct ulp_rte_parser_params *params,
			       uint32_t *idx,
			       uint32_t size)
{
	if (params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX) {
		BNXT_TF_DBG(ERR, "OOB for field processing %u\n", *idx);
		return -EINVAL;
	}
	*idx = params->field_idx;
	params->field_idx += size;
	return 0;
}

/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow items into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item *item = pattern;
	struct bnxt_ulp_rte_hdr_info *hdr_info;

	params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;

	/* Set the computed flags for no vlan tags before parsing */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);

	/* Parse all the items in the pattern */
	while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->type >= (uint32_t)BNXT_RTE_FLOW_ITEM_TYPE_END) {
			if (item->type >=
			    (uint32_t)BNXT_RTE_FLOW_ITEM_TYPE_LAST)
				goto hdr_parser_error;
			/* get the header information */
			hdr_info = &ulp_vendor_hdr_info[item->type -
				BNXT_RTE_FLOW_ITEM_TYPE_END];
		} else {
			if (item->type > RTE_FLOW_ITEM_TYPE_HIGIG2)
				goto hdr_parser_error;
			hdr_info = &ulp_hdr_info[item->type];
		}
		if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
			goto hdr_parser_error;
		} else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_hdr_func) {
				if (hdr_info->proto_hdr_func(item, params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		item++;
	}
	/* update the implied SVIF */
	return ulp_rte_parser_implicit_match_port_process(params);

hdr_parser_error:
	BNXT_TF_DBG(ERR, "Truflow parser does not support type %d\n",
		    item->type);
	return BNXT_TF_RC_PARSE_ERR;
}
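/*
 * Dispatch note: item types at or above BNXT_RTE_FLOW_ITEM_TYPE_END are
 * Broadcom vendor-specific and index ulp_vendor_hdr_info[] after being
 * rebased; standard rte_flow item types index ulp_hdr_info[] directly.
 * Each table entry marks the type as not supported, supported with a
 * callback, or ignored (no callback registered).
 */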
/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow actions into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action *action_item = actions;
	struct bnxt_ulp_rte_act_info *hdr_info;

	/* Parse all the actions in the list */
	while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
		if (action_item->type >=
		    (uint32_t)BNXT_RTE_FLOW_ACTION_TYPE_END) {
			if (action_item->type >=
			    (uint32_t)BNXT_RTE_FLOW_ACTION_TYPE_LAST)
				goto act_parser_error;
			/* get the action information from the bnxt act info table */
			hdr_info = &ulp_vendor_act_info[action_item->type -
				BNXT_RTE_FLOW_ACTION_TYPE_END];
		} else {
			if (action_item->type > RTE_FLOW_ACTION_TYPE_SHARED)
				goto act_parser_error;
			/* get the action information from the act info table */
			hdr_info = &ulp_act_info[action_item->type];
		}
		if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
			goto act_parser_error;
		} else if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_act_func) {
				if (hdr_info->proto_act_func(action_item,
							     params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		action_item++;
	}
	/* update the implied port details */
	ulp_rte_parser_implicit_act_port_process(params);
	return BNXT_TF_RC_SUCCESS;

act_parser_error:
	BNXT_TF_DBG(ERR, "Truflow parser does not support act %u\n",
		    action_item->type);
	return BNXT_TF_RC_ERROR;
}
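/*
 * Note: ulp_rte_parser_implicit_act_port_process() supplies a default
 * destination when the action list carried no explicit port action; it
 * synthesizes a port_id action from the incoming interface so every flow
 * ends up with a destination port.
 */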
/*
 * Function to handle the post processing of the computed
 * fields for the interface.
 */
static void
bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
{
	uint32_t ifindex;
	uint16_t port_id, parif;
	uint32_t mtype;
	enum bnxt_ulp_direction_type dir;

	/* get the direction details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);

	/* read the port id details */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return;
	}

	if (dir == BNXT_ULP_DIR_INGRESS) {
		/* Set port PARIF */
		if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
					  BNXT_ULP_PHY_PORT_PARIF, &parif)) {
			BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
			return;
		}
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
				    parif);
	} else {
		/* Get the match port type */
		mtype = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
		if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
					    1);
			/* Set VF func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_VF_FUNC_PARIF,
						  &parif)) {
				BNXT_TF_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
					    parif);
		} else {
			/* Set DRV func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_DRV_FUNC_PARIF,
						  &parif)) {
				BNXT_TF_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
					    parif);
		}
		if (mtype == BNXT_ULP_INTF_TYPE_PF) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
					    1);
		}
	}
}

static int32_t
ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_intf_type match_port_type, act_port_type;
	enum bnxt_ulp_direction_type dir;
	uint32_t act_port_set;

	/* Get the computed details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
	act_port_type = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
	act_port_set = ULP_COMP_FLD_IDX_RD(params,
					   BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);

	/* set the flow direction in the proto and action header */
	if (dir == BNXT_ULP_DIR_EGRESS) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
	}

	/* calculate the VF to VF flag */
	if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);

	/* Update the decrement ttl computational fields */
	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
			     BNXT_ULP_ACT_BIT_DEC_TTL)) {
		/*
		 * If the VXLAN protocol is present and the VXLAN decap
		 * action is not set, then decrement the tunnel ttl.
		 * Similarly, add GRE and NVGRE in the future.
		 */
		if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
				      BNXT_ULP_HDR_BIT_T_VXLAN) &&
		     !ULP_BITMAP_ISSET(params->act_bitmap.bits,
				       BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
		} else {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
		}
	}

	/* Merge the hdr_fp_bit into the proto header bit */
	params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;

	/* Update the comp fld fid */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);

	/* Update the computed interface parameters */
	bnxt_ulp_comp_fld_intf_update(params);

	/* TBD: Handle the flow rejection scenarios */
	return 0;
}

/*
 * Function to handle the post processing of the parsing details
 */
void
bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
{
	ulp_post_process_normal_flow(params);
}

/*
 * Function to compute the flow direction based on the match port details
 */
static void
bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_intf_type match_port_type;

	/* Get the match port type */
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);

	/* If ingress flow and matchport is vf rep then dir is egress */
	if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
				    BNXT_ULP_DIR_EGRESS);
	} else {
		/* Assign the input direction */
		if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
					    BNXT_ULP_DIR_INGRESS);
		else
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
					    BNXT_ULP_DIR_EGRESS);
	}
}
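/*
 * Direction summary (restating the logic above):
 *	attr ingress + VF-rep match port -> BNXT_ULP_DIR_EGRESS
 *	attr ingress + any other port    -> BNXT_ULP_DIR_INGRESS
 *	attr egress                      -> BNXT_ULP_DIR_EGRESS
 * Traffic seen on a VF representor is conceptually leaving the host
 * toward the VF, hence the ingress-to-egress flip.
 */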
/* Function to set the SVIF match field in the parser params */
static int32_t
ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
			uint32_t ifindex,
			uint16_t mask)
{
	uint16_t svif;
	enum bnxt_ulp_direction_type dir;
	struct ulp_rte_hdr_field *hdr_field;
	enum bnxt_ulp_svif_type svif_type;
	enum bnxt_ulp_intf_type port_type;

	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
	    BNXT_ULP_INVALID_SVIF_VAL) {
		BNXT_TF_DBG(ERR,
			    "SVIF already set, multiple sources not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get port type details */
	port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
		BNXT_TF_DBG(ERR, "Invalid port type\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the match port type */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);

	/* compute the direction */
	bnxt_ulp_rte_parser_direction_compute(params);

	/* Get the computed direction */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_INGRESS) {
		svif_type = BNXT_ULP_PHY_PORT_SVIF;
	} else {
		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
			svif_type = BNXT_ULP_VF_FUNC_SVIF;
		else
			svif_type = BNXT_ULP_DRV_FUNC_SVIF;
	}
	ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
			     &svif);
	svif = rte_cpu_to_be_16(svif);
	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
	memcpy(hdr_field->spec, &svif, sizeof(svif));
	memcpy(hdr_field->mask, &mask, sizeof(mask));
	hdr_field->size = sizeof(svif);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
			    rte_be_to_cpu_16(svif));
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of the RTE port id */
int32_t
ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
{
	uint16_t port_id = 0;
	uint16_t svif_mask = 0xFFFF;
	uint32_t ifindex;
	int32_t rc = BNXT_TF_RC_ERROR;

	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
	    BNXT_ULP_INVALID_SVIF_VAL)
		return BNXT_TF_RC_SUCCESS;

	/* SVIF not set. So get the port id */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}

	/* Update the SVIF details */
	rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
	return rc;
}

/* Function to handle the implicit action port id */
int32_t
ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
{
	struct rte_flow_action action_item = {0};
	struct rte_flow_action_port_id port_id = {0};

	/* Read the action port set bit */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
		/* Already set, so just exit */
		return BNXT_TF_RC_SUCCESS;
	}
	port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
	action_item.conf = &port_id;

	/* Update the action port based on incoming port */
	ulp_rte_port_id_act_handler(&action_item, params);

	/* Reset the action port set bit */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
	return BNXT_TF_RC_SUCCESS;
}
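/*
 * SVIF selection summary (from ulp_rte_parser_svif_set() above): ingress
 * flows match on the physical port SVIF; egress flows match on the VF
 * function SVIF for VF representors and on the driver function SVIF
 * otherwise. The value is stored big-endian in the header field,
 * presumably to match the byte order of the device match key.
 */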
/* Function to handle the parsing of RTE Flow item PF Header. */
int32_t
ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
		       struct ulp_rte_parser_params *params)
{
	uint16_t port_id = 0;
	uint16_t svif_mask = 0xFFFF;
	uint32_t ifindex;

	/* Get the implicit port id */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

	/* perform the conversion from dpdk port to bnxt ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
}

/* Function to handle the parsing of RTE Flow item VF Header. */
int32_t
ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
		       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vf *vf_spec = item->spec;
	const struct rte_flow_item_vf *vf_mask = item->mask;
	uint16_t mask = 0;
	uint32_t ifindex;
	int32_t rc = BNXT_TF_RC_PARSE_ERR;

	/* Get VF rte_flow_item for Port details */
	if (!vf_spec) {
		BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
		return rc;
	}
	if (!vf_mask) {
		BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
		return rc;
	}
	mask = vf_mask->id;

	/* perform the conversion from VF Func id to bnxt ifindex */
	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
						 vf_spec->id,
						 &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}
	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, mask);
}

/* Function to handle the parsing of RTE Flow item port id Header. */
int32_t
ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
			    struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_port_id *port_spec = item->spec;
	const struct rte_flow_item_port_id *port_mask = item->mask;
	uint16_t mask = 0;
	int32_t rc = BNXT_TF_RC_PARSE_ERR;
	uint32_t ifindex;

	if (!port_spec) {
		BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
		return rc;
	}
	if (!port_mask) {
		BNXT_TF_DBG(ERR, "ParseErr:Port mask is not valid\n");
		return rc;
	}
	mask = port_mask->id;

	/* perform the conversion from dpdk port to bnxt ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_spec->id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}
	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, mask);
}
/* Function to handle the parsing of RTE Flow item phy port Header. */
int32_t
ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
			     struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_phy_port *port_spec = item->spec;
	const struct rte_flow_item_phy_port *port_mask = item->mask;
	uint16_t mask = 0;
	int32_t rc = BNXT_TF_RC_ERROR;
	uint16_t svif;
	enum bnxt_ulp_direction_type dir;
	struct ulp_rte_hdr_field *hdr_field;

	/* Copy the rte_flow_item for phy port into hdr_field */
	if (!port_spec) {
		BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
		return rc;
	}
	if (!port_mask) {
		BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
		return rc;
	}
	mask = port_mask->index;

	/* Update the match port type */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
			    BNXT_ULP_INTF_TYPE_PHY_PORT);

	/* Compute the Hw direction */
	bnxt_ulp_rte_parser_direction_compute(params);

	/* Direction validation */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_EGRESS) {
		BNXT_TF_DBG(ERR,
			    "Parse Err:Phy ports are valid only for ingress\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* Get the physical port details from port db */
	rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
					   &svif);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to get port details\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* Update the SVIF details */
	svif = rte_cpu_to_be_16(svif);
	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
	memcpy(hdr_field->spec, &svif, sizeof(svif));
	memcpy(hdr_field->mask, &mask, sizeof(mask));
	hdr_field->size = sizeof(svif);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
			    rte_be_to_cpu_16(svif));
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
			     uint16_t type, uint32_t in_flag)
{
	if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_IPV4);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_IPV4);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
		}
	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_IPV6);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_IPV6);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
		}
	}
}

/* Internal Function to identify broadcast or multicast packets */
static int32_t
ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
{
	if (rte_is_multicast_ether_addr(eth_addr) ||
	    rte_is_broadcast_ether_addr(eth_addr)) {
		BNXT_TF_DBG(DEBUG,
			    "No support for bcast or mcast addr offload\n");
		return 1;
	}
	return 0;
}
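/*
 * Note: the ethertype-derived L3 bits are set in hdr_fp_bit rather than
 * hdr_bitmap; ulp_post_process_normal_flow() later ORs hdr_fp_bit into
 * hdr_bitmap, so an ethertype match contributes to template selection
 * even when no explicit IPv4/IPv6 item follows.
 */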
/* Function to handle the parsing of RTE Flow item Ethernet Header. */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_eth *eth_spec = item->spec;
	const struct rte_flow_item_eth *eth_mask = item->mask;
	uint32_t idx = 0, dmac_idx = 0;
	uint32_t size;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;

	/* Perform validations */
	if (eth_spec) {
		/* Todo: work around to avoid multicast and broadcast addr */
		if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
			return BNXT_TF_RC_PARSE_ERR;

		if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
			return BNXT_TF_RC_PARSE_ERR;

		eth_type = eth_spec->type;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ETH_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}
	/*
	 * Copy the rte_flow_item for eth into hdr_field using ethernet
	 * header fields
	 */
	dmac_idx = idx;
	size = sizeof(((struct rte_flow_item_eth *)NULL)->dst.addr_bytes);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, dst.addr_bytes),
			      ulp_deference_struct(eth_mask, dst.addr_bytes),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_eth *)NULL)->src.addr_bytes);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, src.addr_bytes),
			      ulp_deference_struct(eth_mask, src.addr_bytes),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_eth *)NULL)->type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, type),
			      ulp_deference_struct(eth_mask, type),
			      ULP_PRSR_ACT_MATCH_IGNORE);

	/* Update the protocol hdr bitmap */
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_ETH) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID,
				    dmac_idx);
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);

	return BNXT_TF_RC_SUCCESS;
}
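/*
 * Inner/outer heuristic: an ETH item is treated as the inner header when
 * any outer L2/L3/L4 bit is already present in hdr_bitmap, i.e. when a
 * second ethernet header appears after outer headers (a tunnel). Only the
 * outer DMAC index is recorded for tunnel-offload use.
 */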
/* Function to handle the parsing of RTE Flow item Vlan Header. */
int32_t
ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vlan *vlan_spec = item->spec;
	const struct rte_flow_item_vlan *vlan_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bit;
	uint32_t idx = 0;
	uint16_t vlan_tag = 0, priority = 0;
	uint16_t vlan_tag_mask = 0, priority_mask = 0;
	uint32_t outer_vtag_num;
	uint32_t inner_vtag_num;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;
	uint32_t size;

	if (vlan_spec) {
		vlan_tag = ntohs(vlan_spec->tci);
		priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag &= ULP_VLAN_TAG_MASK;
		vlan_tag = htons(vlan_tag);
		eth_type = vlan_spec->inner_type;
	}

	if (vlan_mask) {
		vlan_tag_mask = ntohs(vlan_mask->tci);
		priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag_mask &= ULP_VLAN_TAG_MASK;

		/*
		 * The storage for the priority and the vlan tag is 2 bytes.
		 * If the 3-bit priority mask is all ones, extend it to the
		 * remaining 13 bits as well so that the field is treated as
		 * an exact match.
		 */
		if (priority_mask == ULP_VLAN_PRIORITY_MASK)
			priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
		if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
			vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
		vlan_tag_mask = htons(vlan_tag_mask);
	}
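	/*
	 * TCI decomposition example (illustrative): a TCI of 0x2005 splits
	 * into priority 0x2005 >> 13 = 1 and VID 0x2005 & 0xFFF = 5; the
	 * two pieces are stored as separate 2-byte header fields below.
	 */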
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_S_VLAN_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for vlan into hdr_field using vlan
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_vlan *)NULL)->tci);
	/*
	 * The priority field is ignored since OVS sets it as a wild card
	 * match and it is not supported. This is a workaround and shall
	 * be addressed in the future.
	 */
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      &priority,
			      (vlan_mask) ? &priority_mask : NULL,
			      ULP_PRSR_ACT_MASK_IGNORE);

	ulp_rte_prsr_fld_mask(params, &idx, size,
			      &vlan_tag,
			      (vlan_mask) ? &vlan_tag_mask : NULL,
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vlan *)NULL)->inner_type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vlan_spec, inner_type),
			      ulp_deference_struct(vlan_mask, inner_type),
			      ULP_PRSR_ACT_MATCH_IGNORE);

	/* Get the outer tag and inner tag counts */
	outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_O_VTAG_NUM);
	inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_I_VTAG_NUM);

	/* Update the hdr_bitmap of the vlans */
	hdr_bit = &params->hdr_bitmap;
	if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
	    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
	    !outer_vtag_num) {
		/* Update the vlan tag num */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OO_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_OO_VLAN_FB_VID, 1);

	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   outer_vtag_num == 1) {
		/* update the vlan tag num */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OI_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_OI_VLAN_FB_VID, 1);

	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   !inner_vtag_num) {
		/* update the vlan tag num */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_IO_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_IO_VLAN_FB_VID, 1);
		inner_flag = 1;
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   inner_vtag_num == 1) {
		/* update the vlan tag num */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_II_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_II_VLAN_FB_VID, 1);
		inner_flag = 1;
	} else {
		BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
	return BNXT_TF_RC_SUCCESS;
}
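/*
 * VLAN nesting summary (from the chain above): each VLAN item is
 * classified by the ethernet headers seen so far and the running tag
 * counts, yielding one of four bits: OO (first outer tag), OI (second
 * outer tag), IO (first inner tag), II (second inner tag). More than two
 * tags per ethernet header, or a VLAN without a preceding ETH item, is
 * rejected.
 */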
/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
			     uint8_t proto, uint32_t in_flag)
{
	if (proto == IPPROTO_UDP) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_UDP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_UDP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
		}
	} else if (proto == IPPROTO_TCP) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_TCP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_TCP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
		}
	} else if (proto == IPPROTO_GRE) {
		ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
	} else if (proto == IPPROTO_ICMP) {
		if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN))
			ULP_BITMAP_SET(param->hdr_bitmap.bits,
				       BNXT_ULP_HDR_BIT_I_ICMP);
		else
			ULP_BITMAP_SET(param->hdr_bitmap.bits,
				       BNXT_ULP_HDR_BIT_O_ICMP);
	}
	if (proto) {
		if (in_flag) {
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
					    1);
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
					    proto);
		} else {
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
					    1);
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
					    proto);
		}
	}
}

/* Function to handle the parsing of RTE Flow item IPV4 Header. */
int32_t
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
	const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0, dip_idx = 0;
	uint32_t size;
	uint8_t proto = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate that there is no third L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_IPV4_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.version_ihl),
			      ulp_deference_struct(ipv4_mask, hdr.version_ihl),
			      ULP_PRSR_ACT_DEFAULT);
1015 */ 1016 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service); 1017 ulp_rte_prsr_fld_mask(params, &idx, size, 1018 ulp_deference_struct(ipv4_spec, 1019 hdr.type_of_service), 1020 ulp_deference_struct(ipv4_mask, 1021 hdr.type_of_service), 1022 ULP_PRSR_ACT_MASK_IGNORE); 1023 1024 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length); 1025 ulp_rte_prsr_fld_mask(params, &idx, size, 1026 ulp_deference_struct(ipv4_spec, hdr.total_length), 1027 ulp_deference_struct(ipv4_mask, hdr.total_length), 1028 ULP_PRSR_ACT_DEFAULT); 1029 1030 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id); 1031 ulp_rte_prsr_fld_mask(params, &idx, size, 1032 ulp_deference_struct(ipv4_spec, hdr.packet_id), 1033 ulp_deference_struct(ipv4_mask, hdr.packet_id), 1034 ULP_PRSR_ACT_DEFAULT); 1035 1036 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset); 1037 ulp_rte_prsr_fld_mask(params, &idx, size, 1038 ulp_deference_struct(ipv4_spec, 1039 hdr.fragment_offset), 1040 ulp_deference_struct(ipv4_mask, 1041 hdr.fragment_offset), 1042 ULP_PRSR_ACT_DEFAULT); 1043 1044 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live); 1045 ulp_rte_prsr_fld_mask(params, &idx, size, 1046 ulp_deference_struct(ipv4_spec, hdr.time_to_live), 1047 ulp_deference_struct(ipv4_mask, hdr.time_to_live), 1048 ULP_PRSR_ACT_DEFAULT); 1049 1050 /* Ignore proto for matching templates */ 1051 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id); 1052 ulp_rte_prsr_fld_mask(params, &idx, size, 1053 ulp_deference_struct(ipv4_spec, 1054 hdr.next_proto_id), 1055 ulp_deference_struct(ipv4_mask, 1056 hdr.next_proto_id), 1057 ULP_PRSR_ACT_MATCH_IGNORE); 1058 if (ipv4_spec) 1059 proto = ipv4_spec->hdr.next_proto_id; 1060 1061 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum); 1062 ulp_rte_prsr_fld_mask(params, &idx, size, 1063 ulp_deference_struct(ipv4_spec, hdr.hdr_checksum), 1064 ulp_deference_struct(ipv4_mask, hdr.hdr_checksum), 1065 ULP_PRSR_ACT_DEFAULT); 1066 1067 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr); 1068 ulp_rte_prsr_fld_mask(params, &idx, size, 1069 ulp_deference_struct(ipv4_spec, hdr.src_addr), 1070 ulp_deference_struct(ipv4_mask, hdr.src_addr), 1071 ULP_PRSR_ACT_DEFAULT); 1072 1073 dip_idx = idx; 1074 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr); 1075 ulp_rte_prsr_fld_mask(params, &idx, size, 1076 ulp_deference_struct(ipv4_spec, hdr.dst_addr), 1077 ulp_deference_struct(ipv4_mask, hdr.dst_addr), 1078 ULP_PRSR_ACT_DEFAULT); 1079 1080 /* Set the ipv4 header bitmap and computed l3 header bitmaps */ 1081 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) || 1082 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) { 1083 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4); 1084 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1); 1085 inner_flag = 1; 1086 } else { 1087 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4); 1088 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1); 1089 /* Update the tunnel offload dest ip offset */ 1090 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID, 1091 dip_idx); 1092 } 1093 1094 /* Some of the PMD applications may set the protocol field 1095 * in the IPv4 spec but don't set the mask. So, consider 1096 * the mask in the proto value calculation. 
/* Function to handle the parsing of RTE Flow item IPV6 Header */
int32_t
ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
	const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0, dip_idx = 0;
	uint32_t size;
	uint32_t ver_spec = 0, ver_mask = 0;
	uint32_t tc_spec = 0, tc_mask = 0;
	uint32_t lab_spec = 0, lab_mask = 0;
	uint8_t proto = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate that there is no third L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_IPV6_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
	 * header fields
	 */
	if (ipv6_spec) {
		ver_spec = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
		tc_spec = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
		lab_spec = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
		proto = ipv6_spec->hdr.proto;
	}

	if (ipv6_mask) {
		ver_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
		tc_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
		lab_mask = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);

		/*
		 * Some PMD applications may set the protocol field in the
		 * IPv6 spec but not the mask, so apply the mask when
		 * computing the effective proto value.
		 */
		proto &= ipv6_mask->hdr.proto;
	}
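	/*
	 * vtc_flow layout reminder: the 32-bit field packs version (4 bits),
	 * traffic class (8 bits) and flow label (20 bits); the
	 * BNXT_ULP_GET_IPV6_* accessors above extract each slice so the
	 * three pieces can be stored as separate header fields.
	 */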
	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow);
	ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask,
			      ULP_PRSR_ACT_DEFAULT);
	/*
	 * The TC and flow label fields are ignored since OVS sets them for
	 * match and they are not supported. This is a workaround and shall
	 * be addressed in the future.
	 */
	ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask,
			      ULP_PRSR_ACT_MASK_IGNORE);
	ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask,
			      ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.payload_len),
			      ulp_deference_struct(ipv6_mask, hdr.payload_len),
			      ULP_PRSR_ACT_DEFAULT);

	/* Ignore proto for template matching */
	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.proto),
			      ulp_deference_struct(ipv6_mask, hdr.proto),
			      ULP_PRSR_ACT_MATCH_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.hop_limits),
			      ulp_deference_struct(ipv6_mask, hdr.hop_limits),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.src_addr),
			      ulp_deference_struct(ipv6_mask, hdr.src_addr),
			      ULP_PRSR_ACT_DEFAULT);

	dip_idx = idx;
	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.dst_addr),
			      ulp_deference_struct(ipv6_mask, hdr.dst_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the ipv6 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
		/* Update the tunnel offload dest ip offset */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
				    dip_idx);
	}

	/* Update the field protocol hdr bitmap */
	ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);

	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *params,
			     uint16_t src_port, uint16_t src_mask,
			     uint16_t dst_port, uint16_t dst_mask,
			     enum bnxt_ulp_hdr_bit hdr_bit)
{
	switch (hdr_bit) {
	case BNXT_ULP_HDR_BIT_I_UDP:
	case BNXT_ULP_HDR_BIT_I_TCP:
		ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
				    (uint64_t)rte_be_to_cpu_16(src_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
				    (uint64_t)rte_be_to_cpu_16(dst_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
				    1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
				    !!(src_port & src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
				    !!(dst_port & dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
				    (hdr_bit == BNXT_ULP_HDR_BIT_I_UDP) ?
				    IPPROTO_UDP : IPPROTO_TCP);
		break;
	case BNXT_ULP_HDR_BIT_O_UDP:
	case BNXT_ULP_HDR_BIT_O_TCP:
		ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
				    (uint64_t)rte_be_to_cpu_16(src_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
				    (uint64_t)rte_be_to_cpu_16(dst_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
				    1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
				    !!(src_port & src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
				    !!(dst_port & dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
				    (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP) ?
				    IPPROTO_UDP : IPPROTO_TCP);
		break;
	default:
		break;
	}

	if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP && dst_port ==
	    tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
		ULP_BITMAP_SET(params->hdr_fp_bit.bits,
			       BNXT_ULP_HDR_BIT_T_VXLAN);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
	}
}
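/*
 * Implicit tunnel detection example (illustrative): a flow matching outer
 * UDP with dst_port == 4789 (ULP_UDP_PORT_VXLAN) sets the T_VXLAN
 * fast-path bit and the L3_TUN computed field even without an explicit
 * VXLAN item, so later ICMP items are classified as inner headers.
 */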
/* Function to handle the parsing of RTE Flow item UDP Header. */
int32_t
ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_udp *udp_spec = item->spec;
	const struct rte_flow_item_udp *udp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;
	uint16_t dport = 0, sport = 0;
	uint16_t dport_mask = 0, sport_mask = 0;
	uint32_t cnt;
	enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_UDP;

	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (udp_spec) {
		sport = udp_spec->hdr.src_port;
		dport = udp_spec->hdr.dst_port;
	}
	if (udp_mask) {
		sport_mask = udp_mask->hdr.src_port;
		dport_mask = udp_mask->hdr.dst_port;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_UDP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for udp into hdr_field using udp
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.src_port),
			      ulp_deference_struct(udp_mask, hdr.src_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dst_port),
			      ulp_deference_struct(udp_mask, hdr.dst_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dgram_len),
			      ulp_deference_struct(udp_mask, hdr.dgram_len),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dgram_cksum),
			      ulp_deference_struct(udp_mask, hdr.dgram_cksum),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the udp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
		out_l4 = BNXT_ULP_HDR_BIT_I_UDP;

	ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
				     dport_mask, out_l4);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow item TCP Header. */
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_tcp *tcp_spec = item->spec;
	const struct rte_flow_item_tcp *tcp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint16_t dport = 0, sport = 0;
	uint16_t dport_mask = 0, sport_mask = 0;
	uint32_t size;
	uint32_t cnt;
	enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_TCP;

	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (tcp_spec) {
		sport = tcp_spec->hdr.src_port;
		dport = tcp_spec->hdr.dst_port;
	}
	if (tcp_mask) {
		sport_mask = tcp_mask->hdr.src_port;
		dport_mask = tcp_mask->hdr.dst_port;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_TCP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for tcp into hdr_field using tcp
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.src_port),
			      ulp_deference_struct(tcp_mask, hdr.src_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.dst_port),
			      ulp_deference_struct(tcp_mask, hdr.dst_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.sent_seq),
			      ulp_deference_struct(tcp_mask, hdr.sent_seq),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.recv_ack),
			      ulp_deference_struct(tcp_mask, hdr.recv_ack),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.data_off),
			      ulp_deference_struct(tcp_mask, hdr.data_off),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.tcp_flags),
			      ulp_deference_struct(tcp_mask, hdr.tcp_flags),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.rx_win),
			      ulp_deference_struct(tcp_mask, hdr.rx_win),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.cksum),
			      ulp_deference_struct(tcp_mask, hdr.cksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.tcp_urp),
			      ulp_deference_struct(tcp_mask, hdr.tcp_urp),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the tcp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
		out_l4 = BNXT_ULP_HDR_BIT_I_TCP;

	ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
				     dport_mask, out_l4);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item Vxlan Header. */
int32_t
ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
	const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_VXLAN_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->flags);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_spec, flags),
			      ulp_deference_struct(vxlan_mask, flags),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd0);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_spec, rsvd0),
			      ulp_deference_struct(vxlan_mask, rsvd0),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->vni);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_spec, vni),
			      ulp_deference_struct(vxlan_mask, vni),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd1);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_spec, rsvd1),
			      ulp_deference_struct(vxlan_mask, rsvd1),
			      ULP_PRSR_ACT_DEFAULT);

	/* Update the hdr_bitmap with vxlan */
	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
	return BNXT_TF_RC_SUCCESS;
}
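/*
 * Note: an explicit VXLAN item sets BNXT_ULP_HDR_BIT_T_VXLAN directly in
 * hdr_bitmap, while the UDP dst-port 4789 heuristic in
 * ulp_rte_l4_proto_type_update() only sets the fast-path copy; both
 * paths mark BNXT_ULP_CF_IDX_L3_TUN, so items that key off L3_TUN (such
 * as ICMP) are treated as part of the inner packet.
 */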
/* Function to handle the parsing of RTE Flow item GRE Header. */
int32_t
ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_gre *gre_spec = item->spec;
	const struct rte_flow_item_gre *gre_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_GRE_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(gre_spec, c_rsvd0_ver),
			      ulp_deference_struct(gre_mask, c_rsvd0_ver),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(gre_spec, protocol),
			      ulp_deference_struct(gre_mask, protocol),
			      ULP_PRSR_ACT_DEFAULT);

	/* Update the hdr_bitmap with GRE */
	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item ANY. */
int32_t
ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item ICMP Header. */
int32_t
ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_icmp *icmp_spec = item->spec;
	const struct rte_flow_item_icmp *icmp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_type),
			      ulp_deference_struct(icmp_mask, hdr.icmp_type),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_code);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_code),
			      ulp_deference_struct(icmp_mask, hdr.icmp_code),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_cksum),
			      ulp_deference_struct(icmp_mask, hdr.icmp_cksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_ident);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_ident),
			      ulp_deference_struct(icmp_mask, hdr.icmp_ident),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_seq_nb);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_seq_nb),
			      ulp_deference_struct(icmp_mask, hdr.icmp_seq_nb),
			      ULP_PRSR_ACT_DEFAULT);

	/* Update the hdr_bitmap with ICMP */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
	else
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow item ICMP6 Header. */
int32_t
ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_icmp6 *icmp_spec = item->spec;
	const struct rte_flow_item_icmp6 *icmp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, type),
			      ulp_deference_struct(icmp_mask, type),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->code);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, code),
			      ulp_deference_struct(icmp_mask, code),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->checksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, checksum),
			      ulp_deference_struct(icmp_mask, checksum),
			      ULP_PRSR_ACT_DEFAULT);

	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4)) {
		BNXT_TF_DBG(ERR, "Error: incorrect icmp version\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the hdr_bitmap with ICMP */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
	else
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item void Header */
int32_t
ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action void Header. */
int32_t
ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action Mark Header. */
int32_t
ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
			 struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_mark *mark;
	struct ulp_rte_act_bitmap *act = &param->act_bitmap;
	uint32_t mark_id;

	mark = action_item->conf;
	if (mark) {
		mark_id = tfp_cpu_to_be_32(mark->id);
		memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
		       &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);

		/* Update the act_bitmap with mark */
		ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
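/*
 * Byte-order note (assumption): the 32-bit mark id is stored big-endian
 * (tfp_cpu_to_be_32) in the action property buffer, presumably so the
 * mapper can copy it directly into device-ordered structures without a
 * further swap.
 */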
/* Function to handle the parsing of RTE Flow action RSS Header. */
int32_t
ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
			struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_rss *rss;
	struct ulp_rte_act_prop *ap = &param->act_prop;

	if (action_item == NULL || action_item->conf == NULL) {
		BNXT_TF_DBG(ERR, "Parse Err: invalid rss configuration\n");
		return BNXT_TF_RC_ERROR;
	}

	rss = action_item->conf;
	/* Copy the rss into the specific action properties */
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_TYPES], &rss->types,
	       BNXT_ULP_ACT_PROP_SZ_RSS_TYPES);
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL], &rss->level,
	       BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL);
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN],
	       &rss->key_len, BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN);

	if (rss->key_len > BNXT_ULP_ACT_PROP_SZ_RSS_KEY) {
		BNXT_TF_DBG(ERR, "Parse Err: RSS key too big\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Copy the key only when one is actually provided */
	if (rss->key && rss->key_len)
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY],
		       rss->key, rss->key_len);

	/* set the RSS action header bit */
	ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);

	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of the encap RTE Flow item eth Header. */
static void
ulp_rte_enc_eth_hdr_handler(struct ulp_rte_parser_params *params,
			    const struct rte_flow_item_eth *eth_spec)
{
	struct ulp_rte_hdr_field *field;
	uint32_t size;

	field = &params->enc_field[BNXT_ULP_ENC_FIELD_ETH_DMAC];
	size = sizeof(eth_spec->dst.addr_bytes);
	field = ulp_rte_parser_fld_copy(field, eth_spec->dst.addr_bytes, size);

	size = sizeof(eth_spec->src.addr_bytes);
	field = ulp_rte_parser_fld_copy(field, eth_spec->src.addr_bytes, size);

	size = sizeof(eth_spec->type);
	field = ulp_rte_parser_fld_copy(field, &eth_spec->type, size);

	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
}

/* Function to handle the parsing of the encap RTE Flow item vlan Header. */
static void
ulp_rte_enc_vlan_hdr_handler(struct ulp_rte_parser_params *params,
			     const struct rte_flow_item_vlan *vlan_spec,
			     uint32_t inner)
{
	struct ulp_rte_hdr_field *field;
	uint32_t size;

	if (!inner) {
		field = &params->enc_field[BNXT_ULP_ENC_FIELD_O_VLAN_TCI];
		ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OO_VLAN);
	} else {
		field = &params->enc_field[BNXT_ULP_ENC_FIELD_I_VLAN_TCI];
		ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OI_VLAN);
	}

	size = sizeof(vlan_spec->tci);
	field = ulp_rte_parser_fld_copy(field, &vlan_spec->tci, size);

	size = sizeof(vlan_spec->inner_type);
	field = ulp_rte_parser_fld_copy(field, &vlan_spec->inner_type, size);
}
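/*
 * Note: the enc_* helpers rely on ulp_rte_parser_fld_copy() returning
 * the next hdr_field slot, so each handler anchors on one
 * BNXT_ULP_ENC_FIELD_* index and writes the remaining fields into the
 * slots that follow; the ordering of that enum is therefore load-bearing
 * for everything below.
 */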
/* Function to handle the parsing of the encap RTE Flow item ipv4 Header. */
static void
ulp_rte_enc_ipv4_hdr_handler(struct ulp_rte_parser_params *params,
			     const struct rte_flow_item_ipv4 *ip)
{
	struct ulp_rte_hdr_field *field;
	uint32_t size;
	uint8_t val8;

	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_IHL];
	size = sizeof(ip->hdr.version_ihl);
	if (!ip->hdr.version_ihl)
		val8 = RTE_IPV4_VHL_DEF;
	else
		val8 = ip->hdr.version_ihl;
	field = ulp_rte_parser_fld_copy(field, &val8, size);

	size = sizeof(ip->hdr.type_of_service);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.type_of_service, size);

	size = sizeof(ip->hdr.packet_id);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.packet_id, size);

	size = sizeof(ip->hdr.fragment_offset);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.fragment_offset, size);

	size = sizeof(ip->hdr.time_to_live);
	if (!ip->hdr.time_to_live)
		val8 = BNXT_ULP_DEFAULT_TTL;
	else
		val8 = ip->hdr.time_to_live;
	field = ulp_rte_parser_fld_copy(field, &val8, size);

	size = sizeof(ip->hdr.next_proto_id);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.next_proto_id, size);

	size = sizeof(ip->hdr.src_addr);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);

	size = sizeof(ip->hdr.dst_addr);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);

	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4);
}

/* Function to handle the parsing of the encap RTE Flow item ipv6 Header. */
static void
ulp_rte_enc_ipv6_hdr_handler(struct ulp_rte_parser_params *params,
			     const struct rte_flow_item_ipv6 *ip)
{
	struct ulp_rte_hdr_field *field;
	uint32_t size;
	uint32_t val32;
	uint8_t val8;

	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW];
	size = sizeof(ip->hdr.vtc_flow);
	if (!ip->hdr.vtc_flow)
		val32 = rte_cpu_to_be_32(BNXT_ULP_IPV6_DFLT_VER);
	else
		val32 = ip->hdr.vtc_flow;
	field = ulp_rte_parser_fld_copy(field, &val32, size);

	size = sizeof(ip->hdr.proto);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.proto, size);

	size = sizeof(ip->hdr.hop_limits);
	if (!ip->hdr.hop_limits)
		val8 = BNXT_ULP_DEFAULT_TTL;
	else
		val8 = ip->hdr.hop_limits;
	field = ulp_rte_parser_fld_copy(field, &val8, size);

	size = sizeof(ip->hdr.src_addr);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);

	size = sizeof(ip->hdr.dst_addr);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);

	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV6);
}
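/*
 * Note: the encap IP handlers above substitute defaults
 * (RTE_IPV4_VHL_DEF, BNXT_ULP_DEFAULT_TTL and the default IPv6 version
 * word) whenever the encap definition leaves a field as zero, so a
 * sparsely specified definition still produces a valid outer header.
 */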
/* Function to handle the parsing of the encap RTE Flow item UDP Header. */
static void
ulp_rte_enc_udp_hdr_handler(struct ulp_rte_parser_params *params,
			    const struct rte_flow_item_udp *udp_spec)
{
	struct ulp_rte_hdr_field *field;
	uint32_t size;
	uint8_t type = IPPROTO_UDP;

	field = &params->enc_field[BNXT_ULP_ENC_FIELD_UDP_SPORT];
	size = sizeof(udp_spec->hdr.src_port);
	field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.src_port, size);

	size = sizeof(udp_spec->hdr.dst_port);
	field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.dst_port, size);

	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_UDP);

	/* Update the ip header protocol */
	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_PROTO];
	ulp_rte_parser_fld_copy(field, &type, sizeof(type));
	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_PROTO];
	ulp_rte_parser_fld_copy(field, &type, sizeof(type));
}

/* Function to handle the parsing of the encap RTE Flow item vxlan Header. */
static void
ulp_rte_enc_vxlan_hdr_handler(struct ulp_rte_parser_params *params,
			      struct rte_flow_item_vxlan *vxlan_spec)
{
	struct ulp_rte_hdr_field *field;
	uint32_t size;

	field = &params->enc_field[BNXT_ULP_ENC_FIELD_VXLAN_FLAGS];
	size = sizeof(vxlan_spec->flags);
	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->flags, size);

	size = sizeof(vxlan_spec->rsvd0);
	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd0, size);

	size = sizeof(vxlan_spec->vni);
	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->vni, size);

	size = sizeof(vxlan_spec->rsvd1);
	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd1, size);

	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_VXLAN);
}
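/*
 * Note: the encap UDP handler writes IPPROTO_UDP into both the IPv4 and
 * IPv6 next-protocol slots because the outer L3 type is not known at
 * that point in the walk; the slot for the unused address family is
 * expected to be ignored downstream.
 */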
/* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
int32_t
ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
				struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_vxlan_encap *vxlan_encap;
	const struct rte_flow_item *item;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	struct rte_flow_item_vxlan vxlan_spec;
	uint32_t vlan_num = 0, vlan_size = 0;
	uint32_t ip_size = 0, ip_type = 0;
	uint32_t vxlan_size = 0;
	struct ulp_rte_act_bitmap *act = &params->act_bitmap;
	struct ulp_rte_act_prop *ap = &params->act_prop;

	vxlan_encap = action_item->conf;
	if (!vxlan_encap) {
		BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
		return BNXT_TF_RC_ERROR;
	}

	item = vxlan_encap->definition;
	if (!item) {
		BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
		return BNXT_TF_RC_ERROR;
	}

	if (!ulp_rte_item_skip_void(&item, 0))
		return BNXT_TF_RC_ERROR;

	/* Must have an ethernet header */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Parse the ethernet header */
	if (item->spec)
		ulp_rte_enc_eth_hdr_handler(params, item->spec);

	/* Goto the next item */
	if (!ulp_rte_item_skip_void(&item, 1))
		return BNXT_TF_RC_ERROR;

	/* May have a vlan header */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_num++;
		if (item->spec)
			ulp_rte_enc_vlan_hdr_handler(params, item->spec, 0);

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	}

	/* May have a second vlan header */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_num++;
		if (item->spec)
			ulp_rte_enc_vlan_hdr_handler(params, item->spec, 1);

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	}

	/* Update the vlan count and size if any tags were present */
	if (vlan_num) {
		vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
		vlan_num = tfp_cpu_to_be_32(vlan_num);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
		       &vlan_num,
		       sizeof(uint32_t));
		vlan_size = tfp_cpu_to_be_32(vlan_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
		       &vlan_size,
		       sizeof(uint32_t));
	}

	/* L3 must be IPv4 or IPv6 */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		ipv4_spec = item->spec;
		ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;

		/* Update the ip size details */
		ip_size = tfp_cpu_to_be_32(ip_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
		       &ip_size, sizeof(uint32_t));

		/* update the ip type */
		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
		       &ip_type, sizeof(uint32_t));

		/* update the computed field to notify it is ipv4 header */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
				    1);
		if (ipv4_spec)
			ulp_rte_enc_ipv4_hdr_handler(params, ipv4_spec);

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		ipv6_spec = item->spec;
		ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;

		/* Update the ip size details */
		ip_size = tfp_cpu_to_be_32(ip_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
		       &ip_size, sizeof(uint32_t));

		/* update the ip type */
		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
		       &ip_type, sizeof(uint32_t));

		/* update the computed field to notify it is ipv6 header */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
				    1);
		if (ipv6_spec)
			ulp_rte_enc_ipv6_hdr_handler(params, ipv6_spec);

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	} else {
		BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
		return BNXT_TF_RC_ERROR;
	}

	/* L4 is UDP */
	if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
		BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
		return BNXT_TF_RC_ERROR;
	}
	if (item->spec)
		ulp_rte_enc_udp_hdr_handler(params, item->spec);

	if (!ulp_rte_item_skip_void(&item, 1))
		return BNXT_TF_RC_ERROR;

	/* Finally VXLAN */
	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
		BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
		return BNXT_TF_RC_ERROR;
	}
	vxlan_size = sizeof(struct rte_flow_item_vxlan);
	/* copy the vxlan details */
	memcpy(&vxlan_spec, item->spec, vxlan_size);
	vxlan_spec.flags = 0x08;
	vxlan_size = tfp_cpu_to_be_32(vxlan_size);
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
	       &vxlan_size, sizeof(uint32_t));

	ulp_rte_enc_vxlan_hdr_handler(params, &vxlan_spec);

	/* Update the act_bitmap with vxlan encap */
	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
	return BNXT_TF_RC_SUCCESS;
}
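/*
 * Illustrative only (not part of this driver): the encap parser above
 * walks the action's definition list in the fixed order
 *	ETH -> [VLAN [VLAN]] -> IPV4|IPV6 -> UDP -> VXLAN -> END
 * so a caller would typically build it as:
 *
 *	struct rte_flow_item defs[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,   .spec = &eth },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,  .spec = &ip4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,   .spec = &udp },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vx },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_vxlan_encap conf = { .definition = defs };
 */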
/* Function to handle the parsing of RTE Flow action vxlan_decap Header */
int32_t
ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
				__rte_unused,
				struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with vxlan decap */
	ULP_BITMAP_SET(params->act_bitmap.bits,
		       BNXT_ULP_ACT_BIT_VXLAN_DECAP);
	/* Update computational field with tunnel decap info */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action drop Header. */
int32_t
ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with drop */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action count. */
int32_t
ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_count *act_count;
	struct ulp_rte_act_prop *act_prop = &params->act_prop;

	act_count = action_item->conf;
	if (act_count) {
		if (act_count->shared) {
			BNXT_TF_DBG(ERR,
				    "Parse Error:Shared count not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
		       &act_count->id,
		       BNXT_ULP_ACT_PROP_SZ_COUNT);
	}

	/* Update the act_bitmap with count */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of action ports. */
static int32_t
ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
			    uint32_t ifindex)
{
	enum bnxt_ulp_direction_type dir;
	uint16_t pid_s;
	uint32_t pid;
	struct ulp_rte_act_prop *act = &param->act_prop;
	enum bnxt_ulp_intf_type port_type;
	uint32_t vnic_type;

	/* Get the direction */
	dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_EGRESS) {
		/* For egress direction, fill vport */
		if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
			return BNXT_TF_RC_ERROR;

		pid = pid_s;
		pid = rte_cpu_to_be_32(pid);
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
	} else {
		/* For ingress direction, fill vnic */
		port_type = ULP_COMP_FLD_IDX_RD(param,
						BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
			vnic_type = BNXT_ULP_VF_FUNC_VNIC;
		else
			vnic_type = BNXT_ULP_DRV_FUNC_VNIC;

		if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
						 vnic_type, &pid_s))
			return BNXT_TF_RC_ERROR;

		pid = pid_s;
		pid = rte_cpu_to_be_32(pid);
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
	}

	/* Update the action port set bit */
	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action PF. */
int32_t
ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
		       struct ulp_rte_parser_params *params)
{
	uint32_t port_id;
	uint32_t ifindex;
	enum bnxt_ulp_intf_type intf_type;

	/* Get the port id of the current device */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

	/* Get the port db ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "Invalid port id\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Check that the port is a PF port */
	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
		BNXT_TF_DBG(ERR, "Port is not a PF port\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Update the action properties */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
	return ulp_rte_parser_act_port_set(params, ifindex);
}
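/*
 * Note: ulp_rte_parser_act_port_set() resolves the destination from the
 * flow direction: egress flows are steered to a physical vport, ingress
 * flows to a default vnic (the VF function vnic for VF representors,
 * the driver function vnic otherwise). Both values are stored
 * big-endian for the mapper.
 */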
/* Function to handle the parsing of RTE Flow action VF. */
int32_t
ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
		       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_vf *vf_action;
	enum bnxt_ulp_intf_type intf_type;
	uint32_t ifindex;
	struct bnxt *bp;

	vf_action = action_item->conf;
	if (!vf_action) {
		BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	if (vf_action->original) {
		BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	bp = bnxt_pmd_get_bp(params->port_id);
	if (bp == NULL) {
		BNXT_TF_DBG(ERR, "Invalid bp\n");
		return BNXT_TF_RC_ERROR;
	}

	/* vf_action->id is a logical number which in this case is an
	 * offset from the first VF. So, to get the absolute VF id, the
	 * offset must be added to the absolute first vf id of that port.
	 */
	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
						 bp->first_vf_id +
						 vf_action->id,
						 &ifindex)) {
		BNXT_TF_DBG(ERR, "VF is not valid interface\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Check that the port is a VF port */
	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
	    intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
		BNXT_TF_DBG(ERR, "Port is not a VF port\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the action properties */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
	return ulp_rte_parser_act_port_set(params, ifindex);
}

/* Function to handle the parsing of RTE Flow action port_id. */
int32_t
ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
			    struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_port_id *port_id = act_item->conf;
	uint32_t ifindex;
	enum bnxt_ulp_intf_type intf_type;

	if (!port_id) {
		BNXT_TF_DBG(ERR,
			    "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	if (port_id->original) {
		BNXT_TF_DBG(ERR,
			    "ParseErr:Portid Original not supported\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* Get the port db ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "Invalid port id\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get the intf type */
	intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
	if (!intf_type) {
		BNXT_TF_DBG(ERR, "Invalid port type\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Set the action port */
	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
	return ulp_rte_parser_act_port_set(param, ifindex);
}
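/*
 * Note: the VF and port_id handlers above, like the phy_port handler
 * below, reject conf->original, i.e. redirecting to the "original" port
 * of a representor is not supported by this parser; only the explicitly
 * named destination is accepted.
 */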
/* Function to handle the parsing of RTE Flow action phy_port. */
int32_t
ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
			     struct ulp_rte_parser_params *prm)
{
	const struct rte_flow_action_phy_port *phy_port;
	uint32_t pid;
	int32_t rc;
	uint16_t pid_s;
	enum bnxt_ulp_direction_type dir;

	phy_port = action_item->conf;
	if (!phy_port) {
		BNXT_TF_DBG(ERR,
			    "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	if (phy_port->original) {
		BNXT_TF_DBG(ERR,
			    "Parse Err:Port Original not supported\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir != BNXT_ULP_DIR_EGRESS) {
		BNXT_TF_DBG(ERR,
			    "Parse Err:Phy ports are valid only for egress\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	/* Get the physical port details from port db */
	rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
					    &pid_s);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to get port details\n");
		return -EINVAL;
	}

	pid = pid_s;
	pid = rte_cpu_to_be_32(pid);
	memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
	       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);

	/* Update the action port set bit */
	ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
	ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
			    BNXT_ULP_INTF_TYPE_PHY_PORT);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action pop vlan. */
int32_t
ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
				struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with pop */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action push vlan. */
int32_t
ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
				 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_of_push_vlan *push_vlan;
	uint16_t ethertype;
	struct ulp_rte_act_prop *act = &params->act_prop;

	push_vlan = action_item->conf;
	if (push_vlan) {
		ethertype = push_vlan->ethertype;
		if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
			BNXT_TF_DBG(ERR,
				    "Parse Err: Ethertype not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
		       &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
		/* Update the act_bitmap with push vlan */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_PUSH_VLAN);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
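/*
 * Note: a push-vlan request typically arrives as the OF action triple
 *	OF_PUSH_VLAN -> OF_SET_VLAN_VID -> OF_SET_VLAN_PCP
 * with each handler recording its piece in act_prop and setting its own
 * action bit; the downstream templates are expected to consume the
 * three properties together when building the tag.
 */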
/* Function to handle the parsing of RTE Flow action set vlan id. */
int32_t
ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
				    struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
	uint32_t vid;
	struct ulp_rte_act_prop *act = &params->act_prop;

	vlan_vid = action_item->conf;
	if (vlan_vid && vlan_vid->vlan_vid) {
		vid = vlan_vid->vlan_vid;
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
		       &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
		/* Update the act_bitmap with set vlan vid */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_VLAN_VID);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set vlan pcp. */
int32_t
ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
				    struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
	uint8_t pcp;
	struct ulp_rte_act_prop *act = &params->act_prop;

	vlan_pcp = action_item->conf;
	if (vlan_pcp) {
		pcp = vlan_pcp->vlan_pcp;
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
		       &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
		/* Update the act_bitmap with set vlan pcp */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set ipv4 src.*/
int32_t
ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
				 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_ipv4 *set_ipv4;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_ipv4 = action_item->conf;
	if (set_ipv4) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
		/* Update the act_bitmap with set ipv4 src */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
int32_t
ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
				 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_ipv4 *set_ipv4;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_ipv4 = action_item->conf;
	if (set_ipv4) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
		/* Update the act_bitmap with set ipv4 dst */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_IPV4_DST);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
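/*
 * Note: the set_ipv4_* handlers above (and the set_tp_* handlers below)
 * store the rewrite values exactly as rte_flow supplies them, i.e. in
 * network byte order, and only flag the action; the actual header
 * rewrite is performed by the NAT-capable action templates.
 */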
/* Function to handle the parsing of RTE Flow action set tp src.*/
int32_t
ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
			       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_tp *set_tp;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_tp = action_item->conf;
	if (set_tp) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
		/* Update the act_bitmap with set tp src */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_TP_SRC);
		return BNXT_TF_RC_SUCCESS;
	}

	BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set tp dst.*/
int32_t
ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
			       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_tp *set_tp;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_tp = action_item->conf;
	if (set_tp) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
		/* Update the act_bitmap with set tp dst */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_TP_DST);
		return BNXT_TF_RC_SUCCESS;
	}

	BNXT_TF_DBG(ERR, "Parse Error: set tp dst arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action dec ttl.*/
int32_t
ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
			    struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with dec ttl */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action JUMP */
int32_t
ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with jump */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action sample. */
int32_t
ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
			   struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_sample *sample;
	int ret;

	sample = action_item->conf;
	if (!sample)
		return BNXT_TF_RC_ERROR;

	/* if SAMPLE bit is set it means this sample action is nested within
	 * the actions of another sample action; this is not allowed
	 */
	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
			     BNXT_ULP_ACT_BIT_SAMPLE))
		return BNXT_TF_RC_ERROR;

	/* a sample action is only allowed as a shared action */
	if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
			      BNXT_ULP_ACT_BIT_SHARED))
		return BNXT_TF_RC_ERROR;

	/* only a ratio of 1 i.e. 100% is supported */
	if (sample->ratio != 1)
		return BNXT_TF_RC_ERROR;

	if (!sample->actions)
		return BNXT_TF_RC_ERROR;

	/* parse the nested actions for a sample action */
	ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
	if (ret == BNXT_TF_RC_SUCCESS)
		/* Update the act_bitmap with sample */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SAMPLE);

	return ret;
}
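/*
 * Note: sampling is accepted only in its mirror form: the action must
 * come from a shared/indirect context (the SHARED bit is already set),
 * must use ratio == 1, i.e. 100% of packets, and its nested action list
 * is parsed with the same top-level parser before the SAMPLE bit is
 * recorded.
 */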
/* Function to handle the parsing of bnxt vendor Flow action vxlan decap. */
int32_t
ulp_vendor_vxlan_decap_act_handler(const struct rte_flow_action *action_item,
				   struct ulp_rte_parser_params *params)
{
	/* Set the F1 flow header bit */
	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);
	return ulp_rte_vxlan_decap_act_handler(action_item, params);
}

/* Function to handle the parsing of bnxt vendor Flow item vxlan decap. */
int32_t
ulp_rte_vendor_vxlan_decap_hdr_handler(const struct rte_flow_item *item,
				       struct ulp_rte_parser_params *params)
{
	RTE_SET_USED(item);
	/* Set the F2 flow header bit */
	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F2);
	return ulp_rte_vxlan_decap_act_handler(NULL, params);
}
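/*
 * Note: the F1/F2 header bits mark the two halves of the vendor
 * tunnel-decap offload (the action-side and the item-side signature);
 * the flow templates are presumably keyed on these bits to select the
 * matching decap records.
 */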