/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include "bnxt.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "bnxt_ulp.h"
#include "bnxt_tf_common.h"
#include "bnxt_tf_pmd_shim.h"
#include "ulp_rte_parser.h"
#include "ulp_matcher.h"
#include "ulp_utils.h"
#include "tfp.h"
#include "ulp_port_db.h"
#include "ulp_flow_db.h"
#include "ulp_mapper.h"
#include "ulp_tun.h"
#include "ulp_template_db_tbl.h"

/* Local defines for the parsing functions */
#define ULP_VLAN_PRIORITY_SHIFT    13 /* First 3 bits */
#define ULP_VLAN_PRIORITY_MASK     0x700
#define ULP_VLAN_TAG_MASK          0xFFF /* Last 12 bits */
#define ULP_UDP_PORT_VXLAN         4789

/* Utility function to skip the void items. */
static inline int32_t
ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
{
    if (!*item)
        return 0;
    if (increment)
        (*item)++;
    while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
        (*item)++;
    if (*item)
        return 1;
    return 0;
}

/* Utility function to copy field spec items */
static struct ulp_rte_hdr_field *
ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
                        const void *buffer,
                        uint32_t size)
{
    field->size = size;
    memcpy(field->spec, buffer, field->size);
    field++;
    return field;
}

/* Utility function to update the field_bitmap */
static void
ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
                                   uint32_t idx,
                                   enum bnxt_ulp_prsr_action prsr_act)
{
    struct ulp_rte_hdr_field *field;

    field = &params->hdr_field[idx];
    if (ulp_bitmap_notzero(field->mask, field->size)) {
        ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
        if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
            ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
        /* Not an exact match */
        if (!ulp_bitmap_is_ones(field->mask, field->size))
            ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_WC_MATCH, 1);
    } else {
        ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
    }
}

#define ulp_deference_struct(x, y)    ((x) ? &((x)->y) : NULL)
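/*
 * The macro above yields a pointer to member 'y' of 'x', or NULL when the
 * item's spec/mask pointer itself is NULL. This lets every handler pass
 * optional spec and mask buffers straight through, e.g.:
 *     ulp_rte_prsr_fld_mask(params, &idx, size,
 *                           ulp_deference_struct(eth_spec, type),
 *                           ulp_deference_struct(eth_mask, type),
 *                           ULP_PRSR_ACT_DEFAULT);
 * copies nothing when the application supplied no spec or mask.
 */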
/* Utility function to copy field spec and mask items */
static void
ulp_rte_prsr_fld_mask(struct ulp_rte_parser_params *params,
                      uint32_t *idx,
                      uint32_t size,
                      const void *spec_buff,
                      const void *mask_buff,
                      enum bnxt_ulp_prsr_action prsr_act)
{
    struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];

    /* update the field size */
    field->size = size;

    /* copy the mask only if it is present and not ignored */
    if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff) {
        memcpy(field->mask, mask_buff, size);
        ulp_rte_parser_field_bitmap_update(params, *idx, prsr_act);
    }

    /* copy the spec only if both spec and mask are present and the
     * mask is non-zero
     */
    if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size))
        memcpy(field->spec, spec_buff, size);

    /* Increment the index */
    *idx = *idx + 1;
}

/* Utility function to validate the field size and allocate the field index */
static int32_t
ulp_rte_prsr_fld_size_validate(struct ulp_rte_parser_params *params,
                               uint32_t *idx,
                               uint32_t size)
{
    if (params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX) {
        BNXT_TF_DBG(ERR, "OOB for field processing %u\n", *idx);
        return -EINVAL;
    }
    *idx = params->field_idx;
    params->field_idx += size;
    return 0;
}

/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow items into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
                              struct ulp_rte_parser_params *params)
{
    const struct rte_flow_item *item = pattern;
    struct bnxt_ulp_rte_hdr_info *hdr_info;

    params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;

    /* Set the computed flags for no vlan tags before parsing */
    ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
    ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);

    /* Parse all the items in the pattern */
    while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
        if (item->type >=
            (typeof(item->type))BNXT_RTE_FLOW_ITEM_TYPE_END) {
            if (item->type >=
                (typeof(item->type))BNXT_RTE_FLOW_ITEM_TYPE_LAST)
                goto hdr_parser_error;
            /* get the header information */
            hdr_info = &ulp_vendor_hdr_info[item->type -
                                            BNXT_RTE_FLOW_ITEM_TYPE_END];
        } else {
            if (item->type > RTE_FLOW_ITEM_TYPE_HIGIG2)
                goto hdr_parser_error;
            hdr_info = &ulp_hdr_info[item->type];
        }
        if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
            goto hdr_parser_error;
        } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
            /* call the registered callback handler */
            if (hdr_info->proto_hdr_func) {
                if (hdr_info->proto_hdr_func(item, params) !=
                    BNXT_TF_RC_SUCCESS) {
                    return BNXT_TF_RC_ERROR;
                }
            }
        }
        item++;
    }
    /* update the implied SVIF */
    return ulp_rte_parser_implicit_match_port_process(params);

hdr_parser_error:
    BNXT_TF_DBG(ERR, "Truflow parser does not support type %d\n",
                item->type);
    return BNXT_TF_RC_PARSE_ERR;
}
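/*
 * Item dispatch above: standard rte_flow item types index directly into
 * ulp_hdr_info[], while Broadcom vendor-specific items occupy the range
 * [BNXT_RTE_FLOW_ITEM_TYPE_END, BNXT_RTE_FLOW_ITEM_TYPE_LAST) and are
 * rebased into ulp_vendor_hdr_info[]. Each table entry marks the item as
 * supported or not and carries the parser callback to invoke.
 */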
/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow actions into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
                              struct ulp_rte_parser_params *params)
{
    const struct rte_flow_action *action_item = actions;
    struct bnxt_ulp_rte_act_info *hdr_info;

    /* Parse all the actions in the list */
    while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
        if (action_item->type >=
            (typeof(action_item->type))BNXT_RTE_FLOW_ACTION_TYPE_END) {
            if (action_item->type >=
                (typeof(action_item->type))BNXT_RTE_FLOW_ACTION_TYPE_LAST)
                goto act_parser_error;
            /* get the action information from the bnxt act info table */
            hdr_info = &ulp_vendor_act_info[action_item->type -
                                            BNXT_RTE_FLOW_ACTION_TYPE_END];
        } else {
            if (action_item->type > RTE_FLOW_ACTION_TYPE_SHARED)
                goto act_parser_error;
            /* get the action information from the act info table */
            hdr_info = &ulp_act_info[action_item->type];
        }
        if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
            goto act_parser_error;
        } else if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_SUPPORTED) {
            /* call the registered callback handler */
            if (hdr_info->proto_act_func) {
                if (hdr_info->proto_act_func(action_item, params) !=
                    BNXT_TF_RC_SUCCESS) {
                    return BNXT_TF_RC_ERROR;
                }
            }
        }
        action_item++;
    }
    /* update the implied port details */
    ulp_rte_parser_implicit_act_port_process(params);
    return BNXT_TF_RC_SUCCESS;

act_parser_error:
    BNXT_TF_DBG(ERR, "Truflow parser does not support act %u\n",
                action_item->type);
    return BNXT_TF_RC_ERROR;
}

/*
 * Function to handle the post processing of the computed
 * fields for the interface.
 */
static void
bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
{
    uint32_t ifindex;
    uint16_t port_id, parif;
    uint32_t mtype;
    enum bnxt_ulp_direction_type dir;

    /* get the direction details */
    dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);

    /* read the port id details */
    port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
    if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
                                          &ifindex)) {
        BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
        return;
    }

    if (dir == BNXT_ULP_DIR_INGRESS) {
        /* Set port PARIF */
        if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
                                  BNXT_ULP_PHY_PORT_PARIF, &parif)) {
            BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
            return;
        }
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
                            parif);
    } else {
        /* Get the match port type */
        mtype = ULP_COMP_FLD_IDX_RD(params,
                                    BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
        if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
            ULP_COMP_FLD_IDX_WR(params,
                                BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP, 1);
            /* Set VF func PARIF */
            if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
                                      BNXT_ULP_VF_FUNC_PARIF,
                                      &parif)) {
                BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
                return;
            }
            ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
                                parif);
        } else {
            /* Set DRV func PARIF */
            if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
                                      BNXT_ULP_DRV_FUNC_PARIF,
                                      &parif)) {
                BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
                return;
            }
            ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
                                parif);
        }
        if (mtype == BNXT_ULP_INTF_TYPE_PF) {
            ULP_COMP_FLD_IDX_WR(params,
                                BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF, 1);
        }
    }
}

static int32_t
ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
{
    enum bnxt_ulp_intf_type match_port_type, act_port_type;
    enum bnxt_ulp_direction_type dir;
    uint32_t act_port_set;

    /* Get the computed details */
    dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
    match_port_type = ULP_COMP_FLD_IDX_RD(params,
                                          BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
    act_port_type = ULP_COMP_FLD_IDX_RD(params,
                                        BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
    act_port_set = ULP_COMP_FLD_IDX_RD(params,
                                       BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);

    /* set the flow direction in the proto and action header */
    if (dir == BNXT_ULP_DIR_EGRESS) {
        ULP_BITMAP_SET(params->hdr_bitmap.bits,
                       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
        ULP_BITMAP_SET(params->act_bitmap.bits,
                       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
    }

    /* calculate the VF to VF flag */
    if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
        match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);

    /* Update the decrement ttl computational fields */
    if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
                         BNXT_ULP_ACT_BIT_DEC_TTL)) {
        /*
         * If the vxlan protocol is included and the vxlan decap
         * action is not set, then decrement the tunnel ttl.
         * Similarly add GRE and NVGRE in the future.
         */
        if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
                              BNXT_ULP_HDR_BIT_T_VXLAN) &&
             !ULP_BITMAP_ISSET(params->act_bitmap.bits,
                               BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
            ULP_COMP_FLD_IDX_WR(params,
                                BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
        } else {
            ULP_COMP_FLD_IDX_WR(params,
                                BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
        }
    }

    /* Merge the hdr_fp_bit into the proto header bit */
    params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;

    /* Update the comp fld fid */
    ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);

    /* Update the computed interface parameters */
    bnxt_ulp_comp_fld_intf_update(params);

    /* TBD: Handle the flow rejection scenarios */
    return 0;
}

/*
 * Function to handle the post processing of the parsing details
 */
void
bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
{
    ulp_post_process_normal_flow(params);
}

/*
 * Function to compute the flow direction based on the match port details
 */
static void
bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
{
    enum bnxt_ulp_intf_type match_port_type;

    /* Get the match port type */
    match_port_type = ULP_COMP_FLD_IDX_RD(params,
                                          BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);

    /* If ingress flow and the match port is a VF rep, then dir is egress */
    if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
        match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
                            BNXT_ULP_DIR_EGRESS);
    } else {
        /* Assign the input direction */
        if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
            ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
                                BNXT_ULP_DIR_INGRESS);
        else
            ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
                                BNXT_ULP_DIR_EGRESS);
    }
}
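/*
 * Direction summary for the function above:
 *   attr ingress + match port VF_REP -> BNXT_ULP_DIR_EGRESS
 *   attr ingress + any other port    -> BNXT_ULP_DIR_INGRESS
 *   attr egress                      -> BNXT_ULP_DIR_EGRESS
 * The VF-representor case is flipped because the representor carries the
 * VF's own traffic, so the hardware direction is inverted.
 */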
/* Function to set the SVIF (source virtual interface) match field. */
static int32_t
ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
                        uint32_t ifindex,
                        uint16_t mask,
                        enum bnxt_ulp_direction_type item_dir)
{
    uint16_t svif;
    enum bnxt_ulp_direction_type dir;
    struct ulp_rte_hdr_field *hdr_field;
    enum bnxt_ulp_svif_type svif_type;
    enum bnxt_ulp_intf_type port_type;

    if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
        BNXT_ULP_INVALID_SVIF_VAL) {
        BNXT_TF_DBG(ERR,
                    "SVIF already set, multiple sources not supported\n");
        return BNXT_TF_RC_ERROR;
    }

    /* Get port type details */
    port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
    if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
        BNXT_TF_DBG(ERR, "Invalid port type\n");
        return BNXT_TF_RC_ERROR;
    }

    /* Update the match port type */
    ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);

    /* compute the direction */
    bnxt_ulp_rte_parser_direction_compute(params);

    /* Get the computed direction */
    dir = (item_dir != BNXT_ULP_DIR_INVALID) ? item_dir :
        ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
    if (dir == BNXT_ULP_DIR_INGRESS &&
        port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
        svif_type = BNXT_ULP_PHY_PORT_SVIF;
    } else {
        if (port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
            item_dir != BNXT_ULP_DIR_EGRESS)
            svif_type = BNXT_ULP_VF_FUNC_SVIF;
        else
            svif_type = BNXT_ULP_DRV_FUNC_SVIF;
    }
    ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type, &svif);
    svif = rte_cpu_to_be_16(svif);
    hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
    memcpy(hdr_field->spec, &svif, sizeof(svif));
    memcpy(hdr_field->mask, &mask, sizeof(mask));
    hdr_field->size = sizeof(svif);
    ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
                        rte_be_to_cpu_16(svif));
    return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of the RTE port id */
int32_t
ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
{
    uint16_t port_id = 0;
    uint16_t svif_mask = 0xFFFF;
    uint32_t ifindex;
    int32_t rc = BNXT_TF_RC_ERROR;

    if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
        BNXT_ULP_INVALID_SVIF_VAL)
        return BNXT_TF_RC_SUCCESS;
    /* SVIF not set, so get the port id */
    port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

    if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
                                          &ifindex)) {
        BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
        return rc;
    }

    /* Update the SVIF details */
    rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask,
                                 BNXT_ULP_DIR_INVALID);
    return rc;
}

/* Function to handle the implicit action port id */
int32_t
ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
{
    struct rte_flow_action action_item = {0};
    struct rte_flow_action_port_id port_id = {0};

    /* Read the action port set bit */
    if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
        /* Already set, so just exit */
        return BNXT_TF_RC_SUCCESS;
    }
    port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
    action_item.type = RTE_FLOW_ACTION_TYPE_PORT_ID;
    action_item.conf = &port_id;

    /* Update the action port based on the incoming port */
    ulp_rte_port_act_handler(&action_item, params);

    /* Reset the action port set bit */
    ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
    return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item PF Header. */
int32_t
ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
                       struct ulp_rte_parser_params *params)
{
    uint16_t port_id = 0;
    uint16_t svif_mask = 0xFFFF;
    uint32_t ifindex;

    /* Get the implicit port id */
    port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

    /* perform the conversion from dpdk port to bnxt ifindex */
    if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
                                          &ifindex)) {
        BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
        return BNXT_TF_RC_ERROR;
    }

    /* Update the SVIF details */
    return ulp_rte_parser_svif_set(params, ifindex, svif_mask,
                                   BNXT_ULP_DIR_INVALID);
}

/* Function to handle the parsing of RTE Flow item VF Header. */
int32_t
ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
                       struct ulp_rte_parser_params *params)
{
    const struct rte_flow_item_vf *vf_spec = item->spec;
    const struct rte_flow_item_vf *vf_mask = item->mask;
    uint16_t mask = 0;
    uint32_t ifindex;
    int32_t rc = BNXT_TF_RC_PARSE_ERR;

    /* Get VF rte_flow_item for Port details */
    if (!vf_spec) {
        BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
        return rc;
    }
    if (!vf_mask) {
        BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
        return rc;
    }
    mask = vf_mask->id;

    /* perform the conversion from VF Func id to bnxt ifindex */
    if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
                                             vf_spec->id,
                                             &ifindex)) {
        BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
        return rc;
    }
    /* Update the SVIF details */
    return ulp_rte_parser_svif_set(params, ifindex, mask,
                                   BNXT_ULP_DIR_INVALID);
}
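/*
 * Note: both the PF and VF items resolve to an SVIF match. The PF item
 * uses the flow's incoming DPDK port implicitly, while the VF item
 * translates the given VF function id to an ifindex through the port DB
 * before taking the common ulp_rte_parser_svif_set() path.
 */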
/* Parse items PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
int32_t
ulp_rte_port_hdr_handler(const struct rte_flow_item *item,
                         struct ulp_rte_parser_params *params)
{
    enum bnxt_ulp_direction_type item_dir;
    uint16_t ethdev_id;
    uint16_t mask = 0;
    int32_t rc = BNXT_TF_RC_PARSE_ERR;
    uint32_t ifindex;

    if (!item->spec) {
        BNXT_TF_DBG(ERR, "ParseErr:Port spec is not valid\n");
        return rc;
    }
    if (!item->mask) {
        BNXT_TF_DBG(ERR, "ParseErr:Port mask is not valid\n");
        return rc;
    }

    switch (item->type) {
    case RTE_FLOW_ITEM_TYPE_PORT_ID: {
        const struct rte_flow_item_port_id *port_spec = item->spec;
        const struct rte_flow_item_port_id *port_mask = item->mask;

        item_dir = BNXT_ULP_DIR_INVALID;
        ethdev_id = port_spec->id;
        mask = port_mask->id;
        break;
    }
    case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR: {
        const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
        const struct rte_flow_item_ethdev *ethdev_mask = item->mask;

        item_dir = BNXT_ULP_DIR_INGRESS;
        ethdev_id = ethdev_spec->port_id;
        mask = ethdev_mask->port_id;
        break;
    }
    case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT: {
        const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
        const struct rte_flow_item_ethdev *ethdev_mask = item->mask;

        item_dir = BNXT_ULP_DIR_EGRESS;
        ethdev_id = ethdev_spec->port_id;
        mask = ethdev_mask->port_id;
        break;
    }
    default:
        BNXT_TF_DBG(ERR, "ParseErr:Unexpected item\n");
        return rc;
    }

    /* perform the conversion from dpdk port to bnxt ifindex */
    if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, ethdev_id,
                                          &ifindex)) {
        BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
        return rc;
    }
    /* Update the SVIF details */
    return ulp_rte_parser_svif_set(params, ifindex, mask, item_dir);
}
/* Function to handle the parsing of RTE Flow item phy port Header. */
int32_t
ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
                             struct ulp_rte_parser_params *params)
{
    const struct rte_flow_item_phy_port *port_spec = item->spec;
    const struct rte_flow_item_phy_port *port_mask = item->mask;
    uint16_t mask = 0;
    int32_t rc = BNXT_TF_RC_ERROR;
    uint16_t svif;
    enum bnxt_ulp_direction_type dir;
    struct ulp_rte_hdr_field *hdr_field;

    /* Copy the rte_flow_item for phy port into hdr_field */
    if (!port_spec) {
        BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
        return rc;
    }
    if (!port_mask) {
        BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
        return rc;
    }
    mask = port_mask->index;

    /* Update the match port type */
    ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
                        BNXT_ULP_INTF_TYPE_PHY_PORT);

    /* Compute the Hw direction */
    bnxt_ulp_rte_parser_direction_compute(params);

    /* Direction validation */
    dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
    if (dir == BNXT_ULP_DIR_EGRESS) {
        BNXT_TF_DBG(ERR,
                    "Parse Err:Phy ports are valid only for ingress\n");
        return BNXT_TF_RC_PARSE_ERR;
    }

    /* Get the physical port details from port db */
    rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
                                       &svif);
    if (rc) {
        BNXT_TF_DBG(ERR, "Failed to get port details\n");
        return BNXT_TF_RC_PARSE_ERR;
    }

    /* Update the SVIF details */
    svif = rte_cpu_to_be_16(svif);
    hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
    memcpy(hdr_field->spec, &svif, sizeof(svif));
    memcpy(hdr_field->mask, &mask, sizeof(mask));
    hdr_field->size = sizeof(svif);
    ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
                        rte_be_to_cpu_16(svif));
    if (!mask) {
        uint32_t port_id = 0;
        uint16_t phy_port = 0;

        /* Validate the control port */
        port_id = ULP_COMP_FLD_IDX_RD(params,
                                      BNXT_ULP_CF_IDX_DEV_PORT_ID);
        if (ulp_port_db_phy_port_get(params->ulp_ctx,
                                     port_id, &phy_port) ||
            (uint16_t)port_spec->index != phy_port) {
            BNXT_TF_DBG(ERR, "Mismatch of control and phy_port\n");
            return BNXT_TF_RC_PARSE_ERR;
        }
        ULP_BITMAP_SET(params->hdr_bitmap.bits,
                       BNXT_ULP_HDR_BIT_SVIF_IGNORE);
        memset(hdr_field->mask, 0xFF, sizeof(mask));
    }
    return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
                             uint16_t type, uint32_t in_flag)
{
    if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
        if (in_flag) {
            ULP_BITMAP_SET(param->hdr_fp_bit.bits,
                           BNXT_ULP_HDR_BIT_I_IPV4);
            ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
        } else {
            ULP_BITMAP_SET(param->hdr_fp_bit.bits,
                           BNXT_ULP_HDR_BIT_O_IPV4);
            ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
        }
    } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
        if (in_flag) {
            ULP_BITMAP_SET(param->hdr_fp_bit.bits,
                           BNXT_ULP_HDR_BIT_I_IPV6);
            ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
        } else {
            ULP_BITMAP_SET(param->hdr_fp_bit.bits,
                           BNXT_ULP_HDR_BIT_O_IPV6);
            ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
        }
    }
}
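/*
 * Ethertype mapping used above: RTE_ETHER_TYPE_IPV4 (0x0800) sets the
 * inner or outer IPv4 header bit plus the matching L3 computed field,
 * and RTE_ETHER_TYPE_IPV6 (0x86DD) does the same for IPv6. Any other
 * ethertype leaves the bitmaps untouched.
 */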
/* Internal Function to identify broadcast or multicast packets */
static int32_t
ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
{
    if (rte_is_multicast_ether_addr(eth_addr) ||
        rte_is_broadcast_ether_addr(eth_addr)) {
        BNXT_TF_DBG(DEBUG,
                    "No support for bcast or mcast addr offload\n");
        return 1;
    }
    return 0;
}

/* Function to handle the parsing of RTE Flow item Ethernet Header. */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
                        struct ulp_rte_parser_params *params)
{
    const struct rte_flow_item_eth *eth_spec = item->spec;
    const struct rte_flow_item_eth *eth_mask = item->mask;
    uint32_t idx = 0, dmac_idx = 0;
    uint32_t size;
    uint16_t eth_type = 0;
    uint32_t inner_flag = 0;

    /* Perform validations */
    if (eth_spec) {
        /* TODO: workaround to reject multicast and broadcast addrs */
        if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
            return BNXT_TF_RC_PARSE_ERR;

        if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
            return BNXT_TF_RC_PARSE_ERR;

        eth_type = eth_spec->type;
    }

    if (ulp_rte_prsr_fld_size_validate(params, &idx,
                                       BNXT_ULP_PROTO_HDR_ETH_NUM)) {
        BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
        return BNXT_TF_RC_ERROR;
    }
    /*
     * Copy the rte_flow_item for eth into hdr_field using ethernet
     * header fields
     */
    dmac_idx = idx;
    size = sizeof(((struct rte_flow_item_eth *)NULL)->dst.addr_bytes);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(eth_spec, dst.addr_bytes),
                          ulp_deference_struct(eth_mask, dst.addr_bytes),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_eth *)NULL)->src.addr_bytes);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(eth_spec, src.addr_bytes),
                          ulp_deference_struct(eth_mask, src.addr_bytes),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_eth *)NULL)->type);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(eth_spec, type),
                          ulp_deference_struct(eth_mask, type),
                          ULP_PRSR_ACT_MATCH_IGNORE);

    /* Update the protocol hdr bitmap */
    if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
                         BNXT_ULP_HDR_BIT_O_ETH) ||
        ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
                         BNXT_ULP_HDR_BIT_O_IPV4) ||
        ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
                         BNXT_ULP_HDR_BIT_O_IPV6) ||
        ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
                         BNXT_ULP_HDR_BIT_O_UDP) ||
        ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
                         BNXT_ULP_HDR_BIT_O_TCP)) {
        ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
        inner_flag = 1;
    } else {
        ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID,
                            dmac_idx);
    }
    /* Update the field protocol hdr bitmap */
    ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);

    return BNXT_TF_RC_SUCCESS;
}
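/*
 * Inner/outer classification above: if any outer L2/L3/L4 bit is already
 * present in the header bitmap, this ETH item belongs to the inner
 * (post-tunnel) headers; otherwise it is the outer ETH and its DMAC field
 * index is recorded for tunnel offload use.
 */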
/* Function to handle the parsing of RTE Flow item Vlan Header. */
int32_t
ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
                         struct ulp_rte_parser_params *params)
{
    const struct rte_flow_item_vlan *vlan_spec = item->spec;
    const struct rte_flow_item_vlan *vlan_mask = item->mask;
    struct ulp_rte_hdr_bitmap *hdr_bit;
    uint32_t idx = 0;
    uint16_t vlan_tag = 0, priority = 0;
    uint16_t vlan_tag_mask = 0, priority_mask = 0;
    uint32_t outer_vtag_num;
    uint32_t inner_vtag_num;
    uint16_t eth_type = 0;
    uint32_t inner_flag = 0;
    uint32_t size;

    if (vlan_spec) {
        vlan_tag = ntohs(vlan_spec->tci);
        priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
        vlan_tag &= ULP_VLAN_TAG_MASK;
        vlan_tag = htons(vlan_tag);
        eth_type = vlan_spec->inner_type;
    }

    if (vlan_mask) {
        vlan_tag_mask = ntohs(vlan_mask->tci);
        priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
        vlan_tag_mask &= 0xfff;

        /*
         * Priority and VLAN tag are each stored in 2 bytes. If the
         * 3-bit priority mask is all ones, set the remaining 13 bits
         * as well so the field is treated as an exact match; likewise
         * extend a full 12-bit VLAN tag mask.
         */
        if (priority_mask == ULP_VLAN_PRIORITY_MASK)
            priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
        if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
            vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
        vlan_tag_mask = htons(vlan_tag_mask);
    }

    if (ulp_rte_prsr_fld_size_validate(params, &idx,
                                       BNXT_ULP_PROTO_HDR_S_VLAN_NUM)) {
        BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
        return BNXT_TF_RC_ERROR;
    }

    /*
     * Copy the rte_flow_item for vlan into hdr_field using vlan
     * header fields
     */
    size = sizeof(((struct rte_flow_item_vlan *)NULL)->tci);
    /*
     * The priority field is ignored since OVS sets it as a wildcard
     * match, which is not supported. This is a workaround and shall
     * be addressed in the future.
     */
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          &priority,
                          (vlan_mask) ? &priority_mask : NULL,
                          ULP_PRSR_ACT_MASK_IGNORE);

    ulp_rte_prsr_fld_mask(params, &idx, size,
                          &vlan_tag,
                          (vlan_mask) ? &vlan_tag_mask : NULL,
                          ULP_PRSR_ACT_DEFAULT);
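    /*
     * TCI split example: a spec of tci = 0xE00A yields priority
     * 0xE00A >> 13 = 7 and vlan_tag 0xE00A & 0xFFF = 10, i.e. PCP 7
     * and VLAN ID 10 stored as two separate 2-byte fields.
     */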
    size = sizeof(((struct rte_flow_item_vlan *)NULL)->inner_type);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(vlan_spec, inner_type),
                          ulp_deference_struct(vlan_mask, inner_type),
                          ULP_PRSR_ACT_MATCH_IGNORE);

    /* Get the outer tag and inner tag counts */
    outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
                                         BNXT_ULP_CF_IDX_O_VTAG_NUM);
    inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
                                         BNXT_ULP_CF_IDX_I_VTAG_NUM);

    /* Update the hdr_bitmap of the vlans */
    hdr_bit = &params->hdr_bitmap;
    if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
        !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
        !outer_vtag_num) {
        /* Update the vlan tag num */
        outer_vtag_num++;
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
                            outer_vtag_num);
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
        ULP_BITMAP_SET(params->hdr_bitmap.bits,
                       BNXT_ULP_HDR_BIT_OO_VLAN);
        if (vlan_mask && vlan_tag_mask)
            ULP_COMP_FLD_IDX_WR(params,
                                BNXT_ULP_CF_IDX_OO_VLAN_FB_VID, 1);
    } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
               !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
               outer_vtag_num == 1) {
        /* update the vlan tag num */
        outer_vtag_num++;
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
                            outer_vtag_num);
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
        ULP_BITMAP_SET(params->hdr_bitmap.bits,
                       BNXT_ULP_HDR_BIT_OI_VLAN);
        if (vlan_mask && vlan_tag_mask)
            ULP_COMP_FLD_IDX_WR(params,
                                BNXT_ULP_CF_IDX_OI_VLAN_FB_VID, 1);
    } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
               ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
               !inner_vtag_num) {
        /* update the vlan tag num */
        inner_vtag_num++;
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
                            inner_vtag_num);
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
        ULP_BITMAP_SET(params->hdr_bitmap.bits,
                       BNXT_ULP_HDR_BIT_IO_VLAN);
        if (vlan_mask && vlan_tag_mask)
            ULP_COMP_FLD_IDX_WR(params,
                                BNXT_ULP_CF_IDX_IO_VLAN_FB_VID, 1);
        inner_flag = 1;
    } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
               ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
               inner_vtag_num == 1) {
        /* update the vlan tag num */
        inner_vtag_num++;
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
                            inner_vtag_num);
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
        ULP_BITMAP_SET(params->hdr_bitmap.bits,
                       BNXT_ULP_HDR_BIT_II_VLAN);
        if (vlan_mask && vlan_tag_mask)
            ULP_COMP_FLD_IDX_WR(params,
                                BNXT_ULP_CF_IDX_II_VLAN_FB_VID, 1);
        inner_flag = 1;
    } else {
        BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
        return BNXT_TF_RC_ERROR;
    }
    /* Update the field protocol hdr bitmap */
    ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
    return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
                             uint8_t proto, uint32_t in_flag)
{
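    /*
     * IPPROTO_UDP and IPPROTO_TCP set the inner or outer L4 bits,
     * IPPROTO_GRE marks a GRE tunnel, and IPPROTO_ICMP is classified
     * as inner or outer depending on whether an L3 tunnel was seen.
     * Any non-zero proto is also recorded as the L3 protocol id.
     */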
    if (proto == IPPROTO_UDP) {
        if (in_flag) {
            ULP_BITMAP_SET(param->hdr_fp_bit.bits,
                           BNXT_ULP_HDR_BIT_I_UDP);
            ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
        } else {
            ULP_BITMAP_SET(param->hdr_fp_bit.bits,
                           BNXT_ULP_HDR_BIT_O_UDP);
            ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
        }
    } else if (proto == IPPROTO_TCP) {
        if (in_flag) {
            ULP_BITMAP_SET(param->hdr_fp_bit.bits,
                           BNXT_ULP_HDR_BIT_I_TCP);
            ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
        } else {
            ULP_BITMAP_SET(param->hdr_fp_bit.bits,
                           BNXT_ULP_HDR_BIT_O_TCP);
            ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
        }
    } else if (proto == IPPROTO_GRE) {
        ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
    } else if (proto == IPPROTO_ICMP) {
        if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN))
            ULP_BITMAP_SET(param->hdr_bitmap.bits,
                           BNXT_ULP_HDR_BIT_I_ICMP);
        else
            ULP_BITMAP_SET(param->hdr_bitmap.bits,
                           BNXT_ULP_HDR_BIT_O_ICMP);
    }
    if (proto) {
        if (in_flag) {
            ULP_COMP_FLD_IDX_WR(param,
                                BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID, 1);
            ULP_COMP_FLD_IDX_WR(param,
                                BNXT_ULP_CF_IDX_I_L3_PROTO_ID, proto);
        } else {
            ULP_COMP_FLD_IDX_WR(param,
                                BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID, 1);
            ULP_COMP_FLD_IDX_WR(param,
                                BNXT_ULP_CF_IDX_O_L3_PROTO_ID, proto);
        }
    }
}

/* Function to handle the parsing of RTE Flow item IPV4 Header. */
int32_t
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
                         struct ulp_rte_parser_params *params)
{
    const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
    const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
    struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
    uint32_t idx = 0, dip_idx = 0;
    uint32_t size;
    uint8_t proto = 0;
    uint32_t inner_flag = 0;
    uint32_t cnt;

    /* validate that there is no third L3 header */
    cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
    if (cnt == 2) {
        BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
        return BNXT_TF_RC_ERROR;
    }

    if (ulp_rte_prsr_fld_size_validate(params, &idx,
                                       BNXT_ULP_PROTO_HDR_IPV4_NUM)) {
        BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
        return BNXT_TF_RC_ERROR;
    }

    /*
     * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
     * header fields
     */
    size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(ipv4_spec, hdr.version_ihl),
                          ulp_deference_struct(ipv4_mask, hdr.version_ihl),
                          ULP_PRSR_ACT_DEFAULT);
    /*
     * The tos field is ignored since OVS sets it as a wildcard match,
     * which is not supported. This is a workaround and shall be
     * addressed in the future.
     */
    size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(ipv4_spec,
                                               hdr.type_of_service),
                          ulp_deference_struct(ipv4_mask,
                                               hdr.type_of_service),
                          ULP_PRSR_ACT_MASK_IGNORE);

    size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(ipv4_spec, hdr.total_length),
                          ulp_deference_struct(ipv4_mask, hdr.total_length),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(ipv4_spec, hdr.packet_id),
                          ulp_deference_struct(ipv4_mask, hdr.packet_id),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(ipv4_spec,
                                               hdr.fragment_offset),
                          ulp_deference_struct(ipv4_mask,
                                               hdr.fragment_offset),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(ipv4_spec, hdr.time_to_live),
                          ulp_deference_struct(ipv4_mask, hdr.time_to_live),
                          ULP_PRSR_ACT_DEFAULT);

    /* Ignore proto for matching templates */
    size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(ipv4_spec,
                                               hdr.next_proto_id),
                          ulp_deference_struct(ipv4_mask,
                                               hdr.next_proto_id),
                          ULP_PRSR_ACT_MATCH_IGNORE);
    if (ipv4_spec)
        proto = ipv4_spec->hdr.next_proto_id;

    size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(ipv4_spec, hdr.hdr_checksum),
                          ulp_deference_struct(ipv4_mask, hdr.hdr_checksum),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(ipv4_spec, hdr.src_addr),
                          ulp_deference_struct(ipv4_mask, hdr.src_addr),
                          ULP_PRSR_ACT_DEFAULT);

    dip_idx = idx;
    size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(ipv4_spec, hdr.dst_addr),
                          ulp_deference_struct(ipv4_mask, hdr.dst_addr),
                          ULP_PRSR_ACT_DEFAULT);

    /* Set the ipv4 header bitmap and computed l3 header bitmaps */
    if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
        ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) ||
        ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) {
        ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
        inner_flag = 1;
    } else {
        ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
        /* Update the tunnel offload dest ip offset */
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
                            dip_idx);
    }
    /*
     * Some PMD applications set the protocol field in the IPv4 spec
     * but do not set the mask, so factor the mask into the proto
     * value calculation.
     */
    if (ipv4_mask)
        proto &= ipv4_mask->hdr.next_proto_id;

    /* Update the field protocol hdr bitmap */
    ulp_rte_l3_proto_type_update(params, proto, inner_flag);
    ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
    return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item IPV6 Header */
int32_t
ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
                         struct ulp_rte_parser_params *params)
{
    const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
    const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
    struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
    uint32_t idx = 0, dip_idx = 0;
    uint32_t size;
    uint32_t ver_spec = 0, ver_mask = 0;
    uint32_t tc_spec = 0, tc_mask = 0;
    uint32_t lab_spec = 0, lab_mask = 0;
    uint8_t proto = 0;
    uint32_t inner_flag = 0;
    uint32_t cnt;

    /* validate that there is no third L3 header */
    cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
    if (cnt == 2) {
        BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
        return BNXT_TF_RC_ERROR;
    }

    if (ulp_rte_prsr_fld_size_validate(params, &idx,
                                       BNXT_ULP_PROTO_HDR_IPV6_NUM)) {
        BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
        return BNXT_TF_RC_ERROR;
    }

    /*
     * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
     * header fields
     */
    if (ipv6_spec) {
        ver_spec = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
        tc_spec = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
        lab_spec = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
        proto = ipv6_spec->hdr.proto;
    }

    if (ipv6_mask) {
        ver_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
        tc_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
        lab_mask = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);

        /*
         * Some PMD applications set the protocol field in the IPv6
         * spec but do not set the mask, so factor the mask into the
         * proto value calculation.
         */
        proto &= ipv6_mask->hdr.proto;
    }

    size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow);
    ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask,
                          ULP_PRSR_ACT_DEFAULT);
    /*
     * The TC and flow label fields are ignored since OVS sets them as
     * wildcard matches, which are not supported. This is a workaround
     * and shall be addressed in the future.
     */
    ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask,
                          ULP_PRSR_ACT_MASK_IGNORE);
    ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask,
                          ULP_PRSR_ACT_MASK_IGNORE);

    size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(ipv6_spec, hdr.payload_len),
                          ulp_deference_struct(ipv6_mask, hdr.payload_len),
                          ULP_PRSR_ACT_DEFAULT);

    /* Ignore proto for template matching */
    size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(ipv6_spec, hdr.proto),
                          ulp_deference_struct(ipv6_mask, hdr.proto),
                          ULP_PRSR_ACT_MATCH_IGNORE);

    size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(ipv6_spec, hdr.hop_limits),
                          ulp_deference_struct(ipv6_mask, hdr.hop_limits),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(ipv6_spec, hdr.src_addr),
                          ulp_deference_struct(ipv6_mask, hdr.src_addr),
                          ULP_PRSR_ACT_DEFAULT);

    dip_idx = idx;
    size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(ipv6_spec, hdr.dst_addr),
                          ulp_deference_struct(ipv6_mask, hdr.dst_addr),
                          ULP_PRSR_ACT_DEFAULT);

    /* Set the ipv6 header bitmap and computed l3 header bitmaps */
    if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
        ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) ||
        ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) {
        ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
        inner_flag = 1;
    } else {
        ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
        /* Update the tunnel offload dest ip offset */
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
                            dip_idx);
    }

    /* Update the field protocol hdr bitmap */
    ulp_rte_l3_proto_type_update(params, proto, inner_flag);
    ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);

    return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *params,
                             uint16_t src_port, uint16_t src_mask,
                             uint16_t dst_port, uint16_t dst_mask,
                             enum bnxt_ulp_hdr_bit hdr_bit)
{
    switch (hdr_bit) {
    case BNXT_ULP_HDR_BIT_I_UDP:
    case BNXT_ULP_HDR_BIT_I_TCP:
        ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
                            (uint64_t)rte_be_to_cpu_16(src_port));
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
                            (uint64_t)rte_be_to_cpu_16(dst_port));
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT_MASK,
                            (uint64_t)rte_be_to_cpu_16(src_mask));
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT_MASK,
                            (uint64_t)rte_be_to_cpu_16(dst_mask));
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID, 1);
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
                            !!(src_port & src_mask));
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
                            !!(dst_port & dst_mask));
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
                            (hdr_bit == BNXT_ULP_HDR_BIT_I_UDP) ?
                            IPPROTO_UDP : IPPROTO_TCP);
        break;
    case BNXT_ULP_HDR_BIT_O_UDP:
    case BNXT_ULP_HDR_BIT_O_TCP:
        ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
                            (uint64_t)rte_be_to_cpu_16(src_port));
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
                            (uint64_t)rte_be_to_cpu_16(dst_port));
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT_MASK,
                            (uint64_t)rte_be_to_cpu_16(src_mask));
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
                            (uint64_t)rte_be_to_cpu_16(dst_mask));
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID, 1);
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
                            !!(src_port & src_mask));
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
                            !!(dst_port & dst_mask));
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
                            (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP) ?
                            IPPROTO_UDP : IPPROTO_TCP);
        break;
    default:
        break;
    }

    if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP && dst_port ==
        tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
        ULP_BITMAP_SET(params->hdr_fp_bit.bits,
                       BNXT_ULP_HDR_BIT_T_VXLAN);
        ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
    }
}
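/*
 * The function above also flags a VXLAN tunnel when the outer UDP
 * destination port equals 4789 (ULP_UDP_PORT_VXLAN), so the headers
 * that follow are classified as inner even without an explicit vxlan
 * item in the pattern.
 */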
/* Function to handle the parsing of RTE Flow item UDP Header. */
int32_t
ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
                        struct ulp_rte_parser_params *params)
{
    const struct rte_flow_item_udp *udp_spec = item->spec;
    const struct rte_flow_item_udp *udp_mask = item->mask;
    struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
    uint32_t idx = 0;
    uint32_t size;
    uint16_t dport = 0, sport = 0;
    uint16_t dport_mask = 0, sport_mask = 0;
    uint32_t cnt;
    enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_UDP;

    cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
    if (cnt == 2) {
        BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
        return BNXT_TF_RC_ERROR;
    }

    if (udp_spec) {
        sport = udp_spec->hdr.src_port;
        dport = udp_spec->hdr.dst_port;
    }
    if (udp_mask) {
        sport_mask = udp_mask->hdr.src_port;
        dport_mask = udp_mask->hdr.dst_port;
    }

    if (ulp_rte_prsr_fld_size_validate(params, &idx,
                                       BNXT_ULP_PROTO_HDR_UDP_NUM)) {
        BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
        return BNXT_TF_RC_ERROR;
    }

    /*
     * Copy the rte_flow_item for udp into hdr_field using udp
     * header fields
     */
    size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(udp_spec, hdr.src_port),
                          ulp_deference_struct(udp_mask, hdr.src_port),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(udp_spec, hdr.dst_port),
                          ulp_deference_struct(udp_mask, hdr.dst_port),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(udp_spec, hdr.dgram_len),
                          ulp_deference_struct(udp_mask, hdr.dgram_len),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(udp_spec, hdr.dgram_cksum),
                          ulp_deference_struct(udp_mask, hdr.dgram_cksum),
                          ULP_PRSR_ACT_DEFAULT);

    /* Set the udp header bitmap and computed l4 header bitmaps */
    if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
        ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
        out_l4 = BNXT_ULP_HDR_BIT_I_UDP;

    ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
                                 dport_mask, out_l4);
    ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
    return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item TCP Header. */
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
                        struct ulp_rte_parser_params *params)
{
    const struct rte_flow_item_tcp *tcp_spec = item->spec;
    const struct rte_flow_item_tcp *tcp_mask = item->mask;
    struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
    uint32_t idx = 0;
    uint16_t dport = 0, sport = 0;
    uint16_t dport_mask = 0, sport_mask = 0;
    uint32_t size;
    uint32_t cnt;
    enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_TCP;

    cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
    if (cnt == 2) {
        BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
        return BNXT_TF_RC_ERROR;
    }

    if (tcp_spec) {
        sport = tcp_spec->hdr.src_port;
        dport = tcp_spec->hdr.dst_port;
    }
    if (tcp_mask) {
        sport_mask = tcp_mask->hdr.src_port;
        dport_mask = tcp_mask->hdr.dst_port;
    }

    if (ulp_rte_prsr_fld_size_validate(params, &idx,
                                       BNXT_ULP_PROTO_HDR_TCP_NUM)) {
        BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
        return BNXT_TF_RC_ERROR;
    }

    /*
     * Copy the rte_flow_item for tcp into hdr_field using tcp
     * header fields
     */
    size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(tcp_spec, hdr.src_port),
                          ulp_deference_struct(tcp_mask, hdr.src_port),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(tcp_spec, hdr.dst_port),
                          ulp_deference_struct(tcp_mask, hdr.dst_port),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(tcp_spec, hdr.sent_seq),
                          ulp_deference_struct(tcp_mask, hdr.sent_seq),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(tcp_spec, hdr.recv_ack),
                          ulp_deference_struct(tcp_mask, hdr.recv_ack),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(tcp_spec, hdr.data_off),
                          ulp_deference_struct(tcp_mask, hdr.data_off),
                          ULP_PRSR_ACT_DEFAULT);
    size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(tcp_spec, hdr.tcp_flags),
                          ulp_deference_struct(tcp_mask, hdr.tcp_flags),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(tcp_spec, hdr.rx_win),
                          ulp_deference_struct(tcp_mask, hdr.rx_win),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(tcp_spec, hdr.cksum),
                          ulp_deference_struct(tcp_mask, hdr.cksum),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(tcp_spec, hdr.tcp_urp),
                          ulp_deference_struct(tcp_mask, hdr.tcp_urp),
                          ULP_PRSR_ACT_DEFAULT);

    /* Set the tcp header bitmap and computed l4 header bitmaps */
    if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
        ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
        out_l4 = BNXT_ULP_HDR_BIT_I_TCP;

    ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
                                 dport_mask, out_l4);
    ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
    return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item Vxlan Header. */
int32_t
ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
                          struct ulp_rte_parser_params *params)
{
    const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
    const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
    struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
    uint32_t idx = 0;
    uint32_t size;

    if (ulp_rte_prsr_fld_size_validate(params, &idx,
                                       BNXT_ULP_PROTO_HDR_VXLAN_NUM)) {
        BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
        return BNXT_TF_RC_ERROR;
    }

    /*
     * Copy the rte_flow_item for vxlan into hdr_field using vxlan
     * header fields
     */
    size = sizeof(((struct rte_flow_item_vxlan *)NULL)->flags);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(vxlan_spec, flags),
                          ulp_deference_struct(vxlan_mask, flags),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd0);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(vxlan_spec, rsvd0),
                          ulp_deference_struct(vxlan_mask, rsvd0),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_vxlan *)NULL)->vni);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(vxlan_spec, vni),
                          ulp_deference_struct(vxlan_mask, vni),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd1);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(vxlan_spec, rsvd1),
                          ulp_deference_struct(vxlan_mask, rsvd1),
                          ULP_PRSR_ACT_DEFAULT);

    /* Update the hdr_bitmap with vxlan */
    ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
    ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
    return BNXT_TF_RC_SUCCESS;
}
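/*
 * Once T_VXLAN and L3_TUN are set above, the eth/ipv4/ipv6/udp/tcp
 * handlers that run for the rest of the pattern classify their headers
 * as inner. For example, the pattern
 *     eth / ipv4 / udp / vxlan / eth / ipv4 / tcp / end
 * yields the O_ETH, O_IPV4, O_UDP, T_VXLAN, I_ETH, I_IPV4 and I_TCP bits.
 */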
/* Function to handle the parsing of RTE Flow item GRE Header. */
int32_t
ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
                        struct ulp_rte_parser_params *params)
{
    const struct rte_flow_item_gre *gre_spec = item->spec;
    const struct rte_flow_item_gre *gre_mask = item->mask;
    struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
    uint32_t idx = 0;
    uint32_t size;

    if (ulp_rte_prsr_fld_size_validate(params, &idx,
                                       BNXT_ULP_PROTO_HDR_GRE_NUM)) {
        BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
        return BNXT_TF_RC_ERROR;
    }

    size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(gre_spec, c_rsvd0_ver),
                          ulp_deference_struct(gre_mask, c_rsvd0_ver),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(gre_spec, protocol),
                          ulp_deference_struct(gre_mask, protocol),
                          ULP_PRSR_ACT_DEFAULT);

    /* Update the hdr_bitmap with GRE */
    ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
    ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
    return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item ANY. */
int32_t
ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
                         struct ulp_rte_parser_params *params __rte_unused)
{
    return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item ICMP Header. */
int32_t
ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
                         struct ulp_rte_parser_params *params)
{
    const struct rte_flow_item_icmp *icmp_spec = item->spec;
    const struct rte_flow_item_icmp *icmp_mask = item->mask;
    struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
    uint32_t idx = 0;
    uint32_t size;

    if (ulp_rte_prsr_fld_size_validate(params, &idx,
                                       BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
        BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
        return BNXT_TF_RC_ERROR;
    }

    size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_type);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(icmp_spec, hdr.icmp_type),
                          ulp_deference_struct(icmp_mask, hdr.icmp_type),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_code);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(icmp_spec, hdr.icmp_code),
                          ulp_deference_struct(icmp_mask, hdr.icmp_code),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_cksum);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(icmp_spec, hdr.icmp_cksum),
                          ulp_deference_struct(icmp_mask, hdr.icmp_cksum),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_ident);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(icmp_spec, hdr.icmp_ident),
                          ulp_deference_struct(icmp_mask, hdr.icmp_ident),
                          ULP_PRSR_ACT_DEFAULT);

    size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_seq_nb);
    ulp_rte_prsr_fld_mask(params, &idx, size,
                          ulp_deference_struct(icmp_spec, hdr.icmp_seq_nb),
                          ulp_deference_struct(icmp_mask, hdr.icmp_seq_nb),
                          ULP_PRSR_ACT_DEFAULT);
/* Function to handle the parsing of RTE Flow item ICMP6 Header. */
int32_t
ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_icmp6 *icmp_spec = item->spec;
	const struct rte_flow_item_icmp6 *icmp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, type),
			      ulp_deference_struct(icmp_mask, type),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->code);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, code),
			      ulp_deference_struct(icmp_mask, code),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->checksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, checksum),
			      ulp_deference_struct(icmp_mask, checksum),
			      ULP_PRSR_ACT_DEFAULT);

	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4)) {
		BNXT_TF_DBG(ERR, "Error: incorrect icmp version\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the hdr_bitmap with ICMP */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
	else
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item void Header */
int32_t
ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action void Header. */
int32_t
ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action Mark Header. */
int32_t
ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
			 struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_mark *mark;
	struct ulp_rte_act_bitmap *act = &param->act_bitmap;
	uint32_t mark_id;

	mark = action_item->conf;
	if (mark) {
		mark_id = tfp_cpu_to_be_32(mark->id);
		memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
		       &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);

		/* Update the act_bitmap with mark */
		ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
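/*
 * Illustrative only, not driver code: a hypothetical MARK action; the
 * 32-bit id is delivered with received packets (mbuf->hash.fdir.hi):
 *
 *	struct rte_flow_action_mark mark = { .id = 0xbeef };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */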
/* Function to handle the parsing of RTE Flow action RSS Header. */
int32_t
ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
			struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_rss *rss;
	struct ulp_rte_act_prop *ap = &param->act_prop;

	if (action_item == NULL || action_item->conf == NULL) {
		BNXT_TF_DBG(ERR, "Parse Err: invalid rss configuration\n");
		return BNXT_TF_RC_ERROR;
	}

	rss = action_item->conf;
	/* Copy the rss into the specific action properties */
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_TYPES], &rss->types,
	       BNXT_ULP_ACT_PROP_SZ_RSS_TYPES);
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL], &rss->level,
	       BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL);
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN],
	       &rss->key_len, BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN);

	if (rss->key_len > BNXT_ULP_ACT_PROP_SZ_RSS_KEY) {
		BNXT_TF_DBG(ERR, "Parse Err: RSS key too big\n");
		return BNXT_TF_RC_ERROR;
	}
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY], rss->key,
	       rss->key_len);

	/* set the RSS action header bit */
	ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);

	return BNXT_TF_RC_SUCCESS;
}
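/*
 * Illustrative only, not driver code: a hypothetical RSS action; the
 * key, when supplied, must fit BNXT_ULP_ACT_PROP_SZ_RSS_KEY or the
 * handler above rejects the flow:
 *
 *	static const uint8_t rss_key[40] = { 0 };	(placeholder key)
 *	struct rte_flow_action_rss rss = {
 *		.types = ETH_RSS_IP,
 *		.key_len = sizeof(rss_key),
 *		.key = rss_key,
 *	};
 */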
/* Function to handle the parsing of RTE Flow item eth Header. */
static void
ulp_rte_enc_eth_hdr_handler(struct ulp_rte_parser_params *params,
			    const struct rte_flow_item_eth *eth_spec)
{
	struct ulp_rte_hdr_field *field;
	uint32_t size;

	field = &params->enc_field[BNXT_ULP_ENC_FIELD_ETH_DMAC];
	size = sizeof(eth_spec->dst.addr_bytes);
	field = ulp_rte_parser_fld_copy(field, eth_spec->dst.addr_bytes, size);

	size = sizeof(eth_spec->src.addr_bytes);
	field = ulp_rte_parser_fld_copy(field, eth_spec->src.addr_bytes, size);

	size = sizeof(eth_spec->type);
	field = ulp_rte_parser_fld_copy(field, &eth_spec->type, size);

	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
}

/* Function to handle the parsing of RTE Flow item vlan Header. */
static void
ulp_rte_enc_vlan_hdr_handler(struct ulp_rte_parser_params *params,
			     const struct rte_flow_item_vlan *vlan_spec,
			     uint32_t inner)
{
	struct ulp_rte_hdr_field *field;
	uint32_t size;

	if (!inner) {
		field = &params->enc_field[BNXT_ULP_ENC_FIELD_O_VLAN_TCI];
		ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OO_VLAN);
	} else {
		field = &params->enc_field[BNXT_ULP_ENC_FIELD_I_VLAN_TCI];
		ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OI_VLAN);
	}

	size = sizeof(vlan_spec->tci);
	field = ulp_rte_parser_fld_copy(field, &vlan_spec->tci, size);

	size = sizeof(vlan_spec->inner_type);
	field = ulp_rte_parser_fld_copy(field, &vlan_spec->inner_type, size);
}

/* Function to handle the parsing of RTE Flow item ipv4 Header. */
static void
ulp_rte_enc_ipv4_hdr_handler(struct ulp_rte_parser_params *params,
			     const struct rte_flow_item_ipv4 *ip)
{
	struct ulp_rte_hdr_field *field;
	uint32_t size;
	uint8_t val8;

	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_IHL];
	size = sizeof(ip->hdr.version_ihl);
	if (!ip->hdr.version_ihl)
		val8 = RTE_IPV4_VHL_DEF;
	else
		val8 = ip->hdr.version_ihl;
	field = ulp_rte_parser_fld_copy(field, &val8, size);

	size = sizeof(ip->hdr.type_of_service);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.type_of_service, size);

	size = sizeof(ip->hdr.packet_id);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.packet_id, size);

	size = sizeof(ip->hdr.fragment_offset);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.fragment_offset, size);

	size = sizeof(ip->hdr.time_to_live);
	if (!ip->hdr.time_to_live)
		val8 = BNXT_ULP_DEFAULT_TTL;
	else
		val8 = ip->hdr.time_to_live;
	field = ulp_rte_parser_fld_copy(field, &val8, size);

	size = sizeof(ip->hdr.next_proto_id);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.next_proto_id, size);

	size = sizeof(ip->hdr.src_addr);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);

	size = sizeof(ip->hdr.dst_addr);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);

	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4);
}

/* Function to handle the parsing of RTE Flow item ipv6 Header. */
static void
ulp_rte_enc_ipv6_hdr_handler(struct ulp_rte_parser_params *params,
			     const struct rte_flow_item_ipv6 *ip)
{
	struct ulp_rte_hdr_field *field;
	uint32_t size;
	uint32_t val32;
	uint8_t val8;

	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW];
	size = sizeof(ip->hdr.vtc_flow);
	if (!ip->hdr.vtc_flow)
		val32 = rte_cpu_to_be_32(BNXT_ULP_IPV6_DFLT_VER);
	else
		val32 = ip->hdr.vtc_flow;
	field = ulp_rte_parser_fld_copy(field, &val32, size);

	size = sizeof(ip->hdr.proto);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.proto, size);

	size = sizeof(ip->hdr.hop_limits);
	if (!ip->hdr.hop_limits)
		val8 = BNXT_ULP_DEFAULT_TTL;
	else
		val8 = ip->hdr.hop_limits;
	field = ulp_rte_parser_fld_copy(field, &val8, size);

	size = sizeof(ip->hdr.src_addr);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);

	size = sizeof(ip->hdr.dst_addr);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);

	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV6);
}
/* Function to handle the parsing of RTE Flow item UDP Header. */
static void
ulp_rte_enc_udp_hdr_handler(struct ulp_rte_parser_params *params,
			    const struct rte_flow_item_udp *udp_spec)
{
	struct ulp_rte_hdr_field *field;
	uint32_t size;
	uint8_t type = IPPROTO_UDP;

	field = &params->enc_field[BNXT_ULP_ENC_FIELD_UDP_SPORT];
	size = sizeof(udp_spec->hdr.src_port);
	field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.src_port, size);

	size = sizeof(udp_spec->hdr.dst_port);
	field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.dst_port, size);

	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_UDP);

	/* Update the ip header protocol */
	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_PROTO];
	ulp_rte_parser_fld_copy(field, &type, sizeof(type));
	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_PROTO];
	ulp_rte_parser_fld_copy(field, &type, sizeof(type));
}

/* Function to handle the parsing of RTE Flow item vxlan Header. */
static void
ulp_rte_enc_vxlan_hdr_handler(struct ulp_rte_parser_params *params,
			      struct rte_flow_item_vxlan *vxlan_spec)
{
	struct ulp_rte_hdr_field *field;
	uint32_t size;

	field = &params->enc_field[BNXT_ULP_ENC_FIELD_VXLAN_FLAGS];
	size = sizeof(vxlan_spec->flags);
	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->flags, size);

	size = sizeof(vxlan_spec->rsvd0);
	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd0, size);

	size = sizeof(vxlan_spec->vni);
	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->vni, size);

	size = sizeof(vxlan_spec->rsvd1);
	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd1, size);

	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_VXLAN);
}
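/*
 * Illustrative only, not driver code: a hypothetical VXLAN_ENCAP
 * action. The definition list must be ETH / optional VLAN(s) / IPV4 or
 * IPV6 / UDP / VXLAN / END, which is the order the parser below walks;
 * the spec variables are assumed to be filled elsewhere:
 *
 *	struct rte_flow_item defn[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_vxlan_encap conf = { .definition = defn };
 */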
/* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
int32_t
ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
				struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_vxlan_encap *vxlan_encap;
	const struct rte_flow_item *item;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	struct rte_flow_item_vxlan vxlan_spec;
	uint32_t vlan_num = 0, vlan_size = 0;
	uint32_t ip_size = 0, ip_type = 0;
	uint32_t vxlan_size = 0;
	struct ulp_rte_act_bitmap *act = &params->act_bitmap;
	struct ulp_rte_act_prop *ap = &params->act_prop;

	vxlan_encap = action_item->conf;
	if (!vxlan_encap) {
		BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
		return BNXT_TF_RC_ERROR;
	}

	item = vxlan_encap->definition;
	if (!item) {
		BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
		return BNXT_TF_RC_ERROR;
	}

	if (!ulp_rte_item_skip_void(&item, 0))
		return BNXT_TF_RC_ERROR;

	/* must have ethernet header */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		BNXT_TF_DBG(ERR, "Parse Error: vxlan encap does not have eth\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Parse the ethernet header */
	if (item->spec)
		ulp_rte_enc_eth_hdr_handler(params, item->spec);

	/* Goto the next item */
	if (!ulp_rte_item_skip_void(&item, 1))
		return BNXT_TF_RC_ERROR;

	/* May have vlan header */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_num++;
		if (item->spec)
			ulp_rte_enc_vlan_hdr_handler(params, item->spec, 0);

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	}

	/* may have two vlan headers */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_num++;
		if (item->spec)
			ulp_rte_enc_vlan_hdr_handler(params, item->spec, 1);

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	}

	/* Update the vlan count and size if at least one tag is present */
	if (vlan_num) {
		vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
		vlan_num = tfp_cpu_to_be_32(vlan_num);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
		       &vlan_num,
		       sizeof(uint32_t));
		vlan_size = tfp_cpu_to_be_32(vlan_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
		       &vlan_size,
		       sizeof(uint32_t));
	}

	/* L3 must be IPv4 or IPv6 */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		ipv4_spec = item->spec;
		ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;

		/* Update the ip size details */
		ip_size = tfp_cpu_to_be_32(ip_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
		       &ip_size, sizeof(uint32_t));

		/* update the ip type */
		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
		       &ip_type, sizeof(uint32_t));

		/* update the computed field to notify it is ipv4 header */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
				    1);
		if (ipv4_spec)
			ulp_rte_enc_ipv4_hdr_handler(params, ipv4_spec);

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		ipv6_spec = item->spec;
		ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;

		/* Update the ip size details */
		ip_size = tfp_cpu_to_be_32(ip_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
		       &ip_size, sizeof(uint32_t));

		/* update the ip type */
		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
		       &ip_type, sizeof(uint32_t));

		/* update the computed field to notify it is ipv6 header */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
				    1);
		if (ipv6_spec)
			ulp_rte_enc_ipv6_hdr_handler(params, ipv6_spec);

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	} else {
		BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
		return BNXT_TF_RC_ERROR;
	}

	/* L4 is UDP */
	if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
		BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
		return BNXT_TF_RC_ERROR;
	}
	if (item->spec)
		ulp_rte_enc_udp_hdr_handler(params, item->spec);

	if (!ulp_rte_item_skip_void(&item, 1))
		return BNXT_TF_RC_ERROR;

	/* Finally VXLAN */
	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
		BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
		return BNXT_TF_RC_ERROR;
	}
	vxlan_size = sizeof(struct rte_flow_item_vxlan);
	/* copy the vxlan details */
	memcpy(&vxlan_spec, item->spec, vxlan_size);
	vxlan_spec.flags = 0x08;
	vxlan_size = tfp_cpu_to_be_32(vxlan_size);
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
	       &vxlan_size, sizeof(uint32_t));

	ulp_rte_enc_vxlan_hdr_handler(params, &vxlan_spec);

	/* update the act_bitmap with vxlan encap */
	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action vxlan_decap Header. */
int32_t
ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
				__rte_unused,
				struct ulp_rte_parser_params *params)
{
	/* update the act_bitmap with vxlan decap */
	ULP_BITMAP_SET(params->act_bitmap.bits,
		       BNXT_ULP_ACT_BIT_VXLAN_DECAP);
	/* Update computational field with tunnel decap info */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action drop Header. */
int32_t
ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with drop */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action count. */
int32_t
ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_count *act_count;
	struct ulp_rte_act_prop *act_prop = &params->act_prop;

	act_count = action_item->conf;
	if (act_count) {
		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
		       &act_count->id,
		       BNXT_ULP_ACT_PROP_SZ_COUNT);
	}

	/* Update the act_bitmap with count */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
	return BNXT_TF_RC_SUCCESS;
}
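/*
 * Illustrative only, not driver code: a hypothetical COUNT action; the
 * optional counter id is copied into the action properties above:
 *
 *	struct rte_flow_action_count cnt = { .id = 0 };
 *	struct rte_flow_action act = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &cnt,
 *	};
 */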
/* Function to handle the parsing of action ports. */
static int32_t
ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
			    uint32_t ifindex,
			    enum bnxt_ulp_direction_type act_dir)
{
	enum bnxt_ulp_direction_type dir;
	uint16_t pid_s;
	uint32_t pid;
	struct ulp_rte_act_prop *act = &param->act_prop;
	enum bnxt_ulp_intf_type port_type;
	uint32_t vnic_type;

	/* Get the direction: if the action implicitly specifies one, use it */
	dir = (act_dir == BNXT_ULP_DIR_INVALID) ?
		ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION) :
		act_dir;
	port_type = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
	if (dir == BNXT_ULP_DIR_EGRESS &&
	    port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
		/* For egress direction, fill vport */
		if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
			return BNXT_TF_RC_ERROR;

		pid = pid_s;
		pid = rte_cpu_to_be_32(pid);
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
	} else {
		/* For ingress direction, fill vnic */
		/*
		 * Action			Destination
		 * ------------------------------------
		 * PORT_REPRESENTOR		Driver Function
		 * ------------------------------------
		 * REPRESENTED_PORT		VF
		 * ------------------------------------
		 * PORT_ID			VF
		 */
		if (act_dir != BNXT_ULP_DIR_INGRESS &&
		    port_type == BNXT_ULP_INTF_TYPE_VF_REP)
			vnic_type = BNXT_ULP_VF_FUNC_VNIC;
		else
			vnic_type = BNXT_ULP_DRV_FUNC_VNIC;

		if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
						 vnic_type, &pid_s))
			return BNXT_TF_RC_ERROR;

		pid = pid_s;
		pid = rte_cpu_to_be_32(pid);
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
	}

	/* Update the action port set bit */
	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action PF. */
int32_t
ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
		       struct ulp_rte_parser_params *params)
{
	uint32_t port_id;
	uint32_t ifindex;
	enum bnxt_ulp_intf_type intf_type;

	/* Get the port id of the current device */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

	/* Get the port db ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "Invalid port id\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Check that the port is a PF port */
	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
		BNXT_TF_DBG(ERR, "Port is not a PF port\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Update the action properties */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
	return ulp_rte_parser_act_port_set(params, ifindex,
					   BNXT_ULP_DIR_INVALID);
}
/* Function to handle the parsing of RTE Flow action VF. */
int32_t
ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
		       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_vf *vf_action;
	enum bnxt_ulp_intf_type intf_type;
	uint32_t ifindex;
	struct bnxt *bp;

	vf_action = action_item->conf;
	if (!vf_action) {
		BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	if (vf_action->original) {
		BNXT_TF_DBG(ERR, "ParseErr: VF Original not supported\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	bp = bnxt_pmd_get_bp(params->port_id);
	if (bp == NULL) {
		BNXT_TF_DBG(ERR, "Invalid bp\n");
		return BNXT_TF_RC_ERROR;
	}

	/* vf_action->id is a logical number which in this case is an
	 * offset from the first VF. So, to get the absolute VF id, the
	 * offset must be added to the absolute first vf id of that port.
	 */
	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
						 bp->first_vf_id +
						 vf_action->id,
						 &ifindex)) {
		BNXT_TF_DBG(ERR, "VF is not a valid interface\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Check that the port is a VF port */
	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
	    intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
		BNXT_TF_DBG(ERR, "Port is not a VF port\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the action properties */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
	return ulp_rte_parser_act_port_set(params, ifindex,
					   BNXT_ULP_DIR_INVALID);
}

/* Parse actions PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
int32_t
ulp_rte_port_act_handler(const struct rte_flow_action *act_item,
			 struct ulp_rte_parser_params *param)
{
	uint32_t ethdev_id;
	uint32_t ifindex;
	enum bnxt_ulp_intf_type intf_type;
	enum bnxt_ulp_direction_type act_dir;

	if (!act_item->conf) {
		BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	switch (act_item->type) {
	case RTE_FLOW_ACTION_TYPE_PORT_ID: {
		const struct rte_flow_action_port_id *port_id = act_item->conf;

		if (port_id->original) {
			BNXT_TF_DBG(ERR,
				    "ParseErr: Portid Original not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		ethdev_id = port_id->id;
		act_dir = BNXT_ULP_DIR_INVALID;
		break;
	}
	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: {
		const struct rte_flow_action_ethdev *ethdev = act_item->conf;

		ethdev_id = ethdev->port_id;
		act_dir = BNXT_ULP_DIR_INGRESS;
		break;
	}
	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
		const struct rte_flow_action_ethdev *ethdev = act_item->conf;

		ethdev_id = ethdev->port_id;
		act_dir = BNXT_ULP_DIR_EGRESS;
		break;
	}
	default:
		BNXT_TF_DBG(ERR, "Unknown port action\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get the port db ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, ethdev_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "Invalid port id\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get the intf type */
	intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
	if (!intf_type) {
		BNXT_TF_DBG(ERR, "Invalid port type\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Set the action port */
	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
	return ulp_rte_parser_act_port_set(param, ifindex, act_dir);
}
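/*
 * Illustrative only, not driver code: the three port actions accepted
 * above, with hypothetical port numbers. PORT_REPRESENTOR forces the
 * ingress path, REPRESENTED_PORT the egress path, and PORT_ID leaves
 * the direction to the flow itself:
 *
 *	struct rte_flow_action_port_id pid = { .id = 1 };
 *	struct rte_flow_action_ethdev ethdev = { .port_id = 1 };
 */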
/* Function to handle the parsing of RTE Flow action phy_port. */
int32_t
ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
			     struct ulp_rte_parser_params *prm)
{
	const struct rte_flow_action_phy_port *phy_port;
	uint32_t pid;
	int32_t rc;
	uint16_t pid_s;
	enum bnxt_ulp_direction_type dir;

	phy_port = action_item->conf;
	if (!phy_port) {
		BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	if (phy_port->original) {
		BNXT_TF_DBG(ERR, "Parse Err: Port Original not supported\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir != BNXT_ULP_DIR_EGRESS) {
		BNXT_TF_DBG(ERR,
			    "Parse Err: Phy ports are valid only for egress\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	/* Get the physical port details from port db */
	rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
					    &pid_s);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to get port details\n");
		return -EINVAL;
	}

	pid = pid_s;
	pid = rte_cpu_to_be_32(pid);
	memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
	       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);

	/* Update the action port set bit */
	ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
	ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
			    BNXT_ULP_INTF_TYPE_PHY_PORT);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action pop vlan. */
int32_t
ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
				struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with pop */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action push vlan. */
int32_t
ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
				 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_of_push_vlan *push_vlan;
	uint16_t ethertype;
	struct ulp_rte_act_prop *act = &params->act_prop;

	push_vlan = action_item->conf;
	if (push_vlan) {
		ethertype = push_vlan->ethertype;
		if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
			BNXT_TF_DBG(ERR,
				    "Parse Err: Ethertype not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
		       &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
		/* Update the act_bitmap with push vlan */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_PUSH_VLAN);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
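/*
 * Illustrative only, not driver code: a hypothetical OF_PUSH_VLAN
 * action; only the 802.1Q ethertype passes the check above:
 *
 *	struct rte_flow_action_of_push_vlan pv = {
 *		.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
 *	};
 */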
/* Function to handle the parsing of RTE Flow action set vlan id. */
int32_t
ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
				    struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
	uint32_t vid;
	struct ulp_rte_act_prop *act = &params->act_prop;

	vlan_vid = action_item->conf;
	if (vlan_vid && vlan_vid->vlan_vid) {
		vid = vlan_vid->vlan_vid;
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
		       &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
		/* Update the act_bitmap with vlan vid */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_VLAN_VID);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set vlan pcp. */
int32_t
ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
				    struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
	uint8_t pcp;
	struct ulp_rte_act_prop *act = &params->act_prop;

	vlan_pcp = action_item->conf;
	if (vlan_pcp) {
		pcp = vlan_pcp->vlan_pcp;
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
		       &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
		/* Update the act_bitmap with vlan pcp */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set ipv4 src. */
int32_t
ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
				 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_ipv4 *set_ipv4;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_ipv4 = action_item->conf;
	if (set_ipv4) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
		/* Update the act_bitmap with set ipv4 src */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set ipv4 dst. */
int32_t
ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
				 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_ipv4 *set_ipv4;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_ipv4 = action_item->conf;
	if (set_ipv4) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
		/* Update the act_bitmap with set ipv4 dst */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_IPV4_DST);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
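/*
 * Illustrative only, not driver code: a hypothetical NAT-style address
 * rewrite; ipv4_addr is carried in network byte order:
 *
 *	struct rte_flow_action_set_ipv4 dip = {
 *		.ipv4_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 1)),
 *	};
 *	struct rte_flow_action act = {
 *		.type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DST, .conf = &dip,
 *	};
 */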
/* Function to handle the parsing of RTE Flow action set tp src. */
int32_t
ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
			       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_tp *set_tp;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_tp = action_item->conf;
	if (set_tp) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
		/* Update the act_bitmap with set tp src */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_TP_SRC);
		return BNXT_TF_RC_SUCCESS;
	}

	BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set tp dst. */
int32_t
ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
			       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_tp *set_tp;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_tp = action_item->conf;
	if (set_tp) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
		/* Update the act_bitmap with set tp dst */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_TP_DST);
		return BNXT_TF_RC_SUCCESS;
	}

	BNXT_TF_DBG(ERR, "Parse Error: set tp dst arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action dec ttl. */
int32_t
ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
			    struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with dec ttl */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action JUMP */
int32_t
ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with jump */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action sample. */
int32_t
ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
			   struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_sample *sample;
	int ret;

	sample = action_item->conf;

	/* if the SAMPLE bit is already set, this sample action is nested
	 * within the actions of another sample action; this is not allowed
	 */
	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
			     BNXT_ULP_ACT_BIT_SAMPLE))
		return BNXT_TF_RC_ERROR;

	/* a sample action is only allowed as a shared action */
	if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
			      BNXT_ULP_ACT_BIT_SHARED))
		return BNXT_TF_RC_ERROR;

	/* only a ratio of 1, i.e. 100%, is supported */
	if (sample->ratio != 1)
		return BNXT_TF_RC_ERROR;

	if (!sample->actions)
		return BNXT_TF_RC_ERROR;

	/* parse the nested actions for a sample action */
	ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
	if (ret == BNXT_TF_RC_SUCCESS)
		/* Update the act_bitmap with sample */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SAMPLE);

	return ret;
}
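/*
 * Illustrative only, not driver code: a hypothetical SAMPLE action as
 * accepted above; the ratio must be 1 (100%) and the nested action
 * list is parsed recursively via bnxt_ulp_rte_parser_act_parse():
 *
 *	struct rte_flow_action nested[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &pid },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action_sample sample = {
 *		.ratio = 1,
 *		.actions = nested,
 *	};
 */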
/* Function to handle the parsing of bnxt vendor Flow action vxlan Header. */
int32_t
ulp_vendor_vxlan_decap_act_handler(const struct rte_flow_action *action_item,
				   struct ulp_rte_parser_params *params)
{
	/* Set the F1 flow header bit */
	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);
	return ulp_rte_vxlan_decap_act_handler(action_item, params);
}

/* Function to handle the parsing of bnxt vendor Flow item vxlan Header. */
int32_t
ulp_rte_vendor_vxlan_decap_hdr_handler(const struct rte_flow_item *item,
				       struct ulp_rte_parser_params *params)
{
	RTE_SET_USED(item);
	/* Set the F2 flow header bit */
	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F2);
	return ulp_rte_vxlan_decap_act_handler(NULL, params);
}