/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include "bnxt.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "bnxt_ulp.h"
#include "bnxt_tf_common.h"
#include "bnxt_tf_pmd_shim.h"
#include "ulp_rte_parser.h"
#include "ulp_matcher.h"
#include "ulp_utils.h"
#include "tfp.h"
#include "ulp_port_db.h"
#include "ulp_flow_db.h"
#include "ulp_mapper.h"
#include "ulp_tun.h"
#include "ulp_template_db_tbl.h"

/* Local defines for the parsing functions */
#define ULP_VLAN_PRIORITY_SHIFT		13 /* First 3 bits */
#define ULP_VLAN_PRIORITY_MASK		0x700
#define ULP_VLAN_TAG_MASK		0xFFF /* Last 12 bits */
#define ULP_UDP_PORT_VXLAN		4789

/* Utility function to skip the void items. */
static inline int32_t
ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
{
	if (!*item)
		return 0;
	if (increment)
		(*item)++;
	while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
		(*item)++;
	if (*item)
		return 1;
	return 0;
}

/* Utility function to copy field spec items */
static struct ulp_rte_hdr_field *
ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
			const void *buffer,
			uint32_t size)
{
	field->size = size;
	memcpy(field->spec, buffer, field->size);
	field++;
	return field;
}

/* Utility function to update the field_bitmap */
static void
ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
				   uint32_t idx,
				   enum bnxt_ulp_prsr_action prsr_act)
{
	struct ulp_rte_hdr_field *field;

	field = &params->hdr_field[idx];
	if (ulp_bitmap_notzero(field->mask, field->size)) {
		ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
		if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
			ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
		/* Not exact match */
		if (!ulp_bitmap_is_ones(field->mask, field->size))
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_WC_MATCH, 1);
	} else {
		ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
	}
}
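
/*
 * Worked example (illustrative, not part of the driver): how the mask
 * drives the bitmaps above for a 2-byte field at hdr_field[idx]:
 *
 *	mask = 0xFFFF -> fld_bitmap and fld_s_bitmap bits set; all-ones
 *	                 mask, so BNXT_ULP_CF_IDX_WC_MATCH is untouched.
 *	mask = 0x0FFF -> bitmaps set, but the mask is not all ones, so
 *	                 BNXT_ULP_CF_IDX_WC_MATCH is written with 1.
 *	mask = 0x0000 -> the fld_bitmap bit is cleared and the field is
 *	                 effectively ignored.
 *
 * With ULP_PRSR_ACT_MATCH_IGNORE the fld_s_bitmap update is skipped,
 * which is how "ignored" fields (e.g. the ether type below) behave.
 */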

#define ulp_deference_struct(x, y) ((x) ? &((x)->y) : NULL)

/* Utility function to copy field spec and masks items */
static void
ulp_rte_prsr_fld_mask(struct ulp_rte_parser_params *params,
		      uint32_t *idx,
		      uint32_t size,
		      const void *spec_buff,
		      const void *mask_buff,
		      enum bnxt_ulp_prsr_action prsr_act)
{
	struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];

	/* update the field size */
	field->size = size;

	/* copy the mask specifications only if mask is not null */
	if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff) {
		memcpy(field->mask, mask_buff, size);
		ulp_rte_parser_field_bitmap_update(params, *idx, prsr_act);
	}

	/* copy the protocol specifications only if mask is not null */
	if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size))
		memcpy(field->spec, spec_buff, size);

	/* Increment the index */
	*idx = *idx + 1;
}
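
/*
 * Usage sketch (illustrative only): a typical per-field invocation from
 * one of the protocol handlers below.  ulp_deference_struct() keeps the
 * call NULL-safe when the application omitted the spec or the mask:
 *
 *	uint32_t idx = 0, size;
 *
 *	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
 *	ulp_rte_prsr_fld_mask(params, &idx, size,
 *			      ulp_deference_struct(udp_spec, hdr.src_port),
 *			      ulp_deference_struct(udp_mask, hdr.src_port),
 *			      ULP_PRSR_ACT_DEFAULT);
 *
 * The spec bytes are copied only when a non-zero mask is present, and
 * idx advances so the next field lands in the next hdr_field slot.
 */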

/* Utility function to validate the field size and advance the field index */
static int32_t
ulp_rte_prsr_fld_size_validate(struct ulp_rte_parser_params *params,
			       uint32_t *idx,
			       uint32_t size)
{
	if (params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX) {
		BNXT_TF_DBG(ERR, "OOB for field processing %u\n", *idx);
		return -EINVAL;
	}
	*idx = params->field_idx;
	params->field_idx += size;
	return 0;
}

/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow items into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item *item = pattern;
	struct bnxt_ulp_rte_hdr_info *hdr_info;

	params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;

	/* Set the computed flags for no vlan tags before parsing */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);

	/* Parse all the items in the pattern */
	while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->type >= (uint32_t)
		    BNXT_RTE_FLOW_ITEM_TYPE_END) {
			if (item->type >=
			    (uint32_t)BNXT_RTE_FLOW_ITEM_TYPE_LAST)
				goto hdr_parser_error;
			/* get the header information */
			hdr_info = &ulp_vendor_hdr_info[item->type -
				BNXT_RTE_FLOW_ITEM_TYPE_END];
		} else {
			if (item->type > RTE_FLOW_ITEM_TYPE_HIGIG2)
				goto hdr_parser_error;
			hdr_info = &ulp_hdr_info[item->type];
		}
		if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
			goto hdr_parser_error;
		} else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_hdr_func) {
				if (hdr_info->proto_hdr_func(item, params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		item++;
	}
	/* update the implied SVIF */
	return ulp_rte_parser_implicit_match_port_process(params);

hdr_parser_error:
	BNXT_TF_DBG(ERR, "Truflow parser does not support type %d\n",
		    item->type);
	return BNXT_TF_RC_PARSE_ERR;
}

/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow actions into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action *action_item = actions;
	struct bnxt_ulp_rte_act_info *hdr_info;

	/* Parse all the items in the pattern */
	while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
		if (action_item->type >=
		    (uint32_t)BNXT_RTE_FLOW_ACTION_TYPE_END) {
			if (action_item->type >=
			    (uint32_t)BNXT_RTE_FLOW_ACTION_TYPE_LAST)
				goto act_parser_error;
			/* get the header information from the bnxt act info table */
			hdr_info = &ulp_vendor_act_info[action_item->type -
				BNXT_RTE_FLOW_ACTION_TYPE_END];
		} else {
			if (action_item->type > RTE_FLOW_ACTION_TYPE_SHARED)
				goto act_parser_error;
			/* get the header information from the act info table */
			hdr_info = &ulp_act_info[action_item->type];
		}
		if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
			goto act_parser_error;
		} else if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_act_func) {
				if (hdr_info->proto_act_func(action_item,
							     params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		action_item++;
	}
	/* update the implied port details */
	ulp_rte_parser_implicit_act_port_process(params);
	return BNXT_TF_RC_SUCCESS;

act_parser_error:
	BNXT_TF_DBG(ERR, "Truflow parser does not support act %u\n",
		    action_item->type);
	return BNXT_TF_RC_ERROR;
}
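
/*
 * Usage sketch (illustrative only): both parsers walk END-terminated
 * rte_flow arrays and dispatch through the info tables.  A minimal
 * caller with an already-initialized params would look like:
 *
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 *	if (bnxt_ulp_rte_parser_hdr_parse(pattern, params) ==
 *	    BNXT_TF_RC_SUCCESS &&
 *	    bnxt_ulp_rte_parser_act_parse(actions, params) ==
 *	    BNXT_TF_RC_SUCCESS)
 *		bnxt_ulp_rte_parser_post_process(params);
 */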

/*
 * Function to handle the post processing of the computed
 * fields for the interface.
 */
static void
bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
{
	uint32_t ifindex;
	uint16_t port_id, parif;
	uint32_t mtype;
	enum bnxt_ulp_direction_type dir;

	/* get the direction details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);

	/* read the port id details */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return;
	}

	if (dir == BNXT_ULP_DIR_INGRESS) {
		/* Set port PARIF */
		if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
					  BNXT_ULP_PHY_PORT_PARIF, &parif)) {
			BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
			return;
		}
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
				    parif);
	} else {
		/* Get the match port type */
		mtype = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
		if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
					    1);
			/* Set VF func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_VF_FUNC_PARIF,
						  &parif)) {
				BNXT_TF_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
					    parif);

		} else {
			/* Set DRV func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_DRV_FUNC_PARIF,
						  &parif)) {
				BNXT_TF_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
					    parif);
		}
		if (mtype == BNXT_ULP_INTF_TYPE_PF) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
					    1);
		}
	}
}

static int32_t
ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_intf_type match_port_type, act_port_type;
	enum bnxt_ulp_direction_type dir;
	uint32_t act_port_set;

	/* Get the computed details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
	act_port_type = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
	act_port_set = ULP_COMP_FLD_IDX_RD(params,
					   BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);

	/* set the flow direction in the proto and action header */
	if (dir == BNXT_ULP_DIR_EGRESS) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
	}

	/* calculate the VF to VF flag */
	if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);

	/* Update the decrement ttl computational fields */
	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
			     BNXT_ULP_ACT_BIT_DEC_TTL)) {
		/*
		 * If the vxlan header is included and the vxlan decap
		 * action is not set, then decrement the tunnel ttl.
		 * Similarly add GRE and NVGRE in future.
		 */
		if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
				      BNXT_ULP_HDR_BIT_T_VXLAN) &&
		     !ULP_BITMAP_ISSET(params->act_bitmap.bits,
				       BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
		} else {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
		}
	}

	/* Merge the hdr_fp_bit into the proto header bit */
	params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;

	/* Update the comp fld fid */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);

	/* Update the computed interface parameters */
	bnxt_ulp_comp_fld_intf_update(params);

	/* TBD: Handle the flow rejection scenarios */
	return 0;
}

/*
 * Function to handle the post processing of the parsing details
 */
void
bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
{
	ulp_post_process_normal_flow(params);
}

/*
 * Function to compute the flow direction based on the match port details
 */
static void
bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_intf_type match_port_type;

	/* Get the match port type */
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);

	/* If an ingress flow matches a VF rep port, the direction is egress */
	if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
				    BNXT_ULP_DIR_EGRESS);
	} else {
		/* Assign the input direction */
		if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
					    BNXT_ULP_DIR_INGRESS);
		else
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
					    BNXT_ULP_DIR_EGRESS);
	}
}
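
/*
 * Direction summary (derived from the function above):
 *
 *	flow attribute	match port type		computed direction
 *	--------------	---------------		------------------
 *	ingress		VF representor		BNXT_ULP_DIR_EGRESS
 *	ingress		any other type		BNXT_ULP_DIR_INGRESS
 *	egress		any			BNXT_ULP_DIR_EGRESS
 *
 * A VF rep carries traffic that is logically leaving the host, which is
 * why an "ingress" rule on it is programmed as an egress flow.
 */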

/* Function to compute and set the SVIF for the flow's source interface. */
static int32_t
ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
			uint32_t ifindex,
			uint16_t mask,
			enum bnxt_ulp_direction_type item_dir)
{
	uint16_t svif;
	enum bnxt_ulp_direction_type dir;
	struct ulp_rte_hdr_field *hdr_field;
	enum bnxt_ulp_svif_type svif_type;
	enum bnxt_ulp_intf_type port_type;

	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
	    BNXT_ULP_INVALID_SVIF_VAL) {
		BNXT_TF_DBG(ERR,
			    "SVIF already set, multiple sources not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get port type details */
	port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
		BNXT_TF_DBG(ERR, "Invalid port type\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the match port type */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);

	/* compute the direction */
	bnxt_ulp_rte_parser_direction_compute(params);

	/* Get the computed direction */
	dir = (item_dir != BNXT_ULP_DIR_INVALID) ? item_dir :
		ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_INGRESS &&
	    port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
		svif_type = BNXT_ULP_PHY_PORT_SVIF;
	} else {
		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
		    item_dir != BNXT_ULP_DIR_EGRESS)
			svif_type = BNXT_ULP_VF_FUNC_SVIF;
		else
			svif_type = BNXT_ULP_DRV_FUNC_SVIF;
	}
	ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
			     &svif);
	svif = rte_cpu_to_be_16(svif);
	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
	memcpy(hdr_field->spec, &svif, sizeof(svif));
	memcpy(hdr_field->mask, &mask, sizeof(mask));
	hdr_field->size = sizeof(svif);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
			    rte_be_to_cpu_16(svif));
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of the RTE port id */
int32_t
ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
{
	uint16_t port_id = 0;
	uint16_t svif_mask = 0xFFFF;
	uint32_t ifindex;
	int32_t rc = BNXT_TF_RC_ERROR;

	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
	    BNXT_ULP_INVALID_SVIF_VAL)
		return BNXT_TF_RC_SUCCESS;

	/* SVIF not set. So get the port id */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}

	/* Update the SVIF details */
	rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask,
				     BNXT_ULP_DIR_INVALID);
	return rc;
}

/* Function to handle the implicit action port id */
int32_t
ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
{
	struct rte_flow_action action_item = {0};
	struct rte_flow_action_port_id port_id = {0};

	/* Read the action port set bit */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
		/* Already set, so just exit */
		return BNXT_TF_RC_SUCCESS;
	}
	port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
	action_item.type = RTE_FLOW_ACTION_TYPE_PORT_ID;
	action_item.conf = &port_id;

	/* Update the action port based on incoming port */
	ulp_rte_port_act_handler(&action_item, params);

	/* Reset the action port set bit */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item PF Header. */
int32_t
ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
		       struct ulp_rte_parser_params *params)
{
	uint16_t port_id = 0;
	uint16_t svif_mask = 0xFFFF;
	uint32_t ifindex;

	/* Get the implicit port id */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

	/* perform the conversion from dpdk port to bnxt ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, svif_mask,
				       BNXT_ULP_DIR_INVALID);
}

/* Function to handle the parsing of RTE Flow item VF Header. */
int32_t
ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
		       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vf *vf_spec = item->spec;
	const struct rte_flow_item_vf *vf_mask = item->mask;
	uint16_t mask = 0;
	uint32_t ifindex;
	int32_t rc = BNXT_TF_RC_PARSE_ERR;

	/* Get VF rte_flow_item for Port details */
	if (!vf_spec) {
		BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
		return rc;
	}
	if (!vf_mask) {
		BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
		return rc;
	}
	mask = vf_mask->id;

	/* perform the conversion from VF Func id to bnxt ifindex */
	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
						 vf_spec->id,
						 &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}
	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, mask,
				       BNXT_ULP_DIR_INVALID);
}

/* Parse items PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
int32_t
ulp_rte_port_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_direction_type item_dir;
	uint16_t ethdev_id;
	uint16_t mask = 0;
	int32_t rc = BNXT_TF_RC_PARSE_ERR;
	uint32_t ifindex;

	if (!item->spec) {
		BNXT_TF_DBG(ERR, "ParseErr:Port spec is not valid\n");
		return rc;
	}
	if (!item->mask) {
		BNXT_TF_DBG(ERR, "ParseErr:Port mask is not valid\n");
		return rc;
	}

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_PORT_ID: {
		const struct rte_flow_item_port_id *port_spec = item->spec;
		const struct rte_flow_item_port_id *port_mask = item->mask;

		item_dir = BNXT_ULP_DIR_INVALID;
		ethdev_id = port_spec->id;
		mask = port_mask->id;
		break;
	}
	case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR: {
		const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
		const struct rte_flow_item_ethdev *ethdev_mask = item->mask;

		item_dir = BNXT_ULP_DIR_INGRESS;
		ethdev_id = ethdev_spec->port_id;
		mask = ethdev_mask->port_id;
		break;
	}
	case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT: {
		const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
		const struct rte_flow_item_ethdev *ethdev_mask = item->mask;

		item_dir = BNXT_ULP_DIR_EGRESS;
		ethdev_id = ethdev_spec->port_id;
		mask = ethdev_mask->port_id;
		break;
	}
	default:
		BNXT_TF_DBG(ERR, "ParseErr:Unexpected item\n");
		return rc;
	}

	/* perform the conversion from dpdk port to bnxt ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      ethdev_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}
	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, mask, item_dir);
}
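
/*
 * Illustrative testpmd-style rules (assumed syntax, for reference only)
 * that would exercise the three cases above:
 *
 *	flow create 0 ingress pattern port_id id is 1 / ... / end
 *	flow create 0 ingress pattern port_representor port_id is 1 / ...
 *	flow create 0 ingress pattern represented_port ethdev_port_id is 1 / ...
 *
 * PORT_ID leaves the direction to be computed, PORT_REPRESENTOR forces
 * the ingress SVIF choice and REPRESENTED_PORT forces the egress one.
 */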

/* Function to handle the parsing of RTE Flow item phy port Header. */
int32_t
ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
			     struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_phy_port *port_spec = item->spec;
	const struct rte_flow_item_phy_port *port_mask = item->mask;
	uint16_t mask = 0;
	int32_t rc = BNXT_TF_RC_ERROR;
	uint16_t svif;
	enum bnxt_ulp_direction_type dir;
	struct ulp_rte_hdr_field *hdr_field;

	/* Copy the rte_flow_item for phy port into hdr_field */
	if (!port_spec) {
		BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
		return rc;
	}
	if (!port_mask) {
		BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
		return rc;
	}
	mask = port_mask->index;

	/* Update the match port type */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
			    BNXT_ULP_INTF_TYPE_PHY_PORT);

	/* Compute the Hw direction */
	bnxt_ulp_rte_parser_direction_compute(params);

	/* Direction validation */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_EGRESS) {
		BNXT_TF_DBG(ERR,
			    "Parse Err:Phy ports are valid only for ingress\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* Get the physical port details from port db */
	rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
					   &svif);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to get port details\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* Update the SVIF details */
	svif = rte_cpu_to_be_16(svif);
	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
	memcpy(hdr_field->spec, &svif, sizeof(svif));
	memcpy(hdr_field->mask, &mask, sizeof(mask));
	hdr_field->size = sizeof(svif);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
			    rte_be_to_cpu_16(svif));
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
			     uint16_t type, uint32_t in_flag)
{
	if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_IPV4);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_IPV4);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
		}
	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_IPV6);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_IPV6);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
		}
	}
}

/* Internal Function to identify broadcast or multicast packets */
static int32_t
ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
{
	if (rte_is_multicast_ether_addr(eth_addr) ||
	    rte_is_broadcast_ether_addr(eth_addr)) {
		BNXT_TF_DBG(DEBUG,
			    "No support for bcast or mcast addr offload\n");
		return 1;
	}
	return 0;
}

/* Function to handle the parsing of RTE Flow item Ethernet Header. */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_eth *eth_spec = item->spec;
	const struct rte_flow_item_eth *eth_mask = item->mask;
	uint32_t idx = 0, dmac_idx = 0;
	uint32_t size;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;

	/* Perform validations */
	if (eth_spec) {
		/* Todo: workaround to avoid multicast and broadcast addr */
		if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
			return BNXT_TF_RC_PARSE_ERR;

		if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
			return BNXT_TF_RC_PARSE_ERR;

		eth_type = eth_spec->type;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ETH_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}
	/*
	 * Copy the rte_flow_item for eth into hdr_field using ethernet
	 * header fields
	 */
	dmac_idx = idx;
	size = sizeof(((struct rte_flow_item_eth *)NULL)->dst.addr_bytes);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, dst.addr_bytes),
			      ulp_deference_struct(eth_mask, dst.addr_bytes),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_eth *)NULL)->src.addr_bytes);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, src.addr_bytes),
			      ulp_deference_struct(eth_mask, src.addr_bytes),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_eth *)NULL)->type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, type),
			      ulp_deference_struct(eth_mask, type),
			      ULP_PRSR_ACT_MATCH_IGNORE);

	/* Update the protocol hdr bitmap */
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_ETH) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID,
				    dmac_idx);
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);

	return BNXT_TF_RC_SUCCESS;
}
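
/*
 * Classification note (illustrative): the first ETH item of a pattern
 * sets BNXT_ULP_HDR_BIT_O_ETH; an ETH item seen after any outer
 * L2/L3/L4 bit is treated as the inner header of a tunnel.  With
 *
 *	eth / ipv4 / udp / vxlan / eth / ipv4 / ...
 *
 * the second eth item takes the inner_flag path above and sets
 * BNXT_ULP_HDR_BIT_I_ETH instead.
 */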

/* Function to handle the parsing of RTE Flow item Vlan Header. */
int32_t
ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vlan *vlan_spec = item->spec;
	const struct rte_flow_item_vlan *vlan_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bit;
	uint32_t idx = 0;
	uint16_t vlan_tag = 0, priority = 0;
	uint16_t vlan_tag_mask = 0, priority_mask = 0;
	uint32_t outer_vtag_num;
	uint32_t inner_vtag_num;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;
	uint32_t size;

	if (vlan_spec) {
		vlan_tag = ntohs(vlan_spec->tci);
		priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag &= ULP_VLAN_TAG_MASK;
		vlan_tag = htons(vlan_tag);
		eth_type = vlan_spec->inner_type;
	}

	if (vlan_mask) {
		vlan_tag_mask = ntohs(vlan_mask->tci);
		priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag_mask &= 0xfff;

		/*
		 * The storage for the priority and VLAN tag is 2 bytes.
		 * If the 3-bit priority mask is all ones, set the
		 * remaining 13 bits to ones as well so the field is
		 * treated as an exact match.
		 */
		if (priority_mask == ULP_VLAN_PRIORITY_MASK)
			priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
		if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
			vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
		vlan_tag_mask = htons(vlan_tag_mask);
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_S_VLAN_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for vlan into hdr_field using Vlan
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_vlan *)NULL)->tci);
	/*
	 * The priority field is ignored since OVS sets it as a wildcard
	 * match and it is not supported. This is a workaround and shall
	 * be addressed in the future.
	 */
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      &priority,
			      (vlan_mask) ? &priority_mask : NULL,
			      ULP_PRSR_ACT_MASK_IGNORE);

	ulp_rte_prsr_fld_mask(params, &idx, size,
			      &vlan_tag,
			      (vlan_mask) ? &vlan_tag_mask : NULL,
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vlan *)NULL)->inner_type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vlan_spec, inner_type),
			      ulp_deference_struct(vlan_mask, inner_type),
			      ULP_PRSR_ACT_MATCH_IGNORE);

	/* Get the outer tag and inner tag counts */
	outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_O_VTAG_NUM);
	inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_I_VTAG_NUM);

	/* Update the hdr_bitmap of the vlans */
	hdr_bit = &params->hdr_bitmap;
	if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
	    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
	    !outer_vtag_num) {
		/* Update the vlan tag num */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OO_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_OO_VLAN_FB_VID, 1);

	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   outer_vtag_num == 1) {
		/* update the vlan tag num */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OI_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_OI_VLAN_FB_VID, 1);

	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   !inner_vtag_num) {
		/* update the vlan tag num */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_IO_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_IO_VLAN_FB_VID, 1);
		inner_flag = 1;
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   inner_vtag_num == 1) {
		/* update the vlan tag num */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_II_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_II_VLAN_FB_VID, 1);
		inner_flag = 1;
	} else {
		BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
	return BNXT_TF_RC_SUCCESS;
}
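
/*
 * Worked TCI example (illustrative, byte order elided): for a vlan item
 * with tci spec 0xE00A and mask 0xEFFF the handler splits the 16-bit
 * TCI as PCP(3)/DEI(1)/VID(12):
 *
 *	priority      = 0xE00A >> 13   = 7
 *	vlan_tag      = 0xE00A & 0xFFF = 10 (VID)
 *	priority_mask = 0xEFFF >> 13   = 0x7   -> widened to all ones
 *	vlan_tag_mask = 0xEFFF & 0xFFF = 0xFFF -> widened to all ones
 *
 * The widening makes a fully-masked sub-field an exact match within the
 * 2-byte hdr_field storage used for each sub-field.
 */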

/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
			     uint8_t proto, uint32_t in_flag)
{
	if (proto == IPPROTO_UDP) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_UDP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_UDP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
		}
	} else if (proto == IPPROTO_TCP) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_TCP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_TCP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
		}
	} else if (proto == IPPROTO_GRE) {
		ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
	} else if (proto == IPPROTO_ICMP) {
		if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN))
			ULP_BITMAP_SET(param->hdr_bitmap.bits,
				       BNXT_ULP_HDR_BIT_I_ICMP);
		else
			ULP_BITMAP_SET(param->hdr_bitmap.bits,
				       BNXT_ULP_HDR_BIT_O_ICMP);
	}
	if (proto) {
		if (in_flag) {
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
					    1);
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
					    proto);
		} else {
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
					    1);
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
					    proto);
		}
	}
}

/* Function to handle the parsing of RTE Flow item IPV4 Header. */
int32_t
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
	const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0, dip_idx = 0;
	uint32_t size;
	uint8_t proto = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate that there is no third L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_IPV4_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.version_ihl),
			      ulp_deference_struct(ipv4_mask, hdr.version_ihl),
			      ULP_PRSR_ACT_DEFAULT);

	/*
	 * The tos field is ignored since OVS sets it as a wildcard match
	 * and it is not supported. This is a workaround and shall be
	 * addressed in the future.
	 */
1055 */ 1056 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service); 1057 ulp_rte_prsr_fld_mask(params, &idx, size, 1058 ulp_deference_struct(ipv4_spec, 1059 hdr.type_of_service), 1060 ulp_deference_struct(ipv4_mask, 1061 hdr.type_of_service), 1062 ULP_PRSR_ACT_MASK_IGNORE); 1063 1064 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length); 1065 ulp_rte_prsr_fld_mask(params, &idx, size, 1066 ulp_deference_struct(ipv4_spec, hdr.total_length), 1067 ulp_deference_struct(ipv4_mask, hdr.total_length), 1068 ULP_PRSR_ACT_DEFAULT); 1069 1070 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id); 1071 ulp_rte_prsr_fld_mask(params, &idx, size, 1072 ulp_deference_struct(ipv4_spec, hdr.packet_id), 1073 ulp_deference_struct(ipv4_mask, hdr.packet_id), 1074 ULP_PRSR_ACT_DEFAULT); 1075 1076 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset); 1077 ulp_rte_prsr_fld_mask(params, &idx, size, 1078 ulp_deference_struct(ipv4_spec, 1079 hdr.fragment_offset), 1080 ulp_deference_struct(ipv4_mask, 1081 hdr.fragment_offset), 1082 ULP_PRSR_ACT_DEFAULT); 1083 1084 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live); 1085 ulp_rte_prsr_fld_mask(params, &idx, size, 1086 ulp_deference_struct(ipv4_spec, hdr.time_to_live), 1087 ulp_deference_struct(ipv4_mask, hdr.time_to_live), 1088 ULP_PRSR_ACT_DEFAULT); 1089 1090 /* Ignore proto for matching templates */ 1091 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id); 1092 ulp_rte_prsr_fld_mask(params, &idx, size, 1093 ulp_deference_struct(ipv4_spec, 1094 hdr.next_proto_id), 1095 ulp_deference_struct(ipv4_mask, 1096 hdr.next_proto_id), 1097 ULP_PRSR_ACT_MATCH_IGNORE); 1098 if (ipv4_spec) 1099 proto = ipv4_spec->hdr.next_proto_id; 1100 1101 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum); 1102 ulp_rte_prsr_fld_mask(params, &idx, size, 1103 ulp_deference_struct(ipv4_spec, hdr.hdr_checksum), 1104 ulp_deference_struct(ipv4_mask, hdr.hdr_checksum), 1105 ULP_PRSR_ACT_DEFAULT); 1106 1107 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr); 1108 ulp_rte_prsr_fld_mask(params, &idx, size, 1109 ulp_deference_struct(ipv4_spec, hdr.src_addr), 1110 ulp_deference_struct(ipv4_mask, hdr.src_addr), 1111 ULP_PRSR_ACT_DEFAULT); 1112 1113 dip_idx = idx; 1114 size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr); 1115 ulp_rte_prsr_fld_mask(params, &idx, size, 1116 ulp_deference_struct(ipv4_spec, hdr.dst_addr), 1117 ulp_deference_struct(ipv4_mask, hdr.dst_addr), 1118 ULP_PRSR_ACT_DEFAULT); 1119 1120 /* Set the ipv4 header bitmap and computed l3 header bitmaps */ 1121 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) || 1122 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) { 1123 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4); 1124 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1); 1125 inner_flag = 1; 1126 } else { 1127 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4); 1128 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1); 1129 /* Update the tunnel offload dest ip offset */ 1130 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID, 1131 dip_idx); 1132 } 1133 1134 /* Some of the PMD applications may set the protocol field 1135 * in the IPv4 spec but don't set the mask. So, consider 1136 * the mask in the proto value calculation. 
1137 */ 1138 if (ipv4_mask) 1139 proto &= ipv4_mask->hdr.next_proto_id; 1140 1141 /* Update the field protocol hdr bitmap */ 1142 ulp_rte_l3_proto_type_update(params, proto, inner_flag); 1143 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt); 1144 return BNXT_TF_RC_SUCCESS; 1145 } 1146 1147 /* Function to handle the parsing of RTE Flow item IPV6 Header */ 1148 int32_t 1149 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item, 1150 struct ulp_rte_parser_params *params) 1151 { 1152 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec; 1153 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask; 1154 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; 1155 uint32_t idx = 0, dip_idx = 0; 1156 uint32_t size; 1157 uint32_t ver_spec = 0, ver_mask = 0; 1158 uint32_t tc_spec = 0, tc_mask = 0; 1159 uint32_t lab_spec = 0, lab_mask = 0; 1160 uint8_t proto = 0; 1161 uint32_t inner_flag = 0; 1162 uint32_t cnt; 1163 1164 /* validate there are no 3rd L3 header */ 1165 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT); 1166 if (cnt == 2) { 1167 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n"); 1168 return BNXT_TF_RC_ERROR; 1169 } 1170 1171 if (ulp_rte_prsr_fld_size_validate(params, &idx, 1172 BNXT_ULP_PROTO_HDR_IPV6_NUM)) { 1173 BNXT_TF_DBG(ERR, "Error parsing protocol header\n"); 1174 return BNXT_TF_RC_ERROR; 1175 } 1176 1177 /* 1178 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6 1179 * header fields 1180 */ 1181 if (ipv6_spec) { 1182 ver_spec = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow); 1183 tc_spec = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow); 1184 lab_spec = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow); 1185 proto = ipv6_spec->hdr.proto; 1186 } 1187 1188 if (ipv6_mask) { 1189 ver_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow); 1190 tc_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow); 1191 lab_mask = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow); 1192 1193 /* Some of the PMD applications may set the protocol field 1194 * in the IPv6 spec but don't set the mask. So, consider 1195 * the mask in proto value calculation. 1196 */ 1197 proto &= ipv6_mask->hdr.proto; 1198 } 1199 1200 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow); 1201 ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask, 1202 ULP_PRSR_ACT_DEFAULT); 1203 /* 1204 * The TC and flow label field are ignored since OVS is 1205 * setting it for match and it is not supported. 1206 * This is a work around and 1207 * shall be addressed in the future. 
1208 */ 1209 ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask, 1210 ULP_PRSR_ACT_MASK_IGNORE); 1211 ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask, 1212 ULP_PRSR_ACT_MASK_IGNORE); 1213 1214 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len); 1215 ulp_rte_prsr_fld_mask(params, &idx, size, 1216 ulp_deference_struct(ipv6_spec, hdr.payload_len), 1217 ulp_deference_struct(ipv6_mask, hdr.payload_len), 1218 ULP_PRSR_ACT_DEFAULT); 1219 1220 /* Ignore proto for template matching */ 1221 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto); 1222 ulp_rte_prsr_fld_mask(params, &idx, size, 1223 ulp_deference_struct(ipv6_spec, hdr.proto), 1224 ulp_deference_struct(ipv6_mask, hdr.proto), 1225 ULP_PRSR_ACT_MATCH_IGNORE); 1226 1227 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits); 1228 ulp_rte_prsr_fld_mask(params, &idx, size, 1229 ulp_deference_struct(ipv6_spec, hdr.hop_limits), 1230 ulp_deference_struct(ipv6_mask, hdr.hop_limits), 1231 ULP_PRSR_ACT_DEFAULT); 1232 1233 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr); 1234 ulp_rte_prsr_fld_mask(params, &idx, size, 1235 ulp_deference_struct(ipv6_spec, hdr.src_addr), 1236 ulp_deference_struct(ipv6_mask, hdr.src_addr), 1237 ULP_PRSR_ACT_DEFAULT); 1238 1239 dip_idx = idx; 1240 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr); 1241 ulp_rte_prsr_fld_mask(params, &idx, size, 1242 ulp_deference_struct(ipv6_spec, hdr.dst_addr), 1243 ulp_deference_struct(ipv6_mask, hdr.dst_addr), 1244 ULP_PRSR_ACT_DEFAULT); 1245 1246 /* Set the ipv6 header bitmap and computed l3 header bitmaps */ 1247 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) || 1248 ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) { 1249 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6); 1250 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1); 1251 inner_flag = 1; 1252 } else { 1253 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6); 1254 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1); 1255 /* Update the tunnel offload dest ip offset */ 1256 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID, 1257 dip_idx); 1258 } 1259 1260 /* Update the field protocol hdr bitmap */ 1261 ulp_rte_l3_proto_type_update(params, proto, inner_flag); 1262 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt); 1263 1264 return BNXT_TF_RC_SUCCESS; 1265 } 1266 1267 /* Function to handle the update of proto header based on field values */ 1268 static void 1269 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *params, 1270 uint16_t src_port, uint16_t src_mask, 1271 uint16_t dst_port, uint16_t dst_mask, 1272 enum bnxt_ulp_hdr_bit hdr_bit) 1273 { 1274 switch (hdr_bit) { 1275 case BNXT_ULP_HDR_BIT_I_UDP: 1276 case BNXT_ULP_HDR_BIT_I_TCP: 1277 ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit); 1278 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1); 1279 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT, 1280 (uint64_t)rte_be_to_cpu_16(src_port)); 1281 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT, 1282 (uint64_t)rte_be_to_cpu_16(dst_port)); 1283 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT_MASK, 1284 (uint64_t)rte_be_to_cpu_16(src_mask)); 1285 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT_MASK, 1286 (uint64_t)rte_be_to_cpu_16(dst_mask)); 1287 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID, 1288 1); 1289 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT, 1290 !!(src_port & src_mask)); 1291 

/* Function to handle the parsing of RTE Flow item UDP Header. */
int32_t
ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_udp *udp_spec = item->spec;
	const struct rte_flow_item_udp *udp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;
	uint16_t dport = 0, sport = 0;
	uint16_t dport_mask = 0, sport_mask = 0;
	uint32_t cnt;
	enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_UDP;

	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (udp_spec) {
		sport = udp_spec->hdr.src_port;
		dport = udp_spec->hdr.dst_port;
	}
	if (udp_mask) {
		sport_mask = udp_mask->hdr.src_port;
		dport_mask = udp_mask->hdr.dst_port;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_UDP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for udp into hdr_field using udp
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.src_port),
			      ulp_deference_struct(udp_mask, hdr.src_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dst_port),
			      ulp_deference_struct(udp_mask, hdr.dst_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dgram_len),
			      ulp_deference_struct(udp_mask, hdr.dgram_len),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dgram_cksum),
			      ulp_deference_struct(udp_mask, hdr.dgram_cksum),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the udp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
		out_l4 = BNXT_ULP_HDR_BIT_I_UDP;

	ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
				     dport_mask, out_l4);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item TCP Header. */
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_tcp *tcp_spec = item->spec;
	const struct rte_flow_item_tcp *tcp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint16_t dport = 0, sport = 0;
	uint16_t dport_mask = 0, sport_mask = 0;
	uint32_t size;
	uint32_t cnt;
	enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_TCP;

	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (tcp_spec) {
		sport = tcp_spec->hdr.src_port;
		dport = tcp_spec->hdr.dst_port;
	}
	if (tcp_mask) {
		sport_mask = tcp_mask->hdr.src_port;
		dport_mask = tcp_mask->hdr.dst_port;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_TCP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for tcp into hdr_field using tcp
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.src_port),
			      ulp_deference_struct(tcp_mask, hdr.src_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.dst_port),
			      ulp_deference_struct(tcp_mask, hdr.dst_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.sent_seq),
			      ulp_deference_struct(tcp_mask, hdr.sent_seq),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.recv_ack),
			      ulp_deference_struct(tcp_mask, hdr.recv_ack),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.data_off),
			      ulp_deference_struct(tcp_mask, hdr.data_off),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.tcp_flags),
			      ulp_deference_struct(tcp_mask, hdr.tcp_flags),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.rx_win),
			      ulp_deference_struct(tcp_mask, hdr.rx_win),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.cksum),
			      ulp_deference_struct(tcp_mask, hdr.cksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.tcp_urp),
			      ulp_deference_struct(tcp_mask, hdr.tcp_urp),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the tcp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
		out_l4 = BNXT_ULP_HDR_BIT_I_TCP;

	ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
				     dport_mask, out_l4);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item Vxlan Header. */
int32_t
ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
	const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_VXLAN_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->flags);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_spec, flags),
			      ulp_deference_struct(vxlan_mask, flags),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd0);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_spec, rsvd0),
			      ulp_deference_struct(vxlan_mask, rsvd0),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->vni);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_spec, vni),
			      ulp_deference_struct(vxlan_mask, vni),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd1);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_spec, rsvd1),
			      ulp_deference_struct(vxlan_mask, rsvd1),
			      ULP_PRSR_ACT_DEFAULT);

	/* Update the hdr_bitmap with vxlan */
	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
	return BNXT_TF_RC_SUCCESS;
}
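
/*
 * Pattern sketch (illustrative): a full VXLAN match would reach this
 * handler as
 *
 *	eth / ipv4 / udp / vxlan vni is 100 / eth / ipv4 / end
 *
 * The outer UDP handler has normally set BNXT_ULP_HDR_BIT_T_VXLAN via
 * the well-known port already; this handler copies the flags/VNI fields
 * and marks the flow as an L3 tunnel unconditionally.
 */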

/* Function to handle the parsing of RTE Flow item GRE Header. */
int32_t
ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_gre *gre_spec = item->spec;
	const struct rte_flow_item_gre *gre_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_GRE_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(gre_spec, c_rsvd0_ver),
			      ulp_deference_struct(gre_mask, c_rsvd0_ver),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(gre_spec, protocol),
			      ulp_deference_struct(gre_mask, protocol),
			      ULP_PRSR_ACT_DEFAULT);

	/* Update the hdr_bitmap with GRE */
	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item ANY. */
int32_t
ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item ICMP Header. */
int32_t
ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_icmp *icmp_spec = item->spec;
	const struct rte_flow_item_icmp *icmp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_type),
			      ulp_deference_struct(icmp_mask, hdr.icmp_type),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_code);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_code),
			      ulp_deference_struct(icmp_mask, hdr.icmp_code),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_cksum),
			      ulp_deference_struct(icmp_mask, hdr.icmp_cksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_ident);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_ident),
			      ulp_deference_struct(icmp_mask, hdr.icmp_ident),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_seq_nb);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_seq_nb),
			      ulp_deference_struct(icmp_mask, hdr.icmp_seq_nb),
			      ULP_PRSR_ACT_DEFAULT);

	/* Update the hdr_bitmap with ICMP */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
	else
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow item ICMP6 Header. */
int32_t
ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_icmp6 *icmp_spec = item->spec;
	const struct rte_flow_item_icmp6 *icmp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, type),
			      ulp_deference_struct(icmp_mask, type),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->code);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, code),
			      ulp_deference_struct(icmp_mask, code),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->checksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, checksum),
			      ulp_deference_struct(icmp_mask, checksum),
			      ULP_PRSR_ACT_DEFAULT);

	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4)) {
		BNXT_TF_DBG(ERR, "Error: incorrect icmp version\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the hdr_bitmap with ICMP */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
	else
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item void Header. */
int32_t
ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action void Header. */
int32_t
ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action Mark Header. */
int32_t
ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
			 struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_mark *mark;
	struct ulp_rte_act_bitmap *act = &param->act_bitmap;
	uint32_t mark_id;

	mark = action_item->conf;
	if (mark) {
		mark_id = tfp_cpu_to_be_32(mark->id);
		memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
		       &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);

		/* Update the act_bitmap with mark */
		ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
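/*
 * Illustration only (not compiled): a MARK action accepted by the handler
 * above. The 32-bit id is byte-swapped and stored in the action properties;
 * matched packets then report the id through the mbuf mark/FDIR metadata
 * (the exact mbuf field and flag names depend on the DPDK release).
 *
 *	struct rte_flow_action_mark mark = { .id = 0xbeef };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */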
/* Function to handle the parsing of RTE Flow action RSS Header. */
int32_t
ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
			struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_rss *rss;
	struct ulp_rte_act_prop *ap = &param->act_prop;

	if (action_item == NULL || action_item->conf == NULL) {
		BNXT_TF_DBG(ERR, "Parse Err: invalid rss configuration\n");
		return BNXT_TF_RC_ERROR;
	}

	rss = action_item->conf;
	/* Copy the rss into the specific action properties */
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_TYPES], &rss->types,
	       BNXT_ULP_ACT_PROP_SZ_RSS_TYPES);
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL], &rss->level,
	       BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL);
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN],
	       &rss->key_len, BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN);

	if (rss->key_len > BNXT_ULP_ACT_PROP_SZ_RSS_KEY) {
		BNXT_TF_DBG(ERR, "Parse Err: RSS key too big\n");
		return BNXT_TF_RC_ERROR;
	}
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY], rss->key,
	       rss->key_len);

	/* set the RSS action header bit */
	ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);

	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of the encap RTE Flow item eth Header. */
static void
ulp_rte_enc_eth_hdr_handler(struct ulp_rte_parser_params *params,
			    const struct rte_flow_item_eth *eth_spec)
{
	struct ulp_rte_hdr_field *field;
	uint32_t size;

	field = &params->enc_field[BNXT_ULP_ENC_FIELD_ETH_DMAC];
	size = sizeof(eth_spec->dst.addr_bytes);
	field = ulp_rte_parser_fld_copy(field, eth_spec->dst.addr_bytes, size);

	size = sizeof(eth_spec->src.addr_bytes);
	field = ulp_rte_parser_fld_copy(field, eth_spec->src.addr_bytes, size);

	size = sizeof(eth_spec->type);
	field = ulp_rte_parser_fld_copy(field, &eth_spec->type, size);

	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
}

/* Function to handle the parsing of the encap RTE Flow item vlan Header. */
static void
ulp_rte_enc_vlan_hdr_handler(struct ulp_rte_parser_params *params,
			     const struct rte_flow_item_vlan *vlan_spec,
			     uint32_t inner)
{
	struct ulp_rte_hdr_field *field;
	uint32_t size;

	if (!inner) {
		field = &params->enc_field[BNXT_ULP_ENC_FIELD_O_VLAN_TCI];
		ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OO_VLAN);
	} else {
		field = &params->enc_field[BNXT_ULP_ENC_FIELD_I_VLAN_TCI];
		ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OI_VLAN);
	}

	size = sizeof(vlan_spec->tci);
	field = ulp_rte_parser_fld_copy(field, &vlan_spec->tci, size);

	size = sizeof(vlan_spec->inner_type);
	field = ulp_rte_parser_fld_copy(field, &vlan_spec->inner_type, size);
}
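/*
 * Note on the enc_* helpers above and below: ulp_rte_parser_fld_copy()
 * returns a pointer to the next hdr_field slot, so each helper seeds
 * "field" at a fixed BNXT_ULP_ENC_FIELD_* offset and then copies
 * consecutive protocol fields in layout order. Sketch, assuming the enum
 * orders the eth slots DMAC, SMAC, TYPE:
 *
 *	field = &params->enc_field[BNXT_ULP_ENC_FIELD_ETH_DMAC];
 *	field = ulp_rte_parser_fld_copy(field, dmac, 6);   now at SMAC slot
 *	field = ulp_rte_parser_fld_copy(field, smac, 6);   now at TYPE slot
 *
 * This relies on the BNXT_ULP_ENC_FIELD_* enum matching the copy order.
 */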
/* Function to handle the parsing of the encap RTE Flow item ipv4 Header. */
static void
ulp_rte_enc_ipv4_hdr_handler(struct ulp_rte_parser_params *params,
			     const struct rte_flow_item_ipv4 *ip)
{
	struct ulp_rte_hdr_field *field;
	uint32_t size;
	uint8_t val8;

	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_IHL];
	size = sizeof(ip->hdr.version_ihl);
	if (!ip->hdr.version_ihl)
		val8 = RTE_IPV4_VHL_DEF;
	else
		val8 = ip->hdr.version_ihl;
	field = ulp_rte_parser_fld_copy(field, &val8, size);

	size = sizeof(ip->hdr.type_of_service);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.type_of_service, size);

	size = sizeof(ip->hdr.packet_id);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.packet_id, size);

	size = sizeof(ip->hdr.fragment_offset);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.fragment_offset, size);

	size = sizeof(ip->hdr.time_to_live);
	if (!ip->hdr.time_to_live)
		val8 = BNXT_ULP_DEFAULT_TTL;
	else
		val8 = ip->hdr.time_to_live;
	field = ulp_rte_parser_fld_copy(field, &val8, size);

	size = sizeof(ip->hdr.next_proto_id);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.next_proto_id, size);

	size = sizeof(ip->hdr.src_addr);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);

	size = sizeof(ip->hdr.dst_addr);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);

	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4);
}

/* Function to handle the parsing of the encap RTE Flow item ipv6 Header. */
static void
ulp_rte_enc_ipv6_hdr_handler(struct ulp_rte_parser_params *params,
			     const struct rte_flow_item_ipv6 *ip)
{
	struct ulp_rte_hdr_field *field;
	uint32_t size;
	uint32_t val32;
	uint8_t val8;

	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW];
	size = sizeof(ip->hdr.vtc_flow);
	if (!ip->hdr.vtc_flow)
		val32 = rte_cpu_to_be_32(BNXT_ULP_IPV6_DFLT_VER);
	else
		val32 = ip->hdr.vtc_flow;
	field = ulp_rte_parser_fld_copy(field, &val32, size);

	size = sizeof(ip->hdr.proto);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.proto, size);

	size = sizeof(ip->hdr.hop_limits);
	if (!ip->hdr.hop_limits)
		val8 = BNXT_ULP_DEFAULT_TTL;
	else
		val8 = ip->hdr.hop_limits;
	field = ulp_rte_parser_fld_copy(field, &val8, size);

	size = sizeof(ip->hdr.src_addr);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);

	size = sizeof(ip->hdr.dst_addr);
	field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);

	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV6);
}
/* Function to handle the parsing of the encap RTE Flow item UDP Header. */
static void
ulp_rte_enc_udp_hdr_handler(struct ulp_rte_parser_params *params,
			    const struct rte_flow_item_udp *udp_spec)
{
	struct ulp_rte_hdr_field *field;
	uint32_t size;
	uint8_t type = IPPROTO_UDP;

	field = &params->enc_field[BNXT_ULP_ENC_FIELD_UDP_SPORT];
	size = sizeof(udp_spec->hdr.src_port);
	field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.src_port, size);

	size = sizeof(udp_spec->hdr.dst_port);
	field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.dst_port, size);

	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_UDP);

	/* Update the ip header protocol */
	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_PROTO];
	ulp_rte_parser_fld_copy(field, &type, sizeof(type));
	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_PROTO];
	ulp_rte_parser_fld_copy(field, &type, sizeof(type));
}

/* Function to handle the parsing of the encap RTE Flow item vxlan Header. */
static void
ulp_rte_enc_vxlan_hdr_handler(struct ulp_rte_parser_params *params,
			      struct rte_flow_item_vxlan *vxlan_spec)
{
	struct ulp_rte_hdr_field *field;
	uint32_t size;

	field = &params->enc_field[BNXT_ULP_ENC_FIELD_VXLAN_FLAGS];
	size = sizeof(vxlan_spec->flags);
	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->flags, size);

	size = sizeof(vxlan_spec->rsvd0);
	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd0, size);

	size = sizeof(vxlan_spec->vni);
	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->vni, size);

	size = sizeof(vxlan_spec->rsvd1);
	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd1, size);

	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_VXLAN);
}
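/*
 * Illustration only (not compiled): the shape of a VXLAN_ENCAP action
 * accepted by the handler below. The definition list must be
 * eth / [vlan [vlan]] / ipv4|ipv6 / udp / vxlan / end; VOID items are
 * skipped. All values here are arbitrary examples.
 *
 *	struct rte_flow_item_eth eth = { ... };
 *	struct rte_flow_item_ipv4 ip4 = { ... };
 *	struct rte_flow_item_udp udp = { .hdr.dst_port = RTE_BE16(4789) };
 *	struct rte_flow_item_vxlan vxlan = { .vni = { 0x00, 0x00, 0x2a } };
 *	struct rte_flow_item defs[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_vxlan_encap conf = { .definition = defs };
 */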
/* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
int32_t
ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
				struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_vxlan_encap *vxlan_encap;
	const struct rte_flow_item *item;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	struct rte_flow_item_vxlan vxlan_spec;
	uint32_t vlan_num = 0, vlan_size = 0;
	uint32_t ip_size = 0, ip_type = 0;
	uint32_t vxlan_size = 0;
	struct ulp_rte_act_bitmap *act = &params->act_bitmap;
	struct ulp_rte_act_prop *ap = &params->act_prop;

	vxlan_encap = action_item->conf;
	if (!vxlan_encap) {
		BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
		return BNXT_TF_RC_ERROR;
	}

	item = vxlan_encap->definition;
	if (!item) {
		BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
		return BNXT_TF_RC_ERROR;
	}

	if (!ulp_rte_item_skip_void(&item, 0))
		return BNXT_TF_RC_ERROR;

	/* must have ethernet header */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		BNXT_TF_DBG(ERR, "Parse Error: vxlan encap does not have eth\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Parse the ethernet header */
	if (item->spec)
		ulp_rte_enc_eth_hdr_handler(params, item->spec);

	/* Go to the next item */
	if (!ulp_rte_item_skip_void(&item, 1))
		return BNXT_TF_RC_ERROR;

	/* May have a vlan header */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_num++;
		if (item->spec)
			ulp_rte_enc_vlan_hdr_handler(params, item->spec, 0);

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	}

	/* May have a second vlan header */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_num++;
		if (item->spec)
			ulp_rte_enc_vlan_hdr_handler(params, item->spec, 1);

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	}

	/* Update the vlan count and size if one or more vlans are present */
	if (vlan_num) {
		vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
		vlan_num = tfp_cpu_to_be_32(vlan_num);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
		       &vlan_num,
		       sizeof(uint32_t));
		vlan_size = tfp_cpu_to_be_32(vlan_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
		       &vlan_size,
		       sizeof(uint32_t));
	}

	/* L3 must be IPv4 or IPv6 */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		ipv4_spec = item->spec;
		ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;

		/* Update the ip size details */
		ip_size = tfp_cpu_to_be_32(ip_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
		       &ip_size, sizeof(uint32_t));

		/* update the ip type */
		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
		       &ip_type, sizeof(uint32_t));

		/* update the computed field to notify it is ipv4 header */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
				    1);
		if (ipv4_spec)
			ulp_rte_enc_ipv4_hdr_handler(params, ipv4_spec);

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		ipv6_spec = item->spec;
		ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;

		/* Update the ip size details */
		ip_size = tfp_cpu_to_be_32(ip_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
		       &ip_size, sizeof(uint32_t));

		/* update the ip type */
		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
		       &ip_type, sizeof(uint32_t));

		/* update the computed field to notify it is ipv6 header */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
				    1);
		if (ipv6_spec)
			ulp_rte_enc_ipv6_hdr_handler(params, ipv6_spec);

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	} else {
		BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
		return BNXT_TF_RC_ERROR;
	}

	/* L4 is UDP */
	if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
		BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
		return BNXT_TF_RC_ERROR;
	}
	if (item->spec)
		ulp_rte_enc_udp_hdr_handler(params, item->spec);

	if (!ulp_rte_item_skip_void(&item, 1))
		return BNXT_TF_RC_ERROR;

	/* Finally VXLAN */
	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
		BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
		return BNXT_TF_RC_ERROR;
	}
	vxlan_size = sizeof(struct rte_flow_item_vxlan);
	/* copy the vxlan details */
	memcpy(&vxlan_spec, item->spec, vxlan_size);
	vxlan_spec.flags = 0x08;
	vxlan_size = tfp_cpu_to_be_32(vxlan_size);
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
	       &vxlan_size, sizeof(uint32_t));

	ulp_rte_enc_vxlan_hdr_handler(params, &vxlan_spec);

	/* update the act_bitmap with vxlan encap */
	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action vxlan_decap Header */
int32_t
ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
				__rte_unused,
				struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with vxlan decap */
	ULP_BITMAP_SET(params->act_bitmap.bits,
		       BNXT_ULP_ACT_BIT_VXLAN_DECAP);
	/* Update computational field with tunnel decap info */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action drop Header. */
int32_t
ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with drop */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action count. */
int32_t
ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_count *act_count;
	struct ulp_rte_act_prop *act_prop = &params->act_prop;

	act_count = action_item->conf;
	if (act_count) {
		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
		       &act_count->id,
		       BNXT_ULP_ACT_PROP_SZ_COUNT);
	}

	/* Update the act_bitmap with count */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
	return BNXT_TF_RC_SUCCESS;
}
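/*
 * Illustration only (not compiled): COUNT action usage against the handler
 * above. The optional conf carries a counter id that is copied into the
 * action properties; once the flow is created, hit/byte counters are read
 * back through the standard rte_flow_query() path:
 *
 *	struct rte_flow_action_count cnt_conf = { .id = 0 };
 *	struct rte_flow_query_count qry = { .reset = 1 };
 *	struct rte_flow_action cnt_act = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &cnt_conf };
 *	... create the flow with cnt_act in its action list ...
 *	rte_flow_query(port_id, flow, &cnt_act, &qry, &error);
 */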
/* Function to handle the parsing of action ports. */
static int32_t
ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
			    uint32_t ifindex,
			    enum bnxt_ulp_direction_type act_dir)
{
	enum bnxt_ulp_direction_type dir;
	uint16_t pid_s;
	uint32_t pid;
	struct ulp_rte_act_prop *act = &param->act_prop;
	enum bnxt_ulp_intf_type port_type;
	uint32_t vnic_type;

	/* Get the direction: if the action implicitly specifies one, use it */
	dir = (act_dir == BNXT_ULP_DIR_INVALID) ?
		ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION) :
		act_dir;
	port_type = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
	if (dir == BNXT_ULP_DIR_EGRESS &&
	    port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
		/* For egress direction, fill vport */
		if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
			return BNXT_TF_RC_ERROR;

		pid = pid_s;
		pid = rte_cpu_to_be_32(pid);
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
	} else {
		/* For ingress direction, fill vnic */
		/*
		 * Action			Destination
		 * ------------------------------------
		 * PORT_REPRESENTOR		Driver Function
		 * ------------------------------------
		 * REPRESENTED_PORT		VF
		 * ------------------------------------
		 * PORT_ID			VF
		 */
		if (act_dir != BNXT_ULP_DIR_INGRESS &&
		    port_type == BNXT_ULP_INTF_TYPE_VF_REP)
			vnic_type = BNXT_ULP_VF_FUNC_VNIC;
		else
			vnic_type = BNXT_ULP_DRV_FUNC_VNIC;

		if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
						 vnic_type, &pid_s))
			return BNXT_TF_RC_ERROR;

		pid = pid_s;
		pid = rte_cpu_to_be_32(pid);
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
	}

	/* Update the action port set bit */
	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action PF. */
int32_t
ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
		       struct ulp_rte_parser_params *params)
{
	uint32_t port_id;
	uint32_t ifindex;
	enum bnxt_ulp_intf_type intf_type;

	/* Get the port id of the current device */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

	/* Get the port db ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "Invalid port id\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Check that the port is a PF port */
	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
		BNXT_TF_DBG(ERR, "Port is not a PF port\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Update the action properties */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
	return ulp_rte_parser_act_port_set(params, ifindex,
					   BNXT_ULP_DIR_INVALID);
}
/* Function to handle the parsing of RTE Flow action VF. */
int32_t
ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
		       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_vf *vf_action;
	enum bnxt_ulp_intf_type intf_type;
	uint32_t ifindex;
	struct bnxt *bp;

	vf_action = action_item->conf;
	if (!vf_action) {
		BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	if (vf_action->original) {
		BNXT_TF_DBG(ERR, "ParseErr: VF Original not supported\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	bp = bnxt_pmd_get_bp(params->port_id);
	if (bp == NULL) {
		BNXT_TF_DBG(ERR, "Invalid bp\n");
		return BNXT_TF_RC_ERROR;
	}

	/* vf_action->id is a logical number which in this case is an
	 * offset from the first VF. So, to get the absolute VF id, the
	 * offset must be added to the absolute first vf id of that port.
	 */
	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
						 bp->first_vf_id +
						 vf_action->id,
						 &ifindex)) {
		BNXT_TF_DBG(ERR, "VF is not a valid interface\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Check that the port is a VF port */
	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
	    intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
		BNXT_TF_DBG(ERR, "Port is not a VF port\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the action properties */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
	return ulp_rte_parser_act_port_set(params, ifindex,
					   BNXT_ULP_DIR_INVALID);
}

/* Parse actions PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
int32_t
ulp_rte_port_act_handler(const struct rte_flow_action *act_item,
			 struct ulp_rte_parser_params *param)
{
	uint32_t ethdev_id;
	uint32_t ifindex;
	enum bnxt_ulp_intf_type intf_type;
	enum bnxt_ulp_direction_type act_dir;

	if (!act_item->conf) {
		BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	switch (act_item->type) {
	case RTE_FLOW_ACTION_TYPE_PORT_ID: {
		const struct rte_flow_action_port_id *port_id = act_item->conf;

		if (port_id->original) {
			BNXT_TF_DBG(ERR,
				    "ParseErr: Portid Original not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		ethdev_id = port_id->id;
		act_dir = BNXT_ULP_DIR_INVALID;
		break;
	}
	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: {
		const struct rte_flow_action_ethdev *ethdev = act_item->conf;

		ethdev_id = ethdev->port_id;
		act_dir = BNXT_ULP_DIR_INGRESS;
		break;
	}
	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
		const struct rte_flow_action_ethdev *ethdev = act_item->conf;

		ethdev_id = ethdev->port_id;
		act_dir = BNXT_ULP_DIR_EGRESS;
		break;
	}
	default:
		BNXT_TF_DBG(ERR, "Unknown port action\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get the port db ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, ethdev_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "Invalid port id\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get the intf type */
	intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
	if (!intf_type) {
		BNXT_TF_DBG(ERR, "Invalid port type\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Set the action port */
	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
	return ulp_rte_parser_act_port_set(param, ifindex, act_dir);
}
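/*
 * Usage note for the port actions above (mirrors the table in
 * ulp_rte_parser_act_port_set): PORT_ID leaves the direction to the flow
 * attributes, while the ethdev-based actions imply it. Sketch, where
 * vf_rep_port is a hypothetical representor port id:
 *
 *	struct rte_flow_action_ethdev e = { .port_id = vf_rep_port };
 *	struct rte_flow_action fwd = {
 *		.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,  to the VF
 *		.conf = &e,
 *	};
 *
 * RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR with the same conf would instead
 * target the representor's own driver function.
 */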
/* Function to handle the parsing of RTE Flow action phy_port. */
int32_t
ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
			     struct ulp_rte_parser_params *prm)
{
	const struct rte_flow_action_phy_port *phy_port;
	uint32_t pid;
	int32_t rc;
	uint16_t pid_s;
	enum bnxt_ulp_direction_type dir;

	phy_port = action_item->conf;
	if (!phy_port) {
		BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	if (phy_port->original) {
		BNXT_TF_DBG(ERR, "Parse Err: Port Original not supported\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir != BNXT_ULP_DIR_EGRESS) {
		BNXT_TF_DBG(ERR,
			    "Parse Err: Phy ports are valid only for egress\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	/* Get the physical port details from port db */
	rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
					    &pid_s);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to get port details\n");
		return -EINVAL;
	}

	pid = pid_s;
	pid = rte_cpu_to_be_32(pid);
	memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
	       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);

	/* Update the action port set bit */
	ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
	ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
			    BNXT_ULP_INTF_TYPE_PHY_PORT);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action pop vlan. */
int32_t
ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
				struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with pop */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action push vlan. */
int32_t
ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
				 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_of_push_vlan *push_vlan;
	uint16_t ethertype;
	struct ulp_rte_act_prop *act = &params->act_prop;

	push_vlan = action_item->conf;
	if (push_vlan) {
		ethertype = push_vlan->ethertype;
		if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
			BNXT_TF_DBG(ERR,
				    "Parse Err: Ethertype not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
		       &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
		/* Update the act_bitmap with push vlan */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_PUSH_VLAN);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
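/*
 * Illustration only (not compiled): OF_PUSH_VLAN is typically combined
 * with OF_SET_VLAN_VID (and optionally OF_SET_VLAN_PCP, below) to fully
 * describe the pushed tag; only ethertype 0x8100 (RTE_ETHER_TYPE_VLAN)
 * passes the check above.
 *
 *	struct rte_flow_action_of_push_vlan push = {
 *		.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
 *	};
 *	struct rte_flow_action_of_set_vlan_vid vid = {
 *		.vlan_vid = RTE_BE16(100),
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push },
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */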
/* Function to handle the parsing of RTE Flow action set vlan id. */
int32_t
ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
				    struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
	uint32_t vid;
	struct ulp_rte_act_prop *act = &params->act_prop;

	vlan_vid = action_item->conf;
	if (vlan_vid && vlan_vid->vlan_vid) {
		vid = vlan_vid->vlan_vid;
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
		       &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
		/* Update the act_bitmap with set vlan vid */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_VLAN_VID);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set vlan pcp. */
int32_t
ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
				    struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
	uint8_t pcp;
	struct ulp_rte_act_prop *act = &params->act_prop;

	vlan_pcp = action_item->conf;
	if (vlan_pcp) {
		pcp = vlan_pcp->vlan_pcp;
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
		       &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
		/* Update the act_bitmap with set vlan pcp */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set ipv4 src. */
int32_t
ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
				 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_ipv4 *set_ipv4;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_ipv4 = action_item->conf;
	if (set_ipv4) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
		/* Update the act_bitmap with set ipv4 src */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set ipv4 dst. */
int32_t
ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
				 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_ipv4 *set_ipv4;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_ipv4 = action_item->conf;
	if (set_ipv4) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
		/* Update the act_bitmap with set ipv4 dst */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_IPV4_DST);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
/* Function to handle the parsing of RTE Flow action set tp src. */
int32_t
ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
			       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_tp *set_tp;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_tp = action_item->conf;
	if (set_tp) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
		/* Update the act_bitmap with set tp src */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_TP_SRC);
		return BNXT_TF_RC_SUCCESS;
	}

	BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set tp dst. */
int32_t
ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
			       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_tp *set_tp;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_tp = action_item->conf;
	if (set_tp) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
		/* Update the act_bitmap with set tp dst */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_TP_DST);
		return BNXT_TF_RC_SUCCESS;
	}

	BNXT_TF_DBG(ERR, "Parse Error: set tp dst arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action dec ttl. */
int32_t
ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
			    struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with dec ttl */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action JUMP */
int32_t
ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with jump */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
	return BNXT_TF_RC_SUCCESS;
}

int32_t
ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
			   struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_sample *sample;
	int ret;

	sample = action_item->conf;

	/* if SAMPLE bit is set it means this sample action is nested within
	 * the actions of another sample action; this is not allowed
	 */
	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
			     BNXT_ULP_ACT_BIT_SAMPLE))
		return BNXT_TF_RC_ERROR;

	/* a sample action is only allowed as a shared action */
	if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
			      BNXT_ULP_ACT_BIT_SHARED))
		return BNXT_TF_RC_ERROR;

	/* only a ratio of 1 i.e. 100% is supported */
	if (sample->ratio != 1)
		return BNXT_TF_RC_ERROR;

	if (!sample->actions)
		return BNXT_TF_RC_ERROR;

	/* parse the nested actions for a sample action */
	ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
	if (ret == BNXT_TF_RC_SUCCESS)
		/* Update the act_bitmap with sample */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SAMPLE);

	return ret;
}
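/*
 * Sketch of a SAMPLE conf that satisfies the checks above (accepted only
 * while parsing a shared action list, i.e. with BNXT_ULP_ACT_BIT_SHARED
 * already set); nested_actions is a hypothetical END-terminated action
 * array that contains no nested SAMPLE:
 *
 *	struct rte_flow_action_sample s = {
 *		.ratio = 1,		 only 100% sampling is supported
 *		.actions = nested_actions,
 *	};
 */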
/* Function to handle the parsing of the bnxt vendor Flow action vxlan decap. */
int32_t
ulp_vendor_vxlan_decap_act_handler(const struct rte_flow_action *action_item,
				   struct ulp_rte_parser_params *params)
{
	/* Set the F1 flow header bit */
	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);
	return ulp_rte_vxlan_decap_act_handler(action_item, params);
}

/* Function to handle the parsing of the bnxt vendor Flow item vxlan decap. */
int32_t
ulp_rte_vendor_vxlan_decap_hdr_handler(const struct rte_flow_item *item,
				       struct ulp_rte_parser_params *params)
{
	RTE_SET_USED(item);
	/* Set the F2 flow header bit */
	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F2);
	return ulp_rte_vxlan_decap_act_handler(NULL, params);
}