/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include "bnxt.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "bnxt_ulp.h"
#include "bnxt_tf_common.h"
#include "ulp_rte_parser.h"
#include "ulp_matcher.h"
#include "ulp_utils.h"
#include "tfp.h"
#include "ulp_port_db.h"
#include "ulp_flow_db.h"
#include "ulp_mapper.h"
#include "ulp_tun.h"
#include "ulp_template_db_tbl.h"

/* Local defines for the parsing functions */
#define ULP_VLAN_PRIORITY_SHIFT		13 /* First 3 bits */
#define ULP_VLAN_PRIORITY_MASK		0x700
#define ULP_VLAN_TAG_MASK		0xFFF /* Last 12 bits */
#define ULP_UDP_PORT_VXLAN		4789

/* Utility function to skip the void items. */
static inline int32_t
ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
{
	if (!*item)
		return 0;
	if (increment)
		(*item)++;
	while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
		(*item)++;
	if (*item)
		return 1;
	return 0;
}

/* Utility function to update the field_bitmap */
static void
ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
				   uint32_t idx,
				   enum bnxt_ulp_prsr_action prsr_act)
{
	struct ulp_rte_hdr_field *field;

	field = &params->hdr_field[idx];
	if (ulp_bitmap_notzero(field->mask, field->size)) {
		ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
		if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
			ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
		/* Not an exact match */
		if (!ulp_bitmap_is_ones(field->mask, field->size))
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_WC_MATCH, 1);
	} else {
		ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
	}
}

#define ulp_deference_struct(x, y) ((x) ? &((x)->y) : NULL)
/* Utility function to copy field spec and mask items */
static void
ulp_rte_prsr_fld_mask(struct ulp_rte_parser_params *params,
		      uint32_t *idx,
		      uint32_t size,
		      const void *spec_buff,
		      const void *mask_buff,
		      enum bnxt_ulp_prsr_action prsr_act)
{
	struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];

	/* update the field size */
	field->size = size;

	/* copy the mask specifications only if mask is not null */
	if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff) {
		memcpy(field->mask, mask_buff, size);
		ulp_rte_parser_field_bitmap_update(params, *idx, prsr_act);
	}

	/* copy the protocol specifications only if mask is not null */
	if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size))
		memcpy(field->spec, spec_buff, size);

	/* Increment the index */
	*idx = *idx + 1;
}

/* Utility function to validate there is room for the protocol fields */
static int32_t
ulp_rte_prsr_fld_size_validate(struct ulp_rte_parser_params *params,
			       uint32_t *idx,
			       uint32_t size)
{
	if (params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX) {
		BNXT_TF_DBG(ERR, "OOB for field processing %u\n", *idx);
		return -EINVAL;
	}
	*idx = params->field_idx;
	params->field_idx += size;
	return 0;
}
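/*
 * Illustrative sketch (not part of the driver, compiled out): how a
 * protocol handler typically combines ulp_rte_prsr_fld_size_validate()
 * and ulp_rte_prsr_fld_mask(). The field count macro used here
 * (BNXT_ULP_PROTO_HDR_UDP_NUM) mirrors the real handlers below; the
 * handler body is abbreviated.
 */
#if 0
static int32_t
example_udp_field_copy(const struct rte_flow_item *item,
		       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	uint32_t idx = 0;

	/* Reserve hdr_field slots for all UDP fields up front */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_UDP_NUM))
		return BNXT_TF_RC_ERROR;

	/* Copy one field; idx auto-increments to the next slot */
	ulp_rte_prsr_fld_mask(params, &idx,
			      sizeof(spec->hdr.src_port),
			      ulp_deference_struct(spec, hdr.src_port),
			      ulp_deference_struct(mask, hdr.src_port),
			      ULP_PRSR_ACT_DEFAULT);
	return BNXT_TF_RC_SUCCESS;
}
#endif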
/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow items into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item *item = pattern;
	struct bnxt_ulp_rte_hdr_info *hdr_info;

	params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;

	/* Set the computed flags for no vlan tags before parsing */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);

	/* Parse all the items in the pattern */
	while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* get the header information from the flow_hdr_info table */
		hdr_info = &ulp_hdr_info[item->type];
		if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
			BNXT_TF_DBG(ERR,
				    "Truflow parser does not support type %d\n",
				    item->type);
			return BNXT_TF_RC_PARSE_ERR;
		} else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_hdr_func) {
				if (hdr_info->proto_hdr_func(item, params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		item++;
	}
	/* update the implied SVIF */
	return ulp_rte_parser_implicit_match_port_process(params);
}

/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow actions into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action *action_item = actions;
	struct bnxt_ulp_rte_act_info *hdr_info;

	/* Parse all the actions in the list */
	while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
		/* get the action information from the flow_act_info table */
		hdr_info = &ulp_act_info[action_item->type];
		if (hdr_info->act_type ==
		    BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
			BNXT_TF_DBG(ERR,
				    "Truflow parser does not support act %u\n",
				    action_item->type);
			return BNXT_TF_RC_ERROR;
		} else if (hdr_info->act_type ==
			   BNXT_ULP_ACT_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_act_func) {
				if (hdr_info->proto_act_func(action_item,
							     params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		action_item++;
	}
	/* update the implied port details */
	ulp_rte_parser_implicit_act_port_process(params);
	return BNXT_TF_RC_SUCCESS;
}
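/*
 * Illustrative sketch (not part of the driver, compiled out): the
 * expected calling order from a flow-create path. Parameter setup is
 * abbreviated; bnxt_ulp_rte_parser_post_process() is defined later in
 * this file.
 */
#if 0
static int32_t
example_parse_flow(const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct ulp_rte_parser_params *params)
{
	/* Items first: builds hdr_bitmap/hdr_field and the implicit SVIF */
	if (bnxt_ulp_rte_parser_hdr_parse(pattern, params) !=
	    BNXT_TF_RC_SUCCESS)
		return BNXT_TF_RC_ERROR;

	/* Actions next: builds act_bitmap and the implicit action port */
	if (bnxt_ulp_rte_parser_act_parse(actions, params) !=
	    BNXT_TF_RC_SUCCESS)
		return BNXT_TF_RC_ERROR;

	/* Finally derive the direction/interface computed fields */
	return bnxt_ulp_rte_parser_post_process(params);
}
#endif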
/*
 * Function to handle the post processing of the computed
 * fields for the interface.
 */
static void
bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
{
	uint32_t ifindex;
	uint16_t port_id, parif;
	uint32_t mtype;
	enum bnxt_ulp_direction_type dir;

	/* get the direction details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);

	/* read the port id details */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return;
	}

	if (dir == BNXT_ULP_DIR_INGRESS) {
		/* Set port PARIF */
		if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
					  BNXT_ULP_PHY_PORT_PARIF, &parif)) {
			BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
			return;
		}
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
				    parif);
	} else {
		/* Get the match port type */
		mtype = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
		if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
					    1);
			/* Set VF func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_VF_FUNC_PARIF,
						  &parif)) {
				BNXT_TF_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
					    parif);

		} else {
			/* Set DRV func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_DRV_FUNC_PARIF,
						  &parif)) {
				BNXT_TF_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
					    parif);
		}
		if (mtype == BNXT_ULP_INTF_TYPE_PF) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
					    1);
		}
	}
}

static int32_t
ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_intf_type match_port_type, act_port_type;
	enum bnxt_ulp_direction_type dir;
	uint32_t act_port_set;

	/* Get the computed details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
	act_port_type = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
	act_port_set = ULP_COMP_FLD_IDX_RD(params,
					   BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);

	/* set the flow direction in the proto and action header */
	if (dir == BNXT_ULP_DIR_EGRESS) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
	}

	/* calculate the VF to VF flag */
	if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);

	/* Update the decrement ttl computational fields */
	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
			     BNXT_ULP_ACT_BIT_DEC_TTL)) {
		/*
		 * If the tunnel (VXLAN) header is present and the VXLAN
		 * decap action is not set, then decrement the tunnel TTL.
		 * Similarly add GRE and NVGRE in the future.
		 */
		if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
				      BNXT_ULP_HDR_BIT_T_VXLAN) &&
		     !ULP_BITMAP_ISSET(params->act_bitmap.bits,
				       BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
		} else {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
		}
	}

	/* Merge the hdr_fp_bit into the proto header bit */
	params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;

	/* Update the comp fld fid */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);

	/* Update the computed interface parameters */
	bnxt_ulp_comp_fld_intf_update(params);

	/* TBD: Handle the flow rejection scenarios */
	return 0;
}

/*
 * Function to handle the post processing of the parsing details
 */
int32_t
bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
{
	ulp_post_process_normal_flow(params);
	return ulp_post_process_tun_flow(params);
}

/*
 * Function to compute the flow direction based on the match port details
 */
static void
bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_intf_type match_port_type;

	/* Get the match port type */
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);

	/* If ingress flow and the match port is a VF rep then dir is egress */
	if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
				    BNXT_ULP_DIR_EGRESS);
	} else {
		/* Assign the input direction */
		if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
					    BNXT_ULP_DIR_INGRESS);
		else
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
					    BNXT_ULP_DIR_EGRESS);
	}
}
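/*
 * Illustrative sketch (not part of the driver, compiled out): the
 * direction computed above for the common attribute/match-port
 * combinations. The helper below merely demonstrates the expected
 * results; the enum values are the ones used in this file.
 */
#if 0
static void
example_direction_compute(struct ulp_rte_parser_params *params)
{
	/* ingress attr + VF-rep match port: hardware direction is egress */
	params->dir_attr = BNXT_ULP_FLOW_ATTR_INGRESS;
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
			    BNXT_ULP_INTF_TYPE_VF_REP);
	bnxt_ulp_rte_parser_direction_compute(params);
	/* BNXT_ULP_CF_IDX_DIRECTION now reads BNXT_ULP_DIR_EGRESS */

	/* ingress attr + any other match port: direction stays ingress */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
			    BNXT_ULP_INTF_TYPE_PHY_PORT);
	bnxt_ulp_rte_parser_direction_compute(params);
	/* BNXT_ULP_CF_IDX_DIRECTION now reads BNXT_ULP_DIR_INGRESS */
}
#endif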
/* Utility function to update the SVIF details in the parser params */
static int32_t
ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
			uint32_t ifindex,
			uint16_t mask)
{
	uint16_t svif;
	enum bnxt_ulp_direction_type dir;
	struct ulp_rte_hdr_field *hdr_field;
	enum bnxt_ulp_svif_type svif_type;
	enum bnxt_ulp_intf_type port_type;

	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
	    BNXT_ULP_INVALID_SVIF_VAL) {
		BNXT_TF_DBG(ERR,
			    "SVIF already set, multiple sources not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get port type details */
	port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
		BNXT_TF_DBG(ERR, "Invalid port type\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the match port type */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);

	/* compute the direction */
	bnxt_ulp_rte_parser_direction_compute(params);

	/* Get the computed direction */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_INGRESS) {
		svif_type = BNXT_ULP_PHY_PORT_SVIF;
	} else {
		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
			svif_type = BNXT_ULP_VF_FUNC_SVIF;
		else
			svif_type = BNXT_ULP_DRV_FUNC_SVIF;
	}
	ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
			     &svif);
	svif = rte_cpu_to_be_16(svif);
	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
	memcpy(hdr_field->spec, &svif, sizeof(svif));
	memcpy(hdr_field->mask, &mask, sizeof(mask));
	hdr_field->size = sizeof(svif);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
			    rte_be_to_cpu_16(svif));
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of the RTE port id */
int32_t
ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
{
	uint16_t port_id = 0;
	uint16_t svif_mask = 0xFFFF;
	uint32_t ifindex;
	int32_t rc = BNXT_TF_RC_ERROR;

	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
	    BNXT_ULP_INVALID_SVIF_VAL)
		return BNXT_TF_RC_SUCCESS;

	/* SVIF not set. So get the port id */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}

	/* Update the SVIF details */
	rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
	return rc;
}

/* Function to handle the implicit action port id */
int32_t
ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
{
	struct rte_flow_action action_item = {0};
	struct rte_flow_action_port_id port_id = {0};

	/* Read the action port set bit */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
		/* Already set, so just exit */
		return BNXT_TF_RC_SUCCESS;
	}
	port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
	action_item.conf = &port_id;

	/* Update the action port based on incoming port */
	ulp_rte_port_id_act_handler(&action_item, params);

	/* Reset the action port set bit */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow item PF Header. */
int32_t
ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
		       struct ulp_rte_parser_params *params)
{
	uint16_t port_id = 0;
	uint16_t svif_mask = 0xFFFF;
	uint32_t ifindex;

	/* Get the implicit port id */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

	/* perform the conversion from dpdk port to bnxt ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
}

/* Function to handle the parsing of RTE Flow item VF Header. */
int32_t
ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
		       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vf *vf_spec = item->spec;
	const struct rte_flow_item_vf *vf_mask = item->mask;
	uint16_t mask = 0;
	uint32_t ifindex;
	int32_t rc = BNXT_TF_RC_PARSE_ERR;

	/* Get VF rte_flow_item for Port details */
	if (!vf_spec) {
		BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
		return rc;
	}
	if (!vf_mask) {
		BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
		return rc;
	}
	mask = vf_mask->id;

	/* perform the conversion from VF Func id to bnxt ifindex */
	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
						 vf_spec->id,
						 &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}
	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, mask);
}

/* Function to handle the parsing of RTE Flow item port id Header. */
int32_t
ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
			    struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_port_id *port_spec = item->spec;
	const struct rte_flow_item_port_id *port_mask = item->mask;
	uint16_t mask = 0;
	int32_t rc = BNXT_TF_RC_PARSE_ERR;
	uint32_t ifindex;

	if (!port_spec) {
		BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
		return rc;
	}
	if (!port_mask) {
		BNXT_TF_DBG(ERR, "ParseErr:Port id mask is not valid\n");
		return rc;
	}
	mask = port_mask->id;

	/* perform the conversion from dpdk port to bnxt ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_spec->id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}
	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, mask);
}
/* Function to handle the parsing of RTE Flow item phy port Header. */
int32_t
ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
			     struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_phy_port *port_spec = item->spec;
	const struct rte_flow_item_phy_port *port_mask = item->mask;
	uint16_t mask = 0;
	int32_t rc = BNXT_TF_RC_ERROR;
	uint16_t svif;
	enum bnxt_ulp_direction_type dir;
	struct ulp_rte_hdr_field *hdr_field;

	/* Copy the rte_flow_item for phy port into hdr_field */
	if (!port_spec) {
		BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
		return rc;
	}
	if (!port_mask) {
		BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
		return rc;
	}
	mask = port_mask->index;

	/* Update the match port type */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
			    BNXT_ULP_INTF_TYPE_PHY_PORT);

	/* Compute the Hw direction */
	bnxt_ulp_rte_parser_direction_compute(params);

	/* Direction validation */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_EGRESS) {
		BNXT_TF_DBG(ERR,
			    "Parse Err:Phy ports are valid only for ingress\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* Get the physical port details from port db */
	rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
					   &svif);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to get port details\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* Update the SVIF details */
	svif = rte_cpu_to_be_16(svif);
	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
	memcpy(hdr_field->spec, &svif, sizeof(svif));
	memcpy(hdr_field->mask, &mask, sizeof(mask));
	hdr_field->size = sizeof(svif);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
			    rte_be_to_cpu_16(svif));
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
			     uint16_t type, uint32_t in_flag)
{
	if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_IPV4);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_IPV4);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
		}
	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_IPV6);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_IPV6);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
		}
	}
}

/* Internal Function to identify broadcast or multicast packets */
static int32_t
ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
{
	if (rte_is_multicast_ether_addr(eth_addr) ||
	    rte_is_broadcast_ether_addr(eth_addr)) {
		BNXT_TF_DBG(DEBUG,
			    "No support for bcast or mcast addr offload\n");
		return 1;
	}
	return 0;
}
/* Function to handle the parsing of RTE Flow item Ethernet Header. */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_eth *eth_spec = item->spec;
	const struct rte_flow_item_eth *eth_mask = item->mask;
	uint32_t idx = 0;
	uint32_t size;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;

	/* Perform validations */
	if (eth_spec) {
		/* TODO: workaround to avoid multicast and broadcast addr */
		if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
			return BNXT_TF_RC_PARSE_ERR;

		if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
			return BNXT_TF_RC_PARSE_ERR;

		eth_type = eth_spec->type;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ETH_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}
	/*
	 * Copy the rte_flow_item for eth into hdr_field using ethernet
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_eth *)NULL)->dst.addr_bytes);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, dst.addr_bytes),
			      ulp_deference_struct(eth_mask, dst.addr_bytes),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_eth *)NULL)->src.addr_bytes);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, src.addr_bytes),
			      ulp_deference_struct(eth_mask, src.addr_bytes),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_eth *)NULL)->type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, type),
			      ulp_deference_struct(eth_mask, type),
			      ULP_PRSR_ACT_MATCH_IGNORE);

	/* Update the protocol hdr bitmap */
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_ETH) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);

	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow item Vlan Header. */
int32_t
ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vlan *vlan_spec = item->spec;
	const struct rte_flow_item_vlan *vlan_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bit;
	uint32_t idx = 0;
	uint16_t vlan_tag = 0, priority = 0;
	uint16_t vlan_tag_mask = 0, priority_mask = 0;
	uint32_t outer_vtag_num;
	uint32_t inner_vtag_num;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;
	uint32_t size;

	if (vlan_spec) {
		vlan_tag = ntohs(vlan_spec->tci);
		priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag &= ULP_VLAN_TAG_MASK;
		vlan_tag = htons(vlan_tag);
		eth_type = vlan_spec->inner_type;
	}

	if (vlan_mask) {
		vlan_tag_mask = ntohs(vlan_mask->tci);
		priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag_mask &= 0xfff;

		/*
		 * Priority and VLAN tag are stored together in 2 bytes.
		 * If the 3-bit priority mask is all ones, extend the
		 * remaining 13 bits to ones as well so that the whole
		 * field is treated as an exact match.
		 */
		if (priority_mask == ULP_VLAN_PRIORITY_MASK)
			priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
		if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
			vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
		vlan_tag_mask = htons(vlan_tag_mask);
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_S_VLAN_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for vlan into hdr_field using vlan
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_vlan *)NULL)->tci);
	/*
	 * The priority field is ignored since OVS sets it as a wildcard
	 * match and it is not supported. This is a workaround and shall
	 * be addressed in the future.
	 */
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      &priority,
			      &priority_mask,
			      ULP_PRSR_ACT_MASK_IGNORE);

	ulp_rte_prsr_fld_mask(params, &idx, size,
			      &vlan_tag,
			      &vlan_tag_mask,
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vlan *)NULL)->inner_type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vlan_spec, inner_type),
			      ulp_deference_struct(vlan_mask, inner_type),
			      ULP_PRSR_ACT_MATCH_IGNORE);

	/* Get the outer tag and inner tag counts */
	outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_O_VTAG_NUM);
	inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_I_VTAG_NUM);

	/* Update the hdr_bitmap of the vlans */
	hdr_bit = &params->hdr_bitmap;
	if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
	    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
	    !outer_vtag_num) {
		/* Update the vlan tag num */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OO_VLAN);
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   outer_vtag_num == 1) {
		/* update the vlan tag num */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OI_VLAN);
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   !inner_vtag_num) {
		/* update the vlan tag num */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_IO_VLAN);
		inner_flag = 1;
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   inner_vtag_num == 1) {
		/* update the vlan tag num */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_II_VLAN);
		inner_flag = 1;
	} else {
		BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
	return BNXT_TF_RC_SUCCESS;
}
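/*
 * Illustrative sketch (not part of the driver, compiled out): the mask
 * widening performed above, evaluated on a little-endian host. When the
 * 3-bit priority sub-mask (or the 12-bit VLAN-ID sub-mask) is fully
 * set, the stored 2-byte mask is widened to all ones so the field is
 * treated as an exact match.
 */
#if 0
static void
example_vlan_mask_widen(void)
{
	uint16_t tci_mask = ntohs(RTE_BE16(0xffff));	/* full TCI mask */
	uint16_t priority_mask, vlan_tag_mask;

	priority_mask = htons(tci_mask >> ULP_VLAN_PRIORITY_SHIFT);
	vlan_tag_mask = tci_mask & ULP_VLAN_TAG_MASK;

	if (priority_mask == ULP_VLAN_PRIORITY_MASK)
		priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
	if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
		vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
	/* Both 2-byte masks are now 0xffff, i.e. exact matches */
}
#endif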
/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
			     uint8_t proto, uint32_t in_flag)
{
	if (proto == IPPROTO_UDP) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_UDP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_UDP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
		}
	} else if (proto == IPPROTO_TCP) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_TCP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_TCP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
		}
	} else if (proto == IPPROTO_GRE) {
		ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
	} else if (proto == IPPROTO_ICMP) {
		if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN))
			ULP_BITMAP_SET(param->hdr_bitmap.bits,
				       BNXT_ULP_HDR_BIT_I_ICMP);
		else
			ULP_BITMAP_SET(param->hdr_bitmap.bits,
				       BNXT_ULP_HDR_BIT_O_ICMP);
	}
	if (proto) {
		if (in_flag) {
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
					    1);
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
					    proto);
		} else {
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
					    1);
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
					    proto);
		}
	}
}

/* Function to handle the parsing of RTE Flow item IPV4 Header. */
int32_t
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
	const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;
	uint8_t proto = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate that there is no third L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			      BNXT_ULP_HDR_BIT_O_ETH) &&
	    !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			      BNXT_ULP_HDR_BIT_I_ETH)) {
		/* Since the F2 flow does not include an eth item, when the
		 * parser detects an IPv4/IPv6 item list that belongs to the
		 * outer header, i.e. o_ipv4/o_ipv6, check if O_ETH and I_ETH
		 * are set. If not set, then add the offset
		 * sizeof(o_eth/oo_vlan/oi_vlan) to the index.
		 * This allows the parser post processor to update the
		 * t_dmac in hdr_field[o_eth.dmac].
		 */
		idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
			BNXT_ULP_PROTO_HDR_VLAN_NUM);
		params->field_idx = idx;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_IPV4_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.version_ihl),
			      ulp_deference_struct(ipv4_mask, hdr.version_ihl),
			      ULP_PRSR_ACT_DEFAULT);

	/*
	 * The TOS field is ignored since OVS sets it as a wildcard match
	 * and it is not supported. This is a workaround and shall be
	 * addressed in the future.
	 */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.type_of_service),
			      ulp_deference_struct(ipv4_mask,
						   hdr.type_of_service),
			      ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.total_length),
			      ulp_deference_struct(ipv4_mask, hdr.total_length),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.packet_id),
			      ulp_deference_struct(ipv4_mask, hdr.packet_id),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.fragment_offset),
			      ulp_deference_struct(ipv4_mask,
						   hdr.fragment_offset),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.time_to_live),
			      ulp_deference_struct(ipv4_mask, hdr.time_to_live),
			      ULP_PRSR_ACT_DEFAULT);

	/* Ignore proto for matching templates */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.next_proto_id),
			      ulp_deference_struct(ipv4_mask,
						   hdr.next_proto_id),
			      ULP_PRSR_ACT_MATCH_IGNORE);
	if (ipv4_spec)
		proto = ipv4_spec->hdr.next_proto_id;

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.hdr_checksum),
			      ulp_deference_struct(ipv4_mask, hdr.hdr_checksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.src_addr),
			      ulp_deference_struct(ipv4_mask, hdr.src_addr),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.dst_addr),
			      ulp_deference_struct(ipv4_mask, hdr.dst_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the ipv4 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
	}

	/* Some of the PMD applications may set the protocol field
	 * in the IPv4 spec but don't set the mask. So, consider
	 * the mask in the proto value calculation.
	 */
	if (ipv4_mask)
		proto &= ipv4_mask->hdr.next_proto_id;

	/* Update the field protocol hdr bitmap */
	ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item IPV6 Header */
int32_t
ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
	const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;
	uint32_t ver_spec = 0, ver_mask = 0;
	uint32_t tc_spec = 0, tc_mask = 0;
	uint32_t lab_spec = 0, lab_mask = 0;
	uint8_t proto = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate that there is no third L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			      BNXT_ULP_HDR_BIT_O_ETH) &&
	    !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			      BNXT_ULP_HDR_BIT_I_ETH)) {
		/* Since the F2 flow does not include an eth item, when the
		 * parser detects an IPv4/IPv6 item list that belongs to the
		 * outer header, i.e. o_ipv4/o_ipv6, check if O_ETH and I_ETH
		 * are set. If not set, then add the offset
		 * sizeof(o_eth/oo_vlan/oi_vlan) to the index.
		 * This allows the parser post processor to update the
		 * t_dmac in hdr_field[o_eth.dmac].
		 */
		idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
			BNXT_ULP_PROTO_HDR_VLAN_NUM);
		params->field_idx = idx;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_IPV6_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
	 * header fields
	 */
	if (ipv6_spec) {
		ver_spec = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
		tc_spec = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
		lab_spec = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
		proto = ipv6_spec->hdr.proto;
	}

	if (ipv6_mask) {
		ver_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
		tc_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
		lab_mask = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);

		/* Some of the PMD applications may set the protocol field
		 * in the IPv6 spec but don't set the mask. So, consider
		 * the mask in the proto value calculation.
		 */
		proto &= ipv6_mask->hdr.proto;
	}

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow);
	ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask,
			      ULP_PRSR_ACT_DEFAULT);
	/*
	 * The TC and flow label fields are ignored since OVS sets them as
	 * wildcard matches and they are not supported. This is a
	 * workaround and shall be addressed in the future.
	 */
	ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask,
			      ULP_PRSR_ACT_MASK_IGNORE);
	ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask,
			      ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.payload_len),
			      ulp_deference_struct(ipv6_mask, hdr.payload_len),
			      ULP_PRSR_ACT_DEFAULT);

	/* Ignore proto for template matching */
	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.proto),
			      ulp_deference_struct(ipv6_mask, hdr.proto),
			      ULP_PRSR_ACT_MATCH_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.hop_limits),
			      ulp_deference_struct(ipv6_mask, hdr.hop_limits),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.src_addr),
			      ulp_deference_struct(ipv6_mask, hdr.src_addr),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.dst_addr),
			      ulp_deference_struct(ipv6_mask, hdr.dst_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the ipv6 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
	}

	/* Update the field protocol hdr bitmap */
	ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);

	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
			     uint16_t dst_port)
{
	if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN))
		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
			       BNXT_ULP_HDR_BIT_T_VXLAN);

	if (ULP_BITMAP_ISSET(param->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_T_VXLAN) ||
	    ULP_BITMAP_ISSET(param->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_T_GRE))
		ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
}
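/*
 * Illustrative sketch (not part of the driver, compiled out): matching
 * "udp dst is 4789" is what flips the VXLAN tunnel bit above. The UDP
 * handler below passes the big-endian destination port to
 * ulp_rte_l4_proto_type_update(), which sets BNXT_ULP_HDR_BIT_T_VXLAN
 * in hdr_fp_bit; the L3_TUN computed field is driven by the tunnel
 * bits already present in hdr_bitmap.
 */
#if 0
static void
example_vxlan_port_detect(struct ulp_rte_parser_params *params)
{
	uint16_t dport = rte_cpu_to_be_16(ULP_UDP_PORT_VXLAN);

	ulp_rte_l4_proto_type_update(params, dport);
	/* hdr_fp_bit now has BNXT_ULP_HDR_BIT_T_VXLAN set */
}
#endif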
/* Function to handle the parsing of RTE Flow item UDP Header. */
int32_t
ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_udp *udp_spec = item->spec;
	const struct rte_flow_item_udp *udp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;
	uint16_t dport = 0, sport = 0;
	uint32_t cnt;

	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (udp_spec) {
		sport = udp_spec->hdr.src_port;
		dport = udp_spec->hdr.dst_port;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_UDP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for udp into hdr_field using udp
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.src_port),
			      ulp_deference_struct(udp_mask, hdr.src_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dst_port),
			      ulp_deference_struct(udp_mask, hdr.dst_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dgram_len),
			      ulp_deference_struct(udp_mask, hdr.dgram_len),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dgram_cksum),
			      ulp_deference_struct(udp_mask, hdr.dgram_cksum),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the udp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
				    (uint32_t)rte_be_to_cpu_16(sport));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
				    (uint32_t)rte_be_to_cpu_16(dport));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
				    1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
				    IPPROTO_UDP);
		if (udp_mask && udp_mask->hdr.src_port)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
					    1);
		if (udp_mask && udp_mask->hdr.dst_port)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
					    1);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
				    (uint32_t)rte_be_to_cpu_16(sport));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
				    (uint32_t)rte_be_to_cpu_16(dport));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
				    1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
				    IPPROTO_UDP);
		if (udp_mask && udp_mask->hdr.src_port)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
					    1);
		if (udp_mask && udp_mask->hdr.dst_port)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
					    1);

		/* Update the field protocol hdr bitmap */
		ulp_rte_l4_proto_type_update(params, dport);
	}
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item TCP Header. */
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_tcp *tcp_spec = item->spec;
	const struct rte_flow_item_tcp *tcp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint16_t dport = 0, sport = 0;
	uint32_t size;
	uint32_t cnt;

	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (tcp_spec) {
		sport = tcp_spec->hdr.src_port;
		dport = tcp_spec->hdr.dst_port;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_TCP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for tcp into hdr_field using tcp
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.src_port),
			      ulp_deference_struct(tcp_mask, hdr.src_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.dst_port),
			      ulp_deference_struct(tcp_mask, hdr.dst_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.sent_seq),
			      ulp_deference_struct(tcp_mask, hdr.sent_seq),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.recv_ack),
			      ulp_deference_struct(tcp_mask, hdr.recv_ack),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.data_off),
			      ulp_deference_struct(tcp_mask, hdr.data_off),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.tcp_flags),
			      ulp_deference_struct(tcp_mask, hdr.tcp_flags),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.rx_win),
			      ulp_deference_struct(tcp_mask, hdr.rx_win),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.cksum),
			      ulp_deference_struct(tcp_mask, hdr.cksum),
			      ULP_PRSR_ACT_DEFAULT);
	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.tcp_urp),
			      ulp_deference_struct(tcp_mask, hdr.tcp_urp),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the tcp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
				    (uint32_t)rte_be_to_cpu_16(sport));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
				    (uint32_t)rte_be_to_cpu_16(dport));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
				    1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
				    IPPROTO_TCP);
		if (tcp_mask && tcp_mask->hdr.src_port)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
					    1);
		if (tcp_mask && tcp_mask->hdr.dst_port)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
					    1);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
				    (uint32_t)rte_be_to_cpu_16(sport));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
				    (uint32_t)rte_be_to_cpu_16(dport));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
				    1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
				    IPPROTO_TCP);
		if (tcp_mask && tcp_mask->hdr.src_port)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
					    1);
		if (tcp_mask && tcp_mask->hdr.dst_port)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
					    1);
	}
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow item Vxlan Header. */
int32_t
ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
	const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_VXLAN_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->flags);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_spec, flags),
			      ulp_deference_struct(vxlan_mask, flags),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd0);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_spec, rsvd0),
			      ulp_deference_struct(vxlan_mask, rsvd0),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->vni);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_spec, vni),
			      ulp_deference_struct(vxlan_mask, vni),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd1);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_spec, rsvd1),
			      ulp_deference_struct(vxlan_mask, rsvd1),
			      ULP_PRSR_ACT_DEFAULT);

	/* Update the hdr_bitmap with vxlan */
	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
	ulp_rte_l4_proto_type_update(params, 0);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item GRE Header. */
int32_t
ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_gre *gre_spec = item->spec;
	const struct rte_flow_item_gre *gre_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_GRE_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(gre_spec, c_rsvd0_ver),
			      ulp_deference_struct(gre_mask, c_rsvd0_ver),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(gre_spec, protocol),
			      ulp_deference_struct(gre_mask, protocol),
			      ULP_PRSR_ACT_DEFAULT);

	/* Update the hdr_bitmap with GRE */
	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
	ulp_rte_l4_proto_type_update(params, 0);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item ANY. */
int32_t
ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow item ICMP Header. */
int32_t
ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_icmp *icmp_spec = item->spec;
	const struct rte_flow_item_icmp *icmp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_type),
			      ulp_deference_struct(icmp_mask, hdr.icmp_type),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_code);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_code),
			      ulp_deference_struct(icmp_mask, hdr.icmp_code),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_cksum),
			      ulp_deference_struct(icmp_mask, hdr.icmp_cksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_ident);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_ident),
			      ulp_deference_struct(icmp_mask, hdr.icmp_ident),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_seq_nb);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_seq_nb),
			      ulp_deference_struct(icmp_mask, hdr.icmp_seq_nb),
			      ULP_PRSR_ACT_DEFAULT);

	/* Update the hdr_bitmap with ICMP */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
	else
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow item ICMP6 Header. */
int32_t
ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_icmp6 *icmp_spec = item->spec;
	const struct rte_flow_item_icmp6 *icmp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, type),
			      ulp_deference_struct(icmp_mask, type),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->code);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, code),
			      ulp_deference_struct(icmp_mask, code),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->checksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, checksum),
			      ulp_deference_struct(icmp_mask, checksum),
			      ULP_PRSR_ACT_DEFAULT);

	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4)) {
		BNXT_TF_DBG(ERR, "Error: incorrect icmp version\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the hdr_bitmap with ICMP */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
	else
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item void Header */
int32_t
ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action void Header. */
int32_t
ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action Mark Header. */
int32_t
ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
			 struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_mark *mark;
	struct ulp_rte_act_bitmap *act = &param->act_bitmap;
	uint32_t mark_id;

	mark = action_item->conf;
	if (mark) {
		mark_id = tfp_cpu_to_be_32(mark->id);
		memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
		       &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);

		/* Update the act_bitmap with mark */
		ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
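/*
 * Illustrative sketch (not part of the driver, compiled out): a MARK
 * action as the handler above consumes it. The id is stored big-endian
 * in the action properties and the MARK action bit is set.
 */
#if 0
static void
example_mark_action(struct ulp_rte_parser_params *params)
{
	struct rte_flow_action_mark mark = { .id = 0x1234 };
	struct rte_flow_action act = {
		.type = RTE_FLOW_ACTION_TYPE_MARK,
		.conf = &mark,
	};

	ulp_rte_mark_act_handler(&act, params);
	/* act_prop now holds the big-endian id; ACT_BIT_MARK is set */
}
#endif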
 */
int32_t
ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
			struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_rss *rss;
	struct ulp_rte_act_prop *ap = &param->act_prop;

	if (action_item == NULL || action_item->conf == NULL) {
		BNXT_TF_DBG(ERR, "Parse Err: invalid rss configuration\n");
		return BNXT_TF_RC_ERROR;
	}

	rss = action_item->conf;
	/* Copy the rss into the specific action properties */
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_TYPES], &rss->types,
	       BNXT_ULP_ACT_PROP_SZ_RSS_TYPES);
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL], &rss->level,
	       BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL);
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN],
	       &rss->key_len, BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN);

	if (rss->key_len > BNXT_ULP_ACT_PROP_SZ_RSS_KEY) {
		BNXT_TF_DBG(ERR, "Parse Err: RSS key too big\n");
		return BNXT_TF_RC_ERROR;
	}
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY], rss->key,
	       rss->key_len);

	/* set the RSS action bit */
	ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);

	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
int32_t
ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
				struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_vxlan_encap *vxlan_encap;
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	struct rte_flow_item_vxlan vxlan_spec;
	uint32_t vlan_num = 0, vlan_size = 0;
	uint32_t ip_size = 0, ip_type = 0;
	uint32_t vxlan_size = 0;
	uint8_t *buff;
	/* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
	const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
					0x00, 0x40, 0x11};
	/* IPv6 header per byte - vtc_flow (4 bytes), payload len (2, zero),
	 * next hdr (UDP), hop limit
	 */
	const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
					0x00, 0x11, 0xf6};
	struct ulp_rte_act_bitmap *act = &params->act_bitmap;
	struct ulp_rte_act_prop *ap = &params->act_prop;
	const uint8_t *tmp_buff;

	vxlan_encap = action_item->conf;
	if (!vxlan_encap) {
		BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
		return BNXT_TF_RC_ERROR;
	}

	item = vxlan_encap->definition;
	if (!item) {
		BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
		return BNXT_TF_RC_ERROR;
	}

	if (!ulp_rte_item_skip_void(&item, 0))
		return BNXT_TF_RC_ERROR;

	/* must have ethernet header */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		BNXT_TF_DBG(ERR, "Parse Error: vxlan encap does not have eth\n");
		return BNXT_TF_RC_ERROR;
	}
	eth_spec = item->spec;
	buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
	ulp_encap_buffer_copy(buff,
			      eth_spec->dst.addr_bytes,
			      BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
			      ULP_BUFFER_ALIGN_8_BYTE);

	buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
	ulp_encap_buffer_copy(buff,
			      eth_spec->src.addr_bytes,
			      BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
			      ULP_BUFFER_ALIGN_8_BYTE);

	/* Goto the next item */
	if (!ulp_rte_item_skip_void(&item, 1))
		return BNXT_TF_RC_ERROR;

	/* May have vlan header */
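	/*
	 * Up to two tags are supported: the first rte_flow_item_vlan is
	 * copied at the start of the ENCAP_VTAG property, a second one right
	 * behind it; the tag count and total size are recorded afterwards in
	 * big-endian form.
	 */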
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_num++;
		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
		ulp_encap_buffer_copy(buff,
				      item->spec,
				      sizeof(struct rte_flow_item_vlan),
				      ULP_BUFFER_ALIGN_8_BYTE);

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	}

	/* may have two vlan headers */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_num++;
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
		       sizeof(struct rte_flow_item_vlan)],
		       item->spec,
		       sizeof(struct rte_flow_item_vlan));
		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	}
	/* Update the vlan count and size if one or more tags were seen */
	if (vlan_num) {
		vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
		vlan_num = tfp_cpu_to_be_32(vlan_num);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
		       &vlan_num,
		       sizeof(uint32_t));
		vlan_size = tfp_cpu_to_be_32(vlan_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
		       &vlan_size,
		       sizeof(uint32_t));
	}

	/* L3 must be IPv4 or IPv6 */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		ipv4_spec = item->spec;
		ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;

		/* copy the ipv4 details */
		if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
					BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
			ulp_encap_buffer_copy(buff,
					      def_ipv4_hdr,
					      BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
					      BNXT_ULP_ENCAP_IPV4_ID_PROTO,
					      ULP_BUFFER_ALIGN_8_BYTE);
		} else {
			/* The total length field of the IP header is ignored. */
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
			tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
			ulp_encap_buffer_copy(buff,
					      tmp_buff,
					      BNXT_ULP_ENCAP_IPV4_ID_PROTO,
					      ULP_BUFFER_ALIGN_8_BYTE);
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
						BNXT_ULP_ENCAP_IPV4_ID_PROTO];
			ulp_encap_buffer_copy(buff,
					      &ipv4_spec->hdr.version_ihl,
					      BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
					      ULP_BUFFER_ALIGN_8_BYTE);
		}

		/* Update the dst ip address in ip encap buffer */
		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
					BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
					BNXT_ULP_ENCAP_IPV4_ID_PROTO];
		ulp_encap_buffer_copy(buff,
				      (const uint8_t *)&ipv4_spec->hdr.dst_addr,
				      sizeof(ipv4_spec->hdr.dst_addr),
				      ULP_BUFFER_ALIGN_8_BYTE);

		/* Update the src ip address */
		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
					BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
					sizeof(ipv4_spec->hdr.src_addr)];
		ulp_encap_buffer_copy(buff,
				      (const uint8_t *)&ipv4_spec->hdr.src_addr,
				      sizeof(ipv4_spec->hdr.src_addr),
				      ULP_BUFFER_ALIGN_8_BYTE);

		/* Update the ip size details */
		ip_size = tfp_cpu_to_be_32(ip_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
		       &ip_size, sizeof(uint32_t));

		/* update the ip type */
		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
		       &ip_type, sizeof(uint32_t));

		/* update the computed field to notify it is ipv4 header */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
				    1);

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
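		/*
		 * This mirrors the IPv4 branch: a default header is used when
		 * the flow supplied no vtc_flow bytes; otherwise the
		 * proto/hop-limit and vtc_flow words are packed into the
		 * encap record, followed by the destination and source
		 * addresses.
		 */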
		ipv6_spec = item->spec;
		ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;

		/* copy the ipv6 details */
		tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
		if (ulp_buffer_is_empty(tmp_buff,
					BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
			ulp_encap_buffer_copy(buff,
					      def_ipv6_hdr,
					      sizeof(def_ipv6_hdr),
					      ULP_BUFFER_ALIGN_8_BYTE);
		} else {
			/* The payload length field of the IP header is ignored. */
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
			tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
			ulp_encap_buffer_copy(buff,
					      tmp_buff,
					      BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
					      ULP_BUFFER_ALIGN_8_BYTE);
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
						BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
						BNXT_ULP_ENCAP_IPV6_DO];
			tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
			ulp_encap_buffer_copy(buff,
					      tmp_buff,
					      BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
					      ULP_BUFFER_ALIGN_8_BYTE);
		}
		/* Update the dst ip address in ip encap buffer */
		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
					sizeof(def_ipv6_hdr)];
		ulp_encap_buffer_copy(buff,
				      (const uint8_t *)ipv6_spec->hdr.dst_addr,
				      sizeof(ipv6_spec->hdr.dst_addr),
				      ULP_BUFFER_ALIGN_8_BYTE);

		/* Update the src ip address */
		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
		ulp_encap_buffer_copy(buff,
				      (const uint8_t *)ipv6_spec->hdr.src_addr,
				      sizeof(ipv6_spec->hdr.src_addr),
				      ULP_BUFFER_ALIGN_16_BYTE);

		/* Update the ip size details */
		ip_size = tfp_cpu_to_be_32(ip_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
		       &ip_size, sizeof(uint32_t));

		/* update the ip type */
		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
		       &ip_type, sizeof(uint32_t));

		/* update the computed field to notify it is ipv6 header */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
				    1);

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	} else {
		BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
		return BNXT_TF_RC_ERROR;
	}

	/* L4 is UDP */
	if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
		BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
		return BNXT_TF_RC_ERROR;
	}
	/* copy the udp details */
	ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
			      item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
			      ULP_BUFFER_ALIGN_8_BYTE);

	if (!ulp_rte_item_skip_void(&item, 1))
		return BNXT_TF_RC_ERROR;

	/* Finally VXLAN */
	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
		BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
		return BNXT_TF_RC_ERROR;
	}
	vxlan_size = sizeof(struct rte_flow_item_vxlan);
	/* copy the vxlan details */
	memcpy(&vxlan_spec, item->spec, vxlan_size);
	vxlan_spec.flags = 0x08;
	buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
	if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
		ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
				      vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
	} else {
		ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
				      vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
		ulp_encap_buffer_copy(buff + (vxlan_size / 2),
				      (const uint8_t *)&vxlan_spec.vni,
				      vxlan_size / 2,
				      ULP_BUFFER_ALIGN_8_BYTE);
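		/*
		 * Note: the 8-byte VXLAN header is written above in two
		 * 4-byte halves (flags/reserved, then the VNI), presumably so
		 * each half lands on its own alignment boundary in the IPv6
		 * tunnel encap record, unlike the single copy used for IPv4.
		 */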
	}
	vxlan_size = tfp_cpu_to_be_32(vxlan_size);
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
	       &vxlan_size, sizeof(uint32_t));

	/* update the act_bitmap with vxlan encap */
	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action vxlan_decap Header */
int32_t
ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
				__rte_unused,
				struct ulp_rte_parser_params *params)
{
	/* update the act_bitmap with vxlan decap */
	ULP_BITMAP_SET(params->act_bitmap.bits,
		       BNXT_ULP_ACT_BIT_VXLAN_DECAP);
	/* Update computational field with tunnel decap info */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action drop Header. */
int32_t
ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with drop */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action count. */
int32_t
ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_count *act_count;
	struct ulp_rte_act_prop *act_prop = &params->act_prop;

	act_count = action_item->conf;
	if (act_count) {
		if (act_count->shared) {
			BNXT_TF_DBG(ERR,
				    "Parse Error: Shared count not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
		       &act_count->id,
		       BNXT_ULP_ACT_PROP_SZ_COUNT);
	}

	/* Update the act_bitmap with count */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of action ports.
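 * For egress flows the vport of the destination interface is written into
 * the VPORT action property; for ingress flows the default VNIC of the
 * interface is used instead, with VF representors resolving to the VF's own
 * function VNIC.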
 */
static int32_t
ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
			    uint32_t ifindex)
{
	enum bnxt_ulp_direction_type dir;
	uint16_t pid_s;
	uint32_t pid;
	struct ulp_rte_act_prop *act = &param->act_prop;
	enum bnxt_ulp_intf_type port_type;
	uint32_t vnic_type;

	/* Get the direction */
	dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_EGRESS) {
		/* For egress direction, fill vport */
		if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
			return BNXT_TF_RC_ERROR;

		pid = pid_s;
		pid = rte_cpu_to_be_32(pid);
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
	} else {
		/* For ingress direction, fill vnic */
		port_type = ULP_COMP_FLD_IDX_RD(param,
						BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
			vnic_type = BNXT_ULP_VF_FUNC_VNIC;
		else
			vnic_type = BNXT_ULP_DRV_FUNC_VNIC;

		if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
						 vnic_type, &pid_s))
			return BNXT_TF_RC_ERROR;

		pid = pid_s;
		pid = rte_cpu_to_be_32(pid);
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
	}

	/* Update the action port set bit */
	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action PF. */
int32_t
ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
		       struct ulp_rte_parser_params *params)
{
	uint32_t port_id;
	uint32_t ifindex;
	enum bnxt_ulp_intf_type intf_type;

	/* Get the port id of the current device */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

	/* Get the port db ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "Invalid port id\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Check the port is PF port */
	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
		BNXT_TF_DBG(ERR, "Port is not a PF port\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Update the action properties */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
	return ulp_rte_parser_act_port_set(params, ifindex);
}

/* Function to handle the parsing of RTE Flow action VF. */
int32_t
ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
		       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_vf *vf_action;
	enum bnxt_ulp_intf_type intf_type;
	uint32_t ifindex;
	struct bnxt *bp;

	vf_action = action_item->conf;
	if (!vf_action) {
		BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	if (vf_action->original) {
		BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	bp = bnxt_get_bp(params->port_id);
	if (bp == NULL) {
		BNXT_TF_DBG(ERR, "Invalid bp\n");
		return BNXT_TF_RC_ERROR;
	}

	/* vf_action->id is a logical number which in this case is an
	 * offset from the first VF. So, to get the absolute VF id, the
	 * offset must be added to the absolute first vf id of that port.
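	 * For example (hypothetical numbers): with a first_vf_id of 64 and a
	 * vf_action->id of 2, the port database is queried for function id 66.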
	 */
	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
						 bp->first_vf_id +
						 vf_action->id,
						 &ifindex)) {
		BNXT_TF_DBG(ERR, "VF is not a valid interface\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Check the port is VF port */
	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
	    intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
		BNXT_TF_DBG(ERR, "Port is not a VF port\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the action properties */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
	return ulp_rte_parser_act_port_set(params, ifindex);
}

/* Function to handle the parsing of RTE Flow action port_id. */
int32_t
ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
			    struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_port_id *port_id = act_item->conf;
	uint32_t ifindex;
	enum bnxt_ulp_intf_type intf_type;

	if (!port_id) {
		BNXT_TF_DBG(ERR,
			    "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	if (port_id->original) {
		BNXT_TF_DBG(ERR,
			    "ParseErr:Portid Original not supported\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* Get the port db ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "Invalid port id\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get the intf type */
	intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
	if (!intf_type) {
		BNXT_TF_DBG(ERR, "Invalid port type\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Set the action port */
	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
	return ulp_rte_parser_act_port_set(param, ifindex);
}

/* Function to handle the parsing of RTE Flow action phy_port. */
int32_t
ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
			     struct ulp_rte_parser_params *prm)
{
	const struct rte_flow_action_phy_port *phy_port;
	uint32_t pid;
	int32_t rc;
	uint16_t pid_s;
	enum bnxt_ulp_direction_type dir;

	phy_port = action_item->conf;
	if (!phy_port) {
		BNXT_TF_DBG(ERR,
			    "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	if (phy_port->original) {
		BNXT_TF_DBG(ERR,
			    "Parse Err:Port Original not supported\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir != BNXT_ULP_DIR_EGRESS) {
		BNXT_TF_DBG(ERR,
			    "Parse Err:Phy ports are valid only for egress\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	/* Get the physical port details from port db */
	rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
					    &pid_s);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to get port details\n");
		return -EINVAL;
	}

	pid = pid_s;
	pid = rte_cpu_to_be_32(pid);
	memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
	       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);

	/* Update the action port set bit */
	ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
	ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
			    BNXT_ULP_INTF_TYPE_PHY_PORT);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action pop vlan.
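 * No action properties are needed here; the handler only sets
 * BNXT_ULP_ACT_BIT_POP_VLAN in the action bitmap.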
 */
int32_t
ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
				struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with pop */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action push vlan. */
int32_t
ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
				 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_of_push_vlan *push_vlan;
	uint16_t ethertype;
	struct ulp_rte_act_prop *act = &params->act_prop;

	push_vlan = action_item->conf;
	if (push_vlan) {
		ethertype = push_vlan->ethertype;
		if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
			BNXT_TF_DBG(ERR,
				    "Parse Err: Ethertype not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
		       &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
		/* Update the act_bitmap with push vlan */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_PUSH_VLAN);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set vlan id. */
int32_t
ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
				    struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
	uint32_t vid;
	struct ulp_rte_act_prop *act = &params->act_prop;

	vlan_vid = action_item->conf;
	if (vlan_vid && vlan_vid->vlan_vid) {
		vid = vlan_vid->vlan_vid;
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
		       &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
		/* Update the act_bitmap with set vlan vid */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_VLAN_VID);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set vlan pcp.
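 * Example (illustrative): an OF_SET_VLAN_PCP action with vlan_pcp = 5
 * stores the single byte 0x05 in the SET_VLAN_PCP action property and sets
 * BNXT_ULP_ACT_BIT_SET_VLAN_PCP.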
 */
int32_t
ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
				    struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
	uint8_t pcp;
	struct ulp_rte_act_prop *act = &params->act_prop;

	vlan_pcp = action_item->conf;
	if (vlan_pcp) {
		pcp = vlan_pcp->vlan_pcp;
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
		       &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
		/* Update the act_bitmap with set vlan pcp */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set ipv4 src.*/
int32_t
ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
				 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_ipv4 *set_ipv4;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_ipv4 = action_item->conf;
	if (set_ipv4) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
		/* Update the act_bitmap with set ipv4 src */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
int32_t
ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
				 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_ipv4 *set_ipv4;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_ipv4 = action_item->conf;
	if (set_ipv4) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
		/* Update the act_bitmap with set ipv4 dst */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_IPV4_DST);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set tp src.*/
int32_t
ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
			       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_tp *set_tp;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_tp = action_item->conf;
	if (set_tp) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
		/* Update the act_bitmap with set tp src */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_TP_SRC);
		return BNXT_TF_RC_SUCCESS;
	}

	BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set tp dst.*/
int32_t
ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
			       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_tp *set_tp;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_tp = action_item->conf;
	if (set_tp) {
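		/*
		 * set_tp->port is big-endian (rte_be16_t) in rte_flow, so it
		 * is copied into the action property without conversion.
		 */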
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
		/* Update the act_bitmap with set tp dst */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_TP_DST);
		return BNXT_TF_RC_SUCCESS;
	}

	BNXT_TF_DBG(ERR, "Parse Error: set tp dst arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action dec ttl.*/
int32_t
ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
			    struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with dec ttl */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action JUMP */
int32_t
ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with jump */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
	return BNXT_TF_RC_SUCCESS;
}

int32_t
ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
			   struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_sample *sample;
	int ret;

	sample = action_item->conf;

	/* if SAMPLE bit is set it means this sample action is nested within
	 * the actions of another sample action; this is not allowed
	 */
	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
			     BNXT_ULP_ACT_BIT_SAMPLE))
		return BNXT_TF_RC_ERROR;

	/* a sample action is only allowed as a shared action */
	if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
			      BNXT_ULP_ACT_BIT_SHARED))
		return BNXT_TF_RC_ERROR;

	/* only a ratio of 1, i.e. 100%, is supported */
	if (sample->ratio != 1)
		return BNXT_TF_RC_ERROR;

	if (!sample->actions)
		return BNXT_TF_RC_ERROR;

	/* parse the nested actions for a sample action */
	ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
	if (ret == BNXT_TF_RC_SUCCESS)
		/* Update the act_bitmap with sample */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SAMPLE);

	return ret;
}
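/*
 * Illustrative usage sketch (an assumption of this edit, not driver code):
 * the two parser entry points are driven from the flow-create path with the
 * application's pattern and action arrays, roughly as
 *
 *	struct ulp_rte_parser_params params = { 0 };
 *
 *	params.ulp_ctx = ulp_ctx;	(context setup is assumed here)
 *	if (bnxt_ulp_rte_parser_hdr_parse(pattern, &params) ==
 *	    BNXT_TF_RC_SUCCESS &&
 *	    bnxt_ulp_rte_parser_act_parse(actions, &params) ==
 *	    BNXT_TF_RC_SUCCESS)
 *		the populated hdr_bitmap, act_bitmap, field table and action
 *		properties are then handed to the matcher and mapper.
 */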