/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2023 Broadcom
 * All rights reserved.
 */

#include "bnxt.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "bnxt_ulp.h"
#include "bnxt_tf_common.h"
#include "bnxt_tf_pmd_shim.h"
#include "ulp_rte_parser.h"
#include "ulp_matcher.h"
#include "ulp_utils.h"
#include "tfp.h"
#include "ulp_port_db.h"
#include "ulp_flow_db.h"
#include "ulp_mapper.h"
#include "ulp_tun.h"
#include "ulp_template_db_tbl.h"

/* Local defines for the parsing functions */
#define ULP_VLAN_PRIORITY_SHIFT		13 /* First 3 bits */
#define ULP_VLAN_PRIORITY_MASK		0x700
#define ULP_VLAN_TAG_MASK		0xFFF /* Last 12 bits */
#define ULP_UDP_PORT_VXLAN		4789
#define ULP_UDP_PORT_VXLAN_MASK		0xFFFF
#define ULP_UDP_PORT_VXLAN_GPE		4790
#define ULP_UDP_PORT_VXLAN_GPE_MASK	0xFFFF

/* Utility function to skip the void items. */
static inline int32_t
ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
{
	if (!*item)
		return 0;
	if (increment)
		(*item)++;
	while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
		(*item)++;
	if (*item)
		return 1;
	return 0;
}

/* Utility function to copy field spec items */
static struct ulp_rte_hdr_field *
ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
			const void *buffer,
			uint32_t size)
{
	field->size = size;
	memcpy(field->spec, buffer, field->size);
	field++;
	return field;
}

/* Utility function to update the field_bitmap */
static void
ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
				   uint32_t idx,
				   enum bnxt_ulp_prsr_action prsr_act)
{
	struct ulp_rte_hdr_field *field;

	field = &params->hdr_field[idx];
	if (ulp_bitmap_notzero(field->mask, field->size)) {
		ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
		if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
			ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
		/* Not exact match */
		if (!ulp_bitmap_is_ones(field->mask, field->size))
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_WC_MATCH, 1);
	} else {
		ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
	}
}

#define ulp_deference_struct(x, y)	((x) ? &((x)->y) : NULL)
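/*
 * Editor's note (illustrative only, not driver code): the macro above is
 * NULL-safe access to an optional spec/mask struct. For example, with a
 * possibly-NULL item mask:
 *
 *	const struct rte_flow_item_eth *eth_mask = item->mask;
 *	const void *p = ulp_deference_struct(eth_mask, hdr.ether_type);
 *
 * p is &eth_mask->hdr.ether_type when eth_mask is non-NULL, else NULL,
 * which matches what ulp_rte_prsr_fld_mask() expects for an absent mask.
 */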
/* Utility function to copy field spec and mask items */
static void
ulp_rte_prsr_fld_mask(struct ulp_rte_parser_params *params,
		      uint32_t *idx,
		      uint32_t size,
		      const void *spec_buff,
		      const void *mask_buff,
		      enum bnxt_ulp_prsr_action prsr_act)
{
	struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];

	/* update the field size */
	field->size = size;

	/* copy the mask specifications only if mask is not null */
	if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff) {
		memcpy(field->mask, mask_buff, size);
		ulp_rte_parser_field_bitmap_update(params, *idx, prsr_act);
	}

	/* copy the protocol specifications only if the mask is non-zero */
	if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size))
		memcpy(field->spec, spec_buff, size);

	/* Increment the index */
	*idx = *idx + 1;
}

/* Utility function to validate the field size before the copy */
static int32_t
ulp_rte_prsr_fld_size_validate(struct ulp_rte_parser_params *params,
			       uint32_t *idx,
			       uint32_t size)
{
	if (params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX) {
		BNXT_TF_DBG(ERR, "OOB for field processing %u\n", *idx);
		return -EINVAL;
	}
	*idx = params->field_idx;
	params->field_idx += size;
	return 0;
}

/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow items into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item *item = pattern;
	struct bnxt_ulp_rte_hdr_info *hdr_info;

	params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;

	/* Parse all the items in the pattern */
	while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->type >= (typeof(item->type))
		    BNXT_RTE_FLOW_ITEM_TYPE_END) {
			if (item->type >=
			    (typeof(item->type))BNXT_RTE_FLOW_ITEM_TYPE_LAST)
				goto hdr_parser_error;
			/* get the header information */
			hdr_info = &ulp_vendor_hdr_info[item->type -
				BNXT_RTE_FLOW_ITEM_TYPE_END];
		} else {
			if (item->type > RTE_FLOW_ITEM_TYPE_ECPRI)
				goto hdr_parser_error;
			hdr_info = &ulp_hdr_info[item->type];
		}
		if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
			goto hdr_parser_error;
		} else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_hdr_func) {
				if (hdr_info->proto_hdr_func(item, params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		item++;
	}
	/* update the implied SVIF */
	return ulp_rte_parser_implicit_match_port_process(params);

hdr_parser_error:
	BNXT_TF_DBG(ERR, "Truflow parser does not support type %d\n",
		    item->type);
	return BNXT_TF_RC_PARSE_ERR;
}
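/*
 * Editor's sketch, illustrative only: how a caller might feed a minimal
 * ETH + IPV4 pattern through bnxt_ulp_rte_parser_hdr_parse(). The
 * ULP_PARSER_EXAMPLE guard is hypothetical and never defined, and the
 * params setup is elided; this is not part of the driver.
 */
#ifdef ULP_PARSER_EXAMPLE
static int32_t
ulp_parser_example_hdr_walk(struct ulp_rte_parser_params *params)
{
	struct rte_flow_item_eth eth_mask = { .hdr.ether_type = 0xffff };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	/* Each supported item invokes its registered proto_hdr_func */
	return bnxt_ulp_rte_parser_hdr_parse(pattern, params);
}
#endif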
/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow actions into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action *action_item = actions;
	struct bnxt_ulp_rte_act_info *hdr_info;

	/* Parse all the actions in the list */
	while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
		if (action_item->type >=
		    (typeof(action_item->type))BNXT_RTE_FLOW_ACTION_TYPE_END) {
			if (action_item->type >=
			    (typeof(action_item->type))BNXT_RTE_FLOW_ACTION_TYPE_LAST)
				goto act_parser_error;
			/* get the action information from the bnxt act info table */
			hdr_info = &ulp_vendor_act_info[action_item->type -
				BNXT_RTE_FLOW_ACTION_TYPE_END];
		} else {
			if (action_item->type > RTE_FLOW_ACTION_TYPE_INDIRECT)
				goto act_parser_error;
			/* get the action information from the act info table */
			hdr_info = &ulp_act_info[action_item->type];
		}
		if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
			goto act_parser_error;
		} else if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_act_func) {
				if (hdr_info->proto_act_func(action_item,
							     params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		action_item++;
	}
	/* update the implied port details */
	ulp_rte_parser_implicit_act_port_process(params);
	return BNXT_TF_RC_SUCCESS;

act_parser_error:
	BNXT_TF_DBG(ERR, "Truflow parser does not support act %u\n",
		    action_item->type);
	return BNXT_TF_RC_ERROR;
}

/*
 * Function to handle the post processing of the computed
 * fields for the interface.
 */
static void
bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
{
	uint32_t ifindex;
	uint16_t port_id, parif, svif;
	uint32_t mtype;
	enum bnxt_ulp_direction_type dir;

	/* get the direction details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);

	/* read the port id details */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return;
	}

	if (dir == BNXT_ULP_DIR_INGRESS) {
		/* Set port PARIF */
		if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
					  BNXT_ULP_DRV_FUNC_PARIF, &parif)) {
			BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
			return;
		}
		/* Note:
		 * We save the drv_func_parif into the CF_IDX of phy_port_parif,
		 * since that index is currently referenced by ingress templates
		 * for datapath flows. If in the future we change the parser to
		 * save it in the CF_IDX of drv_func_parif, we also need to
		 * update the templates.
		 * WARNING: Two VFs on the same parent PF will not work, as the
		 * parif is based on the fw fid of the parent PF.
		 */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
				    parif);
		/* Set port SVIF */
		if (ulp_port_db_svif_get(params->ulp_ctx, ifindex,
					 BNXT_ULP_PHY_PORT_SVIF, &svif)) {
			BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
			return;
		}
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_SVIF,
				    svif);
	} else {
		/* Get the match port type */
		mtype = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
		if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
					    1);
			/* Set VF func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_VF_FUNC_PARIF,
						  &parif)) {
				BNXT_TF_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
					    parif);

		} else {
			/* Set DRV func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_DRV_FUNC_PARIF,
						  &parif)) {
				BNXT_TF_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
					    parif);
		}
		if (mtype == BNXT_ULP_INTF_TYPE_PF) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
					    1);
		}
	}
}

static int32_t
ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_intf_type match_port_type, act_port_type;
	enum bnxt_ulp_direction_type dir;
	uint32_t act_port_set;

	/* Get the computed details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
	act_port_type = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
	act_port_set = ULP_COMP_FLD_IDX_RD(params,
					   BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);

	/* set the flow direction in the proto and action header */
	if (dir == BNXT_ULP_DIR_EGRESS) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
	}

	/* Evaluate the VF to VF flag */
	if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
		if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
				      BNXT_ULP_ACT_BIT_MULTIPLE_PORT)) {
			ULP_BITMAP_SET(params->act_bitmap.bits,
				       BNXT_ULP_ACT_BIT_VF_TO_VF);
		} else {
			if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_MP_A_IS_VFREP) &&
			    ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_MP_B_IS_VFREP))
				ULP_BITMAP_SET(params->act_bitmap.bits,
					       BNXT_ULP_ACT_BIT_VF_TO_VF);
			else
				ULP_BITMAP_RESET(params->act_bitmap.bits,
						 BNXT_ULP_ACT_BIT_VF_TO_VF);
		}
	}

	/* Update the decrement ttl computational fields */
	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
			     BNXT_ULP_ACT_BIT_DEC_TTL)) {
		/*
		 * If the vxlan proto is included and the vxlan decap
		 * action is not set, then decrement the tunnel ttl.
		 * Similarly add GRE and NVGRE in future.
		 */
		if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
				      BNXT_ULP_HDR_BIT_T_VXLAN) &&
		     !ULP_BITMAP_ISSET(params->act_bitmap.bits,
				       BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
		} else {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
		}
	}

	/* Merge the hdr_fp_bit into the proto header bit */
	params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;

	/* Update the comp fld fid */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);

	/* Update the computed interface parameters */
	bnxt_ulp_comp_fld_intf_update(params);

	/* TBD: Handle the flow rejection scenarios */
	return 0;
}

/*
 * Function to handle the post processing of the parsing details
 */
void
bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
{
	ulp_post_process_normal_flow(params);
}

/*
 * Function to compute the flow direction based on the match port details
 */
static void
bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_intf_type match_port_type;

	/* Get the match port type */
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);

	/* If ingress flow and match port is a vf rep then dir is egress */
	if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
				    BNXT_ULP_DIR_EGRESS);
	} else {
		/* Assign the input direction */
		if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
					    BNXT_ULP_DIR_INGRESS);
		else
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
					    BNXT_ULP_DIR_EGRESS);
	}
}

/* Function to handle the parsing of RTE Flow item PF Header. */
static int32_t
ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
			uint32_t ifindex,
			uint16_t mask,
			enum bnxt_ulp_direction_type item_dir)
{
	uint16_t svif;
	enum bnxt_ulp_direction_type dir;
	struct ulp_rte_hdr_field *hdr_field;
	enum bnxt_ulp_svif_type svif_type;
	enum bnxt_ulp_intf_type port_type;

	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
	    BNXT_ULP_INVALID_SVIF_VAL) {
		BNXT_TF_DBG(ERR,
			    "SVIF already set, multiple sources not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get port type details */
	port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
		BNXT_TF_DBG(ERR, "Invalid port type\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the match port type */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);

	/* compute the direction */
	bnxt_ulp_rte_parser_direction_compute(params);

	/* Get the computed direction */
	dir = (item_dir != BNXT_ULP_DIR_INVALID) ? item_dir :
		ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_INGRESS &&
	    port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
		svif_type = BNXT_ULP_PHY_PORT_SVIF;
	} else {
		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
		    item_dir != BNXT_ULP_DIR_EGRESS)
			svif_type = BNXT_ULP_VF_FUNC_SVIF;
		else
			svif_type = BNXT_ULP_DRV_FUNC_SVIF;
	}
	ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type, &svif);
	svif = rte_cpu_to_be_16(svif);
	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
	memcpy(hdr_field->spec, &svif, sizeof(svif));
	memcpy(hdr_field->mask, &mask, sizeof(mask));
	hdr_field->size = sizeof(svif);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
			    rte_be_to_cpu_16(svif));
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of the RTE port id */
int32_t
ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
{
	uint16_t port_id = 0;
	uint16_t svif_mask = 0xFFFF;
	uint32_t ifindex;
	int32_t rc = BNXT_TF_RC_ERROR;

	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
	    BNXT_ULP_INVALID_SVIF_VAL)
		return BNXT_TF_RC_SUCCESS;

	/* SVIF not set. So get the port id */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}

	/* Update the SVIF details */
	rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask,
				     BNXT_ULP_DIR_INVALID);
	return rc;
}

/* Function to handle the implicit action port id */
int32_t
ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
{
	struct rte_flow_action action_item = {0};
	struct rte_flow_action_port_id port_id = {0};

	/* Read the action port set bit */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
		/* Already set, so just exit */
		return BNXT_TF_RC_SUCCESS;
	}
	port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
	action_item.type = RTE_FLOW_ACTION_TYPE_PORT_ID;
	action_item.conf = &port_id;

	/* Update the action port based on incoming port */
	ulp_rte_port_act_handler(&action_item, params);

	/* Reset the action port set bit */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
	return BNXT_TF_RC_SUCCESS;
}
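/*
 * Editor's sketch, illustrative only: a REPRESENTED_PORT item as
 * ulp_rte_port_hdr_handler() below expects it, with both spec and mask
 * present. The ULP_PARSER_EXAMPLE guard is hypothetical and never
 * defined; ethdev port 0 is an arbitrary example value.
 */
#ifdef ULP_PARSER_EXAMPLE
static void
ulp_parser_example_port_item(struct ulp_rte_parser_params *params)
{
	struct rte_flow_item_ethdev spec = { .port_id = 0 };
	struct rte_flow_item_ethdev mask = { .port_id = 0xffff };
	struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
		.spec = &spec,
		.mask = &mask,
	};

	/* Resolves the ethdev id to a bnxt ifindex and programs the SVIF */
	ulp_rte_port_hdr_handler(&item, params);
}
#endif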
/* Parse items PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
int32_t
ulp_rte_port_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_direction_type item_dir;
	uint16_t ethdev_id;
	uint16_t mask = 0;
	uint32_t ifindex;
	int32_t rc = BNXT_TF_RC_PARSE_ERR;

	if (!item->spec) {
		BNXT_TF_DBG(ERR, "ParseErr:Port spec is not valid\n");
		return rc;
	}
	if (!item->mask) {
		BNXT_TF_DBG(ERR, "ParseErr:Port mask is not valid\n");
		return rc;
	}

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_PORT_ID: {
		const struct rte_flow_item_port_id *port_spec = item->spec;
		const struct rte_flow_item_port_id *port_mask = item->mask;

		item_dir = BNXT_ULP_DIR_INVALID;
		ethdev_id = port_spec->id;
		mask = port_mask->id;

		if (!port_mask->id) {
			ULP_BITMAP_SET(params->hdr_bitmap.bits,
				       BNXT_ULP_HDR_BIT_SVIF_IGNORE);
			mask = 0xff;
		}
		break;
	}
	case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR: {
		const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
		const struct rte_flow_item_ethdev *ethdev_mask = item->mask;

		item_dir = BNXT_ULP_DIR_INGRESS;
		ethdev_id = ethdev_spec->port_id;
		mask = ethdev_mask->port_id;
		break;
	}
	case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT: {
		const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
		const struct rte_flow_item_ethdev *ethdev_mask = item->mask;

		item_dir = BNXT_ULP_DIR_EGRESS;
		ethdev_id = ethdev_spec->port_id;
		mask = ethdev_mask->port_id;
		break;
	}
	default:
		BNXT_TF_DBG(ERR, "ParseErr:Unexpected item\n");
		return rc;
	}

	/* perform the conversion from dpdk port to bnxt ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      ethdev_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}
	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, mask, item_dir);
}

/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
			     uint16_t type, uint32_t in_flag,
			     uint32_t has_vlan, uint32_t has_vlan_mask)
{
#define ULP_RTE_ETHER_TYPE_ROE	0xfc3d

	if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_IPV4);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_IPV4);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
		}
	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_IPV6);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_IPV6);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
		}
	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
		has_vlan_mask = 1;
		has_vlan = 1;
	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_ECPRI)) {
		/* Update the hdr_bitmap with eCPRI */
		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
			       BNXT_ULP_HDR_BIT_O_ECPRI);
	} else if (type == tfp_cpu_to_be_16(ULP_RTE_ETHER_TYPE_ROE)) {
		/* Update the hdr_bitmap with RoE */
		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
			       BNXT_ULP_HDR_BIT_O_ROE);
	}
	if (has_vlan_mask) {
		if (in_flag) {
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_HAS_VTAG,
					    has_vlan);
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_I_VLAN_NO_IGNORE,
					    1);
		} else {
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_HAS_VTAG,
					    has_vlan);
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_O_VLAN_NO_IGNORE,
					    1);
		}
	}
}

/* Internal Function to identify broadcast or multicast packets */
static int32_t
ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
{
	if (rte_is_multicast_ether_addr(eth_addr) ||
	    rte_is_broadcast_ether_addr(eth_addr)) {
		BNXT_TF_DBG(DEBUG,
			    "No support for bcast or mcast addr offload\n");
		return 1;
	}
	return 0;
}

/* Function to handle the parsing of RTE Flow item Ethernet Header. */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_eth *eth_spec = item->spec;
	const struct rte_flow_item_eth *eth_mask = item->mask;
	uint32_t idx = 0, dmac_idx = 0;
	uint32_t size;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;
	uint32_t has_vlan = 0, has_vlan_mask = 0;

	/* Perform validations */
	if (eth_spec) {
		/* Avoid multicast and broadcast addr */
		if (!ULP_APP_BC_MC_SUPPORT(params->ulp_ctx) &&
		    ulp_rte_parser_is_bcmc_addr(&eth_spec->hdr.dst_addr))
			return BNXT_TF_RC_PARSE_ERR;

		if (!ULP_APP_BC_MC_SUPPORT(params->ulp_ctx) &&
		    ulp_rte_parser_is_bcmc_addr(&eth_spec->hdr.src_addr))
			return BNXT_TF_RC_PARSE_ERR;

		eth_type = eth_spec->hdr.ether_type;
		has_vlan = eth_spec->has_vlan;
	}
	if (eth_mask) {
		eth_type &= eth_mask->hdr.ether_type;
		has_vlan_mask = eth_mask->has_vlan;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ETH_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}
	/*
	 * Copy the rte_flow_item for eth into hdr_field using ethernet
	 * header fields
	 */
	dmac_idx = idx;
	size = sizeof(((struct rte_flow_item_eth *)NULL)->hdr.dst_addr.addr_bytes);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, hdr.dst_addr.addr_bytes),
			      ulp_deference_struct(eth_mask, hdr.dst_addr.addr_bytes),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_eth *)NULL)->hdr.src_addr.addr_bytes);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, hdr.src_addr.addr_bytes),
			      ulp_deference_struct(eth_mask, hdr.src_addr.addr_bytes),
			      ULP_PRSR_ACT_DEFAULT);
	size = sizeof(((struct rte_flow_item_eth *)NULL)->hdr.ether_type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, hdr.ether_type),
			      ulp_deference_struct(eth_mask, hdr.ether_type),
			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MATCH_IGNORE);

	/* Update the protocol hdr bitmap */
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_ETH) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID,
				    dmac_idx);
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag,
				     has_vlan, has_vlan_mask);

	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item Vlan Header. */
int32_t
ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vlan *vlan_spec = item->spec;
	const struct rte_flow_item_vlan *vlan_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bit;
	uint32_t idx = 0;
	uint16_t vlan_tag = 0, priority = 0;
	uint16_t vlan_tag_mask = 0, priority_mask = 0;
	uint32_t outer_vtag_num;
	uint32_t inner_vtag_num;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;
	uint32_t size;

	if (vlan_spec) {
		vlan_tag = ntohs(vlan_spec->hdr.vlan_tci);
		priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag &= ULP_VLAN_TAG_MASK;
		vlan_tag = htons(vlan_tag);
		eth_type = vlan_spec->hdr.eth_proto;
	}

	if (vlan_mask) {
		vlan_tag_mask = ntohs(vlan_mask->hdr.vlan_tci);
		priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag_mask &= 0xfff;

		/*
		 * The storage for the priority and VLAN tag is two bytes.
		 * If the 3-bit priority mask is all ones, set the remaining
		 * 13 bits to ones as well, so that it is treated as an
		 * exact match.
		 */
		if (priority_mask == ULP_VLAN_PRIORITY_MASK)
			priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
		if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
			vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
		vlan_tag_mask = htons(vlan_tag_mask);
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_S_VLAN_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for vlan into hdr_field using Vlan
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_vlan *)NULL)->hdr.vlan_tci);
	/*
	 * The priority field is ignored since OVS sets it as a wild card
	 * match and it is not supported. This is a workaround and shall
	 * be addressed in the future.
	 */
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      &priority,
			      (vlan_mask) ? &priority_mask : NULL,
			      ULP_PRSR_ACT_MASK_IGNORE);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      &vlan_tag,
			      (vlan_mask) ? &vlan_tag_mask : NULL,
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vlan *)NULL)->hdr.eth_proto);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vlan_spec, hdr.eth_proto),
			      ulp_deference_struct(vlan_mask, hdr.eth_proto),
			      ULP_PRSR_ACT_MATCH_IGNORE);

	/* Get the outer tag and inner tag counts */
	outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_O_VTAG_NUM);
	inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_I_VTAG_NUM);

	/* Update the hdr_bitmap of the vlans */
	hdr_bit = &params->hdr_bitmap;
	if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
	    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
	    !outer_vtag_num) {
		/* Update the vlan tag num */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_HAS_VTAG, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OO_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_OO_VLAN_FB_VID, 1);

	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   outer_vtag_num == 1) {
		/* update the vlan tag num */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OI_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_OI_VLAN_FB_VID, 1);

	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   !inner_vtag_num) {
		/* update the vlan tag num */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_HAS_VTAG, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_IO_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_IO_VLAN_FB_VID, 1);
		inner_flag = 1;
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   inner_vtag_num == 1) {
		/* update the vlan tag num */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_II_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_II_VLAN_FB_VID, 1);
		inner_flag = 1;
	} else {
		BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag, 1, 1);
	return BNXT_TF_RC_SUCCESS;
}
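/*
 * Editor's worked example (comment only): for a TCI spec of 0xE07B on a
 * little-endian host, ntohs() gives vlan_tag = 0xE07B; the priority is
 * 0xE07B >> 13 = 0x7 (PCP 7) and the VID is 0xE07B & 0xFFF = 0x07B.
 * A full 3-bit priority mask is stored as htons(0x7) = 0x0700 on such a
 * host, which is the byte order ULP_VLAN_PRIORITY_MASK (0x700) above is
 * compared against.
 */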
/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
			     uint8_t proto, uint32_t in_flag)
{
	if (proto == IPPROTO_UDP) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_UDP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_UDP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
		}
	} else if (proto == IPPROTO_TCP) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_TCP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_TCP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
		}
	} else if (proto == IPPROTO_GRE) {
		ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
	} else if (proto == IPPROTO_ICMP) {
		if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN))
			ULP_BITMAP_SET(param->hdr_bitmap.bits,
				       BNXT_ULP_HDR_BIT_I_ICMP);
		else
			ULP_BITMAP_SET(param->hdr_bitmap.bits,
				       BNXT_ULP_HDR_BIT_O_ICMP);
	}

	if (in_flag) {
		ULP_COMP_FLD_IDX_WR(param,
				    BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
				    1);
		ULP_COMP_FLD_IDX_WR(param,
				    BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
				    proto);
	} else {
		ULP_COMP_FLD_IDX_WR(param,
				    BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
				    1);
		ULP_COMP_FLD_IDX_WR(param,
				    BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
				    proto);
	}
}

/* Function to handle the parsing of RTE Flow item IPV4 Header. */
int32_t
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
	const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0, dip_idx = 0;
	uint32_t size;
	uint8_t proto = 0;
	uint8_t proto_mask = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate that there is no third L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_IPV4_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.version_ihl),
			      ulp_deference_struct(ipv4_mask, hdr.version_ihl),
			      ULP_PRSR_ACT_DEFAULT);
	/*
	 * The tos field is ignored since OVS sets it as a wild card match
	 * and it is not supported. An application can enable tos support.
	 */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.type_of_service),
			      ulp_deference_struct(ipv4_mask,
						   hdr.type_of_service),
			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.total_length),
			      ulp_deference_struct(ipv4_mask, hdr.total_length),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.packet_id),
			      ulp_deference_struct(ipv4_mask, hdr.packet_id),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.fragment_offset),
			      ulp_deference_struct(ipv4_mask,
						   hdr.fragment_offset),
			      ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.time_to_live),
			      ulp_deference_struct(ipv4_mask, hdr.time_to_live),
			      ULP_PRSR_ACT_DEFAULT);

	/* Ignore proto for matching templates */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.next_proto_id),
			      ulp_deference_struct(ipv4_mask,
						   hdr.next_proto_id),
			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MATCH_IGNORE);

	if (ipv4_spec)
		proto = ipv4_spec->hdr.next_proto_id;

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.hdr_checksum),
			      ulp_deference_struct(ipv4_mask, hdr.hdr_checksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.src_addr),
			      ulp_deference_struct(ipv4_mask, hdr.src_addr),
			      ULP_PRSR_ACT_DEFAULT);

	dip_idx = idx;
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.dst_addr),
			      ulp_deference_struct(ipv4_mask, hdr.dst_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the ipv4 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
		/* Update the tunnel offload dest ip offset */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
				    dip_idx);
	}

	/* Some of the PMD applications may set the protocol field
	 * in the IPv4 spec but don't set the mask. So, consider
	 * the mask in the proto value calculation.
	 */
	if (ipv4_mask) {
		proto &= ipv4_mask->hdr.next_proto_id;
		proto_mask = ipv4_mask->hdr.next_proto_id;
	}

	/* Update the field protocol hdr bitmap */
	if (proto_mask)
		ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item IPV6 Header */
int32_t
ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
	const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0, dip_idx = 0;
	uint32_t size, vtc_flow;
	uint32_t ver_spec = 0, ver_mask = 0;
	uint32_t tc_spec = 0, tc_mask = 0;
	uint32_t lab_spec = 0, lab_mask = 0;
	uint8_t proto = 0;
	uint8_t proto_mask = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate that there is no third L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_IPV6_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
	 * header fields
	 */
	if (ipv6_spec) {
		vtc_flow = ntohl(ipv6_spec->hdr.vtc_flow);
		ver_spec = htonl(BNXT_ULP_GET_IPV6_VER(vtc_flow));
		tc_spec = htonl(BNXT_ULP_GET_IPV6_TC(vtc_flow));
		lab_spec = htonl(BNXT_ULP_GET_IPV6_FLOWLABEL(vtc_flow));
		proto = ipv6_spec->hdr.proto;
	}

	if (ipv6_mask) {
		vtc_flow = ntohl(ipv6_mask->hdr.vtc_flow);
		ver_mask = htonl(BNXT_ULP_GET_IPV6_VER(vtc_flow));
		tc_mask = htonl(BNXT_ULP_GET_IPV6_TC(vtc_flow));
		lab_mask = htonl(BNXT_ULP_GET_IPV6_FLOWLABEL(vtc_flow));

		/* Some of the PMD applications may set the protocol field
		 * in the IPv6 spec but don't set the mask. So, consider
		 * the mask in the proto value calculation.
		 */
		proto &= ipv6_mask->hdr.proto;
		proto_mask = ipv6_mask->hdr.proto;
	}

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow);
	ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask,
			      ULP_PRSR_ACT_DEFAULT);
	/*
	 * The TC and flow label fields are ignored since OVS sets them
	 * for a wild card match and they are not supported. This is a
	 * workaround and shall be addressed in the future.
	 */
	ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask,
			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MASK_IGNORE);
	ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask,
			      ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.payload_len),
			      ulp_deference_struct(ipv6_mask, hdr.payload_len),
			      ULP_PRSR_ACT_DEFAULT);

	/* Ignore proto for template matching */
	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.proto),
			      ulp_deference_struct(ipv6_mask, hdr.proto),
			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MATCH_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.hop_limits),
			      ulp_deference_struct(ipv6_mask, hdr.hop_limits),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.src_addr),
			      ulp_deference_struct(ipv6_mask, hdr.src_addr),
			      ULP_PRSR_ACT_DEFAULT);

	dip_idx = idx;
	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.dst_addr),
			      ulp_deference_struct(ipv6_mask, hdr.dst_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the ipv6 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
		/* Update the tunnel offload dest ip offset */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
				    dip_idx);
	}

	/* Update the field protocol hdr bitmap */
	if (proto_mask)
		ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);

	return BNXT_TF_RC_SUCCESS;
}
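/*
 * Editor's worked example (comment only, assuming BNXT_ULP_GET_IPV6_VER/
 * _TC/_FLOWLABEL extract the standard vtc_flow subfields): for a host
 * order vtc_flow of 0x60234567, the version is 0x6 (bits 31:28), the
 * traffic class is 0x02 (bits 27:20) and the flow label is 0x34567
 * (bits 19:0). Each extracted value is converted back to network order
 * before being stored in its hdr_field slot above.
 */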
/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *params,
			     uint16_t src_port, uint16_t src_mask,
			     uint16_t dst_port, uint16_t dst_mask,
			     enum bnxt_ulp_hdr_bit hdr_bit)
{
	struct bnxt *bp;

	switch (hdr_bit) {
	case BNXT_ULP_HDR_BIT_I_UDP:
	case BNXT_ULP_HDR_BIT_I_TCP:
		ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
				    (uint64_t)rte_be_to_cpu_16(src_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
				    (uint64_t)rte_be_to_cpu_16(dst_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
				    1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
				    !!(src_port & src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
				    !!(dst_port & dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
				    (hdr_bit == BNXT_ULP_HDR_BIT_I_UDP) ?
				    IPPROTO_UDP : IPPROTO_TCP);
		break;
	case BNXT_ULP_HDR_BIT_O_UDP:
	case BNXT_ULP_HDR_BIT_O_TCP:
		ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
				    (uint64_t)rte_be_to_cpu_16(src_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
				    (uint64_t)rte_be_to_cpu_16(dst_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
				    1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
				    !!(src_port & src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
				    !!(dst_port & dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
				    (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP) ?
				    IPPROTO_UDP : IPPROTO_TCP);
		break;
	default:
		break;
	}

	bp = bnxt_pmd_get_bp(params->port_id);
	if (bp == NULL) {
		BNXT_TF_DBG(ERR, "Invalid bp\n");
		return;
	}

	/* vxlan dynamic customized port */
	if (ULP_APP_CUST_VXLAN_EN(params->ulp_ctx)) {
		/* ulp_rte_vxlan_hdr_handler will parse it further */
		return;
	}
	/* vxlan static customized port */
	else if (ULP_APP_CUST_VXLAN_SUPPORT(bp->ulp_ctx)) {
		if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP &&
		    dst_port == tfp_cpu_to_be_16(bp->ulp_ctx->cfg_data->vxlan_port)) {
			ULP_BITMAP_SET(params->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_T_VXLAN);
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
		}
	}
	/* vxlan ip port */
	else if (ULP_APP_CUST_VXLAN_IP_SUPPORT(bp->ulp_ctx)) {
		if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP &&
		    dst_port == tfp_cpu_to_be_16(bp->ulp_ctx->cfg_data->vxlan_ip_port)) {
			ULP_BITMAP_SET(params->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_T_VXLAN);
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
			if (bp->vxlan_ip_upar_in_use &
			    HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_UPAR_IN_USE_UPAR0) {
				ULP_COMP_FLD_IDX_WR(params,
						    BNXT_ULP_CF_IDX_VXLAN_IP_UPAR_ID,
						    ULP_WP_SYM_TUN_HDR_TYPE_UPAR1);
			}
		}
	}
	/* vxlan gpe port */
	else if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP &&
		 dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN_GPE)) {
		ULP_BITMAP_SET(params->hdr_fp_bit.bits,
			       BNXT_ULP_HDR_BIT_T_VXLAN_GPE);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
	}
	/* vxlan standard port */
	else if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP &&
		 dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
		ULP_BITMAP_SET(params->hdr_fp_bit.bits,
			       BNXT_ULP_HDR_BIT_T_VXLAN);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
	}
}
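/*
 * Editor's note (comment only): with none of the customized-VXLAN knobs
 * enabled, an outer UDP item whose big-endian dst_port equals 4789
 * (ULP_UDP_PORT_VXLAN) marks the flow as a VXLAN tunnel, and 4790
 * (ULP_UDP_PORT_VXLAN_GPE) marks it as VXLAN-GPE; both also set the
 * BNXT_ULP_CF_IDX_L3_TUN computed field so that later L3/L4 items are
 * treated as inner headers.
 */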
/* Function to handle the parsing of RTE Flow item UDP Header. */
int32_t
ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_udp *udp_spec = item->spec;
	const struct rte_flow_item_udp *udp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;
	uint16_t dport = 0, sport = 0;
	uint16_t dport_mask = 0, sport_mask = 0;
	uint32_t cnt;
	enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_UDP;

	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (udp_spec) {
		sport = udp_spec->hdr.src_port;
		dport = udp_spec->hdr.dst_port;
	}
	if (udp_mask) {
		sport_mask = udp_mask->hdr.src_port;
		dport_mask = udp_mask->hdr.dst_port;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_UDP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for udp into hdr_field using udp
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.src_port),
			      ulp_deference_struct(udp_mask, hdr.src_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dst_port),
			      ulp_deference_struct(udp_mask, hdr.dst_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dgram_len),
			      ulp_deference_struct(udp_mask, hdr.dgram_len),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dgram_cksum),
			      ulp_deference_struct(udp_mask, hdr.dgram_cksum),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the udp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP) ||
	    ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
		out_l4 = BNXT_ULP_HDR_BIT_I_UDP;

	ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
				     dport_mask, out_l4);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow item TCP Header. */
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_tcp *tcp_spec = item->spec;
	const struct rte_flow_item_tcp *tcp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint16_t dport = 0, sport = 0;
	uint16_t dport_mask = 0, sport_mask = 0;
	uint32_t size;
	uint32_t cnt;
	enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_TCP;

	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (tcp_spec) {
		sport = tcp_spec->hdr.src_port;
		dport = tcp_spec->hdr.dst_port;
	}
	if (tcp_mask) {
		sport_mask = tcp_mask->hdr.src_port;
		dport_mask = tcp_mask->hdr.dst_port;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_TCP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for tcp into hdr_field using tcp
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.src_port),
			      ulp_deference_struct(tcp_mask, hdr.src_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.dst_port),
			      ulp_deference_struct(tcp_mask, hdr.dst_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.sent_seq),
			      ulp_deference_struct(tcp_mask, hdr.sent_seq),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.recv_ack),
			      ulp_deference_struct(tcp_mask, hdr.recv_ack),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.data_off),
			      ulp_deference_struct(tcp_mask, hdr.data_off),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.tcp_flags),
			      ulp_deference_struct(tcp_mask, hdr.tcp_flags),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.rx_win),
			      ulp_deference_struct(tcp_mask, hdr.rx_win),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.cksum),
			      ulp_deference_struct(tcp_mask, hdr.cksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.tcp_urp),
			      ulp_deference_struct(tcp_mask, hdr.tcp_urp),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the tcp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP) ||
	    ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
		out_l4 = BNXT_ULP_HDR_BIT_I_TCP;

	ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
				     dport_mask, out_l4);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item Vxlan Header. */
int32_t
ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
	const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint16_t dport;
	uint32_t size;

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_VXLAN_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.flags);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_spec, hdr.flags),
			      ulp_deference_struct(vxlan_mask, hdr.flags),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.rsvd0);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_spec, hdr.rsvd0),
			      ulp_deference_struct(vxlan_mask, hdr.rsvd0),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.vni);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_spec, hdr.vni),
			      ulp_deference_struct(vxlan_mask, hdr.vni),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.rsvd1);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_spec, hdr.rsvd1),
			      ulp_deference_struct(vxlan_mask, hdr.rsvd1),
			      ULP_PRSR_ACT_DEFAULT);

	/* Update the hdr_bitmap with vxlan */
	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);

	dport = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT);
	if (!dport) {
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
				    ULP_UDP_PORT_VXLAN);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
				    ULP_UDP_PORT_VXLAN_MASK);
	}

	/* No need to check the vxlan port for these conditions here */
	if (ULP_APP_CUST_VXLAN_EN(params->ulp_ctx) ||
	    ULP_APP_CUST_VXLAN_SUPPORT(params->ulp_ctx) ||
	    ULP_APP_CUST_VXLAN_IP_SUPPORT(params->ulp_ctx))
		return BNXT_TF_RC_SUCCESS;

	/* Verify the vxlan port */
	if (dport != 0 && dport != ULP_UDP_PORT_VXLAN) {
		BNXT_TF_DBG(ERR, "ParseErr:vxlan port is not valid\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	return BNXT_TF_RC_SUCCESS;
}
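/*
 * Editor's sketch, illustrative only: a VXLAN item with a fully masked
 * VNI as ulp_rte_vxlan_hdr_handler() above consumes it. The
 * ULP_PARSER_EXAMPLE guard is hypothetical and never defined; VNI
 * 0x000123 is an arbitrary example value.
 */
#ifdef ULP_PARSER_EXAMPLE
static void
ulp_parser_example_vxlan_item(struct ulp_rte_parser_params *params)
{
	struct rte_flow_item_vxlan spec = {
		.hdr.vni = { 0x00, 0x01, 0x23 },
	};
	struct rte_flow_item_vxlan mask = {
		.hdr.vni = { 0xff, 0xff, 0xff },
	};
	struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.spec = &spec,
		.mask = &mask,
	};

	/* Sets BNXT_ULP_HDR_BIT_T_VXLAN and the L3_TUN computed field */
	ulp_rte_vxlan_hdr_handler(&item, params);
}
#endif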
/* Function to handle the parsing of RTE Flow item Vxlan GPE Header. */
int32_t
ulp_rte_vxlan_gpe_hdr_handler(const struct rte_flow_item *item,
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vxlan_gpe *vxlan_gpe_spec = item->spec;
	const struct rte_flow_item_vxlan_gpe *vxlan_gpe_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint16_t dport;
	uint32_t size;

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_VXLAN_GPE_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for vxlan gpe into hdr_field using vxlan
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->flags);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_gpe_spec, flags),
			      ulp_deference_struct(vxlan_gpe_mask, flags),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->rsvd0);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_gpe_spec, rsvd0),
			      ulp_deference_struct(vxlan_gpe_mask, rsvd0),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->protocol);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_gpe_spec, protocol),
			      ulp_deference_struct(vxlan_gpe_mask, protocol),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->vni);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_gpe_spec, vni),
			      ulp_deference_struct(vxlan_gpe_mask, vni),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->rsvd1);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_gpe_spec, rsvd1),
			      ulp_deference_struct(vxlan_gpe_mask, rsvd1),
			      ULP_PRSR_ACT_DEFAULT);

	/* Update the hdr_bitmap with vxlan gpe */
	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN_GPE);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);

	dport = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT);
	if (!dport) {
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
				    ULP_UDP_PORT_VXLAN_GPE);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
				    ULP_UDP_PORT_VXLAN_GPE_MASK);
	}

	if (ULP_APP_CUST_VXLAN_EN(params->ulp_ctx) ||
	    ULP_APP_CUST_VXLAN_SUPPORT(params->ulp_ctx) ||
	    ULP_APP_CUST_VXLAN_IP_SUPPORT(params->ulp_ctx)) {
		BNXT_TF_DBG(ERR, "ParseErr:vxlan setting is not valid\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* Verify the vxlan gpe port */
	if (dport != 0 && dport != ULP_UDP_PORT_VXLAN_GPE) {
		BNXT_TF_DBG(ERR, "ParseErr:vxlan gpe port is not valid\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	return BNXT_TF_RC_SUCCESS;
}
*/ 1678 int32_t 1679 ulp_rte_gre_hdr_handler(const struct rte_flow_item *item, 1680 struct ulp_rte_parser_params *params) 1681 { 1682 const struct rte_flow_item_gre *gre_spec = item->spec; 1683 const struct rte_flow_item_gre *gre_mask = item->mask; 1684 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap; 1685 uint32_t idx = 0; 1686 uint32_t size; 1687 1688 if (ulp_rte_prsr_fld_size_validate(params, &idx, 1689 BNXT_ULP_PROTO_HDR_GRE_NUM)) { 1690 BNXT_TF_DBG(ERR, "Error parsing protocol header\n"); 1691 return BNXT_TF_RC_ERROR; 1692 } 1693 1694 size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver); 1695 ulp_rte_prsr_fld_mask(params, &idx, size, 1696 ulp_deference_struct(gre_spec, c_rsvd0_ver), 1697 ulp_deference_struct(gre_mask, c_rsvd0_ver), 1698 ULP_PRSR_ACT_DEFAULT); 1699 1700 size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol); 1701 ulp_rte_prsr_fld_mask(params, &idx, size, 1702 ulp_deference_struct(gre_spec, protocol), 1703 ulp_deference_struct(gre_mask, protocol), 1704 ULP_PRSR_ACT_DEFAULT); 1705 1706 /* Update the hdr_bitmap with GRE */ 1707 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE); 1708 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1); 1709 return BNXT_TF_RC_SUCCESS; 1710 } 1711 1712 /* Function to handle the parsing of RTE Flow item ANY. */ 1713 int32_t 1714 ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused, 1715 struct ulp_rte_parser_params *params __rte_unused) 1716 { 1717 return BNXT_TF_RC_SUCCESS; 1718 } 1719 1720 /* Function to handle the parsing of RTE Flow item ICMP Header. */ 1721 int32_t 1722 ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item, 1723 struct ulp_rte_parser_params *params) 1724 { 1725 const struct rte_flow_item_icmp *icmp_spec = item->spec; 1726 const struct rte_flow_item_icmp *icmp_mask = item->mask; 1727 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap; 1728 uint32_t idx = 0; 1729 uint32_t size; 1730 1731 if (ulp_rte_prsr_fld_size_validate(params, &idx, 1732 BNXT_ULP_PROTO_HDR_ICMP_NUM)) { 1733 BNXT_TF_DBG(ERR, "Error parsing protocol header\n"); 1734 return BNXT_TF_RC_ERROR; 1735 } 1736 1737 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_type); 1738 ulp_rte_prsr_fld_mask(params, &idx, size, 1739 ulp_deference_struct(icmp_spec, hdr.icmp_type), 1740 ulp_deference_struct(icmp_mask, hdr.icmp_type), 1741 ULP_PRSR_ACT_DEFAULT); 1742 1743 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_code); 1744 ulp_rte_prsr_fld_mask(params, &idx, size, 1745 ulp_deference_struct(icmp_spec, hdr.icmp_code), 1746 ulp_deference_struct(icmp_mask, hdr.icmp_code), 1747 ULP_PRSR_ACT_DEFAULT); 1748 1749 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_cksum); 1750 ulp_rte_prsr_fld_mask(params, &idx, size, 1751 ulp_deference_struct(icmp_spec, hdr.icmp_cksum), 1752 ulp_deference_struct(icmp_mask, hdr.icmp_cksum), 1753 ULP_PRSR_ACT_DEFAULT); 1754 1755 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_ident); 1756 ulp_rte_prsr_fld_mask(params, &idx, size, 1757 ulp_deference_struct(icmp_spec, hdr.icmp_ident), 1758 ulp_deference_struct(icmp_mask, hdr.icmp_ident), 1759 ULP_PRSR_ACT_DEFAULT); 1760 1761 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_seq_nb); 1762 ulp_rte_prsr_fld_mask(params, &idx, size, 1763 ulp_deference_struct(icmp_spec, hdr.icmp_seq_nb), 1764 ulp_deference_struct(icmp_mask, hdr.icmp_seq_nb), 1765 ULP_PRSR_ACT_DEFAULT); 1766 1767 /* Update the hdr_bitmap with ICMP */ 1768 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) 1769 
ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP); 1770 else 1771 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP); 1772 return BNXT_TF_RC_SUCCESS; 1773 } 1774 1775 /* Function to handle the parsing of RTE Flow item ICMP6 Header. */ 1776 int32_t 1777 ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item, 1778 struct ulp_rte_parser_params *params) 1779 { 1780 const struct rte_flow_item_icmp6 *icmp_spec = item->spec; 1781 const struct rte_flow_item_icmp6 *icmp_mask = item->mask; 1782 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap; 1783 uint32_t idx = 0; 1784 uint32_t size; 1785 1786 if (ulp_rte_prsr_fld_size_validate(params, &idx, 1787 BNXT_ULP_PROTO_HDR_ICMP_NUM)) { 1788 BNXT_TF_DBG(ERR, "Error parsing protocol header\n"); 1789 return BNXT_TF_RC_ERROR; 1790 } 1791 1792 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->type); 1793 ulp_rte_prsr_fld_mask(params, &idx, size, 1794 ulp_deference_struct(icmp_spec, type), 1795 ulp_deference_struct(icmp_mask, type), 1796 ULP_PRSR_ACT_DEFAULT); 1797 1798 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->code); 1799 ulp_rte_prsr_fld_mask(params, &idx, size, 1800 ulp_deference_struct(icmp_spec, code), 1801 ulp_deference_struct(icmp_mask, code), 1802 ULP_PRSR_ACT_DEFAULT); 1803 1804 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->checksum); 1805 ulp_rte_prsr_fld_mask(params, &idx, size, 1806 ulp_deference_struct(icmp_spec, checksum), 1807 ulp_deference_struct(icmp_mask, checksum), 1808 ULP_PRSR_ACT_DEFAULT); 1809 1810 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4)) { 1811 BNXT_TF_DBG(ERR, "Error: incorrect icmp version\n"); 1812 return BNXT_TF_RC_ERROR; 1813 } 1814 1815 /* Update the hdr_bitmap with ICMP */ 1816 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) 1817 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP); 1818 else 1819 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP); 1820 return BNXT_TF_RC_SUCCESS; 1821 } 1822 1823 /* Function to handle the parsing of RTE Flow item ECPRI Header. 
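 *
 * The eCPRI common header arrives big-endian; the handler below converts
 * the 32-bit common word to host order, normalizes the revision/reserved
 * bits and the per-message-type fields that the TCAM ignores, then
 * converts back before the field copies. A sketch of that round trip
 * (names shortened for illustration; assumes rte_byteorder.h):
 *
 *   l_spec.hdr.common.u32 = rte_be_to_cpu_32(spec->hdr.common.u32);
 *   ... adjust bitfields through the union view ...
 *   l_spec.hdr.common.u32 = rte_cpu_to_be_32(l_spec.hdr.common.u32);
 *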
*/ 1824 int32_t 1825 ulp_rte_ecpri_hdr_handler(const struct rte_flow_item *item, 1826 struct ulp_rte_parser_params *params) 1827 { 1828 const struct rte_flow_item_ecpri *ecpri_spec = item->spec; 1829 const struct rte_flow_item_ecpri *ecpri_mask = item->mask; 1830 struct rte_flow_item_ecpri l_ecpri_spec, l_ecpri_mask; 1831 struct rte_flow_item_ecpri *p_ecpri_spec = &l_ecpri_spec; 1832 struct rte_flow_item_ecpri *p_ecpri_mask = &l_ecpri_mask; 1833 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap; 1834 uint32_t idx = 0, cnt; 1835 uint32_t size; 1836 1837 if (ulp_rte_prsr_fld_size_validate(params, &idx, 1838 BNXT_ULP_PROTO_HDR_ECPRI_NUM)) { 1839 BNXT_TF_DBG(ERR, "Error parsing protocol header\n"); 1840 return BNXT_TF_RC_ERROR; 1841 } 1842 1843 /* eCPRI over L4 (UDP) is unsupported for now; reject if an L4 header was already parsed */ 1844 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT); 1845 if (cnt >= 1) { 1846 BNXT_TF_DBG(ERR, "Parse Err: eCPRI over L4 is not supported\n"); 1847 return BNXT_TF_RC_ERROR; 1848 } 1849 1850 if (!ecpri_spec || !ecpri_mask) 1851 goto parser_set_ecpri_hdr_bit; 1852 1853 memcpy(p_ecpri_spec, ecpri_spec, sizeof(*ecpri_spec)); 1854 memcpy(p_ecpri_mask, ecpri_mask, sizeof(*ecpri_mask)); 1855 1856 p_ecpri_spec->hdr.common.u32 = rte_be_to_cpu_32(p_ecpri_spec->hdr.common.u32); 1857 p_ecpri_mask->hdr.common.u32 = rte_be_to_cpu_32(p_ecpri_mask->hdr.common.u32); 1858 1859 /* 1860 * Init eCPRI spec+mask to correct defaults, also clear masks of fields 1861 * we ignore in the TCAM. 1862 */ 1863 1864 l_ecpri_spec.hdr.common.size = 0; 1865 l_ecpri_spec.hdr.common.c = 0; 1866 l_ecpri_spec.hdr.common.res = 0; 1867 l_ecpri_spec.hdr.common.revision = 1; 1868 l_ecpri_mask.hdr.common.size = 0; 1869 l_ecpri_mask.hdr.common.c = 1; 1870 l_ecpri_mask.hdr.common.res = 0; 1871 l_ecpri_mask.hdr.common.revision = 0xf; 1872 1873 switch (p_ecpri_spec->hdr.common.type) { 1874 case RTE_ECPRI_MSG_TYPE_IQ_DATA: 1875 l_ecpri_mask.hdr.type0.seq_id = 0; 1876 break; 1877 1878 case RTE_ECPRI_MSG_TYPE_BIT_SEQ: 1879 l_ecpri_mask.hdr.type1.seq_id = 0; 1880 break; 1881 1882 case RTE_ECPRI_MSG_TYPE_RTC_CTRL: 1883 l_ecpri_mask.hdr.type2.seq_id = 0; 1884 break; 1885 1886 case RTE_ECPRI_MSG_TYPE_GEN_DATA: 1887 l_ecpri_mask.hdr.type3.seq_id = 0; 1888 break; 1889 1890 case RTE_ECPRI_MSG_TYPE_RM_ACC: 1891 l_ecpri_mask.hdr.type4.rr = 0; 1892 l_ecpri_mask.hdr.type4.rw = 0; 1893 l_ecpri_mask.hdr.type4.rma_id = 0; 1894 break; 1895 1896 case RTE_ECPRI_MSG_TYPE_DLY_MSR: 1897 l_ecpri_spec.hdr.type5.act_type = 0; 1898 break; 1899 1900 case RTE_ECPRI_MSG_TYPE_RMT_RST: 1901 l_ecpri_spec.hdr.type6.rst_op = 0; 1902 break; 1903 1904 case RTE_ECPRI_MSG_TYPE_EVT_IND: 1905 l_ecpri_spec.hdr.type7.evt_type = 0; 1906 l_ecpri_spec.hdr.type7.seq = 0; 1907 l_ecpri_spec.hdr.type7.number = 0; 1908 break; 1909 1910 default: 1911 break; 1912 } 1913 1914 p_ecpri_spec->hdr.common.u32 = rte_cpu_to_be_32(p_ecpri_spec->hdr.common.u32); 1915 p_ecpri_mask->hdr.common.u32 = rte_cpu_to_be_32(p_ecpri_mask->hdr.common.u32); 1916 1917 /* Type */ 1918 size = sizeof(((struct rte_flow_item_ecpri *)NULL)->hdr.common.u32); 1919 ulp_rte_prsr_fld_mask(params, &idx, size, 1920 ulp_deference_struct(p_ecpri_spec, hdr.common.u32), 1921 ulp_deference_struct(p_ecpri_mask, hdr.common.u32), 1922 ULP_PRSR_ACT_DEFAULT); 1923 1924 /* PC/RTC/MSR_ID */ 1925 size = sizeof(((struct rte_flow_item_ecpri *)NULL)->hdr.dummy[0]); 1926 ulp_rte_prsr_fld_mask(params, &idx, size, 1927 ulp_deference_struct(p_ecpri_spec, hdr.dummy), 1928 ulp_deference_struct(p_ecpri_mask, hdr.dummy), 
1929 ULP_PRSR_ACT_DEFAULT); 1930 1931 parser_set_ecpri_hdr_bit: 1932 /* Update the hdr_bitmap with eCPRI */ 1933 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ECPRI); 1934 return BNXT_TF_RC_SUCCESS; 1935 } 1936 1937 /* Function to handle the parsing of RTE Flow item void Header */ 1938 int32_t 1939 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused, 1940 struct ulp_rte_parser_params *params __rte_unused) 1941 { 1942 return BNXT_TF_RC_SUCCESS; 1943 } 1944 1945 /* Function to handle the parsing of RTE Flow action void Header. */ 1946 int32_t 1947 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused, 1948 struct ulp_rte_parser_params *params __rte_unused) 1949 { 1950 return BNXT_TF_RC_SUCCESS; 1951 } 1952 1953 /* Function to handle the parsing of RTE Flow action Mark Header. */ 1954 int32_t 1955 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item, 1956 struct ulp_rte_parser_params *param) 1957 { 1958 const struct rte_flow_action_mark *mark; 1959 struct ulp_rte_act_bitmap *act = &param->act_bitmap; 1960 uint32_t mark_id; 1961 1962 mark = action_item->conf; 1963 if (mark) { 1964 mark_id = tfp_cpu_to_be_32(mark->id); 1965 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK], 1966 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK); 1967 1968 /* Update the act_bitmap with mark */ 1969 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK); 1970 return BNXT_TF_RC_SUCCESS; 1971 } 1972 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n"); 1973 return BNXT_TF_RC_ERROR; 1974 } 1975 1976 /* Function to handle the parsing of RTE Flow action RSS Header. */ 1977 int32_t 1978 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item, 1979 struct ulp_rte_parser_params *param) 1980 { 1981 const struct rte_flow_action_rss *rss; 1982 struct ulp_rte_act_prop *ap = &param->act_prop; 1983 uint64_t queue_list[BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE / sizeof(uint64_t)]; 1984 uint32_t idx = 0, id; 1985 1986 if (action_item == NULL || action_item->conf == NULL) { 1987 BNXT_TF_DBG(ERR, "Parse Err: invalid rss configuration\n"); 1988 return BNXT_TF_RC_ERROR; 1989 } 1990 1991 rss = action_item->conf; 1992 /* Copy the rss into the specific action properties */ 1993 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_TYPES], &rss->types, 1994 BNXT_ULP_ACT_PROP_SZ_RSS_TYPES); 1995 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL], &rss->level, 1996 BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL); 1997 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN], 1998 &rss->key_len, BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN); 1999 2000 if (rss->key_len != 0 && rss->key_len != BNXT_ULP_ACT_PROP_SZ_RSS_KEY) { 2001 BNXT_TF_DBG(ERR, "Parse Err: RSS key length must be 40 bytes\n"); 2002 return BNXT_TF_RC_ERROR; 2003 } 2004 2005 /* User may specify only key length. In that case, rss->key will be NULL. 2006 * So, reject the flow if key_length is valid but rss->key is NULL. 2007 * Also, copy the RSS hash key only when rss->key is valid. 
2008 */ 2009 if (rss->key_len != 0 && rss->key == NULL) { 2010 BNXT_TF_DBG(ERR, 2011 "Parse Err: A valid RSS key must be provided with a valid key len.\n"); 2012 return BNXT_TF_RC_ERROR; 2013 } 2014 if (rss->key) 2015 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY], rss->key, rss->key_len); 2016 2017 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_QUEUE_NUM], 2018 &rss->queue_num, BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE_NUM); 2019 2020 if (rss->queue_num >= ULP_BYTE_2_BITS(BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE)) { 2021 BNXT_TF_DBG(ERR, "Parse Err: RSS queue num too big\n"); 2022 return BNXT_TF_RC_ERROR; 2023 } 2024 2025 /* Queues converted into a bitmap format */ 2026 memset(queue_list, 0, sizeof(queue_list)); 2027 for (idx = 0; idx < rss->queue_num; idx++) { 2028 id = rss->queue[idx]; 2029 if (id >= ULP_BYTE_2_BITS(BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE)) { 2030 BNXT_TF_DBG(ERR, "Parse Err: RSS queue id too big\n"); 2031 return BNXT_TF_RC_ERROR; 2032 } 2033 if ((queue_list[id / ULP_INDEX_BITMAP_SIZE] >> 2034 ((ULP_INDEX_BITMAP_SIZE - 1) - 2035 (id % ULP_INDEX_BITMAP_SIZE)) & 1)) { 2036 BNXT_TF_DBG(ERR, "Parse Err: duplicate queue ids\n"); 2037 return BNXT_TF_RC_ERROR; 2038 } 2039 queue_list[id / ULP_INDEX_BITMAP_SIZE] |= (1UL << 2040 ((ULP_INDEX_BITMAP_SIZE - 1) - (id % ULP_INDEX_BITMAP_SIZE))); 2041 } 2042 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_QUEUE], 2043 (uint8_t *)queue_list, BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE); 2044 2045 /* set the RSS action header bit */ 2046 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS); 2047 2048 return BNXT_TF_RC_SUCCESS; 2049 } 2050 2051 /* Function to handle the parsing of RTE Flow item eth Header. */ 2052 static void 2053 ulp_rte_enc_eth_hdr_handler(struct ulp_rte_parser_params *params, 2054 const struct rte_flow_item_eth *eth_spec) 2055 { 2056 struct ulp_rte_hdr_field *field; 2057 uint32_t size; 2058 2059 field = &params->enc_field[BNXT_ULP_ENC_FIELD_ETH_DMAC]; 2060 size = sizeof(eth_spec->hdr.dst_addr.addr_bytes); 2061 field = ulp_rte_parser_fld_copy(field, eth_spec->hdr.dst_addr.addr_bytes, size); 2062 2063 size = sizeof(eth_spec->hdr.src_addr.addr_bytes); 2064 field = ulp_rte_parser_fld_copy(field, eth_spec->hdr.src_addr.addr_bytes, size); 2065 2066 size = sizeof(eth_spec->hdr.ether_type); 2067 field = ulp_rte_parser_fld_copy(field, &eth_spec->hdr.ether_type, size); 2068 2069 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH); 2070 } 2071 2072 /* Function to handle the parsing of RTE Flow item vlan Header. */ 2073 static void 2074 ulp_rte_enc_vlan_hdr_handler(struct ulp_rte_parser_params *params, 2075 const struct rte_flow_item_vlan *vlan_spec, 2076 uint32_t inner) 2077 { 2078 struct ulp_rte_hdr_field *field; 2079 uint32_t size; 2080 2081 if (!inner) { 2082 field = &params->enc_field[BNXT_ULP_ENC_FIELD_O_VLAN_TCI]; 2083 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, 2084 BNXT_ULP_HDR_BIT_OO_VLAN); 2085 } else { 2086 field = &params->enc_field[BNXT_ULP_ENC_FIELD_I_VLAN_TCI]; 2087 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, 2088 BNXT_ULP_HDR_BIT_OI_VLAN); 2089 } 2090 2091 size = sizeof(vlan_spec->hdr.vlan_tci); 2092 field = ulp_rte_parser_fld_copy(field, &vlan_spec->hdr.vlan_tci, size); 2093 2094 size = sizeof(vlan_spec->hdr.eth_proto); 2095 field = ulp_rte_parser_fld_copy(field, &vlan_spec->hdr.eth_proto, size); 2096 } 2097 2098 /* Function to handle the parsing of RTE Flow item ipv4 Header. 
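 *
 * These encap helpers rely on ulp_rte_parser_fld_copy() returning the
 * next slot, so successive header fields land in consecutive enc_field[]
 * entries once the first slot is chosen. The recurring pattern, in
 * sketch form (value names illustrative):
 *
 *   field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_IHL];
 *   field = ulp_rte_parser_fld_copy(field, &first_val, size1);
 *   field = ulp_rte_parser_fld_copy(field, &second_val, size2);
 *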
*/ 2099 static void 2100 ulp_rte_enc_ipv4_hdr_handler(struct ulp_rte_parser_params *params, 2101 const struct rte_flow_item_ipv4 *ip) 2102 { 2103 struct ulp_rte_hdr_field *field; 2104 uint32_t size; 2105 uint8_t val8; 2106 2107 field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_IHL]; 2108 size = sizeof(ip->hdr.version_ihl); 2109 if (!ip->hdr.version_ihl) 2110 val8 = RTE_IPV4_VHL_DEF; 2111 else 2112 val8 = ip->hdr.version_ihl; 2113 field = ulp_rte_parser_fld_copy(field, &val8, size); 2114 2115 size = sizeof(ip->hdr.type_of_service); 2116 field = ulp_rte_parser_fld_copy(field, &ip->hdr.type_of_service, size); 2117 2118 size = sizeof(ip->hdr.packet_id); 2119 field = ulp_rte_parser_fld_copy(field, &ip->hdr.packet_id, size); 2120 2121 size = sizeof(ip->hdr.fragment_offset); 2122 field = ulp_rte_parser_fld_copy(field, &ip->hdr.fragment_offset, size); 2123 2124 size = sizeof(ip->hdr.time_to_live); 2125 if (!ip->hdr.time_to_live) 2126 val8 = BNXT_ULP_DEFAULT_TTL; 2127 else 2128 val8 = ip->hdr.time_to_live; 2129 field = ulp_rte_parser_fld_copy(field, &val8, size); 2130 2131 size = sizeof(ip->hdr.next_proto_id); 2132 field = ulp_rte_parser_fld_copy(field, &ip->hdr.next_proto_id, size); 2133 2134 size = sizeof(ip->hdr.src_addr); 2135 field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size); 2136 2137 size = sizeof(ip->hdr.dst_addr); 2138 field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size); 2139 2140 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4); 2141 } 2142 2143 /* Function to handle the parsing of RTE Flow item ipv6 Header. */ 2144 static void 2145 ulp_rte_enc_ipv6_hdr_handler(struct ulp_rte_parser_params *params, 2146 const struct rte_flow_item_ipv6 *ip) 2147 { 2148 struct ulp_rte_hdr_field *field; 2149 uint32_t size; 2150 uint32_t val32; 2151 uint8_t val8; 2152 2153 field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW]; 2154 size = sizeof(ip->hdr.vtc_flow); 2155 if (!ip->hdr.vtc_flow) 2156 val32 = rte_cpu_to_be_32(BNXT_ULP_IPV6_DFLT_VER); 2157 else 2158 val32 = ip->hdr.vtc_flow; 2159 field = ulp_rte_parser_fld_copy(field, &val32, size); 2160 2161 size = sizeof(ip->hdr.proto); 2162 field = ulp_rte_parser_fld_copy(field, &ip->hdr.proto, size); 2163 2164 size = sizeof(ip->hdr.hop_limits); 2165 if (!ip->hdr.hop_limits) 2166 val8 = BNXT_ULP_DEFAULT_TTL; 2167 else 2168 val8 = ip->hdr.hop_limits; 2169 field = ulp_rte_parser_fld_copy(field, &val8, size); 2170 2171 size = sizeof(ip->hdr.src_addr); 2172 field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size); 2173 2174 size = sizeof(ip->hdr.dst_addr); 2175 field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size); 2176 2177 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV6); 2178 } 2179 2180 /* Function to handle the parsing of RTE Flow item UDP Header. 
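 *
 * Note that the IPv4/IPv6 encap helpers above substitute defaults
 * (RTE_IPV4_VHL_DEF, BNXT_ULP_DEFAULT_TTL, BNXT_ULP_IPV6_DFLT_VER) when
 * the application leaves version/TTL fields zeroed, so an encap
 * definition only has to provide the addresses. A minimal
 * application-side item relying on that (values illustrative):
 *
 *   struct rte_flow_item_ipv4 ip = {
 *           .hdr.src_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 1)),
 *           .hdr.dst_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 2)),
 *   };
 *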
*/ 2181 static void 2182 ulp_rte_enc_udp_hdr_handler(struct ulp_rte_parser_params *params, 2183 const struct rte_flow_item_udp *udp_spec) 2184 { 2185 struct ulp_rte_hdr_field *field; 2186 uint32_t size; 2187 uint8_t type = IPPROTO_UDP; 2188 2189 field = &params->enc_field[BNXT_ULP_ENC_FIELD_UDP_SPORT]; 2190 size = sizeof(udp_spec->hdr.src_port); 2191 field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.src_port, size); 2192 2193 size = sizeof(udp_spec->hdr.dst_port); 2194 field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.dst_port, size); 2195 2196 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_UDP); 2197 2198 /* Update the ip header protocol */ 2199 field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_PROTO]; 2200 ulp_rte_parser_fld_copy(field, &type, sizeof(type)); 2201 field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_PROTO]; 2202 ulp_rte_parser_fld_copy(field, &type, sizeof(type)); 2203 } 2204 2205 /* Function to handle the parsing of RTE Flow item vxlan Header. */ 2206 static void 2207 ulp_rte_enc_vxlan_hdr_handler(struct ulp_rte_parser_params *params, 2208 struct rte_flow_item_vxlan *vxlan_spec) 2209 { 2210 struct ulp_rte_hdr_field *field; 2211 uint32_t size; 2212 2213 field = &params->enc_field[BNXT_ULP_ENC_FIELD_VXLAN_FLAGS]; 2214 size = sizeof(vxlan_spec->hdr.flags); 2215 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.flags, size); 2216 2217 size = sizeof(vxlan_spec->hdr.rsvd0); 2218 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.rsvd0, size); 2219 2220 size = sizeof(vxlan_spec->hdr.vni); 2221 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.vni, size); 2222 2223 size = sizeof(vxlan_spec->hdr.rsvd1); 2224 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.rsvd1, size); 2225 2226 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_VXLAN); 2227 } 2228 2229 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. 
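 *
 * The handler below walks vxlan_encap->definition in a fixed order:
 * ETH, optionally one or two VLANs, IPv4 or IPv6, UDP, then VXLAN. A
 * sketch of an application building such an action (spec structs
 * omitted; public rte_flow API):
 *
 *   struct rte_flow_item defs[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
 *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_vxlan_encap conf = { .definition = defs };
 *   struct rte_flow_action act = {
 *           .type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP, .conf = &conf };
 *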
*/ 2230 int32_t 2231 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item, 2232 struct ulp_rte_parser_params *params) 2233 { 2234 const struct rte_flow_action_vxlan_encap *vxlan_encap; 2235 const struct rte_flow_item *item; 2236 const struct rte_flow_item_ipv4 *ipv4_spec; 2237 const struct rte_flow_item_ipv6 *ipv6_spec; 2238 struct rte_flow_item_vxlan vxlan_spec; 2239 uint32_t vlan_num = 0, vlan_size = 0; 2240 uint32_t ip_size = 0, ip_type = 0; 2241 uint32_t vxlan_size = 0; 2242 struct ulp_rte_act_bitmap *act = &params->act_bitmap; 2243 struct ulp_rte_act_prop *ap = &params->act_prop; 2244 2245 vxlan_encap = action_item->conf; 2246 if (!vxlan_encap) { 2247 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n"); 2248 return BNXT_TF_RC_ERROR; 2249 } 2250 2251 item = vxlan_encap->definition; 2252 if (!item) { 2253 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n"); 2254 return BNXT_TF_RC_ERROR; 2255 } 2256 2257 if (!ulp_rte_item_skip_void(&item, 0)) 2258 return BNXT_TF_RC_ERROR; 2259 2260 /* must have ethernet header */ 2261 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) { 2262 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n"); 2263 return BNXT_TF_RC_ERROR; 2264 } 2265 2266 /* Parse the ethernet header */ 2267 if (item->spec) 2268 ulp_rte_enc_eth_hdr_handler(params, item->spec); 2269 2270 /* Goto the next item */ 2271 if (!ulp_rte_item_skip_void(&item, 1)) 2272 return BNXT_TF_RC_ERROR; 2273 2274 /* May have vlan header */ 2275 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { 2276 vlan_num++; 2277 if (item->spec) 2278 ulp_rte_enc_vlan_hdr_handler(params, item->spec, 0); 2279 2280 if (!ulp_rte_item_skip_void(&item, 1)) 2281 return BNXT_TF_RC_ERROR; 2282 } 2283 2284 /* may have two vlan headers */ 2285 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { 2286 vlan_num++; 2287 if (item->spec) 2288 ulp_rte_enc_vlan_hdr_handler(params, item->spec, 1); 2289 2290 if (!ulp_rte_item_skip_void(&item, 1)) 2291 return BNXT_TF_RC_ERROR; 2292 } 2293 2294 /* Update the vlan count and size if vlan headers are present */ 2295 if (vlan_num) { 2296 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan); 2297 vlan_num = tfp_cpu_to_be_32(vlan_num); 2298 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM], 2299 &vlan_num, 2300 sizeof(uint32_t)); 2301 vlan_size = tfp_cpu_to_be_32(vlan_size); 2302 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ], 2303 &vlan_size, 2304 sizeof(uint32_t)); 2305 } 2306 2307 /* L3 must be IPv4 or IPv6 */ 2308 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) { 2309 ipv4_spec = item->spec; 2310 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE; 2311 2312 /* Update the ip size details */ 2313 ip_size = tfp_cpu_to_be_32(ip_size); 2314 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ], 2315 &ip_size, sizeof(uint32_t)); 2316 2317 /* update the ip type */ 2318 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4); 2319 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE], 2320 &ip_type, sizeof(uint32_t)); 2321 2322 /* update the computed field to notify it is ipv4 header */ 2323 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG, 2324 1); 2325 if (ipv4_spec) 2326 ulp_rte_enc_ipv4_hdr_handler(params, ipv4_spec); 2327 2328 if (!ulp_rte_item_skip_void(&item, 1)) 2329 return BNXT_TF_RC_ERROR; 2330 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) { 2331 ipv6_spec = item->spec; 2332 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE; 2333 2334 /* Update the ip size details */ 2335 ip_size = tfp_cpu_to_be_32(ip_size); 2336 
memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ], 2337 &ip_size, sizeof(uint32_t)); 2338 2339 /* update the ip type */ 2340 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6); 2341 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE], 2342 &ip_type, sizeof(uint32_t)); 2343 2344 /* update the computed field to notify it is ipv6 header */ 2345 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG, 2346 1); 2347 if (ipv6_spec) 2348 ulp_rte_enc_ipv6_hdr_handler(params, ipv6_spec); 2349 2350 if (!ulp_rte_item_skip_void(&item, 1)) 2351 return BNXT_TF_RC_ERROR; 2352 } else { 2353 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n"); 2354 return BNXT_TF_RC_ERROR; 2355 } 2356 2357 /* L4 is UDP */ 2358 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) { 2359 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n"); 2360 return BNXT_TF_RC_ERROR; 2361 } 2362 if (item->spec) 2363 ulp_rte_enc_udp_hdr_handler(params, item->spec); 2364 2365 if (!ulp_rte_item_skip_void(&item, 1)) 2366 return BNXT_TF_RC_ERROR; 2367 2368 /* Finally VXLAN */ 2369 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) { 2370 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n"); 2371 return BNXT_TF_RC_ERROR; 2372 } 2373 vxlan_size = sizeof(struct rte_flow_item_vxlan); 2374 /* copy the vxlan details */ 2375 memcpy(&vxlan_spec, item->spec, vxlan_size); 2376 vxlan_spec.hdr.flags = 0x08; 2377 vxlan_size = tfp_cpu_to_be_32(vxlan_size); 2378 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ], 2379 &vxlan_size, sizeof(uint32_t)); 2380 2381 ulp_rte_enc_vxlan_hdr_handler(params, &vxlan_spec); 2382 2383 /* update the act_bitmap with vxlan encap */ 2384 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP); 2385 return BNXT_TF_RC_SUCCESS; 2386 } 2387 2388 /* Function to handle the parsing of RTE Flow action vxlan_decap Header */ 2389 int32_t 2390 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item 2391 __rte_unused, 2392 struct ulp_rte_parser_params *params) 2393 { 2394 /* update the act_bitmap with vxlan decap */ 2395 ULP_BITMAP_SET(params->act_bitmap.bits, 2396 BNXT_ULP_ACT_BIT_VXLAN_DECAP); 2397 /* Update computational field with tunnel decap info */ 2398 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1); 2399 return BNXT_TF_RC_SUCCESS; 2400 } 2401 2402 /* Function to handle the parsing of RTE Flow action drop Header. */ 2403 int32_t 2404 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused, 2405 struct ulp_rte_parser_params *params) 2406 { 2407 /* Update the act_bitmap with drop */ 2408 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP); 2409 return BNXT_TF_RC_SUCCESS; 2410 } 2411 2412 /* Function to handle the parsing of RTE Flow action count. 
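 *
 * A sketch of the application-side count action consumed below; the id
 * is opaque to this parser and is copied into the action property as-is:
 *
 *   struct rte_flow_action_count cnt = { .id = 0 };
 *   struct rte_flow_action act = {
 *           .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &cnt };
 *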
*/ 2413 int32_t 2414 ulp_rte_count_act_handler(const struct rte_flow_action *action_item, 2415 struct ulp_rte_parser_params *params) 2416 { 2417 const struct rte_flow_action_count *act_count; 2418 struct ulp_rte_act_prop *act_prop = &params->act_prop; 2419 2420 act_count = action_item->conf; 2421 if (act_count) { 2422 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT], 2423 &act_count->id, 2424 BNXT_ULP_ACT_PROP_SZ_COUNT); 2425 } 2426 2427 /* Update the act_bitmap with count */ 2428 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT); 2429 return BNXT_TF_RC_SUCCESS; 2430 } 2431 2432 static bool ulp_rte_parser_is_portb_vfrep(struct ulp_rte_parser_params *param) 2433 { 2434 return ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_B_IS_VFREP); 2435 } 2436 2437 /* 2438 * Swaps info related to multi-port: 2439 * common: 2440 * BNXT_ULP_CF_IDX_MP_B_IS_VFREP, BNXT_ULP_CF_IDX_MP_A_IS_VFREP 2441 * BNXT_ULP_CF_IDX_MP_PORT_A, BNXT_ULP_CF_IDX_MP_PORT_B 2442 * 2443 * ingress: 2444 * BNXT_ULP_CF_IDX_MP_VNIC_B, BNXT_ULP_CF_IDX_MP_VNIC_A 2445 * 2446 * egress: 2447 * BNXT_ULP_CF_IDX_MP_MDATA_B, BNXT_ULP_CF_IDX_MP_MDATA_A 2448 * BNXT_ULP_CF_IDX_MP_VPORT_B, BNXT_ULP_CF_IDX_MP_VPORT_A 2449 * 2450 * Note: This is done as OVS could give us a non-VFREP port in port B, and we 2451 * cannot use that to mirror, so we swap out the ports so that a VFREP is now 2452 * in port B instead. 2453 */ 2454 static int32_t 2455 ulp_rte_parser_normalize_port_info(struct ulp_rte_parser_params *param) 2456 { 2457 uint16_t mp_port_a, mp_port_b, mp_mdata_a, mp_mdata_b, 2458 mp_vport_a, mp_vport_b, mp_vnic_a, mp_vnic_b, 2459 mp_is_vfrep_a, mp_is_vfrep_b; 2460 2461 mp_is_vfrep_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_A_IS_VFREP); 2462 mp_is_vfrep_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_B_IS_VFREP); 2463 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_B_IS_VFREP, mp_is_vfrep_a); 2464 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_A_IS_VFREP, mp_is_vfrep_b); 2465 2466 mp_port_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_PORT_A); 2467 mp_port_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_PORT_B); 2468 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_PORT_B, mp_port_a); 2469 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_PORT_A, mp_port_b); 2470 2471 mp_vport_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_VPORT_A); 2472 mp_vport_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_VPORT_B); 2473 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_VPORT_B, mp_vport_a); 2474 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_VPORT_A, mp_vport_b); 2475 2476 mp_vnic_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_VNIC_A); 2477 mp_vnic_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_VNIC_B); 2478 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_VNIC_B, mp_vnic_a); 2479 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_VNIC_A, mp_vnic_b); 2480 2481 mp_mdata_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_MDATA_A); 2482 mp_mdata_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_MDATA_B); 2483 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_MDATA_B, mp_mdata_a); 2484 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_MDATA_A, mp_mdata_b); 2485 2486 return BNXT_TF_RC_SUCCESS; 2487 } 2488 2489 2490 /* Function to handle the parsing of action ports. 
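 *
 * In summary (a sketch of what the function below programs):
 *
 *   egress:  ACT_PROP_IDX_VPORT (single port) or MP_VPORT_A/MP_VPORT_B,
 *            plus MP_MDATA_A/MP_MDATA_B for VF-REP destinations
 *   ingress: ACT_PROP_IDX_VNIC (single port) or MP_VNIC_A/MP_VNIC_B
 *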
*/ 2491 static int32_t 2492 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param, 2493 uint32_t ifindex, bool multi_port, 2494 enum bnxt_ulp_direction_type act_dir) 2495 { 2496 enum bnxt_ulp_direction_type dir; 2497 uint16_t pid_s; 2498 uint8_t *p_mdata; 2499 uint32_t pid, port_index; 2500 struct ulp_rte_act_prop *act = &param->act_prop; 2501 enum bnxt_ulp_intf_type port_type; 2502 uint32_t vnic_type; 2503 2504 /* Get the direction: use the direction implied by the action, if any, 2505 * else the computed flow direction. */ 2506 dir = (act_dir == BNXT_ULP_DIR_INVALID) ? 2507 ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION) : 2508 act_dir; 2509 2510 port_type = ULP_COMP_FLD_IDX_RD(param, 2511 BNXT_ULP_CF_IDX_ACT_PORT_TYPE); 2512 2513 /* Update flag if Port A/B type is VF-REP */ 2514 ULP_COMP_FLD_IDX_WR(param, multi_port ? 2515 BNXT_ULP_CF_IDX_MP_B_IS_VFREP : 2516 BNXT_ULP_CF_IDX_MP_A_IS_VFREP, 2517 (port_type == BNXT_ULP_INTF_TYPE_VF_REP) ? 1 : 0); 2518 if (dir == BNXT_ULP_DIR_EGRESS) { 2519 /* For egress direction, fill vport */ 2520 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s)) 2521 return BNXT_TF_RC_ERROR; 2522 2523 pid = pid_s; 2524 pid = rte_cpu_to_be_32(pid); 2525 if (!multi_port) 2526 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT], 2527 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT); 2528 2529 /* Fill metadata */ 2530 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP) { 2531 port_index = ULP_COMP_FLD_IDX_RD(param, multi_port ? 2532 BNXT_ULP_CF_IDX_MP_PORT_B : 2533 BNXT_ULP_CF_IDX_MP_PORT_A); 2534 if (ulp_port_db_port_meta_data_get(param->ulp_ctx, 2535 port_index, &p_mdata)) 2536 return BNXT_TF_RC_ERROR; 2537 /* 2538 * Update appropriate port (A/B) metadata based on multi-port 2539 * indication 2540 */ 2541 ULP_COMP_FLD_IDX_WR(param, 2542 multi_port ? 2543 BNXT_ULP_CF_IDX_MP_MDATA_B : 2544 BNXT_ULP_CF_IDX_MP_MDATA_A, 2545 rte_cpu_to_be_16(*((uint16_t *)p_mdata))); 2546 } 2547 /* 2548 * Update appropriate port (A/B) VPORT based on multi-port 2549 * indication. 2550 */ 2551 ULP_COMP_FLD_IDX_WR(param, 2552 multi_port ? 2553 BNXT_ULP_CF_IDX_MP_VPORT_B : 2554 BNXT_ULP_CF_IDX_MP_VPORT_A, 2555 pid_s); 2556 } else { 2557 /* For ingress direction, fill vnic */ 2558 /* 2559 * Action Destination 2560 * ------------------------------------ 2561 * PORT_REPRESENTOR Driver Function 2562 * ------------------------------------ 2563 * REPRESENTED_PORT VF 2564 * ------------------------------------ 2565 * PORT_ID VF 2566 */ 2567 if (act_dir != BNXT_ULP_DIR_INGRESS && 2568 port_type == BNXT_ULP_INTF_TYPE_VF_REP) 2569 vnic_type = BNXT_ULP_VF_FUNC_VNIC; 2570 else 2571 vnic_type = BNXT_ULP_DRV_FUNC_VNIC; 2572 2573 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex, 2574 vnic_type, &pid_s)) 2575 return BNXT_TF_RC_ERROR; 2576 2577 pid = pid_s; 2578 pid = rte_cpu_to_be_32(pid); 2579 if (!multi_port) 2580 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC], 2581 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC); 2582 /* 2583 * Update appropriate port (A/B) VNIC based on multi-port 2584 * indication. 2585 */ 2586 ULP_COMP_FLD_IDX_WR(param, 2587 multi_port ? 2588 BNXT_ULP_CF_IDX_MP_VNIC_B : 2589 BNXT_ULP_CF_IDX_MP_VNIC_A, 2590 pid_s); 2591 } 2592 2593 if (multi_port && !ulp_rte_parser_is_portb_vfrep(param)) 2594 ulp_rte_parser_normalize_port_info(param); 2595 2596 /* Update the action port set bit */ 2597 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1); 2598 return BNXT_TF_RC_SUCCESS; 2599 } 2600 2601 /* Function to handle the parsing of RTE Flow action PF. 
*/ 2602 int32_t 2603 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused, 2604 struct ulp_rte_parser_params *params) 2605 { 2606 uint32_t port_id; 2607 uint32_t ifindex; 2608 enum bnxt_ulp_intf_type intf_type; 2609 2610 /* Get the port id of the current device */ 2611 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF); 2612 2613 /* Get the port db ifindex */ 2614 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id, 2615 &ifindex)) { 2616 BNXT_TF_DBG(ERR, "Invalid port id\n"); 2617 return BNXT_TF_RC_ERROR; 2618 } 2619 2620 /* Check the port is PF port */ 2621 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex); 2622 if (intf_type != BNXT_ULP_INTF_TYPE_PF) { 2623 BNXT_TF_DBG(ERR, "Port is not a PF port\n"); 2624 return BNXT_TF_RC_ERROR; 2625 } 2626 /* Update the action properties */ 2627 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type); 2628 return ulp_rte_parser_act_port_set(params, ifindex, false, 2629 BNXT_ULP_DIR_INVALID); 2630 } 2631 2632 /* Function to handle the parsing of RTE Flow action VF. */ 2633 int32_t 2634 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item, 2635 struct ulp_rte_parser_params *params) 2636 { 2637 const struct rte_flow_action_vf *vf_action; 2638 enum bnxt_ulp_intf_type intf_type; 2639 uint32_t ifindex; 2640 struct bnxt *bp; 2641 2642 vf_action = action_item->conf; 2643 if (!vf_action) { 2644 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n"); 2645 return BNXT_TF_RC_PARSE_ERR; 2646 } 2647 2648 if (vf_action->original) { 2649 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n"); 2650 return BNXT_TF_RC_PARSE_ERR; 2651 } 2652 2653 bp = bnxt_pmd_get_bp(params->port_id); 2654 if (bp == NULL) { 2655 BNXT_TF_DBG(ERR, "Invalid bp\n"); 2656 return BNXT_TF_RC_ERROR; 2657 } 2658 2659 /* vf_action->id is a logical number which in this case is an 2660 * offset from the first VF. So, to get the absolute VF id, the 2661 * offset must be added to the absolute first vf id of that port. 2662 */ 2663 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, 2664 bp->first_vf_id + 2665 vf_action->id, 2666 &ifindex)) { 2667 BNXT_TF_DBG(ERR, "VF is not valid interface\n"); 2668 return BNXT_TF_RC_ERROR; 2669 } 2670 /* Check the port is VF port */ 2671 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex); 2672 if (intf_type != BNXT_ULP_INTF_TYPE_VF && 2673 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) { 2674 BNXT_TF_DBG(ERR, "Port is not a VF port\n"); 2675 return BNXT_TF_RC_ERROR; 2676 } 2677 2678 /* Update the action properties */ 2679 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type); 2680 return ulp_rte_parser_act_port_set(params, ifindex, false, 2681 BNXT_ULP_DIR_INVALID); 2682 } 2683 2684 /* Parse actions PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. 
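 *
 * REPRESENTED_PORT is treated as egress, PORT_REPRESENTOR as ingress,
 * and PORT_ID defers to the flow attributes. A sketch of the
 * application-side action (public rte_flow API; port number
 * illustrative):
 *
 *   struct rte_flow_action_ethdev conf = { .port_id = 3 };
 *   struct rte_flow_action act = {
 *           .type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
 *           .conf = &conf,
 *   };
 *
 * Two such port actions in one list take the multi-port (mirror) path:
 * the first destination lands in MP_PORT_A, the second sets
 * BNXT_ULP_ACT_BIT_MULTIPLE_PORT and lands in MP_PORT_B.
 *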
*/ 2685 int32_t 2686 ulp_rte_port_act_handler(const struct rte_flow_action *act_item, 2687 struct ulp_rte_parser_params *param) 2688 { 2689 uint32_t ethdev_id; 2690 uint32_t ifindex; 2692 uint32_t num_ports; 2693 enum bnxt_ulp_intf_type intf_type; 2694 enum bnxt_ulp_direction_type act_dir; 2695 2696 if (!act_item->conf) { 2697 BNXT_TF_DBG(ERR, 2698 "ParseErr: Invalid Argument\n"); 2699 return BNXT_TF_RC_PARSE_ERR; 2700 } 2701 switch (act_item->type) { 2702 case RTE_FLOW_ACTION_TYPE_PORT_ID: { 2703 const struct rte_flow_action_port_id *port_id = act_item->conf; 2704 2705 if (port_id->original) { 2706 BNXT_TF_DBG(ERR, 2707 "ParseErr:Portid Original not supported\n"); 2708 return BNXT_TF_RC_PARSE_ERR; 2709 } 2710 ethdev_id = port_id->id; 2711 act_dir = BNXT_ULP_DIR_INVALID; 2712 break; 2713 } 2714 case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: { 2715 const struct rte_flow_action_ethdev *ethdev = act_item->conf; 2716 2717 ethdev_id = ethdev->port_id; 2718 act_dir = BNXT_ULP_DIR_INGRESS; 2719 break; 2720 } 2721 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: { 2722 const struct rte_flow_action_ethdev *ethdev = act_item->conf; 2723 2724 ethdev_id = ethdev->port_id; 2725 act_dir = BNXT_ULP_DIR_EGRESS; 2726 break; 2727 } 2728 default: 2729 BNXT_TF_DBG(ERR, "Unknown port action\n"); 2730 return BNXT_TF_RC_ERROR; 2731 } 2732 2733 num_ports = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_NPORTS); 2734 2735 if (num_ports) { 2736 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_PORT_B, 2737 ethdev_id); 2738 ULP_BITMAP_SET(param->act_bitmap.bits, 2739 BNXT_ULP_ACT_BIT_MULTIPLE_PORT); 2740 } else { 2741 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_PORT_A, 2742 ethdev_id); 2743 } 2744 2745 /* Get the port db ifindex */ 2746 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, ethdev_id, 2747 &ifindex)) { 2748 BNXT_TF_DBG(ERR, "Invalid port id\n"); 2749 return BNXT_TF_RC_ERROR; 2750 } 2751 2752 /* Get the intf type */ 2753 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex); 2754 if (!intf_type) { 2755 BNXT_TF_DBG(ERR, "Invalid port type\n"); 2756 return BNXT_TF_RC_ERROR; 2757 } 2758 2759 /* Set the action port */ 2760 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type); 2761 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_DEV_ACT_PORT_ID, 2762 ethdev_id); 2763 2764 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_NPORTS, ++num_ports); 2765 return ulp_rte_parser_act_port_set(param, ifindex, 2766 ULP_BITMAP_ISSET(param->act_bitmap.bits, 2767 BNXT_ULP_ACT_BIT_MULTIPLE_PORT), 2768 act_dir); 2769 } 2770 2771 /* Function to handle the parsing of RTE Flow action pop vlan. */ 2772 int32_t 2773 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused, 2774 struct ulp_rte_parser_params *params) 2775 { 2776 /* Update the act_bitmap with pop */ 2777 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN); 2778 return BNXT_TF_RC_SUCCESS; 2779 } 2780 2781 /* Function to handle the parsing of RTE Flow action push vlan. 
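 *
 * The VLAN rewrite handlers below are typically chained by applications;
 * a sketch of the action list they consume (values illustrative):
 *
 *   struct rte_flow_action_of_push_vlan push = {
 *           .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN) };
 *   struct rte_flow_action_of_set_vlan_vid vid = {
 *           .vlan_vid = RTE_BE16(100) };
 *   struct rte_flow_action_of_set_vlan_pcp pcp = { .vlan_pcp = 3 };
 *   struct rte_flow_action acts[] = {
 *           { RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, &push },
 *           { RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, &vid },
 *           { RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, &pcp },
 *           { RTE_FLOW_ACTION_TYPE_END, NULL },
 *   };
 *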
*/ 2782 int32_t 2783 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item, 2784 struct ulp_rte_parser_params *params) 2785 { 2786 const struct rte_flow_action_of_push_vlan *push_vlan; 2787 uint16_t ethertype; 2788 struct ulp_rte_act_prop *act = &params->act_prop; 2789 2790 push_vlan = action_item->conf; 2791 if (push_vlan) { 2792 ethertype = push_vlan->ethertype; 2793 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) { 2794 BNXT_TF_DBG(ERR, 2795 "Parse Err: Ethertype not supported\n"); 2796 return BNXT_TF_RC_PARSE_ERR; 2797 } 2798 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN], 2799 &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN); 2800 /* Update the act_bitmap with push vlan */ 2801 ULP_BITMAP_SET(params->act_bitmap.bits, 2802 BNXT_ULP_ACT_BIT_PUSH_VLAN); 2803 return BNXT_TF_RC_SUCCESS; 2804 } 2805 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n"); 2806 return BNXT_TF_RC_ERROR; 2807 } 2808 2809 /* Function to handle the parsing of RTE Flow action set vlan id. */ 2810 int32_t 2811 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item, 2812 struct ulp_rte_parser_params *params) 2813 { 2814 const struct rte_flow_action_of_set_vlan_vid *vlan_vid; 2815 uint32_t vid; 2816 struct ulp_rte_act_prop *act = &params->act_prop; 2817 2818 vlan_vid = action_item->conf; 2819 if (vlan_vid && vlan_vid->vlan_vid) { 2820 vid = vlan_vid->vlan_vid; 2821 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID], 2822 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID); 2823 /* Update the act_bitmap with vlan vid */ 2824 ULP_BITMAP_SET(params->act_bitmap.bits, 2825 BNXT_ULP_ACT_BIT_SET_VLAN_VID); 2826 return BNXT_TF_RC_SUCCESS; 2827 } 2828 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n"); 2829 return BNXT_TF_RC_ERROR; 2830 } 2831 2832 /* Function to handle the parsing of RTE Flow action set vlan pcp. 
*/ 2833 int32_t 2834 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item, 2835 struct ulp_rte_parser_params *params) 2836 { 2837 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp; 2838 uint8_t pcp; 2839 struct ulp_rte_act_prop *act = &params->act_prop; 2840 2841 vlan_pcp = action_item->conf; 2842 if (vlan_pcp) { 2843 pcp = vlan_pcp->vlan_pcp; 2844 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP], 2845 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP); 2846 /* Update the act_bitmap with vlan pcp */ 2847 ULP_BITMAP_SET(params->act_bitmap.bits, 2848 BNXT_ULP_ACT_BIT_SET_VLAN_PCP); 2849 return BNXT_TF_RC_SUCCESS; 2850 } 2851 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n"); 2852 return BNXT_TF_RC_ERROR; 2853 } 2854 2855 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/ 2856 int32_t 2857 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item, 2858 struct ulp_rte_parser_params *params) 2859 { 2860 const struct rte_flow_action_set_ipv4 *set_ipv4; 2861 struct ulp_rte_act_prop *act = &params->act_prop; 2862 2863 set_ipv4 = action_item->conf; 2864 if (set_ipv4) { 2865 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC], 2866 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC); 2867 /* Update the act_bitmap with set ipv4 src */ 2868 ULP_BITMAP_SET(params->act_bitmap.bits, 2869 BNXT_ULP_ACT_BIT_SET_IPV4_SRC); 2870 return BNXT_TF_RC_SUCCESS; 2871 } 2872 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n"); 2873 return BNXT_TF_RC_ERROR; 2874 } 2875 2876 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/ 2877 int32_t 2878 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item, 2879 struct ulp_rte_parser_params *params) 2880 { 2881 const struct rte_flow_action_set_ipv4 *set_ipv4; 2882 struct ulp_rte_act_prop *act = &params->act_prop; 2883 2884 set_ipv4 = action_item->conf; 2885 if (set_ipv4) { 2886 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST], 2887 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST); 2888 /* Update the act_bitmap with set ipv4 dst */ 2889 ULP_BITMAP_SET(params->act_bitmap.bits, 2890 BNXT_ULP_ACT_BIT_SET_IPV4_DST); 2891 return BNXT_TF_RC_SUCCESS; 2892 } 2893 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n"); 2894 return BNXT_TF_RC_ERROR; 2895 } 2896 2897 /* Function to handle the parsing of RTE Flow action set tp src.*/ 2898 int32_t 2899 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item, 2900 struct ulp_rte_parser_params *params) 2901 { 2902 const struct rte_flow_action_set_tp *set_tp; 2903 struct ulp_rte_act_prop *act = &params->act_prop; 2904 2905 set_tp = action_item->conf; 2906 if (set_tp) { 2907 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC], 2908 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC); 2909 /* Update the act_bitmap with set tp src */ 2910 ULP_BITMAP_SET(params->act_bitmap.bits, 2911 BNXT_ULP_ACT_BIT_SET_TP_SRC); 2912 return BNXT_TF_RC_SUCCESS; 2913 } 2914 2915 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n"); 2916 return BNXT_TF_RC_ERROR; 2917 } 2918 2919 /* Function to handle the parsing of RTE Flow action set tp dst.*/ 2920 int32_t 2921 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item, 2922 struct ulp_rte_parser_params *params) 2923 { 2924 const struct rte_flow_action_set_tp *set_tp; 2925 struct ulp_rte_act_prop *act = &params->act_prop; 2926 2927 set_tp = action_item->conf; 2928 if (set_tp) { 2929 
memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST], 2930 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST); 2931 /* Update the act_bitmap with set tp dst */ 2932 ULP_BITMAP_SET(params->act_bitmap.bits, 2933 BNXT_ULP_ACT_BIT_SET_TP_DST); 2934 return BNXT_TF_RC_SUCCESS; 2935 } 2936 2937 BNXT_TF_DBG(ERR, "Parse Error: set tp dst arg is invalid\n"); 2938 return BNXT_TF_RC_ERROR; 2939 } 2940 2941 /* Function to handle the parsing of RTE Flow action dec ttl.*/ 2942 int32_t 2943 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused, 2944 struct ulp_rte_parser_params *params) 2945 { 2946 /* Update the act_bitmap with dec ttl */ 2947 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL); 2948 return BNXT_TF_RC_SUCCESS; 2949 } 2950 2951 /* Function to handle the parsing of RTE Flow action JUMP */ 2952 int32_t 2953 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused, 2954 struct ulp_rte_parser_params *params) 2955 { 2956 /* Update the act_bitmap with jump */ 2957 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP); 2958 return BNXT_TF_RC_SUCCESS; 2959 } 2960 2961 int32_t 2962 ulp_rte_sample_act_handler(const struct rte_flow_action *action_item, 2963 struct ulp_rte_parser_params *params) 2964 { 2965 const struct rte_flow_action_sample *sample; 2966 int ret; 2967 2968 sample = action_item->conf; 2969 2970 /* if SAMPLE bit is set it means this sample action is nested within the 2971 * actions of another sample action; this is not allowed 2972 */ 2973 if (ULP_BITMAP_ISSET(params->act_bitmap.bits, 2974 BNXT_ULP_ACT_BIT_SAMPLE)) 2975 return BNXT_TF_RC_ERROR; 2976 2977 /* a sample action is only allowed as a shared action */ 2978 if (!ULP_BITMAP_ISSET(params->act_bitmap.bits, 2979 BNXT_ULP_ACT_BIT_SHARED)) 2980 return BNXT_TF_RC_ERROR; 2981 2982 /* only a ratio of 1 i.e. 
100% is supported */ 2983 if (sample->ratio != 1) 2984 return BNXT_TF_RC_ERROR; 2985 2986 if (!sample->actions) 2987 return BNXT_TF_RC_ERROR; 2988 2989 /* parse the nested actions for a sample action */ 2990 ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params); 2991 if (ret == BNXT_TF_RC_SUCCESS) 2992 /* Update the act_bitmap with sample */ 2993 ULP_BITMAP_SET(params->act_bitmap.bits, 2994 BNXT_ULP_ACT_BIT_SAMPLE); 2995 2996 return ret; 2997 } 2998 2999 int32_t 3000 ulp_rte_action_hdlr_handler(const struct rte_flow_action *action_item, 3001 struct ulp_rte_parser_params *params) 3002 { 3003 const struct rte_flow_action_handle *handle; 3004 struct bnxt_ulp_shared_act_info *act_info; 3005 uint64_t action_bitmask; 3006 uint32_t shared_action_type; 3007 struct ulp_rte_act_prop *act = &params->act_prop; 3008 uint64_t tmp64; 3009 enum bnxt_ulp_direction_type dir, handle_dir; 3010 uint32_t act_info_entries = 0; 3011 int32_t ret; 3012 3013 handle = action_item->conf; 3014 3015 /* Have to use the computed direction since the params->dir_attr 3016 * can be different (transfer, ingress, egress) 3017 */ 3018 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION); 3019 3020 /* direction of shared action must match direction of flow */ 3021 ret = bnxt_get_action_handle_direction(handle, &handle_dir); 3022 if (ret || dir != handle_dir) { 3023 BNXT_TF_DBG(ERR, "Invalid shared handle or direction\n"); 3024 return BNXT_TF_RC_ERROR; 3025 } 3026 3027 if (bnxt_get_action_handle_type(handle, &shared_action_type)) { 3028 BNXT_TF_DBG(ERR, "Invalid shared handle\n"); 3029 return BNXT_TF_RC_ERROR; 3030 } 3031 3032 act_info = bnxt_ulp_shared_act_info_get(&act_info_entries); 3033 if (shared_action_type >= act_info_entries || !act_info) { 3034 BNXT_TF_DBG(ERR, "Invalid shared handle\n"); 3035 return BNXT_TF_RC_ERROR; 3036 } 3037 3038 action_bitmask = act_info[shared_action_type].act_bitmask; 3039 3040 /* shared actions of the same type cannot be repeated */ 3041 if (params->act_bitmap.bits & action_bitmask) { 3042 BNXT_TF_DBG(ERR, "indirect actions cannot be repeated\n"); 3043 return BNXT_TF_RC_ERROR; 3044 } 3045 3046 tmp64 = tfp_cpu_to_be_64((uint64_t)bnxt_get_action_handle_index(handle)); 3047 3048 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE], 3049 &tmp64, BNXT_ULP_ACT_PROP_SZ_SHARED_HANDLE); 3050 3051 ULP_BITMAP_SET(params->act_bitmap.bits, action_bitmask); 3052 3053 return BNXT_TF_RC_SUCCESS; 3054 } 3055 3056 /* Function to handle the parsing of the bnxt vendor Flow action vxlan decap Header. */ 3057 int32_t 3058 ulp_vendor_vxlan_decap_act_handler(const struct rte_flow_action *action_item, 3059 struct ulp_rte_parser_params *params) 3060 { 3061 /* Set the F1 flow header bit */ 3062 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1); 3063 return ulp_rte_vxlan_decap_act_handler(action_item, params); 3064 } 3065 3066 /* Function to handle the parsing of the bnxt vendor Flow item vxlan decap Header. */ 3067 int32_t 3068 ulp_rte_vendor_vxlan_decap_hdr_handler(const struct rte_flow_item *item, 3069 struct ulp_rte_parser_params *params) 3070 { 3071 RTE_SET_USED(item); 3072 /* Set the F2 flow header bit */ 3073 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F2); 3074 return ulp_rte_vxlan_decap_act_handler(NULL, params); 3075 } 3076 3077 /* Function to handle the parsing of RTE Flow action queue. 
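 *
 * A sketch of the application-side queue action consumed below (queue
 * index illustrative):
 *
 *   struct rte_flow_action_queue q = { .index = 4 };
 *   struct rte_flow_action act = {
 *           .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q };
 *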
*/ 3078 int32_t 3079 ulp_rte_queue_act_handler(const struct rte_flow_action *action_item, 3080 struct ulp_rte_parser_params *param) 3081 { 3082 const struct rte_flow_action_queue *q_info; 3083 struct ulp_rte_act_prop *ap = &param->act_prop; 3084 3085 if (action_item == NULL || action_item->conf == NULL) { 3086 BNXT_TF_DBG(ERR, "Parse Err: invalid queue configuration\n"); 3087 return BNXT_TF_RC_ERROR; 3088 } 3089 3090 q_info = action_item->conf; 3091 /* Copy the queue into the specific action properties */ 3092 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_QUEUE_INDEX], 3093 &q_info->index, BNXT_ULP_ACT_PROP_SZ_QUEUE_INDEX); 3094 3095 /* set the queue action header bit */ 3096 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_QUEUE); 3097 3098 return BNXT_TF_RC_SUCCESS; 3099 } 3100 3101 /* Function to handle the parsing of RTE Flow action meter. */ 3102 int32_t 3103 ulp_rte_meter_act_handler(const struct rte_flow_action *action_item, 3104 struct ulp_rte_parser_params *params) 3105 { 3106 const struct rte_flow_action_meter *meter; 3107 struct ulp_rte_act_prop *act_prop = &params->act_prop; 3108 uint32_t tmp_meter_id; 3109 3110 if (action_item == NULL || action_item->conf == NULL) { 3111 BNXT_TF_DBG(ERR, "Parse Err: invalid meter configuration\n"); 3112 return BNXT_TF_RC_ERROR; 3113 } 3114 3115 meter = action_item->conf; 3116 if (meter) { 3117 /* validate the mtr_id and update the reference counter */ 3118 tmp_meter_id = tfp_cpu_to_be_32(meter->mtr_id); 3119 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_METER], 3120 &tmp_meter_id, 3121 BNXT_ULP_ACT_PROP_SZ_METER); 3122 } 3123 3124 /* set the meter action header bit */ 3125 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_METER); 3126 3127 return BNXT_TF_RC_SUCCESS; 3128 } 3129 3130 /* Function to handle the parsing of RTE Flow action set mac src.*/ 3131 int32_t 3132 ulp_rte_set_mac_src_act_handler(const struct rte_flow_action *action_item, 3133 struct ulp_rte_parser_params *params) 3134 { 3135 const struct rte_flow_action_set_mac *set_mac; 3136 struct ulp_rte_act_prop *act = &params->act_prop; 3137 3138 set_mac = action_item->conf; 3139 if (set_mac) { 3140 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC], 3141 set_mac->mac_addr, BNXT_ULP_ACT_PROP_SZ_SET_MAC_SRC); 3142 /* Update the act_bitmap with set mac src */ 3143 ULP_BITMAP_SET(params->act_bitmap.bits, 3144 BNXT_ULP_ACT_BIT_SET_MAC_SRC); 3145 return BNXT_TF_RC_SUCCESS; 3146 } 3147 BNXT_TF_DBG(ERR, "Parse Error: set mac src arg is invalid\n"); 3148 return BNXT_TF_RC_ERROR; 3149 } 3150 3151 /* Function to handle the parsing of RTE Flow action set mac dst.*/ 3152 int32_t 3153 ulp_rte_set_mac_dst_act_handler(const struct rte_flow_action *action_item, 3154 struct ulp_rte_parser_params *params) 3155 { 3156 const struct rte_flow_action_set_mac *set_mac; 3157 struct ulp_rte_act_prop *act = &params->act_prop; 3158 3159 set_mac = action_item->conf; 3160 if (set_mac) { 3161 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST], 3162 set_mac->mac_addr, BNXT_ULP_ACT_PROP_SZ_SET_MAC_DST); 3163 /* Update the act_bitmap with set mac dst */ 3164 ULP_BITMAP_SET(params->act_bitmap.bits, 3165 BNXT_ULP_ACT_BIT_SET_MAC_DST); 3166 return BNXT_TF_RC_SUCCESS; 3167 } 3168 BNXT_TF_DBG(ERR, "Parse Error: set mac dst arg is invalid\n"); 3169 return BNXT_TF_RC_ERROR; 3170 } 3171
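/*
 * For reference, a sketch of the application-side MAC rewrite action
 * consumed by the two handlers above (address illustrative):
 *
 *   struct rte_flow_action_set_mac smac = {
 *           .mac_addr = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
 *   struct rte_flow_action act = {
 *           .type = RTE_FLOW_ACTION_TYPE_SET_MAC_SRC, .conf = &smac };
 */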