/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2023 Broadcom
 * All rights reserved.
 */

#include "bnxt.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "bnxt_ulp.h"
#include "bnxt_ulp_utils.h"
#include "bnxt_tf_common.h"
#include "bnxt_tf_pmd_shim.h"
#include "ulp_rte_parser.h"
#include "ulp_matcher.h"
#include "ulp_utils.h"
#include "tfp.h"
#include "ulp_port_db.h"
#include "ulp_flow_db.h"
#include "ulp_mapper.h"
#include "ulp_tun.h"
#include "ulp_template_db_tbl.h"

/* Local defines for the parsing functions */
#define ULP_VLAN_PRIORITY_SHIFT		13 /* First 3 bits */
#define ULP_VLAN_PRIORITY_MASK		0x700
#define ULP_VLAN_TAG_MASK		0xFFF /* Last 12 bits */
#define ULP_UDP_PORT_VXLAN		4789
#define ULP_UDP_PORT_VXLAN_MASK		0xFFFF
#define ULP_UDP_PORT_VXLAN_GPE		4790
#define ULP_UDP_PORT_VXLAN_GPE_MASK	0xFFFF
#define ULP_UDP_PORT_GENEVE		6081
#define ULP_UDP_PORT_GENEVE_MASK	0xFFFF

/**
 * Geneve header, first 16 bits:
 * Version (2b), length of the option fields (6b), OAM packet (1b),
 * critical options present (1b), reserved 0 (6b).
 */
#define ULP_GENEVE_OPT_MAX_SIZE	6 /* HW only supports 6 words */
#define ULP_GENEVE_OPTLEN_MASK	0x3F
#define ULP_GENEVE_OPTLEN_SHIFT	8
#define ULP_GENEVE_OPTLEN_VAL(a) \
	(((a) >> (ULP_GENEVE_OPTLEN_SHIFT)) & (ULP_GENEVE_OPTLEN_MASK))

/* Utility function to skip the void items. */
static inline int32_t
ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
{
	if (!*item)
		return 0;
	if (increment)
		(*item)++;
	while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
		(*item)++;
	if (*item)
		return 1;
	return 0;
}

/* Utility function to copy field spec items */
static inline struct ulp_rte_hdr_field *
ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
			const void *buffer,
			uint32_t size)
{
	field->size = size;
	memcpy(field->spec, buffer, field->size);
	field++;
	return field;
}

/* Utility function to update the field_bitmap */
static void
ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
				   uint32_t idx,
				   enum bnxt_ulp_prsr_action prsr_act)
{
	struct ulp_rte_hdr_field *field;

	field = &params->hdr_field[idx];
	if (ulp_bitmap_notzero(field->mask, field->size)) {
		ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
		if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
			ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
		/* Not an exact match */
		if (!ulp_bitmap_is_ones(field->mask, field->size))
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_WC_MATCH, 1);
	} else {
		ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
	}
}

#define ulp_deference_struct(x, y)	((x) ? &((x)->y) : NULL)
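/*
 * Example (illustrative): with a possibly-NULL item spec such as
 * "const struct rte_flow_item_eth *eth_spec", the expression
 *	ulp_deference_struct(eth_spec, hdr.ether_type)
 * evaluates to &eth_spec->hdr.ether_type when eth_spec is non-NULL and
 * to NULL otherwise, so spec/mask pointers can be handed to
 * ulp_rte_prsr_fld_mask() without per-field NULL checks at each
 * call site.
 */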
/* Utility function to copy field spec and mask items */
static inline void
ulp_rte_prsr_fld_mask(struct ulp_rte_parser_params *params,
		      uint32_t *idx,
		      uint32_t size,
		      const void *spec_buff,
		      const void *mask_buff,
		      enum bnxt_ulp_prsr_action prsr_act)
{
	struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];

	/* update the field size */
	field->size = size;

	/* copy the mask only if it is valid and the spec is non-zero */
	if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff &&
	    spec_buff && ulp_bitmap_notzero(spec_buff, size)) {
		memcpy(field->mask, mask_buff, size);
		ulp_rte_parser_field_bitmap_update(params, *idx, prsr_act);
	}

	/* copy the protocol spec only if the mask is non-zero */
	if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size))
		memcpy(field->spec, spec_buff, size);

	/* Increment the index */
	*idx = *idx + 1;
}

/* Utility function to validate the field size and reserve the field index */
static inline int32_t
ulp_rte_prsr_fld_size_validate(struct ulp_rte_parser_params *params,
			       uint32_t *idx,
			       uint32_t size)
{
	if (unlikely(params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX)) {
		BNXT_DRV_DBG(ERR, "OOB for field processing %u\n", *idx);
		return -EINVAL;
	}
	*idx = params->field_idx;
	params->field_idx += size;
	return 0;
}

/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow items into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item *item = pattern;
	struct bnxt_ulp_rte_hdr_info *hdr_info;

	params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;

	/* Parse all the items in the pattern */
	while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->type >= (typeof(item->type))
		    BNXT_RTE_FLOW_ITEM_TYPE_END) {
			if (item->type >=
			    (typeof(item->type))BNXT_RTE_FLOW_ITEM_TYPE_LAST)
				goto hdr_parser_error;
			/* get the header information */
			hdr_info = &ulp_vendor_hdr_info[item->type -
				BNXT_RTE_FLOW_ITEM_TYPE_END];
		} else {
			if (item->type > RTE_FLOW_ITEM_TYPE_ECPRI)
				goto hdr_parser_error;
			hdr_info = &ulp_hdr_info[item->type];
		}
		if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
			goto hdr_parser_error;
		} else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_hdr_func) {
				if (hdr_info->proto_hdr_func(item, params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		item++;
	}
	/* update the implied SVIF */
	return ulp_rte_parser_implicit_match_port_process(params);

hdr_parser_error:
	BNXT_DRV_DBG(ERR, "Truflow parser does not support type %d\n",
		     item->type);
	return BNXT_TF_RC_PARSE_ERR;
}
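/*
 * Illustrative walk of the dispatch above: for a pattern of
 * ETH / IPV4 / UDP / END, the loop looks up ulp_hdr_info[] for each
 * item type and invokes ulp_rte_eth_hdr_handler(),
 * ulp_rte_ipv4_hdr_handler() and ulp_rte_udp_hdr_handler() in order,
 * then derives the implicit SVIF match from the incoming port.
 */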
/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow actions into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action *action_item = actions;
	struct bnxt_ulp_rte_act_info *hdr_info;

	/* Parse all the actions in the action list */
	while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
		if (action_item->type >=
		    (typeof(action_item->type))BNXT_RTE_FLOW_ACTION_TYPE_END) {
			if (action_item->type >=
			    (typeof(action_item->type))BNXT_RTE_FLOW_ACTION_TYPE_LAST)
				goto act_parser_error;
			/* get the action information from the bnxt vendor act info table */
			hdr_info = &ulp_vendor_act_info[action_item->type -
				BNXT_RTE_FLOW_ACTION_TYPE_END];
		} else {
			if (action_item->type > RTE_FLOW_ACTION_TYPE_INDIRECT)
				goto act_parser_error;
			/* get the action information from the act info table */
			hdr_info = &ulp_act_info[action_item->type];
		}
		if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
			goto act_parser_error;
		} else if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_act_func) {
				if (hdr_info->proto_act_func(action_item,
							     params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		action_item++;
	}
	/* update the implied port details */
	ulp_rte_parser_implicit_act_port_process(params);
	return BNXT_TF_RC_SUCCESS;

act_parser_error:
	BNXT_DRV_DBG(ERR, "Truflow parser does not support act %u\n",
		     action_item->type);
	return BNXT_TF_RC_ERROR;
}
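/*
 * Illustrative walk of the action dispatch above: for an action list
 * of PORT_ID / END, the loop looks up ulp_act_info[] for the PORT_ID
 * action and invokes its registered callback (ulp_rte_port_act_handler,
 * also used below for the implicit port); unsupported action types
 * fail the parse instead of being silently skipped.
 */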
/*
 * Function to handle the post processing of the computed
 * fields for the interface.
 */
static void
bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
{
	uint32_t ifindex;
	uint16_t port_id, parif, svif;
	uint32_t mtype;
	enum bnxt_ulp_direction_type dir;

	/* get the direction details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);

	/* read the port id details */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_DRV_DBG(ERR, "ParseErr:Portid is not valid\n");
		return;
	}

	if (dir == BNXT_ULP_DIR_INGRESS) {
		/* Set port PARIF */
		if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
					  BNXT_ULP_DRV_FUNC_PARIF, &parif)) {
			BNXT_DRV_DBG(ERR, "ParseErr:ifindex is not valid\n");
			return;
		}
		/* Note:
		 * We save the drv_func_parif into the CF_IDX of phy_port_parif,
		 * since that index is currently referenced by ingress templates
		 * for datapath flows. If in the future we change the parser to
		 * save it in the CF_IDX of drv_func_parif we also need to
		 * update the template.
		 * WARNING: Two VFs on the same parent PF will not work, as the
		 * parif is based on the fw fid of the parent PF.
		 */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
				    parif);
		/* Set port SVIF */
		if (ulp_port_db_svif_get(params->ulp_ctx, ifindex,
					 BNXT_ULP_PHY_PORT_SVIF, &svif)) {
			BNXT_DRV_DBG(ERR, "ParseErr:ifindex is not valid\n");
			return;
		}
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_SVIF,
				    svif);
	} else {
		/* Get the match port type */
		mtype = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
		if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
					    1);
			/* Set VF func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_VF_FUNC_PARIF,
						  &parif)) {
				BNXT_DRV_DBG(ERR,
					     "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
					    parif);

		} else {
			/* Set DRV func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_DRV_FUNC_PARIF,
						  &parif)) {
				BNXT_DRV_DBG(ERR,
					     "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
					    parif);
		}
		if (mtype == BNXT_ULP_INTF_TYPE_PF) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
					    1);
		}
	}
}

static int32_t
ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_intf_type match_port_type, act_port_type;
	enum bnxt_ulp_direction_type dir;
	uint32_t act_port_set;

	/* Get the computed details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
	act_port_type = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
	act_port_set = ULP_COMP_FLD_IDX_RD(params,
					   BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);

	/* set the flow direction in the proto and action header */
	if (dir == BNXT_ULP_DIR_EGRESS) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
	} else {
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_ING);
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_ING);
	}

	/* Evaluate the VF to VF flag */
	if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
		if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
				      BNXT_ULP_ACT_BIT_MULTIPLE_PORT)) {
			ULP_BITMAP_SET(params->act_bitmap.bits,
				       BNXT_ULP_ACT_BIT_VF_TO_VF);
		} else {
			if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_MP_A_IS_VFREP) &&
			    ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_MP_B_IS_VFREP))
				ULP_BITMAP_SET(params->act_bitmap.bits,
					       BNXT_ULP_ACT_BIT_VF_TO_VF);
			else
				ULP_BITMAP_RESET(params->act_bitmap.bits,
						 BNXT_ULP_ACT_BIT_VF_TO_VF);
		}
	}

	/* Update the decrement ttl computational fields */
	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
			     BNXT_ULP_ACT_BIT_DEC_TTL)) {
		/*
		 * If the vxlan header is present and the vxlan decap
		 * action is not set, then decrement the tunnel ttl.
		 * Similarly add GRE and NVGRE in the future.
		 */
		if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
				      BNXT_ULP_HDR_BIT_T_VXLAN) &&
		    !ULP_BITMAP_ISSET(params->act_bitmap.bits,
				      BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
		} else {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
		}
	}

	/* Merge the hdr_fp_bit into the proto header bit */
	params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;

	/* Update the comp fld fid */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);

	/* set the L2 context usage; this may change later */
	ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_L2_CNTXT_ID);

	/* Update the computed interface parameters */
	bnxt_ulp_comp_fld_intf_update(params);

	/* TBD: Handle the flow rejection scenarios */
	return 0;
}

/*
 * Function to handle the post processing of the parsing details
 */
void
bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
{
	ulp_post_process_normal_flow(params);
}

/*
 * Function to compute the flow direction based on the match port details
 */
static void
bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_intf_type match_port_type;

	/* Get the match port type */
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);

	/* If ingress flow and the match port is a VF rep then dir is egress */
	if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
				    BNXT_ULP_DIR_EGRESS);
	} else {
		/* Assign the input direction */
		if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
					    BNXT_ULP_DIR_INGRESS);
		else if (params->dir_attr & BNXT_ULP_FLOW_ATTR_EGRESS)
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
					    BNXT_ULP_DIR_EGRESS);
		else if (match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
					    BNXT_ULP_DIR_EGRESS);
		else
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
					    BNXT_ULP_DIR_INGRESS);
	}
}
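/*
 * Worked example of the direction computation above (illustrative):
 * a flow created with the ingress attribute that matches on a VF
 * representor port is treated as egress, since traffic matched on the
 * VF rep is heading towards the VF; a plain ingress attribute on a
 * physical port stays BNXT_ULP_DIR_INGRESS.
 */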
/* Function to set the SVIF match field for the given ifindex and direction. */
static int32_t
ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
			uint32_t ifindex,
			uint16_t mask,
			enum bnxt_ulp_direction_type item_dir)
{
	uint16_t svif;
	enum bnxt_ulp_direction_type dir;
	struct ulp_rte_hdr_field *hdr_field;
	enum bnxt_ulp_svif_type svif_type;
	enum bnxt_ulp_intf_type port_type;

	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
	    BNXT_ULP_INVALID_SVIF_VAL) {
		BNXT_DRV_DBG(ERR,
			     "SVIF already set, multiple sources not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get port type details */
	port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
		BNXT_DRV_DBG(ERR, "Invalid port type\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the match port type */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);

	/* compute the direction */
	bnxt_ulp_rte_parser_direction_compute(params);

	/* Get the computed direction */
	dir = (item_dir != BNXT_ULP_DIR_INVALID) ? item_dir :
		ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_INGRESS &&
	    port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
		svif_type = BNXT_ULP_PHY_PORT_SVIF;
	} else {
		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
		    item_dir != BNXT_ULP_DIR_EGRESS)
			svif_type = BNXT_ULP_VF_FUNC_SVIF;
		else
			svif_type = BNXT_ULP_DRV_FUNC_SVIF;
	}
	ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type, &svif);
	svif = rte_cpu_to_be_16(svif);
	mask = rte_cpu_to_be_16(mask);
	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
	memcpy(hdr_field->spec, &svif, sizeof(svif));
	memcpy(hdr_field->mask, &mask, sizeof(mask));
	hdr_field->size = sizeof(svif);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
			    rte_be_to_cpu_16(svif));
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of the RTE port id */
int32_t
ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
{
	uint16_t port_id = 0;
	uint16_t svif_mask = 0xFFFF;
	uint32_t ifindex;
	int32_t rc = BNXT_TF_RC_ERROR;

	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
	    BNXT_ULP_INVALID_SVIF_VAL)
		return BNXT_TF_RC_SUCCESS;

	/* SVIF not set. So get the port id */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_DRV_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}

	/* Update the SVIF details */
	rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask,
				     BNXT_ULP_DIR_INVALID);
	return rc;
}

/* Function to handle the implicit action port id */
int32_t
ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
{
	struct rte_flow_action action_item = {0};
	struct rte_flow_action_port_id port_id = {0};

	/* Read the action port set bit */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
		/* Already set, so just exit */
		return BNXT_TF_RC_SUCCESS;
	}
	port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
	action_item.type = RTE_FLOW_ACTION_TYPE_PORT_ID;
	action_item.conf = &port_id;

	/* Update the action port based on the incoming port */
	ulp_rte_port_act_handler(&action_item, params);

	/* Reset the action port set bit */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
	return BNXT_TF_RC_SUCCESS;
}
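/*
 * Illustrative effect of the implicit handling above: a flow whose
 * action list carries no port action gets a synthesized PORT_ID
 * action pointing back at the incoming interface, and
 * ACT_PORT_IS_SET is then cleared again to record that the port was
 * implied rather than explicitly requested.
 */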
/* Parse items PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
int32_t
ulp_rte_port_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_direction_type item_dir;
	uint16_t ethdev_id;
	uint16_t mask = 0;
	uint32_t ifindex;
	int32_t rc = BNXT_TF_RC_PARSE_ERR;

	if (!item->spec) {
		BNXT_DRV_DBG(ERR, "ParseErr:Port spec is not valid\n");
		return rc;
	}
	if (!item->mask) {
		BNXT_DRV_DBG(ERR, "ParseErr:Port mask is not valid\n");
		return rc;
	}

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_PORT_ID: {
		const struct rte_flow_item_port_id *port_spec = item->spec;
		const struct rte_flow_item_port_id *port_mask = item->mask;

		item_dir = BNXT_ULP_DIR_INVALID;
		ethdev_id = port_spec->id;
		mask = port_mask->id;

		if (!port_mask->id) {
			ULP_BITMAP_SET(params->hdr_bitmap.bits,
				       BNXT_ULP_HDR_BIT_SVIF_IGNORE);
		}
		break;
	}
	case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR: {
		const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
		const struct rte_flow_item_ethdev *ethdev_mask = item->mask;

		item_dir = BNXT_ULP_DIR_INGRESS;
		ethdev_id = ethdev_spec->port_id;
		mask = ethdev_mask->port_id;
		break;
	}
	case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT: {
		const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
		const struct rte_flow_item_ethdev *ethdev_mask = item->mask;

		item_dir = BNXT_ULP_DIR_EGRESS;
		ethdev_id = ethdev_spec->port_id;
		mask = ethdev_mask->port_id;
		break;
	}
	default:
		BNXT_DRV_DBG(ERR, "ParseErr:Unexpected item\n");
		return rc;
	}

	/* perform the conversion from dpdk port to bnxt ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      ethdev_id,
					      &ifindex)) {
		BNXT_DRV_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}
	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, mask, item_dir);
}
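/*
 * Illustrative mapping of the port items above: PORT_REPRESENTOR pins
 * the SVIF lookup to the ingress direction, REPRESENTED_PORT pins it
 * to egress, and PORT_ID leaves the direction to be computed from the
 * flow attributes and the match port type.
 */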
/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
			     uint16_t type, uint32_t in_flag,
			     uint32_t has_vlan, uint32_t has_vlan_mask)
{
#define ULP_RTE_ETHER_TYPE_ROE	0xfc3d

	if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_IPV4);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_IPV4);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
		}
	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_IPV6);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_IPV6);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
		}
	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
		has_vlan_mask = 1;
		has_vlan = 1;
	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_ECPRI)) {
		/* Update the hdr_bitmap with eCPRI */
		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
			       BNXT_ULP_HDR_BIT_O_ECPRI);
	} else if (type == tfp_cpu_to_be_16(ULP_RTE_ETHER_TYPE_ROE)) {
		/* Update the hdr_bitmap with RoE */
		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
			       BNXT_ULP_HDR_BIT_O_ROE);
	}

	if (has_vlan_mask) {
		if (in_flag) {
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_HAS_VTAG,
					    has_vlan);
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_I_VLAN_NO_IGNORE,
					    1);
		} else {
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_HAS_VTAG,
					    has_vlan);
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_O_VLAN_NO_IGNORE,
					    1);
		}
	}
}
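/*
 * Worked example (illustrative): an outer ETH item with ether_type
 * 0x0800 (RTE_ETHER_TYPE_IPV4) and in_flag == 0 sets the
 * BNXT_ULP_HDR_BIT_O_IPV4 fast-path bit and writes 1 into the
 * BNXT_ULP_CF_IDX_O_L3 computed field; the fast-path bits are merged
 * into the header bitmap during post processing, so the matcher can
 * select an outer-IPv4 template even before the IPV4 item is parsed.
 */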
/* Internal function to identify broadcast or multicast packets */
static int32_t
ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
{
	if (rte_is_multicast_ether_addr(eth_addr) ||
	    rte_is_broadcast_ether_addr(eth_addr)) {
		BNXT_DRV_DBG(DEBUG,
			     "No support for bcast or mcast addr offload\n");
		return 1;
	}
	return 0;
}

/* Function to handle the parsing of RTE Flow item Ethernet Header. */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_eth *eth_spec = item->spec;
	const struct rte_flow_item_eth *eth_mask = item->mask;
	uint32_t idx = 0, dmac_idx = 0;
	uint32_t size;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;
	uint32_t has_vlan = 0, has_vlan_mask = 0;

	/* Perform validations */
	if (eth_spec) {
		/* Avoid multicast and broadcast addr */
		if (!ULP_APP_BC_MC_SUPPORT(params->ulp_ctx) &&
		    ulp_rte_parser_is_bcmc_addr(&eth_spec->hdr.dst_addr))
			return BNXT_TF_RC_PARSE_ERR;

		if (!ULP_APP_BC_MC_SUPPORT(params->ulp_ctx) &&
		    ulp_rte_parser_is_bcmc_addr(&eth_spec->hdr.src_addr))
			return BNXT_TF_RC_PARSE_ERR;

		eth_type = eth_spec->hdr.ether_type;
		has_vlan = eth_spec->has_vlan;
	}

	/* If the mask is not specified then use the default mask */
	if (eth_spec && !eth_mask)
		eth_mask = &rte_flow_item_eth_mask;

	if (eth_mask) {
		eth_type &= eth_mask->type;
		has_vlan_mask = eth_mask->has_vlan;
	}

	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
						    BNXT_ULP_PROTO_HDR_ETH_NUM))) {
		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}
	/*
	 * Copy the rte_flow_item for eth into hdr_field using ethernet
	 * header fields
	 */
	dmac_idx = idx;
	size = sizeof(((struct rte_flow_item_eth *)NULL)->hdr.dst_addr.addr_bytes);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, hdr.dst_addr.addr_bytes),
			      ulp_deference_struct(eth_mask, hdr.dst_addr.addr_bytes),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_eth *)NULL)->hdr.src_addr.addr_bytes);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, hdr.src_addr.addr_bytes),
			      ulp_deference_struct(eth_mask, hdr.src_addr.addr_bytes),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_eth *)NULL)->hdr.ether_type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, hdr.ether_type),
			      ulp_deference_struct(eth_mask, hdr.ether_type),
			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MATCH_IGNORE);

	/* Update the protocol hdr bitmap */
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_ETH) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID,
				    dmac_idx);
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag,
				     has_vlan, has_vlan_mask);

	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item Vlan Header. */
int32_t
ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vlan *vlan_spec = item->spec;
	const struct rte_flow_item_vlan *vlan_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bit;
	uint32_t idx = 0;
	uint16_t vlan_tag = 0, priority = 0;
	uint16_t vlan_tag_mask = 0, priority_mask = 0;
	uint32_t outer_vtag_num;
	uint32_t inner_vtag_num;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;
	uint32_t size;

	if (vlan_spec) {
		vlan_tag = ntohs(vlan_spec->hdr.vlan_tci);
		priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag &= ULP_VLAN_TAG_MASK;
		vlan_tag = htons(vlan_tag);
		eth_type = vlan_spec->hdr.eth_proto;
	}

	/* assign the default vlan mask if the spec is valid and the mask is not */
	if (vlan_spec && !vlan_mask)
		vlan_mask = &rte_flow_item_vlan_mask;

	if (vlan_mask) {
		vlan_tag_mask = ntohs(vlan_mask->tci);
		priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag_mask &= 0xfff;
		/*
		 * The storage for the priority and the vlan tag is 2 bytes.
		 * If the 3-bit priority mask is all 1's, then extend the
		 * remaining 13 bits with 1's as well so that the field is
		 * treated as an exact match.
		 */
		if (priority_mask == ULP_VLAN_PRIORITY_MASK)
			priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
		if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
			vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
		vlan_tag_mask = htons(vlan_tag_mask);
	}
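	/*
	 * Worked example of the mask extension above (illustrative,
	 * little-endian host): a full TCI mask of 0xffff yields
	 * priority_mask 0x0700 and vlan_tag_mask 0x0fff after the shift
	 * and the 12-bit AND; both equal their field masks, so each is
	 * widened to 0xffff and the 2-byte hdr_field entries are treated
	 * as exact matches.
	 */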
	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
						    BNXT_ULP_PROTO_HDR_S_VLAN_NUM))) {
		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for vlan into hdr_field using vlan
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_vlan *)NULL)->hdr.vlan_tci);
	/*
	 * The priority field is ignored since OVS sets it as a wild card
	 * match and that is not supported. This is a workaround and shall
	 * be addressed in the future.
	 */
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      &priority,
			      (vlan_mask) ? &priority_mask : NULL,
			      ULP_PRSR_ACT_MASK_IGNORE);

	ulp_rte_prsr_fld_mask(params, &idx, size,
			      &vlan_tag,
			      (vlan_mask) ? &vlan_tag_mask : NULL,
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vlan *)NULL)->hdr.eth_proto);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vlan_spec, hdr.eth_proto),
			      ulp_deference_struct(vlan_mask, hdr.eth_proto),
			      ULP_PRSR_ACT_MATCH_IGNORE);

	/* Get the outer tag and inner tag counts */
	outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_O_VTAG_NUM);
	inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_I_VTAG_NUM);

	/* Update the hdr_bitmap of the vlans */
	hdr_bit = &params->hdr_bitmap;
	if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
	    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
	    !outer_vtag_num) {
		/* Update the vlan tag num */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_HAS_VTAG, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OO_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_OO_VLAN_FB_VID, 1);

	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   outer_vtag_num == 1) {
		/* update the vlan tag num */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OI_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_OI_VLAN_FB_VID, 1);

	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   !inner_vtag_num) {
		/* update the vlan tag num */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_HAS_VTAG, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_IO_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_IO_VLAN_FB_VID, 1);
		inner_flag = 1;
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   inner_vtag_num == 1) {
		/* update the vlan tag num */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_II_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_II_VLAN_FB_VID, 1);
		inner_flag = 1;
	} else {
		BNXT_DRV_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag, 1, 1);
	return BNXT_TF_RC_SUCCESS;
}
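/*
 * Illustrative tag accounting for the vlan handler above: in a pattern
 * ETH / VLAN / VLAN / ETH / VLAN, the first two VLAN items set the
 * OO_VLAN and then OI_VLAN bits (outer single, then outer double tag),
 * while the VLAN following the inner ETH sets IO_VLAN; a VLAN item
 * seen without a preceding ETH is rejected.
 */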
/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
			     uint8_t proto,
			     uint32_t in_flag)
{
	if (proto == IPPROTO_UDP) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_UDP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_UDP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
		}
	} else if (proto == IPPROTO_TCP) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_TCP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_TCP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
		}
	} else if (proto == IPPROTO_GRE) {
		ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
	} else if (proto == IPPROTO_ICMP) {
		if (ULP_BITMAP_ISSET(param->cf_bitmap,
				     BNXT_ULP_CF_BIT_IS_TUNNEL))
			ULP_BITMAP_SET(param->hdr_bitmap.bits,
				       BNXT_ULP_HDR_BIT_I_ICMP);
		else
			ULP_BITMAP_SET(param->hdr_bitmap.bits,
				       BNXT_ULP_HDR_BIT_O_ICMP);
	}

	if (in_flag) {
		ULP_COMP_FLD_IDX_WR(param,
				    BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
				    1);
		ULP_COMP_FLD_IDX_WR(param,
				    BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
				    proto);
	} else {
		ULP_COMP_FLD_IDX_WR(param,
				    BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
				    1);
		ULP_COMP_FLD_IDX_WR(param,
				    BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
				    proto);
	}
}
/* Function to handle the parsing of RTE Flow item IPV4 Header. */
int32_t
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
	const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0, dip_idx = 0;
	uint32_t size;
	uint8_t proto = 0;
	uint8_t ttl = 0;
	uint8_t proto_mask = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate that there is no third L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_DRV_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
						    BNXT_ULP_PROTO_HDR_IPV4_NUM))) {
		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/* If the mask is not specified then use the default mask */
	if (ipv4_spec && !ipv4_mask)
		ipv4_mask = &rte_flow_item_ipv4_mask;

	/*
	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.version_ihl),
			      ulp_deference_struct(ipv4_mask, hdr.version_ihl),
			      ULP_PRSR_ACT_DEFAULT);

	/*
	 * The tos field is ignored since OVS sets it as a wild card match
	 * and that is not supported. An application can enable tos support.
	 */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.type_of_service),
			      ulp_deference_struct(ipv4_mask,
						   hdr.type_of_service),
			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.total_length),
			      ulp_deference_struct(ipv4_mask, hdr.total_length),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.packet_id),
			      ulp_deference_struct(ipv4_mask, hdr.packet_id),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.fragment_offset),
			      ulp_deference_struct(ipv4_mask,
						   hdr.fragment_offset),
			      ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.time_to_live),
			      ulp_deference_struct(ipv4_mask, hdr.time_to_live),
			      ULP_PRSR_ACT_DEFAULT);
	if (ipv4_spec)
		ttl = ipv4_spec->hdr.time_to_live;
	if (!ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL))
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_TTL, ttl);

	/* Ignore proto for matching templates */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.next_proto_id),
			      ulp_deference_struct(ipv4_mask,
						   hdr.next_proto_id),
			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MATCH_IGNORE);

	if (ipv4_spec)
		proto = ipv4_spec->hdr.next_proto_id;

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.hdr_checksum),
			      ulp_deference_struct(ipv4_mask, hdr.hdr_checksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.src_addr),
			      ulp_deference_struct(ipv4_mask, hdr.src_addr),
			      ULP_PRSR_ACT_DEFAULT);

	dip_idx = idx;
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.dst_addr),
			      ulp_deference_struct(ipv4_mask, hdr.dst_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the ipv4 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
		/* Update the tunnel offload dest ip offset */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
				    dip_idx);
	}

	/* Some of the PMD applications may set the protocol field
	 * in the IPv4 spec but don't set the mask. So, consider
	 * the mask in the proto value calculation.
	 */
1143 */ 1144 if (ipv4_mask) { 1145 proto &= ipv4_mask->hdr.next_proto_id; 1146 proto_mask = ipv4_mask->hdr.next_proto_id; 1147 } 1148 1149 /* Update the field protocol hdr bitmap */ 1150 if (proto_mask) 1151 ulp_rte_l3_proto_type_update(params, proto, inner_flag); 1152 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt); 1153 return BNXT_TF_RC_SUCCESS; 1154 } 1155 1156 /* Function to handle the parsing of RTE Flow item IPV6 Header */ 1157 int32_t 1158 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item, 1159 struct ulp_rte_parser_params *params) 1160 { 1161 const struct rte_flow_item_ipv6 *ipv6_spec = item->spec; 1162 const struct rte_flow_item_ipv6 *ipv6_mask = item->mask; 1163 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; 1164 uint32_t idx = 0, dip_idx = 0; 1165 uint32_t size, vtc_flow; 1166 uint32_t ver_spec = 0, ver_mask = 0; 1167 uint32_t tc_spec = 0, tc_mask = 0; 1168 uint32_t lab_spec = 0, lab_mask = 0; 1169 uint8_t proto = 0; 1170 uint8_t proto_mask = 0; 1171 uint8_t ttl = 0; 1172 uint32_t inner_flag = 0; 1173 uint32_t cnt; 1174 1175 /* validate there are no 3rd L3 header */ 1176 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT); 1177 if (cnt == 2) { 1178 BNXT_DRV_DBG(ERR, "Parse Err:Third L3 header not supported\n"); 1179 return BNXT_TF_RC_ERROR; 1180 } 1181 1182 if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx, 1183 BNXT_ULP_PROTO_HDR_IPV6_NUM))) { 1184 BNXT_DRV_DBG(ERR, "Error parsing protocol header\n"); 1185 return BNXT_TF_RC_ERROR; 1186 } 1187 1188 /* If mask is not specified then use the default mask */ 1189 if (ipv6_spec && !ipv6_mask) 1190 ipv6_mask = &rte_flow_item_ipv6_mask; 1191 1192 /* 1193 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6 1194 * header fields 1195 */ 1196 if (ipv6_spec) { 1197 vtc_flow = ntohl(ipv6_spec->hdr.vtc_flow); 1198 ver_spec = htonl(BNXT_ULP_GET_IPV6_VER(vtc_flow)); 1199 tc_spec = htonl(BNXT_ULP_GET_IPV6_TC(vtc_flow)); 1200 lab_spec = htonl(BNXT_ULP_GET_IPV6_FLOWLABEL(vtc_flow)); 1201 proto = ipv6_spec->hdr.proto; 1202 } 1203 1204 if (ipv6_mask) { 1205 vtc_flow = ntohl(ipv6_mask->hdr.vtc_flow); 1206 ver_mask = htonl(BNXT_ULP_GET_IPV6_VER(vtc_flow)); 1207 tc_mask = htonl(BNXT_ULP_GET_IPV6_TC(vtc_flow)); 1208 lab_mask = htonl(BNXT_ULP_GET_IPV6_FLOWLABEL(vtc_flow)); 1209 1210 /* Some of the PMD applications may set the protocol field 1211 * in the IPv6 spec but don't set the mask. So, consider 1212 * the mask in proto value calculation. 1213 */ 1214 proto &= ipv6_mask->hdr.proto; 1215 proto_mask = ipv6_mask->hdr.proto; 1216 } 1217 1218 size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow); 1219 ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask, 1220 ULP_PRSR_ACT_DEFAULT); 1221 /* 1222 * The TC and flow label field are ignored since OVS is 1223 * setting it for match and it is not supported. 1224 * This is a work around and 1225 * shall be addressed in the future. 1226 */ 1227 ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask, 1228 (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ? 
	ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask,
			      ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.payload_len),
			      ulp_deference_struct(ipv6_mask, hdr.payload_len),
			      ULP_PRSR_ACT_DEFAULT);

	/* Ignore proto for template matching */
	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.proto),
			      ulp_deference_struct(ipv6_mask, hdr.proto),
			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MATCH_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.hop_limits),
			      ulp_deference_struct(ipv6_mask, hdr.hop_limits),
			      ULP_PRSR_ACT_DEFAULT);
	if (ipv6_spec)
		ttl = ipv6_spec->hdr.hop_limits;
	if (!ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL))
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_TTL, ttl);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.src_addr),
			      ulp_deference_struct(ipv6_mask, hdr.src_addr),
			      ULP_PRSR_ACT_DEFAULT);

	dip_idx = idx;
	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.dst_addr),
			      ulp_deference_struct(ipv6_mask, hdr.dst_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the ipv6 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
		/* Update the tunnel offload dest ip offset */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
				    dip_idx);
	}

	/* Update the field protocol hdr bitmap */
	if (proto_mask)
		ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);

	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *params,
			     uint16_t src_port, uint16_t src_mask,
			     uint16_t dst_port, uint16_t dst_mask,
			     enum bnxt_ulp_hdr_bit hdr_bit)
{
	uint16_t stat_port = 0;
	struct bnxt *bp;

	switch (hdr_bit) {
	case BNXT_ULP_HDR_BIT_I_UDP:
	case BNXT_ULP_HDR_BIT_I_TCP:
		ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
				    (uint64_t)rte_be_to_cpu_16(src_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
				    (uint64_t)rte_be_to_cpu_16(dst_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
				    1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
				    !!(src_port & src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
				    !!(dst_port & dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
				    (hdr_bit == BNXT_ULP_HDR_BIT_I_UDP) ?
				    IPPROTO_UDP : IPPROTO_TCP);
		break;
	case BNXT_ULP_HDR_BIT_O_UDP:
	case BNXT_ULP_HDR_BIT_O_TCP:
		ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
				    (uint64_t)rte_be_to_cpu_16(src_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
				    (uint64_t)rte_be_to_cpu_16(dst_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
				    1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
				    !!(src_port & src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
				    !!(dst_port & dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
				    (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP) ?
				    IPPROTO_UDP : IPPROTO_TCP);
		break;
	default:
		break;
	}

	/* If it is not an outer udp port then there is no need to set tunnel bits */
	if (hdr_bit != BNXT_ULP_HDR_BIT_O_UDP)
		return;

	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUNNEL_PORT,
			    tfp_be_to_cpu_16(dst_port));

	/* vxlan static customized port */
	if (ULP_APP_STATIC_VXLAN_PORT_EN(params->ulp_ctx)) {
		stat_port = bnxt_ulp_cntxt_vxlan_ip_port_get(params->ulp_ctx);
		if (!stat_port)
			stat_port =
			    bnxt_ulp_cntxt_vxlan_port_get(params->ulp_ctx);

		/* if udp and equal to the static vxlan port then set tunnel bits */
		if (stat_port && dst_port == tfp_cpu_to_be_16(stat_port)) {
			bp = bnxt_pmd_get_bp(params->port_id);
			if (bp == NULL) {
				BNXT_DRV_DBG(ERR, "Invalid bp\n");
				return;
			}
			ULP_BITMAP_SET(params->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_T_VXLAN);
			ULP_BITMAP_SET(params->cf_bitmap,
				       BNXT_ULP_CF_BIT_IS_TUNNEL);
			if (bp->vxlan_ip_upar_in_use &
			    HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_UPAR_IN_USE_UPAR0) {
				ULP_COMP_FLD_IDX_WR(params,
						    BNXT_ULP_CF_IDX_VXLAN_IP_UPAR_ID,
						    ULP_WP_SYM_TUN_HDR_TYPE_UPAR1);
			}
		}
	} else {
		/* if dynamic VXLAN is enabled then skip dport checks */
		if (ULP_APP_DYNAMIC_VXLAN_PORT_EN(params->ulp_ctx))
			return;

		/* VXLAN and GPE port check */
		if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN_GPE)) {
			ULP_BITMAP_SET(params->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_T_VXLAN_GPE);
			ULP_BITMAP_SET(params->cf_bitmap,
				       BNXT_ULP_CF_BIT_IS_TUNNEL);
		} else if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
			ULP_BITMAP_SET(params->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_T_VXLAN);
			ULP_BITMAP_SET(params->cf_bitmap,
				       BNXT_ULP_CF_BIT_IS_TUNNEL);
		}
	}
}
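/*
 * Worked example of the tunnel detection above (illustrative): an
 * outer UDP item with dst_port 4789 (ULP_UDP_PORT_VXLAN) sets the
 * T_VXLAN fast-path bit and the IS_TUNNEL computed-field bit, so any
 * following ETH/IPV4 items are classified as inner headers; dst_port
 * 4790 selects T_VXLAN_GPE instead.
 */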
/* Function to handle the parsing of RTE Flow item UDP Header. */
int32_t
ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_udp *udp_spec = item->spec;
	const struct rte_flow_item_udp *udp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;
	uint16_t dport = 0, sport = 0;
	uint16_t dport_mask = 0, sport_mask = 0;
	uint32_t cnt;
	enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_UDP;

	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_DRV_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (udp_spec) {
		sport = udp_spec->hdr.src_port;
		dport = udp_spec->hdr.dst_port;
	}
	if (udp_spec && !udp_mask)
		udp_mask = &rte_flow_item_udp_mask;

	if (udp_mask) {
		sport_mask = udp_mask->hdr.src_port;
		dport_mask = udp_mask->hdr.dst_port;
	}

	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
						    BNXT_ULP_PROTO_HDR_UDP_NUM))) {
		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for udp into hdr_field using udp
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.src_port),
			      ulp_deference_struct(udp_mask, hdr.src_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dst_port),
			      ulp_deference_struct(udp_mask, hdr.dst_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dgram_len),
			      ulp_deference_struct(udp_mask, hdr.dgram_len),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dgram_cksum),
			      ulp_deference_struct(udp_mask, hdr.dgram_cksum),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the udp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP) ||
	    ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL))
		out_l4 = BNXT_ULP_HDR_BIT_I_UDP;

	ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
				     dport_mask, out_l4);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
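/*
 * Illustrative classification in the udp handler above: if an outer
 * UDP or TCP header was already recorded, or a tunnel has been
 * detected, this UDP item is treated as the inner L4 header
 * (BNXT_ULP_HDR_BIT_I_UDP); otherwise it becomes the outer one and
 * may trigger the VXLAN/GPE port checks in
 * ulp_rte_l4_proto_type_update().
 */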
/* Function to handle the parsing of RTE Flow item TCP Header. */
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_tcp *tcp_spec = item->spec;
	const struct rte_flow_item_tcp *tcp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint16_t dport = 0, sport = 0;
	uint16_t dport_mask = 0, sport_mask = 0;
	uint32_t size;
	uint32_t cnt;
	enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_TCP;

	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_DRV_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (tcp_spec) {
		sport = tcp_spec->hdr.src_port;
		dport = tcp_spec->hdr.dst_port;
	}

	if (tcp_spec && !tcp_mask)
		tcp_mask = &rte_flow_item_tcp_mask;

	if (tcp_mask) {
		sport_mask = tcp_mask->hdr.src_port;
		dport_mask = tcp_mask->hdr.dst_port;
	}

	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
						    BNXT_ULP_PROTO_HDR_TCP_NUM))) {
		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for tcp into hdr_field using tcp
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.src_port),
			      ulp_deference_struct(tcp_mask, hdr.src_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.dst_port),
			      ulp_deference_struct(tcp_mask, hdr.dst_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.sent_seq),
			      ulp_deference_struct(tcp_mask, hdr.sent_seq),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.recv_ack),
			      ulp_deference_struct(tcp_mask, hdr.recv_ack),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.data_off),
			      ulp_deference_struct(tcp_mask, hdr.data_off),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.tcp_flags),
			      ulp_deference_struct(tcp_mask, hdr.tcp_flags),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.rx_win),
			      ulp_deference_struct(tcp_mask, hdr.rx_win),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.cksum),
			      ulp_deference_struct(tcp_mask, hdr.cksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.tcp_urp),
			      ulp_deference_struct(tcp_mask, hdr.tcp_urp),
			      ULP_PRSR_ACT_DEFAULT);
	/* Set the tcp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP) ||
	    ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL))
		out_l4 = BNXT_ULP_HDR_BIT_I_TCP;

	ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
				     dport_mask, out_l4);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item Vxlan Header. */
int32_t
ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
	const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	struct bnxt_ulp_context *ulp_ctx = params->ulp_ctx;
	uint32_t idx = 0;
	uint16_t dport, stat_port;
	uint32_t size;

	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
						    BNXT_ULP_PROTO_HDR_VXLAN_NUM))) {
		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	if (vxlan_spec && !vxlan_mask)
		vxlan_mask = &rte_flow_item_vxlan_mask;

	/* Update if the outer headers have any partial masks */
	if (!ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_WC_MATCH))
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_OUTER_EM_ONLY, 1);

	/*
	 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.flags);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_spec, hdr.flags),
			      ulp_deference_struct(vxlan_mask, hdr.flags),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.rsvd0);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_spec, hdr.rsvd0),
			      ulp_deference_struct(vxlan_mask, hdr.rsvd0),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.vni);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_spec, hdr.vni),
			      ulp_deference_struct(vxlan_mask, hdr.vni),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.rsvd1);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_spec, hdr.rsvd1),
			      ulp_deference_struct(vxlan_mask, hdr.rsvd1),
			      ULP_PRSR_ACT_DEFAULT);

	/* Update the hdr_bitmap with vxlan */
	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
	ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL);

	/* if the l4 protocol header updated it then reset it */
	ULP_BITMAP_RESET(params->hdr_fp_bit.bits, BNXT_ULP_HDR_BIT_T_VXLAN_GPE);

	dport = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT);
	if (!dport) {
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
				    ULP_UDP_PORT_VXLAN);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
				    ULP_UDP_PORT_VXLAN_MASK);
	}
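	/*
	 * Summary of the port handling below (illustrative): with a
	 * static VXLAN port configured, a non-zero outer dst_port must
	 * match it or the flow is rejected; with dynamic VXLAN ports
	 * enabled, a non-zero dst_port is required and recorded as the
	 * tunnel port; otherwise a non-default port turns on the
	 * dynamic-port check and the default 4789 is assumed when the
	 * port was absent or matched the default.
	 */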
if not, reject */
1669 if (dport != 0 && dport != tfp_cpu_to_be_16(stat_port)) {
1670 BNXT_DRV_DBG(ERR, "ParseErr:vxlan port is not valid\n");
1671 return BNXT_TF_RC_PARSE_ERR;
1672 } else if (dport == 0) {
1673 ULP_COMP_FLD_IDX_WR(params,
1674 BNXT_ULP_CF_IDX_TUNNEL_PORT,
1675 tfp_cpu_to_be_16(stat_port));
1676 }
1677 } else {
1678 /* dynamic vxlan support */
1679 if (ULP_APP_DYNAMIC_VXLAN_PORT_EN(params->ulp_ctx)) {
1680 if (dport == 0) {
1681 BNXT_DRV_DBG(ERR,
1682 "ParseErr:vxlan port is null\n");
1683 return BNXT_TF_RC_PARSE_ERR;
1684 }
1685 /* set the dynamic vxlan port check */
1686 ULP_BITMAP_SET(params->cf_bitmap,
1687 BNXT_ULP_CF_BIT_DYNAMIC_VXLAN_PORT);
1688 ULP_COMP_FLD_IDX_WR(params,
1689 BNXT_ULP_CF_IDX_TUNNEL_PORT, dport);
1690 } else if (dport != 0 && dport != ULP_UDP_PORT_VXLAN) {
1691 /* set the dynamic vxlan port check */
1692 ULP_BITMAP_SET(params->cf_bitmap,
1693 BNXT_ULP_CF_BIT_DYNAMIC_VXLAN_PORT);
1694 ULP_COMP_FLD_IDX_WR(params,
1695 BNXT_ULP_CF_IDX_TUNNEL_PORT, dport);
1696 } else {
1697 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUNNEL_PORT,
1698 ULP_UDP_PORT_VXLAN);
1699 }
1700 }
1701 return BNXT_TF_RC_SUCCESS;
1702 }
1703
1704 /* Function to handle the parsing of RTE Flow item Vxlan GPE Header. */
1705 int32_t
1706 ulp_rte_vxlan_gpe_hdr_handler(const struct rte_flow_item *item,
1707 struct ulp_rte_parser_params *params)
1708 {
1709 const struct rte_flow_item_vxlan_gpe *vxlan_gpe_spec = item->spec;
1710 const struct rte_flow_item_vxlan_gpe *vxlan_gpe_mask = item->mask;
1711 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1712 uint32_t idx = 0;
1713 uint16_t dport;
1714 uint32_t size;
1715
1716 if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
1717 BNXT_ULP_PROTO_HDR_VXLAN_GPE_NUM))) {
1718 BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
1719 return BNXT_TF_RC_ERROR;
1720 }
1721
1722 if (vxlan_gpe_spec && !vxlan_gpe_mask)
1723 vxlan_gpe_mask = &rte_flow_item_vxlan_gpe_mask;
1724 /*
1725 * Copy the rte_flow_item for vxlan gpe into hdr_field using vxlan gpe
1726 * header fields
1727 */
1728 size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->flags);
1729 ulp_rte_prsr_fld_mask(params, &idx, size,
1730 ulp_deference_struct(vxlan_gpe_spec, flags),
1731 ulp_deference_struct(vxlan_gpe_mask, flags),
1732 ULP_PRSR_ACT_DEFAULT);
1733
1734 size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->rsvd0);
1735 ulp_rte_prsr_fld_mask(params, &idx, size,
1736 ulp_deference_struct(vxlan_gpe_spec, rsvd0),
1737 ulp_deference_struct(vxlan_gpe_mask, rsvd0),
1738 ULP_PRSR_ACT_DEFAULT);
1739
1740 size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->protocol);
1741 ulp_rte_prsr_fld_mask(params, &idx, size,
1742 ulp_deference_struct(vxlan_gpe_spec, protocol),
1743 ulp_deference_struct(vxlan_gpe_mask, protocol),
1744 ULP_PRSR_ACT_DEFAULT);
1745
1746 size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->vni);
1747 ulp_rte_prsr_fld_mask(params, &idx, size,
1748 ulp_deference_struct(vxlan_gpe_spec, vni),
1749 ulp_deference_struct(vxlan_gpe_mask, vni),
1750 ULP_PRSR_ACT_DEFAULT);
1751
1752 size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->rsvd1);
1753 ulp_rte_prsr_fld_mask(params, &idx, size,
1754 ulp_deference_struct(vxlan_gpe_spec, rsvd1),
1755 ulp_deference_struct(vxlan_gpe_mask, rsvd1),
1756 ULP_PRSR_ACT_DEFAULT);
1757
1758 /* Update the hdr_bitmap with vxlan gpe */
1759 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN_GPE);
1760 ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL);
1761
1762 /* Reset the plain-VXLAN bit in case an earlier protocol handler set it
*/
1763 ULP_BITMAP_RESET(params->hdr_fp_bit.bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1764
1765 dport = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT);
1766 if (!dport) {
1767 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1768 ULP_UDP_PORT_VXLAN_GPE);
1769 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
1770 ULP_UDP_PORT_VXLAN_GPE_MASK);
1771 }
1772 /* TBD: currently dynamic or static gpe port config is not supported */
1773 /* Update the tunnel port */
1774 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUNNEL_PORT, dport);
1775
1776 /* Verify the vxlan gpe port */
1777 if (dport != 0 && dport != ULP_UDP_PORT_VXLAN_GPE) {
1778 BNXT_DRV_DBG(ERR, "ParseErr:vxlan gpe port is not valid\n");
1779 return BNXT_TF_RC_PARSE_ERR;
1780 }
1781 return BNXT_TF_RC_SUCCESS;
1782 }
1783
1784 /* Function to handle the parsing of RTE Flow item GENEVE Header. */
1785 int32_t
1786 ulp_rte_geneve_hdr_handler(const struct rte_flow_item *item,
1787 struct ulp_rte_parser_params *params)
1788 {
1789 const struct rte_flow_item_geneve *geneve_spec = item->spec;
1790 const struct rte_flow_item_geneve *geneve_mask = item->mask;
1791 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1792 uint32_t idx = 0;
1793 uint16_t dport;
1794 uint32_t size;
1795
1796 if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
1797 BNXT_ULP_PROTO_HDR_GENEVE_NUM))) {
1798 BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
1799 return BNXT_TF_RC_ERROR;
1800 }
1801
1802 if (geneve_spec && !geneve_mask)
1803 geneve_mask = &rte_flow_item_geneve_mask;
1804
1805 /*
1806 * Copy the rte_flow_item for geneve into hdr_field using geneve
1807 * header fields
1808 */
1809 size = sizeof(((struct rte_flow_item_geneve *)NULL)->ver_opt_len_o_c_rsvd0);
1810 ulp_rte_prsr_fld_mask(params, &idx, size,
1811 ulp_deference_struct(geneve_spec, ver_opt_len_o_c_rsvd0),
1812 ulp_deference_struct(geneve_mask, ver_opt_len_o_c_rsvd0),
1813 ULP_PRSR_ACT_DEFAULT);
1814
1815 size = sizeof(((struct rte_flow_item_geneve *)NULL)->protocol);
1816 ulp_rte_prsr_fld_mask(params, &idx, size,
1817 ulp_deference_struct(geneve_spec, protocol),
1818 ulp_deference_struct(geneve_mask, protocol),
1819 ULP_PRSR_ACT_DEFAULT);
1820
1821 size = sizeof(((struct rte_flow_item_geneve *)NULL)->vni);
1822 ulp_rte_prsr_fld_mask(params, &idx, size,
1823 ulp_deference_struct(geneve_spec, vni),
1824 ulp_deference_struct(geneve_mask, vni),
1825 ULP_PRSR_ACT_DEFAULT);
1826
1827 size = sizeof(((struct rte_flow_item_geneve *)NULL)->rsvd1);
1828 ulp_rte_prsr_fld_mask(params, &idx, size,
1829 ulp_deference_struct(geneve_spec, rsvd1),
1830 ulp_deference_struct(geneve_mask, rsvd1),
1831 ULP_PRSR_ACT_DEFAULT);
1832
1833 /* Update the hdr_bitmap with geneve */
1834 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GENEVE);
1835 ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL);
1836
1837 /* update the tunnel port */
1838 dport = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT);
1839 if (ULP_APP_DYNAMIC_GENEVE_PORT_EN(params->ulp_ctx)) {
1840 if (dport == 0) {
1841 BNXT_DRV_DBG(ERR, "ParseErr:geneve port is null\n");
1842 return BNXT_TF_RC_PARSE_ERR;
1843 }
1844 /* set the dynamic geneve port check */
1845 ULP_BITMAP_SET(params->cf_bitmap,
1846 BNXT_ULP_CF_BIT_DYNAMIC_GENEVE_PORT);
1847 ULP_COMP_FLD_IDX_WR(params,
1848 BNXT_ULP_CF_IDX_TUNNEL_PORT, dport);
1849 } else {
1850 if (dport == 0) {
1851 ULP_COMP_FLD_IDX_WR(params,
1852 BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1853 ULP_UDP_PORT_GENEVE);
1854 ULP_COMP_FLD_IDX_WR(params,
1855 BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
1856 ULP_UDP_PORT_GENEVE_MASK);
1857 } else if (dport != 0 && dport != ULP_UDP_PORT_GENEVE) {
1858 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUNNEL_PORT,
1859 dport);
1860 ULP_BITMAP_SET(params->cf_bitmap,
1861 BNXT_ULP_CF_BIT_DYNAMIC_GENEVE_PORT);
1862 }
1863 }
1864 return BNXT_TF_RC_SUCCESS;
1865 }
1866
1867 /* Function to handle the parsing of RTE Flow item GRE Header. */
1868 int32_t
1869 ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
1870 struct ulp_rte_parser_params *params)
1871 {
1872 const struct rte_flow_item_gre *gre_spec = item->spec;
1873 const struct rte_flow_item_gre *gre_mask = item->mask;
1874 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1875 uint32_t idx = 0;
1876 uint32_t size;
1877
1878 if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
1879 BNXT_ULP_PROTO_HDR_GRE_NUM))) {
1880 BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
1881 return BNXT_TF_RC_ERROR;
1882 }
1883
1884 if (gre_spec && !gre_mask)
1885 gre_mask = &rte_flow_item_gre_mask;
1886
1887 size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver);
1888 ulp_rte_prsr_fld_mask(params, &idx, size,
1889 ulp_deference_struct(gre_spec, c_rsvd0_ver),
1890 ulp_deference_struct(gre_mask, c_rsvd0_ver),
1891 ULP_PRSR_ACT_DEFAULT);
1892
1893 size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol);
1894 ulp_rte_prsr_fld_mask(params, &idx, size,
1895 ulp_deference_struct(gre_spec, protocol),
1896 ulp_deference_struct(gre_mask, protocol),
1897 ULP_PRSR_ACT_DEFAULT);
1898
1899 /* Update the hdr_bitmap with GRE */
1900 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1901 ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL);
1902 return BNXT_TF_RC_SUCCESS;
1903 }
1904
1905 /* Function to handle the parsing of RTE Flow item ANY. */
1906 int32_t
1907 ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
1908 struct ulp_rte_parser_params *params __rte_unused)
1909 {
1910 return BNXT_TF_RC_SUCCESS;
1911 }
1912
1913 /* Function to handle the parsing of RTE Flow item ICMP Header.
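All five ICMP header fields (type, code, checksum, identifier, sequence number) are copied into hdr_field; the IS_TUNNEL flag selects the inner or outer ICMP header bit.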
*/
1914 int32_t
1915 ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
1916 struct ulp_rte_parser_params *params)
1917 {
1918 const struct rte_flow_item_icmp *icmp_spec = item->spec;
1919 const struct rte_flow_item_icmp *icmp_mask = item->mask;
1920 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1921 uint32_t idx = 0;
1922 uint32_t size;
1923
1924 if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
1925 BNXT_ULP_PROTO_HDR_ICMP_NUM))) {
1926 BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
1927 return BNXT_TF_RC_ERROR;
1928 }
1929
1930 if (icmp_spec && !icmp_mask)
1931 icmp_mask = &rte_flow_item_icmp_mask;
1932
1933 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_type);
1934 ulp_rte_prsr_fld_mask(params, &idx, size,
1935 ulp_deference_struct(icmp_spec, hdr.icmp_type),
1936 ulp_deference_struct(icmp_mask, hdr.icmp_type),
1937 ULP_PRSR_ACT_DEFAULT);
1938
1939 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_code);
1940 ulp_rte_prsr_fld_mask(params, &idx, size,
1941 ulp_deference_struct(icmp_spec, hdr.icmp_code),
1942 ulp_deference_struct(icmp_mask, hdr.icmp_code),
1943 ULP_PRSR_ACT_DEFAULT);
1944
1945 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_cksum);
1946 ulp_rte_prsr_fld_mask(params, &idx, size,
1947 ulp_deference_struct(icmp_spec, hdr.icmp_cksum),
1948 ulp_deference_struct(icmp_mask, hdr.icmp_cksum),
1949 ULP_PRSR_ACT_DEFAULT);
1950
1951 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_ident);
1952 ulp_rte_prsr_fld_mask(params, &idx, size,
1953 ulp_deference_struct(icmp_spec, hdr.icmp_ident),
1954 ulp_deference_struct(icmp_mask, hdr.icmp_ident),
1955 ULP_PRSR_ACT_DEFAULT);
1956
1957 size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_seq_nb);
1958 ulp_rte_prsr_fld_mask(params, &idx, size,
1959 ulp_deference_struct(icmp_spec, hdr.icmp_seq_nb),
1960 ulp_deference_struct(icmp_mask, hdr.icmp_seq_nb),
1961 ULP_PRSR_ACT_DEFAULT);
1962
1963 /* Update the hdr_bitmap with ICMP */
1964 if (ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL))
1965 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1966 else
1967 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1968 return BNXT_TF_RC_SUCCESS;
1969 }
1970
1971 /* Function to handle the parsing of RTE Flow item ICMP6 Header.
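Only the type, code and checksum fields are parsed; the flow is rejected if an outer IPv4 header was already seen, since ICMPv6 is only valid over IPv6.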
*/
1972 int32_t
1973 ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item,
1974 struct ulp_rte_parser_params *params)
1975 {
1976 const struct rte_flow_item_icmp6 *icmp_spec = item->spec;
1977 const struct rte_flow_item_icmp6 *icmp_mask = item->mask;
1978 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1979 uint32_t idx = 0;
1980 uint32_t size;
1981
1982 if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
1983 BNXT_ULP_PROTO_HDR_ICMP_NUM))) {
1984 BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
1985 return BNXT_TF_RC_ERROR;
1986 }
1987
1988 if (icmp_spec && !icmp_mask)
1989 icmp_mask = &rte_flow_item_icmp6_mask;
1990
1991 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->type);
1992 ulp_rte_prsr_fld_mask(params, &idx, size,
1993 ulp_deference_struct(icmp_spec, type),
1994 ulp_deference_struct(icmp_mask, type),
1995 ULP_PRSR_ACT_DEFAULT);
1996
1997 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->code);
1998 ulp_rte_prsr_fld_mask(params, &idx, size,
1999 ulp_deference_struct(icmp_spec, code),
2000 ulp_deference_struct(icmp_mask, code),
2001 ULP_PRSR_ACT_DEFAULT);
2002
2003 size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->checksum);
2004 ulp_rte_prsr_fld_mask(params, &idx, size,
2005 ulp_deference_struct(icmp_spec, checksum),
2006 ulp_deference_struct(icmp_mask, checksum),
2007 ULP_PRSR_ACT_DEFAULT);
2008
2009 if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4)) {
2010 BNXT_DRV_DBG(ERR, "Error: incorrect icmp version\n");
2011 return BNXT_TF_RC_ERROR;
2012 }
2013
2014 /* Update the hdr_bitmap with ICMP */
2015 if (ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL))
2016 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
2017 else
2018 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
2019 return BNXT_TF_RC_SUCCESS;
2020 }
2021
2022 /* Function to handle the parsing of RTE Flow item ECPRI Header. */
2023 int32_t
2024 ulp_rte_ecpri_hdr_handler(const struct rte_flow_item *item,
2025 struct ulp_rte_parser_params *params)
2026 {
2027 const struct rte_flow_item_ecpri *ecpri_spec = item->spec;
2028 const struct rte_flow_item_ecpri *ecpri_mask = item->mask;
2029 struct rte_flow_item_ecpri l_ecpri_spec, l_ecpri_mask;
2030 struct rte_flow_item_ecpri *p_ecpri_spec = &l_ecpri_spec;
2031 struct rte_flow_item_ecpri *p_ecpri_mask = &l_ecpri_mask;
2032 struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
2033 uint32_t idx = 0, cnt;
2034 uint32_t size;
2035
2036 if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
2037 BNXT_ULP_PROTO_HDR_ECPRI_NUM))) {
2038 BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
2039 return BNXT_TF_RC_ERROR;
2040 }
2041
2042 if (ecpri_spec && !ecpri_mask)
2043 ecpri_mask = &rte_flow_item_ecpri_mask;
2044
2045 /* eCPRI within L4 (UDP) is not supported for now; reject such flows */
2046 cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
2047 if (cnt >= 1) {
2048 BNXT_DRV_DBG(ERR, "Parse Err: L4 header stack >= 2 not supported\n");
2049 return BNXT_TF_RC_ERROR;
2050 }
2051
2052 if (!ecpri_spec || !ecpri_mask)
2053 goto parser_set_ecpri_hdr_bit;
2054
2055 memcpy(p_ecpri_spec, ecpri_spec, sizeof(*ecpri_spec));
2056 memcpy(p_ecpri_mask, ecpri_mask, sizeof(*ecpri_mask));
2057
2058 p_ecpri_spec->hdr.common.u32 = rte_be_to_cpu_32(p_ecpri_spec->hdr.common.u32);
2059 p_ecpri_mask->hdr.common.u32 = rte_be_to_cpu_32(p_ecpri_mask->hdr.common.u32);
2060
2061 /*
2062 * Init eCPRI spec+mask to correct defaults, also clear masks of fields
2063 * we ignore in the TCAM.
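* Depending on the message type, the switch below also zeroes either the
* mask or the spec of per-type sub-fields (sequence ids, rma access bits,
* measurement, reset and event fields) so they do not take part in the match.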
2064 */ 2065 2066 l_ecpri_spec.hdr.common.size = 0; 2067 l_ecpri_spec.hdr.common.c = 0; 2068 l_ecpri_spec.hdr.common.res = 0; 2069 l_ecpri_spec.hdr.common.revision = 1; 2070 l_ecpri_mask.hdr.common.size = 0; 2071 l_ecpri_mask.hdr.common.c = 1; 2072 l_ecpri_mask.hdr.common.res = 0; 2073 l_ecpri_mask.hdr.common.revision = 0xf; 2074 2075 switch (p_ecpri_spec->hdr.common.type) { 2076 case RTE_ECPRI_MSG_TYPE_IQ_DATA: 2077 l_ecpri_mask.hdr.type0.seq_id = 0; 2078 break; 2079 2080 case RTE_ECPRI_MSG_TYPE_BIT_SEQ: 2081 l_ecpri_mask.hdr.type1.seq_id = 0; 2082 break; 2083 2084 case RTE_ECPRI_MSG_TYPE_RTC_CTRL: 2085 l_ecpri_mask.hdr.type2.seq_id = 0; 2086 break; 2087 2088 case RTE_ECPRI_MSG_TYPE_GEN_DATA: 2089 l_ecpri_mask.hdr.type3.seq_id = 0; 2090 break; 2091 2092 case RTE_ECPRI_MSG_TYPE_RM_ACC: 2093 l_ecpri_mask.hdr.type4.rr = 0; 2094 l_ecpri_mask.hdr.type4.rw = 0; 2095 l_ecpri_mask.hdr.type4.rma_id = 0; 2096 break; 2097 2098 case RTE_ECPRI_MSG_TYPE_DLY_MSR: 2099 l_ecpri_spec.hdr.type5.act_type = 0; 2100 break; 2101 2102 case RTE_ECPRI_MSG_TYPE_RMT_RST: 2103 l_ecpri_spec.hdr.type6.rst_op = 0; 2104 break; 2105 2106 case RTE_ECPRI_MSG_TYPE_EVT_IND: 2107 l_ecpri_spec.hdr.type7.evt_type = 0; 2108 l_ecpri_spec.hdr.type7.seq = 0; 2109 l_ecpri_spec.hdr.type7.number = 0; 2110 break; 2111 2112 default: 2113 break; 2114 } 2115 2116 p_ecpri_spec->hdr.common.u32 = rte_cpu_to_be_32(p_ecpri_spec->hdr.common.u32); 2117 p_ecpri_mask->hdr.common.u32 = rte_cpu_to_be_32(p_ecpri_mask->hdr.common.u32); 2118 2119 /* Type */ 2120 size = sizeof(((struct rte_flow_item_ecpri *)NULL)->hdr.common.u32); 2121 ulp_rte_prsr_fld_mask(params, &idx, size, 2122 ulp_deference_struct(p_ecpri_spec, hdr.common.u32), 2123 ulp_deference_struct(p_ecpri_mask, hdr.common.u32), 2124 ULP_PRSR_ACT_DEFAULT); 2125 2126 /* PC/RTC/MSR_ID */ 2127 size = sizeof(((struct rte_flow_item_ecpri *)NULL)->hdr.dummy[0]); 2128 ulp_rte_prsr_fld_mask(params, &idx, size, 2129 ulp_deference_struct(p_ecpri_spec, hdr.dummy), 2130 ulp_deference_struct(p_ecpri_mask, hdr.dummy), 2131 ULP_PRSR_ACT_DEFAULT); 2132 2133 parser_set_ecpri_hdr_bit: 2134 /* Update the hdr_bitmap with eCPRI */ 2135 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ECPRI); 2136 return BNXT_TF_RC_SUCCESS; 2137 } 2138 2139 /* Function to handle the parsing of RTE Flow item void Header */ 2140 int32_t 2141 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused, 2142 struct ulp_rte_parser_params *params __rte_unused) 2143 { 2144 return BNXT_TF_RC_SUCCESS; 2145 } 2146 2147 /* Function to handle the parsing of RTE Flow action void Header. */ 2148 int32_t 2149 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused, 2150 struct ulp_rte_parser_params *params __rte_unused) 2151 { 2152 return BNXT_TF_RC_SUCCESS; 2153 } 2154 2155 /* Function to handle the parsing of RTE Flow action Mark Header. 
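The 32-bit mark id is stored big-endian in the MARK action property.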
*/
2156 int32_t
2157 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
2158 struct ulp_rte_parser_params *param)
2159 {
2160 const struct rte_flow_action_mark *mark;
2161 struct ulp_rte_act_bitmap *act = &param->act_bitmap;
2162 uint32_t mark_id;
2163
2164 mark = action_item->conf;
2165 if (mark) {
2166 mark_id = tfp_cpu_to_be_32(mark->id);
2167 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
2168 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
2169
2170 /* Update the act_bitmap with mark */
2171 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
2172 return BNXT_TF_RC_SUCCESS;
2173 }
2174 BNXT_DRV_DBG(ERR, "Parse Error: Mark arg is invalid\n");
2175 return BNXT_TF_RC_ERROR;
2176 }
2177
2178 /* Function to handle the parsing of RTE Flow action RSS Header. */
2179 int32_t
2180 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
2181 struct ulp_rte_parser_params *param)
2182 {
2183 const struct rte_flow_action_rss *rss;
2184 struct ulp_rte_act_prop *ap = &param->act_prop;
2185 uint64_t queue_list[BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE / sizeof(uint64_t)];
2186 uint32_t idx = 0, id;
2187
2188 if (action_item == NULL || action_item->conf == NULL) {
2189 BNXT_DRV_DBG(ERR, "Parse Err: invalid rss configuration\n");
2190 return BNXT_TF_RC_ERROR;
2191 }
2192
2193 rss = action_item->conf;
2194 /* Copy the rss into the specific action properties */
2195 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_FUNC], &rss->func,
2196 BNXT_ULP_ACT_PROP_SZ_RSS_FUNC);
2197 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_TYPES], &rss->types,
2198 BNXT_ULP_ACT_PROP_SZ_RSS_TYPES);
2199 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL], &rss->level,
2200 BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL);
2201 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN],
2202 &rss->key_len, BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN);
2203
2204 if (rss->key_len != 0 && rss->key_len != BNXT_ULP_ACT_PROP_SZ_RSS_KEY) {
2205 BNXT_DRV_DBG(ERR, "Parse Err: RSS key length must be 40 bytes\n");
2206 return BNXT_TF_RC_ERROR;
2207 }
2208
2209 /* User may specify only key length. In that case, rss->key will be NULL.
2210 * So, reject the flow if key_length is valid but rss->key is NULL.
2211 * Also, copy the RSS hash key only when rss->key is valid.
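* The queue list further below is packed into an MSB-first bitmap, one bit
* per queue id; e.g. assuming 64-bit bitmap words, queue id 5 sets bit
* 63 - 5 = 58 of word 0. Duplicate queue ids are rejected.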
2212 */ 2213 if (rss->key_len != 0 && rss->key == NULL) { 2214 BNXT_DRV_DBG(ERR, 2215 "Parse Err: A valid RSS key must be provided with a valid key len.\n"); 2216 return BNXT_TF_RC_ERROR; 2217 } 2218 if (rss->key) 2219 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY], rss->key, rss->key_len); 2220 2221 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_QUEUE_NUM], 2222 &rss->queue_num, BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE_NUM); 2223 2224 if (rss->queue_num >= ULP_BYTE_2_BITS(BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE)) { 2225 BNXT_DRV_DBG(ERR, "Parse Err: RSS queue num too big\n"); 2226 return BNXT_TF_RC_ERROR; 2227 } 2228 2229 /* Queues converted into a bitmap format */ 2230 memset(queue_list, 0, sizeof(queue_list)); 2231 for (idx = 0; idx < rss->queue_num; idx++) { 2232 id = rss->queue[idx]; 2233 if (id >= ULP_BYTE_2_BITS(BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE)) { 2234 BNXT_DRV_DBG(ERR, "Parse Err: RSS queue id too big\n"); 2235 return BNXT_TF_RC_ERROR; 2236 } 2237 if ((queue_list[id / ULP_INDEX_BITMAP_SIZE] >> 2238 ((ULP_INDEX_BITMAP_SIZE - 1) - 2239 (id % ULP_INDEX_BITMAP_SIZE)) & 1)) { 2240 BNXT_DRV_DBG(ERR, "Parse Err: duplicate queue ids\n"); 2241 return BNXT_TF_RC_ERROR; 2242 } 2243 queue_list[id / ULP_INDEX_BITMAP_SIZE] |= (1UL << 2244 ((ULP_INDEX_BITMAP_SIZE - 1) - (id % ULP_INDEX_BITMAP_SIZE))); 2245 } 2246 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_QUEUE], 2247 (uint8_t *)queue_list, BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE); 2248 2249 /* set the RSS action header bit */ 2250 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS); 2251 2252 return BNXT_TF_RC_SUCCESS; 2253 } 2254 2255 /* Function to handle the parsing of RTE Flow item eth Header. */ 2256 static void 2257 ulp_rte_enc_eth_hdr_handler(struct ulp_rte_parser_params *params, 2258 const struct rte_flow_item_eth *eth_spec) 2259 { 2260 struct ulp_rte_hdr_field *field; 2261 uint32_t size; 2262 2263 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_ETH_DMAC]; 2264 size = sizeof(eth_spec->hdr.dst_addr.addr_bytes); 2265 field = ulp_rte_parser_fld_copy(field, eth_spec->hdr.dst_addr.addr_bytes, size); 2266 2267 size = sizeof(eth_spec->hdr.src_addr.addr_bytes); 2268 field = ulp_rte_parser_fld_copy(field, eth_spec->hdr.src_addr.addr_bytes, size); 2269 2270 size = sizeof(eth_spec->hdr.ether_type); 2271 field = ulp_rte_parser_fld_copy(field, ð_spec->hdr.ether_type, size); 2272 2273 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH); 2274 } 2275 2276 /* Function to handle the parsing of RTE Flow item vlan Header. */ 2277 static void 2278 ulp_rte_enc_vlan_hdr_handler(struct ulp_rte_parser_params *params, 2279 const struct rte_flow_item_vlan *vlan_spec, 2280 uint32_t inner) 2281 { 2282 struct ulp_rte_hdr_field *field; 2283 uint32_t size; 2284 2285 if (!inner) { 2286 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_O_VLAN_TCI]; 2287 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, 2288 BNXT_ULP_HDR_BIT_OO_VLAN); 2289 } else { 2290 field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_I_VLAN_TCI]; 2291 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, 2292 BNXT_ULP_HDR_BIT_OI_VLAN); 2293 } 2294 2295 size = sizeof(vlan_spec->hdr.vlan_tci); 2296 field = ulp_rte_parser_fld_copy(field, &vlan_spec->hdr.vlan_tci, size); 2297 2298 size = sizeof(vlan_spec->hdr.eth_proto); 2299 field = ulp_rte_parser_fld_copy(field, &vlan_spec->hdr.eth_proto, size); 2300 } 2301 2302 /* Function to handle the parsing of RTE Flow item ipv4 Header. 
*/
2303 static void
2304 ulp_rte_enc_ipv4_hdr_handler(struct ulp_rte_parser_params *params,
2305 const struct rte_flow_item_ipv4 *ip)
2306 {
2307 struct ulp_rte_hdr_field *field;
2308 uint32_t size;
2309 uint8_t val8;
2310
2311 field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_IHL];
2312 size = sizeof(ip->hdr.version_ihl);
2313 if (!ip->hdr.version_ihl)
2314 val8 = RTE_IPV4_VHL_DEF;
2315 else
2316 val8 = ip->hdr.version_ihl;
2317 field = ulp_rte_parser_fld_copy(field, &val8, size);
2318
2319 size = sizeof(ip->hdr.type_of_service);
2320 field = ulp_rte_parser_fld_copy(field, &ip->hdr.type_of_service, size);
2321
2322 size = sizeof(ip->hdr.packet_id);
2323 field = ulp_rte_parser_fld_copy(field, &ip->hdr.packet_id, size);
2324
2325 size = sizeof(ip->hdr.fragment_offset);
2326 field = ulp_rte_parser_fld_copy(field, &ip->hdr.fragment_offset, size);
2327
2328 size = sizeof(ip->hdr.time_to_live);
2329 if (!ip->hdr.time_to_live)
2330 val8 = BNXT_ULP_DEFAULT_TTL;
2331 else
2332 val8 = ip->hdr.time_to_live;
2333 field = ulp_rte_parser_fld_copy(field, &val8, size);
2334
2335 size = sizeof(ip->hdr.next_proto_id);
2336 field = ulp_rte_parser_fld_copy(field, &ip->hdr.next_proto_id, size);
2337
2338 size = sizeof(ip->hdr.src_addr);
2339 field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
2340
2341 size = sizeof(ip->hdr.dst_addr);
2342 field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
2343
2344 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4);
2345 }
2346
2347 /* Function to handle the parsing of RTE Flow item ipv6 Header for encap. */
2348 static void
2349 ulp_rte_enc_ipv6_hdr_handler(struct ulp_rte_parser_params *params,
2350 const struct rte_flow_item_ipv6 *ip)
2351 {
2352 struct ulp_rte_hdr_field *field;
2353 uint32_t size;
2354 uint32_t val32;
2355 uint8_t val8;
2356
2357 field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW];
2358 size = sizeof(ip->hdr.vtc_flow);
2359 if (!ip->hdr.vtc_flow)
2360 val32 = rte_cpu_to_be_32(BNXT_ULP_IPV6_DFLT_VER);
2361 else
2362 val32 = ip->hdr.vtc_flow;
2363 field = ulp_rte_parser_fld_copy(field, &val32, size);
2364
2365 size = sizeof(ip->hdr.proto);
2366 field = ulp_rte_parser_fld_copy(field, &ip->hdr.proto, size);
2367
2368 size = sizeof(ip->hdr.hop_limits);
2369 if (!ip->hdr.hop_limits)
2370 val8 = BNXT_ULP_DEFAULT_TTL;
2371 else
2372 val8 = ip->hdr.hop_limits;
2373 field = ulp_rte_parser_fld_copy(field, &val8, size);
2374
2375 size = sizeof(ip->hdr.src_addr);
2376 field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
2377
2378 size = sizeof(ip->hdr.dst_addr);
2379 field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
2380
2381 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV6);
2382 }
2383
2384 /* Function to handle the parsing of RTE Flow item UDP Header for encap.
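Besides the UDP ports, this also pins the encap IP next-protocol field (both the IPv4 and IPv6 slots) to IPPROTO_UDP.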
*/
2385 static void
2386 ulp_rte_enc_udp_hdr_handler(struct ulp_rte_parser_params *params,
2387 const struct rte_flow_item_udp *udp_spec)
2388 {
2389 struct ulp_rte_hdr_field *field;
2390 uint32_t size;
2391 uint8_t type = IPPROTO_UDP;
2392
2393 field = &params->enc_field[BNXT_ULP_ENC_FIELD_UDP_SPORT];
2394 size = sizeof(udp_spec->hdr.src_port);
2395 field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.src_port, size);
2396
2397 size = sizeof(udp_spec->hdr.dst_port);
2398 field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.dst_port, size);
2399
2400 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_UDP);
2401
2402 /* Update the ip header protocol */
2403 field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_PROTO];
2404 ulp_rte_parser_fld_copy(field, &type, sizeof(type));
2405 field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_PROTO];
2406 ulp_rte_parser_fld_copy(field, &type, sizeof(type));
2407 }
2408
2409 /* Function to handle the parsing of RTE Flow item vxlan Header for encap. */
2410 static void
2411 ulp_rte_enc_vxlan_hdr_handler(struct ulp_rte_parser_params *params,
2412 struct rte_flow_item_vxlan *vxlan_spec)
2413 {
2414 struct ulp_rte_hdr_field *field;
2415 uint32_t size;
2416
2417 field = &params->enc_field[BNXT_ULP_ENC_FIELD_VXLAN_FLAGS];
2418 size = sizeof(vxlan_spec->hdr.flags);
2419 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.flags, size);
2420
2421 size = sizeof(vxlan_spec->hdr.rsvd0);
2422 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.rsvd0, size);
2423
2424 size = sizeof(vxlan_spec->hdr.vni);
2425 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.vni, size);
2426
2427 size = sizeof(vxlan_spec->hdr.rsvd1);
2428 field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.rsvd1, size);
2429
2430 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_VXLAN);
2431 }
2432
2433 /* Function to handle the parsing of RTE Flow action vxlan_encap Header.
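The encap definition must be the item chain ETH, optional single or double VLAN, IPV4 or IPV6, UDP, VXLAN; VOID items between entries are skipped.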
*/
2434 int32_t
2435 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
2436 struct ulp_rte_parser_params *params)
2437 {
2438 const struct rte_flow_action_vxlan_encap *vxlan_encap;
2439 const struct rte_flow_item *item;
2440 const struct rte_flow_item_ipv4 *ipv4_spec;
2441 const struct rte_flow_item_ipv6 *ipv6_spec;
2442 struct rte_flow_item_vxlan vxlan_spec;
2443 uint32_t vlan_num = 0, vlan_size = 0;
2444 uint32_t ip_size = 0, ip_type = 0;
2445 uint32_t vxlan_size = 0;
2446 struct ulp_rte_act_bitmap *act = &params->act_bitmap;
2447 struct ulp_rte_act_prop *ap = &params->act_prop;
2448
2449 vxlan_encap = action_item->conf;
2450 if (!vxlan_encap) {
2451 BNXT_DRV_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
2452 return BNXT_TF_RC_ERROR;
2453 }
2454
2455 item = vxlan_encap->definition;
2456 if (!item) {
2457 BNXT_DRV_DBG(ERR, "Parse Error: definition arg is invalid\n");
2458 return BNXT_TF_RC_ERROR;
2459 }
2460
2461 if (!ulp_rte_item_skip_void(&item, 0))
2462 return BNXT_TF_RC_ERROR;
2463
2464 /* must have ethernet header */
2465 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2466 BNXT_DRV_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
2467 return BNXT_TF_RC_ERROR;
2468 }
2469
2470 /* Parse the ethernet header */
2471 if (item->spec)
2472 ulp_rte_enc_eth_hdr_handler(params, item->spec);
2473
2474 /* Goto the next item */
2475 if (!ulp_rte_item_skip_void(&item, 1))
2476 return BNXT_TF_RC_ERROR;
2477
2478 /* May have vlan header */
2479 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2480 vlan_num++;
2481 if (item->spec)
2482 ulp_rte_enc_vlan_hdr_handler(params, item->spec, 0);
2483
2484 if (!ulp_rte_item_skip_void(&item, 1))
2485 return BNXT_TF_RC_ERROR;
2486 }
2487
2488 /* may have two vlan headers */
2489 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2490 vlan_num++;
2491 if (item->spec)
2492 ulp_rte_enc_vlan_hdr_handler(params, item->spec, 1);
2493
2494 if (!ulp_rte_item_skip_void(&item, 1))
2495 return BNXT_TF_RC_ERROR;
2496 }
2497
2498 /* Update the vlan count and size if one or more vlans are present */
2499 if (vlan_num) {
2500 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
2501 vlan_num = tfp_cpu_to_be_32(vlan_num);
2502 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
2503 &vlan_num,
2504 sizeof(uint32_t));
2505 vlan_size = tfp_cpu_to_be_32(vlan_size);
2506 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
2507 &vlan_size,
2508 sizeof(uint32_t));
2509 }
2510
2511 /* L3 must be IPv4 or IPv6 */
2512 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
2513 ipv4_spec = item->spec;
2514 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
2515
2516 /* Update the ip size details */
2517 ip_size = tfp_cpu_to_be_32(ip_size);
2518 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2519 &ip_size, sizeof(uint32_t));
2520
2521 /* update the ip type */
2522 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
2523 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2524 &ip_type, sizeof(uint32_t));
2525
2526 /* update the computed field to notify it is ipv4 header */
2527 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
2528 1);
2529 if (ipv4_spec)
2530 ulp_rte_enc_ipv4_hdr_handler(params, ipv4_spec);
2531
2532 if (!ulp_rte_item_skip_void(&item, 1))
2533 return BNXT_TF_RC_ERROR;
2534 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2535 ipv6_spec = item->spec;
2536 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
2537
2538 /* Update the ip size details */
2539 ip_size = tfp_cpu_to_be_32(ip_size);
2540 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2541 &ip_size, sizeof(uint32_t));
2542
2543 /* update the ip type */
2544 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
2545 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2546 &ip_type, sizeof(uint32_t));
2547
2548 /* update the computed field to notify it is ipv6 header */
2549 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
2550 1);
2551 if (ipv6_spec)
2552 ulp_rte_enc_ipv6_hdr_handler(params, ipv6_spec);
2553
2554 if (!ulp_rte_item_skip_void(&item, 1))
2555 return BNXT_TF_RC_ERROR;
2556 } else {
2557 BNXT_DRV_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
2558 return BNXT_TF_RC_ERROR;
2559 }
2560
2561 /* L4 is UDP */
2562 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
2563 BNXT_DRV_DBG(ERR, "vxlan encap does not have udp\n");
2564 return BNXT_TF_RC_ERROR;
2565 }
2566 if (item->spec)
2567 ulp_rte_enc_udp_hdr_handler(params, item->spec);
2568
2569 if (!ulp_rte_item_skip_void(&item, 1))
2570 return BNXT_TF_RC_ERROR;
2571
2572 /* Finally VXLAN */
2573 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2574 BNXT_DRV_DBG(ERR, "vxlan encap does not have vni\n");
2575 return BNXT_TF_RC_ERROR;
2576 }
2577 vxlan_size = sizeof(struct rte_flow_item_vxlan);
2578 /* copy the vxlan details */
2579 memcpy(&vxlan_spec, item->spec, vxlan_size);
2580 vxlan_spec.hdr.flags = 0x08;
2581 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
2582 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
2583 &vxlan_size, sizeof(uint32_t));
2584
2585 ulp_rte_enc_vxlan_hdr_handler(params, &vxlan_spec);
2586
2587 /* update the act_bitmap with vxlan encap */
2588 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
2589 return BNXT_TF_RC_SUCCESS;
2590 }
2591
2592 /* Function to handle the parsing of RTE Flow action vxlan_decap Header */
2593 int32_t
2594 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
2595 __rte_unused,
2596 struct ulp_rte_parser_params *params)
2597 {
2598 /* update the act_bitmap with vxlan decap */
2599 ULP_BITMAP_SET(params->act_bitmap.bits,
2600 BNXT_ULP_ACT_BIT_VXLAN_DECAP);
2601 /* Update computational field with tunnel decap info */
2602 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
2603 return BNXT_TF_RC_SUCCESS;
2604 }
2605
2606 /* Function to handle the parsing of RTE Flow action drop Header. */
2607 int32_t
2608 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
2609 struct ulp_rte_parser_params *params)
2610 {
2611 /* Update the act_bitmap with drop */
2612 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
2613 return BNXT_TF_RC_SUCCESS;
2614 }
2615
2616 /* Function to handle the parsing of RTE Flow action count.
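When a counter id is supplied it is copied into the COUNT action property; the COUNT action bit is set either way.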
*/
2617 int32_t
2618 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
2619 struct ulp_rte_parser_params *params)
2620 {
2621 const struct rte_flow_action_count *act_count;
2622 struct ulp_rte_act_prop *act_prop = &params->act_prop;
2623
2624 act_count = action_item->conf;
2625 if (act_count) {
2626 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
2627 &act_count->id,
2628 BNXT_ULP_ACT_PROP_SZ_COUNT);
2629 }
2630
2631 /* Update the act_bitmap with count */
2632 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
2633 return BNXT_TF_RC_SUCCESS;
2634 }
2635
2636 static bool ulp_rte_parser_is_portb_vfrep(struct ulp_rte_parser_params *param)
2637 {
2638 return ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_B_IS_VFREP);
2639 }
2640
2641 /*
2642 * Swaps info related to multi-port:
2643 * common:
2644 * BNXT_ULP_CF_IDX_MP_B_IS_VFREP, BNXT_ULP_CF_IDX_MP_A_IS_VFREP
2645 * BNXT_ULP_CF_IDX_MP_PORT_A, BNXT_ULP_CF_IDX_MP_PORT_B
2646 *
2647 * ingress:
2648 * BNXT_ULP_CF_IDX_MP_VNIC_B, BNXT_ULP_CF_IDX_MP_VNIC_A
2649 *
2650 * egress:
2651 * BNXT_ULP_CF_IDX_MP_MDATA_B, BNXT_ULP_CF_IDX_MP_MDATA_A
2652 * BNXT_ULP_CF_IDX_MP_VPORT_B, BNXT_ULP_CF_IDX_MP_VPORT_A
2653 *
2654 * Note: This is done as OVS could give us a non-VFREP port in port B, and we
2655 * cannot use that to mirror, so we swap out the ports so that a VFREP is now
2656 * in port B instead.
2657 */
2658 static int32_t
2659 ulp_rte_parser_normalize_port_info(struct ulp_rte_parser_params *param)
2660 {
2661 uint16_t mp_port_a, mp_port_b, mp_mdata_a, mp_mdata_b,
2662 mp_vport_a, mp_vport_b, mp_vnic_a, mp_vnic_b,
2663 mp_is_vfrep_a, mp_is_vfrep_b;
2664
2665 mp_is_vfrep_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_A_IS_VFREP);
2666 mp_is_vfrep_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_B_IS_VFREP);
2667 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_B_IS_VFREP, mp_is_vfrep_a);
2668 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_A_IS_VFREP, mp_is_vfrep_b);
2669
2670 mp_port_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_PORT_A);
2671 mp_port_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_PORT_B);
2672 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_PORT_B, mp_port_a);
2673 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_PORT_A, mp_port_b);
2674
2675 mp_vport_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_VPORT_A);
2676 mp_vport_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_VPORT_B);
2677 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_VPORT_B, mp_vport_a);
2678 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_VPORT_A, mp_vport_b);
2679
2680 mp_vnic_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_VNIC_A);
2681 mp_vnic_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_VNIC_B);
2682 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_VNIC_B, mp_vnic_a);
2683 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_VNIC_A, mp_vnic_b);
2684
2685 mp_mdata_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_MDATA_A);
2686 mp_mdata_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_MDATA_B);
2687 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_MDATA_B, mp_mdata_a);
2688 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_MDATA_A, mp_mdata_b);
2689
2690 return BNXT_TF_RC_SUCCESS;
2691 }
2692
2693
2694 /* Function to handle the parsing of action ports.
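Resolves the direction (explicit action direction or the flow's computed one), then fills the VPORT and metadata properties for egress or the VNIC property for ingress, tracking the multi-port A/B slots along the way.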
*/
2695 static int32_t
2696 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
2697 uint32_t ifindex, bool multi_port,
2698 enum bnxt_ulp_direction_type act_dir)
2699 {
2700 enum bnxt_ulp_direction_type dir;
2701 uint16_t pid_s;
2702 uint8_t *p_mdata;
2703 uint32_t pid, port_index;
2704 struct ulp_rte_act_prop *act = &param->act_prop;
2705 enum bnxt_ulp_intf_type port_type;
2706 uint32_t vnic_type;
2707
2708 /* Get the direction */
2709 /* If the action implies a direction, use it; otherwise use the flow's. */
2710 dir = (act_dir == BNXT_ULP_DIR_INVALID) ?
2711 ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION) :
2712 act_dir;
2713
2714 port_type = ULP_COMP_FLD_IDX_RD(param,
2715 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
2716
2717 /* Update flag if Port A/B type is VF-REP */
2718 ULP_COMP_FLD_IDX_WR(param, multi_port ?
2719 BNXT_ULP_CF_IDX_MP_B_IS_VFREP :
2720 BNXT_ULP_CF_IDX_MP_A_IS_VFREP,
2721 (port_type == BNXT_ULP_INTF_TYPE_VF_REP) ? 1 : 0);
2722
2723 /* An egress flow where the action port is not another VF endpoint
2724 * requires a VPORT.
2725 */
2726 if (dir == BNXT_ULP_DIR_EGRESS) {
2727 /* For egress direction, fill vport */
2728 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
2729 return BNXT_TF_RC_ERROR;
2730
2731 pid = pid_s;
2732 pid = rte_cpu_to_be_32(pid);
2733 if (!multi_port)
2734 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2735 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2736
2737 /* Fill metadata */
2738 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
2739 port_index = ULP_COMP_FLD_IDX_RD(param, multi_port ?
2740 BNXT_ULP_CF_IDX_MP_PORT_B :
2741 BNXT_ULP_CF_IDX_MP_PORT_A);
2742 if (ulp_port_db_port_meta_data_get(param->ulp_ctx,
2743 port_index, &p_mdata))
2744 return BNXT_TF_RC_ERROR;
2745 /*
2746 * Update appropriate port (A/B) metadata based on multi-port
2747 * indication
2748 */
2749 ULP_COMP_FLD_IDX_WR(param,
2750 multi_port ?
2751 BNXT_ULP_CF_IDX_MP_MDATA_B :
2752 BNXT_ULP_CF_IDX_MP_MDATA_A,
2753 rte_cpu_to_be_16(*((uint16_t *)p_mdata)));
2754 }
2755 /*
2756 * Update appropriate port (A/B) VPORT based on multi-port
2757 * indication.
2758 */
2759 ULP_COMP_FLD_IDX_WR(param,
2760 multi_port ?
2761 BNXT_ULP_CF_IDX_MP_VPORT_B :
2762 BNXT_ULP_CF_IDX_MP_VPORT_A,
2763 pid_s);
2764
2765 /* Setup the VF_TO_VF VNIC information */
2766 if (!multi_port && port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
2767 if (ulp_port_db_default_vnic_get(param->ulp_ctx,
2768 ifindex,
2769 BNXT_ULP_VF_FUNC_VNIC,
2770 &pid_s))
2771 return BNXT_TF_RC_ERROR;
2772 pid = pid_s;
2773
2774 /* Allows use of func_opcode with VNIC */
2775 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_VNIC, pid);
2776 }
2777 } else {
2778 /* For ingress direction, fill vnic */
2779 /*
2780 * Action Destination
2781 * ------------------------------------
2782 * PORT_REPRESENTOR Driver Function
2783 * ------------------------------------
2784 * REPRESENTED_PORT VF
2785 * ------------------------------------
2786 * PORT_ID VF
2787 */
2788 if (act_dir != BNXT_ULP_DIR_INGRESS &&
2789 port_type == BNXT_ULP_INTF_TYPE_VF_REP)
2790 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
2791 else
2792 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
2793
2794 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
2795 vnic_type, &pid_s))
2796 return BNXT_TF_RC_ERROR;
2797
2798 pid = pid_s;
2799
2800 /* Allows use of func_opcode with VNIC */
2801 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_VNIC, pid);
2802
2803 pid = rte_cpu_to_be_32(pid);
2804 if (!multi_port)
2805 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
2806 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
2807 /*
2808 * Update appropriate port (A/B) VNIC based on multi-port
2809 * indication.
2810 */
2811 ULP_COMP_FLD_IDX_WR(param,
2812 multi_port ?
2813 BNXT_ULP_CF_IDX_MP_VNIC_B :
2814 BNXT_ULP_CF_IDX_MP_VNIC_A,
2815 pid_s);
2816 }
2817
2818 if (multi_port && !ulp_rte_parser_is_portb_vfrep(param))
2819 ulp_rte_parser_normalize_port_info(param);
2820
2821 /* Update the action port set bit */
2822 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2823 return BNXT_TF_RC_SUCCESS;
2824 }
2825
2826 /* Function to handle the parsing of RTE Flow action PF. */
2827 int32_t
2828 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
2829 struct ulp_rte_parser_params *params)
2830 {
2831 uint32_t port_id;
2832 uint32_t ifindex;
2833 enum bnxt_ulp_intf_type intf_type;
2834
2835 /* Get the port id of the current device */
2836 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
2837
2838 /* Get the port db ifindex */
2839 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
2840 &ifindex)) {
2841 BNXT_DRV_DBG(ERR, "Invalid port id\n");
2842 return BNXT_TF_RC_ERROR;
2843 }
2844
2845 /* Check that the port is a PF port */
2846 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2847 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
2848 BNXT_DRV_DBG(ERR, "Port is not a PF port\n");
2849 return BNXT_TF_RC_ERROR;
2850 }
2851 /* Update the action properties */
2852 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2853 return ulp_rte_parser_act_port_set(params, ifindex, false,
2854 BNXT_ULP_DIR_INVALID);
2855 }
2856
2857 /* Function to handle the parsing of RTE Flow action VF.
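The VF id carried in the action is an offset from the port's first VF and is converted to an absolute function id before the port database lookup.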
*/
2858 int32_t
2859 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
2860 struct ulp_rte_parser_params *params)
2861 {
2862 const struct rte_flow_action_vf *vf_action;
2863 enum bnxt_ulp_intf_type intf_type;
2864 uint32_t ifindex;
2865 struct bnxt *bp;
2866
2867 vf_action = action_item->conf;
2868 if (!vf_action) {
2869 BNXT_DRV_DBG(ERR, "ParseErr: Invalid Argument\n");
2870 return BNXT_TF_RC_PARSE_ERR;
2871 }
2872
2873 if (vf_action->original) {
2874 BNXT_DRV_DBG(ERR, "ParseErr:VF Original not supported\n");
2875 return BNXT_TF_RC_PARSE_ERR;
2876 }
2877
2878 bp = bnxt_pmd_get_bp(params->port_id);
2879 if (bp == NULL) {
2880 BNXT_DRV_DBG(ERR, "Invalid bp\n");
2881 return BNXT_TF_RC_ERROR;
2882 }
2883
2884 /* vf_action->id is a logical number which in this case is an
2885 * offset from the first VF. So, to get the absolute VF id, the
2886 * offset must be added to the absolute first vf id of that port.
2887 */
2888 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
2889 bp->first_vf_id +
2890 vf_action->id,
2891 &ifindex)) {
2892 BNXT_DRV_DBG(ERR, "VF is not valid interface\n");
2893 return BNXT_TF_RC_ERROR;
2894 }
2895 /* Check that the port is a VF port */
2896 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2897 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
2898 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
2899 BNXT_DRV_DBG(ERR, "Port is not a VF port\n");
2900 return BNXT_TF_RC_ERROR;
2901 }
2902
2903 /* Update the action properties */
2904 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2905 return ulp_rte_parser_act_port_set(params, ifindex, false,
2906 BNXT_ULP_DIR_INVALID);
2907 }
2908
2909 /* Parse actions PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
2910 int32_t
2911 ulp_rte_port_act_handler(const struct rte_flow_action *act_item,
2912 struct ulp_rte_parser_params *param)
2913 {
2914 uint32_t ethdev_id;
2915 uint32_t ifindex;
2916
2917 uint32_t num_ports;
2918 enum bnxt_ulp_intf_type intf_type;
2919 enum bnxt_ulp_direction_type act_dir;
2920
2921 if (!act_item->conf) {
2922 BNXT_DRV_DBG(ERR,
2923 "ParseErr: Invalid Argument\n");
2924 return BNXT_TF_RC_PARSE_ERR;
2925 }
2926 switch (act_item->type) {
2927 case RTE_FLOW_ACTION_TYPE_PORT_ID: {
2928 const struct rte_flow_action_port_id *port_id = act_item->conf;
2929
2930 if (port_id->original) {
2931 BNXT_DRV_DBG(ERR,
2932 "ParseErr:Portid Original not supported\n");
2933 return BNXT_TF_RC_PARSE_ERR;
2934 }
2935 ethdev_id = port_id->id;
2936 act_dir = BNXT_ULP_DIR_INVALID;
2937 break;
2938 }
2939 case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: {
2940 const struct rte_flow_action_ethdev *ethdev = act_item->conf;
2941
2942 ethdev_id = ethdev->port_id;
2943 act_dir = BNXT_ULP_DIR_INGRESS;
2944 break;
2945 }
2946 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
2947 const struct rte_flow_action_ethdev *ethdev = act_item->conf;
2948
2949 ethdev_id = ethdev->port_id;
2950 act_dir = BNXT_ULP_DIR_EGRESS;
2951 break;
2952 }
2953 default:
2954 BNXT_DRV_DBG(ERR, "Unknown port action\n");
2955 return BNXT_TF_RC_ERROR;
2956 }
2957
2958 num_ports = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_NPORTS);
2959
2960 if (num_ports) {
2961 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_PORT_B,
2962 ethdev_id);
2963 ULP_BITMAP_SET(param->act_bitmap.bits,
2964 BNXT_ULP_ACT_BIT_MULTIPLE_PORT);
2965 } else {
2966 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_PORT_A,
2967 ethdev_id);
2968 }
2969
2970 /* Get the port db ifindex */
2971 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, ethdev_id,
2972 &ifindex)) {
2973 BNXT_DRV_DBG(ERR, "Invalid port id\n");
2974 return BNXT_TF_RC_ERROR;
2975 }
2976
2977 /* Get the intf type */
2978 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
2979 if (!intf_type) {
2980 BNXT_DRV_DBG(ERR, "Invalid port type\n");
2981 return BNXT_TF_RC_ERROR;
2982 }
2983
2984 /* Set the action port */
2985 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2986 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_DEV_ACT_PORT_ID,
2987 ethdev_id);
2988
2989 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_NPORTS, ++num_ports);
2990 return ulp_rte_parser_act_port_set(param, ifindex,
2991 ULP_BITMAP_ISSET(param->act_bitmap.bits,
2992 BNXT_ULP_ACT_BIT_MULTIPLE_PORT),
2993 act_dir);
2994 }
2995
2996 /* Function to handle the parsing of RTE Flow action pop vlan. */
2997 int32_t
2998 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2999 struct ulp_rte_parser_params *params)
3000 {
3001 /* Update the act_bitmap with pop */
3002 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
3003 return BNXT_TF_RC_SUCCESS;
3004 }
3005
3006 /* Function to handle the parsing of RTE Flow action push vlan. */
3007 int32_t
3008 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
3009 struct ulp_rte_parser_params *params)
3010 {
3011 const struct rte_flow_action_of_push_vlan *push_vlan;
3012 uint16_t ethertype;
3013 struct ulp_rte_act_prop *act = &params->act_prop;
3014
3015 push_vlan = action_item->conf;
3016 if (push_vlan) {
3017 ethertype = push_vlan->ethertype;
3018 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
3019 BNXT_DRV_DBG(ERR,
3020 "Parse Err: Ethertype not supported\n");
3021 return BNXT_TF_RC_PARSE_ERR;
3022 }
3023 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
3024 &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
3025 /* Update the act_bitmap with push vlan */
3026 ULP_BITMAP_SET(params->act_bitmap.bits,
3027 BNXT_ULP_ACT_BIT_PUSH_VLAN);
3028 return BNXT_TF_RC_SUCCESS;
3029 }
3030 BNXT_DRV_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
3031 return BNXT_TF_RC_ERROR;
3032 }
3033
3034 /* Function to handle the parsing of RTE Flow action set vlan id. */
3035 int32_t
3036 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
3037 struct ulp_rte_parser_params *params)
3038 {
3039 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
3040 uint32_t vid;
3041 struct ulp_rte_act_prop *act = &params->act_prop;
3042
3043 vlan_vid = action_item->conf;
3044 if (vlan_vid && vlan_vid->vlan_vid) {
3045 vid = vlan_vid->vlan_vid;
3046 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
3047 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
3048 /* Update the act_bitmap with set vlan vid */
3049 ULP_BITMAP_SET(params->act_bitmap.bits,
3050 BNXT_ULP_ACT_BIT_SET_VLAN_VID);
3051 return BNXT_TF_RC_SUCCESS;
3052 }
3053 BNXT_DRV_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
3054 return BNXT_TF_RC_ERROR;
3055 }
3056
3057 /* Function to handle the parsing of RTE Flow action set vlan pcp.
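The 3-bit priority value is copied into the SET_VLAN_PCP action property.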
*/
3058 int32_t
3059 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
3060 struct ulp_rte_parser_params *params)
3061 {
3062 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
3063 uint8_t pcp;
3064 struct ulp_rte_act_prop *act = &params->act_prop;
3065
3066 vlan_pcp = action_item->conf;
3067 if (vlan_pcp) {
3068 pcp = vlan_pcp->vlan_pcp;
3069 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
3070 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
3071 /* Update the act_bitmap with set vlan pcp */
3072 ULP_BITMAP_SET(params->act_bitmap.bits,
3073 BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
3074 return BNXT_TF_RC_SUCCESS;
3075 }
3076 BNXT_DRV_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
3077 return BNXT_TF_RC_ERROR;
3078 }
3079
3080 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
3081 int32_t
3082 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
3083 struct ulp_rte_parser_params *params)
3084 {
3085 const struct rte_flow_action_set_ipv4 *set_ipv4;
3086 struct ulp_rte_act_prop *act = &params->act_prop;
3087
3088 set_ipv4 = action_item->conf;
3089 if (set_ipv4) {
3090 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
3091 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
3092 /* Update the act_bitmap with set ipv4 src */
3093 ULP_BITMAP_SET(params->act_bitmap.bits,
3094 BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
3095 return BNXT_TF_RC_SUCCESS;
3096 }
3097 BNXT_DRV_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
3098 return BNXT_TF_RC_ERROR;
3099 }
3100
3101 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
3102 int32_t
3103 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
3104 struct ulp_rte_parser_params *params)
3105 {
3106 const struct rte_flow_action_set_ipv4 *set_ipv4;
3107 struct ulp_rte_act_prop *act = &params->act_prop;
3108
3109 set_ipv4 = action_item->conf;
3110 if (set_ipv4) {
3111 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
3112 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
3113 /* Update the act_bitmap with set ipv4 dst */
3114 ULP_BITMAP_SET(params->act_bitmap.bits,
3115 BNXT_ULP_ACT_BIT_SET_IPV4_DST);
3116 return BNXT_TF_RC_SUCCESS;
3117 }
3118 BNXT_DRV_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
3119 return BNXT_TF_RC_ERROR;
3120 }
3121
3122 /* Function to handle the parsing of RTE Flow action set ipv6 src.*/
3123 int32_t
3124 ulp_rte_set_ipv6_src_act_handler(const struct rte_flow_action *action_item,
3125 struct ulp_rte_parser_params *params)
3126 {
3127 const struct rte_flow_action_set_ipv6 *set_ipv6;
3128 struct ulp_rte_act_prop *act = &params->act_prop;
3129
3130 set_ipv6 = action_item->conf;
3131 if (set_ipv6) {
3132 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC],
3133 &set_ipv6->ipv6_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV6_SRC);
3134 /* Update the act_bitmap with set ipv6 src */
3135 ULP_BITMAP_SET(params->act_bitmap.bits,
3136 BNXT_ULP_ACT_BIT_SET_IPV6_SRC);
3137 return BNXT_TF_RC_SUCCESS;
3138 }
3139 BNXT_DRV_DBG(ERR, "Parse Error: set ipv6 src arg is invalid\n");
3140 return BNXT_TF_RC_ERROR;
3141 }
3142
3143 /* Function to handle the parsing of RTE Flow action set ipv6 dst.*/
3144 int32_t
3145 ulp_rte_set_ipv6_dst_act_handler(const struct rte_flow_action *action_item,
3146 struct ulp_rte_parser_params *params)
3147 {
3148 const struct rte_flow_action_set_ipv6 *set_ipv6;
3149 struct ulp_rte_act_prop *act = &params->act_prop;
3150
3151 set_ipv6 = action_item->conf;
3152 if (set_ipv6) {
3153 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST],
3154 &set_ipv6->ipv6_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV6_DST);
3155 /* Update the act_bitmap with set ipv6 dst */
3156 ULP_BITMAP_SET(params->act_bitmap.bits,
3157 BNXT_ULP_ACT_BIT_SET_IPV6_DST);
3158 return BNXT_TF_RC_SUCCESS;
3159 }
3160 BNXT_DRV_DBG(ERR, "Parse Error: set ipv6 dst arg is invalid\n");
3161 return BNXT_TF_RC_ERROR;
3162 }
3163
3164 /* Function to handle the parsing of RTE Flow action set tp src.*/
3165 int32_t
3166 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
3167 struct ulp_rte_parser_params *params)
3168 {
3169 const struct rte_flow_action_set_tp *set_tp;
3170 struct ulp_rte_act_prop *act = &params->act_prop;
3171
3172 set_tp = action_item->conf;
3173 if (set_tp) {
3174 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
3175 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
3176 /* Update the act_bitmap with set tp src */
3177 ULP_BITMAP_SET(params->act_bitmap.bits,
3178 BNXT_ULP_ACT_BIT_SET_TP_SRC);
3179 return BNXT_TF_RC_SUCCESS;
3180 }
3181
3182 BNXT_DRV_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
3183 return BNXT_TF_RC_ERROR;
3184 }
3185
3186 /* Function to handle the parsing of RTE Flow action set tp dst.*/
3187 int32_t
3188 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
3189 struct ulp_rte_parser_params *params)
3190 {
3191 const struct rte_flow_action_set_tp *set_tp;
3192 struct ulp_rte_act_prop *act = &params->act_prop;
3193
3194 set_tp = action_item->conf;
3195 if (set_tp) {
3196 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
3197 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
3198 /* Update the act_bitmap with set tp dst */
3199 ULP_BITMAP_SET(params->act_bitmap.bits,
3200 BNXT_ULP_ACT_BIT_SET_TP_DST);
3201 return BNXT_TF_RC_SUCCESS;
3202 }
3203
3204 BNXT_DRV_DBG(ERR, "Parse Error: set tp dst arg is invalid\n");
3205 return BNXT_TF_RC_ERROR;
3206 }
3207
3208 /* Function to handle the parsing of RTE Flow action dec ttl.*/
3209 int32_t
3210 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
3211 struct ulp_rte_parser_params *params)
3212 {
3213 /* Update the act_bitmap with dec ttl */
3214 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
3215 return BNXT_TF_RC_SUCCESS;
3216 }
3217
3218 /* Function to handle the parsing of RTE Flow action set ttl.*/
3219 int32_t
3220 ulp_rte_set_ttl_act_handler(const struct rte_flow_action *action_item,
3221 struct ulp_rte_parser_params *params)
3222 {
3223 const struct rte_flow_action_set_ttl *set_ttl;
3224 struct ulp_rte_act_prop *act = &params->act_prop;
3225
3226 set_ttl = action_item->conf;
3227 if (set_ttl) {
3228 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TTL],
3229 &set_ttl->ttl_value, BNXT_ULP_ACT_PROP_SZ_SET_TTL);
3230 /* Update the act_bitmap with dec ttl */
3231 /* Note: the NIC HW does not support the set_ttl action, so dec_ttl
3232 * is used to simulate it; this requires the TTL field matched by
3233 * the flow to be exactly one more than the set_ttl value.
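* For example, a flow matching ipv4 ttl 64 may carry set_ttl 63 (handled
* as dec_ttl); any other set_ttl value is rejected.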
3234 */
3235 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3_TTL) ==
3236 (uint32_t)(set_ttl->ttl_value + 1)) {
3237 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
3238 return BNXT_TF_RC_SUCCESS;
3239 }
3240 BNXT_DRV_DBG(ERR, "Parse Error: set_ttl value does not match the flow's TTL field.\n");
3241 return BNXT_TF_RC_ERROR;
3242 }
3243
3244 BNXT_DRV_DBG(ERR, "Parse Error: set ttl arg is invalid.\n");
3245 return BNXT_TF_RC_ERROR;
3246 }
3247
3248 /* Function to handle the parsing of RTE Flow action JUMP */
3249 int32_t
3250 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item,
3251 struct ulp_rte_parser_params *params)
3252 {
3253 const struct rte_flow_action_jump *jump_act;
3254 struct ulp_rte_act_prop *act = &params->act_prop;
3255 uint32_t group_id;
3256
3257 jump_act = action_item->conf;
3258 if (jump_act) {
3259 group_id = rte_cpu_to_be_32(jump_act->group);
3260 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_JUMP],
3261 &group_id, BNXT_ULP_ACT_PROP_SZ_JUMP);
3262 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
3263 }
3264 return BNXT_TF_RC_SUCCESS;
3265 }
3266
3267 int32_t
3268 ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
3269 struct ulp_rte_parser_params *params)
3270 {
3271 const struct rte_flow_action_sample *sample;
3272 int ret;
3273
3274 sample = action_item->conf;
3275
3276 /* if SAMPLE bit is set it means this sample action is nested within the
3277 * actions of another sample action; this is not allowed
3278 */
3279 if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
3280 BNXT_ULP_ACT_BIT_SAMPLE))
3281 return BNXT_TF_RC_ERROR;
3282
3283 /* a sample action is only allowed as a shared action */
3284 if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
3285 BNXT_ULP_ACT_BIT_SHARED))
3286 return BNXT_TF_RC_ERROR;
3287
3288 /* only a ratio of 1 i.e.
/* Function to handle the parsing of RTE Flow action JUMP */
int32_t
ulp_rte_jump_act_handler(const struct rte_flow_action *action_item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_jump *jump_act;
	struct ulp_rte_act_prop *act = &params->act_prop;
	uint32_t group_id;

	jump_act = action_item->conf;
	if (jump_act) {
		group_id = rte_cpu_to_be_32(jump_act->group);
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_JUMP],
		       &group_id, BNXT_ULP_ACT_PROP_SZ_JUMP);
		ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
	}
	return BNXT_TF_RC_SUCCESS;
}

int32_t
ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
			   struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_sample *sample;
	int ret;

	sample = action_item->conf;

	/* If the SAMPLE bit is already set, this sample action is nested
	 * within the actions of another sample action; this is not allowed.
	 */
	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
			     BNXT_ULP_ACT_BIT_SAMPLE))
		return BNXT_TF_RC_ERROR;

	/* A sample action is only allowed as a shared action. */
	if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
			      BNXT_ULP_ACT_BIT_SHARED))
		return BNXT_TF_RC_ERROR;

	/* Only a ratio of 1, i.e. 100%, is supported. */
	if (sample->ratio != 1)
		return BNXT_TF_RC_ERROR;

	if (!sample->actions)
		return BNXT_TF_RC_ERROR;

	/* Parse the nested actions of the sample action. */
	ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
	if (ret == BNXT_TF_RC_SUCCESS)
		/* Update the act_bitmap with sample */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SAMPLE);

	return ret;
}

int32_t
ulp_rte_action_hdlr_handler(const struct rte_flow_action *action_item,
			    struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_handle *handle;
	struct bnxt_ulp_shared_act_info *act_info;
	uint64_t action_bitmask;
	uint32_t shared_action_type;
	struct ulp_rte_act_prop *act = &params->act_prop;
	uint64_t tmp64;
	enum bnxt_ulp_direction_type dir, handle_dir;
	uint32_t act_info_entries = 0;
	int32_t ret;

	handle = action_item->conf;

	/* Have to use the computed direction since params->dir_attr
	 * can be different (transfer, ingress, egress).
	 */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);

	/* The direction of a shared action must match the direction of
	 * the flow.
	 */
	ret = bnxt_get_action_handle_direction(handle, &handle_dir);
	if (unlikely(ret || dir != handle_dir)) {
		BNXT_DRV_DBG(ERR, "Invalid shared handle or direction\n");
		return BNXT_TF_RC_ERROR;
	}

	if (unlikely(bnxt_get_action_handle_type(handle,
						 &shared_action_type))) {
		BNXT_DRV_DBG(ERR, "Invalid shared handle\n");
		return BNXT_TF_RC_ERROR;
	}

	act_info = bnxt_ulp_shared_act_info_get(&act_info_entries);
	if (unlikely(shared_action_type >= act_info_entries || !act_info)) {
		BNXT_DRV_DBG(ERR, "Invalid shared handle\n");
		return BNXT_TF_RC_ERROR;
	}

	action_bitmask = act_info[shared_action_type].act_bitmask;

	/* Shared actions of the same type cannot be repeated. */
	if (unlikely(params->act_bitmap.bits & action_bitmask)) {
		BNXT_DRV_DBG(ERR, "Indirect actions cannot be repeated\n");
		return BNXT_TF_RC_ERROR;
	}

	tmp64 = tfp_cpu_to_be_64((uint64_t)
				 bnxt_get_action_handle_index(handle));

	memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE],
	       &tmp64, BNXT_ULP_ACT_PROP_SZ_SHARED_HANDLE);

	ULP_BITMAP_SET(params->act_bitmap.bits, action_bitmask);

	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of the bnxt vendor RTE Flow action
 * vxlan decap.
 */
int32_t
ulp_vendor_vxlan_decap_act_handler(const struct rte_flow_action *action_item,
				   struct ulp_rte_parser_params *params)
{
	/* Set the F1 flow header bit */
	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);
	return ulp_rte_vxlan_decap_act_handler(action_item, params);
}

/* Function to handle the parsing of the bnxt vendor RTE Flow item
 * vxlan decap.
 */
int32_t
ulp_rte_vendor_vxlan_decap_hdr_handler(const struct rte_flow_item *item,
				       struct ulp_rte_parser_params *params)
{
	RTE_SET_USED(item);
	/* Set the F2 flow header bit */
	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F2);
	return ulp_rte_vxlan_decap_act_handler(NULL, params);
}
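
/*
 * Illustrative sketch (not driver code): the indirect-action path above is
 * exercised when an application pre-creates a shared action and references
 * it by handle; indir_conf, act and err stand for an application-defined
 * indirect action configuration, action and error struct.
 *
 *	struct rte_flow_action_handle *h;
 *
 *	h = rte_flow_action_handle_create(port_id, &indir_conf, &act, &err);
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_INDIRECT, .conf = h },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */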
/* Function to handle the parsing of RTE Flow action queue. */
int32_t
ulp_rte_queue_act_handler(const struct rte_flow_action *action_item,
			  struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_queue *q_info;
	struct ulp_rte_act_prop *ap = &param->act_prop;

	if (action_item == NULL || action_item->conf == NULL) {
		BNXT_DRV_DBG(ERR, "Parse Err: invalid queue configuration\n");
		return BNXT_TF_RC_ERROR;
	}

	q_info = action_item->conf;
	/* Copy the queue index into the specific action properties */
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_QUEUE_INDEX],
	       &q_info->index, BNXT_ULP_ACT_PROP_SZ_QUEUE_INDEX);

	/* set the queue action header bit */
	ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_QUEUE);

	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action meter. */
int32_t
ulp_rte_meter_act_handler(const struct rte_flow_action *action_item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_meter *meter;
	struct ulp_rte_act_prop *act_prop = &params->act_prop;
	uint32_t tmp_meter_id;

	if (unlikely(action_item == NULL || action_item->conf == NULL)) {
		BNXT_DRV_DBG(ERR, "Parse Err: invalid meter configuration\n");
		return BNXT_TF_RC_ERROR;
	}

	meter = action_item->conf;
	/* validate the mtr_id and update the reference counter */
	tmp_meter_id = tfp_cpu_to_be_32(meter->mtr_id);
	memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_METER],
	       &tmp_meter_id,
	       BNXT_ULP_ACT_PROP_SZ_METER);

	/* set the meter action header bit */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_METER);

	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action set mac src. */
int32_t
ulp_rte_set_mac_src_act_handler(const struct rte_flow_action *action_item,
				struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_mac *set_mac;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_mac = action_item->conf;
	if (likely(set_mac)) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC],
		       set_mac->mac_addr, BNXT_ULP_ACT_PROP_SZ_SET_MAC_SRC);
		/* Update the act_bitmap with set mac src */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_MAC_SRC);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_DRV_DBG(ERR, "Parse Error: set mac src arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action set mac dst. */
int32_t
ulp_rte_set_mac_dst_act_handler(const struct rte_flow_action *action_item,
				struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_set_mac *set_mac;
	struct ulp_rte_act_prop *act = &params->act_prop;

	set_mac = action_item->conf;
	if (likely(set_mac)) {
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST],
		       set_mac->mac_addr, BNXT_ULP_ACT_PROP_SZ_SET_MAC_DST);
		/* Update the act_bitmap with set mac dst */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_SET_MAC_DST);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_DRV_DBG(ERR, "Parse Error: set mac dst arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
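
/*
 * Illustrative sketch (not driver code; the address is a placeholder): a
 * MAC rewrite request as the two handlers above expect it. The six-byte
 * mac_addr array is copied into the action properties unchanged.
 *
 *	struct rte_flow_action_set_mac set_smac = {
 *		.mac_addr = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *	struct rte_flow_action act = {
 *		.type = RTE_FLOW_ACTION_TYPE_SET_MAC_SRC,
 *		.conf = &set_smac,
 *	};
 */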