/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2020 Broadcom
 * All rights reserved.
 */

#include "bnxt.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "bnxt_tf_common.h"
#include "ulp_rte_parser.h"
#include "ulp_utils.h"
#include "tfp.h"
#include "ulp_port_db.h"

/* Utility function to skip the void items. */
static inline int32_t
ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
{
	if (!*item)
		return 0;
	if (increment)
		(*item)++;
	while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
		(*item)++;
	if (*item)
		return 1;
	return 0;
}

/* Utility function to update the field_bitmap */
static void
ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
				   uint32_t idx)
{
	struct ulp_rte_hdr_field *field;

	field = &params->hdr_field[idx];
	if (ulp_bitmap_notzero(field->mask, field->size)) {
		ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
		/* Not an exact match */
		if (!ulp_bitmap_is_ones(field->mask, field->size))
			ULP_BITMAP_SET(params->fld_bitmap.bits,
				       BNXT_ULP_MATCH_TYPE_BITMASK_WM);
	} else {
		ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
	}
}

/* Utility function to copy field spec items */
static struct ulp_rte_hdr_field *
ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
			const void *buffer,
			uint32_t size)
{
	field->size = size;
	memcpy(field->spec, buffer, field->size);
	field++;
	return field;
}

/* Utility function to copy field mask items */
static void
ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
		       uint32_t *idx,
		       const void *buffer,
		       uint32_t size)
{
	struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];

	memcpy(field->mask, buffer, size);
	ulp_rte_parser_field_bitmap_update(params, *idx);
	*idx = *idx + 1;
}
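
/*
 * Parsing convention used below: for every protocol item, the spec bytes
 * are copied into hdr_field[idx].spec and the mask bytes into
 * hdr_field[idx].mask at the same index. ulp_rte_prsr_mask_copy() also
 * updates fld_bitmap: a non-zero mask sets the field's bit, and a mask
 * that is not all-ones additionally sets BNXT_ULP_MATCH_TYPE_BITMASK_WM
 * to mark the match as wildcard rather than exact.
 */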

/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow items into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item *item = pattern;
	struct bnxt_ulp_rte_hdr_info *hdr_info;

	params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
	if (params->dir == ULP_DIR_EGRESS)
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);

	/* Parse all the items in the pattern */
	while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* get the header information from the ulp_hdr_info table */
		hdr_info = &ulp_hdr_info[item->type];
		if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
			BNXT_TF_DBG(ERR,
				    "Truflow parser does not support type %d\n",
				    item->type);
			return BNXT_TF_RC_PARSE_ERR;
		} else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_hdr_func) {
				if (hdr_info->proto_hdr_func(item, params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		item++;
	}
	/* update the implied SVIF */
	(void)ulp_rte_parser_svif_process(params);
	return BNXT_TF_RC_SUCCESS;
}

/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow actions into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action *action_item = actions;
	struct bnxt_ulp_rte_act_info *hdr_info;

	if (params->dir == ULP_DIR_EGRESS)
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);

	/* Parse all the actions in the list */
	while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
		/* get the action information from the ulp_act_info table */
		hdr_info = &ulp_act_info[action_item->type];
		if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
			BNXT_TF_DBG(ERR,
				    "Truflow parser does not support act %u\n",
				    action_item->type);
			return BNXT_TF_RC_ERROR;
		} else if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_act_func) {
				if (hdr_info->proto_act_func(action_item,
							     params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		action_item++;
	}
	/* update the implied VNIC */
	ulp_rte_parser_vnic_process(params);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to set the SVIF (source virtual interface) match field. */
static int32_t
ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
			enum rte_flow_item_type proto,
			uint16_t svif,
			uint16_t mask)
{
	uint16_t port_id = svif;
	uint32_t dir = 0;
	struct ulp_rte_hdr_field *hdr_field;
	uint32_t ifindex;
	int32_t rc;

	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
	    BNXT_ULP_INVALID_SVIF_VAL) {
		BNXT_TF_DBG(ERR,
			    "SVIF already set, multiple sources not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (proto == RTE_FLOW_ITEM_TYPE_PORT_ID) {
		dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
		/* perform the conversion from dpdk port to bnxt svif */
		rc = ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
						       &ifindex);
		if (rc) {
			BNXT_TF_DBG(ERR, "Invalid port id\n");
			return BNXT_TF_RC_ERROR;
		}
		ulp_port_db_svif_get(params->ulp_ctx, ifindex, dir, &svif);
		svif = rte_cpu_to_be_16(svif);
	}
	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
	memcpy(hdr_field->spec, &svif, sizeof(svif));
	memcpy(hdr_field->mask, &mask, sizeof(mask));
	hdr_field->size = sizeof(svif);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
			    rte_be_to_cpu_16(svif));
	return BNXT_TF_RC_SUCCESS;
}
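
/*
 * The next two helpers install implicit defaults: if the pattern carried
 * no PF/VF/port_id item, the incoming interface recorded in the computed
 * fields is used as the SVIF match; if the actions carried no explicit
 * destination (VNIC or VPORT), the flow is pointed at the current port's
 * VNIC via ulp_rte_pf_act_handler().
 */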

/* Function to handle the implicit SVIF from the RTE port id */
int32_t
ulp_rte_parser_svif_process(struct ulp_rte_parser_params *params)
{
	uint16_t port_id = 0;
	uint16_t svif_mask = 0xFFFF;

	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
	    BNXT_ULP_INVALID_SVIF_VAL)
		return BNXT_TF_RC_SUCCESS;

	/* SVIF not set. So get the port id */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, RTE_FLOW_ITEM_TYPE_PORT_ID,
				       port_id, svif_mask);
}

/* Function to handle the implicit VNIC for the RTE port id */
int32_t
ulp_rte_parser_vnic_process(struct ulp_rte_parser_params *params)
{
	struct ulp_rte_act_bitmap *act = &params->act_bitmap;

	if (ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VNIC) ||
	    ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VPORT))
		return BNXT_TF_RC_SUCCESS;

	/* Update the vnic details */
	ulp_rte_pf_act_handler(NULL, params);
	/* Reset the vnic bit in the act_bitmap */
	ULP_BITMAP_RESET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);

	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item PF Header. */
int32_t
ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
		       struct ulp_rte_parser_params *params)
{
	uint16_t port_id = 0;
	uint16_t svif_mask = 0xFFFF;

	/* Get the port id */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, item->type,
				       port_id, svif_mask);
}

/* Function to handle the parsing of RTE Flow item VF Header. */
int32_t
ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
		       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vf *vf_spec = item->spec;
	const struct rte_flow_item_vf *vf_mask = item->mask;
	uint16_t svif = 0, mask = 0;

	/* Get VF rte_flow_item for Port details */
	if (vf_spec)
		svif = (uint16_t)vf_spec->id;
	if (vf_mask)
		mask = (uint16_t)vf_mask->id;

	return ulp_rte_parser_svif_set(params, item->type, svif, mask);
}

/* Function to handle the parsing of RTE Flow item port id Header. */
int32_t
ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
			    struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_port_id *port_spec = item->spec;
	const struct rte_flow_item_port_id *port_mask = item->mask;
	uint16_t svif = 0, mask = 0;

	/*
	 * Copy the rte_flow_item for Port into hdr_field using port id
	 * header fields.
	 */
	if (port_spec)
		svif = (uint16_t)port_spec->id;
	if (port_mask)
		mask = (uint16_t)port_mask->id;

	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, item->type, svif, mask);
}

/* Function to handle the parsing of RTE Flow item phy port Header. */
int32_t
ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
			     struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_phy_port *port_spec = item->spec;
	const struct rte_flow_item_phy_port *port_mask = item->mask;
	uint32_t svif = 0, mask = 0;

	/* Copy the rte_flow_item for phy port into hdr_field */
	if (port_spec)
		svif = port_spec->index;
	if (port_mask)
		mask = port_mask->index;

	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, item->type, svif, mask);
}
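
/*
 * The protocol handlers below consume hdr_field slots sequentially: each
 * handler copies its fields starting at params->field_idx and then
 * advances field_idx by its BNXT_ULP_PROTO_HDR_*_NUM slot count. The
 * Ethernet handler additionally reserves the VLAN slots up front so that
 * optional VLAN items land at a fixed offset (params->vlan_idx).
 */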

/* Function to handle the parsing of RTE Flow item Ethernet Header. */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_eth *eth_spec = item->spec;
	const struct rte_flow_item_eth *eth_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	uint32_t idx = params->field_idx;
	uint64_t set_flag = 0;
	uint32_t size;

	/*
	 * Copy the rte_flow_item for eth into hdr_field using ethernet
	 * header fields
	 */
	if (eth_spec) {
		size = sizeof(eth_spec->dst.addr_bytes);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						eth_spec->dst.addr_bytes,
						size);
		size = sizeof(eth_spec->src.addr_bytes);
		field = ulp_rte_parser_fld_copy(field,
						eth_spec->src.addr_bytes,
						size);
		field = ulp_rte_parser_fld_copy(field,
						&eth_spec->type,
						sizeof(eth_spec->type));
	}
	if (eth_mask) {
		ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
				       sizeof(eth_mask->dst.addr_bytes));
		ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
				       sizeof(eth_mask->src.addr_bytes));
		ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
				       sizeof(eth_mask->type));
	}
	/* Add number of vlan header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
	params->vlan_idx = params->field_idx;
	params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;

	/* Update the hdr_bitmap with BNXT_ULP_HDR_BIT_I_ETH */
	set_flag = ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
				    BNXT_ULP_HDR_BIT_O_ETH);
	if (set_flag)
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
	else
		ULP_BITMAP_RESET(params->hdr_bitmap.bits,
				 BNXT_ULP_HDR_BIT_I_ETH);

	/* update the hdr_bitmap with BNXT_ULP_HDR_BIT_O_ETH */
	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);

	return BNXT_TF_RC_SUCCESS;
}
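
/*
 * VLAN note: the 16-bit TCI carries the 3-bit priority (PCP) in its top
 * bits and the 12-bit VLAN ID in its low bits, which is why the handler
 * below splits the tag with ">> 13" and "& 0xfff" before copying the two
 * pieces into separate hdr_field slots. Up to two outer and two inner
 * tags are tracked through the O_VTAG and I_VTAG computed fields
 * (VTAG_NUM, VTAG_PRESENT, TWO_VTAGS).
 */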

/* Function to handle the parsing of RTE Flow item Vlan Header. */
int32_t
ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vlan *vlan_spec = item->spec;
	const struct rte_flow_item_vlan *vlan_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bit;
	uint32_t idx = params->vlan_idx;
	uint16_t vlan_tag, priority;
	uint32_t outer_vtag_num;
	uint32_t inner_vtag_num;

	/*
	 * Copy the rte_flow_item for vlan into hdr_field using Vlan
	 * header fields
	 */
	if (vlan_spec) {
		vlan_tag = ntohs(vlan_spec->tci);
		priority = htons(vlan_tag >> 13);
		vlan_tag &= 0xfff;
		vlan_tag = htons(vlan_tag);

		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&priority,
						sizeof(priority));
		field = ulp_rte_parser_fld_copy(field,
						&vlan_tag,
						sizeof(vlan_tag));
		field = ulp_rte_parser_fld_copy(field,
						&vlan_spec->inner_type,
						sizeof(vlan_spec->inner_type));
	}

	if (vlan_mask) {
		vlan_tag = ntohs(vlan_mask->tci);
		priority = htons(vlan_tag >> 13);
		vlan_tag &= 0xfff;
		vlan_tag = htons(vlan_tag);

		field = &params->hdr_field[idx];
		memcpy(field->mask, &priority, field->size);
		field++;
		memcpy(field->mask, &vlan_tag, field->size);
		field++;
		memcpy(field->mask, &vlan_mask->inner_type, field->size);
	}
	/* Set the vlan index to the new incremented value */
	params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;

	/* Get the outer tag and inner tag counts */
	outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_O_VTAG_NUM);
	inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_I_VTAG_NUM);

	/* Update the hdr_bitmap of the vlans */
	hdr_bit = &params->hdr_bitmap;
	if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
	    !outer_vtag_num) {
		/* First outer vlan tag: update the vlan tag num */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_PRESENT, 1);
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_COMP_FLD_IDX_RD(params,
				       BNXT_ULP_CF_IDX_O_VTAG_PRESENT) &&
		   outer_vtag_num == 1) {
		/* Second outer vlan tag: update the vlan tag num */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_COMP_FLD_IDX_RD(params,
				       BNXT_ULP_CF_IDX_O_VTAG_PRESENT) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   !inner_vtag_num) {
		/* First inner vlan tag: update the vlan tag num */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_PRESENT, 1);
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_COMP_FLD_IDX_RD(params,
				       BNXT_ULP_CF_IDX_O_VTAG_PRESENT) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   ULP_COMP_FLD_IDX_RD(params,
				       BNXT_ULP_CF_IDX_I_VTAG_PRESENT) &&
		   inner_vtag_num == 1) {
		/* Second inner vlan tag: update the vlan tag num */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
	} else {
		BNXT_TF_DBG(ERR, "Error Parsing: Vlan hdr found without eth\n");
		return BNXT_TF_RC_ERROR;
	}
	return BNXT_TF_RC_SUCCESS;
}
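
/*
 * Outer/inner classification used by the L3 and L4 handlers below: the
 * first IPv4/IPv6 (or UDP/TCP) header seen is recorded as the outer
 * header, a second one as the inner header, and a third is rejected as
 * unsupported. The decision is driven by the O_L3/I_L3 (and O_L4/I_L4)
 * computed fields together with the outer header bits in hdr_bitmap.
 */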

/* Function to handle the parsing of RTE Flow item IPV4 Header. */
int32_t
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
	const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint32_t inner_l3, outer_l3;

	inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
	if (inner_l3) {
		BNXT_TF_DBG(ERR, "Parse Error: Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
	 * header fields
	 */
	if (ipv4_spec) {
		size = sizeof(ipv4_spec->hdr.version_ihl);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&ipv4_spec->hdr.version_ihl,
						size);
		size = sizeof(ipv4_spec->hdr.type_of_service);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.type_of_service,
						size);
		size = sizeof(ipv4_spec->hdr.total_length);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.total_length,
						size);
		size = sizeof(ipv4_spec->hdr.packet_id);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.packet_id,
						size);
		size = sizeof(ipv4_spec->hdr.fragment_offset);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.fragment_offset,
						size);
		size = sizeof(ipv4_spec->hdr.time_to_live);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.time_to_live,
						size);
		size = sizeof(ipv4_spec->hdr.next_proto_id);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.next_proto_id,
						size);
		size = sizeof(ipv4_spec->hdr.hdr_checksum);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.hdr_checksum,
						size);
		size = sizeof(ipv4_spec->hdr.src_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.src_addr,
						size);
		size = sizeof(ipv4_spec->hdr.dst_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.dst_addr,
						size);
	}
	if (ipv4_mask) {
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.version_ihl,
				       sizeof(ipv4_mask->hdr.version_ihl));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.type_of_service,
				       sizeof(ipv4_mask->hdr.type_of_service));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.total_length,
				       sizeof(ipv4_mask->hdr.total_length));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.packet_id,
				       sizeof(ipv4_mask->hdr.packet_id));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.fragment_offset,
				       sizeof(ipv4_mask->hdr.fragment_offset));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.time_to_live,
				       sizeof(ipv4_mask->hdr.time_to_live));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.next_proto_id,
				       sizeof(ipv4_mask->hdr.next_proto_id));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.hdr_checksum,
				       sizeof(ipv4_mask->hdr.hdr_checksum));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.src_addr,
				       sizeof(ipv4_mask->hdr.src_addr));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.dst_addr,
				       sizeof(ipv4_mask->hdr.dst_addr));
	}
	/* Add the number of ipv4 header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;

	/* Set the ipv4 header bitmap and computed l3 header bitmaps */
	outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
	if (outer_l3 ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
		inner_l3++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, inner_l3);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
		outer_l3++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, outer_l3);
	}
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item IPV6 Header */
int32_t
ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
	const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint32_t inner_l3, outer_l3;
	uint32_t vtcf, vtcf_mask;

	inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
	if (inner_l3) {
		BNXT_TF_DBG(ERR, "Parse Error: Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
	 * header fields
	 */
	if (ipv6_spec) {
		size = sizeof(ipv6_spec->hdr.vtc_flow);

		vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&vtcf,
						size);

		vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
		field = ulp_rte_parser_fld_copy(field,
						&vtcf,
						size);

		vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
		field = ulp_rte_parser_fld_copy(field,
						&vtcf,
						size);

		size = sizeof(ipv6_spec->hdr.payload_len);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.payload_len,
						size);
		size = sizeof(ipv6_spec->hdr.proto);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.proto,
						size);
		size = sizeof(ipv6_spec->hdr.hop_limits);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.hop_limits,
						size);
		size = sizeof(ipv6_spec->hdr.src_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.src_addr,
						size);
		size = sizeof(ipv6_spec->hdr.dst_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.dst_addr,
						size);
	}
	if (ipv6_mask) {
		size = sizeof(ipv6_mask->hdr.vtc_flow);

		vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vtcf_mask,
				       size);

		vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vtcf_mask,
				       size);

		vtcf_mask =
			BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vtcf_mask,
				       size);

		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.payload_len,
				       sizeof(ipv6_mask->hdr.payload_len));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.proto,
				       sizeof(ipv6_mask->hdr.proto));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.hop_limits,
				       sizeof(ipv6_mask->hdr.hop_limits));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.src_addr,
				       sizeof(ipv6_mask->hdr.src_addr));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.dst_addr,
				       sizeof(ipv6_mask->hdr.dst_addr));
	}
	/* add number of ipv6 header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;

	/* Set the ipv6 header bitmap and computed l3 header bitmaps */
	outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
	if (outer_l3 ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
	}
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item UDP Header. */
int32_t
ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_udp *udp_spec = item->spec;
	const struct rte_flow_item_udp *udp_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint32_t inner_l4, outer_l4;

	inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
	if (inner_l4) {
		BNXT_TF_DBG(ERR, "Parse Error: Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for udp into hdr_field using udp
	 * header fields
	 */
	if (udp_spec) {
		size = sizeof(udp_spec->hdr.src_port);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&udp_spec->hdr.src_port,
						size);
		size = sizeof(udp_spec->hdr.dst_port);
		field = ulp_rte_parser_fld_copy(field,
						&udp_spec->hdr.dst_port,
						size);
		size = sizeof(udp_spec->hdr.dgram_len);
		field = ulp_rte_parser_fld_copy(field,
						&udp_spec->hdr.dgram_len,
						size);
		size = sizeof(udp_spec->hdr.dgram_cksum);
		field = ulp_rte_parser_fld_copy(field,
						&udp_spec->hdr.dgram_cksum,
						size);
	}
	if (udp_mask) {
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.src_port,
				       sizeof(udp_mask->hdr.src_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.dst_port,
				       sizeof(udp_mask->hdr.dst_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.dgram_len,
				       sizeof(udp_mask->hdr.dgram_len));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.dgram_cksum,
				       sizeof(udp_mask->hdr.dgram_cksum));
	}

	/* Add number of UDP header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;

	/* Set the udp header bitmap and computed l4 header bitmaps */
	outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
	if (outer_l4 ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
	}
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item TCP Header. */
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_tcp *tcp_spec = item->spec;
	const struct rte_flow_item_tcp *tcp_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint32_t inner_l4, outer_l4;

	inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
	if (inner_l4) {
		BNXT_TF_DBG(ERR, "Parse Error: Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for tcp into hdr_field using tcp
	 * header fields
	 */
	if (tcp_spec) {
		size = sizeof(tcp_spec->hdr.src_port);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&tcp_spec->hdr.src_port,
						size);
		size = sizeof(tcp_spec->hdr.dst_port);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.dst_port,
						size);
		size = sizeof(tcp_spec->hdr.sent_seq);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.sent_seq,
						size);
		size = sizeof(tcp_spec->hdr.recv_ack);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.recv_ack,
						size);
		size = sizeof(tcp_spec->hdr.data_off);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.data_off,
						size);
		size = sizeof(tcp_spec->hdr.tcp_flags);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.tcp_flags,
						size);
		size = sizeof(tcp_spec->hdr.rx_win);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.rx_win,
						size);
		size = sizeof(tcp_spec->hdr.cksum);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.cksum,
						size);
		size = sizeof(tcp_spec->hdr.tcp_urp);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.tcp_urp,
						size);
	} else {
		idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
	}

	if (tcp_mask) {
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.src_port,
				       sizeof(tcp_mask->hdr.src_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.dst_port,
				       sizeof(tcp_mask->hdr.dst_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.sent_seq,
				       sizeof(tcp_mask->hdr.sent_seq));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.recv_ack,
				       sizeof(tcp_mask->hdr.recv_ack));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.data_off,
				       sizeof(tcp_mask->hdr.data_off));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.tcp_flags,
				       sizeof(tcp_mask->hdr.tcp_flags));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.rx_win,
				       sizeof(tcp_mask->hdr.rx_win));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.cksum,
				       sizeof(tcp_mask->hdr.cksum));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.tcp_urp,
				       sizeof(tcp_mask->hdr.tcp_urp));
	}
	/* add number of TCP header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;

	/* Set the tcp header bitmap and computed l4 header bitmaps */
	outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
	if (outer_l4 ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
	}
	return BNXT_TF_RC_SUCCESS;
}
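
/*
 * VXLAN is handled as a tunnel header: the handler below only copies the
 * flags/VNI words and sets BNXT_ULP_HDR_BIT_T_VXLAN; any ETH/L3/L4 items
 * that follow in the pattern are then classified as inner headers by the
 * handlers above.
 */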

/* Function to handle the parsing of RTE Flow item Vxlan Header. */
int32_t
ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
	const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;

	/*
	 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
	 * header fields
	 */
	if (vxlan_spec) {
		size = sizeof(vxlan_spec->flags);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&vxlan_spec->flags,
						size);
		size = sizeof(vxlan_spec->rsvd0);
		field = ulp_rte_parser_fld_copy(field,
						&vxlan_spec->rsvd0,
						size);
		size = sizeof(vxlan_spec->vni);
		field = ulp_rte_parser_fld_copy(field,
						&vxlan_spec->vni,
						size);
		size = sizeof(vxlan_spec->rsvd1);
		field = ulp_rte_parser_fld_copy(field,
						&vxlan_spec->rsvd1,
						size);
	}
	if (vxlan_mask) {
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vxlan_mask->flags,
				       sizeof(vxlan_mask->flags));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vxlan_mask->rsvd0,
				       sizeof(vxlan_mask->rsvd0));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vxlan_mask->vni,
				       sizeof(vxlan_mask->vni));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vxlan_mask->rsvd1,
				       sizeof(vxlan_mask->rsvd1));
	}
	/* Add number of vxlan header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;

	/* Update the hdr_bitmap with vxlan */
	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item void Header */
int32_t
ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action void Header. */
int32_t
ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action Mark Header. */
int32_t
ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
			 struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_mark *mark;
	struct ulp_rte_act_bitmap *act = &param->act_bitmap;
	uint32_t mark_id;

	mark = action_item->conf;
	if (mark) {
		mark_id = tfp_cpu_to_be_32(mark->id);
		memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
		       &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);

		/* Update the act_bitmap with mark */
		ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action RSS Header. */
int32_t
ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
			struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_rss *rss = action_item->conf;

	if (rss) {
		/* Update the act_bitmap with rss */
		ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
int32_t
ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
				struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_vxlan_encap *vxlan_encap;
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	struct rte_flow_item_vxlan vxlan_spec;
	uint32_t vlan_num = 0, vlan_size = 0;
	uint32_t ip_size = 0, ip_type = 0;
	uint32_t vxlan_size = 0;
	uint8_t *buff;
	/* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
	const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
					0x00, 0x40, 0x11};
	struct ulp_rte_act_bitmap *act = &params->act_bitmap;
	struct ulp_rte_act_prop *ap = &params->act_prop;

	vxlan_encap = action_item->conf;
	if (!vxlan_encap) {
		BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
		return BNXT_TF_RC_ERROR;
	}

	item = vxlan_encap->definition;
	if (!item) {
		BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
		return BNXT_TF_RC_ERROR;
	}

	if (!ulp_rte_item_skip_void(&item, 0))
		return BNXT_TF_RC_ERROR;

	/* must have ethernet header */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		BNXT_TF_DBG(ERR, "Parse Error: vxlan encap does not have eth\n");
		return BNXT_TF_RC_ERROR;
	}
	eth_spec = item->spec;
	buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
	ulp_encap_buffer_copy(buff,
			      eth_spec->dst.addr_bytes,
			      BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);

	/* Goto the next item */
	if (!ulp_rte_item_skip_void(&item, 1))
		return BNXT_TF_RC_ERROR;

	/* May have vlan header */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_num++;
		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
		ulp_encap_buffer_copy(buff,
				      item->spec,
				      sizeof(struct rte_flow_item_vlan));

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	}

	/* may have two vlan headers */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_num++;
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
					sizeof(struct rte_flow_item_vlan)],
		       item->spec,
		       sizeof(struct rte_flow_item_vlan));
		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	}
	/* Update the vlan count and size if vlan headers are present */
	if (vlan_num) {
		vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
		vlan_num = tfp_cpu_to_be_32(vlan_num);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
		       &vlan_num,
		       sizeof(uint32_t));
		vlan_size = tfp_cpu_to_be_32(vlan_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
		       &vlan_size,
		       sizeof(uint32_t));
	}

	/* L3 must be IPv4 or IPv6 */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
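		/*
		 * Encap IP buffer layout (IPv4 case): version/ihl + tos
		 * first, then id/frag/ttl/proto, then the destination
		 * address. If the user left the leading fields zero, the
		 * default header bytes from def_ipv4_hdr are used instead.
		 */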
		ipv4_spec = item->spec;
		ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;

		/* copy the ipv4 details */
		if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
					BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
			ulp_encap_buffer_copy(buff,
					      def_ipv4_hdr,
					      BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
					      BNXT_ULP_ENCAP_IPV4_ID_PROTO);
		} else {
			const uint8_t *tmp_buff;

			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
			ulp_encap_buffer_copy(buff,
					      &ipv4_spec->hdr.version_ihl,
					      BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
						BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS];
			tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
			ulp_encap_buffer_copy(buff,
					      tmp_buff,
					      BNXT_ULP_ENCAP_IPV4_ID_PROTO);
		}
		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
					BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
					BNXT_ULP_ENCAP_IPV4_ID_PROTO];
		ulp_encap_buffer_copy(buff,
				      (const uint8_t *)&ipv4_spec->hdr.dst_addr,
				      BNXT_ULP_ENCAP_IPV4_DEST_IP);

		/* Update the ip size details */
		ip_size = tfp_cpu_to_be_32(ip_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
		       &ip_size, sizeof(uint32_t));

		/* update the ip type */
		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
		       &ip_type, sizeof(uint32_t));

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		ipv6_spec = item->spec;
		ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;

		/* copy the ipv6 details */
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
		       ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);

		/* Update the ip size details */
		ip_size = tfp_cpu_to_be_32(ip_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
		       &ip_size, sizeof(uint32_t));

		/* update the ip type */
		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
		       &ip_type, sizeof(uint32_t));

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	} else {
		BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
		return BNXT_TF_RC_ERROR;
	}

	/* L4 is UDP */
	if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
		BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
		return BNXT_TF_RC_ERROR;
	}
	/* copy the udp details */
	ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
			      item->spec, BNXT_ULP_ENCAP_UDP_SIZE);

	if (!ulp_rte_item_skip_void(&item, 1))
		return BNXT_TF_RC_ERROR;

	/* Finally VXLAN */
	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
		BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
		return BNXT_TF_RC_ERROR;
	}
	vxlan_size = sizeof(struct rte_flow_item_vxlan);
	/* copy the vxlan details */
	memcpy(&vxlan_spec, item->spec, vxlan_size);
	vxlan_spec.flags = 0x08;	/* set the valid VNI (I) flag */
	ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
			      (const uint8_t *)&vxlan_spec,
			      vxlan_size);
	vxlan_size = tfp_cpu_to_be_32(vxlan_size);
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
	       &vxlan_size, sizeof(uint32_t));

	/* update the act_bitmap with vxlan encap */
	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);

	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action vxlan_decap Header. */
int32_t
ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
				__rte_unused,
				struct ulp_rte_parser_params *params)
{
	/* update the act_bitmap with vxlan decap */
	ULP_BITMAP_SET(params->act_bitmap.bits,
		       BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action drop Header. */
int32_t
ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with drop */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action count. */
int32_t
ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_count *act_count;
	struct ulp_rte_act_prop *act_prop = &params->act_prop;

	act_count = action_item->conf;
	if (act_count) {
		if (act_count->shared) {
			BNXT_TF_DBG(ERR,
				    "Parse Error: Shared count not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
		       &act_count->id,
		       BNXT_ULP_ACT_PROP_SZ_COUNT);
	}

	/* Update the act_bitmap with count */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action PF. */
int32_t
ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
		       struct ulp_rte_parser_params *params)
{
	uint32_t svif;

	/* Update the act_bitmap with the vnic bit */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);

	/* copy the PF of the current device into VNIC Property */
	svif = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
	svif = bnxt_get_vnic_id(svif);
	svif = rte_cpu_to_be_32(svif);
	memcpy(&params->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
	       &svif, BNXT_ULP_ACT_PROP_SZ_VNIC);

	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action VF. */
int32_t
ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
		       struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_vf *vf_action;
	uint32_t pid;

	vf_action = action_item->conf;
	if (vf_action) {
		if (vf_action->original) {
			BNXT_TF_DBG(ERR,
				    "Parse Error: VF Original not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		/* TBD: Update the computed VNIC using VF conversion */
		pid = bnxt_get_vnic_id(vf_action->id);
		pid = rte_cpu_to_be_32(pid);
		memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
	}

	/* Update the act_bitmap with VNIC */
	ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
	return BNXT_TF_RC_SUCCESS;
}
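
/*
 * Note: the PF and VF actions above and the port_id action below all
 * resolve to a destination VNIC via bnxt_get_vnic_id() and set
 * BNXT_ULP_ACTION_BIT_VNIC, while phy_port resolves to a VPORT.
 */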

/* Function to handle the parsing of RTE Flow action port_id. */
int32_t
ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
			    struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_port_id *port_id;
	uint32_t pid;

	port_id = act_item->conf;
	if (port_id) {
		if (port_id->original) {
			BNXT_TF_DBG(ERR,
				    "Parse Error: Port id Original not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		/* TBD: Update the computed VNIC using port conversion */
		pid = bnxt_get_vnic_id(port_id->id);
		pid = rte_cpu_to_be_32(pid);
		memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
	}

	/* Update the act_bitmap with VNIC */
	ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action phy_port. */
int32_t
ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
			     struct ulp_rte_parser_params *prm)
{
	const struct rte_flow_action_phy_port *phy_port;
	uint32_t vport;

	phy_port = action_item->conf;
	if (phy_port) {
		if (phy_port->original) {
			BNXT_TF_DBG(ERR,
				    "Parse Error: Port Original not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		/* Get the vport of the physical port */
		/* TBD: shall be changed later to portdb call */
		vport = 1 << phy_port->index;
		vport = rte_cpu_to_be_32(vport);
		memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
		       &vport, BNXT_ULP_ACT_PROP_SZ_VPORT);
	}

	/* Update the act_bitmap with VPORT */
	ULP_BITMAP_SET(prm->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VPORT);
	return BNXT_TF_RC_SUCCESS;
}