/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2020 Broadcom
 * All rights reserved.
 */

#include "bnxt.h"
#include "ulp_template_db.h"
#include "ulp_template_struct.h"
#include "bnxt_tf_common.h"
#include "ulp_rte_parser.h"
#include "ulp_utils.h"
#include "tfp.h"
#include "ulp_port_db.h"

/* Utility function to skip the void items. */
static inline int32_t
ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
{
	if (!*item)
		return 0;
	if (increment)
		(*item)++;
	while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
		(*item)++;
	if (*item)
		return 1;
	return 0;
}

/* Utility function to update the field_bitmap */
static void
ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
				   uint32_t idx)
{
	struct ulp_rte_hdr_field *field;

	field = &params->hdr_field[idx];
	if (ulp_bitmap_notzero(field->mask, field->size)) {
		ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
		/* Not exact match */
		if (!ulp_bitmap_is_ones(field->mask, field->size))
			ULP_BITMAP_SET(params->fld_bitmap.bits,
				       BNXT_ULP_MATCH_TYPE_BITMASK_WM);
	} else {
		ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
	}
}

/* Utility function to copy field spec items */
static struct ulp_rte_hdr_field *
ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
			const void *buffer,
			uint32_t size)
{
	field->size = size;
	memcpy(field->spec, buffer, field->size);
	field++;
	return field;
}

/* Utility function to copy field mask items */
static void
ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
		       uint32_t *idx,
		       const void *buffer,
		       uint32_t size)
{
	struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];

	memcpy(field->mask, buffer, size);
	ulp_rte_parser_field_bitmap_update(params, *idx);
	*idx = *idx + 1;
}

/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow items into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item *item = pattern;
	struct bnxt_ulp_rte_hdr_info *hdr_info;

	params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
	if (params->dir == ULP_DIR_EGRESS)
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);

	/* Parse all the items in the pattern */
	while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* get the header information from the flow_hdr_info table */
		hdr_info = &ulp_hdr_info[item->type];
		if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
			BNXT_TF_DBG(ERR,
				    "Truflow parser does not support type %d\n",
				    item->type);
			return BNXT_TF_RC_PARSE_ERR;
		} else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_hdr_func) {
				if (hdr_info->proto_hdr_func(item, params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		item++;
	}
	/* update the implied SVIF */
	(void)ulp_rte_parser_svif_process(params);
	return BNXT_TF_RC_SUCCESS;
}
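/*
 * Illustrative sketch only (hypothetical, not part of the driver): a
 * caller is assumed to run the two parser entry points back to back,
 * headers first and then actions, against the same parser params.
 * Guarded out so it never builds.
 */
#if 0
static int32_t
example_parse_flow(const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct ulp_rte_parser_params *params)
{
	/* Walk the pattern items into params->hdr_bitmap/hdr_field */
	if (bnxt_ulp_rte_parser_hdr_parse(pattern, params) !=
	    BNXT_TF_RC_SUCCESS)
		return BNXT_TF_RC_ERROR;
	/* Then walk the actions into params->act_bitmap/act_prop */
	return bnxt_ulp_rte_parser_act_parse(actions, params);
}
#endif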
119 */ 120 int32_t 121 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[], 122 struct ulp_rte_parser_params *params) 123 { 124 const struct rte_flow_action *action_item = actions; 125 struct bnxt_ulp_rte_act_info *hdr_info; 126 127 /* Parse all the items in the pattern */ 128 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) { 129 /* get the header information from the flow_hdr_info table */ 130 hdr_info = &ulp_act_info[action_item->type]; 131 if (hdr_info->act_type == 132 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) { 133 BNXT_TF_DBG(ERR, 134 "Truflow parser does not support act %u\n", 135 action_item->type); 136 return BNXT_TF_RC_ERROR; 137 } else if (hdr_info->act_type == 138 BNXT_ULP_ACT_TYPE_SUPPORTED) { 139 /* call the registered callback handler */ 140 if (hdr_info->proto_act_func) { 141 if (hdr_info->proto_act_func(action_item, 142 params) != 143 BNXT_TF_RC_SUCCESS) { 144 return BNXT_TF_RC_ERROR; 145 } 146 } 147 } 148 action_item++; 149 } 150 /* update the implied VNIC */ 151 ulp_rte_parser_vnic_process(params); 152 return BNXT_TF_RC_SUCCESS; 153 } 154 155 /* Function to handle the parsing of RTE Flow item PF Header. */ 156 static int32_t 157 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params, 158 enum rte_flow_item_type proto, 159 uint16_t svif, 160 uint16_t mask) 161 { 162 uint16_t port_id = svif; 163 uint32_t dir = 0; 164 struct ulp_rte_hdr_field *hdr_field; 165 uint32_t ifindex; 166 int32_t rc; 167 168 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF)) { 169 BNXT_TF_DBG(ERR, 170 "SVIF already set,multiple source not support'd\n"); 171 return BNXT_TF_RC_ERROR; 172 } 173 174 /*update the hdr_bitmap with BNXT_ULP_HDR_PROTO_SVIF */ 175 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF); 176 177 if (proto == RTE_FLOW_ITEM_TYPE_PORT_ID) { 178 dir = ULP_UTIL_CHF_IDX_RD(params, 179 BNXT_ULP_CHF_IDX_DIRECTION); 180 /* perform the conversion from dpdk port to bnxt svif */ 181 rc = ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id, 182 &ifindex); 183 if (rc) { 184 BNXT_TF_DBG(ERR, 185 "Invalid port id\n"); 186 return BNXT_TF_RC_ERROR; 187 } 188 ulp_port_db_svif_get(params->ulp_ctx, ifindex, dir, &svif); 189 svif = rte_cpu_to_be_16(svif); 190 } 191 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX]; 192 memcpy(hdr_field->spec, &svif, sizeof(svif)); 193 memcpy(hdr_field->mask, &mask, sizeof(mask)); 194 hdr_field->size = sizeof(svif); 195 return BNXT_TF_RC_SUCCESS; 196 } 197 198 /* Function to handle the parsing of the RTE port id */ 199 int32_t 200 ulp_rte_parser_svif_process(struct ulp_rte_parser_params *params) 201 { 202 uint16_t port_id = 0; 203 uint16_t svif_mask = 0xFFFF; 204 205 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF)) 206 return BNXT_TF_RC_SUCCESS; 207 208 /* SVIF not set. 
/* Function to handle the parsing of the RTE port id */
int32_t
ulp_rte_parser_svif_process(struct ulp_rte_parser_params *params)
{
	uint16_t port_id = 0;
	uint16_t svif_mask = 0xFFFF;

	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF))
		return BNXT_TF_RC_SUCCESS;

	/* SVIF not set, so get the port id */
	port_id = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_INCOMING_IF);

	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, RTE_FLOW_ITEM_TYPE_PORT_ID,
				       port_id, svif_mask);
}

/* Function to handle the implicit VNIC RTE port id */
int32_t
ulp_rte_parser_vnic_process(struct ulp_rte_parser_params *params)
{
	struct ulp_rte_act_bitmap *act = &params->act_bitmap;

	if (ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VNIC) ||
	    ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VPORT))
		return BNXT_TF_RC_SUCCESS;

	/* Update the vnic details */
	ulp_rte_pf_act_handler(NULL, params);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item PF Header. */
int32_t
ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
		       struct ulp_rte_parser_params *params)
{
	uint16_t port_id = 0;
	uint16_t svif_mask = 0xFFFF;

	/* Get the port id */
	port_id = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_INCOMING_IF);

	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params,
				       item->type,
				       port_id, svif_mask);
}

/* Function to handle the parsing of RTE Flow item VF Header. */
int32_t
ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
		       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vf *vf_spec = item->spec;
	const struct rte_flow_item_vf *vf_mask = item->mask;
	uint16_t svif = 0, mask = 0;

	/* Get VF rte_flow_item for Port details */
	if (vf_spec)
		svif = (uint16_t)vf_spec->id;
	if (vf_mask)
		mask = (uint16_t)vf_mask->id;

	return ulp_rte_parser_svif_set(params, item->type, svif, mask);
}

/* Function to handle the parsing of RTE Flow item port id Header. */
int32_t
ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
			    struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_port_id *port_spec = item->spec;
	const struct rte_flow_item_port_id *port_mask = item->mask;
	uint16_t svif = 0, mask = 0;

	/*
	 * Copy the rte_flow_item for Port into hdr_field using port id
	 * header fields.
	 */
	if (port_spec)
		svif = (uint16_t)port_spec->id;
	if (port_mask)
		mask = (uint16_t)port_mask->id;

	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, item->type, svif, mask);
}

/* Function to handle the parsing of RTE Flow item phy port Header. */
int32_t
ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
			     struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_phy_port *port_spec = item->spec;
	const struct rte_flow_item_phy_port *port_mask = item->mask;
	uint32_t svif = 0, mask = 0;

	/* Copy the rte_flow_item for phy port into hdr_field */
	if (port_spec)
		svif = port_spec->index;
	if (port_mask)
		mask = port_mask->index;

	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, item->type, svif, mask);
}
/* Function to handle the parsing of RTE Flow item Ethernet Header. */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_eth *eth_spec = item->spec;
	const struct rte_flow_item_eth *eth_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	uint32_t idx = params->field_idx;
	uint64_t set_flag = 0;
	uint32_t size;

	/*
	 * Copy the rte_flow_item for eth into hdr_field using ethernet
	 * header fields
	 */
	if (eth_spec) {
		size = sizeof(eth_spec->dst.addr_bytes);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						eth_spec->dst.addr_bytes,
						size);
		size = sizeof(eth_spec->src.addr_bytes);
		field = ulp_rte_parser_fld_copy(field,
						eth_spec->src.addr_bytes,
						size);
		field = ulp_rte_parser_fld_copy(field,
						&eth_spec->type,
						sizeof(eth_spec->type));
	}
	if (eth_mask) {
		ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
				       sizeof(eth_mask->dst.addr_bytes));
		ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
				       sizeof(eth_mask->src.addr_bytes));
		ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
				       sizeof(eth_mask->type));
	}
	/* Add the number of Ethernet header elements and reserve VLAN slots */
	params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
	params->vlan_idx = params->field_idx;
	params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;

	/* Update the hdr_bitmap with BNXT_ULP_HDR_BIT_I_ETH */
	set_flag = ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
				    BNXT_ULP_HDR_BIT_O_ETH);
	if (set_flag)
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
	else
		ULP_BITMAP_RESET(params->hdr_bitmap.bits,
				 BNXT_ULP_HDR_BIT_I_ETH);

	/* update the hdr_bitmap with BNXT_ULP_HDR_BIT_O_ETH */
	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);

	return BNXT_TF_RC_SUCCESS;
}
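/*
 * Illustrative sketch only (hypothetical values): an exact-match ETH
 * item in the shape the handler above consumes. An all-ones mask makes
 * the field bitmap record an exact match rather than a wildcard.
 */
#if 0
static const struct rte_flow_item_eth example_eth_spec = {
	.dst = { .addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } },
	.type = RTE_BE16(0x0800),
};
static const struct rte_flow_item_eth example_eth_mask = {
	.dst = { .addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } },
	.type = RTE_BE16(0xffff),
};
static const struct rte_flow_item example_eth_item = {
	.type = RTE_FLOW_ITEM_TYPE_ETH,
	.spec = &example_eth_spec,
	.mask = &example_eth_mask,
};
#endif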
/* Function to handle the parsing of RTE Flow item Vlan Header. */
int32_t
ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vlan *vlan_spec = item->spec;
	const struct rte_flow_item_vlan *vlan_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bit;
	uint32_t idx = params->vlan_idx;
	uint16_t vlan_tag, priority;
	uint32_t outer_vtag_num;
	uint32_t inner_vtag_num;

	/*
	 * Copy the rte_flow_item for vlan into hdr_field using Vlan
	 * header fields
	 */
	if (vlan_spec) {
		vlan_tag = ntohs(vlan_spec->tci);
		priority = htons(vlan_tag >> 13);
		vlan_tag &= 0xfff;
		vlan_tag = htons(vlan_tag);

		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&priority,
						sizeof(priority));
		field = ulp_rte_parser_fld_copy(field,
						&vlan_tag,
						sizeof(vlan_tag));
		field = ulp_rte_parser_fld_copy(field,
						&vlan_spec->inner_type,
						sizeof(vlan_spec->inner_type));
	}

	if (vlan_mask) {
		vlan_tag = ntohs(vlan_mask->tci);
		priority = htons(vlan_tag >> 13);
		vlan_tag &= 0xfff;
		vlan_tag = htons(vlan_tag);

		field = &params->hdr_field[idx];
		memcpy(field->mask, &priority, field->size);
		field++;
		memcpy(field->mask, &vlan_tag, field->size);
		field++;
		memcpy(field->mask, &vlan_mask->inner_type, field->size);
	}
	/* Set the vlan index to the new incremented value */
	params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;

	/* Get the outer tag and inner tag counts */
	outer_vtag_num = ULP_UTIL_CHF_IDX_RD(params,
					     BNXT_ULP_CHF_IDX_O_VTAG_NUM);
	inner_vtag_num = ULP_UTIL_CHF_IDX_RD(params,
					     BNXT_ULP_CHF_IDX_I_VTAG_NUM);

	/* Update the hdr_bitmap of the vlans */
	hdr_bit = &params->hdr_bitmap;
	if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
	    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN)) {
		/* Set the outer vlan bit and update the vlan tag num */
		ULP_BITMAP_SET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN);
		outer_vtag_num++;
		ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_VTAG_PRESENT, 1);
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN) &&
		   !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OI_VLAN)) {
		/* Set the second outer vlan bit and update the vlan tag num */
		ULP_BITMAP_SET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OI_VLAN);
		outer_vtag_num++;
		ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_TWO_VTAGS, 1);
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OI_VLAN) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_IO_VLAN)) {
		/* Set the inner vlan bit and update the vlan tag num */
		ULP_BITMAP_SET(hdr_bit->bits, BNXT_ULP_HDR_BIT_IO_VLAN);
		inner_vtag_num++;
		ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_VTAG_PRESENT, 1);
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OI_VLAN) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_IO_VLAN) &&
		   !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_II_VLAN)) {
		/* Set the second inner vlan bit and update the vlan tag num */
		ULP_BITMAP_SET(hdr_bit->bits, BNXT_ULP_HDR_BIT_II_VLAN);
		inner_vtag_num++;
		ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_TWO_VTAGS, 1);
	} else {
		BNXT_TF_DBG(ERR, "Error Parsing: VLAN hdr found without eth\n");
		return BNXT_TF_RC_ERROR;
	}
	return BNXT_TF_RC_SUCCESS;
}
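/*
 * Illustrative sketch only (hypothetical value): how the handler above
 * splits a VLAN TCI into the 3-bit priority and 12-bit tag fields. A
 * TCI of 0xA00A yields priority 5 and tag 0x00A.
 */
#if 0
static void
example_tci_split(void)
{
	uint16_t tci_be = htons(0xA00A);	/* TCI as it arrives, BE */
	uint16_t vlan_tag, priority;

	vlan_tag = ntohs(tci_be);		/* 0xA00A in host order */
	priority = htons(vlan_tag >> 13);	/* PCP: 0xA00A >> 13 == 5 */
	vlan_tag &= 0xfff;			/* VID: 0x00A */
	vlan_tag = htons(vlan_tag);		/* back to network order */
	(void)priority;
	(void)vlan_tag;
}
#endif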
/* Function to handle the parsing of RTE Flow item IPV4 Header. */
int32_t
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
	const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint32_t inner_l3, outer_l3;

	inner_l3 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_I_L3);
	if (inner_l3) {
		BNXT_TF_DBG(ERR, "Parse Error: Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
	 * header fields
	 */
	if (ipv4_spec) {
		size = sizeof(ipv4_spec->hdr.version_ihl);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&ipv4_spec->hdr.version_ihl,
						size);
		size = sizeof(ipv4_spec->hdr.type_of_service);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.type_of_service,
						size);
		size = sizeof(ipv4_spec->hdr.total_length);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.total_length,
						size);
		size = sizeof(ipv4_spec->hdr.packet_id);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.packet_id,
						size);
		size = sizeof(ipv4_spec->hdr.fragment_offset);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.fragment_offset,
						size);
		size = sizeof(ipv4_spec->hdr.time_to_live);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.time_to_live,
						size);
		size = sizeof(ipv4_spec->hdr.next_proto_id);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.next_proto_id,
						size);
		size = sizeof(ipv4_spec->hdr.hdr_checksum);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.hdr_checksum,
						size);
		size = sizeof(ipv4_spec->hdr.src_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.src_addr,
						size);
		size = sizeof(ipv4_spec->hdr.dst_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.dst_addr,
						size);
	}
	if (ipv4_mask) {
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.version_ihl,
				       sizeof(ipv4_mask->hdr.version_ihl));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.type_of_service,
				       sizeof(ipv4_mask->hdr.type_of_service));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.total_length,
				       sizeof(ipv4_mask->hdr.total_length));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.packet_id,
				       sizeof(ipv4_mask->hdr.packet_id));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.fragment_offset,
				       sizeof(ipv4_mask->hdr.fragment_offset));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.time_to_live,
				       sizeof(ipv4_mask->hdr.time_to_live));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.next_proto_id,
				       sizeof(ipv4_mask->hdr.next_proto_id));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.hdr_checksum,
				       sizeof(ipv4_mask->hdr.hdr_checksum));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.src_addr,
				       sizeof(ipv4_mask->hdr.src_addr));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.dst_addr,
				       sizeof(ipv4_mask->hdr.dst_addr));
	}
	/* Add the number of ipv4 header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;

	/* Set the ipv4 header bitmap and computed l3 header bitmaps */
	outer_l3 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_O_L3);
	if (outer_l3 ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
		inner_l3++;
		ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_L3, inner_l3);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
		outer_l3++;
		ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_L3, outer_l3);
	}
	return BNXT_TF_RC_SUCCESS;
}
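/*
 * Illustrative sketch only (hypothetical): the first L3 item in a
 * pattern is classified as outer and the second as inner, so calling
 * the IPv4 handler above twice sets O_IPV4 and then I_IPV4. NULL
 * spec/mask simply skip the field copies.
 */
#if 0
static void
example_outer_then_inner_ipv4(struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
	};

	ulp_rte_ipv4_hdr_handler(&item, params);	/* sets O_IPV4 */
	ulp_rte_ipv4_hdr_handler(&item, params);	/* sets I_IPV4 */
}
#endif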
/* Function to handle the parsing of RTE Flow item IPV6 Header */
int32_t
ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
	const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint32_t inner_l3, outer_l3;

	inner_l3 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_I_L3);
	if (inner_l3) {
		BNXT_TF_DBG(ERR, "Parse Error: Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
	 * header fields
	 */
	if (ipv6_spec) {
		size = sizeof(ipv6_spec->hdr.vtc_flow);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&ipv6_spec->hdr.vtc_flow,
						size);
		size = sizeof(ipv6_spec->hdr.payload_len);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.payload_len,
						size);
		size = sizeof(ipv6_spec->hdr.proto);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.proto,
						size);
		size = sizeof(ipv6_spec->hdr.hop_limits);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.hop_limits,
						size);
		size = sizeof(ipv6_spec->hdr.src_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.src_addr,
						size);
		size = sizeof(ipv6_spec->hdr.dst_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.dst_addr,
						size);
	}
	if (ipv6_mask) {
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.vtc_flow,
				       sizeof(ipv6_mask->hdr.vtc_flow));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.payload_len,
				       sizeof(ipv6_mask->hdr.payload_len));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.proto,
				       sizeof(ipv6_mask->hdr.proto));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.hop_limits,
				       sizeof(ipv6_mask->hdr.hop_limits));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.src_addr,
				       sizeof(ipv6_mask->hdr.src_addr));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.dst_addr,
				       sizeof(ipv6_mask->hdr.dst_addr));
	}
	/* Add the number of ipv6 header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;

	/* Set the ipv6 header bitmap and computed l3 header bitmaps */
	outer_l3 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_O_L3);
	if (outer_l3 ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
		ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_L3, 1);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
		ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_L3, 1);
	}
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow item UDP Header. */
int32_t
ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_udp *udp_spec = item->spec;
	const struct rte_flow_item_udp *udp_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint32_t inner_l4, outer_l4;

	inner_l4 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_I_L4);
	if (inner_l4) {
		BNXT_TF_DBG(ERR, "Parse Error: Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for udp into hdr_field using udp
	 * header fields
	 */
	if (udp_spec) {
		size = sizeof(udp_spec->hdr.src_port);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&udp_spec->hdr.src_port,
						size);
		size = sizeof(udp_spec->hdr.dst_port);
		field = ulp_rte_parser_fld_copy(field,
						&udp_spec->hdr.dst_port,
						size);
		size = sizeof(udp_spec->hdr.dgram_len);
		field = ulp_rte_parser_fld_copy(field,
						&udp_spec->hdr.dgram_len,
						size);
		size = sizeof(udp_spec->hdr.dgram_cksum);
		field = ulp_rte_parser_fld_copy(field,
						&udp_spec->hdr.dgram_cksum,
						size);
	}
	if (udp_mask) {
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.src_port,
				       sizeof(udp_mask->hdr.src_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.dst_port,
				       sizeof(udp_mask->hdr.dst_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.dgram_len,
				       sizeof(udp_mask->hdr.dgram_len));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.dgram_cksum,
				       sizeof(udp_mask->hdr.dgram_cksum));
	}

	/* Add number of UDP header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;

	/* Set the udp header bitmap and computed l4 header bitmaps */
	outer_l4 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_O_L4);
	if (outer_l4 ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
		ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_L4, 1);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
		ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_L4, 1);
	}
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow item TCP Header. */
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_tcp *tcp_spec = item->spec;
	const struct rte_flow_item_tcp *tcp_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint32_t inner_l4, outer_l4;

	inner_l4 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_I_L4);
	if (inner_l4) {
		BNXT_TF_DBG(ERR, "Parse Error: Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for tcp into hdr_field using tcp
	 * header fields
	 */
	if (tcp_spec) {
		size = sizeof(tcp_spec->hdr.src_port);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&tcp_spec->hdr.src_port,
						size);
		size = sizeof(tcp_spec->hdr.dst_port);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.dst_port,
						size);
		size = sizeof(tcp_spec->hdr.sent_seq);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.sent_seq,
						size);
		size = sizeof(tcp_spec->hdr.recv_ack);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.recv_ack,
						size);
		size = sizeof(tcp_spec->hdr.data_off);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.data_off,
						size);
		size = sizeof(tcp_spec->hdr.tcp_flags);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.tcp_flags,
						size);
		size = sizeof(tcp_spec->hdr.rx_win);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.rx_win,
						size);
		size = sizeof(tcp_spec->hdr.cksum);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.cksum,
						size);
		size = sizeof(tcp_spec->hdr.tcp_urp);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.tcp_urp,
						size);
	} else {
		idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
	}

	if (tcp_mask) {
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.src_port,
				       sizeof(tcp_mask->hdr.src_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.dst_port,
				       sizeof(tcp_mask->hdr.dst_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.sent_seq,
				       sizeof(tcp_mask->hdr.sent_seq));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.recv_ack,
				       sizeof(tcp_mask->hdr.recv_ack));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.data_off,
				       sizeof(tcp_mask->hdr.data_off));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.tcp_flags,
				       sizeof(tcp_mask->hdr.tcp_flags));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.rx_win,
				       sizeof(tcp_mask->hdr.rx_win));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.cksum,
				       sizeof(tcp_mask->hdr.cksum));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.tcp_urp,
				       sizeof(tcp_mask->hdr.tcp_urp));
	}
	/* Add number of TCP header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;

	/* Set the tcp header bitmap and computed l4 header bitmaps */
	outer_l4 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_O_L4);
	if (outer_l4 ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
		ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_L4, 1);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
		ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_L4, 1);
	}
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow item Vxlan Header. */
int32_t
ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
	const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;

	/*
	 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
	 * header fields
	 */
	if (vxlan_spec) {
		size = sizeof(vxlan_spec->flags);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&vxlan_spec->flags,
						size);
		size = sizeof(vxlan_spec->rsvd0);
		field = ulp_rte_parser_fld_copy(field,
						&vxlan_spec->rsvd0,
						size);
		size = sizeof(vxlan_spec->vni);
		field = ulp_rte_parser_fld_copy(field,
						&vxlan_spec->vni,
						size);
		size = sizeof(vxlan_spec->rsvd1);
		field = ulp_rte_parser_fld_copy(field,
						&vxlan_spec->rsvd1,
						size);
	}
	if (vxlan_mask) {
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vxlan_mask->flags,
				       sizeof(vxlan_mask->flags));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vxlan_mask->rsvd0,
				       sizeof(vxlan_mask->rsvd0));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vxlan_mask->vni,
				       sizeof(vxlan_mask->vni));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vxlan_mask->rsvd1,
				       sizeof(vxlan_mask->rsvd1));
	}
	/* Add number of vxlan header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;

	/* Update the hdr_bitmap with vxlan */
	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item void Header */
int32_t
ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action void Header. */
int32_t
ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action Mark Header. */
int32_t
ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
			 struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_mark *mark;
	struct ulp_rte_act_bitmap *act = &param->act_bitmap;
	uint32_t mark_id;

	mark = action_item->conf;
	if (mark) {
		mark_id = tfp_cpu_to_be_32(mark->id);
		memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
		       &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);

		/* Update the act_bitmap with mark */
		ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
/* Function to handle the parsing of RTE Flow action RSS Header. */
int32_t
ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
			struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_rss *rss = action_item->conf;

	if (rss) {
		/* Update the act_bitmap with rss */
		ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
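/*
 * Illustrative sketch only (hypothetical specs): the shape of the
 * vxlan_encap definition the handler below walks: ETH, up to two
 * optional VLANs, one IPv4 or IPv6, UDP, then VXLAN, terminated by END.
 */
#if 0
static struct rte_flow_item_eth example_encap_eth;
static struct rte_flow_item_ipv4 example_encap_ipv4;
static struct rte_flow_item_udp example_encap_udp;
static struct rte_flow_item_vxlan example_encap_vxlan;
static struct rte_flow_item example_encap_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &example_encap_eth },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &example_encap_ipv4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &example_encap_udp },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &example_encap_vxlan },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
static const struct rte_flow_action_vxlan_encap example_encap_conf = {
	.definition = example_encap_pattern,
};
#endif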
/* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
int32_t
ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
				struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_vxlan_encap *vxlan_encap;
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	struct rte_flow_item_vxlan vxlan_spec;
	uint32_t vlan_num = 0, vlan_size = 0;
	uint32_t ip_size = 0, ip_type = 0;
	uint32_t vxlan_size = 0;
	uint8_t *buff;
	/* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
	const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
					0x00, 0x40, 0x11};
	struct ulp_rte_act_bitmap *act = &params->act_bitmap;
	struct ulp_rte_act_prop *ap = &params->act_prop;

	vxlan_encap = action_item->conf;
	if (!vxlan_encap) {
		BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
		return BNXT_TF_RC_ERROR;
	}

	item = vxlan_encap->definition;
	if (!item) {
		BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
		return BNXT_TF_RC_ERROR;
	}

	if (!ulp_rte_item_skip_void(&item, 0))
		return BNXT_TF_RC_ERROR;

	/* must have ethernet header */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		BNXT_TF_DBG(ERR, "Parse Error: vxlan encap does not have eth\n");
		return BNXT_TF_RC_ERROR;
	}
	eth_spec = item->spec;
	buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
	ulp_encap_buffer_copy(buff,
			      eth_spec->dst.addr_bytes,
			      BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);

	/* Goto the next item */
	if (!ulp_rte_item_skip_void(&item, 1))
		return BNXT_TF_RC_ERROR;

	/* May have vlan header */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_num++;
		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
		ulp_encap_buffer_copy(buff,
				      item->spec,
				      sizeof(struct rte_flow_item_vlan));

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	}

	/* may have two vlan headers */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_num++;
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
		       sizeof(struct rte_flow_item_vlan)],
		       item->spec,
		       sizeof(struct rte_flow_item_vlan));
		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	}
	/* Update the vlan count and size if one or more were found */
	if (vlan_num) {
		vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
		vlan_num = tfp_cpu_to_be_32(vlan_num);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
		       &vlan_num,
		       sizeof(uint32_t));
		vlan_size = tfp_cpu_to_be_32(vlan_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
		       &vlan_size,
		       sizeof(uint32_t));
	}

	/* L3 must be IPv4 or IPv6 */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		ipv4_spec = item->spec;
		ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;

		/* copy the ipv4 details */
		if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
					BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
			ulp_encap_buffer_copy(buff,
					      def_ipv4_hdr,
					      BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
					      BNXT_ULP_ENCAP_IPV4_ID_PROTO);
		} else {
			const uint8_t *tmp_buff;

			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
			ulp_encap_buffer_copy(buff,
					      &ipv4_spec->hdr.version_ihl,
					      BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
						BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS];
			tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
			ulp_encap_buffer_copy(buff,
					      tmp_buff,
					      BNXT_ULP_ENCAP_IPV4_ID_PROTO);
		}
		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
					BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
					BNXT_ULP_ENCAP_IPV4_ID_PROTO];
		ulp_encap_buffer_copy(buff,
				      (const uint8_t *)&ipv4_spec->hdr.dst_addr,
				      BNXT_ULP_ENCAP_IPV4_DEST_IP);

		/* Update the ip size details */
		ip_size = tfp_cpu_to_be_32(ip_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
		       &ip_size, sizeof(uint32_t));

		/* update the ip type */
		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
		       &ip_type, sizeof(uint32_t));

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		ipv6_spec = item->spec;
		ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;

		/* copy the ipv6 details */
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
		       ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);

		/* Update the ip size details */
		ip_size = tfp_cpu_to_be_32(ip_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
		       &ip_size, sizeof(uint32_t));

		/* update the ip type */
		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
		       &ip_type, sizeof(uint32_t));

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	} else {
		BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
		return BNXT_TF_RC_ERROR;
	}

	/* L4 is UDP */
	if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
		BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
		return BNXT_TF_RC_ERROR;
	}
	/* copy the udp details */
	ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
			      item->spec, BNXT_ULP_ENCAP_UDP_SIZE);

	if (!ulp_rte_item_skip_void(&item, 1))
		return BNXT_TF_RC_ERROR;

	/* Finally VXLAN */
	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
		BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
		return BNXT_TF_RC_ERROR;
	}
	vxlan_size = sizeof(struct rte_flow_item_vxlan);
	/* copy the vxlan details */
	memcpy(&vxlan_spec, item->spec, vxlan_size);
	vxlan_spec.flags = 0x08;	/* set the VNI-valid (I) flag */
	ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
			      (const uint8_t *)&vxlan_spec,
			      vxlan_size);
	vxlan_size = tfp_cpu_to_be_32(vxlan_size);
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
	       &vxlan_size, sizeof(uint32_t));

	/* Update the act_bitmap with vxlan encap */
	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow action vxlan_decap Header */
int32_t
ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
				__rte_unused,
				struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with vxlan decap */
	ULP_BITMAP_SET(params->act_bitmap.bits,
		       BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action drop Header. */
int32_t
ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with drop */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action count. */
int32_t
ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_count *act_count;
	struct ulp_rte_act_prop *act_prop = &params->act_prop;

	act_count = action_item->conf;
	if (act_count) {
		if (act_count->shared) {
			BNXT_TF_DBG(ERR,
				    "Parse Error: Shared count not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
		       &act_count->id,
		       BNXT_ULP_ACT_PROP_SZ_COUNT);
	}

	/* Update the act_bitmap with count */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action PF. */
int32_t
ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
		       struct ulp_rte_parser_params *params)
{
	uint32_t svif;

	/* Update the act_bitmap with the vnic bit */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);

	/* copy the PF of the current device into VNIC Property */
	svif = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_INCOMING_IF);
	svif = bnxt_get_vnic_id(svif);
	svif = rte_cpu_to_be_32(svif);
	memcpy(&params->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
	       &svif, BNXT_ULP_ACT_PROP_SZ_VNIC);

	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action VF. */
int32_t
ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
		       struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_vf *vf_action;
	uint32_t pid;

	vf_action = action_item->conf;
	if (vf_action) {
		if (vf_action->original) {
			BNXT_TF_DBG(ERR,
				    "Parse Error: VF Original not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		/* TBD: Update the computed VNIC using VF conversion */
		pid = bnxt_get_vnic_id(vf_action->id);
		pid = rte_cpu_to_be_32(pid);
		memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
	}

	/* Update the act_bitmap with vnic */
	ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
	return BNXT_TF_RC_SUCCESS;
}
/* Function to handle the parsing of RTE Flow action port_id. */
int32_t
ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
			    struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_port_id *port_id;
	uint32_t pid;

	port_id = act_item->conf;
	if (port_id) {
		if (port_id->original) {
			BNXT_TF_DBG(ERR,
				    "Parse Error: Port id Original not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		/* TBD: Update the computed VNIC using port conversion */
		pid = bnxt_get_vnic_id(port_id->id);
		pid = rte_cpu_to_be_32(pid);
		memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
	}

	/* Update the act_bitmap with vnic */
	ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action phy_port. */
int32_t
ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
			     struct ulp_rte_parser_params *prm)
{
	const struct rte_flow_action_phy_port *phy_port;
	uint32_t pid;

	phy_port = action_item->conf;
	if (phy_port) {
		if (phy_port->original) {
			BNXT_TF_DBG(ERR,
				    "Parse Error: Phy port Original not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		pid = bnxt_get_vnic_id(phy_port->index);
		pid = rte_cpu_to_be_32(pid);
		memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
	}

	/* Update the act_bitmap with vport */
	ULP_BITMAP_SET(prm->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VPORT);
	return BNXT_TF_RC_SUCCESS;
}
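/*
 * Illustrative sketch only (hypothetical): a minimal action list the
 * action parser accepts: a non-shared counter followed by a drop,
 * terminated by END.
 */
#if 0
static const struct rte_flow_action_count example_count = { .shared = 0 };
static const struct rte_flow_action example_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &example_count },
	{ .type = RTE_FLOW_ACTION_TYPE_DROP },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif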