1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright(c) 2019-2021 Xilinx, Inc. 4 * Copyright(c) 2017-2019 Solarflare Communications Inc. 5 * 6 * This software was jointly developed between OKTET Labs (under contract 7 * for Solarflare) and Solarflare Communications, Inc. 8 */ 9 10 #include <rte_byteorder.h> 11 #include <rte_tailq.h> 12 #include <rte_common.h> 13 #include <ethdev_driver.h> 14 #include <rte_ether.h> 15 #include <rte_flow.h> 16 #include <rte_flow_driver.h> 17 18 #include "efx.h" 19 20 #include "sfc.h" 21 #include "sfc_debug.h" 22 #include "sfc_rx.h" 23 #include "sfc_filter.h" 24 #include "sfc_flow.h" 25 #include "sfc_log.h" 26 #include "sfc_dp_rx.h" 27 #include "sfc_mae_counter.h" 28 29 struct sfc_flow_ops_by_spec { 30 sfc_flow_parse_cb_t *parse; 31 sfc_flow_verify_cb_t *verify; 32 sfc_flow_cleanup_cb_t *cleanup; 33 sfc_flow_insert_cb_t *insert; 34 sfc_flow_remove_cb_t *remove; 35 sfc_flow_query_cb_t *query; 36 }; 37 38 static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter; 39 static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_mae; 40 static sfc_flow_insert_cb_t sfc_flow_filter_insert; 41 static sfc_flow_remove_cb_t sfc_flow_filter_remove; 42 43 static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = { 44 .parse = sfc_flow_parse_rte_to_filter, 45 .verify = NULL, 46 .cleanup = NULL, 47 .insert = sfc_flow_filter_insert, 48 .remove = sfc_flow_filter_remove, 49 .query = NULL, 50 }; 51 52 static const struct sfc_flow_ops_by_spec sfc_flow_ops_mae = { 53 .parse = sfc_flow_parse_rte_to_mae, 54 .verify = sfc_mae_flow_verify, 55 .cleanup = sfc_mae_flow_cleanup, 56 .insert = sfc_mae_flow_insert, 57 .remove = sfc_mae_flow_remove, 58 .query = sfc_mae_flow_query, 59 }; 60 61 static const struct sfc_flow_ops_by_spec * 62 sfc_flow_get_ops_by_spec(struct rte_flow *flow) 63 { 64 struct sfc_flow_spec *spec = &flow->spec; 65 const struct sfc_flow_ops_by_spec *ops = NULL; 66 67 switch (spec->type) { 68 case SFC_FLOW_SPEC_FILTER: 69 ops = &sfc_flow_ops_filter; 70 break; 71 case SFC_FLOW_SPEC_MAE: 72 ops = &sfc_flow_ops_mae; 73 break; 74 default: 75 SFC_ASSERT(false); 76 break; 77 } 78 79 return ops; 80 } 81 82 /* 83 * Currently, filter-based (VNIC) flow API is implemented in such a manner 84 * that each flow rule is converted to one or more hardware filters. 85 * All elements of flow rule (attributes, pattern items, actions) 86 * correspond to one or more fields in the efx_filter_spec_s structure 87 * that is responsible for the hardware filter. 88 * If some required field is unset in the flow rule, then a handful 89 * of filter copies will be created to cover all possible values 90 * of such a field. 
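 *
 * For example, if the hardware can only match on a given field together
 * with an EtherType, a rule that leaves the EtherType unset is copied and
 * the copies are completed with the IPv4 and IPv6 EtherType values
 * (see sfc_flow_set_ethertypes() below).
 *
 * An application-side sketch of a rule handled by this path (illustrative
 * only, not part of the driver; the port ID and queue index are
 * placeholders):
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *	};
 *	struct rte_flow_item_eth eth_mask = { .type = RTE_BE16(0xffff) };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(0, &attr, pattern, actions, &err);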
91 */ 92 93 static sfc_flow_item_parse sfc_flow_parse_void; 94 static sfc_flow_item_parse sfc_flow_parse_eth; 95 static sfc_flow_item_parse sfc_flow_parse_vlan; 96 static sfc_flow_item_parse sfc_flow_parse_ipv4; 97 static sfc_flow_item_parse sfc_flow_parse_ipv6; 98 static sfc_flow_item_parse sfc_flow_parse_tcp; 99 static sfc_flow_item_parse sfc_flow_parse_udp; 100 static sfc_flow_item_parse sfc_flow_parse_vxlan; 101 static sfc_flow_item_parse sfc_flow_parse_geneve; 102 static sfc_flow_item_parse sfc_flow_parse_nvgre; 103 static sfc_flow_item_parse sfc_flow_parse_pppoex; 104 105 typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec, 106 unsigned int filters_count_for_one_val, 107 struct rte_flow_error *error); 108 109 typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match, 110 efx_filter_spec_t *spec, 111 struct sfc_filter *filter); 112 113 struct sfc_flow_copy_flag { 114 /* EFX filter specification match flag */ 115 efx_filter_match_flags_t flag; 116 /* Number of values of corresponding field */ 117 unsigned int vals_count; 118 /* Function to set values in specifications */ 119 sfc_flow_spec_set_vals *set_vals; 120 /* 121 * Function to check that the specification is suitable 122 * for adding this match flag 123 */ 124 sfc_flow_spec_check *spec_check; 125 }; 126 127 static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags; 128 static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags; 129 static sfc_flow_spec_set_vals sfc_flow_set_ethertypes; 130 static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags; 131 static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags; 132 static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag; 133 static sfc_flow_spec_check sfc_flow_check_outer_vid_flag; 134 135 static boolean_t 136 sfc_flow_is_zero(const uint8_t *buf, unsigned int size) 137 { 138 uint8_t sum = 0; 139 unsigned int i; 140 141 for (i = 0; i < size; i++) 142 sum |= buf[i]; 143 144 return (sum == 0) ? 
B_TRUE : B_FALSE; 145 } 146 147 /* 148 * Validate item and prepare structures spec and mask for parsing 149 */ 150 int 151 sfc_flow_parse_init(const struct rte_flow_item *item, 152 const void **spec_ptr, 153 const void **mask_ptr, 154 const void *supp_mask, 155 const void *def_mask, 156 unsigned int size, 157 struct rte_flow_error *error) 158 { 159 const uint8_t *spec; 160 const uint8_t *mask; 161 const uint8_t *last; 162 uint8_t supp; 163 unsigned int i; 164 165 if (item == NULL) { 166 rte_flow_error_set(error, EINVAL, 167 RTE_FLOW_ERROR_TYPE_ITEM, NULL, 168 "NULL item"); 169 return -rte_errno; 170 } 171 172 if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) { 173 rte_flow_error_set(error, EINVAL, 174 RTE_FLOW_ERROR_TYPE_ITEM, item, 175 "Mask or last is set without spec"); 176 return -rte_errno; 177 } 178 179 /* 180 * If "mask" is not set, default mask is used, 181 * but if default mask is NULL, "mask" should be set 182 */ 183 if (item->mask == NULL) { 184 if (def_mask == NULL) { 185 rte_flow_error_set(error, EINVAL, 186 RTE_FLOW_ERROR_TYPE_ITEM, NULL, 187 "Mask should be specified"); 188 return -rte_errno; 189 } 190 191 mask = def_mask; 192 } else { 193 mask = item->mask; 194 } 195 196 spec = item->spec; 197 last = item->last; 198 199 if (spec == NULL) 200 goto exit; 201 202 /* 203 * If field values in "last" are either 0 or equal to the corresponding 204 * values in "spec" then they are ignored 205 */ 206 if (last != NULL && 207 !sfc_flow_is_zero(last, size) && 208 memcmp(last, spec, size) != 0) { 209 rte_flow_error_set(error, ENOTSUP, 210 RTE_FLOW_ERROR_TYPE_ITEM, item, 211 "Ranging is not supported"); 212 return -rte_errno; 213 } 214 215 if (supp_mask == NULL) { 216 rte_flow_error_set(error, EINVAL, 217 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 218 "Supported mask for item should be specified"); 219 return -rte_errno; 220 } 221 222 /* Check that mask does not ask for more match than supp_mask */ 223 for (i = 0; i < size; i++) { 224 supp = ((const uint8_t *)supp_mask)[i]; 225 226 if (~supp & mask[i]) { 227 rte_flow_error_set(error, ENOTSUP, 228 RTE_FLOW_ERROR_TYPE_ITEM, item, 229 "Item's field is not supported"); 230 return -rte_errno; 231 } 232 } 233 234 exit: 235 *spec_ptr = spec; 236 *mask_ptr = mask; 237 return 0; 238 } 239 240 /* 241 * Protocol parsers. 242 * Masking is not supported, so masks in items should be either 243 * full or empty (zeroed) and set only for supported fields which 244 * are specified in the supp_mask. 245 */ 246 247 static int 248 sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item, 249 __rte_unused struct sfc_flow_parse_ctx *parse_ctx, 250 __rte_unused struct rte_flow_error *error) 251 { 252 return 0; 253 } 254 255 /** 256 * Convert Ethernet item to EFX filter specification. 257 * 258 * @param item[in] 259 * Item specification. Outer frame specification may only comprise 260 * source/destination addresses and Ethertype field. 261 * Inner frame specification may contain destination address only. 262 * There is support for individual/group mask as well as for empty and full. 263 * If the mask is NULL, default mask will be used. Ranging is not supported. 264 * @param efx_spec[in, out] 265 * EFX filter specification to update. 266 * @param[out] error 267 * Perform verbose error reporting if not NULL. 
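 *
 * Note that the individual/group mask (01:00:00:00:00:00) does not request
 * an exact address match; depending on the destination address in the spec,
 * it selects the unknown unicast or unknown multicast destination filter.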
268 */ 269 static int 270 sfc_flow_parse_eth(const struct rte_flow_item *item, 271 struct sfc_flow_parse_ctx *parse_ctx, 272 struct rte_flow_error *error) 273 { 274 int rc; 275 efx_filter_spec_t *efx_spec = parse_ctx->filter; 276 const struct rte_flow_item_eth *spec = NULL; 277 const struct rte_flow_item_eth *mask = NULL; 278 const struct rte_flow_item_eth supp_mask = { 279 .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 280 .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 281 .type = 0xffff, 282 }; 283 const struct rte_flow_item_eth ifrm_supp_mask = { 284 .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 285 }; 286 const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = { 287 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 288 }; 289 const struct rte_flow_item_eth *supp_mask_p; 290 const struct rte_flow_item_eth *def_mask_p; 291 uint8_t *loc_mac = NULL; 292 boolean_t is_ifrm = (efx_spec->efs_encap_type != 293 EFX_TUNNEL_PROTOCOL_NONE); 294 295 if (is_ifrm) { 296 supp_mask_p = &ifrm_supp_mask; 297 def_mask_p = &ifrm_supp_mask; 298 loc_mac = efx_spec->efs_ifrm_loc_mac; 299 } else { 300 supp_mask_p = &supp_mask; 301 def_mask_p = &rte_flow_item_eth_mask; 302 loc_mac = efx_spec->efs_loc_mac; 303 } 304 305 rc = sfc_flow_parse_init(item, 306 (const void **)&spec, 307 (const void **)&mask, 308 supp_mask_p, def_mask_p, 309 sizeof(struct rte_flow_item_eth), 310 error); 311 if (rc != 0) 312 return rc; 313 314 /* If "spec" is not set, could be any Ethernet */ 315 if (spec == NULL) 316 return 0; 317 318 if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) { 319 efx_spec->efs_match_flags |= is_ifrm ? 320 EFX_FILTER_MATCH_IFRM_LOC_MAC : 321 EFX_FILTER_MATCH_LOC_MAC; 322 rte_memcpy(loc_mac, spec->dst.addr_bytes, 323 EFX_MAC_ADDR_LEN); 324 } else if (memcmp(mask->dst.addr_bytes, ig_mask, 325 EFX_MAC_ADDR_LEN) == 0) { 326 if (rte_is_unicast_ether_addr(&spec->dst)) 327 efx_spec->efs_match_flags |= is_ifrm ? 328 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST : 329 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST; 330 else 331 efx_spec->efs_match_flags |= is_ifrm ? 332 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST : 333 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST; 334 } else if (!rte_is_zero_ether_addr(&mask->dst)) { 335 goto fail_bad_mask; 336 } 337 338 /* 339 * ifrm_supp_mask ensures that the source address and 340 * ethertype masks are equal to zero in inner frame, 341 * so these fields are filled in only for the outer frame 342 */ 343 if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) { 344 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC; 345 rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes, 346 EFX_MAC_ADDR_LEN); 347 } else if (!rte_is_zero_ether_addr(&mask->src)) { 348 goto fail_bad_mask; 349 } 350 351 /* 352 * Ether type is in big-endian byte order in item and 353 * in little-endian in efx_spec, so byte swap is used 354 */ 355 if (mask->type == supp_mask.type) { 356 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; 357 efx_spec->efs_ether_type = rte_bswap16(spec->type); 358 } else if (mask->type != 0) { 359 goto fail_bad_mask; 360 } 361 362 return 0; 363 364 fail_bad_mask: 365 rte_flow_error_set(error, EINVAL, 366 RTE_FLOW_ERROR_TYPE_ITEM, item, 367 "Bad mask in the ETH pattern item"); 368 return -rte_errno; 369 } 370 371 /** 372 * Convert VLAN item to EFX filter specification. 373 * 374 * @param item[in] 375 * Item specification. Only VID field is supported. 376 * The mask can not be NULL. Ranging is not supported. 377 * @param efx_spec[in, out] 378 * EFX filter specification to update. 
379 * @param[out] error 380 * Perform verbose error reporting if not NULL. 381 */ 382 static int 383 sfc_flow_parse_vlan(const struct rte_flow_item *item, 384 struct sfc_flow_parse_ctx *parse_ctx, 385 struct rte_flow_error *error) 386 { 387 int rc; 388 uint16_t vid; 389 efx_filter_spec_t *efx_spec = parse_ctx->filter; 390 const struct rte_flow_item_vlan *spec = NULL; 391 const struct rte_flow_item_vlan *mask = NULL; 392 const struct rte_flow_item_vlan supp_mask = { 393 .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX), 394 .inner_type = RTE_BE16(0xffff), 395 }; 396 397 rc = sfc_flow_parse_init(item, 398 (const void **)&spec, 399 (const void **)&mask, 400 &supp_mask, 401 NULL, 402 sizeof(struct rte_flow_item_vlan), 403 error); 404 if (rc != 0) 405 return rc; 406 407 /* 408 * VID is in big-endian byte order in item and 409 * in little-endian in efx_spec, so byte swap is used. 410 * If two VLAN items are included, the first matches 411 * the outer tag and the next matches the inner tag. 412 */ 413 if (mask->tci == supp_mask.tci) { 414 /* Apply mask to keep VID only */ 415 vid = rte_bswap16(spec->tci & mask->tci); 416 417 if (!(efx_spec->efs_match_flags & 418 EFX_FILTER_MATCH_OUTER_VID)) { 419 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID; 420 efx_spec->efs_outer_vid = vid; 421 } else if (!(efx_spec->efs_match_flags & 422 EFX_FILTER_MATCH_INNER_VID)) { 423 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID; 424 efx_spec->efs_inner_vid = vid; 425 } else { 426 rte_flow_error_set(error, EINVAL, 427 RTE_FLOW_ERROR_TYPE_ITEM, item, 428 "More than two VLAN items"); 429 return -rte_errno; 430 } 431 } else { 432 rte_flow_error_set(error, EINVAL, 433 RTE_FLOW_ERROR_TYPE_ITEM, item, 434 "VLAN ID in TCI match is required"); 435 return -rte_errno; 436 } 437 438 if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) { 439 rte_flow_error_set(error, EINVAL, 440 RTE_FLOW_ERROR_TYPE_ITEM, item, 441 "VLAN TPID matching is not supported"); 442 return -rte_errno; 443 } 444 if (mask->inner_type == supp_mask.inner_type) { 445 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; 446 efx_spec->efs_ether_type = rte_bswap16(spec->inner_type); 447 } else if (mask->inner_type) { 448 rte_flow_error_set(error, EINVAL, 449 RTE_FLOW_ERROR_TYPE_ITEM, item, 450 "Bad mask for VLAN inner_type"); 451 return -rte_errno; 452 } 453 454 return 0; 455 } 456 457 /** 458 * Convert IPv4 item to EFX filter specification. 459 * 460 * @param item[in] 461 * Item specification. Only source and destination addresses and 462 * protocol fields are supported. If the mask is NULL, default 463 * mask will be used. Ranging is not supported. 464 * @param efx_spec[in, out] 465 * EFX filter specification to update. 466 * @param[out] error 467 * Perform verbose error reporting if not NULL. 
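 *
 * Matching on IPv4 implicitly adds an IPv4 EtherType match to the hardware
 * filter; an EtherType already set by a preceding item must agree with it.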
468 */ 469 static int 470 sfc_flow_parse_ipv4(const struct rte_flow_item *item, 471 struct sfc_flow_parse_ctx *parse_ctx, 472 struct rte_flow_error *error) 473 { 474 int rc; 475 efx_filter_spec_t *efx_spec = parse_ctx->filter; 476 const struct rte_flow_item_ipv4 *spec = NULL; 477 const struct rte_flow_item_ipv4 *mask = NULL; 478 const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4); 479 const struct rte_flow_item_ipv4 supp_mask = { 480 .hdr = { 481 .src_addr = 0xffffffff, 482 .dst_addr = 0xffffffff, 483 .next_proto_id = 0xff, 484 } 485 }; 486 487 rc = sfc_flow_parse_init(item, 488 (const void **)&spec, 489 (const void **)&mask, 490 &supp_mask, 491 &rte_flow_item_ipv4_mask, 492 sizeof(struct rte_flow_item_ipv4), 493 error); 494 if (rc != 0) 495 return rc; 496 497 /* 498 * Filtering by IPv4 source and destination addresses requires 499 * the appropriate ETHER_TYPE in hardware filters 500 */ 501 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) { 502 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; 503 efx_spec->efs_ether_type = ether_type_ipv4; 504 } else if (efx_spec->efs_ether_type != ether_type_ipv4) { 505 rte_flow_error_set(error, EINVAL, 506 RTE_FLOW_ERROR_TYPE_ITEM, item, 507 "Ethertype in pattern with IPV4 item should be appropriate"); 508 return -rte_errno; 509 } 510 511 if (spec == NULL) 512 return 0; 513 514 /* 515 * IPv4 addresses are in big-endian byte order in item and in 516 * efx_spec 517 */ 518 if (mask->hdr.src_addr == supp_mask.hdr.src_addr) { 519 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST; 520 efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr; 521 } else if (mask->hdr.src_addr != 0) { 522 goto fail_bad_mask; 523 } 524 525 if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) { 526 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST; 527 efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr; 528 } else if (mask->hdr.dst_addr != 0) { 529 goto fail_bad_mask; 530 } 531 532 if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) { 533 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 534 efx_spec->efs_ip_proto = spec->hdr.next_proto_id; 535 } else if (mask->hdr.next_proto_id != 0) { 536 goto fail_bad_mask; 537 } 538 539 return 0; 540 541 fail_bad_mask: 542 rte_flow_error_set(error, EINVAL, 543 RTE_FLOW_ERROR_TYPE_ITEM, item, 544 "Bad mask in the IPV4 pattern item"); 545 return -rte_errno; 546 } 547 548 /** 549 * Convert IPv6 item to EFX filter specification. 550 * 551 * @param item[in] 552 * Item specification. Only source and destination addresses and 553 * next header fields are supported. If the mask is NULL, default 554 * mask will be used. Ranging is not supported. 555 * @param efx_spec[in, out] 556 * EFX filter specification to update. 557 * @param[out] error 558 * Perform verbose error reporting if not NULL. 
559 */ 560 static int 561 sfc_flow_parse_ipv6(const struct rte_flow_item *item, 562 struct sfc_flow_parse_ctx *parse_ctx, 563 struct rte_flow_error *error) 564 { 565 int rc; 566 efx_filter_spec_t *efx_spec = parse_ctx->filter; 567 const struct rte_flow_item_ipv6 *spec = NULL; 568 const struct rte_flow_item_ipv6 *mask = NULL; 569 const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6); 570 const struct rte_flow_item_ipv6 supp_mask = { 571 .hdr = { 572 .src_addr = { 0xff, 0xff, 0xff, 0xff, 573 0xff, 0xff, 0xff, 0xff, 574 0xff, 0xff, 0xff, 0xff, 575 0xff, 0xff, 0xff, 0xff }, 576 .dst_addr = { 0xff, 0xff, 0xff, 0xff, 577 0xff, 0xff, 0xff, 0xff, 578 0xff, 0xff, 0xff, 0xff, 579 0xff, 0xff, 0xff, 0xff }, 580 .proto = 0xff, 581 } 582 }; 583 584 rc = sfc_flow_parse_init(item, 585 (const void **)&spec, 586 (const void **)&mask, 587 &supp_mask, 588 &rte_flow_item_ipv6_mask, 589 sizeof(struct rte_flow_item_ipv6), 590 error); 591 if (rc != 0) 592 return rc; 593 594 /* 595 * Filtering by IPv6 source and destination addresses requires 596 * the appropriate ETHER_TYPE in hardware filters 597 */ 598 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) { 599 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; 600 efx_spec->efs_ether_type = ether_type_ipv6; 601 } else if (efx_spec->efs_ether_type != ether_type_ipv6) { 602 rte_flow_error_set(error, EINVAL, 603 RTE_FLOW_ERROR_TYPE_ITEM, item, 604 "Ethertype in pattern with IPV6 item should be appropriate"); 605 return -rte_errno; 606 } 607 608 if (spec == NULL) 609 return 0; 610 611 /* 612 * IPv6 addresses are in big-endian byte order in item and in 613 * efx_spec 614 */ 615 if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr, 616 sizeof(mask->hdr.src_addr)) == 0) { 617 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST; 618 619 RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) != 620 sizeof(spec->hdr.src_addr)); 621 rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr, 622 sizeof(efx_spec->efs_rem_host)); 623 } else if (!sfc_flow_is_zero(mask->hdr.src_addr, 624 sizeof(mask->hdr.src_addr))) { 625 goto fail_bad_mask; 626 } 627 628 if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr, 629 sizeof(mask->hdr.dst_addr)) == 0) { 630 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST; 631 632 RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) != 633 sizeof(spec->hdr.dst_addr)); 634 rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr, 635 sizeof(efx_spec->efs_loc_host)); 636 } else if (!sfc_flow_is_zero(mask->hdr.dst_addr, 637 sizeof(mask->hdr.dst_addr))) { 638 goto fail_bad_mask; 639 } 640 641 if (mask->hdr.proto == supp_mask.hdr.proto) { 642 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 643 efx_spec->efs_ip_proto = spec->hdr.proto; 644 } else if (mask->hdr.proto != 0) { 645 goto fail_bad_mask; 646 } 647 648 return 0; 649 650 fail_bad_mask: 651 rte_flow_error_set(error, EINVAL, 652 RTE_FLOW_ERROR_TYPE_ITEM, item, 653 "Bad mask in the IPV6 pattern item"); 654 return -rte_errno; 655 } 656 657 /** 658 * Convert TCP item to EFX filter specification. 659 * 660 * @param item[in] 661 * Item specification. Only source and destination ports fields 662 * are supported. If the mask is NULL, default mask will be used. 663 * Ranging is not supported. 664 * @param efx_spec[in, out] 665 * EFX filter specification to update. 666 * @param[out] error 667 * Perform verbose error reporting if not NULL. 
668 */ 669 static int 670 sfc_flow_parse_tcp(const struct rte_flow_item *item, 671 struct sfc_flow_parse_ctx *parse_ctx, 672 struct rte_flow_error *error) 673 { 674 int rc; 675 efx_filter_spec_t *efx_spec = parse_ctx->filter; 676 const struct rte_flow_item_tcp *spec = NULL; 677 const struct rte_flow_item_tcp *mask = NULL; 678 const struct rte_flow_item_tcp supp_mask = { 679 .hdr = { 680 .src_port = 0xffff, 681 .dst_port = 0xffff, 682 } 683 }; 684 685 rc = sfc_flow_parse_init(item, 686 (const void **)&spec, 687 (const void **)&mask, 688 &supp_mask, 689 &rte_flow_item_tcp_mask, 690 sizeof(struct rte_flow_item_tcp), 691 error); 692 if (rc != 0) 693 return rc; 694 695 /* 696 * Filtering by TCP source and destination ports requires 697 * the appropriate IP_PROTO in hardware filters 698 */ 699 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) { 700 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 701 efx_spec->efs_ip_proto = EFX_IPPROTO_TCP; 702 } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) { 703 rte_flow_error_set(error, EINVAL, 704 RTE_FLOW_ERROR_TYPE_ITEM, item, 705 "IP proto in pattern with TCP item should be appropriate"); 706 return -rte_errno; 707 } 708 709 if (spec == NULL) 710 return 0; 711 712 /* 713 * Source and destination ports are in big-endian byte order in item and 714 * in little-endian in efx_spec, so byte swap is used 715 */ 716 if (mask->hdr.src_port == supp_mask.hdr.src_port) { 717 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT; 718 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port); 719 } else if (mask->hdr.src_port != 0) { 720 goto fail_bad_mask; 721 } 722 723 if (mask->hdr.dst_port == supp_mask.hdr.dst_port) { 724 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT; 725 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port); 726 } else if (mask->hdr.dst_port != 0) { 727 goto fail_bad_mask; 728 } 729 730 return 0; 731 732 fail_bad_mask: 733 rte_flow_error_set(error, EINVAL, 734 RTE_FLOW_ERROR_TYPE_ITEM, item, 735 "Bad mask in the TCP pattern item"); 736 return -rte_errno; 737 } 738 739 /** 740 * Convert UDP item to EFX filter specification. 741 * 742 * @param item[in] 743 * Item specification. Only source and destination ports fields 744 * are supported. If the mask is NULL, default mask will be used. 745 * Ranging is not supported. 746 * @param efx_spec[in, out] 747 * EFX filter specification to update. 748 * @param[out] error 749 * Perform verbose error reporting if not NULL. 
750 */ 751 static int 752 sfc_flow_parse_udp(const struct rte_flow_item *item, 753 struct sfc_flow_parse_ctx *parse_ctx, 754 struct rte_flow_error *error) 755 { 756 int rc; 757 efx_filter_spec_t *efx_spec = parse_ctx->filter; 758 const struct rte_flow_item_udp *spec = NULL; 759 const struct rte_flow_item_udp *mask = NULL; 760 const struct rte_flow_item_udp supp_mask = { 761 .hdr = { 762 .src_port = 0xffff, 763 .dst_port = 0xffff, 764 } 765 }; 766 767 rc = sfc_flow_parse_init(item, 768 (const void **)&spec, 769 (const void **)&mask, 770 &supp_mask, 771 &rte_flow_item_udp_mask, 772 sizeof(struct rte_flow_item_udp), 773 error); 774 if (rc != 0) 775 return rc; 776 777 /* 778 * Filtering by UDP source and destination ports requires 779 * the appropriate IP_PROTO in hardware filters 780 */ 781 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) { 782 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 783 efx_spec->efs_ip_proto = EFX_IPPROTO_UDP; 784 } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) { 785 rte_flow_error_set(error, EINVAL, 786 RTE_FLOW_ERROR_TYPE_ITEM, item, 787 "IP proto in pattern with UDP item should be appropriate"); 788 return -rte_errno; 789 } 790 791 if (spec == NULL) 792 return 0; 793 794 /* 795 * Source and destination ports are in big-endian byte order in item and 796 * in little-endian in efx_spec, so byte swap is used 797 */ 798 if (mask->hdr.src_port == supp_mask.hdr.src_port) { 799 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT; 800 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port); 801 } else if (mask->hdr.src_port != 0) { 802 goto fail_bad_mask; 803 } 804 805 if (mask->hdr.dst_port == supp_mask.hdr.dst_port) { 806 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT; 807 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port); 808 } else if (mask->hdr.dst_port != 0) { 809 goto fail_bad_mask; 810 } 811 812 return 0; 813 814 fail_bad_mask: 815 rte_flow_error_set(error, EINVAL, 816 RTE_FLOW_ERROR_TYPE_ITEM, item, 817 "Bad mask in the UDP pattern item"); 818 return -rte_errno; 819 } 820 821 /* 822 * Filters for encapsulated packets match based on the EtherType and IP 823 * protocol in the outer frame. 
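 * For VXLAN and GENEVE items the outer IP protocol must be UDP, for NVGRE
 * it must be GRE, and the outer EtherType, if matched, must be IPv4 or IPv6.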
824 */ 825 static int 826 sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item, 827 efx_filter_spec_t *efx_spec, 828 uint8_t ip_proto, 829 struct rte_flow_error *error) 830 { 831 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) { 832 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 833 efx_spec->efs_ip_proto = ip_proto; 834 } else if (efx_spec->efs_ip_proto != ip_proto) { 835 switch (ip_proto) { 836 case EFX_IPPROTO_UDP: 837 rte_flow_error_set(error, EINVAL, 838 RTE_FLOW_ERROR_TYPE_ITEM, item, 839 "Outer IP header protocol must be UDP " 840 "in VxLAN/GENEVE pattern"); 841 return -rte_errno; 842 843 case EFX_IPPROTO_GRE: 844 rte_flow_error_set(error, EINVAL, 845 RTE_FLOW_ERROR_TYPE_ITEM, item, 846 "Outer IP header protocol must be GRE " 847 "in NVGRE pattern"); 848 return -rte_errno; 849 850 default: 851 rte_flow_error_set(error, EINVAL, 852 RTE_FLOW_ERROR_TYPE_ITEM, item, 853 "Only VxLAN/GENEVE/NVGRE tunneling patterns " 854 "are supported"); 855 return -rte_errno; 856 } 857 } 858 859 if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE && 860 efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 && 861 efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) { 862 rte_flow_error_set(error, EINVAL, 863 RTE_FLOW_ERROR_TYPE_ITEM, item, 864 "Outer frame EtherType in pattern with tunneling " 865 "must be IPv4 or IPv6"); 866 return -rte_errno; 867 } 868 869 return 0; 870 } 871 872 static int 873 sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec, 874 const uint8_t *vni_or_vsid_val, 875 const uint8_t *vni_or_vsid_mask, 876 const struct rte_flow_item *item, 877 struct rte_flow_error *error) 878 { 879 const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = { 880 0xff, 0xff, 0xff 881 }; 882 883 if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask, 884 EFX_VNI_OR_VSID_LEN) == 0) { 885 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID; 886 rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val, 887 EFX_VNI_OR_VSID_LEN); 888 } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) { 889 rte_flow_error_set(error, EINVAL, 890 RTE_FLOW_ERROR_TYPE_ITEM, item, 891 "Unsupported VNI/VSID mask"); 892 return -rte_errno; 893 } 894 895 return 0; 896 } 897 898 /** 899 * Convert VXLAN item to EFX filter specification. 900 * 901 * @param item[in] 902 * Item specification. Only VXLAN network identifier field is supported. 903 * If the mask is NULL, default mask will be used. 904 * Ranging is not supported. 905 * @param efx_spec[in, out] 906 * EFX filter specification to update. 907 * @param[out] error 908 * Perform verbose error reporting if not NULL. 
909 */ 910 static int 911 sfc_flow_parse_vxlan(const struct rte_flow_item *item, 912 struct sfc_flow_parse_ctx *parse_ctx, 913 struct rte_flow_error *error) 914 { 915 int rc; 916 efx_filter_spec_t *efx_spec = parse_ctx->filter; 917 const struct rte_flow_item_vxlan *spec = NULL; 918 const struct rte_flow_item_vxlan *mask = NULL; 919 const struct rte_flow_item_vxlan supp_mask = { 920 .vni = { 0xff, 0xff, 0xff } 921 }; 922 923 rc = sfc_flow_parse_init(item, 924 (const void **)&spec, 925 (const void **)&mask, 926 &supp_mask, 927 &rte_flow_item_vxlan_mask, 928 sizeof(struct rte_flow_item_vxlan), 929 error); 930 if (rc != 0) 931 return rc; 932 933 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec, 934 EFX_IPPROTO_UDP, error); 935 if (rc != 0) 936 return rc; 937 938 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN; 939 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; 940 941 if (spec == NULL) 942 return 0; 943 944 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni, 945 mask->vni, item, error); 946 947 return rc; 948 } 949 950 /** 951 * Convert GENEVE item to EFX filter specification. 952 * 953 * @param item[in] 954 * Item specification. Only Virtual Network Identifier and protocol type 955 * fields are supported. But protocol type can be only Ethernet (0x6558). 956 * If the mask is NULL, default mask will be used. 957 * Ranging is not supported. 958 * @param efx_spec[in, out] 959 * EFX filter specification to update. 960 * @param[out] error 961 * Perform verbose error reporting if not NULL. 962 */ 963 static int 964 sfc_flow_parse_geneve(const struct rte_flow_item *item, 965 struct sfc_flow_parse_ctx *parse_ctx, 966 struct rte_flow_error *error) 967 { 968 int rc; 969 efx_filter_spec_t *efx_spec = parse_ctx->filter; 970 const struct rte_flow_item_geneve *spec = NULL; 971 const struct rte_flow_item_geneve *mask = NULL; 972 const struct rte_flow_item_geneve supp_mask = { 973 .protocol = RTE_BE16(0xffff), 974 .vni = { 0xff, 0xff, 0xff } 975 }; 976 977 rc = sfc_flow_parse_init(item, 978 (const void **)&spec, 979 (const void **)&mask, 980 &supp_mask, 981 &rte_flow_item_geneve_mask, 982 sizeof(struct rte_flow_item_geneve), 983 error); 984 if (rc != 0) 985 return rc; 986 987 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec, 988 EFX_IPPROTO_UDP, error); 989 if (rc != 0) 990 return rc; 991 992 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE; 993 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; 994 995 if (spec == NULL) 996 return 0; 997 998 if (mask->protocol == supp_mask.protocol) { 999 if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) { 1000 rte_flow_error_set(error, EINVAL, 1001 RTE_FLOW_ERROR_TYPE_ITEM, item, 1002 "GENEVE encap. protocol must be Ethernet " 1003 "(0x6558) in the GENEVE pattern item"); 1004 return -rte_errno; 1005 } 1006 } else if (mask->protocol != 0) { 1007 rte_flow_error_set(error, EINVAL, 1008 RTE_FLOW_ERROR_TYPE_ITEM, item, 1009 "Unsupported mask for GENEVE encap. protocol"); 1010 return -rte_errno; 1011 } 1012 1013 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni, 1014 mask->vni, item, error); 1015 1016 return rc; 1017 } 1018 1019 /** 1020 * Convert NVGRE item to EFX filter specification. 1021 * 1022 * @param item[in] 1023 * Item specification. Only virtual subnet ID field is supported. 1024 * If the mask is NULL, default mask will be used. 1025 * Ranging is not supported. 1026 * @param efx_spec[in, out] 1027 * EFX filter specification to update. 
1028 * @param[out] error 1029 * Perform verbose error reporting if not NULL. 1030 */ 1031 static int 1032 sfc_flow_parse_nvgre(const struct rte_flow_item *item, 1033 struct sfc_flow_parse_ctx *parse_ctx, 1034 struct rte_flow_error *error) 1035 { 1036 int rc; 1037 efx_filter_spec_t *efx_spec = parse_ctx->filter; 1038 const struct rte_flow_item_nvgre *spec = NULL; 1039 const struct rte_flow_item_nvgre *mask = NULL; 1040 const struct rte_flow_item_nvgre supp_mask = { 1041 .tni = { 0xff, 0xff, 0xff } 1042 }; 1043 1044 rc = sfc_flow_parse_init(item, 1045 (const void **)&spec, 1046 (const void **)&mask, 1047 &supp_mask, 1048 &rte_flow_item_nvgre_mask, 1049 sizeof(struct rte_flow_item_nvgre), 1050 error); 1051 if (rc != 0) 1052 return rc; 1053 1054 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec, 1055 EFX_IPPROTO_GRE, error); 1056 if (rc != 0) 1057 return rc; 1058 1059 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE; 1060 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; 1061 1062 if (spec == NULL) 1063 return 0; 1064 1065 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni, 1066 mask->tni, item, error); 1067 1068 return rc; 1069 } 1070 1071 /** 1072 * Convert PPPoEx item to EFX filter specification. 1073 * 1074 * @param item[in] 1075 * Item specification. 1076 * Matching on PPPoEx fields is not supported. 1077 * This item can only be used to set or validate the EtherType filter. 1078 * Only zero masks are allowed. 1079 * Ranging is not supported. 1080 * @param efx_spec[in, out] 1081 * EFX filter specification to update. 1082 * @param[out] error 1083 * Perform verbose error reporting if not NULL. 1084 */ 1085 static int 1086 sfc_flow_parse_pppoex(const struct rte_flow_item *item, 1087 struct sfc_flow_parse_ctx *parse_ctx, 1088 struct rte_flow_error *error) 1089 { 1090 efx_filter_spec_t *efx_spec = parse_ctx->filter; 1091 const struct rte_flow_item_pppoe *spec = NULL; 1092 const struct rte_flow_item_pppoe *mask = NULL; 1093 const struct rte_flow_item_pppoe supp_mask = {}; 1094 const struct rte_flow_item_pppoe def_mask = {}; 1095 uint16_t ether_type; 1096 int rc; 1097 1098 rc = sfc_flow_parse_init(item, 1099 (const void **)&spec, 1100 (const void **)&mask, 1101 &supp_mask, 1102 &def_mask, 1103 sizeof(struct rte_flow_item_pppoe), 1104 error); 1105 if (rc != 0) 1106 return rc; 1107 1108 if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED) 1109 ether_type = RTE_ETHER_TYPE_PPPOE_DISCOVERY; 1110 else 1111 ether_type = RTE_ETHER_TYPE_PPPOE_SESSION; 1112 1113 if ((efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) != 0) { 1114 if (efx_spec->efs_ether_type != ether_type) { 1115 rte_flow_error_set(error, EINVAL, 1116 RTE_FLOW_ERROR_TYPE_ITEM, item, 1117 "Invalid EtherType for a PPPoE flow item"); 1118 return -rte_errno; 1119 } 1120 } else { 1121 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; 1122 efx_spec->efs_ether_type = ether_type; 1123 } 1124 1125 return 0; 1126 } 1127 1128 static const struct sfc_flow_item sfc_flow_items[] = { 1129 { 1130 .type = RTE_FLOW_ITEM_TYPE_VOID, 1131 .name = "VOID", 1132 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER, 1133 .layer = SFC_FLOW_ITEM_ANY_LAYER, 1134 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, 1135 .parse = sfc_flow_parse_void, 1136 }, 1137 { 1138 .type = RTE_FLOW_ITEM_TYPE_ETH, 1139 .name = "ETH", 1140 .prev_layer = SFC_FLOW_ITEM_START_LAYER, 1141 .layer = SFC_FLOW_ITEM_L2, 1142 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, 1143 .parse = sfc_flow_parse_eth, 1144 }, 1145 { 1146 .type = RTE_FLOW_ITEM_TYPE_VLAN, 1147 .name = "VLAN", 1148 
.prev_layer = SFC_FLOW_ITEM_L2, 1149 .layer = SFC_FLOW_ITEM_L2, 1150 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, 1151 .parse = sfc_flow_parse_vlan, 1152 }, 1153 { 1154 .type = RTE_FLOW_ITEM_TYPE_PPPOED, 1155 .name = "PPPOED", 1156 .prev_layer = SFC_FLOW_ITEM_L2, 1157 .layer = SFC_FLOW_ITEM_L2, 1158 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, 1159 .parse = sfc_flow_parse_pppoex, 1160 }, 1161 { 1162 .type = RTE_FLOW_ITEM_TYPE_PPPOES, 1163 .name = "PPPOES", 1164 .prev_layer = SFC_FLOW_ITEM_L2, 1165 .layer = SFC_FLOW_ITEM_L2, 1166 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, 1167 .parse = sfc_flow_parse_pppoex, 1168 }, 1169 { 1170 .type = RTE_FLOW_ITEM_TYPE_IPV4, 1171 .name = "IPV4", 1172 .prev_layer = SFC_FLOW_ITEM_L2, 1173 .layer = SFC_FLOW_ITEM_L3, 1174 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, 1175 .parse = sfc_flow_parse_ipv4, 1176 }, 1177 { 1178 .type = RTE_FLOW_ITEM_TYPE_IPV6, 1179 .name = "IPV6", 1180 .prev_layer = SFC_FLOW_ITEM_L2, 1181 .layer = SFC_FLOW_ITEM_L3, 1182 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, 1183 .parse = sfc_flow_parse_ipv6, 1184 }, 1185 { 1186 .type = RTE_FLOW_ITEM_TYPE_TCP, 1187 .name = "TCP", 1188 .prev_layer = SFC_FLOW_ITEM_L3, 1189 .layer = SFC_FLOW_ITEM_L4, 1190 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, 1191 .parse = sfc_flow_parse_tcp, 1192 }, 1193 { 1194 .type = RTE_FLOW_ITEM_TYPE_UDP, 1195 .name = "UDP", 1196 .prev_layer = SFC_FLOW_ITEM_L3, 1197 .layer = SFC_FLOW_ITEM_L4, 1198 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, 1199 .parse = sfc_flow_parse_udp, 1200 }, 1201 { 1202 .type = RTE_FLOW_ITEM_TYPE_VXLAN, 1203 .name = "VXLAN", 1204 .prev_layer = SFC_FLOW_ITEM_L4, 1205 .layer = SFC_FLOW_ITEM_START_LAYER, 1206 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, 1207 .parse = sfc_flow_parse_vxlan, 1208 }, 1209 { 1210 .type = RTE_FLOW_ITEM_TYPE_GENEVE, 1211 .name = "GENEVE", 1212 .prev_layer = SFC_FLOW_ITEM_L4, 1213 .layer = SFC_FLOW_ITEM_START_LAYER, 1214 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, 1215 .parse = sfc_flow_parse_geneve, 1216 }, 1217 { 1218 .type = RTE_FLOW_ITEM_TYPE_NVGRE, 1219 .name = "NVGRE", 1220 .prev_layer = SFC_FLOW_ITEM_L3, 1221 .layer = SFC_FLOW_ITEM_START_LAYER, 1222 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, 1223 .parse = sfc_flow_parse_nvgre, 1224 }, 1225 }; 1226 1227 /* 1228 * Protocol-independent flow API support 1229 */ 1230 static int 1231 sfc_flow_parse_attr(struct sfc_adapter *sa, 1232 const struct rte_flow_attr *attr, 1233 struct rte_flow *flow, 1234 struct rte_flow_error *error) 1235 { 1236 struct sfc_flow_spec *spec = &flow->spec; 1237 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 1238 struct sfc_flow_spec_mae *spec_mae = &spec->mae; 1239 struct sfc_mae *mae = &sa->mae; 1240 1241 if (attr == NULL) { 1242 rte_flow_error_set(error, EINVAL, 1243 RTE_FLOW_ERROR_TYPE_ATTR, NULL, 1244 "NULL attribute"); 1245 return -rte_errno; 1246 } 1247 if (attr->group != 0) { 1248 rte_flow_error_set(error, ENOTSUP, 1249 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr, 1250 "Groups are not supported"); 1251 return -rte_errno; 1252 } 1253 if (attr->egress != 0) { 1254 rte_flow_error_set(error, ENOTSUP, 1255 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr, 1256 "Egress is not supported"); 1257 return -rte_errno; 1258 } 1259 if (attr->ingress == 0) { 1260 rte_flow_error_set(error, ENOTSUP, 1261 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr, 1262 "Ingress is compulsory"); 1263 return -rte_errno; 1264 } 1265 if (attr->transfer == 0) { 1266 if (attr->priority != 0) { 1267 rte_flow_error_set(error, ENOTSUP, 1268 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, 1269 attr, "Priorities are unsupported"); 1270 return -rte_errno; 1271 } 
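		/* Non-transfer rules are backed by VNIC (filter-based) specs */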
1272 spec->type = SFC_FLOW_SPEC_FILTER; 1273 spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX; 1274 spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT; 1275 spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL; 1276 } else { 1277 if (mae->status != SFC_MAE_STATUS_SUPPORTED) { 1278 rte_flow_error_set(error, ENOTSUP, 1279 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, 1280 attr, "Transfer is not supported"); 1281 return -rte_errno; 1282 } 1283 if (attr->priority > mae->nb_action_rule_prios_max) { 1284 rte_flow_error_set(error, ENOTSUP, 1285 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, 1286 attr, "Unsupported priority level"); 1287 return -rte_errno; 1288 } 1289 spec->type = SFC_FLOW_SPEC_MAE; 1290 spec_mae->priority = attr->priority; 1291 spec_mae->match_spec = NULL; 1292 spec_mae->action_set = NULL; 1293 spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID; 1294 } 1295 1296 return 0; 1297 } 1298 1299 /* Get item from array sfc_flow_items */ 1300 static const struct sfc_flow_item * 1301 sfc_flow_get_item(const struct sfc_flow_item *items, 1302 unsigned int nb_items, 1303 enum rte_flow_item_type type) 1304 { 1305 unsigned int i; 1306 1307 for (i = 0; i < nb_items; i++) 1308 if (items[i].type == type) 1309 return &items[i]; 1310 1311 return NULL; 1312 } 1313 1314 int 1315 sfc_flow_parse_pattern(struct sfc_adapter *sa, 1316 const struct sfc_flow_item *flow_items, 1317 unsigned int nb_flow_items, 1318 const struct rte_flow_item pattern[], 1319 struct sfc_flow_parse_ctx *parse_ctx, 1320 struct rte_flow_error *error) 1321 { 1322 int rc; 1323 unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER; 1324 boolean_t is_ifrm = B_FALSE; 1325 const struct sfc_flow_item *item; 1326 1327 if (pattern == NULL) { 1328 rte_flow_error_set(error, EINVAL, 1329 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL, 1330 "NULL pattern"); 1331 return -rte_errno; 1332 } 1333 1334 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) { 1335 item = sfc_flow_get_item(flow_items, nb_flow_items, 1336 pattern->type); 1337 if (item == NULL) { 1338 rte_flow_error_set(error, ENOTSUP, 1339 RTE_FLOW_ERROR_TYPE_ITEM, pattern, 1340 "Unsupported pattern item"); 1341 return -rte_errno; 1342 } 1343 1344 /* 1345 * Omitting one or several protocol layers at the beginning 1346 * of pattern is supported 1347 */ 1348 if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER && 1349 prev_layer != SFC_FLOW_ITEM_ANY_LAYER && 1350 item->prev_layer != prev_layer) { 1351 rte_flow_error_set(error, ENOTSUP, 1352 RTE_FLOW_ERROR_TYPE_ITEM, pattern, 1353 "Unexpected sequence of pattern items"); 1354 return -rte_errno; 1355 } 1356 1357 /* 1358 * Allow only VOID and ETH pattern items in the inner frame. 1359 * Also check that there is only one tunneling protocol. 
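		 * For example, ETH / IPV4 / UDP / VXLAN / ETH is a valid
		 * sequence, whereas a second tunnel item, or any inner frame
		 * item other than VOID and ETH in a filter-based rule, is
		 * rejected.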
1360 */ 1361 switch (item->type) { 1362 case RTE_FLOW_ITEM_TYPE_VOID: 1363 case RTE_FLOW_ITEM_TYPE_ETH: 1364 break; 1365 1366 case RTE_FLOW_ITEM_TYPE_VXLAN: 1367 case RTE_FLOW_ITEM_TYPE_GENEVE: 1368 case RTE_FLOW_ITEM_TYPE_NVGRE: 1369 if (is_ifrm) { 1370 rte_flow_error_set(error, EINVAL, 1371 RTE_FLOW_ERROR_TYPE_ITEM, 1372 pattern, 1373 "More than one tunneling protocol"); 1374 return -rte_errno; 1375 } 1376 is_ifrm = B_TRUE; 1377 break; 1378 1379 default: 1380 if (parse_ctx->type == SFC_FLOW_PARSE_CTX_FILTER && 1381 is_ifrm) { 1382 rte_flow_error_set(error, EINVAL, 1383 RTE_FLOW_ERROR_TYPE_ITEM, 1384 pattern, 1385 "There is an unsupported pattern item " 1386 "in the inner frame"); 1387 return -rte_errno; 1388 } 1389 break; 1390 } 1391 1392 if (parse_ctx->type != item->ctx_type) { 1393 rte_flow_error_set(error, EINVAL, 1394 RTE_FLOW_ERROR_TYPE_ITEM, pattern, 1395 "Parse context type mismatch"); 1396 return -rte_errno; 1397 } 1398 1399 rc = item->parse(pattern, parse_ctx, error); 1400 if (rc != 0) { 1401 sfc_err(sa, "failed to parse item %s: %s", 1402 item->name, strerror(-rc)); 1403 return rc; 1404 } 1405 1406 if (item->layer != SFC_FLOW_ITEM_ANY_LAYER) 1407 prev_layer = item->layer; 1408 } 1409 1410 return 0; 1411 } 1412 1413 static int 1414 sfc_flow_parse_queue(struct sfc_adapter *sa, 1415 const struct rte_flow_action_queue *queue, 1416 struct rte_flow *flow) 1417 { 1418 struct sfc_flow_spec *spec = &flow->spec; 1419 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 1420 struct sfc_rxq *rxq; 1421 struct sfc_rxq_info *rxq_info; 1422 1423 if (queue->index >= sfc_sa2shared(sa)->ethdev_rxq_count) 1424 return -EINVAL; 1425 1426 rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, queue->index); 1427 spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index; 1428 1429 rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index]; 1430 spec_filter->rss_hash_required = !!(rxq_info->rxq_flags & 1431 SFC_RXQ_FLAG_RSS_HASH); 1432 1433 return 0; 1434 } 1435 1436 static int 1437 sfc_flow_parse_rss(struct sfc_adapter *sa, 1438 const struct rte_flow_action_rss *action_rss, 1439 struct rte_flow *flow) 1440 { 1441 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); 1442 struct sfc_rss *rss = &sas->rss; 1443 sfc_ethdev_qid_t ethdev_qid; 1444 struct sfc_rxq *rxq; 1445 unsigned int rxq_hw_index_min; 1446 unsigned int rxq_hw_index_max; 1447 efx_rx_hash_type_t efx_hash_types; 1448 const uint8_t *rss_key; 1449 struct sfc_flow_spec *spec = &flow->spec; 1450 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 1451 struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf; 1452 unsigned int i; 1453 1454 if (action_rss->queue_num == 0) 1455 return -EINVAL; 1456 1457 ethdev_qid = sfc_sa2shared(sa)->ethdev_rxq_count - 1; 1458 rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid); 1459 rxq_hw_index_min = rxq->hw_index; 1460 rxq_hw_index_max = 0; 1461 1462 for (i = 0; i < action_rss->queue_num; ++i) { 1463 ethdev_qid = action_rss->queue[i]; 1464 1465 if ((unsigned int)ethdev_qid >= 1466 sfc_sa2shared(sa)->ethdev_rxq_count) 1467 return -EINVAL; 1468 1469 rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid); 1470 1471 if (rxq->hw_index < rxq_hw_index_min) 1472 rxq_hw_index_min = rxq->hw_index; 1473 1474 if (rxq->hw_index > rxq_hw_index_max) 1475 rxq_hw_index_max = rxq->hw_index; 1476 } 1477 1478 switch (action_rss->func) { 1479 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1480 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1481 break; 1482 default: 1483 return -EINVAL; 1484 } 1485 1486 if (action_rss->level) 1487 return -EINVAL; 1488 1489 /* 1490 
* Dummy RSS action with only one queue and no specific settings 1491 * for hash types and key does not require dedicated RSS context 1492 * and may be simplified to single queue action. 1493 */ 1494 if (action_rss->queue_num == 1 && action_rss->types == 0 && 1495 action_rss->key_len == 0) { 1496 spec_filter->template.efs_dmaq_id = rxq_hw_index_min; 1497 return 0; 1498 } 1499 1500 if (action_rss->types) { 1501 int rc; 1502 1503 rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types, 1504 &efx_hash_types); 1505 if (rc != 0) 1506 return -rc; 1507 } else { 1508 unsigned int i; 1509 1510 efx_hash_types = 0; 1511 for (i = 0; i < rss->hf_map_nb_entries; ++i) 1512 efx_hash_types |= rss->hf_map[i].efx; 1513 } 1514 1515 if (action_rss->key_len) { 1516 if (action_rss->key_len != sizeof(rss->key)) 1517 return -EINVAL; 1518 1519 rss_key = action_rss->key; 1520 } else { 1521 rss_key = rss->key; 1522 } 1523 1524 spec_filter->rss = B_TRUE; 1525 1526 sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min; 1527 sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max; 1528 sfc_rss_conf->rss_hash_types = efx_hash_types; 1529 rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key)); 1530 1531 for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) { 1532 unsigned int nb_queues = action_rss->queue_num; 1533 struct sfc_rxq *rxq; 1534 1535 ethdev_qid = action_rss->queue[i % nb_queues]; 1536 rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid); 1537 sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min; 1538 } 1539 1540 return 0; 1541 } 1542 1543 static int 1544 sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec, 1545 unsigned int filters_count) 1546 { 1547 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 1548 unsigned int i; 1549 int ret = 0; 1550 1551 for (i = 0; i < filters_count; i++) { 1552 int rc; 1553 1554 rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]); 1555 if (ret == 0 && rc != 0) { 1556 sfc_err(sa, "failed to remove filter specification " 1557 "(rc = %d)", rc); 1558 ret = rc; 1559 } 1560 } 1561 1562 return ret; 1563 } 1564 1565 static int 1566 sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec) 1567 { 1568 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 1569 unsigned int i; 1570 int rc = 0; 1571 1572 for (i = 0; i < spec_filter->count; i++) { 1573 rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]); 1574 if (rc != 0) { 1575 sfc_flow_spec_flush(sa, spec, i); 1576 break; 1577 } 1578 } 1579 1580 return rc; 1581 } 1582 1583 static int 1584 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec) 1585 { 1586 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 1587 1588 return sfc_flow_spec_flush(sa, spec, spec_filter->count); 1589 } 1590 1591 static int 1592 sfc_flow_filter_insert(struct sfc_adapter *sa, 1593 struct rte_flow *flow) 1594 { 1595 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); 1596 struct sfc_rss *rss = &sas->rss; 1597 struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter; 1598 struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf; 1599 uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT; 1600 boolean_t create_context; 1601 unsigned int i; 1602 int rc = 0; 1603 1604 create_context = spec_filter->rss || (spec_filter->rss_hash_required && 1605 rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT); 1606 1607 if (create_context) { 1608 unsigned int rss_spread; 1609 unsigned int rss_hash_types; 1610 uint8_t *rss_key; 1611 1612 if (spec_filter->rss) { 1613 rss_spread = MIN(flow_rss->rxq_hw_index_max - 
1614 flow_rss->rxq_hw_index_min + 1, 1615 EFX_MAXRSS); 1616 rss_hash_types = flow_rss->rss_hash_types; 1617 rss_key = flow_rss->rss_key; 1618 } else { 1619 /* 1620 * Initialize dummy RSS context parameters to have 1621 * valid RSS hash. Use default RSS hash function and 1622 * key. 1623 */ 1624 rss_spread = 1; 1625 rss_hash_types = rss->hash_types; 1626 rss_key = rss->key; 1627 } 1628 1629 rc = efx_rx_scale_context_alloc(sa->nic, 1630 EFX_RX_SCALE_EXCLUSIVE, 1631 rss_spread, 1632 &efs_rss_context); 1633 if (rc != 0) 1634 goto fail_scale_context_alloc; 1635 1636 rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context, 1637 rss->hash_alg, 1638 rss_hash_types, B_TRUE); 1639 if (rc != 0) 1640 goto fail_scale_mode_set; 1641 1642 rc = efx_rx_scale_key_set(sa->nic, efs_rss_context, 1643 rss_key, sizeof(rss->key)); 1644 if (rc != 0) 1645 goto fail_scale_key_set; 1646 } else { 1647 efs_rss_context = rss->dummy_rss_context; 1648 } 1649 1650 if (spec_filter->rss || spec_filter->rss_hash_required) { 1651 /* 1652 * At this point, fully elaborated filter specifications 1653 * have been produced from the template. To make sure that 1654 * RSS behaviour is consistent between them, set the same 1655 * RSS context value everywhere. 1656 */ 1657 for (i = 0; i < spec_filter->count; i++) { 1658 efx_filter_spec_t *spec = &spec_filter->filters[i]; 1659 1660 spec->efs_rss_context = efs_rss_context; 1661 spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS; 1662 if (spec_filter->rss) 1663 spec->efs_dmaq_id = flow_rss->rxq_hw_index_min; 1664 } 1665 } 1666 1667 rc = sfc_flow_spec_insert(sa, &flow->spec); 1668 if (rc != 0) 1669 goto fail_filter_insert; 1670 1671 if (create_context) { 1672 unsigned int dummy_tbl[RTE_DIM(flow_rss->rss_tbl)] = {0}; 1673 unsigned int *tbl; 1674 1675 tbl = spec_filter->rss ? 
flow_rss->rss_tbl : dummy_tbl; 1676 1677 /* 1678 * Scale table is set after filter insertion because 1679 * the table entries are relative to the base RxQ ID 1680 * and the latter is submitted to the HW by means of 1681 * inserting a filter, so by the time of the request 1682 * the HW knows all the information needed to verify 1683 * the table entries, and the operation will succeed 1684 */ 1685 rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context, 1686 tbl, RTE_DIM(flow_rss->rss_tbl)); 1687 if (rc != 0) 1688 goto fail_scale_tbl_set; 1689 1690 /* Remember created dummy RSS context */ 1691 if (!spec_filter->rss) 1692 rss->dummy_rss_context = efs_rss_context; 1693 } 1694 1695 return 0; 1696 1697 fail_scale_tbl_set: 1698 sfc_flow_spec_remove(sa, &flow->spec); 1699 1700 fail_filter_insert: 1701 fail_scale_key_set: 1702 fail_scale_mode_set: 1703 if (create_context) 1704 efx_rx_scale_context_free(sa->nic, efs_rss_context); 1705 1706 fail_scale_context_alloc: 1707 return rc; 1708 } 1709 1710 static int 1711 sfc_flow_filter_remove(struct sfc_adapter *sa, 1712 struct rte_flow *flow) 1713 { 1714 struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter; 1715 int rc = 0; 1716 1717 rc = sfc_flow_spec_remove(sa, &flow->spec); 1718 if (rc != 0) 1719 return rc; 1720 1721 if (spec_filter->rss) { 1722 /* 1723 * All specifications for a given flow rule have the same RSS 1724 * context, so that RSS context value is taken from the first 1725 * filter specification 1726 */ 1727 efx_filter_spec_t *spec = &spec_filter->filters[0]; 1728 1729 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context); 1730 } 1731 1732 return rc; 1733 } 1734 1735 static int 1736 sfc_flow_parse_mark(struct sfc_adapter *sa, 1737 const struct rte_flow_action_mark *mark, 1738 struct rte_flow *flow) 1739 { 1740 struct sfc_flow_spec *spec = &flow->spec; 1741 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 1742 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); 1743 1744 if (mark == NULL || mark->id > encp->enc_filter_action_mark_max) 1745 return EINVAL; 1746 1747 spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK; 1748 spec_filter->template.efs_mark = mark->id; 1749 1750 return 0; 1751 } 1752 1753 static int 1754 sfc_flow_parse_actions(struct sfc_adapter *sa, 1755 const struct rte_flow_action actions[], 1756 struct rte_flow *flow, 1757 struct rte_flow_error *error) 1758 { 1759 int rc; 1760 struct sfc_flow_spec *spec = &flow->spec; 1761 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 1762 const unsigned int dp_rx_features = sa->priv.dp_rx->features; 1763 const uint64_t rx_metadata = sa->negotiated_rx_metadata; 1764 uint32_t actions_set = 0; 1765 const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) | 1766 (1UL << RTE_FLOW_ACTION_TYPE_RSS) | 1767 (1UL << RTE_FLOW_ACTION_TYPE_DROP); 1768 const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) | 1769 (1UL << RTE_FLOW_ACTION_TYPE_FLAG); 1770 1771 if (actions == NULL) { 1772 rte_flow_error_set(error, EINVAL, 1773 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL, 1774 "NULL actions"); 1775 return -rte_errno; 1776 } 1777 1778 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 1779 switch (actions->type) { 1780 case RTE_FLOW_ACTION_TYPE_VOID: 1781 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID, 1782 actions_set); 1783 break; 1784 1785 case RTE_FLOW_ACTION_TYPE_QUEUE: 1786 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE, 1787 actions_set); 1788 if ((actions_set & fate_actions_mask) != 0) 1789 goto fail_fate_actions; 1790 
1791 rc = sfc_flow_parse_queue(sa, actions->conf, flow); 1792 if (rc != 0) { 1793 rte_flow_error_set(error, EINVAL, 1794 RTE_FLOW_ERROR_TYPE_ACTION, actions, 1795 "Bad QUEUE action"); 1796 return -rte_errno; 1797 } 1798 break; 1799 1800 case RTE_FLOW_ACTION_TYPE_RSS: 1801 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS, 1802 actions_set); 1803 if ((actions_set & fate_actions_mask) != 0) 1804 goto fail_fate_actions; 1805 1806 rc = sfc_flow_parse_rss(sa, actions->conf, flow); 1807 if (rc != 0) { 1808 rte_flow_error_set(error, -rc, 1809 RTE_FLOW_ERROR_TYPE_ACTION, actions, 1810 "Bad RSS action"); 1811 return -rte_errno; 1812 } 1813 break; 1814 1815 case RTE_FLOW_ACTION_TYPE_DROP: 1816 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP, 1817 actions_set); 1818 if ((actions_set & fate_actions_mask) != 0) 1819 goto fail_fate_actions; 1820 1821 spec_filter->template.efs_dmaq_id = 1822 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP; 1823 break; 1824 1825 case RTE_FLOW_ACTION_TYPE_FLAG: 1826 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG, 1827 actions_set); 1828 if ((actions_set & mark_actions_mask) != 0) 1829 goto fail_actions_overlap; 1830 1831 if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) { 1832 rte_flow_error_set(error, ENOTSUP, 1833 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1834 "FLAG action is not supported on the current Rx datapath"); 1835 return -rte_errno; 1836 } else if ((rx_metadata & 1837 RTE_ETH_RX_METADATA_USER_FLAG) == 0) { 1838 rte_flow_error_set(error, ENOTSUP, 1839 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1840 "flag delivery has not been negotiated"); 1841 return -rte_errno; 1842 } 1843 1844 spec_filter->template.efs_flags |= 1845 EFX_FILTER_FLAG_ACTION_FLAG; 1846 break; 1847 1848 case RTE_FLOW_ACTION_TYPE_MARK: 1849 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK, 1850 actions_set); 1851 if ((actions_set & mark_actions_mask) != 0) 1852 goto fail_actions_overlap; 1853 1854 if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) { 1855 rte_flow_error_set(error, ENOTSUP, 1856 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1857 "MARK action is not supported on the current Rx datapath"); 1858 return -rte_errno; 1859 } else if ((rx_metadata & 1860 RTE_ETH_RX_METADATA_USER_MARK) == 0) { 1861 rte_flow_error_set(error, ENOTSUP, 1862 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1863 "mark delivery has not been negotiated"); 1864 return -rte_errno; 1865 } 1866 1867 rc = sfc_flow_parse_mark(sa, actions->conf, flow); 1868 if (rc != 0) { 1869 rte_flow_error_set(error, rc, 1870 RTE_FLOW_ERROR_TYPE_ACTION, actions, 1871 "Bad MARK action"); 1872 return -rte_errno; 1873 } 1874 break; 1875 1876 default: 1877 rte_flow_error_set(error, ENOTSUP, 1878 RTE_FLOW_ERROR_TYPE_ACTION, actions, 1879 "Action is not supported"); 1880 return -rte_errno; 1881 } 1882 1883 actions_set |= (1UL << actions->type); 1884 } 1885 1886 /* When fate is unknown, drop traffic. 
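	 * E.g. a rule whose action list contains only VOID, FLAG or MARK
	 * actions sends the matched packets to the drop queue.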
*/ 1887 if ((actions_set & fate_actions_mask) == 0) { 1888 spec_filter->template.efs_dmaq_id = 1889 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP; 1890 } 1891 1892 return 0; 1893 1894 fail_fate_actions: 1895 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions, 1896 "Cannot combine several fate-deciding actions, " 1897 "choose between QUEUE, RSS or DROP"); 1898 return -rte_errno; 1899 1900 fail_actions_overlap: 1901 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions, 1902 "Overlapping actions are not supported"); 1903 return -rte_errno; 1904 } 1905 1906 /** 1907 * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST 1908 * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same 1909 * specifications after copying. 1910 * 1911 * @param spec[in, out] 1912 * SFC flow specification to update. 1913 * @param filters_count_for_one_val[in] 1914 * How many specifications should have the same match flag, what is the 1915 * number of specifications before copying. 1916 * @param error[out] 1917 * Perform verbose error reporting if not NULL. 1918 */ 1919 static int 1920 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec, 1921 unsigned int filters_count_for_one_val, 1922 struct rte_flow_error *error) 1923 { 1924 unsigned int i; 1925 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 1926 static const efx_filter_match_flags_t vals[] = { 1927 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, 1928 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST 1929 }; 1930 1931 if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) { 1932 rte_flow_error_set(error, EINVAL, 1933 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1934 "Number of specifications is incorrect while copying " 1935 "by unknown destination flags"); 1936 return -rte_errno; 1937 } 1938 1939 for (i = 0; i < spec_filter->count; i++) { 1940 /* The check above ensures that divisor can't be zero here */ 1941 spec_filter->filters[i].efs_match_flags |= 1942 vals[i / filters_count_for_one_val]; 1943 } 1944 1945 return 0; 1946 } 1947 1948 /** 1949 * Check that the following conditions are met: 1950 * - the list of supported filters has a filter 1951 * with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of 1952 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also 1953 * be inserted. 1954 * 1955 * @param match[in] 1956 * The match flags of filter. 1957 * @param spec[in] 1958 * Specification to be supplemented. 1959 * @param filter[in] 1960 * SFC filter with list of supported filters. 1961 */ 1962 static boolean_t 1963 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match, 1964 __rte_unused efx_filter_spec_t *spec, 1965 struct sfc_filter *filter) 1966 { 1967 unsigned int i; 1968 efx_filter_match_flags_t match_mcast_dst; 1969 1970 match_mcast_dst = 1971 (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) | 1972 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST; 1973 for (i = 0; i < filter->supported_match_num; i++) { 1974 if (match_mcast_dst == filter->supported_match[i]) 1975 return B_TRUE; 1976 } 1977 1978 return B_FALSE; 1979 } 1980 1981 /** 1982 * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and 1983 * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same 1984 * specifications after copying. 1985 * 1986 * @param spec[in, out] 1987 * SFC flow specification to update. 1988 * @param filters_count_for_one_val[in] 1989 * How many specifications should have the same EtherType value, what is the 1990 * number of specifications before copying. 
1991  * @param error[out]
1992  *   Perform verbose error reporting if not NULL.
1993  */
1994 static int
1995 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1996 			unsigned int filters_count_for_one_val,
1997 			struct rte_flow_error *error)
1998 {
1999 	unsigned int i;
2000 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2001 	static const uint16_t vals[] = {
2002 		EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
2003 	};
2004
2005 	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
2006 		rte_flow_error_set(error, EINVAL,
2007 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2008 			"Number of specifications is incorrect "
2009 			"while copying by Ethertype");
2010 		return -rte_errno;
2011 	}
2012
2013 	for (i = 0; i < spec_filter->count; i++) {
2014 		spec_filter->filters[i].efs_match_flags |=
2015 			EFX_FILTER_MATCH_ETHER_TYPE;
2016
2017 		/*
2018 		 * The check above ensures that
2019 		 * filters_count_for_one_val is not 0
2020 		 */
2021 		spec_filter->filters[i].efs_ether_type =
2022 			vals[i / filters_count_for_one_val];
2023 	}
2024
2025 	return 0;
2026 }
2027
2028 /**
2029  * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
2030  * in the same specifications after copying.
2031  *
2032  * @param spec[in, out]
2033  *   SFC flow specification to update.
2034  * @param filters_count_for_one_val[in]
2035  *   How many specifications should have the same match flag, i.e. the
2036  *   number of specifications before copying.
2037  * @param error[out]
2038  *   Perform verbose error reporting if not NULL.
2039  */
2040 static int
2041 sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
2042 			unsigned int filters_count_for_one_val,
2043 			struct rte_flow_error *error)
2044 {
2045 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2046 	unsigned int i;
2047
2048 	if (filters_count_for_one_val != spec_filter->count) {
2049 		rte_flow_error_set(error, EINVAL,
2050 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2051 			"Number of specifications is incorrect "
2052 			"while copying by outer VLAN ID");
2053 		return -rte_errno;
2054 	}
2055
2056 	for (i = 0; i < spec_filter->count; i++) {
2057 		spec_filter->filters[i].efs_match_flags |=
2058 			EFX_FILTER_MATCH_OUTER_VID;
2059
2060 		spec_filter->filters[i].efs_outer_vid = 0;
2061 	}
2062
2063 	return 0;
2064 }
2065
2066 /**
2067  * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
2068  * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
2069  * specifications after copying.
2070  *
2071  * @param spec[in, out]
2072  *   SFC flow specification to update.
2073  * @param filters_count_for_one_val[in]
2074  *   How many specifications should have the same match flag, i.e. the
2075  *   number of specifications before copying.
2076  * @param error[out]
2077  *   Perform verbose error reporting if not NULL.
2078  */
2079 static int
2080 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
2081 			unsigned int filters_count_for_one_val,
2082 			struct rte_flow_error *error)
2083 {
2084 	unsigned int i;
2085 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2086 	static const efx_filter_match_flags_t vals[] = {
2087 		EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2088 		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
2089 	};
2090
2091 	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
2092 		rte_flow_error_set(error, EINVAL,
2093 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2094 			"Number of specifications is incorrect while copying "
2095 			"by inner frame unknown destination flags");
2096 		return -rte_errno;
2097 	}
2098
2099 	for (i = 0; i < spec_filter->count; i++) {
2100 		/* The check above ensures that divisor can't be zero here */
2101 		spec_filter->filters[i].efs_match_flags |=
2102 			vals[i / filters_count_for_one_val];
2103 	}
2104
2105 	return 0;
2106 }
2107
2108 /**
2109  * Check that the following conditions are met:
2110  * - the specification corresponds to a filter for encapsulated traffic
2111  * - the list of supported filters has a filter
2112  *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
2113  *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
2114  *   be inserted.
2115  *
2116  * @param match[in]
2117  *   The match flags of the filter.
2118  * @param spec[in]
2119  *   Specification to be supplemented.
2120  * @param filter[in]
2121  *   SFC filter with list of supported filters.
2122  */
2123 static boolean_t
2124 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
2125 				efx_filter_spec_t *spec,
2126 				struct sfc_filter *filter)
2127 {
2128 	unsigned int i;
2129 	efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
2130 	efx_filter_match_flags_t match_mcast_dst;
2131
2132 	if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
2133 		return B_FALSE;
2134
2135 	match_mcast_dst =
2136 		(match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
2137 		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
2138 	for (i = 0; i < filter->supported_match_num; i++) {
2139 		if (match_mcast_dst == filter->supported_match[i])
2140 			return B_TRUE;
2141 	}
2142
2143 	return B_FALSE;
2144 }
2145
2146 /**
2147  * Check that the list of supported filters has a filter that differs
2148  * from @p match in that it has no EFX_FILTER_MATCH_OUTER_VID flag;
2149  * in this case that filter will be used and the
2150  * EFX_FILTER_MATCH_OUTER_VID flag is not needed.
2151  *
2152  * @param match[in]
2153  *   The match flags of the filter.
2154  * @param spec[in]
2155  *   Specification to be supplemented.
2156  * @param filter[in]
2157  *   SFC filter with list of supported filters.
2158  */
2159 static boolean_t
2160 sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
2161 				__rte_unused efx_filter_spec_t *spec,
2162 				struct sfc_filter *filter)
2163 {
2164 	unsigned int i;
2165 	efx_filter_match_flags_t match_without_vid =
2166 		match & ~EFX_FILTER_MATCH_OUTER_VID;
2167
2168 	for (i = 0; i < filter->supported_match_num; i++) {
2169 		if (match_without_vid == filter->supported_match[i])
2170 			return B_FALSE;
2171 	}
2172
2173 	return B_TRUE;
2174 }
2175
2176 /*
2177  * Match flags that can be automatically added to filters.
2178  * Selecting the last minimum when searching for the copy flag ensures that the
2179  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than
2180  * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter
2181  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported
2182  * filters.
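 * (The "last minimum" refers to the '<=' comparison in
 * sfc_flow_spec_filters_complete(): among supported filters that need the
 * fewest copies, the one scanned last wins, and the filter with
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST comes last in that list.)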
2183  */
2184 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
2185 	{
2186 		.flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
2187 		.vals_count = 2,
2188 		.set_vals = sfc_flow_set_unknown_dst_flags,
2189 		.spec_check = sfc_flow_check_unknown_dst_flags,
2190 	},
2191 	{
2192 		.flag = EFX_FILTER_MATCH_ETHER_TYPE,
2193 		.vals_count = 2,
2194 		.set_vals = sfc_flow_set_ethertypes,
2195 		.spec_check = NULL,
2196 	},
2197 	{
2198 		.flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2199 		.vals_count = 2,
2200 		.set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
2201 		.spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
2202 	},
2203 	{
2204 		.flag = EFX_FILTER_MATCH_OUTER_VID,
2205 		.vals_count = 1,
2206 		.set_vals = sfc_flow_set_outer_vid_flag,
2207 		.spec_check = sfc_flow_check_outer_vid_flag,
2208 	},
2209 };
2210
2211 /* Get an item from the sfc_flow_copy_flags array */
2212 static const struct sfc_flow_copy_flag *
2213 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
2214 {
2215 	unsigned int i;
2216
2217 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2218 		if (sfc_flow_copy_flags[i].flag == flag)
2219 			return &sfc_flow_copy_flags[i];
2220 	}
2221
2222 	return NULL;
2223 }
2224
2225 /**
2226  * Make copies of the specifications and set the match flag and the
2227  * values of the corresponding field in them.
2228  *
2229  * @param spec[in, out]
2230  *   SFC flow specification to update.
2231  * @param flag[in]
2232  *   The match flag to add.
2233  * @param error[out]
2234  *   Perform verbose error reporting if not NULL.
2235  */
2236 static int
2237 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
2238 			efx_filter_match_flags_t flag,
2239 			struct rte_flow_error *error)
2240 {
2241 	unsigned int i;
2242 	unsigned int new_filters_count;
2243 	unsigned int filters_count_for_one_val;
2244 	const struct sfc_flow_copy_flag *copy_flag;
2245 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2246 	int rc;
2247
2248 	copy_flag = sfc_flow_get_copy_flag(flag);
2249 	if (copy_flag == NULL) {
2250 		rte_flow_error_set(error, ENOTSUP,
2251 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2252 			"Unsupported spec field for copying");
2253 		return -rte_errno;
2254 	}
2255
2256 	new_filters_count = spec_filter->count * copy_flag->vals_count;
2257 	if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
2258 		rte_flow_error_set(error, EINVAL,
2259 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2260 			"Too many EFX specifications in the flow rule");
2261 		return -rte_errno;
2262 	}
2263
2264 	/* Copy the filter specifications */
2265 	for (i = spec_filter->count; i < new_filters_count; i++) {
2266 		spec_filter->filters[i] =
2267 			spec_filter->filters[i - spec_filter->count];
2268 	}
2269
2270 	filters_count_for_one_val = spec_filter->count;
2271 	spec_filter->count = new_filters_count;
2272
2273 	rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
2274 	if (rc != 0)
2275 		return rc;
2276
2277 	return 0;
2278 }
2279
2280 /**
2281  * Check that the given set of match flags missing in the original filter spec
2282  * could be covered by adding spec copies which specify the corresponding
2283  * flags and packet field values to match.
2284  *
2285  * @param miss_flags[in]
2286  *   Match flags that the supported filter has but the specification lacks.
2287  * @param spec[in]
2288  *   Specification to be supplemented.
2289  * @param filter[in]
2290  *   SFC filter.
2291  *
2292  * @return
2293  *   Number of specifications after copying, or 0 if the flags cannot be added.
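 *   For example, if both EFX_FILTER_MATCH_ETHER_TYPE and
 *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST are missing and both may be added,
 *   the result is 2 * 2 = 4.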
2294  */
2295 static unsigned int
2296 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
2297 			efx_filter_spec_t *spec,
2298 			struct sfc_filter *filter)
2299 {
2300 	unsigned int i;
2301 	efx_filter_match_flags_t copy_flags = 0;
2302 	efx_filter_match_flags_t flag;
2303 	efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
2304 	sfc_flow_spec_check *check;
2305 	unsigned int multiplier = 1;
2306
2307 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2308 		flag = sfc_flow_copy_flags[i].flag;
2309 		check = sfc_flow_copy_flags[i].spec_check;
2310 		if ((flag & miss_flags) == flag) {
2311 			if (check != NULL && (!check(match, spec, filter)))
2312 				continue;
2313
2314 			copy_flags |= flag;
2315 			multiplier *= sfc_flow_copy_flags[i].vals_count;
2316 		}
2317 	}
2318
2319 	if (copy_flags == miss_flags)
2320 		return multiplier;
2321
2322 	return 0;
2323 }
2324
2325 /**
2326  * Attempt to supplement the specification template up to a minimal
2327  * supported set of match flags. To do this, the specifications are
2328  * copied and filled with the values of the fields that correspond to
2329  * the missing flags.
2330  * The necessary and sufficient filter is built from the fewest number
2331  * of copies that can be made to cover the minimally required set
2332  * of flags.
2333  *
2334  * @param sa[in]
2335  *   SFC adapter.
2336  * @param spec[in, out]
2337  *   SFC flow specification to update.
2338  * @param error[out]
2339  *   Perform verbose error reporting if not NULL.
2340  */
2341 static int
2342 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
2343 			struct sfc_flow_spec *spec,
2344 			struct rte_flow_error *error)
2345 {
2346 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2347 	struct sfc_filter *filter = &sa->filter;
2348 	efx_filter_match_flags_t miss_flags;
2349 	efx_filter_match_flags_t min_miss_flags = 0;
2350 	efx_filter_match_flags_t match;
2351 	unsigned int min_multiplier = UINT_MAX;
2352 	unsigned int multiplier;
2353 	unsigned int i;
2354 	int rc;
2355
2356 	match = spec_filter->template.efs_match_flags;
2357 	for (i = 0; i < filter->supported_match_num; i++) {
2358 		if ((match & filter->supported_match[i]) == match) {
2359 			miss_flags = filter->supported_match[i] & (~match);
2360 			multiplier = sfc_flow_check_missing_flags(miss_flags,
2361 				&spec_filter->template, filter);
2362 			if (multiplier > 0) {
2363 				if (multiplier <= min_multiplier) {
2364 					min_multiplier = multiplier;
2365 					min_miss_flags = miss_flags;
2366 				}
2367 			}
2368 		}
2369 	}
2370
2371 	if (min_multiplier == UINT_MAX) {
2372 		rte_flow_error_set(error, ENOTSUP,
2373 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2374 			"The flow rule pattern is unsupported");
2375 		return -rte_errno;
2376 	}
2377
2378 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2379 		efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
2380
2381 		if ((flag & min_miss_flags) == flag) {
2382 			rc = sfc_flow_spec_add_match_flag(spec, flag, error);
2383 			if (rc != 0)
2384 				return rc;
2385 		}
2386 	}
2387
2388 	return 0;
2389 }
2390
2391 /**
2392  * Check whether a set of match flags corresponds to a filter. The filter
2393  * is described by match flags, which may additionally be extended with the
2394  * OUTER_VID and INNER_VID flags.
2395  *
2396  * @param match_flags[in]
2397  *   Set of match flags.
2398  * @param flags_pattern[in]
2399  *   Pattern of filter match flags.
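 *   For example, EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID
 *   is considered a match with the pattern EFX_FILTER_MATCH_ETHER_TYPE,
 *   whereas EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_MAC is not.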
2400 */ 2401 static boolean_t 2402 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags, 2403 efx_filter_match_flags_t flags_pattern) 2404 { 2405 if ((match_flags & flags_pattern) != flags_pattern) 2406 return B_FALSE; 2407 2408 switch (match_flags & ~flags_pattern) { 2409 case 0: 2410 case EFX_FILTER_MATCH_OUTER_VID: 2411 case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID: 2412 return B_TRUE; 2413 default: 2414 return B_FALSE; 2415 } 2416 } 2417 2418 /** 2419 * Check whether the spec maps to a hardware filter which is known to be 2420 * ineffective despite being valid. 2421 * 2422 * @param filter[in] 2423 * SFC filter with list of supported filters. 2424 * @param spec[in] 2425 * SFC flow specification. 2426 */ 2427 static boolean_t 2428 sfc_flow_is_match_flags_exception(struct sfc_filter *filter, 2429 struct sfc_flow_spec *spec) 2430 { 2431 unsigned int i; 2432 uint16_t ether_type; 2433 uint8_t ip_proto; 2434 efx_filter_match_flags_t match_flags; 2435 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 2436 2437 for (i = 0; i < spec_filter->count; i++) { 2438 match_flags = spec_filter->filters[i].efs_match_flags; 2439 2440 if (sfc_flow_is_match_with_vids(match_flags, 2441 EFX_FILTER_MATCH_ETHER_TYPE) || 2442 sfc_flow_is_match_with_vids(match_flags, 2443 EFX_FILTER_MATCH_ETHER_TYPE | 2444 EFX_FILTER_MATCH_LOC_MAC)) { 2445 ether_type = spec_filter->filters[i].efs_ether_type; 2446 if (filter->supports_ip_proto_or_addr_filter && 2447 (ether_type == EFX_ETHER_TYPE_IPV4 || 2448 ether_type == EFX_ETHER_TYPE_IPV6)) 2449 return B_TRUE; 2450 } else if (sfc_flow_is_match_with_vids(match_flags, 2451 EFX_FILTER_MATCH_ETHER_TYPE | 2452 EFX_FILTER_MATCH_IP_PROTO) || 2453 sfc_flow_is_match_with_vids(match_flags, 2454 EFX_FILTER_MATCH_ETHER_TYPE | 2455 EFX_FILTER_MATCH_IP_PROTO | 2456 EFX_FILTER_MATCH_LOC_MAC)) { 2457 ip_proto = spec_filter->filters[i].efs_ip_proto; 2458 if (filter->supports_rem_or_local_port_filter && 2459 (ip_proto == EFX_IPPROTO_TCP || 2460 ip_proto == EFX_IPPROTO_UDP)) 2461 return B_TRUE; 2462 } 2463 } 2464 2465 return B_FALSE; 2466 } 2467 2468 static int 2469 sfc_flow_validate_match_flags(struct sfc_adapter *sa, 2470 struct rte_flow *flow, 2471 struct rte_flow_error *error) 2472 { 2473 struct sfc_flow_spec *spec = &flow->spec; 2474 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 2475 efx_filter_spec_t *spec_tmpl = &spec_filter->template; 2476 efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags; 2477 int rc; 2478 2479 /* Initialize the first filter spec with template */ 2480 spec_filter->filters[0] = *spec_tmpl; 2481 spec_filter->count = 1; 2482 2483 if (!sfc_filter_is_match_supported(sa, match_flags)) { 2484 rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error); 2485 if (rc != 0) 2486 return rc; 2487 } 2488 2489 if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) { 2490 rte_flow_error_set(error, ENOTSUP, 2491 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2492 "The flow rule pattern is unsupported"); 2493 return -rte_errno; 2494 } 2495 2496 return 0; 2497 } 2498 2499 static int 2500 sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev, 2501 const struct rte_flow_item pattern[], 2502 const struct rte_flow_action actions[], 2503 struct rte_flow *flow, 2504 struct rte_flow_error *error) 2505 { 2506 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2507 struct sfc_flow_spec *spec = &flow->spec; 2508 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 2509 struct sfc_flow_parse_ctx ctx; 2510 int rc; 2511 2512 
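	/*
	 * Parse with the filter-based (VNIC) backend: fill in the filter
	 * template from the pattern, then parse the actions and, finally,
	 * expand the template into a set of supported hardware filters.
	 */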
ctx.type = SFC_FLOW_PARSE_CTX_FILTER; 2513 ctx.filter = &spec_filter->template; 2514 2515 rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items), 2516 pattern, &ctx, error); 2517 if (rc != 0) 2518 goto fail_bad_value; 2519 2520 rc = sfc_flow_parse_actions(sa, actions, flow, error); 2521 if (rc != 0) 2522 goto fail_bad_value; 2523 2524 rc = sfc_flow_validate_match_flags(sa, flow, error); 2525 if (rc != 0) 2526 goto fail_bad_value; 2527 2528 return 0; 2529 2530 fail_bad_value: 2531 return rc; 2532 } 2533 2534 static int 2535 sfc_flow_parse_rte_to_mae(struct rte_eth_dev *dev, 2536 const struct rte_flow_item pattern[], 2537 const struct rte_flow_action actions[], 2538 struct rte_flow *flow, 2539 struct rte_flow_error *error) 2540 { 2541 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2542 struct sfc_flow_spec *spec = &flow->spec; 2543 struct sfc_flow_spec_mae *spec_mae = &spec->mae; 2544 int rc; 2545 2546 rc = sfc_mae_rule_parse_pattern(sa, pattern, spec_mae, error); 2547 if (rc != 0) 2548 return rc; 2549 2550 rc = sfc_mae_rule_parse_actions(sa, actions, spec_mae, error); 2551 if (rc != 0) 2552 return rc; 2553 2554 return 0; 2555 } 2556 2557 static int 2558 sfc_flow_parse(struct rte_eth_dev *dev, 2559 const struct rte_flow_attr *attr, 2560 const struct rte_flow_item pattern[], 2561 const struct rte_flow_action actions[], 2562 struct rte_flow *flow, 2563 struct rte_flow_error *error) 2564 { 2565 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2566 const struct sfc_flow_ops_by_spec *ops; 2567 int rc; 2568 2569 rc = sfc_flow_parse_attr(sa, attr, flow, error); 2570 if (rc != 0) 2571 return rc; 2572 2573 ops = sfc_flow_get_ops_by_spec(flow); 2574 if (ops == NULL || ops->parse == NULL) { 2575 rte_flow_error_set(error, ENOTSUP, 2576 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2577 "No backend to handle this flow"); 2578 return -rte_errno; 2579 } 2580 2581 return ops->parse(dev, pattern, actions, flow, error); 2582 } 2583 2584 static struct rte_flow * 2585 sfc_flow_zmalloc(struct rte_flow_error *error) 2586 { 2587 struct rte_flow *flow; 2588 2589 flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0); 2590 if (flow == NULL) { 2591 rte_flow_error_set(error, ENOMEM, 2592 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2593 "Failed to allocate memory"); 2594 } 2595 2596 return flow; 2597 } 2598 2599 static void 2600 sfc_flow_free(struct sfc_adapter *sa, struct rte_flow *flow) 2601 { 2602 const struct sfc_flow_ops_by_spec *ops; 2603 2604 ops = sfc_flow_get_ops_by_spec(flow); 2605 if (ops != NULL && ops->cleanup != NULL) 2606 ops->cleanup(sa, flow); 2607 2608 rte_free(flow); 2609 } 2610 2611 static int 2612 sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow, 2613 struct rte_flow_error *error) 2614 { 2615 const struct sfc_flow_ops_by_spec *ops; 2616 int rc; 2617 2618 ops = sfc_flow_get_ops_by_spec(flow); 2619 if (ops == NULL || ops->insert == NULL) { 2620 rte_flow_error_set(error, ENOTSUP, 2621 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2622 "No backend to handle this flow"); 2623 return rte_errno; 2624 } 2625 2626 rc = ops->insert(sa, flow); 2627 if (rc != 0) { 2628 rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2629 NULL, "Failed to insert the flow rule"); 2630 } 2631 2632 return rc; 2633 } 2634 2635 static int 2636 sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow, 2637 struct rte_flow_error *error) 2638 { 2639 const struct sfc_flow_ops_by_spec *ops; 2640 int rc; 2641 2642 ops = sfc_flow_get_ops_by_spec(flow); 2643 if (ops == NULL || ops->remove == NULL) { 
2644 rte_flow_error_set(error, ENOTSUP, 2645 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2646 "No backend to handle this flow"); 2647 return rte_errno; 2648 } 2649 2650 rc = ops->remove(sa, flow); 2651 if (rc != 0) { 2652 rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2653 NULL, "Failed to remove the flow rule"); 2654 } 2655 2656 return rc; 2657 } 2658 2659 static int 2660 sfc_flow_verify(struct sfc_adapter *sa, struct rte_flow *flow, 2661 struct rte_flow_error *error) 2662 { 2663 const struct sfc_flow_ops_by_spec *ops; 2664 int rc = 0; 2665 2666 ops = sfc_flow_get_ops_by_spec(flow); 2667 if (ops == NULL) { 2668 rte_flow_error_set(error, ENOTSUP, 2669 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2670 "No backend to handle this flow"); 2671 return -rte_errno; 2672 } 2673 2674 if (ops->verify != NULL) { 2675 SFC_ASSERT(sfc_adapter_is_locked(sa)); 2676 rc = ops->verify(sa, flow); 2677 } 2678 2679 if (rc != 0) { 2680 rte_flow_error_set(error, rc, 2681 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2682 "Failed to verify flow validity with FW"); 2683 return -rte_errno; 2684 } 2685 2686 return 0; 2687 } 2688 2689 static int 2690 sfc_flow_validate(struct rte_eth_dev *dev, 2691 const struct rte_flow_attr *attr, 2692 const struct rte_flow_item pattern[], 2693 const struct rte_flow_action actions[], 2694 struct rte_flow_error *error) 2695 { 2696 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2697 struct rte_flow *flow; 2698 int rc; 2699 2700 flow = sfc_flow_zmalloc(error); 2701 if (flow == NULL) 2702 return -rte_errno; 2703 2704 sfc_adapter_lock(sa); 2705 2706 rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error); 2707 if (rc == 0) 2708 rc = sfc_flow_verify(sa, flow, error); 2709 2710 sfc_flow_free(sa, flow); 2711 2712 sfc_adapter_unlock(sa); 2713 2714 return rc; 2715 } 2716 2717 static struct rte_flow * 2718 sfc_flow_create(struct rte_eth_dev *dev, 2719 const struct rte_flow_attr *attr, 2720 const struct rte_flow_item pattern[], 2721 const struct rte_flow_action actions[], 2722 struct rte_flow_error *error) 2723 { 2724 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2725 struct rte_flow *flow = NULL; 2726 int rc; 2727 2728 flow = sfc_flow_zmalloc(error); 2729 if (flow == NULL) 2730 goto fail_no_mem; 2731 2732 sfc_adapter_lock(sa); 2733 2734 rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error); 2735 if (rc != 0) 2736 goto fail_bad_value; 2737 2738 TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries); 2739 2740 if (sa->state == SFC_ETHDEV_STARTED) { 2741 rc = sfc_flow_insert(sa, flow, error); 2742 if (rc != 0) 2743 goto fail_flow_insert; 2744 } 2745 2746 sfc_adapter_unlock(sa); 2747 2748 return flow; 2749 2750 fail_flow_insert: 2751 TAILQ_REMOVE(&sa->flow_list, flow, entries); 2752 2753 fail_bad_value: 2754 sfc_flow_free(sa, flow); 2755 sfc_adapter_unlock(sa); 2756 2757 fail_no_mem: 2758 return NULL; 2759 } 2760 2761 static int 2762 sfc_flow_destroy(struct rte_eth_dev *dev, 2763 struct rte_flow *flow, 2764 struct rte_flow_error *error) 2765 { 2766 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2767 struct rte_flow *flow_ptr; 2768 int rc = EINVAL; 2769 2770 sfc_adapter_lock(sa); 2771 2772 TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) { 2773 if (flow_ptr == flow) 2774 rc = 0; 2775 } 2776 if (rc != 0) { 2777 rte_flow_error_set(error, rc, 2778 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2779 "Failed to find flow rule to destroy"); 2780 goto fail_bad_value; 2781 } 2782 2783 if (sa->state == SFC_ETHDEV_STARTED) 2784 rc = sfc_flow_remove(sa, flow, error); 2785 2786 
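	/*
	 * Unlink and free the rule even if removal from hardware failed;
	 * the error code from sfc_flow_remove() is still reported to the
	 * caller via the return value below.
	 */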
TAILQ_REMOVE(&sa->flow_list, flow, entries); 2787 sfc_flow_free(sa, flow); 2788 2789 fail_bad_value: 2790 sfc_adapter_unlock(sa); 2791 2792 return -rc; 2793 } 2794 2795 static int 2796 sfc_flow_flush(struct rte_eth_dev *dev, 2797 struct rte_flow_error *error) 2798 { 2799 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2800 struct rte_flow *flow; 2801 int ret = 0; 2802 2803 sfc_adapter_lock(sa); 2804 2805 while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) { 2806 if (sa->state == SFC_ETHDEV_STARTED) { 2807 int rc; 2808 2809 rc = sfc_flow_remove(sa, flow, error); 2810 if (rc != 0) 2811 ret = rc; 2812 } 2813 2814 TAILQ_REMOVE(&sa->flow_list, flow, entries); 2815 sfc_flow_free(sa, flow); 2816 } 2817 2818 sfc_adapter_unlock(sa); 2819 2820 return -ret; 2821 } 2822 2823 static int 2824 sfc_flow_query(struct rte_eth_dev *dev, 2825 struct rte_flow *flow, 2826 const struct rte_flow_action *action, 2827 void *data, 2828 struct rte_flow_error *error) 2829 { 2830 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2831 const struct sfc_flow_ops_by_spec *ops; 2832 int ret; 2833 2834 sfc_adapter_lock(sa); 2835 2836 ops = sfc_flow_get_ops_by_spec(flow); 2837 if (ops == NULL || ops->query == NULL) { 2838 ret = rte_flow_error_set(error, ENOTSUP, 2839 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2840 "No backend to handle this flow"); 2841 goto fail_no_backend; 2842 } 2843 2844 if (sa->state != SFC_ETHDEV_STARTED) { 2845 ret = rte_flow_error_set(error, EINVAL, 2846 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2847 "Can't query the flow: the adapter is not started"); 2848 goto fail_not_started; 2849 } 2850 2851 ret = ops->query(dev, flow, action, data, error); 2852 if (ret != 0) 2853 goto fail_query; 2854 2855 sfc_adapter_unlock(sa); 2856 2857 return 0; 2858 2859 fail_query: 2860 fail_not_started: 2861 fail_no_backend: 2862 sfc_adapter_unlock(sa); 2863 return ret; 2864 } 2865 2866 static int 2867 sfc_flow_isolate(struct rte_eth_dev *dev, int enable, 2868 struct rte_flow_error *error) 2869 { 2870 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2871 int ret = 0; 2872 2873 sfc_adapter_lock(sa); 2874 if (sa->state != SFC_ETHDEV_INITIALIZED) { 2875 rte_flow_error_set(error, EBUSY, 2876 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2877 NULL, "please close the port first"); 2878 ret = -rte_errno; 2879 } else { 2880 sfc_sa2shared(sa)->isolated = (enable) ? 
B_TRUE : B_FALSE; 2881 } 2882 sfc_adapter_unlock(sa); 2883 2884 return ret; 2885 } 2886 2887 const struct rte_flow_ops sfc_flow_ops = { 2888 .validate = sfc_flow_validate, 2889 .create = sfc_flow_create, 2890 .destroy = sfc_flow_destroy, 2891 .flush = sfc_flow_flush, 2892 .query = sfc_flow_query, 2893 .isolate = sfc_flow_isolate, 2894 }; 2895 2896 void 2897 sfc_flow_init(struct sfc_adapter *sa) 2898 { 2899 SFC_ASSERT(sfc_adapter_is_locked(sa)); 2900 2901 TAILQ_INIT(&sa->flow_list); 2902 } 2903 2904 void 2905 sfc_flow_fini(struct sfc_adapter *sa) 2906 { 2907 struct rte_flow *flow; 2908 2909 SFC_ASSERT(sfc_adapter_is_locked(sa)); 2910 2911 while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) { 2912 TAILQ_REMOVE(&sa->flow_list, flow, entries); 2913 sfc_flow_free(sa, flow); 2914 } 2915 } 2916 2917 void 2918 sfc_flow_stop(struct sfc_adapter *sa) 2919 { 2920 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); 2921 struct sfc_rss *rss = &sas->rss; 2922 struct rte_flow *flow; 2923 2924 SFC_ASSERT(sfc_adapter_is_locked(sa)); 2925 2926 TAILQ_FOREACH(flow, &sa->flow_list, entries) 2927 sfc_flow_remove(sa, flow, NULL); 2928 2929 if (rss->dummy_rss_context != EFX_RSS_CONTEXT_DEFAULT) { 2930 efx_rx_scale_context_free(sa->nic, rss->dummy_rss_context); 2931 rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT; 2932 } 2933 2934 /* 2935 * MAE counter service is not stopped on flow rule remove to avoid 2936 * extra work. Make sure that it is stopped here. 2937 */ 2938 sfc_mae_counter_stop(sa); 2939 } 2940 2941 int 2942 sfc_flow_start(struct sfc_adapter *sa) 2943 { 2944 struct rte_flow *flow; 2945 int rc = 0; 2946 2947 sfc_log_init(sa, "entry"); 2948 2949 SFC_ASSERT(sfc_adapter_is_locked(sa)); 2950 2951 TAILQ_FOREACH(flow, &sa->flow_list, entries) { 2952 rc = sfc_flow_insert(sa, flow, NULL); 2953 if (rc != 0) 2954 goto fail_bad_flow; 2955 } 2956 2957 sfc_log_init(sa, "done"); 2958 2959 fail_bad_flow: 2960 return rc; 2961 } 2962
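/*
 * Usage sketch (application side; not part of this PMD, shown only to
 * illustrate how the sfc_flow_ops entry points above are reached through
 * the generic rte_flow API). Identifiers such as port_id, attr, pattern
 * and actions are placeholders:
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *handle;
 *	int rc;
 *
 *	rc = rte_flow_validate(port_id, &attr, pattern, actions, &err);
 *	if (rc == 0)
 *		handle = rte_flow_create(port_id, &attr, pattern,
 *					 actions, &err);
 *	...
 *	rte_flow_destroy(port_id, handle, &err);
 */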