/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2017-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"
#include "sfc_mae_counter.h"

struct sfc_flow_ops_by_spec {
	sfc_flow_parse_cb_t	*parse;
	sfc_flow_verify_cb_t	*verify;
	sfc_flow_cleanup_cb_t	*cleanup;
	sfc_flow_insert_cb_t	*insert;
	sfc_flow_remove_cb_t	*remove;
	sfc_flow_query_cb_t	*query;
};

static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_mae;
static sfc_flow_insert_cb_t sfc_flow_filter_insert;
static sfc_flow_remove_cb_t sfc_flow_filter_remove;

static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
	.parse = sfc_flow_parse_rte_to_filter,
	.verify = NULL,
	.cleanup = NULL,
	.insert = sfc_flow_filter_insert,
	.remove = sfc_flow_filter_remove,
	.query = NULL,
};

static const struct sfc_flow_ops_by_spec sfc_flow_ops_mae = {
	.parse = sfc_flow_parse_rte_to_mae,
	.verify = sfc_mae_flow_verify,
	.cleanup = sfc_mae_flow_cleanup,
	.insert = sfc_mae_flow_insert,
	.remove = sfc_mae_flow_remove,
	.query = sfc_mae_flow_query,
};

static const struct sfc_flow_ops_by_spec *
sfc_flow_get_ops_by_spec(struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	const struct sfc_flow_ops_by_spec *ops = NULL;

	switch (spec->type) {
	case SFC_FLOW_SPEC_FILTER:
		ops = &sfc_flow_ops_filter;
		break;
	case SFC_FLOW_SPEC_MAE:
		ops = &sfc_flow_ops_mae;
		break;
	default:
		SFC_ASSERT(false);
		break;
	}

	return ops;
}

/*
 * Currently, filter-based (VNIC) flow API is implemented in such a manner
 * that each flow rule is converted to one or more hardware filters.
 * All elements of flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that is responsible for the hardware filter.
 * If some required field is unset in the flow rule, then a handful
 * of filter copies will be created to cover all possible values
 * of such a field.
 */
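
/*
 * Illustrative example (not part of the original sources): if a rule does
 * not specify the EtherType but the hardware match combination requires it,
 * the single filter template is duplicated so that one copy matches
 * EFX_ETHER_TYPE_IPV4 and the other EFX_ETHER_TYPE_IPV6; see
 * sfc_flow_set_ethertypes() below for the copy mechanics.
 */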

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;
static sfc_flow_item_parse sfc_flow_parse_pppoex;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
				     unsigned int filters_count_for_one_val,
				     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
					efx_filter_spec_t *spec,
					struct sfc_filter *filter);

struct sfc_flow_copy_flag {
	/* EFX filter specification match flag */
	efx_filter_match_flags_t flag;
	/* Number of values of corresponding field */
	unsigned int vals_count;
	/* Function to set values in specifications */
	sfc_flow_spec_set_vals *set_vals;
	/*
	 * Function to check that the specification is suitable
	 * for adding this match flag
	 */
	sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return (sum == 0) ? B_TRUE : B_FALSE;
}
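
/*
 * Illustrative note: each protocol parser below is expected to call
 * sfc_flow_parse_init() first to obtain the "spec" and "mask" pointers
 * (falling back to the item's default mask when none is given) and only
 * then translate the masked fields into efx_filter_spec_t match flags;
 * see sfc_flow_parse_tcp() for a typical example.
 */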

/*
 * Validate item and prepare structures spec and mask for parsing
 */
int
sfc_flow_parse_init(const struct rte_flow_item *item,
		    const void **spec_ptr,
		    const void **mask_ptr,
		    const void *supp_mask,
		    const void *def_mask,
		    unsigned int size,
		    struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t supp;
	unsigned int i;

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec");
		return -rte_errno;
	}

	/*
	 * If "mask" is not set, default mask is used,
	 * but if default mask is NULL, "mask" should be set
	 */
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					   "Mask should be specified");
			return -rte_errno;
		}

		mask = def_mask;
	} else {
		mask = item->mask;
	}

	spec = item->spec;
	last = item->last;

	if (spec == NULL)
		goto exit;

	/*
	 * If field values in "last" are either 0 or equal to the corresponding
	 * values in "spec" then they are ignored
	 */
	if (last != NULL &&
	    !sfc_flow_is_zero(last, size) &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ranging is not supported");
		return -rte_errno;
	}

	if (supp_mask == NULL) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Supported mask for item should be specified");
		return -rte_errno;
	}

	/* Check that mask does not ask for more match than supp_mask */
	for (i = 0; i < size; i++) {
		supp = ((const uint8_t *)supp_mask)[i];

		if (~supp & mask[i]) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Item's field is not supported");
			return -rte_errno;
		}
	}

exit:
	*spec_ptr = spec;
	*mask_ptr = mask;
	return 0;
}

/*
 * Protocol parsers.
 * Masking is not supported, so masks in items should be either
 * full or empty (zeroed) and set only for supported fields which
 * are specified in the supp_mask.
 */

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
		    __rte_unused struct sfc_flow_parse_ctx *parse_ctx,
		    __rte_unused struct rte_flow_error *error)
{
	return 0;
}
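
/*
 * Illustrative example (assumption about a typical use case): an ETH item
 * with a fully masked destination address maps to EFX_FILTER_MATCH_LOC_MAC,
 * whereas a destination mask of 01:00:00:00:00:00 only distinguishes
 * unicast from multicast and maps to EFX_FILTER_MATCH_UNKNOWN_UCAST_DST or
 * EFX_FILTER_MATCH_UNKNOWN_MCAST_DST in the parser below.
 */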

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Outer frame specification may only comprise
 *   source/destination addresses and Ethertype field.
 *   Inner frame specification may contain destination address only.
 *   There is support for individual/group mask as well as for empty and full.
 *   If the mask is NULL, default mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};
	const struct rte_flow_item_eth ifrm_supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
		0x01, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	const struct rte_flow_item_eth *supp_mask_p;
	const struct rte_flow_item_eth *def_mask_p;
	uint8_t *loc_mac = NULL;
	boolean_t is_ifrm = (efx_spec->efs_encap_type !=
			     EFX_TUNNEL_PROTOCOL_NONE);

	if (is_ifrm) {
		supp_mask_p = &ifrm_supp_mask;
		def_mask_p = &ifrm_supp_mask;
		loc_mac = efx_spec->efs_ifrm_loc_mac;
	} else {
		supp_mask_p = &supp_mask;
		def_mask_p = &rte_flow_item_eth_mask;
		loc_mac = efx_spec->efs_loc_mac;
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 supp_mask_p, def_mask_p,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, could be any Ethernet */
	if (spec == NULL)
		return 0;

	if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		efx_spec->efs_match_flags |= is_ifrm ?
			EFX_FILTER_MATCH_IFRM_LOC_MAC :
			EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (memcmp(mask->dst.addr_bytes, ig_mask,
			  EFX_MAC_ADDR_LEN) == 0) {
		if (rte_is_unicast_ether_addr(&spec->dst))
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
		else
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	} else if (!rte_is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	/*
	 * ifrm_supp_mask ensures that the source address and
	 * ethertype masks are equal to zero in inner frame,
	 * so these fields are filled in only for the outer frame
	 */
	if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!rte_is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * Ether type is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}
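
/*
 * Illustrative note: when a pattern carries two VLAN items, the parser
 * below assigns the first one to the outer tag (EFX_FILTER_MATCH_OUTER_VID)
 * and the second one to the inner tag (EFX_FILTER_MATCH_INNER_VID);
 * a third VLAN item is rejected.
 */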

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only VID field is supported.
 *   The mask can not be NULL. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	uint16_t vid;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_vlan *spec = NULL;
	const struct rte_flow_item_vlan *mask = NULL;
	const struct rte_flow_item_vlan supp_mask = {
		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
		.inner_type = RTE_BE16(0xffff),
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 NULL,
				 sizeof(struct rte_flow_item_vlan),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * VID is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used.
	 * If two VLAN items are included, the first matches
	 * the outer tag and the next matches the inner tag.
	 */
	if (mask->tci == supp_mask.tci) {
		/* Apply mask to keep VID only */
		vid = rte_bswap16(spec->tci & mask->tci);

		if (!(efx_spec->efs_match_flags &
		      EFX_FILTER_MATCH_OUTER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
			efx_spec->efs_outer_vid = vid;
		} else if (!(efx_spec->efs_match_flags &
			     EFX_FILTER_MATCH_INNER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
			efx_spec->efs_inner_vid = vid;
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "More than two VLAN items");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN ID in TCI match is required");
		return -rte_errno;
	}

	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN TPID matching is not supported");
		return -rte_errno;
	}
	if (mask->inner_type == supp_mask.inner_type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
	} else if (mask->inner_type) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Bad mask for VLAN inner_type");
		return -rte_errno;
	}

	return 0;
}
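
/*
 * Illustrative note: the L3 parsers below pin the EtherType implied by the
 * item; for instance, a pattern that sets EtherType 0x86DD in the ETH item
 * and then uses an IPV4 item is rejected, because the IPv4 parser requires
 * EtherType 0x0800 (EFX_ETHER_TYPE_IPV4) in the hardware filter.
 */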

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_ipv4 *spec = NULL;
	const struct rte_flow_item_ipv4 *mask = NULL;
	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
	const struct rte_flow_item_ipv4 supp_mask = {
		.hdr = {
			.src_addr = 0xffffffff,
			.dst_addr = 0xffffffff,
			.next_proto_id = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv4 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv4;
	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV4 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv4 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
	} else if (mask->hdr.src_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
	} else if (mask->hdr.dst_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
	} else if (mask->hdr.next_proto_id != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV4 pattern item");
	return -rte_errno;
}

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_ipv6 *spec = NULL;
	const struct rte_flow_item_ipv6 *mask = NULL;
	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
	const struct rte_flow_item_ipv6 supp_mask = {
		.hdr = {
			.src_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.proto = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv6 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv6;
	} else if (efx_spec->efs_ether_type != ether_type_ipv6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV6 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv6 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
		   sizeof(mask->hdr.src_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
				 sizeof(spec->hdr.src_addr));
		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
			   sizeof(efx_spec->efs_rem_host));
	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
				     sizeof(mask->hdr.src_addr))) {
		goto fail_bad_mask;
	}

	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
		   sizeof(mask->hdr.dst_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
				 sizeof(spec->hdr.dst_addr));
		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
			   sizeof(efx_spec->efs_loc_host));
	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
				     sizeof(mask->hdr.dst_addr))) {
		goto fail_bad_mask;
	}

	if (mask->hdr.proto == supp_mask.hdr.proto) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.proto;
	} else if (mask->hdr.proto != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV6 pattern item");
	return -rte_errno;
}

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination ports fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_tcp *spec = NULL;
	const struct rte_flow_item_tcp *mask = NULL;
	const struct rte_flow_item_tcp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_tcp_mask,
				 sizeof(struct rte_flow_item_tcp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by TCP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with TCP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the TCP pattern item");
	return -rte_errno;
}

/**
 * Convert UDP item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination ports fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_udp *spec = NULL;
	const struct rte_flow_item_udp *mask = NULL;
	const struct rte_flow_item_udp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_udp_mask,
				 sizeof(struct rte_flow_item_udp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by UDP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with UDP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the UDP pattern item");
	return -rte_errno;
}

/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
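
/*
 * Illustrative example: a VXLAN or GENEVE pattern is only accepted if the
 * outer IP protocol resolves to UDP, and an NVGRE pattern requires GRE;
 * e.g. "eth / ipv4 / udp / vxlan / eth" is a valid sequence, whereas
 * "eth / ipv4 proto is 6 / vxlan" is rejected by the checks below.
 */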
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
					efx_filter_spec_t *efx_spec,
					uint8_t ip_proto,
					struct rte_flow_error *error)
{
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = ip_proto;
	} else if (efx_spec->efs_ip_proto != ip_proto) {
		switch (ip_proto) {
		case EFX_IPPROTO_UDP:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be UDP "
				"in VxLAN/GENEVE pattern");
			return -rte_errno;

		case EFX_IPPROTO_GRE:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be GRE "
				"in NVGRE pattern");
			return -rte_errno;

		default:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Only VxLAN/GENEVE/NVGRE tunneling patterns "
				"are supported");
			return -rte_errno;
		}
	}

	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Outer frame EtherType in pattern with tunneling "
			"must be IPv4 or IPv6");
		return -rte_errno;
	}

	return 0;
}

static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
				  const uint8_t *vni_or_vsid_val,
				  const uint8_t *vni_or_vsid_mask,
				  const struct rte_flow_item *item,
				  struct rte_flow_error *error)
{
	const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
		0xff, 0xff, 0xff
	};

	if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
		   EFX_VNI_OR_VSID_LEN) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
		rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
			   EFX_VNI_OR_VSID_LEN);
	} else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Unsupported VNI/VSID mask");
		return -rte_errno;
	}

	return 0;
}

/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only VXLAN network identifier field is supported.
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
		     struct sfc_flow_parse_ctx *parse_ctx,
		     struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_vxlan *spec = NULL;
	const struct rte_flow_item_vxlan *mask = NULL;
	const struct rte_flow_item_vxlan supp_mask = {
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_vxlan_mask,
				 sizeof(struct rte_flow_item_vxlan),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}

/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only Virtual Network Identifier and protocol type
 *   fields are supported. But protocol type can be only Ethernet (0x6558).
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
		      struct sfc_flow_parse_ctx *parse_ctx,
		      struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_geneve *spec = NULL;
	const struct rte_flow_item_geneve *mask = NULL;
	const struct rte_flow_item_geneve supp_mask = {
		.protocol = RTE_BE16(0xffff),
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_geneve_mask,
				 sizeof(struct rte_flow_item_geneve),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	if (mask->protocol == supp_mask.protocol) {
		if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"GENEVE encap. protocol must be Ethernet "
				"(0x6558) in the GENEVE pattern item");
			return -rte_errno;
		}
	} else if (mask->protocol != 0) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Unsupported mask for GENEVE encap. protocol");
		return -rte_errno;
	}

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}

/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only virtual subnet ID field is supported.
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
		     struct sfc_flow_parse_ctx *parse_ctx,
		     struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_nvgre *spec = NULL;
	const struct rte_flow_item_nvgre *mask = NULL;
	const struct rte_flow_item_nvgre supp_mask = {
		.tni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_nvgre_mask,
				 sizeof(struct rte_flow_item_nvgre),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_GRE, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
					       mask->tni, item, error);

	return rc;
}

/**
 * Convert PPPoEx item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification.
 *   Matching on PPPoEx fields is not supported.
 *   This item can only be used to set or validate the EtherType filter.
 *   Only zero masks are allowed.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_pppoex(const struct rte_flow_item *item,
		      struct sfc_flow_parse_ctx *parse_ctx,
		      struct rte_flow_error *error)
{
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_pppoe *spec = NULL;
	const struct rte_flow_item_pppoe *mask = NULL;
	const struct rte_flow_item_pppoe supp_mask = {};
	const struct rte_flow_item_pppoe def_mask = {};
	uint16_t ether_type;
	int rc;

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &def_mask,
				 sizeof(struct rte_flow_item_pppoe),
				 error);
	if (rc != 0)
		return rc;

	if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED)
		ether_type = RTE_ETHER_TYPE_PPPOE_DISCOVERY;
	else
		ether_type = RTE_ETHER_TYPE_PPPOE_SESSION;

	if ((efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) != 0) {
		if (efx_spec->efs_ether_type != ether_type) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Invalid EtherType for a PPPoE flow item");
			return -rte_errno;
		}
	} else {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type;
	}

	return 0;
}

static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_VOID,
		.name = "VOID",
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_void,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.name = "ETH",
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.name = "VLAN",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_vlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_PPPOED,
		.name = "PPPOED",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_pppoex,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_PPPOES,
		.name = "PPPOES",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_pppoex,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.name = "IPV4",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_ipv4,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.name = "IPV6",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_ipv6,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.name = "TCP",
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_tcp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.name = "UDP",
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_udp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.name = "VXLAN",
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_vxlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
		.name = "GENEVE",
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_geneve,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
		.name = "NVGRE",
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_nvgre,
	},
};

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(struct sfc_adapter *sa,
		    const struct rte_flow_attr *attr,
		    struct rte_flow *flow,
		    struct rte_flow_error *error)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	struct sfc_mae *mae = &sa->mae;

	if (attr == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "NULL attribute");
		return -rte_errno;
	}
	if (attr->group != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
				   "Groups are not supported");
		return -rte_errno;
	}
	if (attr->egress != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
				   "Egress is not supported");
		return -rte_errno;
	}
	if (attr->ingress == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
				   "Ingress is compulsory");
		return -rte_errno;
	}
	if (attr->transfer == 0) {
		if (attr->priority != 0) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   attr, "Priorities are unsupported");
			return -rte_errno;
		}
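		/* Non-transfer rules are backed by VNIC filters (sfc_flow_ops_filter). */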
		spec->type = SFC_FLOW_SPEC_FILTER;
		spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
		spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
		spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL;
	} else {
		if (mae->status != SFC_MAE_STATUS_SUPPORTED) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					   attr, "Transfer is not supported");
			return -rte_errno;
		}
		if (attr->priority > mae->nb_action_rule_prios_max) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   attr, "Unsupported priority level");
			return -rte_errno;
		}
		spec->type = SFC_FLOW_SPEC_MAE;
		spec_mae->priority = attr->priority;
		spec_mae->match_spec = NULL;
		spec_mae->action_set = NULL;
		spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
	}

	return 0;
}

/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(const struct sfc_flow_item *items,
		  unsigned int nb_items,
		  enum rte_flow_item_type type)
{
	unsigned int i;

	for (i = 0; i < nb_items; i++)
		if (items[i].type == type)
			return &items[i];

	return NULL;
}

int
sfc_flow_parse_pattern(struct sfc_adapter *sa,
		       const struct sfc_flow_item *flow_items,
		       unsigned int nb_flow_items,
		       const struct rte_flow_item pattern[],
		       struct sfc_flow_parse_ctx *parse_ctx,
		       struct rte_flow_error *error)
{
	int rc;
	unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
	boolean_t is_ifrm = B_FALSE;
	const struct sfc_flow_item *item;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		item = sfc_flow_get_item(flow_items, nb_flow_items,
					 pattern->type);
		if (item == NULL) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unsupported pattern item");
			return -rte_errno;
		}

		/*
		 * Omitting one or several protocol layers at the beginning
		 * of pattern is supported
		 */
		if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    item->prev_layer != prev_layer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unexpected sequence of pattern items");
			return -rte_errno;
		}

		/*
		 * Allow only VOID and ETH pattern items in the inner frame.
		 * Also check that there is only one tunneling protocol.
		 */
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
		case RTE_FLOW_ITEM_TYPE_ETH:
			break;

		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_GENEVE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			if (is_ifrm) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					pattern,
					"More than one tunneling protocol");
				return -rte_errno;
			}
			is_ifrm = B_TRUE;
			break;

		default:
			if (parse_ctx->type == SFC_FLOW_PARSE_CTX_FILTER &&
			    is_ifrm) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					pattern,
					"There is an unsupported pattern item "
					"in the inner frame");
				return -rte_errno;
			}
			break;
		}

		if (parse_ctx->type != item->ctx_type) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Parse context type mismatch");
			return -rte_errno;
		}

		rc = item->parse(pattern, parse_ctx, error);
		if (rc != 0) {
			sfc_err(sa, "failed to parse item %s: %s",
				item->name, strerror(-rc));
			return rc;
		}

		if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
			prev_layer = item->layer;
	}

	return 0;
}

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
		     const struct rte_flow_action_queue *queue,
		     struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_rxq *rxq;
	struct sfc_rxq_info *rxq_info;

	if (queue->index >= sfc_sa2shared(sa)->ethdev_rxq_count)
		return -EINVAL;

	rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, queue->index);
	spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;

	rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index];
	spec_filter->rss_hash_required = !!(rxq_info->rxq_flags &
					    SFC_RXQ_FLAG_RSS_HASH);

	return 0;
}

static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
		   const struct rte_flow_action_rss *action_rss,
		   struct rte_flow *flow)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	sfc_ethdev_qid_t ethdev_qid;
	struct sfc_rxq *rxq;
	unsigned int rxq_hw_index_min;
	unsigned int rxq_hw_index_max;
	efx_rx_hash_type_t efx_hash_types;
	const uint8_t *rss_key;
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
	unsigned int i;

	if (action_rss->queue_num == 0)
		return -EINVAL;

	ethdev_qid = sfc_sa2shared(sa)->ethdev_rxq_count - 1;
	rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
	rxq_hw_index_min = rxq->hw_index;
	rxq_hw_index_max = 0;

	for (i = 0; i < action_rss->queue_num; ++i) {
		ethdev_qid = action_rss->queue[i];

		if ((unsigned int)ethdev_qid >=
		    sfc_sa2shared(sa)->ethdev_rxq_count)
			return -EINVAL;

		rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);

		if (rxq->hw_index < rxq_hw_index_min)
			rxq_hw_index_min = rxq->hw_index;

		if (rxq->hw_index > rxq_hw_index_max)
			rxq_hw_index_max = rxq->hw_index;
	}

	switch (action_rss->func) {
	case RTE_ETH_HASH_FUNCTION_DEFAULT:
	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
		break;
	default:
		return -EINVAL;
	}

	if (action_rss->level)
		return -EINVAL;

	/*
	 * Dummy RSS action with only one queue and no specific settings
	 * for hash types and key does not require dedicated RSS context
	 * and may be simplified to single queue action.
	 */
	if (action_rss->queue_num == 1 && action_rss->types == 0 &&
	    action_rss->key_len == 0) {
		spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
		return 0;
	}

	if (action_rss->types) {
		int rc;

		rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
					  &efx_hash_types);
		if (rc != 0)
			return -rc;
	} else {
		unsigned int i;

		efx_hash_types = 0;
		for (i = 0; i < rss->hf_map_nb_entries; ++i)
			efx_hash_types |= rss->hf_map[i].efx;
	}

	if (action_rss->key_len) {
		if (action_rss->key_len != sizeof(rss->key))
			return -EINVAL;

		rss_key = action_rss->key;
	} else {
		rss_key = rss->key;
	}

	spec_filter->rss = B_TRUE;

	sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
	sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
	sfc_rss_conf->rss_hash_types = efx_hash_types;
	rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));

	for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
		unsigned int nb_queues = action_rss->queue_num;
		struct sfc_rxq *rxq;

		ethdev_qid = action_rss->queue[i % nb_queues];
		rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
		sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
	}

	return 0;
}

static int
sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
		    unsigned int filters_count)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < filters_count; i++) {
		int rc;

		rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
		if (ret == 0 && rc != 0) {
			sfc_err(sa, "failed to remove filter specification "
				"(rc = %d)", rc);
			ret = rc;
		}
	}

	return ret;
}

static int
sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	unsigned int i;
	int rc = 0;

	for (i = 0; i < spec_filter->count; i++) {
		rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
		if (rc != 0) {
			sfc_flow_spec_flush(sa, spec, i);
			break;
		}
	}

	return rc;
}

static int
sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;

	return sfc_flow_spec_flush(sa, spec, spec_filter->count);
}

static int
sfc_flow_filter_insert(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
	struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
	uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
	boolean_t create_context;
	unsigned int i;
	int rc = 0;

	create_context = spec_filter->rss || (spec_filter->rss_hash_required &&
			rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT);

	if (create_context) {
		unsigned int rss_spread;
		unsigned int rss_hash_types;
		uint8_t *rss_key;

		if (spec_filter->rss) {
			rss_spread = MIN(flow_rss->rxq_hw_index_max -
					 flow_rss->rxq_hw_index_min + 1,
					 EFX_MAXRSS);
			rss_hash_types = flow_rss->rss_hash_types;
			rss_key = flow_rss->rss_key;
		} else {
			/*
			 * Initialize dummy RSS context parameters to have
			 * valid RSS hash. Use default RSS hash function and
			 * key.
			 */
			rss_spread = 1;
			rss_hash_types = rss->hash_types;
			rss_key = rss->key;
		}

		rc = efx_rx_scale_context_alloc(sa->nic,
						EFX_RX_SCALE_EXCLUSIVE,
						rss_spread,
						&efs_rss_context);
		if (rc != 0)
			goto fail_scale_context_alloc;

		rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
					   rss->hash_alg,
					   rss_hash_types, B_TRUE);
		if (rc != 0)
			goto fail_scale_mode_set;

		rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
					  rss_key, sizeof(rss->key));
		if (rc != 0)
			goto fail_scale_key_set;
	} else {
		efs_rss_context = rss->dummy_rss_context;
	}

	if (spec_filter->rss || spec_filter->rss_hash_required) {
		/*
		 * At this point, fully elaborated filter specifications
		 * have been produced from the template. To make sure that
		 * RSS behaviour is consistent between them, set the same
		 * RSS context value everywhere.
		 */
		for (i = 0; i < spec_filter->count; i++) {
			efx_filter_spec_t *spec = &spec_filter->filters[i];

			spec->efs_rss_context = efs_rss_context;
			spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
			if (spec_filter->rss)
				spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
		}
	}

	rc = sfc_flow_spec_insert(sa, &flow->spec);
	if (rc != 0)
		goto fail_filter_insert;

	if (create_context) {
		unsigned int dummy_tbl[RTE_DIM(flow_rss->rss_tbl)] = {0};
		unsigned int *tbl;

		tbl = spec_filter->rss ?
			flow_rss->rss_tbl : dummy_tbl;

		/*
		 * Scale table is set after filter insertion because
		 * the table entries are relative to the base RxQ ID
		 * and the latter is submitted to the HW by means of
		 * inserting a filter, so by the time of the request
		 * the HW knows all the information needed to verify
		 * the table entries, and the operation will succeed
		 */
		rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
					  tbl, RTE_DIM(flow_rss->rss_tbl));
		if (rc != 0)
			goto fail_scale_tbl_set;

		/* Remember created dummy RSS context */
		if (!spec_filter->rss)
			rss->dummy_rss_context = efs_rss_context;
	}

	return 0;

fail_scale_tbl_set:
	sfc_flow_spec_remove(sa, &flow->spec);

fail_filter_insert:
fail_scale_key_set:
fail_scale_mode_set:
	if (create_context)
		efx_rx_scale_context_free(sa->nic, efs_rss_context);

fail_scale_context_alloc:
	return rc;
}

static int
sfc_flow_filter_remove(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
	int rc = 0;

	rc = sfc_flow_spec_remove(sa, &flow->spec);
	if (rc != 0)
		return rc;

	if (spec_filter->rss) {
		/*
		 * All specifications for a given flow rule have the same RSS
		 * context, so that RSS context value is taken from the first
		 * filter specification
		 */
		efx_filter_spec_t *spec = &spec_filter->filters[0];

		rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
	}

	return rc;
}

static int
sfc_flow_parse_mark(struct sfc_adapter *sa,
		    const struct rte_flow_action_mark *mark,
		    struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

	if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
		return EINVAL;

	spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
	spec_filter->template.efs_mark = mark->id;

	return 0;
}

static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	const unsigned int dp_rx_features = sa->priv.dp_rx->features;
	uint32_t actions_set = 0;
	const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
					   (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
					   (1UL << RTE_FLOW_ACTION_TYPE_DROP);
	const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
					   (1UL << RTE_FLOW_ACTION_TYPE_FLAG);

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
					       actions_set);
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad QUEUE action");
				return -rte_errno;
			}
			break;

		case RTE_FLOW_ACTION_TYPE_RSS:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			rc = sfc_flow_parse_rss(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, -rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad RSS action");
				return -rte_errno;
			}
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			spec_filter->template.efs_dmaq_id =
				EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
			break;

		case RTE_FLOW_ACTION_TYPE_FLAG:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
					       actions_set);
			if ((actions_set & mark_actions_mask) != 0)
				goto fail_actions_overlap;

			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"FLAG action is not supported on the current Rx datapath");
				return -rte_errno;
			}

			spec_filter->template.efs_flags |=
				EFX_FILTER_FLAG_ACTION_FLAG;
			break;

		case RTE_FLOW_ACTION_TYPE_MARK:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
					       actions_set);
			if ((actions_set & mark_actions_mask) != 0)
				goto fail_actions_overlap;

			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"MARK action is not supported on the current Rx datapath");
				return -rte_errno;
			}

			rc = sfc_flow_parse_mark(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad MARK action");
				return -rte_errno;
			}
			break;

		default:
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Action is not supported");
			return -rte_errno;
		}

		actions_set |= (1UL << actions->type);
	}

	/* When fate is unknown, drop traffic. */
	if ((actions_set & fate_actions_mask) == 0) {
		spec_filter->template.efs_dmaq_id =
			EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
	}

	return 0;

fail_fate_actions:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Cannot combine several fate-deciding actions, "
			   "choose between QUEUE, RSS or DROP");
	return -rte_errno;

fail_actions_overlap:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Overlapping actions are not supported");
	return -rte_errno;
}
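
/*
 * Illustrative example (testpmd syntax, given for documentation only):
 *   flow create 0 ingress pattern eth / ipv4 / udp dst is 4789 / end \
 *        actions queue index 1 / end
 * is handled by the filter-based path above: QUEUE is the fate action,
 * and a rule without any fate action falls back to DROP.
 */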
1905 */ 1906 static int 1907 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec, 1908 unsigned int filters_count_for_one_val, 1909 struct rte_flow_error *error) 1910 { 1911 unsigned int i; 1912 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 1913 static const efx_filter_match_flags_t vals[] = { 1914 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, 1915 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST 1916 }; 1917 1918 if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) { 1919 rte_flow_error_set(error, EINVAL, 1920 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1921 "Number of specifications is incorrect while copying " 1922 "by unknown destination flags"); 1923 return -rte_errno; 1924 } 1925 1926 for (i = 0; i < spec_filter->count; i++) { 1927 /* The check above ensures that divisor can't be zero here */ 1928 spec_filter->filters[i].efs_match_flags |= 1929 vals[i / filters_count_for_one_val]; 1930 } 1931 1932 return 0; 1933 } 1934 1935 /** 1936 * Check that the following conditions are met: 1937 * - the list of supported filters has a filter 1938 * with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of 1939 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also 1940 * be inserted. 1941 * 1942 * @param match[in] 1943 * The match flags of filter. 1944 * @param spec[in] 1945 * Specification to be supplemented. 1946 * @param filter[in] 1947 * SFC filter with list of supported filters. 1948 */ 1949 static boolean_t 1950 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match, 1951 __rte_unused efx_filter_spec_t *spec, 1952 struct sfc_filter *filter) 1953 { 1954 unsigned int i; 1955 efx_filter_match_flags_t match_mcast_dst; 1956 1957 match_mcast_dst = 1958 (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) | 1959 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST; 1960 for (i = 0; i < filter->supported_match_num; i++) { 1961 if (match_mcast_dst == filter->supported_match[i]) 1962 return B_TRUE; 1963 } 1964 1965 return B_FALSE; 1966 } 1967 1968 /** 1969 * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and 1970 * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same 1971 * specifications after copying. 1972 * 1973 * @param spec[in, out] 1974 * SFC flow specification to update. 1975 * @param filters_count_for_one_val[in] 1976 * How many specifications should have the same EtherType value, what is the 1977 * number of specifications before copying. 1978 * @param error[out] 1979 * Perform verbose error reporting if not NULL. 
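 *
 * For example (illustrative values): with two specifications after copying
 * and filters_count_for_one_val equal to 1, specification 0 is set to match
 * EtherType EFX_ETHER_TYPE_IPV4 and specification 1 to match
 * EFX_ETHER_TYPE_IPV6.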
1980 */ 1981 static int 1982 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec, 1983 unsigned int filters_count_for_one_val, 1984 struct rte_flow_error *error) 1985 { 1986 unsigned int i; 1987 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 1988 static const uint16_t vals[] = { 1989 EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6 1990 }; 1991 1992 if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) { 1993 rte_flow_error_set(error, EINVAL, 1994 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1995 "Number of specifications is incorrect " 1996 "while copying by Ethertype"); 1997 return -rte_errno; 1998 } 1999 2000 for (i = 0; i < spec_filter->count; i++) { 2001 spec_filter->filters[i].efs_match_flags |= 2002 EFX_FILTER_MATCH_ETHER_TYPE; 2003 2004 /* 2005 * The check above ensures that 2006 * filters_count_for_one_val is not 0 2007 */ 2008 spec_filter->filters[i].efs_ether_type = 2009 vals[i / filters_count_for_one_val]; 2010 } 2011 2012 return 0; 2013 } 2014 2015 /** 2016 * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0 2017 * in the same specifications after copying. 2018 * 2019 * @param spec[in, out] 2020 * SFC flow specification to update. 2021 * @param filters_count_for_one_val[in] 2022 * How many specifications should have the same match flag, what is the 2023 * number of specifications before copying. 2024 * @param error[out] 2025 * Perform verbose error reporting if not NULL. 2026 */ 2027 static int 2028 sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec, 2029 unsigned int filters_count_for_one_val, 2030 struct rte_flow_error *error) 2031 { 2032 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 2033 unsigned int i; 2034 2035 if (filters_count_for_one_val != spec_filter->count) { 2036 rte_flow_error_set(error, EINVAL, 2037 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2038 "Number of specifications is incorrect " 2039 "while copying by outer VLAN ID"); 2040 return -rte_errno; 2041 } 2042 2043 for (i = 0; i < spec_filter->count; i++) { 2044 spec_filter->filters[i].efs_match_flags |= 2045 EFX_FILTER_MATCH_OUTER_VID; 2046 2047 spec_filter->filters[i].efs_outer_vid = 0; 2048 } 2049 2050 return 0; 2051 } 2052 2053 /** 2054 * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and 2055 * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same 2056 * specifications after copying. 2057 * 2058 * @param spec[in, out] 2059 * SFC flow specification to update. 2060 * @param filters_count_for_one_val[in] 2061 * How many specifications should have the same match flag, what is the 2062 * number of specifications before copying. 2063 * @param error[out] 2064 * Perform verbose error reporting if not NULL. 
 */
static int
sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
				    unsigned int filters_count_for_one_val,
				    struct rte_flow_error *error)
{
	unsigned int i;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	static const efx_filter_match_flags_t vals[] = {
		EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
	};

	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect while copying "
			"by inner frame unknown destination flags");
		return -rte_errno;
	}

	for (i = 0; i < spec_filter->count; i++) {
		/* The check above ensures that the divisor cannot be zero here */
		spec_filter->filters[i].efs_match_flags |=
			vals[i / filters_count_for_one_val];
	}

	return 0;
}

/**
 * Check that the following conditions are met:
 * - the specification corresponds to a filter for encapsulated traffic;
 * - the list of supported filters has a filter with
 *   the EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
 *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since such a filter will also
 *   be inserted.
 *
 * @param match[in]
 *   The match flags of the filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter with the list of supported filters.
 */
static boolean_t
sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
				      efx_filter_spec_t *spec,
				      struct sfc_filter *filter)
{
	unsigned int i;
	efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
	efx_filter_match_flags_t match_mcast_dst;

	if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
		return B_FALSE;

	match_mcast_dst =
		(match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
	for (i = 0; i < filter->supported_match_num; i++) {
		if (match_mcast_dst == filter->supported_match[i])
			return B_TRUE;
	}

	return B_FALSE;
}

/**
 * Check that the list of supported filters has a filter that differs
 * from @p match only in that it has no EFX_FILTER_MATCH_OUTER_VID flag.
 * In this case that filter will be used and the EFX_FILTER_MATCH_OUTER_VID
 * flag is not needed.
 *
 * @param match[in]
 *   The match flags of the filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter with the list of supported filters.
 */
static boolean_t
sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
			      __rte_unused efx_filter_spec_t *spec,
			      struct sfc_filter *filter)
{
	unsigned int i;
	efx_filter_match_flags_t match_without_vid =
		match & ~EFX_FILTER_MATCH_OUTER_VID;

	for (i = 0; i < filter->supported_match_num; i++) {
		if (match_without_vid == filter->supported_match[i])
			return B_FALSE;
	}

	return B_TRUE;
}

/*
 * Match flags that can be automatically added to filters.
 * Selecting the last of the minimal multipliers when searching for the copy
 * flag ensures that EFX_FILTER_MATCH_UNKNOWN_UCAST_DST has a higher priority
 * than EFX_FILTER_MATCH_ETHER_TYPE, because filters with
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST are at the end of the list of supported
 * filters.
 */
static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
	{
		.flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
		.vals_count = 2,
		.set_vals = sfc_flow_set_unknown_dst_flags,
		.spec_check = sfc_flow_check_unknown_dst_flags,
	},
	{
		.flag = EFX_FILTER_MATCH_ETHER_TYPE,
		.vals_count = 2,
		.set_vals = sfc_flow_set_ethertypes,
		.spec_check = NULL,
	},
	{
		.flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
		.vals_count = 2,
		.set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
		.spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
	},
	{
		.flag = EFX_FILTER_MATCH_OUTER_VID,
		.vals_count = 1,
		.set_vals = sfc_flow_set_outer_vid_flag,
		.spec_check = sfc_flow_check_outer_vid_flag,
	},
};

/* Get an item from the sfc_flow_copy_flags array by match flag */
static const struct sfc_flow_copy_flag *
sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		if (sfc_flow_copy_flags[i].flag == flag)
			return &sfc_flow_copy_flags[i];
	}

	return NULL;
}

/**
 * Make copies of the specifications and set the given match flag and the
 * values of the corresponding field in them.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param flag[in]
 *   The match flag to add.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
			     efx_filter_match_flags_t flag,
			     struct rte_flow_error *error)
{
	unsigned int i;
	unsigned int new_filters_count;
	unsigned int filters_count_for_one_val;
	const struct sfc_flow_copy_flag *copy_flag;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	int rc;

	copy_flag = sfc_flow_get_copy_flag(flag);
	if (copy_flag == NULL) {
		rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Unsupported spec field for copying");
		return -rte_errno;
	}

	new_filters_count = spec_filter->count * copy_flag->vals_count;
	if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Too many EFX specifications in the flow rule");
		return -rte_errno;
	}

	/* Copy filter specifications */
	for (i = spec_filter->count; i < new_filters_count; i++) {
		spec_filter->filters[i] =
			spec_filter->filters[i - spec_filter->count];
	}

	filters_count_for_one_val = spec_filter->count;
	spec_filter->count = new_filters_count;

	rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
	if (rc != 0)
		return rc;

	return 0;
}

/**
 * Check that the given set of match flags missing in the original filter spec
 * could be covered by adding spec copies which specify the corresponding
 * flags and packet field values to match.
 *
 * @param miss_flags[in]
 *   Match flags that are present in the supported filter but missing
 *   from the specification.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter.
 *
 * @return
 *   Number of specifications after copying, or 0 if the flags cannot be added.
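 *
 *   For example (illustrative): if both EFX_FILTER_MATCH_ETHER_TYPE and
 *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST are missing and both can be covered
 *   by copies, the returned multiplier is 2 * 2 = 4.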
2281 */ 2282 static unsigned int 2283 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags, 2284 efx_filter_spec_t *spec, 2285 struct sfc_filter *filter) 2286 { 2287 unsigned int i; 2288 efx_filter_match_flags_t copy_flags = 0; 2289 efx_filter_match_flags_t flag; 2290 efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags; 2291 sfc_flow_spec_check *check; 2292 unsigned int multiplier = 1; 2293 2294 for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) { 2295 flag = sfc_flow_copy_flags[i].flag; 2296 check = sfc_flow_copy_flags[i].spec_check; 2297 if ((flag & miss_flags) == flag) { 2298 if (check != NULL && (!check(match, spec, filter))) 2299 continue; 2300 2301 copy_flags |= flag; 2302 multiplier *= sfc_flow_copy_flags[i].vals_count; 2303 } 2304 } 2305 2306 if (copy_flags == miss_flags) 2307 return multiplier; 2308 2309 return 0; 2310 } 2311 2312 /** 2313 * Attempt to supplement the specification template to the minimally 2314 * supported set of match flags. To do this, it is necessary to copy 2315 * the specifications, filling them with the values of fields that 2316 * correspond to the missing flags. 2317 * The necessary and sufficient filter is built from the fewest number 2318 * of copies which could be made to cover the minimally required set 2319 * of flags. 2320 * 2321 * @param sa[in] 2322 * SFC adapter. 2323 * @param spec[in, out] 2324 * SFC flow specification to update. 2325 * @param error[out] 2326 * Perform verbose error reporting if not NULL. 2327 */ 2328 static int 2329 sfc_flow_spec_filters_complete(struct sfc_adapter *sa, 2330 struct sfc_flow_spec *spec, 2331 struct rte_flow_error *error) 2332 { 2333 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 2334 struct sfc_filter *filter = &sa->filter; 2335 efx_filter_match_flags_t miss_flags; 2336 efx_filter_match_flags_t min_miss_flags = 0; 2337 efx_filter_match_flags_t match; 2338 unsigned int min_multiplier = UINT_MAX; 2339 unsigned int multiplier; 2340 unsigned int i; 2341 int rc; 2342 2343 match = spec_filter->template.efs_match_flags; 2344 for (i = 0; i < filter->supported_match_num; i++) { 2345 if ((match & filter->supported_match[i]) == match) { 2346 miss_flags = filter->supported_match[i] & (~match); 2347 multiplier = sfc_flow_check_missing_flags(miss_flags, 2348 &spec_filter->template, filter); 2349 if (multiplier > 0) { 2350 if (multiplier <= min_multiplier) { 2351 min_multiplier = multiplier; 2352 min_miss_flags = miss_flags; 2353 } 2354 } 2355 } 2356 } 2357 2358 if (min_multiplier == UINT_MAX) { 2359 rte_flow_error_set(error, ENOTSUP, 2360 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2361 "The flow rule pattern is unsupported"); 2362 return -rte_errno; 2363 } 2364 2365 for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) { 2366 efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag; 2367 2368 if ((flag & min_miss_flags) == flag) { 2369 rc = sfc_flow_spec_add_match_flag(spec, flag, error); 2370 if (rc != 0) 2371 return rc; 2372 } 2373 } 2374 2375 return 0; 2376 } 2377 2378 /** 2379 * Check that set of match flags is referred to by a filter. Filter is 2380 * described by match flags with the ability to add OUTER_VID and INNER_VID 2381 * flags. 2382 * 2383 * @param match_flags[in] 2384 * Set of match flags. 2385 * @param flags_pattern[in] 2386 * Pattern of filter match flags. 
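 *
 * For example (illustrative, EFX_FILTER_MATCH_ prefixes omitted): with
 * flags_pattern equal to ETHER_TYPE, the match flag sets ETHER_TYPE,
 * ETHER_TYPE | OUTER_VID and ETHER_TYPE | OUTER_VID | INNER_VID all match,
 * whereas ETHER_TYPE | IP_PROTO does not.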
2387 */ 2388 static boolean_t 2389 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags, 2390 efx_filter_match_flags_t flags_pattern) 2391 { 2392 if ((match_flags & flags_pattern) != flags_pattern) 2393 return B_FALSE; 2394 2395 switch (match_flags & ~flags_pattern) { 2396 case 0: 2397 case EFX_FILTER_MATCH_OUTER_VID: 2398 case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID: 2399 return B_TRUE; 2400 default: 2401 return B_FALSE; 2402 } 2403 } 2404 2405 /** 2406 * Check whether the spec maps to a hardware filter which is known to be 2407 * ineffective despite being valid. 2408 * 2409 * @param filter[in] 2410 * SFC filter with list of supported filters. 2411 * @param spec[in] 2412 * SFC flow specification. 2413 */ 2414 static boolean_t 2415 sfc_flow_is_match_flags_exception(struct sfc_filter *filter, 2416 struct sfc_flow_spec *spec) 2417 { 2418 unsigned int i; 2419 uint16_t ether_type; 2420 uint8_t ip_proto; 2421 efx_filter_match_flags_t match_flags; 2422 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 2423 2424 for (i = 0; i < spec_filter->count; i++) { 2425 match_flags = spec_filter->filters[i].efs_match_flags; 2426 2427 if (sfc_flow_is_match_with_vids(match_flags, 2428 EFX_FILTER_MATCH_ETHER_TYPE) || 2429 sfc_flow_is_match_with_vids(match_flags, 2430 EFX_FILTER_MATCH_ETHER_TYPE | 2431 EFX_FILTER_MATCH_LOC_MAC)) { 2432 ether_type = spec_filter->filters[i].efs_ether_type; 2433 if (filter->supports_ip_proto_or_addr_filter && 2434 (ether_type == EFX_ETHER_TYPE_IPV4 || 2435 ether_type == EFX_ETHER_TYPE_IPV6)) 2436 return B_TRUE; 2437 } else if (sfc_flow_is_match_with_vids(match_flags, 2438 EFX_FILTER_MATCH_ETHER_TYPE | 2439 EFX_FILTER_MATCH_IP_PROTO) || 2440 sfc_flow_is_match_with_vids(match_flags, 2441 EFX_FILTER_MATCH_ETHER_TYPE | 2442 EFX_FILTER_MATCH_IP_PROTO | 2443 EFX_FILTER_MATCH_LOC_MAC)) { 2444 ip_proto = spec_filter->filters[i].efs_ip_proto; 2445 if (filter->supports_rem_or_local_port_filter && 2446 (ip_proto == EFX_IPPROTO_TCP || 2447 ip_proto == EFX_IPPROTO_UDP)) 2448 return B_TRUE; 2449 } 2450 } 2451 2452 return B_FALSE; 2453 } 2454 2455 static int 2456 sfc_flow_validate_match_flags(struct sfc_adapter *sa, 2457 struct rte_flow *flow, 2458 struct rte_flow_error *error) 2459 { 2460 struct sfc_flow_spec *spec = &flow->spec; 2461 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 2462 efx_filter_spec_t *spec_tmpl = &spec_filter->template; 2463 efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags; 2464 int rc; 2465 2466 /* Initialize the first filter spec with template */ 2467 spec_filter->filters[0] = *spec_tmpl; 2468 spec_filter->count = 1; 2469 2470 if (!sfc_filter_is_match_supported(sa, match_flags)) { 2471 rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error); 2472 if (rc != 0) 2473 return rc; 2474 } 2475 2476 if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) { 2477 rte_flow_error_set(error, ENOTSUP, 2478 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2479 "The flow rule pattern is unsupported"); 2480 return -rte_errno; 2481 } 2482 2483 return 0; 2484 } 2485 2486 static int 2487 sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev, 2488 const struct rte_flow_item pattern[], 2489 const struct rte_flow_action actions[], 2490 struct rte_flow *flow, 2491 struct rte_flow_error *error) 2492 { 2493 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2494 struct sfc_flow_spec *spec = &flow->spec; 2495 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 2496 struct sfc_flow_parse_ctx ctx; 2497 int rc; 2498 2499 
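	/*
	 * Parse the pattern into the filter specification template first,
	 * then parse the actions and, finally, complete and validate the
	 * resulting set of match flags.
	 */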
ctx.type = SFC_FLOW_PARSE_CTX_FILTER; 2500 ctx.filter = &spec_filter->template; 2501 2502 rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items), 2503 pattern, &ctx, error); 2504 if (rc != 0) 2505 goto fail_bad_value; 2506 2507 rc = sfc_flow_parse_actions(sa, actions, flow, error); 2508 if (rc != 0) 2509 goto fail_bad_value; 2510 2511 rc = sfc_flow_validate_match_flags(sa, flow, error); 2512 if (rc != 0) 2513 goto fail_bad_value; 2514 2515 return 0; 2516 2517 fail_bad_value: 2518 return rc; 2519 } 2520 2521 static int 2522 sfc_flow_parse_rte_to_mae(struct rte_eth_dev *dev, 2523 const struct rte_flow_item pattern[], 2524 const struct rte_flow_action actions[], 2525 struct rte_flow *flow, 2526 struct rte_flow_error *error) 2527 { 2528 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2529 struct sfc_flow_spec *spec = &flow->spec; 2530 struct sfc_flow_spec_mae *spec_mae = &spec->mae; 2531 int rc; 2532 2533 rc = sfc_mae_rule_parse_pattern(sa, pattern, spec_mae, error); 2534 if (rc != 0) 2535 return rc; 2536 2537 rc = sfc_mae_rule_parse_actions(sa, actions, spec_mae, error); 2538 if (rc != 0) 2539 return rc; 2540 2541 return 0; 2542 } 2543 2544 static int 2545 sfc_flow_parse(struct rte_eth_dev *dev, 2546 const struct rte_flow_attr *attr, 2547 const struct rte_flow_item pattern[], 2548 const struct rte_flow_action actions[], 2549 struct rte_flow *flow, 2550 struct rte_flow_error *error) 2551 { 2552 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2553 const struct sfc_flow_ops_by_spec *ops; 2554 int rc; 2555 2556 rc = sfc_flow_parse_attr(sa, attr, flow, error); 2557 if (rc != 0) 2558 return rc; 2559 2560 ops = sfc_flow_get_ops_by_spec(flow); 2561 if (ops == NULL || ops->parse == NULL) { 2562 rte_flow_error_set(error, ENOTSUP, 2563 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2564 "No backend to handle this flow"); 2565 return -rte_errno; 2566 } 2567 2568 return ops->parse(dev, pattern, actions, flow, error); 2569 } 2570 2571 static struct rte_flow * 2572 sfc_flow_zmalloc(struct rte_flow_error *error) 2573 { 2574 struct rte_flow *flow; 2575 2576 flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0); 2577 if (flow == NULL) { 2578 rte_flow_error_set(error, ENOMEM, 2579 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2580 "Failed to allocate memory"); 2581 } 2582 2583 return flow; 2584 } 2585 2586 static void 2587 sfc_flow_free(struct sfc_adapter *sa, struct rte_flow *flow) 2588 { 2589 const struct sfc_flow_ops_by_spec *ops; 2590 2591 ops = sfc_flow_get_ops_by_spec(flow); 2592 if (ops != NULL && ops->cleanup != NULL) 2593 ops->cleanup(sa, flow); 2594 2595 rte_free(flow); 2596 } 2597 2598 static int 2599 sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow, 2600 struct rte_flow_error *error) 2601 { 2602 const struct sfc_flow_ops_by_spec *ops; 2603 int rc; 2604 2605 ops = sfc_flow_get_ops_by_spec(flow); 2606 if (ops == NULL || ops->insert == NULL) { 2607 rte_flow_error_set(error, ENOTSUP, 2608 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2609 "No backend to handle this flow"); 2610 return rte_errno; 2611 } 2612 2613 rc = ops->insert(sa, flow); 2614 if (rc != 0) { 2615 rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2616 NULL, "Failed to insert the flow rule"); 2617 } 2618 2619 return rc; 2620 } 2621 2622 static int 2623 sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow, 2624 struct rte_flow_error *error) 2625 { 2626 const struct sfc_flow_ops_by_spec *ops; 2627 int rc; 2628 2629 ops = sfc_flow_get_ops_by_spec(flow); 2630 if (ops == NULL || ops->remove == NULL) { 
2631 rte_flow_error_set(error, ENOTSUP, 2632 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2633 "No backend to handle this flow"); 2634 return rte_errno; 2635 } 2636 2637 rc = ops->remove(sa, flow); 2638 if (rc != 0) { 2639 rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2640 NULL, "Failed to remove the flow rule"); 2641 } 2642 2643 return rc; 2644 } 2645 2646 static int 2647 sfc_flow_verify(struct sfc_adapter *sa, struct rte_flow *flow, 2648 struct rte_flow_error *error) 2649 { 2650 const struct sfc_flow_ops_by_spec *ops; 2651 int rc = 0; 2652 2653 ops = sfc_flow_get_ops_by_spec(flow); 2654 if (ops == NULL) { 2655 rte_flow_error_set(error, ENOTSUP, 2656 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2657 "No backend to handle this flow"); 2658 return -rte_errno; 2659 } 2660 2661 if (ops->verify != NULL) { 2662 SFC_ASSERT(sfc_adapter_is_locked(sa)); 2663 rc = ops->verify(sa, flow); 2664 } 2665 2666 if (rc != 0) { 2667 rte_flow_error_set(error, rc, 2668 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2669 "Failed to verify flow validity with FW"); 2670 return -rte_errno; 2671 } 2672 2673 return 0; 2674 } 2675 2676 static int 2677 sfc_flow_validate(struct rte_eth_dev *dev, 2678 const struct rte_flow_attr *attr, 2679 const struct rte_flow_item pattern[], 2680 const struct rte_flow_action actions[], 2681 struct rte_flow_error *error) 2682 { 2683 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2684 struct rte_flow *flow; 2685 int rc; 2686 2687 flow = sfc_flow_zmalloc(error); 2688 if (flow == NULL) 2689 return -rte_errno; 2690 2691 sfc_adapter_lock(sa); 2692 2693 rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error); 2694 if (rc == 0) 2695 rc = sfc_flow_verify(sa, flow, error); 2696 2697 sfc_flow_free(sa, flow); 2698 2699 sfc_adapter_unlock(sa); 2700 2701 return rc; 2702 } 2703 2704 static struct rte_flow * 2705 sfc_flow_create(struct rte_eth_dev *dev, 2706 const struct rte_flow_attr *attr, 2707 const struct rte_flow_item pattern[], 2708 const struct rte_flow_action actions[], 2709 struct rte_flow_error *error) 2710 { 2711 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2712 struct rte_flow *flow = NULL; 2713 int rc; 2714 2715 flow = sfc_flow_zmalloc(error); 2716 if (flow == NULL) 2717 goto fail_no_mem; 2718 2719 sfc_adapter_lock(sa); 2720 2721 rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error); 2722 if (rc != 0) 2723 goto fail_bad_value; 2724 2725 TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries); 2726 2727 if (sa->state == SFC_ADAPTER_STARTED) { 2728 rc = sfc_flow_insert(sa, flow, error); 2729 if (rc != 0) 2730 goto fail_flow_insert; 2731 } 2732 2733 sfc_adapter_unlock(sa); 2734 2735 return flow; 2736 2737 fail_flow_insert: 2738 TAILQ_REMOVE(&sa->flow_list, flow, entries); 2739 2740 fail_bad_value: 2741 sfc_flow_free(sa, flow); 2742 sfc_adapter_unlock(sa); 2743 2744 fail_no_mem: 2745 return NULL; 2746 } 2747 2748 static int 2749 sfc_flow_destroy(struct rte_eth_dev *dev, 2750 struct rte_flow *flow, 2751 struct rte_flow_error *error) 2752 { 2753 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2754 struct rte_flow *flow_ptr; 2755 int rc = EINVAL; 2756 2757 sfc_adapter_lock(sa); 2758 2759 TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) { 2760 if (flow_ptr == flow) 2761 rc = 0; 2762 } 2763 if (rc != 0) { 2764 rte_flow_error_set(error, rc, 2765 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2766 "Failed to find flow rule to destroy"); 2767 goto fail_bad_value; 2768 } 2769 2770 if (sa->state == SFC_ADAPTER_STARTED) 2771 rc = sfc_flow_remove(sa, flow, error); 2772 2773 
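	/*
	 * The flow is unlinked and freed even if removing it from hardware
	 * failed; rc still reports the removal status to the caller.
	 */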
TAILQ_REMOVE(&sa->flow_list, flow, entries); 2774 sfc_flow_free(sa, flow); 2775 2776 fail_bad_value: 2777 sfc_adapter_unlock(sa); 2778 2779 return -rc; 2780 } 2781 2782 static int 2783 sfc_flow_flush(struct rte_eth_dev *dev, 2784 struct rte_flow_error *error) 2785 { 2786 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2787 struct rte_flow *flow; 2788 int ret = 0; 2789 2790 sfc_adapter_lock(sa); 2791 2792 while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) { 2793 if (sa->state == SFC_ADAPTER_STARTED) { 2794 int rc; 2795 2796 rc = sfc_flow_remove(sa, flow, error); 2797 if (rc != 0) 2798 ret = rc; 2799 } 2800 2801 TAILQ_REMOVE(&sa->flow_list, flow, entries); 2802 sfc_flow_free(sa, flow); 2803 } 2804 2805 sfc_adapter_unlock(sa); 2806 2807 return -ret; 2808 } 2809 2810 static int 2811 sfc_flow_query(struct rte_eth_dev *dev, 2812 struct rte_flow *flow, 2813 const struct rte_flow_action *action, 2814 void *data, 2815 struct rte_flow_error *error) 2816 { 2817 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2818 const struct sfc_flow_ops_by_spec *ops; 2819 int ret; 2820 2821 sfc_adapter_lock(sa); 2822 2823 ops = sfc_flow_get_ops_by_spec(flow); 2824 if (ops == NULL || ops->query == NULL) { 2825 ret = rte_flow_error_set(error, ENOTSUP, 2826 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2827 "No backend to handle this flow"); 2828 goto fail_no_backend; 2829 } 2830 2831 if (sa->state != SFC_ADAPTER_STARTED) { 2832 ret = rte_flow_error_set(error, EINVAL, 2833 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2834 "Can't query the flow: the adapter is not started"); 2835 goto fail_not_started; 2836 } 2837 2838 ret = ops->query(dev, flow, action, data, error); 2839 if (ret != 0) 2840 goto fail_query; 2841 2842 sfc_adapter_unlock(sa); 2843 2844 return 0; 2845 2846 fail_query: 2847 fail_not_started: 2848 fail_no_backend: 2849 sfc_adapter_unlock(sa); 2850 return ret; 2851 } 2852 2853 static int 2854 sfc_flow_isolate(struct rte_eth_dev *dev, int enable, 2855 struct rte_flow_error *error) 2856 { 2857 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2858 int ret = 0; 2859 2860 sfc_adapter_lock(sa); 2861 if (sa->state != SFC_ADAPTER_INITIALIZED) { 2862 rte_flow_error_set(error, EBUSY, 2863 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2864 NULL, "please close the port first"); 2865 ret = -rte_errno; 2866 } else { 2867 sfc_sa2shared(sa)->isolated = (enable) ? 
B_TRUE : B_FALSE; 2868 } 2869 sfc_adapter_unlock(sa); 2870 2871 return ret; 2872 } 2873 2874 const struct rte_flow_ops sfc_flow_ops = { 2875 .validate = sfc_flow_validate, 2876 .create = sfc_flow_create, 2877 .destroy = sfc_flow_destroy, 2878 .flush = sfc_flow_flush, 2879 .query = sfc_flow_query, 2880 .isolate = sfc_flow_isolate, 2881 }; 2882 2883 void 2884 sfc_flow_init(struct sfc_adapter *sa) 2885 { 2886 SFC_ASSERT(sfc_adapter_is_locked(sa)); 2887 2888 TAILQ_INIT(&sa->flow_list); 2889 } 2890 2891 void 2892 sfc_flow_fini(struct sfc_adapter *sa) 2893 { 2894 struct rte_flow *flow; 2895 2896 SFC_ASSERT(sfc_adapter_is_locked(sa)); 2897 2898 while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) { 2899 TAILQ_REMOVE(&sa->flow_list, flow, entries); 2900 sfc_flow_free(sa, flow); 2901 } 2902 } 2903 2904 void 2905 sfc_flow_stop(struct sfc_adapter *sa) 2906 { 2907 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); 2908 struct sfc_rss *rss = &sas->rss; 2909 struct rte_flow *flow; 2910 2911 SFC_ASSERT(sfc_adapter_is_locked(sa)); 2912 2913 TAILQ_FOREACH(flow, &sa->flow_list, entries) 2914 sfc_flow_remove(sa, flow, NULL); 2915 2916 if (rss->dummy_rss_context != EFX_RSS_CONTEXT_DEFAULT) { 2917 efx_rx_scale_context_free(sa->nic, rss->dummy_rss_context); 2918 rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT; 2919 } 2920 2921 /* 2922 * MAE counter service is not stopped on flow rule remove to avoid 2923 * extra work. Make sure that it is stopped here. 2924 */ 2925 sfc_mae_counter_stop(sa); 2926 } 2927 2928 int 2929 sfc_flow_start(struct sfc_adapter *sa) 2930 { 2931 struct rte_flow *flow; 2932 int rc = 0; 2933 2934 sfc_log_init(sa, "entry"); 2935 2936 SFC_ASSERT(sfc_adapter_is_locked(sa)); 2937 2938 TAILQ_FOREACH(flow, &sa->flow_list, entries) { 2939 rc = sfc_flow_insert(sa, flow, NULL); 2940 if (rc != 0) 2941 goto fail_bad_flow; 2942 } 2943 2944 sfc_log_init(sa, "done"); 2945 2946 fail_bad_flow: 2947 return rc; 2948 } 2949
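
/*
 * Illustrative sketch (not part of the driver): how an application reaches
 * this backend through the generic rte_flow API. The queue index and the
 * trivial Ethernet-only pattern below are hypothetical example values.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *handle = rte_flow_create(port_id, &attr, pattern,
 *						   actions, &err);
 *
 * rte_flow_create() dispatches to sfc_flow_create() via sfc_flow_ops above,
 * and rte_flow_destroy()/rte_flow_flush() reach sfc_flow_destroy() and
 * sfc_flow_flush() in the same way.
 */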