/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2017-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"
#include "sfc_mae_counter.h"

struct sfc_flow_ops_by_spec {
	sfc_flow_parse_cb_t	*parse;
	sfc_flow_verify_cb_t	*verify;
	sfc_flow_cleanup_cb_t	*cleanup;
	sfc_flow_insert_cb_t	*insert;
	sfc_flow_remove_cb_t	*remove;
	sfc_flow_query_cb_t	*query;
};

static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_mae;
static sfc_flow_insert_cb_t sfc_flow_filter_insert;
static sfc_flow_remove_cb_t sfc_flow_filter_remove;

static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
	.parse = sfc_flow_parse_rte_to_filter,
	.verify = NULL,
	.cleanup = NULL,
	.insert = sfc_flow_filter_insert,
	.remove = sfc_flow_filter_remove,
	.query = NULL,
};

static const struct sfc_flow_ops_by_spec sfc_flow_ops_mae = {
	.parse = sfc_flow_parse_rte_to_mae,
	.verify = sfc_mae_flow_verify,
	.cleanup = sfc_mae_flow_cleanup,
	.insert = sfc_mae_flow_insert,
	.remove = sfc_mae_flow_remove,
	.query = sfc_mae_flow_query,
};

static const struct sfc_flow_ops_by_spec *
sfc_flow_get_ops_by_spec(struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	const struct sfc_flow_ops_by_spec *ops = NULL;

	switch (spec->type) {
	case SFC_FLOW_SPEC_FILTER:
		ops = &sfc_flow_ops_filter;
		break;
	case SFC_FLOW_SPEC_MAE:
		ops = &sfc_flow_ops_mae;
		break;
	default:
		SFC_ASSERT(false);
		break;
	}

	return ops;
}

/*
 * Currently, filter-based (VNIC) flow API is implemented in such a manner
 * that each flow rule is converted to one or more hardware filters.
 * All elements of flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that is responsible for the hardware filter.
 * If some required field is unset in the flow rule, then a handful
 * of filter copies will be created to cover all possible values
 * of such a field.
 */
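/*
 * For instance, a rule that does not pin down the kind of destination
 * MAC address is expanded into two filter copies, one with
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and one with
 * EFX_FILTER_MATCH_UNKNOWN_MCAST_DST (see
 * sfc_flow_set_unknown_dst_flags() below), so that both unicast and
 * multicast traffic is covered.
 */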
static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;
static sfc_flow_item_parse sfc_flow_parse_pppoex;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
				     unsigned int filters_count_for_one_val,
				     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
					efx_filter_spec_t *spec,
					struct sfc_filter *filter);

struct sfc_flow_copy_flag {
	/* EFX filter specification match flag */
	efx_filter_match_flags_t flag;
	/* Number of values of corresponding field */
	unsigned int vals_count;
	/* Function to set values in specifications */
	sfc_flow_spec_set_vals *set_vals;
	/*
	 * Function to check that the specification is suitable
	 * for adding this match flag
	 */
	sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return (sum == 0) ? B_TRUE : B_FALSE;
}
/*
 * Validate item and prepare structures spec and mask for parsing
 */
int
sfc_flow_parse_init(const struct rte_flow_item *item,
		    const void **spec_ptr,
		    const void **mask_ptr,
		    const void *supp_mask,
		    const void *def_mask,
		    unsigned int size,
		    struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t supp;
	unsigned int i;

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec");
		return -rte_errno;
	}

	/*
	 * If "mask" is not set, default mask is used,
	 * but if default mask is NULL, "mask" should be set
	 */
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					   "Mask should be specified");
			return -rte_errno;
		}

		mask = def_mask;
	} else {
		mask = item->mask;
	}

	spec = item->spec;
	last = item->last;

	if (spec == NULL)
		goto exit;

	/*
	 * If field values in "last" are either 0 or equal to the corresponding
	 * values in "spec" then they are ignored
	 */
	if (last != NULL &&
	    !sfc_flow_is_zero(last, size) &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ranging is not supported");
		return -rte_errno;
	}

	if (supp_mask == NULL) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Supported mask for item should be specified");
		return -rte_errno;
	}

	/* Check that mask does not ask for more match than supp_mask */
	for (i = 0; i < size; i++) {
		supp = ((const uint8_t *)supp_mask)[i];

		if (~supp & mask[i]) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Item's field is not supported");
			return -rte_errno;
		}
	}

exit:
	*spec_ptr = spec;
	*mask_ptr = mask;
	return 0;
}
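/*
 * Typical call from a protocol parser (cf. sfc_flow_parse_tcp() below):
 *
 *	rc = sfc_flow_parse_init(item,
 *				 (const void **)&spec,
 *				 (const void **)&mask,
 *				 &supp_mask,
 *				 &rte_flow_item_tcp_mask,
 *				 sizeof(struct rte_flow_item_tcp),
 *				 error);
 *
 * On success, "spec" is NULL when the item matches any value of the
 * header, and "mask" points either to the item's mask or to the
 * default mask.
 */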
/*
 * Protocol parsers.
 * Masking is not supported, so masks in items should be either
 * full or empty (zeroed) and set only for supported fields which
 * are specified in the supp_mask.
 */

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
		    __rte_unused struct sfc_flow_parse_ctx *parse_ctx,
		    __rte_unused struct rte_flow_error *error)
{
	return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Outer frame specification may only comprise
 *   source/destination addresses and Ethertype field.
 *   Inner frame specification may contain destination address only.
 *   There is support for individual/group mask as well as for empty and full.
 *   If the mask is NULL, default mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};
	const struct rte_flow_item_eth ifrm_supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
		0x01, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	const struct rte_flow_item_eth *supp_mask_p;
	const struct rte_flow_item_eth *def_mask_p;
	uint8_t *loc_mac = NULL;
	boolean_t is_ifrm = (efx_spec->efs_encap_type !=
			EFX_TUNNEL_PROTOCOL_NONE);

	if (is_ifrm) {
		supp_mask_p = &ifrm_supp_mask;
		def_mask_p = &ifrm_supp_mask;
		loc_mac = efx_spec->efs_ifrm_loc_mac;
	} else {
		supp_mask_p = &supp_mask;
		def_mask_p = &rte_flow_item_eth_mask;
		loc_mac = efx_spec->efs_loc_mac;
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 supp_mask_p, def_mask_p,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, could be any Ethernet */
	if (spec == NULL)
		return 0;

	if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		efx_spec->efs_match_flags |= is_ifrm ?
			EFX_FILTER_MATCH_IFRM_LOC_MAC :
			EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (memcmp(mask->dst.addr_bytes, ig_mask,
			  EFX_MAC_ADDR_LEN) == 0) {
		if (rte_is_unicast_ether_addr(&spec->dst))
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
		else
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	} else if (!rte_is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	/*
	 * ifrm_supp_mask ensures that the source address and
	 * ethertype masks are equal to zero in inner frame,
	 * so these fields are filled in only for the outer frame
	 */
	if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!rte_is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * Ether type is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}
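/*
 * Note: the ig_mask above covers only the individual/group bit of the
 * destination MAC address, so a rule with mask 01:00:00:00:00:00
 * requests a match on "any unknown unicast" or "any unknown multicast"
 * destination, depending on the value of that bit in the spec.
 */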
/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only VID field is supported.
 *   The mask can not be NULL. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	uint16_t vid;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_vlan *spec = NULL;
	const struct rte_flow_item_vlan *mask = NULL;
	const struct rte_flow_item_vlan supp_mask = {
		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
		.inner_type = RTE_BE16(0xffff),
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 NULL,
				 sizeof(struct rte_flow_item_vlan),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * VID is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used.
	 * If two VLAN items are included, the first matches
	 * the outer tag and the next matches the inner tag.
	 */
	if (mask->tci == supp_mask.tci) {
		/* Apply mask to keep VID only */
		vid = rte_bswap16(spec->tci & mask->tci);

		if (!(efx_spec->efs_match_flags &
		      EFX_FILTER_MATCH_OUTER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
			efx_spec->efs_outer_vid = vid;
		} else if (!(efx_spec->efs_match_flags &
			     EFX_FILTER_MATCH_INNER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
			efx_spec->efs_inner_vid = vid;
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "More than two VLAN items");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN ID in TCI match is required");
		return -rte_errno;
	}

	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN TPID matching is not supported");
		return -rte_errno;
	}
	if (mask->inner_type == supp_mask.inner_type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
	} else if (mask->inner_type) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Bad mask for VLAN inner_type");
		return -rte_errno;
	}

	return 0;
}

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_ipv4 *spec = NULL;
	const struct rte_flow_item_ipv4 *mask = NULL;
	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
	const struct rte_flow_item_ipv4 supp_mask = {
		.hdr = {
			.src_addr = 0xffffffff,
			.dst_addr = 0xffffffff,
			.next_proto_id = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv4 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv4;
	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV4 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv4 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
	} else if (mask->hdr.src_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
	} else if (mask->hdr.dst_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
	} else if (mask->hdr.next_proto_id != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV4 pattern item");
	return -rte_errno;
}

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_ipv6 *spec = NULL;
	const struct rte_flow_item_ipv6 *mask = NULL;
	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
	const struct rte_flow_item_ipv6 supp_mask = {
		.hdr = {
			.src_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.proto = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv6 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv6;
	} else if (efx_spec->efs_ether_type != ether_type_ipv6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV6 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv6 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
		   sizeof(mask->hdr.src_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
				 sizeof(spec->hdr.src_addr));
		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
			   sizeof(efx_spec->efs_rem_host));
	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
				     sizeof(mask->hdr.src_addr))) {
		goto fail_bad_mask;
	}

	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
		   sizeof(mask->hdr.dst_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
				 sizeof(spec->hdr.dst_addr));
		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
			   sizeof(efx_spec->efs_loc_host));
	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
				     sizeof(mask->hdr.dst_addr))) {
		goto fail_bad_mask;
	}

	if (mask->hdr.proto == supp_mask.hdr.proto) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.proto;
	} else if (mask->hdr.proto != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV6 pattern item");
	return -rte_errno;
}

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination ports fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_tcp *spec = NULL;
	const struct rte_flow_item_tcp *mask = NULL;
	const struct rte_flow_item_tcp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_tcp_mask,
				 sizeof(struct rte_flow_item_tcp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by TCP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with TCP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item
	 * and in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the TCP pattern item");
	return -rte_errno;
}

/**
 * Convert UDP item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination ports fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_udp *spec = NULL;
	const struct rte_flow_item_udp *mask = NULL;
	const struct rte_flow_item_udp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_udp_mask,
				 sizeof(struct rte_flow_item_udp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by UDP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with UDP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item
	 * and in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the UDP pattern item");
	return -rte_errno;
}

/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
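/*
 * For example, a VXLAN item requires the outer IP protocol to be UDP,
 * so if a preceding UDP item has already set IP_PROTO, the helper
 * below only verifies that the values agree; otherwise it sets
 * IP_PROTO itself.
 */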
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
					efx_filter_spec_t *efx_spec,
					uint8_t ip_proto,
					struct rte_flow_error *error)
{
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = ip_proto;
	} else if (efx_spec->efs_ip_proto != ip_proto) {
		switch (ip_proto) {
		case EFX_IPPROTO_UDP:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be UDP "
				"in VxLAN/GENEVE pattern");
			return -rte_errno;

		case EFX_IPPROTO_GRE:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be GRE "
				"in NVGRE pattern");
			return -rte_errno;

		default:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Only VxLAN/GENEVE/NVGRE tunneling patterns "
				"are supported");
			return -rte_errno;
		}
	}

	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Outer frame EtherType in pattern with tunneling "
			"must be IPv4 or IPv6");
		return -rte_errno;
	}

	return 0;
}

static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
				  const uint8_t *vni_or_vsid_val,
				  const uint8_t *vni_or_vsid_mask,
				  const struct rte_flow_item *item,
				  struct rte_flow_error *error)
{
	const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
		0xff, 0xff, 0xff
	};

	if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
		   EFX_VNI_OR_VSID_LEN) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
		rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
			   EFX_VNI_OR_VSID_LEN);
	} else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Unsupported VNI/VSID mask");
		return -rte_errno;
	}

	return 0;
}

/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only VXLAN network identifier field is supported.
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
		     struct sfc_flow_parse_ctx *parse_ctx,
		     struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_vxlan *spec = NULL;
	const struct rte_flow_item_vxlan *mask = NULL;
	const struct rte_flow_item_vxlan supp_mask = {
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_vxlan_mask,
				 sizeof(struct rte_flow_item_vxlan),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}

/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only Virtual Network Identifier and protocol type
 *   fields are supported. But protocol type can be only Ethernet (0x6558).
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
		      struct sfc_flow_parse_ctx *parse_ctx,
		      struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_geneve *spec = NULL;
	const struct rte_flow_item_geneve *mask = NULL;
	const struct rte_flow_item_geneve supp_mask = {
		.protocol = RTE_BE16(0xffff),
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_geneve_mask,
				 sizeof(struct rte_flow_item_geneve),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	if (mask->protocol == supp_mask.protocol) {
		if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"GENEVE encap. protocol must be Ethernet "
				"(0x6558) in the GENEVE pattern item");
			return -rte_errno;
		}
	} else if (mask->protocol != 0) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Unsupported mask for GENEVE encap. protocol");
		return -rte_errno;
	}

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}

/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only virtual subnet ID field is supported.
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
		     struct sfc_flow_parse_ctx *parse_ctx,
		     struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_nvgre *spec = NULL;
	const struct rte_flow_item_nvgre *mask = NULL;
	const struct rte_flow_item_nvgre supp_mask = {
		.tni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_nvgre_mask,
				 sizeof(struct rte_flow_item_nvgre),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_GRE, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
					       mask->tni, item, error);

	return rc;
}

/**
 * Convert PPPoEx item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification.
 *   Matching on PPPoEx fields is not supported.
 *   This item can only be used to set or validate the EtherType filter.
 *   Only zero masks are allowed.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_pppoex(const struct rte_flow_item *item,
		      struct sfc_flow_parse_ctx *parse_ctx,
		      struct rte_flow_error *error)
{
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_pppoe *spec = NULL;
	const struct rte_flow_item_pppoe *mask = NULL;
	const struct rte_flow_item_pppoe supp_mask = {};
	const struct rte_flow_item_pppoe def_mask = {};
	uint16_t ether_type;
	int rc;

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &def_mask,
				 sizeof(struct rte_flow_item_pppoe),
				 error);
	if (rc != 0)
		return rc;

	if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED)
		ether_type = RTE_ETHER_TYPE_PPPOE_DISCOVERY;
	else
		ether_type = RTE_ETHER_TYPE_PPPOE_SESSION;

	if ((efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) != 0) {
		if (efx_spec->efs_ether_type != ether_type) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Invalid EtherType for a PPPoE flow item");
			return -rte_errno;
		}
	} else {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type;
	}

	return 0;
}
static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_VOID,
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_void,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_vlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_PPPOED,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_pppoex,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_PPPOES,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_pppoex,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_ipv4,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_ipv6,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_tcp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_udp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_vxlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_geneve,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_nvgre,
	},
};
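/*
 * Note: the prev_layer/layer pair in the table above encodes the
 * permitted item order enforced by sfc_flow_parse_pattern(): e.g. ETH
 * goes from the start layer to L2, VLAN stays within L2, IPv4/IPv6 go
 * from L2 to L3, and the tunnel items (VXLAN, GENEVE, NVGRE) reset the
 * layer to the start layer for the inner frame.
 */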
/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(struct sfc_adapter *sa,
		    const struct rte_flow_attr *attr,
		    struct rte_flow *flow,
		    struct rte_flow_error *error)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	struct sfc_mae *mae = &sa->mae;

	if (attr == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "NULL attribute");
		return -rte_errno;
	}
	if (attr->group != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
				   "Groups are not supported");
		return -rte_errno;
	}
	if (attr->egress != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
				   "Egress is not supported");
		return -rte_errno;
	}
	if (attr->ingress == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
				   "Ingress is compulsory");
		return -rte_errno;
	}
	if (attr->transfer == 0) {
		if (attr->priority != 0) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   attr, "Priorities are unsupported");
			return -rte_errno;
		}
		spec->type = SFC_FLOW_SPEC_FILTER;
		spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
		spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
		spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL;
	} else {
		if (mae->status != SFC_MAE_STATUS_SUPPORTED) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					   attr, "Transfer is not supported");
			return -rte_errno;
		}
		if (attr->priority > mae->nb_action_rule_prios_max) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   attr, "Unsupported priority level");
			return -rte_errno;
		}
		spec->type = SFC_FLOW_SPEC_MAE;
		spec_mae->priority = attr->priority;
		spec_mae->match_spec = NULL;
		spec_mae->action_set = NULL;
		spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
	}

	return 0;
}

/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(const struct sfc_flow_item *items,
		  unsigned int nb_items,
		  enum rte_flow_item_type type)
{
	unsigned int i;

	for (i = 0; i < nb_items; i++)
		if (items[i].type == type)
			return &items[i];

	return NULL;
}

int
sfc_flow_parse_pattern(const struct sfc_flow_item *flow_items,
		       unsigned int nb_flow_items,
		       const struct rte_flow_item pattern[],
		       struct sfc_flow_parse_ctx *parse_ctx,
		       struct rte_flow_error *error)
{
	int rc;
	unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
	boolean_t is_ifrm = B_FALSE;
	const struct sfc_flow_item *item;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		item = sfc_flow_get_item(flow_items, nb_flow_items,
					 pattern->type);
		if (item == NULL) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unsupported pattern item");
			return -rte_errno;
		}

		/*
		 * Omitting one or several protocol layers at the beginning
		 * of pattern is supported
		 */
		if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    item->prev_layer != prev_layer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unexpected sequence of pattern items");
			return -rte_errno;
		}

		/*
		 * Allow only VOID and ETH pattern items in the inner frame.
		 * Also check that there is only one tunneling protocol.
		 */
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
		case RTE_FLOW_ITEM_TYPE_ETH:
			break;

		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_GENEVE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			if (is_ifrm) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   pattern,
						   "More than one tunneling protocol");
				return -rte_errno;
			}
			is_ifrm = B_TRUE;
			break;

		default:
			if (parse_ctx->type == SFC_FLOW_PARSE_CTX_FILTER &&
			    is_ifrm) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   pattern,
						   "There is an unsupported pattern item "
						   "in the inner frame");
				return -rte_errno;
			}
			break;
		}

		if (parse_ctx->type != item->ctx_type) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Parse context type mismatch");
			return -rte_errno;
		}

		rc = item->parse(pattern, parse_ctx, error);
		if (rc != 0)
			return rc;

		if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
			prev_layer = item->layer;
	}

	return 0;
}

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
		     const struct rte_flow_action_queue *queue,
		     struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_rxq *rxq;
	struct sfc_rxq_info *rxq_info;

	if (queue->index >= sfc_sa2shared(sa)->ethdev_rxq_count)
		return -EINVAL;

	rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, queue->index);
	spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;

	rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index];
	spec_filter->rss_hash_required = !!(rxq_info->rxq_flags &
					    SFC_RXQ_FLAG_RSS_HASH);

	return 0;
}
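/*
 * Note: the RSS action parser below maps the queues of the action onto
 * a span of hardware Rx queue indices and fills the indirection table
 * with offsets relative to the minimum hardware index; the base queue
 * ID itself is conveyed by the filter specification (efs_dmaq_id).
 */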
static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
		   const struct rte_flow_action_rss *action_rss,
		   struct rte_flow *flow)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	sfc_ethdev_qid_t ethdev_qid;
	struct sfc_rxq *rxq;
	unsigned int rxq_hw_index_min;
	unsigned int rxq_hw_index_max;
	efx_rx_hash_type_t efx_hash_types;
	const uint8_t *rss_key;
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
	unsigned int i;

	if (action_rss->queue_num == 0)
		return -EINVAL;

	ethdev_qid = sfc_sa2shared(sa)->ethdev_rxq_count - 1;
	rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
	rxq_hw_index_min = rxq->hw_index;
	rxq_hw_index_max = 0;

	for (i = 0; i < action_rss->queue_num; ++i) {
		ethdev_qid = action_rss->queue[i];

		if ((unsigned int)ethdev_qid >=
		    sfc_sa2shared(sa)->ethdev_rxq_count)
			return -EINVAL;

		rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);

		if (rxq->hw_index < rxq_hw_index_min)
			rxq_hw_index_min = rxq->hw_index;

		if (rxq->hw_index > rxq_hw_index_max)
			rxq_hw_index_max = rxq->hw_index;
	}

	switch (action_rss->func) {
	case RTE_ETH_HASH_FUNCTION_DEFAULT:
	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
		break;
	default:
		return -EINVAL;
	}

	if (action_rss->level)
		return -EINVAL;

	/*
	 * Dummy RSS action with only one queue and no specific settings
	 * for hash types and key does not require dedicated RSS context
	 * and may be simplified to single queue action.
	 */
	if (action_rss->queue_num == 1 && action_rss->types == 0 &&
	    action_rss->key_len == 0) {
		spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
		return 0;
	}

	if (action_rss->types) {
		int rc;

		rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
					  &efx_hash_types);
		if (rc != 0)
			return -rc;
	} else {
		unsigned int i;

		efx_hash_types = 0;
		for (i = 0; i < rss->hf_map_nb_entries; ++i)
			efx_hash_types |= rss->hf_map[i].efx;
	}

	if (action_rss->key_len) {
		if (action_rss->key_len != sizeof(rss->key))
			return -EINVAL;

		rss_key = action_rss->key;
	} else {
		rss_key = rss->key;
	}

	spec_filter->rss = B_TRUE;

	sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
	sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
	sfc_rss_conf->rss_hash_types = efx_hash_types;
	rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));

	for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
		unsigned int nb_queues = action_rss->queue_num;
		struct sfc_rxq *rxq;

		ethdev_qid = action_rss->queue[i % nb_queues];
		rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
		sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
	}

	return 0;
}

static int
sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
		    unsigned int filters_count)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < filters_count; i++) {
		int rc;

		rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
		if (ret == 0 && rc != 0) {
			sfc_err(sa, "failed to remove filter specification "
				    "(rc = %d)", rc);
			ret = rc;
		}
	}

	return ret;
}

static int
sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	unsigned int i;
	int rc = 0;

	for (i = 0; i < spec_filter->count; i++) {
		rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
		if (rc != 0) {
			sfc_flow_spec_flush(sa, spec, i);
			break;
		}
	}

	return rc;
}

static int
sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;

	return sfc_flow_spec_flush(sa, spec, spec_filter->count);
}
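/*
 * Note: sfc_flow_filter_insert() below allocates a dedicated RSS
 * context when the flow carries an RSS action, or a shared "dummy"
 * RSS context when a flow without RSS still needs a valid RSS hash
 * (rss_hash_required); the dummy context is created once and reused
 * by subsequent flows.
 */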
static int
sfc_flow_filter_insert(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
	struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
	uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
	boolean_t create_context;
	unsigned int i;
	int rc = 0;

	create_context = spec_filter->rss || (spec_filter->rss_hash_required &&
			rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT);

	if (create_context) {
		unsigned int rss_spread;
		unsigned int rss_hash_types;
		uint8_t *rss_key;

		if (spec_filter->rss) {
			rss_spread = MIN(flow_rss->rxq_hw_index_max -
					flow_rss->rxq_hw_index_min + 1,
					EFX_MAXRSS);
			rss_hash_types = flow_rss->rss_hash_types;
			rss_key = flow_rss->rss_key;
		} else {
			/*
			 * Initialize dummy RSS context parameters to have
			 * valid RSS hash. Use default RSS hash function and
			 * key.
			 */
			rss_spread = 1;
			rss_hash_types = rss->hash_types;
			rss_key = rss->key;
		}

		rc = efx_rx_scale_context_alloc(sa->nic,
						EFX_RX_SCALE_EXCLUSIVE,
						rss_spread,
						&efs_rss_context);
		if (rc != 0)
			goto fail_scale_context_alloc;

		rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
					   rss->hash_alg,
					   rss_hash_types, B_TRUE);
		if (rc != 0)
			goto fail_scale_mode_set;

		rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
					  rss_key, sizeof(rss->key));
		if (rc != 0)
			goto fail_scale_key_set;
	} else {
		efs_rss_context = rss->dummy_rss_context;
	}

	if (spec_filter->rss || spec_filter->rss_hash_required) {
		/*
		 * At this point, fully elaborated filter specifications
		 * have been produced from the template. To make sure that
		 * RSS behaviour is consistent between them, set the same
		 * RSS context value everywhere.
		 */
		for (i = 0; i < spec_filter->count; i++) {
			efx_filter_spec_t *spec = &spec_filter->filters[i];

			spec->efs_rss_context = efs_rss_context;
			spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
			if (spec_filter->rss)
				spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
		}
	}

	rc = sfc_flow_spec_insert(sa, &flow->spec);
	if (rc != 0)
		goto fail_filter_insert;

	if (create_context) {
		unsigned int dummy_tbl[RTE_DIM(flow_rss->rss_tbl)] = {0};
		unsigned int *tbl;

		tbl = spec_filter->rss ? flow_rss->rss_tbl : dummy_tbl;

		/*
		 * Scale table is set after filter insertion because
		 * the table entries are relative to the base RxQ ID
		 * and the latter is submitted to the HW by means of
		 * inserting a filter, so by the time of the request
		 * the HW knows all the information needed to verify
		 * the table entries, and the operation will succeed
		 */
		rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
					  tbl, RTE_DIM(flow_rss->rss_tbl));
		if (rc != 0)
			goto fail_scale_tbl_set;

		/* Remember created dummy RSS context */
		if (!spec_filter->rss)
			rss->dummy_rss_context = efs_rss_context;
	}

	return 0;

fail_scale_tbl_set:
	sfc_flow_spec_remove(sa, &flow->spec);

fail_filter_insert:
fail_scale_key_set:
fail_scale_mode_set:
	if (create_context)
		efx_rx_scale_context_free(sa->nic, efs_rss_context);

fail_scale_context_alloc:
	return rc;
}

static int
sfc_flow_filter_remove(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
	int rc = 0;

	rc = sfc_flow_spec_remove(sa, &flow->spec);
	if (rc != 0)
		return rc;

	if (spec_filter->rss) {
		/*
		 * All specifications for a given flow rule have the same RSS
		 * context, so that RSS context value is taken from the first
		 * filter specification
		 */
		efx_filter_spec_t *spec = &spec_filter->filters[0];

		rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
	}

	return rc;
}
static int
sfc_flow_parse_mark(struct sfc_adapter *sa,
		    const struct rte_flow_action_mark *mark,
		    struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

	if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
		return EINVAL;

	spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
	spec_filter->template.efs_mark = mark->id;

	return 0;
}

static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	const unsigned int dp_rx_features = sa->priv.dp_rx->features;
	uint32_t actions_set = 0;
	const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
					   (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
					   (1UL << RTE_FLOW_ACTION_TYPE_DROP);
	const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
					   (1UL << RTE_FLOW_ACTION_TYPE_FLAG);

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
					       actions_set);
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad QUEUE action");
				return -rte_errno;
			}
			break;

		case RTE_FLOW_ACTION_TYPE_RSS:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			rc = sfc_flow_parse_rss(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, -rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad RSS action");
				return -rte_errno;
			}
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			spec_filter->template.efs_dmaq_id =
				EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
			break;

		case RTE_FLOW_ACTION_TYPE_FLAG:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
					       actions_set);
			if ((actions_set & mark_actions_mask) != 0)
				goto fail_actions_overlap;

			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"FLAG action is not supported on the current Rx datapath");
				return -rte_errno;
			}

			spec_filter->template.efs_flags |=
				EFX_FILTER_FLAG_ACTION_FLAG;
			break;

		case RTE_FLOW_ACTION_TYPE_MARK:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
					       actions_set);
			if ((actions_set & mark_actions_mask) != 0)
				goto fail_actions_overlap;

			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"MARK action is not supported on the current Rx datapath");
				return -rte_errno;
			}

			rc = sfc_flow_parse_mark(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad MARK action");
				return -rte_errno;
			}
			break;

		default:
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Action is not supported");
			return -rte_errno;
		}

		actions_set |= (1UL << actions->type);
	}

	/* When fate is unknown, drop traffic. */
	if ((actions_set & fate_actions_mask) == 0) {
		spec_filter->template.efs_dmaq_id =
			EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
	}

	return 0;

fail_fate_actions:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Cannot combine several fate-deciding actions, "
			   "choose between QUEUE, RSS or DROP");
	return -rte_errno;

fail_actions_overlap:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Overlapping actions are not supported");
	return -rte_errno;
}

/**
 * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
 * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
 * specifications after copying.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should have the same match flag, which is the
 *   number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
			       unsigned int filters_count_for_one_val,
			       struct rte_flow_error *error)
{
	unsigned int i;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	static const efx_filter_match_flags_t vals[] = {
		EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
	};

	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect while copying "
			"by unknown destination flags");
		return -rte_errno;
	}

	for (i = 0; i < spec_filter->count; i++) {
		/* The check above ensures that divisor can't be zero here */
		spec_filter->filters[i].efs_match_flags |=
			vals[i / filters_count_for_one_val];
	}

	return 0;
}
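/*
 * For example, with the two values above and
 * filters_count_for_one_val == 2, the flow ends up with four filter
 * copies: filters[0..1] get EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and
 * filters[2..3] get EFX_FILTER_MATCH_UNKNOWN_MCAST_DST.
 */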
/**
 * Check that the following condition is met:
 * - the list of supported filters has a filter
 *   with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
 *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
 *   be inserted.
 *
 * @param match[in]
 *   The match flags of filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter with list of supported filters.
 */
static boolean_t
sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
				 __rte_unused efx_filter_spec_t *spec,
				 struct sfc_filter *filter)
{
	unsigned int i;
	efx_filter_match_flags_t match_mcast_dst;

	match_mcast_dst =
		(match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	for (i = 0; i < filter->supported_match_num; i++) {
		if (match_mcast_dst == filter->supported_match[i])
			return B_TRUE;
	}

	return B_FALSE;
}

/**
 * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and assign the
 * EFX_ETHER_TYPE_IPV4 and EFX_ETHER_TYPE_IPV6 values of the
 * corresponding field in the specification copies made beforehand.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should share the same EtherType value;
 *   this equals the number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
			unsigned int filters_count_for_one_val,
			struct rte_flow_error *error)
{
	unsigned int i;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	static const uint16_t vals[] = {
		EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
	};

	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect "
			"while copying by Ethertype");
		return -rte_errno;
	}

	for (i = 0; i < spec_filter->count; i++) {
		spec_filter->filters[i].efs_match_flags |=
			EFX_FILTER_MATCH_ETHER_TYPE;

		/*
		 * The check above ensures that
		 * filters_count_for_one_val is not 0
		 */
		spec_filter->filters[i].efs_ether_type =
			vals[i / filters_count_for_one_val];
	}

	return 0;
}
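
/*
 * Worked example (illustrative): a flow rule that does not constrain
 * EtherType cannot be served by a single hardware filter if the NIC only
 * supports filters that match on EtherType. In that case the template is
 * copied once and the two resulting filters match the practically
 * supported EtherTypes (IPv4 and IPv6 respectively), together
 * approximating the original rule.
 */
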
/**
 * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
 * in the specification copies made beforehand.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should share the same match flag value;
 *   this equals the number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
			    unsigned int filters_count_for_one_val,
			    struct rte_flow_error *error)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	unsigned int i;

	if (filters_count_for_one_val != spec_filter->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect "
			"while copying by outer VLAN ID");
		return -rte_errno;
	}

	for (i = 0; i < spec_filter->count; i++) {
		spec_filter->filters[i].efs_match_flags |=
			EFX_FILTER_MATCH_OUTER_VID;

		spec_filter->filters[i].efs_outer_vid = 0;
	}

	return 0;
}

/**
 * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
 * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the
 * specification copies made beforehand.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should share the same match flag value;
 *   this equals the number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
				    unsigned int filters_count_for_one_val,
				    struct rte_flow_error *error)
{
	unsigned int i;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	static const efx_filter_match_flags_t vals[] = {
		EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
	};

	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect while copying "
			"by inner frame unknown destination flags");
		return -rte_errno;
	}

	for (i = 0; i < spec_filter->count; i++) {
		/* The check above ensures that divisor can't be zero here */
		spec_filter->filters[i].efs_match_flags |=
			vals[i / filters_count_for_one_val];
	}

	return 0;
}

/**
 * Check that the following conditions are met:
 * - the specification corresponds to a filter for encapsulated traffic
 * - the list of supported filters has a filter
 *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
 *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
 *   be inserted.
 *
 * @param match[in]
 *   The match flags of filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter with list of supported filters.
 */
static boolean_t
sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
				      efx_filter_spec_t *spec,
				      struct sfc_filter *filter)
{
	unsigned int i;
	efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
	efx_filter_match_flags_t match_mcast_dst;

	if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
		return B_FALSE;

	match_mcast_dst =
		(match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
	for (i = 0; i < filter->supported_match_num; i++) {
		if (match_mcast_dst == filter->supported_match[i])
			return B_TRUE;
	}

	return B_FALSE;
}
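
/*
 * Note (illustrative): the inner frame (IFRM) variants above only make
 * sense for encapsulated traffic, hence the EFX_TUNNEL_PROTOCOL_NONE
 * early return. For instance, a VXLAN rule whose template carries
 * efs_encap_type == EFX_TUNNEL_PROTOCOL_VXLAN may be expanded with the
 * IFRM unknown unicast/multicast destination flags, whereas a plain
 * (non-tunnel) rule never is.
 */
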
/**
 * Check that the list of supported filters has a filter that differs
 * from @p match in that it has no EFX_FILTER_MATCH_OUTER_VID flag;
 * in this case that filter will be used and the flag
 * EFX_FILTER_MATCH_OUTER_VID is not needed.
 *
 * @param match[in]
 *   The match flags of filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter with list of supported filters.
 */
static boolean_t
sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
			      __rte_unused efx_filter_spec_t *spec,
			      struct sfc_filter *filter)
{
	unsigned int i;
	efx_filter_match_flags_t match_without_vid =
		match & ~EFX_FILTER_MATCH_OUTER_VID;

	for (i = 0; i < filter->supported_match_num; i++) {
		if (match_without_vid == filter->supported_match[i])
			return B_FALSE;
	}

	return B_TRUE;
}

/*
 * Match flags that can be automatically added to filters.
 * Selecting the last minimum when searching for the copy flag ensures that
 * the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than
 * EFX_FILTER_MATCH_ETHER_TYPE, because the filter with
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of
 * supported filters.
 */
static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
	{
		.flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
		.vals_count = 2,
		.set_vals = sfc_flow_set_unknown_dst_flags,
		.spec_check = sfc_flow_check_unknown_dst_flags,
	},
	{
		.flag = EFX_FILTER_MATCH_ETHER_TYPE,
		.vals_count = 2,
		.set_vals = sfc_flow_set_ethertypes,
		.spec_check = NULL,
	},
	{
		.flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
		.vals_count = 2,
		.set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
		.spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
	},
	{
		.flag = EFX_FILTER_MATCH_OUTER_VID,
		.vals_count = 1,
		.set_vals = sfc_flow_set_outer_vid_flag,
		.spec_check = sfc_flow_check_outer_vid_flag,
	},
};

/* Get an item from the sfc_flow_copy_flags array */
static const struct sfc_flow_copy_flag *
sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		if (sfc_flow_copy_flags[i].flag == flag)
			return &sfc_flow_copy_flags[i];
	}

	return NULL;
}

/**
 * Make copies of the specifications and set the match flag and the
 * values of the corresponding field in them.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param flag[in]
 *   The match flag to add.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
			     efx_filter_match_flags_t flag,
			     struct rte_flow_error *error)
{
	unsigned int i;
	unsigned int new_filters_count;
	unsigned int filters_count_for_one_val;
	const struct sfc_flow_copy_flag *copy_flag;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	int rc;

	copy_flag = sfc_flow_get_copy_flag(flag);
	if (copy_flag == NULL) {
		rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Unsupported spec field for copying");
		return -rte_errno;
	}

	new_filters_count = spec_filter->count * copy_flag->vals_count;
	if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Too many EFX specifications in the flow rule");
		return -rte_errno;
	}

	/* Copy filter specifications */
	for (i = spec_filter->count; i < new_filters_count; i++) {
		spec_filter->filters[i] =
			spec_filter->filters[i - spec_filter->count];
	}

	filters_count_for_one_val = spec_filter->count;
	spec_filter->count = new_filters_count;

	rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
	if (rc != 0)
		return rc;

	return 0;
}
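
/*
 * Worked example (illustrative): starting from one filter, adding
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST (vals_count == 2) doubles the array
 * to two filters; adding EFX_FILTER_MATCH_ETHER_TYPE afterwards doubles
 * it again to four. Each step must keep the total within
 * SF_FLOW_SPEC_NB_FILTERS_MAX, otherwise the rule is rejected.
 */
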
/**
 * Check that the given set of match flags missing in the original filter
 * spec could be covered by adding spec copies which specify the
 * corresponding flags and packet field values to match.
 *
 * @param miss_flags[in]
 *   Match flags that are present in the supported filter but missing
 *   from the original specification.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter.
 *
 * @return
 *   Number of specifications after copying, or 0 if the flags cannot
 *   be added.
 */
static unsigned int
sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
			     efx_filter_spec_t *spec,
			     struct sfc_filter *filter)
{
	unsigned int i;
	efx_filter_match_flags_t copy_flags = 0;
	efx_filter_match_flags_t flag;
	efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
	sfc_flow_spec_check *check;
	unsigned int multiplier = 1;

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		flag = sfc_flow_copy_flags[i].flag;
		check = sfc_flow_copy_flags[i].spec_check;
		if ((flag & miss_flags) == flag) {
			if (check != NULL && (!check(match, spec, filter)))
				continue;

			copy_flags |= flag;
			multiplier *= sfc_flow_copy_flags[i].vals_count;
		}
	}

	if (copy_flags == miss_flags)
		return multiplier;

	return 0;
}

/**
 * Attempt to supplement the specification template with the minimally
 * supported set of match flags. To do this, it is necessary to copy the
 * specifications and fill them in with the values of the fields that
 * correspond to the missing flags.
 * The necessary and sufficient filter set is built from the fewest
 * number of copies which could be made to cover the minimally required
 * set of flags.
 *
 * @param sa[in]
 *   SFC adapter.
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
			       struct sfc_flow_spec *spec,
			       struct rte_flow_error *error)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_filter *filter = &sa->filter;
	efx_filter_match_flags_t miss_flags;
	efx_filter_match_flags_t min_miss_flags = 0;
	efx_filter_match_flags_t match;
	unsigned int min_multiplier = UINT_MAX;
	unsigned int multiplier;
	unsigned int i;
	int rc;

	match = spec_filter->template.efs_match_flags;
	for (i = 0; i < filter->supported_match_num; i++) {
		if ((match & filter->supported_match[i]) == match) {
			miss_flags = filter->supported_match[i] & (~match);
			multiplier = sfc_flow_check_missing_flags(miss_flags,
				&spec_filter->template, filter);
			if (multiplier > 0) {
				if (multiplier <= min_multiplier) {
					min_multiplier = multiplier;
					min_miss_flags = miss_flags;
				}
			}
		}
	}

	if (min_multiplier == UINT_MAX) {
		rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"The flow rule pattern is unsupported");
		return -rte_errno;
	}

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;

		if ((flag & min_miss_flags) == flag) {
			rc = sfc_flow_spec_add_match_flag(spec, flag, error);
			if (rc != 0)
				return rc;
		}
	}

	return 0;
}
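
/*
 * Worked example (illustrative): suppose the template matches on local
 * MAC only, and the supported list contains "LOC_MAC + ETHER_TYPE"
 * (the missing ETHER_TYPE flag expands into 2 copies) as well as
 * "LOC_MAC + ETHER_TYPE + IP_PROTO" (which cannot be auto-completed,
 * since IP_PROTO has no entry in sfc_flow_copy_flags). The first
 * candidate yields multiplier 2 and is selected: min_multiplier picks
 * the supported filter requiring the fewest specification copies,
 * preferring the last such candidate in the supported list on a tie.
 */
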
/**
 * Check whether a set of match flags corresponds to a filter pattern;
 * the filter is described by its match flags, which may additionally
 * carry the OUTER_VID and INNER_VID flags.
 *
 * @param match_flags[in]
 *   Set of match flags.
 * @param flags_pattern[in]
 *   Pattern of filter match flags.
 */
static boolean_t
sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
			    efx_filter_match_flags_t flags_pattern)
{
	if ((match_flags & flags_pattern) != flags_pattern)
		return B_FALSE;

	switch (match_flags & ~flags_pattern) {
	case 0:
	case EFX_FILTER_MATCH_OUTER_VID:
	case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
		return B_TRUE;
	default:
		return B_FALSE;
	}
}

/**
 * Check whether the spec maps to a hardware filter which is known to be
 * ineffective despite being valid.
 *
 * @param filter[in]
 *   SFC filter with list of supported filters.
 * @param spec[in]
 *   SFC flow specification.
 */
static boolean_t
sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
				  struct sfc_flow_spec *spec)
{
	unsigned int i;
	uint16_t ether_type;
	uint8_t ip_proto;
	efx_filter_match_flags_t match_flags;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;

	for (i = 0; i < spec_filter->count; i++) {
		match_flags = spec_filter->filters[i].efs_match_flags;

		if (sfc_flow_is_match_with_vids(match_flags,
						EFX_FILTER_MATCH_ETHER_TYPE) ||
		    sfc_flow_is_match_with_vids(match_flags,
						EFX_FILTER_MATCH_ETHER_TYPE |
						EFX_FILTER_MATCH_LOC_MAC)) {
			ether_type = spec_filter->filters[i].efs_ether_type;
			if (filter->supports_ip_proto_or_addr_filter &&
			    (ether_type == EFX_ETHER_TYPE_IPV4 ||
			     ether_type == EFX_ETHER_TYPE_IPV6))
				return B_TRUE;
		} else if (sfc_flow_is_match_with_vids(match_flags,
				EFX_FILTER_MATCH_ETHER_TYPE |
				EFX_FILTER_MATCH_IP_PROTO) ||
			   sfc_flow_is_match_with_vids(match_flags,
				EFX_FILTER_MATCH_ETHER_TYPE |
				EFX_FILTER_MATCH_IP_PROTO |
				EFX_FILTER_MATCH_LOC_MAC)) {
			ip_proto = spec_filter->filters[i].efs_ip_proto;
			if (filter->supports_rem_or_local_port_filter &&
			    (ip_proto == EFX_IPPROTO_TCP ||
			     ip_proto == EFX_IPPROTO_UDP))
				return B_TRUE;
		}
	}

	return B_FALSE;
}

static int
sfc_flow_validate_match_flags(struct sfc_adapter *sa,
			      struct rte_flow *flow,
			      struct rte_flow_error *error)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	efx_filter_spec_t *spec_tmpl = &spec_filter->template;
	efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
	int rc;

	/* Initialize the first filter spec with template */
	spec_filter->filters[0] = *spec_tmpl;
	spec_filter->count = 1;

	if (!sfc_filter_is_match_supported(sa, match_flags)) {
		rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
		if (rc != 0)
			return rc;
	}

	if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
		rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"The flow rule pattern is unsupported");
		return -rte_errno;
	}

	return 0;
}
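
/*
 * Note (illustrative): for VNIC (filter-based) rules, parsing proceeds
 * in three steps: the pattern is parsed into the filter template, the
 * actions are parsed into the same template, and finally the template's
 * match flags are validated and, if necessary, completed by copying
 * (sfc_flow_validate_match_flags() above). Only after all three steps
 * succeed is the rule considered parseable.
 */
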
static int
sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_flow *flow,
			     struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_flow_parse_ctx ctx;
	int rc;

	ctx.type = SFC_FLOW_PARSE_CTX_FILTER;
	ctx.filter = &spec_filter->template;

	rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
				    pattern, &ctx, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_actions(sa, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_validate_match_flags(sa, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	return 0;

fail_bad_value:
	return rc;
}

static int
sfc_flow_parse_rte_to_mae(struct rte_eth_dev *dev,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_flow *flow,
			  struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	int rc;

	rc = sfc_mae_rule_parse_pattern(sa, pattern, spec_mae, error);
	if (rc != 0)
		return rc;

	rc = sfc_mae_rule_parse_actions(sa, actions, spec_mae, error);
	if (rc != 0)
		return rc;

	return 0;
}

static int
sfc_flow_parse(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	const struct sfc_flow_ops_by_spec *ops;
	int rc;

	rc = sfc_flow_parse_attr(sa, attr, flow, error);
	if (rc != 0)
		return rc;

	ops = sfc_flow_get_ops_by_spec(flow);
	if (ops == NULL || ops->parse == NULL) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "No backend to handle this flow");
		return -rte_errno;
	}

	return ops->parse(dev, pattern, actions, flow, error);
}
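
/*
 * Note (illustrative): sfc_flow_parse_attr() presumably chooses the
 * specification type from the flow attributes (e.g. transfer rules map
 * to SFC_FLOW_SPEC_MAE, other rules to SFC_FLOW_SPEC_FILTER), and the
 * per-type ops table then routes parse/insert/remove/query to the
 * matching backend. Adding a backend therefore amounts to providing a
 * new ops structure and extending sfc_flow_get_ops_by_spec().
 */
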
static struct rte_flow *
sfc_flow_zmalloc(struct rte_flow_error *error)
{
	struct rte_flow *flow;

	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
	if (flow == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to allocate memory");
	}

	return flow;
}

static void
sfc_flow_free(struct sfc_adapter *sa, struct rte_flow *flow)
{
	const struct sfc_flow_ops_by_spec *ops;

	ops = sfc_flow_get_ops_by_spec(flow);
	if (ops != NULL && ops->cleanup != NULL)
		ops->cleanup(sa, flow);

	rte_free(flow);
}

static int
sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow,
		struct rte_flow_error *error)
{
	const struct sfc_flow_ops_by_spec *ops;
	int rc;

	ops = sfc_flow_get_ops_by_spec(flow);
	if (ops == NULL || ops->insert == NULL) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "No backend to handle this flow");
		return rte_errno;
	}

	rc = ops->insert(sa, flow);
	if (rc != 0) {
		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "Failed to insert the flow rule");
	}

	return rc;
}

static int
sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow,
		struct rte_flow_error *error)
{
	const struct sfc_flow_ops_by_spec *ops;
	int rc;

	ops = sfc_flow_get_ops_by_spec(flow);
	if (ops == NULL || ops->remove == NULL) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "No backend to handle this flow");
		return rte_errno;
	}

	rc = ops->remove(sa, flow);
	if (rc != 0) {
		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "Failed to remove the flow rule");
	}

	return rc;
}

static int
sfc_flow_verify(struct sfc_adapter *sa, struct rte_flow *flow,
		struct rte_flow_error *error)
{
	const struct sfc_flow_ops_by_spec *ops;
	int rc = 0;

	ops = sfc_flow_get_ops_by_spec(flow);
	if (ops == NULL) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "No backend to handle this flow");
		return -rte_errno;
	}

	if (ops->verify != NULL) {
		SFC_ASSERT(sfc_adapter_is_locked(sa));
		rc = ops->verify(sa, flow);
	}

	if (rc != 0) {
		rte_flow_error_set(error, rc,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Failed to verify flow validity with FW");
		return -rte_errno;
	}

	return 0;
}

static int
sfc_flow_validate(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct rte_flow *flow;
	int rc;

	flow = sfc_flow_zmalloc(error);
	if (flow == NULL)
		return -rte_errno;

	sfc_adapter_lock(sa);

	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
	if (rc == 0)
		rc = sfc_flow_verify(sa, flow, error);

	sfc_flow_free(sa, flow);

	sfc_adapter_unlock(sa);

	return rc;
}

static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct rte_flow *flow = NULL;
	int rc;

	flow = sfc_flow_zmalloc(error);
	if (flow == NULL)
		goto fail_no_mem;

	sfc_adapter_lock(sa);

	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = sfc_flow_insert(sa, flow, error);
		if (rc != 0)
			goto fail_flow_insert;
	}

	sfc_adapter_unlock(sa);

	return flow;

fail_flow_insert:
	TAILQ_REMOVE(&sa->flow_list, flow, entries);

fail_bad_value:
	sfc_flow_free(sa, flow);
	sfc_adapter_unlock(sa);

fail_no_mem:
	return NULL;
}
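
/*
 * Illustrative application-side usage of the entry points above (not part
 * of the driver; port_id, the pattern and the error handling are
 * placeholders):
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *handle;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		handle = rte_flow_create(port_id, &attr, pattern, actions,
 *					 &err);
 *
 * rte_flow_create() is dispatched to sfc_flow_create() above through the
 * rte_flow_ops table registered by this driver (sfc_flow_ops below).
 */
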
static int
sfc_flow_destroy(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct rte_flow *flow_ptr;
	int rc = EINVAL;

	sfc_adapter_lock(sa);

	TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
		if (flow_ptr == flow)
			rc = 0;
	}
	if (rc != 0) {
		rte_flow_error_set(error, rc,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to find flow rule to destroy");
		goto fail_bad_value;
	}

	if (sa->state == SFC_ADAPTER_STARTED)
		rc = sfc_flow_remove(sa, flow, error);

	TAILQ_REMOVE(&sa->flow_list, flow, entries);
	sfc_flow_free(sa, flow);

fail_bad_value:
	sfc_adapter_unlock(sa);

	return -rc;
}

static int
sfc_flow_flush(struct rte_eth_dev *dev,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct rte_flow *flow;
	int ret = 0;

	sfc_adapter_lock(sa);

	while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
		if (sa->state == SFC_ADAPTER_STARTED) {
			int rc;

			rc = sfc_flow_remove(sa, flow, error);
			if (rc != 0)
				ret = rc;
		}

		TAILQ_REMOVE(&sa->flow_list, flow, entries);
		sfc_flow_free(sa, flow);
	}

	sfc_adapter_unlock(sa);

	return -ret;
}

static int
sfc_flow_query(struct rte_eth_dev *dev,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	const struct sfc_flow_ops_by_spec *ops;
	int ret;

	sfc_adapter_lock(sa);

	ops = sfc_flow_get_ops_by_spec(flow);
	if (ops == NULL || ops->query == NULL) {
		ret = rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"No backend to handle this flow");
		goto fail_no_backend;
	}

	if (sa->state != SFC_ADAPTER_STARTED) {
		ret = rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Can't query the flow: the adapter is not started");
		goto fail_not_started;
	}

	ret = ops->query(dev, flow, action, data, error);
	if (ret != 0)
		goto fail_query;

	sfc_adapter_unlock(sa);

	return 0;

fail_query:
fail_not_started:
fail_no_backend:
	sfc_adapter_unlock(sa);
	return ret;
}
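
/*
 * Illustrative usage of the query entry point above (not part of the
 * driver): querying a COUNT action, which the MAE backend's query
 * callback presumably serves, could look like this on the application
 * side:
 *
 *	struct rte_flow_query_count counters = { .reset = 0 };
 *	struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_query(port_id, handle, &count_action,
 *			   &counters, &err) == 0)
 *		printf("hits: %" PRIu64 "\n", counters.hits);
 */
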
static int
sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int ret = 0;

	sfc_adapter_lock(sa);
	if (sa->state != SFC_ADAPTER_INITIALIZED) {
		rte_flow_error_set(error, EBUSY,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "please close the port first");
		ret = -rte_errno;
	} else {
		sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
	}
	sfc_adapter_unlock(sa);

	return ret;
}

const struct rte_flow_ops sfc_flow_ops = {
	.validate = sfc_flow_validate,
	.create = sfc_flow_create,
	.destroy = sfc_flow_destroy,
	.flush = sfc_flow_flush,
	.query = sfc_flow_query,
	.isolate = sfc_flow_isolate,
};

void
sfc_flow_init(struct sfc_adapter *sa)
{
	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_INIT(&sa->flow_list);
}

void
sfc_flow_fini(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
		TAILQ_REMOVE(&sa->flow_list, flow, entries);
		sfc_flow_free(sa, flow);
	}
}

void
sfc_flow_stop(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->flow_list, entries)
		sfc_flow_remove(sa, flow, NULL);

	if (rss->dummy_rss_context != EFX_RSS_CONTEXT_DEFAULT) {
		efx_rx_scale_context_free(sa->nic, rss->dummy_rss_context);
		rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT;
	}

	/*
	 * The MAE counter service is not stopped on flow rule removal to
	 * avoid extra work. Make sure that it is stopped here.
	 */
	sfc_mae_counter_stop(sa);
}

int
sfc_flow_start(struct sfc_adapter *sa)
{
	struct rte_flow *flow;
	int rc = 0;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->flow_list, entries) {
		rc = sfc_flow_insert(sa, flow, NULL);
		if (rc != 0)
			goto fail_bad_flow;
	}

	sfc_log_init(sa, "done");

fail_bad_flow:
	return rc;
}