/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"

/*
 * At the moment, the flow API is implemented in such a manner that each
 * flow rule is converted to one or more hardware filters.
 * All elements of the flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that is responsible for the hardware filter.
 * If some required field is unset in the flow rule, then a handful
 * of filter copies will be created to cover all possible values
 * of such a field.
 */

enum sfc_flow_item_layers {
	SFC_FLOW_ITEM_ANY_LAYER,
	SFC_FLOW_ITEM_START_LAYER,
	SFC_FLOW_ITEM_L2,
	SFC_FLOW_ITEM_L3,
	SFC_FLOW_ITEM_L4,
};

typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
				  efx_filter_spec_t *spec,
				  struct rte_flow_error *error);

struct sfc_flow_item {
	enum rte_flow_item_type type;		/* Type of item */
	enum sfc_flow_item_layers layer;	/* Layer of item */
	enum sfc_flow_item_layers prev_layer;	/* Previous layer of item */
	sfc_flow_item_parse *parse;		/* Parsing function */
};

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
				     unsigned int filters_count_for_one_val,
				     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
					efx_filter_spec_t *spec,
					struct sfc_filter *filter);

struct sfc_flow_copy_flag {
	/* EFX filter specification match flag */
	efx_filter_match_flags_t flag;
	/* Number of values of corresponding field */
	unsigned int vals_count;
	/* Function to set values in specifications */
	sfc_flow_spec_set_vals *set_vals;
	/*
	 * Function to check that the specification is suitable
	 * for adding this match flag
	 */
	sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return (sum == 0) ?
B_TRUE : B_FALSE; 107 } 108 109 /* 110 * Validate item and prepare structures spec and mask for parsing 111 */ 112 static int 113 sfc_flow_parse_init(const struct rte_flow_item *item, 114 const void **spec_ptr, 115 const void **mask_ptr, 116 const void *supp_mask, 117 const void *def_mask, 118 unsigned int size, 119 struct rte_flow_error *error) 120 { 121 const uint8_t *spec; 122 const uint8_t *mask; 123 const uint8_t *last; 124 uint8_t supp; 125 unsigned int i; 126 127 if (item == NULL) { 128 rte_flow_error_set(error, EINVAL, 129 RTE_FLOW_ERROR_TYPE_ITEM, NULL, 130 "NULL item"); 131 return -rte_errno; 132 } 133 134 if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) { 135 rte_flow_error_set(error, EINVAL, 136 RTE_FLOW_ERROR_TYPE_ITEM, item, 137 "Mask or last is set without spec"); 138 return -rte_errno; 139 } 140 141 /* 142 * If "mask" is not set, default mask is used, 143 * but if default mask is NULL, "mask" should be set 144 */ 145 if (item->mask == NULL) { 146 if (def_mask == NULL) { 147 rte_flow_error_set(error, EINVAL, 148 RTE_FLOW_ERROR_TYPE_ITEM, NULL, 149 "Mask should be specified"); 150 return -rte_errno; 151 } 152 153 mask = def_mask; 154 } else { 155 mask = item->mask; 156 } 157 158 spec = item->spec; 159 last = item->last; 160 161 if (spec == NULL) 162 goto exit; 163 164 /* 165 * If field values in "last" are either 0 or equal to the corresponding 166 * values in "spec" then they are ignored 167 */ 168 if (last != NULL && 169 !sfc_flow_is_zero(last, size) && 170 memcmp(last, spec, size) != 0) { 171 rte_flow_error_set(error, ENOTSUP, 172 RTE_FLOW_ERROR_TYPE_ITEM, item, 173 "Ranging is not supported"); 174 return -rte_errno; 175 } 176 177 if (supp_mask == NULL) { 178 rte_flow_error_set(error, EINVAL, 179 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 180 "Supported mask for item should be specified"); 181 return -rte_errno; 182 } 183 184 /* Check that mask does not ask for more match than supp_mask */ 185 for (i = 0; i < size; i++) { 186 supp = ((const uint8_t *)supp_mask)[i]; 187 188 if (~supp & mask[i]) { 189 rte_flow_error_set(error, ENOTSUP, 190 RTE_FLOW_ERROR_TYPE_ITEM, item, 191 "Item's field is not supported"); 192 return -rte_errno; 193 } 194 } 195 196 exit: 197 *spec_ptr = spec; 198 *mask_ptr = mask; 199 return 0; 200 } 201 202 /* 203 * Protocol parsers. 204 * Masking is not supported, so masks in items should be either 205 * full or empty (zeroed) and set only for supported fields which 206 * are specified in the supp_mask. 207 */ 208 209 static int 210 sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item, 211 __rte_unused efx_filter_spec_t *efx_spec, 212 __rte_unused struct rte_flow_error *error) 213 { 214 return 0; 215 } 216 217 /** 218 * Convert Ethernet item to EFX filter specification. 219 * 220 * @param item[in] 221 * Item specification. Outer frame specification may only comprise 222 * source/destination addresses and Ethertype field. 223 * Inner frame specification may contain destination address only. 224 * There is support for individual/group mask as well as for empty and full. 225 * If the mask is NULL, default mask will be used. Ranging is not supported. 226 * @param efx_spec[in, out] 227 * EFX filter specification to update. 228 * @param[out] error 229 * Perform verbose error reporting if not NULL. 
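 *
 *   For illustration only (the address below is a placeholder): an outer
 *   ETH item with a fully masked destination of 00:53:00:00:00:01 sets
 *   EFX_FILTER_MATCH_LOC_MAC and copies the address to efs_loc_mac, while
 *   the individual/group mask 01:00:00:00:00:00 selects the
 *   UNKNOWN_UCAST_DST or UNKNOWN_MCAST_DST match flag depending on the
 *   destination address in the spec.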
230 */ 231 static int 232 sfc_flow_parse_eth(const struct rte_flow_item *item, 233 efx_filter_spec_t *efx_spec, 234 struct rte_flow_error *error) 235 { 236 int rc; 237 const struct rte_flow_item_eth *spec = NULL; 238 const struct rte_flow_item_eth *mask = NULL; 239 const struct rte_flow_item_eth supp_mask = { 240 .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 241 .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 242 .type = 0xffff, 243 }; 244 const struct rte_flow_item_eth ifrm_supp_mask = { 245 .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 246 }; 247 const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = { 248 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 249 }; 250 const struct rte_flow_item_eth *supp_mask_p; 251 const struct rte_flow_item_eth *def_mask_p; 252 uint8_t *loc_mac = NULL; 253 boolean_t is_ifrm = (efx_spec->efs_encap_type != 254 EFX_TUNNEL_PROTOCOL_NONE); 255 256 if (is_ifrm) { 257 supp_mask_p = &ifrm_supp_mask; 258 def_mask_p = &ifrm_supp_mask; 259 loc_mac = efx_spec->efs_ifrm_loc_mac; 260 } else { 261 supp_mask_p = &supp_mask; 262 def_mask_p = &rte_flow_item_eth_mask; 263 loc_mac = efx_spec->efs_loc_mac; 264 } 265 266 rc = sfc_flow_parse_init(item, 267 (const void **)&spec, 268 (const void **)&mask, 269 supp_mask_p, def_mask_p, 270 sizeof(struct rte_flow_item_eth), 271 error); 272 if (rc != 0) 273 return rc; 274 275 /* If "spec" is not set, could be any Ethernet */ 276 if (spec == NULL) 277 return 0; 278 279 if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) { 280 efx_spec->efs_match_flags |= is_ifrm ? 281 EFX_FILTER_MATCH_IFRM_LOC_MAC : 282 EFX_FILTER_MATCH_LOC_MAC; 283 rte_memcpy(loc_mac, spec->dst.addr_bytes, 284 EFX_MAC_ADDR_LEN); 285 } else if (memcmp(mask->dst.addr_bytes, ig_mask, 286 EFX_MAC_ADDR_LEN) == 0) { 287 if (is_unicast_ether_addr(&spec->dst)) 288 efx_spec->efs_match_flags |= is_ifrm ? 289 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST : 290 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST; 291 else 292 efx_spec->efs_match_flags |= is_ifrm ? 293 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST : 294 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST; 295 } else if (!is_zero_ether_addr(&mask->dst)) { 296 goto fail_bad_mask; 297 } 298 299 /* 300 * ifrm_supp_mask ensures that the source address and 301 * ethertype masks are equal to zero in inner frame, 302 * so these fields are filled in only for the outer frame 303 */ 304 if (is_same_ether_addr(&mask->src, &supp_mask.src)) { 305 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC; 306 rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes, 307 EFX_MAC_ADDR_LEN); 308 } else if (!is_zero_ether_addr(&mask->src)) { 309 goto fail_bad_mask; 310 } 311 312 /* 313 * Ether type is in big-endian byte order in item and 314 * in little-endian in efx_spec, so byte swap is used 315 */ 316 if (mask->type == supp_mask.type) { 317 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; 318 efx_spec->efs_ether_type = rte_bswap16(spec->type); 319 } else if (mask->type != 0) { 320 goto fail_bad_mask; 321 } 322 323 return 0; 324 325 fail_bad_mask: 326 rte_flow_error_set(error, EINVAL, 327 RTE_FLOW_ERROR_TYPE_ITEM, item, 328 "Bad mask in the ETH pattern item"); 329 return -rte_errno; 330 } 331 332 /** 333 * Convert VLAN item to EFX filter specification. 334 * 335 * @param item[in] 336 * Item specification. Only VID field is supported. 337 * The mask can not be NULL. Ranging is not supported. 338 * @param efx_spec[in, out] 339 * EFX filter specification to update. 340 * @param[out] error 341 * Perform verbose error reporting if not NULL. 
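 *
 *   For example, a VLAN item whose TCI mask is 0x0fff and whose spec
 *   carries VID 100 sets EFX_FILTER_MATCH_OUTER_VID with efs_outer_vid
 *   equal to 100; a second VLAN item sets INNER_VID instead, and a third
 *   one is rejected.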
342 */ 343 static int 344 sfc_flow_parse_vlan(const struct rte_flow_item *item, 345 efx_filter_spec_t *efx_spec, 346 struct rte_flow_error *error) 347 { 348 int rc; 349 uint16_t vid; 350 const struct rte_flow_item_vlan *spec = NULL; 351 const struct rte_flow_item_vlan *mask = NULL; 352 const struct rte_flow_item_vlan supp_mask = { 353 .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX), 354 .inner_type = RTE_BE16(0xffff), 355 }; 356 357 rc = sfc_flow_parse_init(item, 358 (const void **)&spec, 359 (const void **)&mask, 360 &supp_mask, 361 NULL, 362 sizeof(struct rte_flow_item_vlan), 363 error); 364 if (rc != 0) 365 return rc; 366 367 /* 368 * VID is in big-endian byte order in item and 369 * in little-endian in efx_spec, so byte swap is used. 370 * If two VLAN items are included, the first matches 371 * the outer tag and the next matches the inner tag. 372 */ 373 if (mask->tci == supp_mask.tci) { 374 /* Apply mask to keep VID only */ 375 vid = rte_bswap16(spec->tci & mask->tci); 376 377 if (!(efx_spec->efs_match_flags & 378 EFX_FILTER_MATCH_OUTER_VID)) { 379 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID; 380 efx_spec->efs_outer_vid = vid; 381 } else if (!(efx_spec->efs_match_flags & 382 EFX_FILTER_MATCH_INNER_VID)) { 383 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID; 384 efx_spec->efs_inner_vid = vid; 385 } else { 386 rte_flow_error_set(error, EINVAL, 387 RTE_FLOW_ERROR_TYPE_ITEM, item, 388 "More than two VLAN items"); 389 return -rte_errno; 390 } 391 } else { 392 rte_flow_error_set(error, EINVAL, 393 RTE_FLOW_ERROR_TYPE_ITEM, item, 394 "VLAN ID in TCI match is required"); 395 return -rte_errno; 396 } 397 398 if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) { 399 rte_flow_error_set(error, EINVAL, 400 RTE_FLOW_ERROR_TYPE_ITEM, item, 401 "VLAN TPID matching is not supported"); 402 return -rte_errno; 403 } 404 if (mask->inner_type == supp_mask.inner_type) { 405 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; 406 efx_spec->efs_ether_type = rte_bswap16(spec->inner_type); 407 } else if (mask->inner_type) { 408 rte_flow_error_set(error, EINVAL, 409 RTE_FLOW_ERROR_TYPE_ITEM, item, 410 "Bad mask for VLAN inner_type"); 411 return -rte_errno; 412 } 413 414 return 0; 415 } 416 417 /** 418 * Convert IPv4 item to EFX filter specification. 419 * 420 * @param item[in] 421 * Item specification. Only source and destination addresses and 422 * protocol fields are supported. If the mask is NULL, default 423 * mask will be used. Ranging is not supported. 424 * @param efx_spec[in, out] 425 * EFX filter specification to update. 426 * @param[out] error 427 * Perform verbose error reporting if not NULL. 
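 *
 *   A worked example (addresses are placeholders): a fully masked
 *   dst_addr of 192.0.2.1 sets EFX_FILTER_MATCH_LOC_HOST, a fully masked
 *   src_addr sets REM_HOST, and a fully masked next_proto_id sets
 *   IP_PROTO; the IPv4 EtherType is added automatically when the pattern
 *   has not set it yet.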
428 */ 429 static int 430 sfc_flow_parse_ipv4(const struct rte_flow_item *item, 431 efx_filter_spec_t *efx_spec, 432 struct rte_flow_error *error) 433 { 434 int rc; 435 const struct rte_flow_item_ipv4 *spec = NULL; 436 const struct rte_flow_item_ipv4 *mask = NULL; 437 const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4); 438 const struct rte_flow_item_ipv4 supp_mask = { 439 .hdr = { 440 .src_addr = 0xffffffff, 441 .dst_addr = 0xffffffff, 442 .next_proto_id = 0xff, 443 } 444 }; 445 446 rc = sfc_flow_parse_init(item, 447 (const void **)&spec, 448 (const void **)&mask, 449 &supp_mask, 450 &rte_flow_item_ipv4_mask, 451 sizeof(struct rte_flow_item_ipv4), 452 error); 453 if (rc != 0) 454 return rc; 455 456 /* 457 * Filtering by IPv4 source and destination addresses requires 458 * the appropriate ETHER_TYPE in hardware filters 459 */ 460 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) { 461 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; 462 efx_spec->efs_ether_type = ether_type_ipv4; 463 } else if (efx_spec->efs_ether_type != ether_type_ipv4) { 464 rte_flow_error_set(error, EINVAL, 465 RTE_FLOW_ERROR_TYPE_ITEM, item, 466 "Ethertype in pattern with IPV4 item should be appropriate"); 467 return -rte_errno; 468 } 469 470 if (spec == NULL) 471 return 0; 472 473 /* 474 * IPv4 addresses are in big-endian byte order in item and in 475 * efx_spec 476 */ 477 if (mask->hdr.src_addr == supp_mask.hdr.src_addr) { 478 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST; 479 efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr; 480 } else if (mask->hdr.src_addr != 0) { 481 goto fail_bad_mask; 482 } 483 484 if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) { 485 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST; 486 efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr; 487 } else if (mask->hdr.dst_addr != 0) { 488 goto fail_bad_mask; 489 } 490 491 if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) { 492 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 493 efx_spec->efs_ip_proto = spec->hdr.next_proto_id; 494 } else if (mask->hdr.next_proto_id != 0) { 495 goto fail_bad_mask; 496 } 497 498 return 0; 499 500 fail_bad_mask: 501 rte_flow_error_set(error, EINVAL, 502 RTE_FLOW_ERROR_TYPE_ITEM, item, 503 "Bad mask in the IPV4 pattern item"); 504 return -rte_errno; 505 } 506 507 /** 508 * Convert IPv6 item to EFX filter specification. 509 * 510 * @param item[in] 511 * Item specification. Only source and destination addresses and 512 * next header fields are supported. If the mask is NULL, default 513 * mask will be used. Ranging is not supported. 514 * @param efx_spec[in, out] 515 * EFX filter specification to update. 516 * @param[out] error 517 * Perform verbose error reporting if not NULL. 
518 */ 519 static int 520 sfc_flow_parse_ipv6(const struct rte_flow_item *item, 521 efx_filter_spec_t *efx_spec, 522 struct rte_flow_error *error) 523 { 524 int rc; 525 const struct rte_flow_item_ipv6 *spec = NULL; 526 const struct rte_flow_item_ipv6 *mask = NULL; 527 const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6); 528 const struct rte_flow_item_ipv6 supp_mask = { 529 .hdr = { 530 .src_addr = { 0xff, 0xff, 0xff, 0xff, 531 0xff, 0xff, 0xff, 0xff, 532 0xff, 0xff, 0xff, 0xff, 533 0xff, 0xff, 0xff, 0xff }, 534 .dst_addr = { 0xff, 0xff, 0xff, 0xff, 535 0xff, 0xff, 0xff, 0xff, 536 0xff, 0xff, 0xff, 0xff, 537 0xff, 0xff, 0xff, 0xff }, 538 .proto = 0xff, 539 } 540 }; 541 542 rc = sfc_flow_parse_init(item, 543 (const void **)&spec, 544 (const void **)&mask, 545 &supp_mask, 546 &rte_flow_item_ipv6_mask, 547 sizeof(struct rte_flow_item_ipv6), 548 error); 549 if (rc != 0) 550 return rc; 551 552 /* 553 * Filtering by IPv6 source and destination addresses requires 554 * the appropriate ETHER_TYPE in hardware filters 555 */ 556 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) { 557 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; 558 efx_spec->efs_ether_type = ether_type_ipv6; 559 } else if (efx_spec->efs_ether_type != ether_type_ipv6) { 560 rte_flow_error_set(error, EINVAL, 561 RTE_FLOW_ERROR_TYPE_ITEM, item, 562 "Ethertype in pattern with IPV6 item should be appropriate"); 563 return -rte_errno; 564 } 565 566 if (spec == NULL) 567 return 0; 568 569 /* 570 * IPv6 addresses are in big-endian byte order in item and in 571 * efx_spec 572 */ 573 if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr, 574 sizeof(mask->hdr.src_addr)) == 0) { 575 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST; 576 577 RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) != 578 sizeof(spec->hdr.src_addr)); 579 rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr, 580 sizeof(efx_spec->efs_rem_host)); 581 } else if (!sfc_flow_is_zero(mask->hdr.src_addr, 582 sizeof(mask->hdr.src_addr))) { 583 goto fail_bad_mask; 584 } 585 586 if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr, 587 sizeof(mask->hdr.dst_addr)) == 0) { 588 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST; 589 590 RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) != 591 sizeof(spec->hdr.dst_addr)); 592 rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr, 593 sizeof(efx_spec->efs_loc_host)); 594 } else if (!sfc_flow_is_zero(mask->hdr.dst_addr, 595 sizeof(mask->hdr.dst_addr))) { 596 goto fail_bad_mask; 597 } 598 599 if (mask->hdr.proto == supp_mask.hdr.proto) { 600 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 601 efx_spec->efs_ip_proto = spec->hdr.proto; 602 } else if (mask->hdr.proto != 0) { 603 goto fail_bad_mask; 604 } 605 606 return 0; 607 608 fail_bad_mask: 609 rte_flow_error_set(error, EINVAL, 610 RTE_FLOW_ERROR_TYPE_ITEM, item, 611 "Bad mask in the IPV6 pattern item"); 612 return -rte_errno; 613 } 614 615 /** 616 * Convert TCP item to EFX filter specification. 617 * 618 * @param item[in] 619 * Item specification. Only source and destination ports fields 620 * are supported. If the mask is NULL, default mask will be used. 621 * Ranging is not supported. 622 * @param efx_spec[in, out] 623 * EFX filter specification to update. 624 * @param[out] error 625 * Perform verbose error reporting if not NULL. 
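 *
 *   For instance, a fully masked dst_port of 80 sets
 *   EFX_FILTER_MATCH_LOC_PORT with efs_loc_port = 80 after the byte swap,
 *   and EFX_IPPROTO_TCP is enforced as the IP_PROTO match.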
626 */ 627 static int 628 sfc_flow_parse_tcp(const struct rte_flow_item *item, 629 efx_filter_spec_t *efx_spec, 630 struct rte_flow_error *error) 631 { 632 int rc; 633 const struct rte_flow_item_tcp *spec = NULL; 634 const struct rte_flow_item_tcp *mask = NULL; 635 const struct rte_flow_item_tcp supp_mask = { 636 .hdr = { 637 .src_port = 0xffff, 638 .dst_port = 0xffff, 639 } 640 }; 641 642 rc = sfc_flow_parse_init(item, 643 (const void **)&spec, 644 (const void **)&mask, 645 &supp_mask, 646 &rte_flow_item_tcp_mask, 647 sizeof(struct rte_flow_item_tcp), 648 error); 649 if (rc != 0) 650 return rc; 651 652 /* 653 * Filtering by TCP source and destination ports requires 654 * the appropriate IP_PROTO in hardware filters 655 */ 656 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) { 657 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 658 efx_spec->efs_ip_proto = EFX_IPPROTO_TCP; 659 } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) { 660 rte_flow_error_set(error, EINVAL, 661 RTE_FLOW_ERROR_TYPE_ITEM, item, 662 "IP proto in pattern with TCP item should be appropriate"); 663 return -rte_errno; 664 } 665 666 if (spec == NULL) 667 return 0; 668 669 /* 670 * Source and destination ports are in big-endian byte order in item and 671 * in little-endian in efx_spec, so byte swap is used 672 */ 673 if (mask->hdr.src_port == supp_mask.hdr.src_port) { 674 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT; 675 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port); 676 } else if (mask->hdr.src_port != 0) { 677 goto fail_bad_mask; 678 } 679 680 if (mask->hdr.dst_port == supp_mask.hdr.dst_port) { 681 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT; 682 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port); 683 } else if (mask->hdr.dst_port != 0) { 684 goto fail_bad_mask; 685 } 686 687 return 0; 688 689 fail_bad_mask: 690 rte_flow_error_set(error, EINVAL, 691 RTE_FLOW_ERROR_TYPE_ITEM, item, 692 "Bad mask in the TCP pattern item"); 693 return -rte_errno; 694 } 695 696 /** 697 * Convert UDP item to EFX filter specification. 698 * 699 * @param item[in] 700 * Item specification. Only source and destination ports fields 701 * are supported. If the mask is NULL, default mask will be used. 702 * Ranging is not supported. 703 * @param efx_spec[in, out] 704 * EFX filter specification to update. 705 * @param[out] error 706 * Perform verbose error reporting if not NULL. 
707 */ 708 static int 709 sfc_flow_parse_udp(const struct rte_flow_item *item, 710 efx_filter_spec_t *efx_spec, 711 struct rte_flow_error *error) 712 { 713 int rc; 714 const struct rte_flow_item_udp *spec = NULL; 715 const struct rte_flow_item_udp *mask = NULL; 716 const struct rte_flow_item_udp supp_mask = { 717 .hdr = { 718 .src_port = 0xffff, 719 .dst_port = 0xffff, 720 } 721 }; 722 723 rc = sfc_flow_parse_init(item, 724 (const void **)&spec, 725 (const void **)&mask, 726 &supp_mask, 727 &rte_flow_item_udp_mask, 728 sizeof(struct rte_flow_item_udp), 729 error); 730 if (rc != 0) 731 return rc; 732 733 /* 734 * Filtering by UDP source and destination ports requires 735 * the appropriate IP_PROTO in hardware filters 736 */ 737 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) { 738 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 739 efx_spec->efs_ip_proto = EFX_IPPROTO_UDP; 740 } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) { 741 rte_flow_error_set(error, EINVAL, 742 RTE_FLOW_ERROR_TYPE_ITEM, item, 743 "IP proto in pattern with UDP item should be appropriate"); 744 return -rte_errno; 745 } 746 747 if (spec == NULL) 748 return 0; 749 750 /* 751 * Source and destination ports are in big-endian byte order in item and 752 * in little-endian in efx_spec, so byte swap is used 753 */ 754 if (mask->hdr.src_port == supp_mask.hdr.src_port) { 755 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT; 756 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port); 757 } else if (mask->hdr.src_port != 0) { 758 goto fail_bad_mask; 759 } 760 761 if (mask->hdr.dst_port == supp_mask.hdr.dst_port) { 762 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT; 763 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port); 764 } else if (mask->hdr.dst_port != 0) { 765 goto fail_bad_mask; 766 } 767 768 return 0; 769 770 fail_bad_mask: 771 rte_flow_error_set(error, EINVAL, 772 RTE_FLOW_ERROR_TYPE_ITEM, item, 773 "Bad mask in the UDP pattern item"); 774 return -rte_errno; 775 } 776 777 /* 778 * Filters for encapsulated packets match based on the EtherType and IP 779 * protocol in the outer frame. 
780 */ 781 static int 782 sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item, 783 efx_filter_spec_t *efx_spec, 784 uint8_t ip_proto, 785 struct rte_flow_error *error) 786 { 787 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) { 788 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 789 efx_spec->efs_ip_proto = ip_proto; 790 } else if (efx_spec->efs_ip_proto != ip_proto) { 791 switch (ip_proto) { 792 case EFX_IPPROTO_UDP: 793 rte_flow_error_set(error, EINVAL, 794 RTE_FLOW_ERROR_TYPE_ITEM, item, 795 "Outer IP header protocol must be UDP " 796 "in VxLAN/GENEVE pattern"); 797 return -rte_errno; 798 799 case EFX_IPPROTO_GRE: 800 rte_flow_error_set(error, EINVAL, 801 RTE_FLOW_ERROR_TYPE_ITEM, item, 802 "Outer IP header protocol must be GRE " 803 "in NVGRE pattern"); 804 return -rte_errno; 805 806 default: 807 rte_flow_error_set(error, EINVAL, 808 RTE_FLOW_ERROR_TYPE_ITEM, item, 809 "Only VxLAN/GENEVE/NVGRE tunneling patterns " 810 "are supported"); 811 return -rte_errno; 812 } 813 } 814 815 if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE && 816 efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 && 817 efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) { 818 rte_flow_error_set(error, EINVAL, 819 RTE_FLOW_ERROR_TYPE_ITEM, item, 820 "Outer frame EtherType in pattern with tunneling " 821 "must be IPv4 or IPv6"); 822 return -rte_errno; 823 } 824 825 return 0; 826 } 827 828 static int 829 sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec, 830 const uint8_t *vni_or_vsid_val, 831 const uint8_t *vni_or_vsid_mask, 832 const struct rte_flow_item *item, 833 struct rte_flow_error *error) 834 { 835 const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = { 836 0xff, 0xff, 0xff 837 }; 838 839 if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask, 840 EFX_VNI_OR_VSID_LEN) == 0) { 841 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID; 842 rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val, 843 EFX_VNI_OR_VSID_LEN); 844 } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) { 845 rte_flow_error_set(error, EINVAL, 846 RTE_FLOW_ERROR_TYPE_ITEM, item, 847 "Unsupported VNI/VSID mask"); 848 return -rte_errno; 849 } 850 851 return 0; 852 } 853 854 /** 855 * Convert VXLAN item to EFX filter specification. 856 * 857 * @param item[in] 858 * Item specification. Only VXLAN network identifier field is supported. 859 * If the mask is NULL, default mask will be used. 860 * Ranging is not supported. 861 * @param efx_spec[in, out] 862 * EFX filter specification to update. 863 * @param[out] error 864 * Perform verbose error reporting if not NULL. 
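 *
 *   Illustrative pattern (the VNI value is invented for this comment):
 *   ETH / IPV4 / UDP / VXLAN with vni = {0x00, 0x00, 0x2a} and the default
 *   mask yields EFX_FILTER_MATCH_VNI_OR_VSID, an encapsulation type of
 *   EFX_TUNNEL_PROTOCOL_VXLAN and an outer IP protocol forced to UDP.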
865 */ 866 static int 867 sfc_flow_parse_vxlan(const struct rte_flow_item *item, 868 efx_filter_spec_t *efx_spec, 869 struct rte_flow_error *error) 870 { 871 int rc; 872 const struct rte_flow_item_vxlan *spec = NULL; 873 const struct rte_flow_item_vxlan *mask = NULL; 874 const struct rte_flow_item_vxlan supp_mask = { 875 .vni = { 0xff, 0xff, 0xff } 876 }; 877 878 rc = sfc_flow_parse_init(item, 879 (const void **)&spec, 880 (const void **)&mask, 881 &supp_mask, 882 &rte_flow_item_vxlan_mask, 883 sizeof(struct rte_flow_item_vxlan), 884 error); 885 if (rc != 0) 886 return rc; 887 888 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec, 889 EFX_IPPROTO_UDP, error); 890 if (rc != 0) 891 return rc; 892 893 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN; 894 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; 895 896 if (spec == NULL) 897 return 0; 898 899 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni, 900 mask->vni, item, error); 901 902 return rc; 903 } 904 905 /** 906 * Convert GENEVE item to EFX filter specification. 907 * 908 * @param item[in] 909 * Item specification. Only Virtual Network Identifier and protocol type 910 * fields are supported. But protocol type can be only Ethernet (0x6558). 911 * If the mask is NULL, default mask will be used. 912 * Ranging is not supported. 913 * @param efx_spec[in, out] 914 * EFX filter specification to update. 915 * @param[out] error 916 * Perform verbose error reporting if not NULL. 917 */ 918 static int 919 sfc_flow_parse_geneve(const struct rte_flow_item *item, 920 efx_filter_spec_t *efx_spec, 921 struct rte_flow_error *error) 922 { 923 int rc; 924 const struct rte_flow_item_geneve *spec = NULL; 925 const struct rte_flow_item_geneve *mask = NULL; 926 const struct rte_flow_item_geneve supp_mask = { 927 .protocol = RTE_BE16(0xffff), 928 .vni = { 0xff, 0xff, 0xff } 929 }; 930 931 rc = sfc_flow_parse_init(item, 932 (const void **)&spec, 933 (const void **)&mask, 934 &supp_mask, 935 &rte_flow_item_geneve_mask, 936 sizeof(struct rte_flow_item_geneve), 937 error); 938 if (rc != 0) 939 return rc; 940 941 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec, 942 EFX_IPPROTO_UDP, error); 943 if (rc != 0) 944 return rc; 945 946 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE; 947 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; 948 949 if (spec == NULL) 950 return 0; 951 952 if (mask->protocol == supp_mask.protocol) { 953 if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) { 954 rte_flow_error_set(error, EINVAL, 955 RTE_FLOW_ERROR_TYPE_ITEM, item, 956 "GENEVE encap. protocol must be Ethernet " 957 "(0x6558) in the GENEVE pattern item"); 958 return -rte_errno; 959 } 960 } else if (mask->protocol != 0) { 961 rte_flow_error_set(error, EINVAL, 962 RTE_FLOW_ERROR_TYPE_ITEM, item, 963 "Unsupported mask for GENEVE encap. protocol"); 964 return -rte_errno; 965 } 966 967 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni, 968 mask->vni, item, error); 969 970 return rc; 971 } 972 973 /** 974 * Convert NVGRE item to EFX filter specification. 975 * 976 * @param item[in] 977 * Item specification. Only virtual subnet ID field is supported. 978 * If the mask is NULL, default mask will be used. 979 * Ranging is not supported. 980 * @param efx_spec[in, out] 981 * EFX filter specification to update. 982 * @param[out] error 983 * Perform verbose error reporting if not NULL. 
984 */ 985 static int 986 sfc_flow_parse_nvgre(const struct rte_flow_item *item, 987 efx_filter_spec_t *efx_spec, 988 struct rte_flow_error *error) 989 { 990 int rc; 991 const struct rte_flow_item_nvgre *spec = NULL; 992 const struct rte_flow_item_nvgre *mask = NULL; 993 const struct rte_flow_item_nvgre supp_mask = { 994 .tni = { 0xff, 0xff, 0xff } 995 }; 996 997 rc = sfc_flow_parse_init(item, 998 (const void **)&spec, 999 (const void **)&mask, 1000 &supp_mask, 1001 &rte_flow_item_nvgre_mask, 1002 sizeof(struct rte_flow_item_nvgre), 1003 error); 1004 if (rc != 0) 1005 return rc; 1006 1007 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec, 1008 EFX_IPPROTO_GRE, error); 1009 if (rc != 0) 1010 return rc; 1011 1012 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE; 1013 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; 1014 1015 if (spec == NULL) 1016 return 0; 1017 1018 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni, 1019 mask->tni, item, error); 1020 1021 return rc; 1022 } 1023 1024 static const struct sfc_flow_item sfc_flow_items[] = { 1025 { 1026 .type = RTE_FLOW_ITEM_TYPE_VOID, 1027 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER, 1028 .layer = SFC_FLOW_ITEM_ANY_LAYER, 1029 .parse = sfc_flow_parse_void, 1030 }, 1031 { 1032 .type = RTE_FLOW_ITEM_TYPE_ETH, 1033 .prev_layer = SFC_FLOW_ITEM_START_LAYER, 1034 .layer = SFC_FLOW_ITEM_L2, 1035 .parse = sfc_flow_parse_eth, 1036 }, 1037 { 1038 .type = RTE_FLOW_ITEM_TYPE_VLAN, 1039 .prev_layer = SFC_FLOW_ITEM_L2, 1040 .layer = SFC_FLOW_ITEM_L2, 1041 .parse = sfc_flow_parse_vlan, 1042 }, 1043 { 1044 .type = RTE_FLOW_ITEM_TYPE_IPV4, 1045 .prev_layer = SFC_FLOW_ITEM_L2, 1046 .layer = SFC_FLOW_ITEM_L3, 1047 .parse = sfc_flow_parse_ipv4, 1048 }, 1049 { 1050 .type = RTE_FLOW_ITEM_TYPE_IPV6, 1051 .prev_layer = SFC_FLOW_ITEM_L2, 1052 .layer = SFC_FLOW_ITEM_L3, 1053 .parse = sfc_flow_parse_ipv6, 1054 }, 1055 { 1056 .type = RTE_FLOW_ITEM_TYPE_TCP, 1057 .prev_layer = SFC_FLOW_ITEM_L3, 1058 .layer = SFC_FLOW_ITEM_L4, 1059 .parse = sfc_flow_parse_tcp, 1060 }, 1061 { 1062 .type = RTE_FLOW_ITEM_TYPE_UDP, 1063 .prev_layer = SFC_FLOW_ITEM_L3, 1064 .layer = SFC_FLOW_ITEM_L4, 1065 .parse = sfc_flow_parse_udp, 1066 }, 1067 { 1068 .type = RTE_FLOW_ITEM_TYPE_VXLAN, 1069 .prev_layer = SFC_FLOW_ITEM_L4, 1070 .layer = SFC_FLOW_ITEM_START_LAYER, 1071 .parse = sfc_flow_parse_vxlan, 1072 }, 1073 { 1074 .type = RTE_FLOW_ITEM_TYPE_GENEVE, 1075 .prev_layer = SFC_FLOW_ITEM_L4, 1076 .layer = SFC_FLOW_ITEM_START_LAYER, 1077 .parse = sfc_flow_parse_geneve, 1078 }, 1079 { 1080 .type = RTE_FLOW_ITEM_TYPE_NVGRE, 1081 .prev_layer = SFC_FLOW_ITEM_L3, 1082 .layer = SFC_FLOW_ITEM_START_LAYER, 1083 .parse = sfc_flow_parse_nvgre, 1084 }, 1085 }; 1086 1087 /* 1088 * Protocol-independent flow API support 1089 */ 1090 static int 1091 sfc_flow_parse_attr(const struct rte_flow_attr *attr, 1092 struct rte_flow *flow, 1093 struct rte_flow_error *error) 1094 { 1095 if (attr == NULL) { 1096 rte_flow_error_set(error, EINVAL, 1097 RTE_FLOW_ERROR_TYPE_ATTR, NULL, 1098 "NULL attribute"); 1099 return -rte_errno; 1100 } 1101 if (attr->group != 0) { 1102 rte_flow_error_set(error, ENOTSUP, 1103 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr, 1104 "Groups are not supported"); 1105 return -rte_errno; 1106 } 1107 if (attr->priority != 0) { 1108 rte_flow_error_set(error, ENOTSUP, 1109 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr, 1110 "Priorities are not supported"); 1111 return -rte_errno; 1112 } 1113 if (attr->egress != 0) { 1114 rte_flow_error_set(error, ENOTSUP, 1115 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, 
attr, 1116 "Egress is not supported"); 1117 return -rte_errno; 1118 } 1119 if (attr->transfer != 0) { 1120 rte_flow_error_set(error, ENOTSUP, 1121 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr, 1122 "Transfer is not supported"); 1123 return -rte_errno; 1124 } 1125 if (attr->ingress == 0) { 1126 rte_flow_error_set(error, ENOTSUP, 1127 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr, 1128 "Only ingress is supported"); 1129 return -rte_errno; 1130 } 1131 1132 flow->spec.template.efs_flags |= EFX_FILTER_FLAG_RX; 1133 flow->spec.template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT; 1134 1135 return 0; 1136 } 1137 1138 /* Get item from array sfc_flow_items */ 1139 static const struct sfc_flow_item * 1140 sfc_flow_get_item(enum rte_flow_item_type type) 1141 { 1142 unsigned int i; 1143 1144 for (i = 0; i < RTE_DIM(sfc_flow_items); i++) 1145 if (sfc_flow_items[i].type == type) 1146 return &sfc_flow_items[i]; 1147 1148 return NULL; 1149 } 1150 1151 static int 1152 sfc_flow_parse_pattern(const struct rte_flow_item pattern[], 1153 struct rte_flow *flow, 1154 struct rte_flow_error *error) 1155 { 1156 int rc; 1157 unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER; 1158 boolean_t is_ifrm = B_FALSE; 1159 const struct sfc_flow_item *item; 1160 1161 if (pattern == NULL) { 1162 rte_flow_error_set(error, EINVAL, 1163 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL, 1164 "NULL pattern"); 1165 return -rte_errno; 1166 } 1167 1168 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) { 1169 item = sfc_flow_get_item(pattern->type); 1170 if (item == NULL) { 1171 rte_flow_error_set(error, ENOTSUP, 1172 RTE_FLOW_ERROR_TYPE_ITEM, pattern, 1173 "Unsupported pattern item"); 1174 return -rte_errno; 1175 } 1176 1177 /* 1178 * Omitting one or several protocol layers at the beginning 1179 * of pattern is supported 1180 */ 1181 if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER && 1182 prev_layer != SFC_FLOW_ITEM_ANY_LAYER && 1183 item->prev_layer != prev_layer) { 1184 rte_flow_error_set(error, ENOTSUP, 1185 RTE_FLOW_ERROR_TYPE_ITEM, pattern, 1186 "Unexpected sequence of pattern items"); 1187 return -rte_errno; 1188 } 1189 1190 /* 1191 * Allow only VOID and ETH pattern items in the inner frame. 1192 * Also check that there is only one tunneling protocol. 
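		 * For example, ETH / IPV4 / UDP / VXLAN / ETH is accepted,
		 * whereas ETH / IPV4 / UDP / VXLAN / IPV4 or a second tunnel
		 * item after VXLAN is rejected.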
1193 */ 1194 switch (item->type) { 1195 case RTE_FLOW_ITEM_TYPE_VOID: 1196 case RTE_FLOW_ITEM_TYPE_ETH: 1197 break; 1198 1199 case RTE_FLOW_ITEM_TYPE_VXLAN: 1200 case RTE_FLOW_ITEM_TYPE_GENEVE: 1201 case RTE_FLOW_ITEM_TYPE_NVGRE: 1202 if (is_ifrm) { 1203 rte_flow_error_set(error, EINVAL, 1204 RTE_FLOW_ERROR_TYPE_ITEM, 1205 pattern, 1206 "More than one tunneling protocol"); 1207 return -rte_errno; 1208 } 1209 is_ifrm = B_TRUE; 1210 break; 1211 1212 default: 1213 if (is_ifrm) { 1214 rte_flow_error_set(error, EINVAL, 1215 RTE_FLOW_ERROR_TYPE_ITEM, 1216 pattern, 1217 "There is an unsupported pattern item " 1218 "in the inner frame"); 1219 return -rte_errno; 1220 } 1221 break; 1222 } 1223 1224 rc = item->parse(pattern, &flow->spec.template, error); 1225 if (rc != 0) 1226 return rc; 1227 1228 if (item->layer != SFC_FLOW_ITEM_ANY_LAYER) 1229 prev_layer = item->layer; 1230 } 1231 1232 return 0; 1233 } 1234 1235 static int 1236 sfc_flow_parse_queue(struct sfc_adapter *sa, 1237 const struct rte_flow_action_queue *queue, 1238 struct rte_flow *flow) 1239 { 1240 struct sfc_rxq *rxq; 1241 1242 if (queue->index >= sa->rxq_count) 1243 return -EINVAL; 1244 1245 rxq = sa->rxq_info[queue->index].rxq; 1246 flow->spec.template.efs_dmaq_id = (uint16_t)rxq->hw_index; 1247 1248 return 0; 1249 } 1250 1251 static int 1252 sfc_flow_parse_rss(struct sfc_adapter *sa, 1253 const struct rte_flow_action_rss *action_rss, 1254 struct rte_flow *flow) 1255 { 1256 struct sfc_rss *rss = &sa->rss; 1257 unsigned int rxq_sw_index; 1258 struct sfc_rxq *rxq; 1259 unsigned int rxq_hw_index_min; 1260 unsigned int rxq_hw_index_max; 1261 efx_rx_hash_type_t efx_hash_types; 1262 const uint8_t *rss_key; 1263 struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf; 1264 unsigned int i; 1265 1266 if (action_rss->queue_num == 0) 1267 return -EINVAL; 1268 1269 rxq_sw_index = sa->rxq_count - 1; 1270 rxq = sa->rxq_info[rxq_sw_index].rxq; 1271 rxq_hw_index_min = rxq->hw_index; 1272 rxq_hw_index_max = 0; 1273 1274 for (i = 0; i < action_rss->queue_num; ++i) { 1275 rxq_sw_index = action_rss->queue[i]; 1276 1277 if (rxq_sw_index >= sa->rxq_count) 1278 return -EINVAL; 1279 1280 rxq = sa->rxq_info[rxq_sw_index].rxq; 1281 1282 if (rxq->hw_index < rxq_hw_index_min) 1283 rxq_hw_index_min = rxq->hw_index; 1284 1285 if (rxq->hw_index > rxq_hw_index_max) 1286 rxq_hw_index_max = rxq->hw_index; 1287 } 1288 1289 switch (action_rss->func) { 1290 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1291 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1292 break; 1293 default: 1294 return -EINVAL; 1295 } 1296 1297 if (action_rss->level) 1298 return -EINVAL; 1299 1300 /* 1301 * Dummy RSS action with only one queue and no specific settings 1302 * for hash types and key does not require dedicated RSS context 1303 * and may be simplified to single queue action. 
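	 * For instance, RSS with queue_num == 1, types == 0 and key_len == 0
	 * behaves exactly like a QUEUE action on that queue: efs_dmaq_id is
	 * set to the queue's hardware index and no RSS context is allocated.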
1304 */ 1305 if (action_rss->queue_num == 1 && action_rss->types == 0 && 1306 action_rss->key_len == 0) { 1307 flow->spec.template.efs_dmaq_id = rxq_hw_index_min; 1308 return 0; 1309 } 1310 1311 if (action_rss->types) { 1312 int rc; 1313 1314 rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types, 1315 &efx_hash_types); 1316 if (rc != 0) 1317 return -rc; 1318 } else { 1319 unsigned int i; 1320 1321 efx_hash_types = 0; 1322 for (i = 0; i < rss->hf_map_nb_entries; ++i) 1323 efx_hash_types |= rss->hf_map[i].efx; 1324 } 1325 1326 if (action_rss->key_len) { 1327 if (action_rss->key_len != sizeof(rss->key)) 1328 return -EINVAL; 1329 1330 rss_key = action_rss->key; 1331 } else { 1332 rss_key = rss->key; 1333 } 1334 1335 flow->rss = B_TRUE; 1336 1337 sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min; 1338 sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max; 1339 sfc_rss_conf->rss_hash_types = efx_hash_types; 1340 rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key)); 1341 1342 for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) { 1343 unsigned int nb_queues = action_rss->queue_num; 1344 unsigned int rxq_sw_index = action_rss->queue[i % nb_queues]; 1345 struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq; 1346 1347 sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min; 1348 } 1349 1350 return 0; 1351 } 1352 1353 static int 1354 sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec, 1355 unsigned int filters_count) 1356 { 1357 unsigned int i; 1358 int ret = 0; 1359 1360 for (i = 0; i < filters_count; i++) { 1361 int rc; 1362 1363 rc = efx_filter_remove(sa->nic, &spec->filters[i]); 1364 if (ret == 0 && rc != 0) { 1365 sfc_err(sa, "failed to remove filter specification " 1366 "(rc = %d)", rc); 1367 ret = rc; 1368 } 1369 } 1370 1371 return ret; 1372 } 1373 1374 static int 1375 sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec) 1376 { 1377 unsigned int i; 1378 int rc = 0; 1379 1380 for (i = 0; i < spec->count; i++) { 1381 rc = efx_filter_insert(sa->nic, &spec->filters[i]); 1382 if (rc != 0) { 1383 sfc_flow_spec_flush(sa, spec, i); 1384 break; 1385 } 1386 } 1387 1388 return rc; 1389 } 1390 1391 static int 1392 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec) 1393 { 1394 return sfc_flow_spec_flush(sa, spec, spec->count); 1395 } 1396 1397 static int 1398 sfc_flow_filter_insert(struct sfc_adapter *sa, 1399 struct rte_flow *flow) 1400 { 1401 struct sfc_rss *rss = &sa->rss; 1402 struct sfc_flow_rss *flow_rss = &flow->rss_conf; 1403 uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT; 1404 unsigned int i; 1405 int rc = 0; 1406 1407 if (flow->rss) { 1408 unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max - 1409 flow_rss->rxq_hw_index_min + 1, 1410 EFX_MAXRSS); 1411 1412 rc = efx_rx_scale_context_alloc(sa->nic, 1413 EFX_RX_SCALE_EXCLUSIVE, 1414 rss_spread, 1415 &efs_rss_context); 1416 if (rc != 0) 1417 goto fail_scale_context_alloc; 1418 1419 rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context, 1420 rss->hash_alg, 1421 flow_rss->rss_hash_types, B_TRUE); 1422 if (rc != 0) 1423 goto fail_scale_mode_set; 1424 1425 rc = efx_rx_scale_key_set(sa->nic, efs_rss_context, 1426 flow_rss->rss_key, 1427 sizeof(rss->key)); 1428 if (rc != 0) 1429 goto fail_scale_key_set; 1430 1431 /* 1432 * At this point, fully elaborated filter specifications 1433 * have been produced from the template. To make sure that 1434 * RSS behaviour is consistent between them, set the same 1435 * RSS context value everywhere. 
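		 * For example, if the template was expanded into several
		 * filter copies, every copy gets the exclusive RSS context
		 * allocated above, the minimum hardware RxQ index as
		 * efs_dmaq_id and the EFX_FILTER_FLAG_RX_RSS flag.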
1436 */ 1437 for (i = 0; i < flow->spec.count; i++) { 1438 efx_filter_spec_t *spec = &flow->spec.filters[i]; 1439 1440 spec->efs_rss_context = efs_rss_context; 1441 spec->efs_dmaq_id = flow_rss->rxq_hw_index_min; 1442 spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS; 1443 } 1444 } 1445 1446 rc = sfc_flow_spec_insert(sa, &flow->spec); 1447 if (rc != 0) 1448 goto fail_filter_insert; 1449 1450 if (flow->rss) { 1451 /* 1452 * Scale table is set after filter insertion because 1453 * the table entries are relative to the base RxQ ID 1454 * and the latter is submitted to the HW by means of 1455 * inserting a filter, so by the time of the request 1456 * the HW knows all the information needed to verify 1457 * the table entries, and the operation will succeed 1458 */ 1459 rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context, 1460 flow_rss->rss_tbl, 1461 RTE_DIM(flow_rss->rss_tbl)); 1462 if (rc != 0) 1463 goto fail_scale_tbl_set; 1464 } 1465 1466 return 0; 1467 1468 fail_scale_tbl_set: 1469 sfc_flow_spec_remove(sa, &flow->spec); 1470 1471 fail_filter_insert: 1472 fail_scale_key_set: 1473 fail_scale_mode_set: 1474 if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT) 1475 efx_rx_scale_context_free(sa->nic, efs_rss_context); 1476 1477 fail_scale_context_alloc: 1478 return rc; 1479 } 1480 1481 static int 1482 sfc_flow_filter_remove(struct sfc_adapter *sa, 1483 struct rte_flow *flow) 1484 { 1485 int rc = 0; 1486 1487 rc = sfc_flow_spec_remove(sa, &flow->spec); 1488 if (rc != 0) 1489 return rc; 1490 1491 if (flow->rss) { 1492 /* 1493 * All specifications for a given flow rule have the same RSS 1494 * context, so that RSS context value is taken from the first 1495 * filter specification 1496 */ 1497 efx_filter_spec_t *spec = &flow->spec.filters[0]; 1498 1499 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context); 1500 } 1501 1502 return rc; 1503 } 1504 1505 static int 1506 sfc_flow_parse_mark(struct sfc_adapter *sa, 1507 const struct rte_flow_action_mark *mark, 1508 struct rte_flow *flow) 1509 { 1510 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); 1511 1512 if (mark == NULL || mark->id > encp->enc_filter_action_mark_max) 1513 return EINVAL; 1514 1515 flow->spec.template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK; 1516 flow->spec.template.efs_mark = mark->id; 1517 1518 return 0; 1519 } 1520 1521 static int 1522 sfc_flow_parse_actions(struct sfc_adapter *sa, 1523 const struct rte_flow_action actions[], 1524 struct rte_flow *flow, 1525 struct rte_flow_error *error) 1526 { 1527 int rc; 1528 const unsigned int dp_rx_features = sa->dp_rx->features; 1529 uint32_t actions_set = 0; 1530 const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) | 1531 (1UL << RTE_FLOW_ACTION_TYPE_RSS) | 1532 (1UL << RTE_FLOW_ACTION_TYPE_DROP); 1533 const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) | 1534 (1UL << RTE_FLOW_ACTION_TYPE_FLAG); 1535 1536 if (actions == NULL) { 1537 rte_flow_error_set(error, EINVAL, 1538 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL, 1539 "NULL actions"); 1540 return -rte_errno; 1541 } 1542 1543 #define SFC_BUILD_SET_OVERFLOW(_action, _set) \ 1544 RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT) 1545 1546 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 1547 switch (actions->type) { 1548 case RTE_FLOW_ACTION_TYPE_VOID: 1549 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID, 1550 actions_set); 1551 break; 1552 1553 case RTE_FLOW_ACTION_TYPE_QUEUE: 1554 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE, 1555 actions_set); 1556 if ((actions_set & fate_actions_mask) != 
0) 1557 goto fail_fate_actions; 1558 1559 rc = sfc_flow_parse_queue(sa, actions->conf, flow); 1560 if (rc != 0) { 1561 rte_flow_error_set(error, EINVAL, 1562 RTE_FLOW_ERROR_TYPE_ACTION, actions, 1563 "Bad QUEUE action"); 1564 return -rte_errno; 1565 } 1566 break; 1567 1568 case RTE_FLOW_ACTION_TYPE_RSS: 1569 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS, 1570 actions_set); 1571 if ((actions_set & fate_actions_mask) != 0) 1572 goto fail_fate_actions; 1573 1574 rc = sfc_flow_parse_rss(sa, actions->conf, flow); 1575 if (rc != 0) { 1576 rte_flow_error_set(error, -rc, 1577 RTE_FLOW_ERROR_TYPE_ACTION, actions, 1578 "Bad RSS action"); 1579 return -rte_errno; 1580 } 1581 break; 1582 1583 case RTE_FLOW_ACTION_TYPE_DROP: 1584 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP, 1585 actions_set); 1586 if ((actions_set & fate_actions_mask) != 0) 1587 goto fail_fate_actions; 1588 1589 flow->spec.template.efs_dmaq_id = 1590 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP; 1591 break; 1592 1593 case RTE_FLOW_ACTION_TYPE_FLAG: 1594 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG, 1595 actions_set); 1596 if ((actions_set & mark_actions_mask) != 0) 1597 goto fail_actions_overlap; 1598 1599 if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) { 1600 rte_flow_error_set(error, ENOTSUP, 1601 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1602 "FLAG action is not supported on the current Rx datapath"); 1603 return -rte_errno; 1604 } 1605 1606 flow->spec.template.efs_flags |= 1607 EFX_FILTER_FLAG_ACTION_FLAG; 1608 break; 1609 1610 case RTE_FLOW_ACTION_TYPE_MARK: 1611 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK, 1612 actions_set); 1613 if ((actions_set & mark_actions_mask) != 0) 1614 goto fail_actions_overlap; 1615 1616 if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) { 1617 rte_flow_error_set(error, ENOTSUP, 1618 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1619 "MARK action is not supported on the current Rx datapath"); 1620 return -rte_errno; 1621 } 1622 1623 rc = sfc_flow_parse_mark(sa, actions->conf, flow); 1624 if (rc != 0) { 1625 rte_flow_error_set(error, rc, 1626 RTE_FLOW_ERROR_TYPE_ACTION, actions, 1627 "Bad MARK action"); 1628 return -rte_errno; 1629 } 1630 break; 1631 1632 default: 1633 rte_flow_error_set(error, ENOTSUP, 1634 RTE_FLOW_ERROR_TYPE_ACTION, actions, 1635 "Action is not supported"); 1636 return -rte_errno; 1637 } 1638 1639 actions_set |= (1UL << actions->type); 1640 } 1641 #undef SFC_BUILD_SET_OVERFLOW 1642 1643 /* When fate is unknown, drop traffic. */ 1644 if ((actions_set & fate_actions_mask) == 0) { 1645 flow->spec.template.efs_dmaq_id = 1646 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP; 1647 } 1648 1649 return 0; 1650 1651 fail_fate_actions: 1652 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions, 1653 "Cannot combine several fate-deciding actions, " 1654 "choose between QUEUE, RSS or DROP"); 1655 return -rte_errno; 1656 1657 fail_actions_overlap: 1658 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions, 1659 "Overlapping actions are not supported"); 1660 return -rte_errno; 1661 } 1662 1663 /** 1664 * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST 1665 * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same 1666 * specifications after copying. 1667 * 1668 * @param spec[in, out] 1669 * SFC flow specification to update. 1670 * @param filters_count_for_one_val[in] 1671 * How many specifications should have the same match flag, what is the 1672 * number of specifications before copying. 1673 * @param error[out] 1674 * Perform verbose error reporting if not NULL. 
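 *
 *   For example, with filters_count_for_one_val == 2 and spec->count == 4,
 *   filters 0 and 1 receive EFX_FILTER_MATCH_UNKNOWN_UCAST_DST while
 *   filters 2 and 3 receive EFX_FILTER_MATCH_UNKNOWN_MCAST_DST.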
1675 */ 1676 static int 1677 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec, 1678 unsigned int filters_count_for_one_val, 1679 struct rte_flow_error *error) 1680 { 1681 unsigned int i; 1682 static const efx_filter_match_flags_t vals[] = { 1683 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, 1684 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST 1685 }; 1686 1687 if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) { 1688 rte_flow_error_set(error, EINVAL, 1689 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1690 "Number of specifications is incorrect while copying " 1691 "by unknown destination flags"); 1692 return -rte_errno; 1693 } 1694 1695 for (i = 0; i < spec->count; i++) { 1696 /* The check above ensures that divisor can't be zero here */ 1697 spec->filters[i].efs_match_flags |= 1698 vals[i / filters_count_for_one_val]; 1699 } 1700 1701 return 0; 1702 } 1703 1704 /** 1705 * Check that the following conditions are met: 1706 * - the list of supported filters has a filter 1707 * with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of 1708 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also 1709 * be inserted. 1710 * 1711 * @param match[in] 1712 * The match flags of filter. 1713 * @param spec[in] 1714 * Specification to be supplemented. 1715 * @param filter[in] 1716 * SFC filter with list of supported filters. 1717 */ 1718 static boolean_t 1719 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match, 1720 __rte_unused efx_filter_spec_t *spec, 1721 struct sfc_filter *filter) 1722 { 1723 unsigned int i; 1724 efx_filter_match_flags_t match_mcast_dst; 1725 1726 match_mcast_dst = 1727 (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) | 1728 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST; 1729 for (i = 0; i < filter->supported_match_num; i++) { 1730 if (match_mcast_dst == filter->supported_match[i]) 1731 return B_TRUE; 1732 } 1733 1734 return B_FALSE; 1735 } 1736 1737 /** 1738 * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and 1739 * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same 1740 * specifications after copying. 1741 * 1742 * @param spec[in, out] 1743 * SFC flow specification to update. 1744 * @param filters_count_for_one_val[in] 1745 * How many specifications should have the same EtherType value, what is the 1746 * number of specifications before copying. 1747 * @param error[out] 1748 * Perform verbose error reporting if not NULL. 1749 */ 1750 static int 1751 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec, 1752 unsigned int filters_count_for_one_val, 1753 struct rte_flow_error *error) 1754 { 1755 unsigned int i; 1756 static const uint16_t vals[] = { 1757 EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6 1758 }; 1759 1760 if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) { 1761 rte_flow_error_set(error, EINVAL, 1762 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1763 "Number of specifications is incorrect " 1764 "while copying by Ethertype"); 1765 return -rte_errno; 1766 } 1767 1768 for (i = 0; i < spec->count; i++) { 1769 spec->filters[i].efs_match_flags |= 1770 EFX_FILTER_MATCH_ETHER_TYPE; 1771 1772 /* 1773 * The check above ensures that 1774 * filters_count_for_one_val is not 0 1775 */ 1776 spec->filters[i].efs_ether_type = 1777 vals[i / filters_count_for_one_val]; 1778 } 1779 1780 return 0; 1781 } 1782 1783 /** 1784 * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and 1785 * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same 1786 * specifications after copying. 1787 * 1788 * @param spec[in, out] 1789 * SFC flow specification to update. 
1790 * @param filters_count_for_one_val[in] 1791 * How many specifications should have the same match flag, what is the 1792 * number of specifications before copying. 1793 * @param error[out] 1794 * Perform verbose error reporting if not NULL. 1795 */ 1796 static int 1797 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec, 1798 unsigned int filters_count_for_one_val, 1799 struct rte_flow_error *error) 1800 { 1801 unsigned int i; 1802 static const efx_filter_match_flags_t vals[] = { 1803 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, 1804 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST 1805 }; 1806 1807 if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) { 1808 rte_flow_error_set(error, EINVAL, 1809 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1810 "Number of specifications is incorrect while copying " 1811 "by inner frame unknown destination flags"); 1812 return -rte_errno; 1813 } 1814 1815 for (i = 0; i < spec->count; i++) { 1816 /* The check above ensures that divisor can't be zero here */ 1817 spec->filters[i].efs_match_flags |= 1818 vals[i / filters_count_for_one_val]; 1819 } 1820 1821 return 0; 1822 } 1823 1824 /** 1825 * Check that the following conditions are met: 1826 * - the specification corresponds to a filter for encapsulated traffic 1827 * - the list of supported filters has a filter 1828 * with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of 1829 * EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also 1830 * be inserted. 1831 * 1832 * @param match[in] 1833 * The match flags of filter. 1834 * @param spec[in] 1835 * Specification to be supplemented. 1836 * @param filter[in] 1837 * SFC filter with list of supported filters. 1838 */ 1839 static boolean_t 1840 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match, 1841 efx_filter_spec_t *spec, 1842 struct sfc_filter *filter) 1843 { 1844 unsigned int i; 1845 efx_tunnel_protocol_t encap_type = spec->efs_encap_type; 1846 efx_filter_match_flags_t match_mcast_dst; 1847 1848 if (encap_type == EFX_TUNNEL_PROTOCOL_NONE) 1849 return B_FALSE; 1850 1851 match_mcast_dst = 1852 (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) | 1853 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST; 1854 for (i = 0; i < filter->supported_match_num; i++) { 1855 if (match_mcast_dst == filter->supported_match[i]) 1856 return B_TRUE; 1857 } 1858 1859 return B_FALSE; 1860 } 1861 1862 /* 1863 * Match flags that can be automatically added to filters. 1864 * Selecting the last minimum when searching for the copy flag ensures that the 1865 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than 1866 * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter 1867 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported 1868 * filters. 
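 * For example, if a pattern could be completed either by copying in
 * EtherType values or by copying in the unknown unicast/multicast
 * destination flags (both give a multiplier of 2), the unknown
 * destination variant wins because its supported filter comes later
 * in the list.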
 */
static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
	{
		.flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
		.vals_count = 2,
		.set_vals = sfc_flow_set_unknown_dst_flags,
		.spec_check = sfc_flow_check_unknown_dst_flags,
	},
	{
		.flag = EFX_FILTER_MATCH_ETHER_TYPE,
		.vals_count = 2,
		.set_vals = sfc_flow_set_ethertypes,
		.spec_check = NULL,
	},
	{
		.flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
		.vals_count = 2,
		.set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
		.spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
	},
};

/* Get item from array sfc_flow_copy_flags */
static const struct sfc_flow_copy_flag *
sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		if (sfc_flow_copy_flags[i].flag == flag)
			return &sfc_flow_copy_flags[i];
	}

	return NULL;
}

/**
 * Make copies of the specifications, set match flag and values
 * of the field that corresponds to it.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param flag[in]
 *   The match flag to add.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
			     efx_filter_match_flags_t flag,
			     struct rte_flow_error *error)
{
	unsigned int i;
	unsigned int new_filters_count;
	unsigned int filters_count_for_one_val;
	const struct sfc_flow_copy_flag *copy_flag;
	int rc;

	copy_flag = sfc_flow_get_copy_flag(flag);
	if (copy_flag == NULL) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Unsupported spec field for copying");
		return -rte_errno;
	}

	new_filters_count = spec->count * copy_flag->vals_count;
	if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Too many EFX specifications in the flow rule");
		return -rte_errno;
	}

	/* Copy filter specifications */
	for (i = spec->count; i < new_filters_count; i++)
		spec->filters[i] = spec->filters[i - spec->count];

	filters_count_for_one_val = spec->count;
	spec->count = new_filters_count;

	rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
	if (rc != 0)
		return rc;

	return 0;
}

/**
 * Check that the given set of match flags missing in the original filter spec
 * could be covered by adding spec copies which specify the corresponding
 * flags and packet field values to match.
 *
 * @param miss_flags[in]
 *   Flags that are missing in the spec compared to the supported filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter.
 *
 * @return
 *   Number of specifications after copying, or 0 if the flags cannot be added.
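 *
 *   For instance (illustrative numbers): if both EFX_FILTER_MATCH_ETHER_TYPE
 *   and EFX_FILTER_MATCH_UNKNOWN_UCAST_DST are missing and pass their checks,
 *   the function returns 2 * 2 = 4.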

/**
 * Check that the given set of match flags missing in the original filter spec
 * could be covered by adding spec copies which specify the corresponding
 * flags and packet field values to match.
 *
 * @param miss_flags[in]
 *   Flags that are present in the supported filter but missing from the
 *   specification.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter.
 *
 * @return
 *   Number of specifications after copying, or 0 if the flags cannot be added.
 */
static unsigned int
sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
			     efx_filter_spec_t *spec,
			     struct sfc_filter *filter)
{
	unsigned int i;
	efx_filter_match_flags_t copy_flags = 0;
	efx_filter_match_flags_t flag;
	efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
	sfc_flow_spec_check *check;
	unsigned int multiplier = 1;

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		flag = sfc_flow_copy_flags[i].flag;
		check = sfc_flow_copy_flags[i].spec_check;
		if ((flag & miss_flags) == flag) {
			if (check != NULL && (!check(match, spec, filter)))
				continue;

			copy_flags |= flag;
			multiplier *= sfc_flow_copy_flags[i].vals_count;
		}
	}

	if (copy_flags == miss_flags)
		return multiplier;

	return 0;
}

/**
 * Attempt to supplement the specification template to the minimally
 * supported set of match flags. To do this, it is necessary to copy
 * the specifications, filling them with the values of fields that
 * correspond to the missing flags.
 * The necessary and sufficient filter is built from the fewest number
 * of copies which could be made to cover the minimally required set
 * of flags.
 *
 * @param sa[in]
 *   SFC adapter.
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
			       struct sfc_flow_spec *spec,
			       struct rte_flow_error *error)
{
	struct sfc_filter *filter = &sa->filter;
	efx_filter_match_flags_t miss_flags;
	efx_filter_match_flags_t min_miss_flags = 0;
	efx_filter_match_flags_t match;
	unsigned int min_multiplier = UINT_MAX;
	unsigned int multiplier;
	unsigned int i;
	int rc;

	match = spec->template.efs_match_flags;
	for (i = 0; i < filter->supported_match_num; i++) {
		if ((match & filter->supported_match[i]) == match) {
			miss_flags = filter->supported_match[i] & (~match);
			multiplier = sfc_flow_check_missing_flags(miss_flags,
				&spec->template, filter);
			if (multiplier > 0) {
				if (multiplier <= min_multiplier) {
					min_multiplier = multiplier;
					min_miss_flags = miss_flags;
				}
			}
		}
	}

	if (min_multiplier == UINT_MAX) {
		rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"The flow rule pattern is unsupported");
		return -rte_errno;
	}

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;

		if ((flag & min_miss_flags) == flag) {
			rc = sfc_flow_spec_add_match_flag(spec, flag, error);
			if (rc != 0)
				return rc;
		}
	}

	return 0;
}
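
/*
 * Example of the selection logic above (illustrative only): if the template
 * could be completed either by adding EFX_FILTER_MATCH_ETHER_TYPE alone
 * (2 filter copies) or by adding both EFX_FILTER_MATCH_ETHER_TYPE and
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST (4 copies), the 2-copy variant wins
 * because it has the smaller multiplier. When two supported filters need the
 * same number of copies, the one that appears later in the supported match
 * list is preferred since the comparison uses "<=" ("the last minimum").
 */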

/**
 * Check whether a set of match flags matches the given filter pattern.
 * The pattern is described by its match flags, which may additionally be
 * extended with the OUTER_VID and INNER_VID flags.
 *
 * @param match_flags[in]
 *   Set of match flags.
 * @param flags_pattern[in]
 *   Pattern of filter match flags.
 */
static boolean_t
sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
			    efx_filter_match_flags_t flags_pattern)
{
	if ((match_flags & flags_pattern) != flags_pattern)
		return B_FALSE;

	switch (match_flags & ~flags_pattern) {
	case 0:
	case EFX_FILTER_MATCH_OUTER_VID:
	case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
		return B_TRUE;
	default:
		return B_FALSE;
	}
}

/**
 * Check whether the spec maps to a hardware filter which is known to be
 * ineffective despite being valid.
 *
 * @param spec[in]
 *   SFC flow specification.
 */
static boolean_t
sfc_flow_is_match_flags_exception(struct sfc_flow_spec *spec)
{
	unsigned int i;
	uint16_t ether_type;
	uint8_t ip_proto;
	efx_filter_match_flags_t match_flags;

	for (i = 0; i < spec->count; i++) {
		match_flags = spec->filters[i].efs_match_flags;

		if (sfc_flow_is_match_with_vids(match_flags,
						EFX_FILTER_MATCH_ETHER_TYPE) ||
		    sfc_flow_is_match_with_vids(match_flags,
						EFX_FILTER_MATCH_ETHER_TYPE |
						EFX_FILTER_MATCH_LOC_MAC)) {
			ether_type = spec->filters[i].efs_ether_type;
			if (ether_type == EFX_ETHER_TYPE_IPV4 ||
			    ether_type == EFX_ETHER_TYPE_IPV6)
				return B_TRUE;
		} else if (sfc_flow_is_match_with_vids(match_flags,
				EFX_FILTER_MATCH_ETHER_TYPE |
				EFX_FILTER_MATCH_IP_PROTO) ||
			   sfc_flow_is_match_with_vids(match_flags,
				EFX_FILTER_MATCH_ETHER_TYPE |
				EFX_FILTER_MATCH_IP_PROTO |
				EFX_FILTER_MATCH_LOC_MAC)) {
			ip_proto = spec->filters[i].efs_ip_proto;
			if (ip_proto == EFX_IPPROTO_TCP ||
			    ip_proto == EFX_IPPROTO_UDP)
				return B_TRUE;
		}
	}

	return B_FALSE;
}

static int
sfc_flow_validate_match_flags(struct sfc_adapter *sa,
			      struct rte_flow *flow,
			      struct rte_flow_error *error)
{
	efx_filter_spec_t *spec_tmpl = &flow->spec.template;
	efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
	int rc;

	/* Initialize the first filter spec with template */
	flow->spec.filters[0] = *spec_tmpl;
	flow->spec.count = 1;

	if (!sfc_filter_is_match_supported(sa, match_flags)) {
		rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
		if (rc != 0)
			return rc;
	}

	if (sfc_flow_is_match_flags_exception(&flow->spec)) {
		rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"The flow rule pattern is unsupported");
		return -rte_errno;
	}

	return 0;
}

static int
sfc_flow_parse(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	rc = sfc_flow_parse_attr(attr, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_pattern(pattern, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_actions(sa, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_validate_match_flags(sa, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	return 0;

fail_bad_value:
	return rc;
}
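
/*
 * For reference, a flow rule that ends up in sfc_flow_parse() is built by the
 * application through the generic rte_flow API. A minimal sketch (assumed
 * example, not part of this driver) directing frames with a given destination
 * MAC address to Rx queue 1:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_eth eth_spec = {
 *		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error error;
 *	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *						actions, &error);
 *
 * ethdev dispatches such calls to the sfc_flow_ops callbacks defined below.
 */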

static int
sfc_flow_validate(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	struct rte_flow flow;

	memset(&flow, 0, sizeof(flow));

	return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
}

static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow = NULL;
	int rc;

	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
	if (flow == NULL) {
		rte_flow_error_set(error, ENOMEM,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Failed to allocate memory");
		goto fail_no_mem;
	}

	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	sfc_adapter_lock(sa);

	/* The flow list must only be modified while the adapter lock is held */
	TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = sfc_flow_filter_insert(sa, flow);
		if (rc != 0) {
			rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to insert filter");
			goto fail_filter_insert;
		}
	}

	sfc_adapter_unlock(sa);

	return flow;

fail_filter_insert:
	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	sfc_adapter_unlock(sa);

fail_bad_value:
	rte_free(flow);

fail_no_mem:
	return NULL;
}

static int
sfc_flow_remove(struct sfc_adapter *sa,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int rc = 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = sfc_flow_filter_remove(sa, flow);
		if (rc != 0)
			rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to destroy flow rule");
	}

	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	rte_free(flow);

	return rc;
}

static int
sfc_flow_destroy(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow_ptr;
	int rc = EINVAL;

	sfc_adapter_lock(sa);

	TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
		if (flow_ptr == flow)
			rc = 0;
	}
	if (rc != 0) {
		rte_flow_error_set(error, rc,
			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			"Failed to find flow rule to destroy");
		goto fail_bad_value;
	}

	rc = sfc_flow_remove(sa, flow, error);

fail_bad_value:
	sfc_adapter_unlock(sa);

	return -rc;
}

static int
sfc_flow_flush(struct rte_eth_dev *dev,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow;
	int rc = 0;
	int ret = 0;

	sfc_adapter_lock(sa);

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		rc = sfc_flow_remove(sa, flow, error);
		if (rc != 0)
			ret = rc;
	}

	sfc_adapter_unlock(sa);

	return -ret;
}
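
/*
 * Note: flow isolation may only be toggled while the adapter is initialised
 * but not yet started; the requested mode is simply latched in port->isolated
 * and consulted elsewhere in the driver.
 */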
static int
sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	int ret = 0;

	sfc_adapter_lock(sa);
	if (sa->state != SFC_ADAPTER_INITIALIZED) {
		rte_flow_error_set(error, EBUSY,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			NULL, "please close the port first");
		ret = -rte_errno;
	} else {
		port->isolated = (enable) ? B_TRUE : B_FALSE;
	}
	sfc_adapter_unlock(sa);

	return ret;
}

const struct rte_flow_ops sfc_flow_ops = {
	.validate = sfc_flow_validate,
	.create = sfc_flow_create,
	.destroy = sfc_flow_destroy,
	.flush = sfc_flow_flush,
	.query = NULL,
	.isolate = sfc_flow_isolate,
};

void
sfc_flow_init(struct sfc_adapter *sa)
{
	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_INIT(&sa->filter.flow_list);
}

void
sfc_flow_fini(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
		rte_free(flow);
	}
}

void
sfc_flow_stop(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
		sfc_flow_filter_remove(sa, flow);
}

int
sfc_flow_start(struct sfc_adapter *sa)
{
	struct rte_flow *flow;
	int rc = 0;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
		rc = sfc_flow_filter_insert(sa, flow);
		if (rc != 0)
			goto fail_bad_flow;
	}

	sfc_log_init(sa, "done");

fail_bad_flow:
	return rc;
}