/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"

/*
 * Currently, the flow API is implemented in such a manner that each
 * flow rule is converted to one or more hardware filters. All elements
 * of a flow rule (attributes, pattern items, actions) correspond to one
 * or more fields in the efx_filter_spec_s structure that describes the
 * hardware filter. If some required field is left unset in the flow
 * rule, a set of filter copies is created to cover all possible values
 * of such a field.
 */

enum sfc_flow_item_layers {
	SFC_FLOW_ITEM_ANY_LAYER,
	SFC_FLOW_ITEM_START_LAYER,
	SFC_FLOW_ITEM_L2,
	SFC_FLOW_ITEM_L3,
	SFC_FLOW_ITEM_L4,
};

typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
				  efx_filter_spec_t *spec,
				  struct rte_flow_error *error);

struct sfc_flow_item {
	enum rte_flow_item_type type;		/* Type of item */
	enum sfc_flow_item_layers layer;	/* Layer of item */
	enum sfc_flow_item_layers prev_layer;	/* Previous layer of item */
	sfc_flow_item_parse *parse;		/* Parsing function */
};

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
				     unsigned int filters_count_for_one_val,
				     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
					efx_filter_spec_t *spec,
					struct sfc_filter *filter);

struct sfc_flow_copy_flag {
	/* EFX filter specification match flag */
	efx_filter_match_flags_t flag;
	/* Number of values of corresponding field */
	unsigned int vals_count;
	/* Function to set values in specifications */
	sfc_flow_spec_set_vals *set_vals;
	/*
	 * Function to check that the specification is suitable
	 * for adding this match flag
	 */
	sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return (sum == 0) ?
B_TRUE : B_FALSE; 106 } 107 108 /* 109 * Validate item and prepare structures spec and mask for parsing 110 */ 111 static int 112 sfc_flow_parse_init(const struct rte_flow_item *item, 113 const void **spec_ptr, 114 const void **mask_ptr, 115 const void *supp_mask, 116 const void *def_mask, 117 unsigned int size, 118 struct rte_flow_error *error) 119 { 120 const uint8_t *spec; 121 const uint8_t *mask; 122 const uint8_t *last; 123 uint8_t match; 124 uint8_t supp; 125 unsigned int i; 126 127 if (item == NULL) { 128 rte_flow_error_set(error, EINVAL, 129 RTE_FLOW_ERROR_TYPE_ITEM, NULL, 130 "NULL item"); 131 return -rte_errno; 132 } 133 134 if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) { 135 rte_flow_error_set(error, EINVAL, 136 RTE_FLOW_ERROR_TYPE_ITEM, item, 137 "Mask or last is set without spec"); 138 return -rte_errno; 139 } 140 141 /* 142 * If "mask" is not set, default mask is used, 143 * but if default mask is NULL, "mask" should be set 144 */ 145 if (item->mask == NULL) { 146 if (def_mask == NULL) { 147 rte_flow_error_set(error, EINVAL, 148 RTE_FLOW_ERROR_TYPE_ITEM, NULL, 149 "Mask should be specified"); 150 return -rte_errno; 151 } 152 153 mask = def_mask; 154 } else { 155 mask = item->mask; 156 } 157 158 spec = item->spec; 159 last = item->last; 160 161 if (spec == NULL) 162 goto exit; 163 164 /* 165 * If field values in "last" are either 0 or equal to the corresponding 166 * values in "spec" then they are ignored 167 */ 168 if (last != NULL && 169 !sfc_flow_is_zero(last, size) && 170 memcmp(last, spec, size) != 0) { 171 rte_flow_error_set(error, ENOTSUP, 172 RTE_FLOW_ERROR_TYPE_ITEM, item, 173 "Ranging is not supported"); 174 return -rte_errno; 175 } 176 177 if (supp_mask == NULL) { 178 rte_flow_error_set(error, EINVAL, 179 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 180 "Supported mask for item should be specified"); 181 return -rte_errno; 182 } 183 184 /* Check that mask and spec not asks for more match than supp_mask */ 185 for (i = 0; i < size; i++) { 186 match = spec[i] | mask[i]; 187 supp = ((const uint8_t *)supp_mask)[i]; 188 189 if ((match | supp) != supp) { 190 rte_flow_error_set(error, ENOTSUP, 191 RTE_FLOW_ERROR_TYPE_ITEM, item, 192 "Item's field is not supported"); 193 return -rte_errno; 194 } 195 } 196 197 exit: 198 *spec_ptr = spec; 199 *mask_ptr = mask; 200 return 0; 201 } 202 203 /* 204 * Protocol parsers. 205 * Masking is not supported, so masks in items should be either 206 * full or empty (zeroed) and set only for supported fields which 207 * are specified in the supp_mask. 208 */ 209 210 static int 211 sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item, 212 __rte_unused efx_filter_spec_t *efx_spec, 213 __rte_unused struct rte_flow_error *error) 214 { 215 return 0; 216 } 217 218 /** 219 * Convert Ethernet item to EFX filter specification. 220 * 221 * @param item[in] 222 * Item specification. Outer frame specification may only comprise 223 * source/destination addresses and Ethertype field. 224 * Inner frame specification may contain destination address only. 225 * There is support for individual/group mask as well as for empty and full. 226 * If the mask is NULL, default mask will be used. Ranging is not supported. 227 * @param efx_spec[in, out] 228 * EFX filter specification to update. 229 * @param[out] error 230 * Perform verbose error reporting if not NULL. 
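 *
 * A minimal application-side illustration (not part of this driver) of an
 * ETH item this parser accepts; the MAC address is a placeholder value:
 *
 *   static const struct rte_flow_item_eth eth_spec = {
 *       .dst.addr_bytes = { 0x00, 0x0f, 0x53, 0x00, 0x00, 0x01 },
 *   };
 *   static const struct rte_flow_item_eth eth_mask = {
 *       .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *   };
 *   const struct rte_flow_item item = {
 *       .type = RTE_FLOW_ITEM_TYPE_ETH,
 *       .spec = &eth_spec,
 *       .mask = &eth_mask,
 *   };
 *
 * With the full destination MAC mask above, the parser sets
 * EFX_FILTER_MATCH_LOC_MAC (or its IFRM counterpart for the inner frame).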
231 */ 232 static int 233 sfc_flow_parse_eth(const struct rte_flow_item *item, 234 efx_filter_spec_t *efx_spec, 235 struct rte_flow_error *error) 236 { 237 int rc; 238 const struct rte_flow_item_eth *spec = NULL; 239 const struct rte_flow_item_eth *mask = NULL; 240 const struct rte_flow_item_eth supp_mask = { 241 .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 242 .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 243 .type = 0xffff, 244 }; 245 const struct rte_flow_item_eth ifrm_supp_mask = { 246 .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 247 }; 248 const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = { 249 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 250 }; 251 const struct rte_flow_item_eth *supp_mask_p; 252 const struct rte_flow_item_eth *def_mask_p; 253 uint8_t *loc_mac = NULL; 254 boolean_t is_ifrm = (efx_spec->efs_encap_type != 255 EFX_TUNNEL_PROTOCOL_NONE); 256 257 if (is_ifrm) { 258 supp_mask_p = &ifrm_supp_mask; 259 def_mask_p = &ifrm_supp_mask; 260 loc_mac = efx_spec->efs_ifrm_loc_mac; 261 } else { 262 supp_mask_p = &supp_mask; 263 def_mask_p = &rte_flow_item_eth_mask; 264 loc_mac = efx_spec->efs_loc_mac; 265 } 266 267 rc = sfc_flow_parse_init(item, 268 (const void **)&spec, 269 (const void **)&mask, 270 supp_mask_p, def_mask_p, 271 sizeof(struct rte_flow_item_eth), 272 error); 273 if (rc != 0) 274 return rc; 275 276 /* If "spec" is not set, could be any Ethernet */ 277 if (spec == NULL) 278 return 0; 279 280 if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) { 281 efx_spec->efs_match_flags |= is_ifrm ? 282 EFX_FILTER_MATCH_IFRM_LOC_MAC : 283 EFX_FILTER_MATCH_LOC_MAC; 284 rte_memcpy(loc_mac, spec->dst.addr_bytes, 285 EFX_MAC_ADDR_LEN); 286 } else if (memcmp(mask->dst.addr_bytes, ig_mask, 287 EFX_MAC_ADDR_LEN) == 0) { 288 if (is_unicast_ether_addr(&spec->dst)) 289 efx_spec->efs_match_flags |= is_ifrm ? 290 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST : 291 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST; 292 else 293 efx_spec->efs_match_flags |= is_ifrm ? 294 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST : 295 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST; 296 } else if (!is_zero_ether_addr(&mask->dst)) { 297 goto fail_bad_mask; 298 } 299 300 /* 301 * ifrm_supp_mask ensures that the source address and 302 * ethertype masks are equal to zero in inner frame, 303 * so these fields are filled in only for the outer frame 304 */ 305 if (is_same_ether_addr(&mask->src, &supp_mask.src)) { 306 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC; 307 rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes, 308 EFX_MAC_ADDR_LEN); 309 } else if (!is_zero_ether_addr(&mask->src)) { 310 goto fail_bad_mask; 311 } 312 313 /* 314 * Ether type is in big-endian byte order in item and 315 * in little-endian in efx_spec, so byte swap is used 316 */ 317 if (mask->type == supp_mask.type) { 318 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; 319 efx_spec->efs_ether_type = rte_bswap16(spec->type); 320 } else if (mask->type != 0) { 321 goto fail_bad_mask; 322 } 323 324 return 0; 325 326 fail_bad_mask: 327 rte_flow_error_set(error, EINVAL, 328 RTE_FLOW_ERROR_TYPE_ITEM, item, 329 "Bad mask in the ETH pattern item"); 330 return -rte_errno; 331 } 332 333 /** 334 * Convert VLAN item to EFX filter specification. 335 * 336 * @param item[in] 337 * Item specification. Only VID field is supported. 338 * The mask can not be NULL. Ranging is not supported. 339 * @param efx_spec[in, out] 340 * EFX filter specification to update. 341 * @param[out] error 342 * Perform verbose error reporting if not NULL. 
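 *
 * An illustrative VLAN item (placeholder VID 100); the TCI mask below
 * covers exactly the VID bits, which is what this parser requires:
 *
 *   static const struct rte_flow_item_vlan vlan_spec = {
 *       .tci = RTE_BE16(100),
 *   };
 *   static const struct rte_flow_item_vlan vlan_mask = {
 *       .tci = RTE_BE16(0x0fff),
 *   };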
343 */ 344 static int 345 sfc_flow_parse_vlan(const struct rte_flow_item *item, 346 efx_filter_spec_t *efx_spec, 347 struct rte_flow_error *error) 348 { 349 int rc; 350 uint16_t vid; 351 const struct rte_flow_item_vlan *spec = NULL; 352 const struct rte_flow_item_vlan *mask = NULL; 353 const struct rte_flow_item_vlan supp_mask = { 354 .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX), 355 .inner_type = RTE_BE16(0xffff), 356 }; 357 358 rc = sfc_flow_parse_init(item, 359 (const void **)&spec, 360 (const void **)&mask, 361 &supp_mask, 362 NULL, 363 sizeof(struct rte_flow_item_vlan), 364 error); 365 if (rc != 0) 366 return rc; 367 368 /* 369 * VID is in big-endian byte order in item and 370 * in little-endian in efx_spec, so byte swap is used. 371 * If two VLAN items are included, the first matches 372 * the outer tag and the next matches the inner tag. 373 */ 374 if (mask->tci == supp_mask.tci) { 375 vid = rte_bswap16(spec->tci); 376 377 if (!(efx_spec->efs_match_flags & 378 EFX_FILTER_MATCH_OUTER_VID)) { 379 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID; 380 efx_spec->efs_outer_vid = vid; 381 } else if (!(efx_spec->efs_match_flags & 382 EFX_FILTER_MATCH_INNER_VID)) { 383 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID; 384 efx_spec->efs_inner_vid = vid; 385 } else { 386 rte_flow_error_set(error, EINVAL, 387 RTE_FLOW_ERROR_TYPE_ITEM, item, 388 "More than two VLAN items"); 389 return -rte_errno; 390 } 391 } else { 392 rte_flow_error_set(error, EINVAL, 393 RTE_FLOW_ERROR_TYPE_ITEM, item, 394 "VLAN ID in TCI match is required"); 395 return -rte_errno; 396 } 397 398 if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) { 399 rte_flow_error_set(error, EINVAL, 400 RTE_FLOW_ERROR_TYPE_ITEM, item, 401 "VLAN TPID matching is not supported"); 402 return -rte_errno; 403 } 404 if (mask->inner_type == supp_mask.inner_type) { 405 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; 406 efx_spec->efs_ether_type = rte_bswap16(spec->inner_type); 407 } else if (mask->inner_type) { 408 rte_flow_error_set(error, EINVAL, 409 RTE_FLOW_ERROR_TYPE_ITEM, item, 410 "Bad mask for VLAN inner_type"); 411 return -rte_errno; 412 } 413 414 return 0; 415 } 416 417 /** 418 * Convert IPv4 item to EFX filter specification. 419 * 420 * @param item[in] 421 * Item specification. Only source and destination addresses and 422 * protocol fields are supported. If the mask is NULL, default 423 * mask will be used. Ranging is not supported. 424 * @param efx_spec[in, out] 425 * EFX filter specification to update. 426 * @param[out] error 427 * Perform verbose error reporting if not NULL. 
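 *
 * An illustrative IPv4 item (placeholder address 192.168.1.1 written as a
 * big-endian constant); only the destination address is matched here:
 *
 *   static const struct rte_flow_item_ipv4 ipv4_spec = {
 *       .hdr.dst_addr = RTE_BE32(0xc0a80101),
 *   };
 *   static const struct rte_flow_item_ipv4 ipv4_mask = {
 *       .hdr.dst_addr = RTE_BE32(0xffffffff),
 *   };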
428 */ 429 static int 430 sfc_flow_parse_ipv4(const struct rte_flow_item *item, 431 efx_filter_spec_t *efx_spec, 432 struct rte_flow_error *error) 433 { 434 int rc; 435 const struct rte_flow_item_ipv4 *spec = NULL; 436 const struct rte_flow_item_ipv4 *mask = NULL; 437 const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4); 438 const struct rte_flow_item_ipv4 supp_mask = { 439 .hdr = { 440 .src_addr = 0xffffffff, 441 .dst_addr = 0xffffffff, 442 .next_proto_id = 0xff, 443 } 444 }; 445 446 rc = sfc_flow_parse_init(item, 447 (const void **)&spec, 448 (const void **)&mask, 449 &supp_mask, 450 &rte_flow_item_ipv4_mask, 451 sizeof(struct rte_flow_item_ipv4), 452 error); 453 if (rc != 0) 454 return rc; 455 456 /* 457 * Filtering by IPv4 source and destination addresses requires 458 * the appropriate ETHER_TYPE in hardware filters 459 */ 460 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) { 461 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; 462 efx_spec->efs_ether_type = ether_type_ipv4; 463 } else if (efx_spec->efs_ether_type != ether_type_ipv4) { 464 rte_flow_error_set(error, EINVAL, 465 RTE_FLOW_ERROR_TYPE_ITEM, item, 466 "Ethertype in pattern with IPV4 item should be appropriate"); 467 return -rte_errno; 468 } 469 470 if (spec == NULL) 471 return 0; 472 473 /* 474 * IPv4 addresses are in big-endian byte order in item and in 475 * efx_spec 476 */ 477 if (mask->hdr.src_addr == supp_mask.hdr.src_addr) { 478 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST; 479 efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr; 480 } else if (mask->hdr.src_addr != 0) { 481 goto fail_bad_mask; 482 } 483 484 if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) { 485 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST; 486 efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr; 487 } else if (mask->hdr.dst_addr != 0) { 488 goto fail_bad_mask; 489 } 490 491 if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) { 492 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 493 efx_spec->efs_ip_proto = spec->hdr.next_proto_id; 494 } else if (mask->hdr.next_proto_id != 0) { 495 goto fail_bad_mask; 496 } 497 498 return 0; 499 500 fail_bad_mask: 501 rte_flow_error_set(error, EINVAL, 502 RTE_FLOW_ERROR_TYPE_ITEM, item, 503 "Bad mask in the IPV4 pattern item"); 504 return -rte_errno; 505 } 506 507 /** 508 * Convert IPv6 item to EFX filter specification. 509 * 510 * @param item[in] 511 * Item specification. Only source and destination addresses and 512 * next header fields are supported. If the mask is NULL, default 513 * mask will be used. Ranging is not supported. 514 * @param efx_spec[in, out] 515 * EFX filter specification to update. 516 * @param[out] error 517 * Perform verbose error reporting if not NULL. 
518 */ 519 static int 520 sfc_flow_parse_ipv6(const struct rte_flow_item *item, 521 efx_filter_spec_t *efx_spec, 522 struct rte_flow_error *error) 523 { 524 int rc; 525 const struct rte_flow_item_ipv6 *spec = NULL; 526 const struct rte_flow_item_ipv6 *mask = NULL; 527 const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6); 528 const struct rte_flow_item_ipv6 supp_mask = { 529 .hdr = { 530 .src_addr = { 0xff, 0xff, 0xff, 0xff, 531 0xff, 0xff, 0xff, 0xff, 532 0xff, 0xff, 0xff, 0xff, 533 0xff, 0xff, 0xff, 0xff }, 534 .dst_addr = { 0xff, 0xff, 0xff, 0xff, 535 0xff, 0xff, 0xff, 0xff, 536 0xff, 0xff, 0xff, 0xff, 537 0xff, 0xff, 0xff, 0xff }, 538 .proto = 0xff, 539 } 540 }; 541 542 rc = sfc_flow_parse_init(item, 543 (const void **)&spec, 544 (const void **)&mask, 545 &supp_mask, 546 &rte_flow_item_ipv6_mask, 547 sizeof(struct rte_flow_item_ipv6), 548 error); 549 if (rc != 0) 550 return rc; 551 552 /* 553 * Filtering by IPv6 source and destination addresses requires 554 * the appropriate ETHER_TYPE in hardware filters 555 */ 556 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) { 557 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; 558 efx_spec->efs_ether_type = ether_type_ipv6; 559 } else if (efx_spec->efs_ether_type != ether_type_ipv6) { 560 rte_flow_error_set(error, EINVAL, 561 RTE_FLOW_ERROR_TYPE_ITEM, item, 562 "Ethertype in pattern with IPV6 item should be appropriate"); 563 return -rte_errno; 564 } 565 566 if (spec == NULL) 567 return 0; 568 569 /* 570 * IPv6 addresses are in big-endian byte order in item and in 571 * efx_spec 572 */ 573 if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr, 574 sizeof(mask->hdr.src_addr)) == 0) { 575 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST; 576 577 RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) != 578 sizeof(spec->hdr.src_addr)); 579 rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr, 580 sizeof(efx_spec->efs_rem_host)); 581 } else if (!sfc_flow_is_zero(mask->hdr.src_addr, 582 sizeof(mask->hdr.src_addr))) { 583 goto fail_bad_mask; 584 } 585 586 if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr, 587 sizeof(mask->hdr.dst_addr)) == 0) { 588 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST; 589 590 RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) != 591 sizeof(spec->hdr.dst_addr)); 592 rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr, 593 sizeof(efx_spec->efs_loc_host)); 594 } else if (!sfc_flow_is_zero(mask->hdr.dst_addr, 595 sizeof(mask->hdr.dst_addr))) { 596 goto fail_bad_mask; 597 } 598 599 if (mask->hdr.proto == supp_mask.hdr.proto) { 600 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 601 efx_spec->efs_ip_proto = spec->hdr.proto; 602 } else if (mask->hdr.proto != 0) { 603 goto fail_bad_mask; 604 } 605 606 return 0; 607 608 fail_bad_mask: 609 rte_flow_error_set(error, EINVAL, 610 RTE_FLOW_ERROR_TYPE_ITEM, item, 611 "Bad mask in the IPV6 pattern item"); 612 return -rte_errno; 613 } 614 615 /** 616 * Convert TCP item to EFX filter specification. 617 * 618 * @param item[in] 619 * Item specification. Only source and destination ports fields 620 * are supported. If the mask is NULL, default mask will be used. 621 * Ranging is not supported. 622 * @param efx_spec[in, out] 623 * EFX filter specification to update. 624 * @param[out] error 625 * Perform verbose error reporting if not NULL. 
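 *
 * An illustrative TCP item matching the destination port only (placeholder
 * port 80); the source port is left unmasked and therefore ignored:
 *
 *   static const struct rte_flow_item_tcp tcp_spec = {
 *       .hdr.dst_port = RTE_BE16(80),
 *   };
 *   static const struct rte_flow_item_tcp tcp_mask = {
 *       .hdr.dst_port = RTE_BE16(0xffff),
 *   };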
626 */ 627 static int 628 sfc_flow_parse_tcp(const struct rte_flow_item *item, 629 efx_filter_spec_t *efx_spec, 630 struct rte_flow_error *error) 631 { 632 int rc; 633 const struct rte_flow_item_tcp *spec = NULL; 634 const struct rte_flow_item_tcp *mask = NULL; 635 const struct rte_flow_item_tcp supp_mask = { 636 .hdr = { 637 .src_port = 0xffff, 638 .dst_port = 0xffff, 639 } 640 }; 641 642 rc = sfc_flow_parse_init(item, 643 (const void **)&spec, 644 (const void **)&mask, 645 &supp_mask, 646 &rte_flow_item_tcp_mask, 647 sizeof(struct rte_flow_item_tcp), 648 error); 649 if (rc != 0) 650 return rc; 651 652 /* 653 * Filtering by TCP source and destination ports requires 654 * the appropriate IP_PROTO in hardware filters 655 */ 656 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) { 657 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 658 efx_spec->efs_ip_proto = EFX_IPPROTO_TCP; 659 } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) { 660 rte_flow_error_set(error, EINVAL, 661 RTE_FLOW_ERROR_TYPE_ITEM, item, 662 "IP proto in pattern with TCP item should be appropriate"); 663 return -rte_errno; 664 } 665 666 if (spec == NULL) 667 return 0; 668 669 /* 670 * Source and destination ports are in big-endian byte order in item and 671 * in little-endian in efx_spec, so byte swap is used 672 */ 673 if (mask->hdr.src_port == supp_mask.hdr.src_port) { 674 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT; 675 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port); 676 } else if (mask->hdr.src_port != 0) { 677 goto fail_bad_mask; 678 } 679 680 if (mask->hdr.dst_port == supp_mask.hdr.dst_port) { 681 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT; 682 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port); 683 } else if (mask->hdr.dst_port != 0) { 684 goto fail_bad_mask; 685 } 686 687 return 0; 688 689 fail_bad_mask: 690 rte_flow_error_set(error, EINVAL, 691 RTE_FLOW_ERROR_TYPE_ITEM, item, 692 "Bad mask in the TCP pattern item"); 693 return -rte_errno; 694 } 695 696 /** 697 * Convert UDP item to EFX filter specification. 698 * 699 * @param item[in] 700 * Item specification. Only source and destination ports fields 701 * are supported. If the mask is NULL, default mask will be used. 702 * Ranging is not supported. 703 * @param efx_spec[in, out] 704 * EFX filter specification to update. 705 * @param[out] error 706 * Perform verbose error reporting if not NULL. 
707 */ 708 static int 709 sfc_flow_parse_udp(const struct rte_flow_item *item, 710 efx_filter_spec_t *efx_spec, 711 struct rte_flow_error *error) 712 { 713 int rc; 714 const struct rte_flow_item_udp *spec = NULL; 715 const struct rte_flow_item_udp *mask = NULL; 716 const struct rte_flow_item_udp supp_mask = { 717 .hdr = { 718 .src_port = 0xffff, 719 .dst_port = 0xffff, 720 } 721 }; 722 723 rc = sfc_flow_parse_init(item, 724 (const void **)&spec, 725 (const void **)&mask, 726 &supp_mask, 727 &rte_flow_item_udp_mask, 728 sizeof(struct rte_flow_item_udp), 729 error); 730 if (rc != 0) 731 return rc; 732 733 /* 734 * Filtering by UDP source and destination ports requires 735 * the appropriate IP_PROTO in hardware filters 736 */ 737 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) { 738 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 739 efx_spec->efs_ip_proto = EFX_IPPROTO_UDP; 740 } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) { 741 rte_flow_error_set(error, EINVAL, 742 RTE_FLOW_ERROR_TYPE_ITEM, item, 743 "IP proto in pattern with UDP item should be appropriate"); 744 return -rte_errno; 745 } 746 747 if (spec == NULL) 748 return 0; 749 750 /* 751 * Source and destination ports are in big-endian byte order in item and 752 * in little-endian in efx_spec, so byte swap is used 753 */ 754 if (mask->hdr.src_port == supp_mask.hdr.src_port) { 755 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT; 756 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port); 757 } else if (mask->hdr.src_port != 0) { 758 goto fail_bad_mask; 759 } 760 761 if (mask->hdr.dst_port == supp_mask.hdr.dst_port) { 762 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT; 763 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port); 764 } else if (mask->hdr.dst_port != 0) { 765 goto fail_bad_mask; 766 } 767 768 return 0; 769 770 fail_bad_mask: 771 rte_flow_error_set(error, EINVAL, 772 RTE_FLOW_ERROR_TYPE_ITEM, item, 773 "Bad mask in the UDP pattern item"); 774 return -rte_errno; 775 } 776 777 /* 778 * Filters for encapsulated packets match based on the EtherType and IP 779 * protocol in the outer frame. 
780 */ 781 static int 782 sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item, 783 efx_filter_spec_t *efx_spec, 784 uint8_t ip_proto, 785 struct rte_flow_error *error) 786 { 787 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) { 788 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 789 efx_spec->efs_ip_proto = ip_proto; 790 } else if (efx_spec->efs_ip_proto != ip_proto) { 791 switch (ip_proto) { 792 case EFX_IPPROTO_UDP: 793 rte_flow_error_set(error, EINVAL, 794 RTE_FLOW_ERROR_TYPE_ITEM, item, 795 "Outer IP header protocol must be UDP " 796 "in VxLAN/GENEVE pattern"); 797 return -rte_errno; 798 799 case EFX_IPPROTO_GRE: 800 rte_flow_error_set(error, EINVAL, 801 RTE_FLOW_ERROR_TYPE_ITEM, item, 802 "Outer IP header protocol must be GRE " 803 "in NVGRE pattern"); 804 return -rte_errno; 805 806 default: 807 rte_flow_error_set(error, EINVAL, 808 RTE_FLOW_ERROR_TYPE_ITEM, item, 809 "Only VxLAN/GENEVE/NVGRE tunneling patterns " 810 "are supported"); 811 return -rte_errno; 812 } 813 } 814 815 if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE && 816 efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 && 817 efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) { 818 rte_flow_error_set(error, EINVAL, 819 RTE_FLOW_ERROR_TYPE_ITEM, item, 820 "Outer frame EtherType in pattern with tunneling " 821 "must be IPv4 or IPv6"); 822 return -rte_errno; 823 } 824 825 return 0; 826 } 827 828 static int 829 sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec, 830 const uint8_t *vni_or_vsid_val, 831 const uint8_t *vni_or_vsid_mask, 832 const struct rte_flow_item *item, 833 struct rte_flow_error *error) 834 { 835 const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = { 836 0xff, 0xff, 0xff 837 }; 838 839 if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask, 840 EFX_VNI_OR_VSID_LEN) == 0) { 841 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID; 842 rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val, 843 EFX_VNI_OR_VSID_LEN); 844 } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) { 845 rte_flow_error_set(error, EINVAL, 846 RTE_FLOW_ERROR_TYPE_ITEM, item, 847 "Unsupported VNI/VSID mask"); 848 return -rte_errno; 849 } 850 851 return 0; 852 } 853 854 /** 855 * Convert VXLAN item to EFX filter specification. 856 * 857 * @param item[in] 858 * Item specification. Only VXLAN network identifier field is supported. 859 * If the mask is NULL, default mask will be used. 860 * Ranging is not supported. 861 * @param efx_spec[in, out] 862 * EFX filter specification to update. 863 * @param[out] error 864 * Perform verbose error reporting if not NULL. 
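 *
 * An illustrative VXLAN item for a placeholder VNI of 0x003039 (12345);
 * note that the preceding outer items must leave the IP protocol as UDP:
 *
 *   static const struct rte_flow_item_vxlan vxlan_spec = {
 *       .vni = { 0x00, 0x30, 0x39 },
 *   };
 *   static const struct rte_flow_item_vxlan vxlan_mask = {
 *       .vni = { 0xff, 0xff, 0xff },
 *   };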
865 */ 866 static int 867 sfc_flow_parse_vxlan(const struct rte_flow_item *item, 868 efx_filter_spec_t *efx_spec, 869 struct rte_flow_error *error) 870 { 871 int rc; 872 const struct rte_flow_item_vxlan *spec = NULL; 873 const struct rte_flow_item_vxlan *mask = NULL; 874 const struct rte_flow_item_vxlan supp_mask = { 875 .vni = { 0xff, 0xff, 0xff } 876 }; 877 878 rc = sfc_flow_parse_init(item, 879 (const void **)&spec, 880 (const void **)&mask, 881 &supp_mask, 882 &rte_flow_item_vxlan_mask, 883 sizeof(struct rte_flow_item_vxlan), 884 error); 885 if (rc != 0) 886 return rc; 887 888 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec, 889 EFX_IPPROTO_UDP, error); 890 if (rc != 0) 891 return rc; 892 893 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN; 894 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; 895 896 if (spec == NULL) 897 return 0; 898 899 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni, 900 mask->vni, item, error); 901 902 return rc; 903 } 904 905 /** 906 * Convert GENEVE item to EFX filter specification. 907 * 908 * @param item[in] 909 * Item specification. Only Virtual Network Identifier and protocol type 910 * fields are supported. But protocol type can be only Ethernet (0x6558). 911 * If the mask is NULL, default mask will be used. 912 * Ranging is not supported. 913 * @param efx_spec[in, out] 914 * EFX filter specification to update. 915 * @param[out] error 916 * Perform verbose error reporting if not NULL. 917 */ 918 static int 919 sfc_flow_parse_geneve(const struct rte_flow_item *item, 920 efx_filter_spec_t *efx_spec, 921 struct rte_flow_error *error) 922 { 923 int rc; 924 const struct rte_flow_item_geneve *spec = NULL; 925 const struct rte_flow_item_geneve *mask = NULL; 926 const struct rte_flow_item_geneve supp_mask = { 927 .protocol = RTE_BE16(0xffff), 928 .vni = { 0xff, 0xff, 0xff } 929 }; 930 931 rc = sfc_flow_parse_init(item, 932 (const void **)&spec, 933 (const void **)&mask, 934 &supp_mask, 935 &rte_flow_item_geneve_mask, 936 sizeof(struct rte_flow_item_geneve), 937 error); 938 if (rc != 0) 939 return rc; 940 941 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec, 942 EFX_IPPROTO_UDP, error); 943 if (rc != 0) 944 return rc; 945 946 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE; 947 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; 948 949 if (spec == NULL) 950 return 0; 951 952 if (mask->protocol == supp_mask.protocol) { 953 if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) { 954 rte_flow_error_set(error, EINVAL, 955 RTE_FLOW_ERROR_TYPE_ITEM, item, 956 "GENEVE encap. protocol must be Ethernet " 957 "(0x6558) in the GENEVE pattern item"); 958 return -rte_errno; 959 } 960 } else if (mask->protocol != 0) { 961 rte_flow_error_set(error, EINVAL, 962 RTE_FLOW_ERROR_TYPE_ITEM, item, 963 "Unsupported mask for GENEVE encap. protocol"); 964 return -rte_errno; 965 } 966 967 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni, 968 mask->vni, item, error); 969 970 return rc; 971 } 972 973 /** 974 * Convert NVGRE item to EFX filter specification. 975 * 976 * @param item[in] 977 * Item specification. Only virtual subnet ID field is supported. 978 * If the mask is NULL, default mask will be used. 979 * Ranging is not supported. 980 * @param efx_spec[in, out] 981 * EFX filter specification to update. 982 * @param[out] error 983 * Perform verbose error reporting if not NULL. 
984 */ 985 static int 986 sfc_flow_parse_nvgre(const struct rte_flow_item *item, 987 efx_filter_spec_t *efx_spec, 988 struct rte_flow_error *error) 989 { 990 int rc; 991 const struct rte_flow_item_nvgre *spec = NULL; 992 const struct rte_flow_item_nvgre *mask = NULL; 993 const struct rte_flow_item_nvgre supp_mask = { 994 .tni = { 0xff, 0xff, 0xff } 995 }; 996 997 rc = sfc_flow_parse_init(item, 998 (const void **)&spec, 999 (const void **)&mask, 1000 &supp_mask, 1001 &rte_flow_item_nvgre_mask, 1002 sizeof(struct rte_flow_item_nvgre), 1003 error); 1004 if (rc != 0) 1005 return rc; 1006 1007 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec, 1008 EFX_IPPROTO_GRE, error); 1009 if (rc != 0) 1010 return rc; 1011 1012 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE; 1013 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; 1014 1015 if (spec == NULL) 1016 return 0; 1017 1018 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni, 1019 mask->tni, item, error); 1020 1021 return rc; 1022 } 1023 1024 static const struct sfc_flow_item sfc_flow_items[] = { 1025 { 1026 .type = RTE_FLOW_ITEM_TYPE_VOID, 1027 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER, 1028 .layer = SFC_FLOW_ITEM_ANY_LAYER, 1029 .parse = sfc_flow_parse_void, 1030 }, 1031 { 1032 .type = RTE_FLOW_ITEM_TYPE_ETH, 1033 .prev_layer = SFC_FLOW_ITEM_START_LAYER, 1034 .layer = SFC_FLOW_ITEM_L2, 1035 .parse = sfc_flow_parse_eth, 1036 }, 1037 { 1038 .type = RTE_FLOW_ITEM_TYPE_VLAN, 1039 .prev_layer = SFC_FLOW_ITEM_L2, 1040 .layer = SFC_FLOW_ITEM_L2, 1041 .parse = sfc_flow_parse_vlan, 1042 }, 1043 { 1044 .type = RTE_FLOW_ITEM_TYPE_IPV4, 1045 .prev_layer = SFC_FLOW_ITEM_L2, 1046 .layer = SFC_FLOW_ITEM_L3, 1047 .parse = sfc_flow_parse_ipv4, 1048 }, 1049 { 1050 .type = RTE_FLOW_ITEM_TYPE_IPV6, 1051 .prev_layer = SFC_FLOW_ITEM_L2, 1052 .layer = SFC_FLOW_ITEM_L3, 1053 .parse = sfc_flow_parse_ipv6, 1054 }, 1055 { 1056 .type = RTE_FLOW_ITEM_TYPE_TCP, 1057 .prev_layer = SFC_FLOW_ITEM_L3, 1058 .layer = SFC_FLOW_ITEM_L4, 1059 .parse = sfc_flow_parse_tcp, 1060 }, 1061 { 1062 .type = RTE_FLOW_ITEM_TYPE_UDP, 1063 .prev_layer = SFC_FLOW_ITEM_L3, 1064 .layer = SFC_FLOW_ITEM_L4, 1065 .parse = sfc_flow_parse_udp, 1066 }, 1067 { 1068 .type = RTE_FLOW_ITEM_TYPE_VXLAN, 1069 .prev_layer = SFC_FLOW_ITEM_L4, 1070 .layer = SFC_FLOW_ITEM_START_LAYER, 1071 .parse = sfc_flow_parse_vxlan, 1072 }, 1073 { 1074 .type = RTE_FLOW_ITEM_TYPE_GENEVE, 1075 .prev_layer = SFC_FLOW_ITEM_L4, 1076 .layer = SFC_FLOW_ITEM_START_LAYER, 1077 .parse = sfc_flow_parse_geneve, 1078 }, 1079 { 1080 .type = RTE_FLOW_ITEM_TYPE_NVGRE, 1081 .prev_layer = SFC_FLOW_ITEM_L3, 1082 .layer = SFC_FLOW_ITEM_START_LAYER, 1083 .parse = sfc_flow_parse_nvgre, 1084 }, 1085 }; 1086 1087 /* 1088 * Protocol-independent flow API support 1089 */ 1090 static int 1091 sfc_flow_parse_attr(const struct rte_flow_attr *attr, 1092 struct rte_flow *flow, 1093 struct rte_flow_error *error) 1094 { 1095 if (attr == NULL) { 1096 rte_flow_error_set(error, EINVAL, 1097 RTE_FLOW_ERROR_TYPE_ATTR, NULL, 1098 "NULL attribute"); 1099 return -rte_errno; 1100 } 1101 if (attr->group != 0) { 1102 rte_flow_error_set(error, ENOTSUP, 1103 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr, 1104 "Groups are not supported"); 1105 return -rte_errno; 1106 } 1107 if (attr->priority != 0) { 1108 rte_flow_error_set(error, ENOTSUP, 1109 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr, 1110 "Priorities are not supported"); 1111 return -rte_errno; 1112 } 1113 if (attr->egress != 0) { 1114 rte_flow_error_set(error, ENOTSUP, 1115 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, 
attr, 1116 "Egress is not supported"); 1117 return -rte_errno; 1118 } 1119 if (attr->transfer != 0) { 1120 rte_flow_error_set(error, ENOTSUP, 1121 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr, 1122 "Transfer is not supported"); 1123 return -rte_errno; 1124 } 1125 if (attr->ingress == 0) { 1126 rte_flow_error_set(error, ENOTSUP, 1127 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr, 1128 "Only ingress is supported"); 1129 return -rte_errno; 1130 } 1131 1132 flow->spec.template.efs_flags |= EFX_FILTER_FLAG_RX; 1133 flow->spec.template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT; 1134 1135 return 0; 1136 } 1137 1138 /* Get item from array sfc_flow_items */ 1139 static const struct sfc_flow_item * 1140 sfc_flow_get_item(enum rte_flow_item_type type) 1141 { 1142 unsigned int i; 1143 1144 for (i = 0; i < RTE_DIM(sfc_flow_items); i++) 1145 if (sfc_flow_items[i].type == type) 1146 return &sfc_flow_items[i]; 1147 1148 return NULL; 1149 } 1150 1151 static int 1152 sfc_flow_parse_pattern(const struct rte_flow_item pattern[], 1153 struct rte_flow *flow, 1154 struct rte_flow_error *error) 1155 { 1156 int rc; 1157 unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER; 1158 boolean_t is_ifrm = B_FALSE; 1159 const struct sfc_flow_item *item; 1160 1161 if (pattern == NULL) { 1162 rte_flow_error_set(error, EINVAL, 1163 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL, 1164 "NULL pattern"); 1165 return -rte_errno; 1166 } 1167 1168 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) { 1169 item = sfc_flow_get_item(pattern->type); 1170 if (item == NULL) { 1171 rte_flow_error_set(error, ENOTSUP, 1172 RTE_FLOW_ERROR_TYPE_ITEM, pattern, 1173 "Unsupported pattern item"); 1174 return -rte_errno; 1175 } 1176 1177 /* 1178 * Omitting one or several protocol layers at the beginning 1179 * of pattern is supported 1180 */ 1181 if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER && 1182 prev_layer != SFC_FLOW_ITEM_ANY_LAYER && 1183 item->prev_layer != prev_layer) { 1184 rte_flow_error_set(error, ENOTSUP, 1185 RTE_FLOW_ERROR_TYPE_ITEM, pattern, 1186 "Unexpected sequence of pattern items"); 1187 return -rte_errno; 1188 } 1189 1190 /* 1191 * Allow only VOID and ETH pattern items in the inner frame. 1192 * Also check that there is only one tunneling protocol. 
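		 *
		 * For example, a pattern of the form
		 * ETH / IPV4 / UDP / VXLAN / ETH / END is accepted (the
		 * trailing ETH describes the inner frame), whereas an IPV4
		 * item after VXLAN is rejected below. Roughly in testpmd
		 * syntax, with placeholder VNI and queue values:
		 *   flow create 0 ingress pattern eth / ipv4 / udp /
		 *     vxlan vni is 42 / eth / end
		 *     actions queue index 1 / end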
1193 */ 1194 switch (item->type) { 1195 case RTE_FLOW_ITEM_TYPE_VOID: 1196 case RTE_FLOW_ITEM_TYPE_ETH: 1197 break; 1198 1199 case RTE_FLOW_ITEM_TYPE_VXLAN: 1200 case RTE_FLOW_ITEM_TYPE_GENEVE: 1201 case RTE_FLOW_ITEM_TYPE_NVGRE: 1202 if (is_ifrm) { 1203 rte_flow_error_set(error, EINVAL, 1204 RTE_FLOW_ERROR_TYPE_ITEM, 1205 pattern, 1206 "More than one tunneling protocol"); 1207 return -rte_errno; 1208 } 1209 is_ifrm = B_TRUE; 1210 break; 1211 1212 default: 1213 if (is_ifrm) { 1214 rte_flow_error_set(error, EINVAL, 1215 RTE_FLOW_ERROR_TYPE_ITEM, 1216 pattern, 1217 "There is an unsupported pattern item " 1218 "in the inner frame"); 1219 return -rte_errno; 1220 } 1221 break; 1222 } 1223 1224 rc = item->parse(pattern, &flow->spec.template, error); 1225 if (rc != 0) 1226 return rc; 1227 1228 if (item->layer != SFC_FLOW_ITEM_ANY_LAYER) 1229 prev_layer = item->layer; 1230 } 1231 1232 return 0; 1233 } 1234 1235 static int 1236 sfc_flow_parse_queue(struct sfc_adapter *sa, 1237 const struct rte_flow_action_queue *queue, 1238 struct rte_flow *flow) 1239 { 1240 struct sfc_rxq *rxq; 1241 1242 if (queue->index >= sa->rxq_count) 1243 return -EINVAL; 1244 1245 rxq = sa->rxq_info[queue->index].rxq; 1246 flow->spec.template.efs_dmaq_id = (uint16_t)rxq->hw_index; 1247 1248 return 0; 1249 } 1250 1251 #if EFSYS_OPT_RX_SCALE 1252 static int 1253 sfc_flow_parse_rss(struct sfc_adapter *sa, 1254 const struct rte_flow_action_rss *rss, 1255 struct rte_flow *flow) 1256 { 1257 unsigned int rxq_sw_index; 1258 struct sfc_rxq *rxq; 1259 unsigned int rxq_hw_index_min; 1260 unsigned int rxq_hw_index_max; 1261 const uint8_t *rss_key; 1262 struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf; 1263 unsigned int i; 1264 1265 if (rss->queue_num == 0) 1266 return -EINVAL; 1267 1268 rxq_sw_index = sa->rxq_count - 1; 1269 rxq = sa->rxq_info[rxq_sw_index].rxq; 1270 rxq_hw_index_min = rxq->hw_index; 1271 rxq_hw_index_max = 0; 1272 1273 for (i = 0; i < rss->queue_num; ++i) { 1274 rxq_sw_index = rss->queue[i]; 1275 1276 if (rxq_sw_index >= sa->rxq_count) 1277 return -EINVAL; 1278 1279 rxq = sa->rxq_info[rxq_sw_index].rxq; 1280 1281 if (rxq->hw_index < rxq_hw_index_min) 1282 rxq_hw_index_min = rxq->hw_index; 1283 1284 if (rxq->hw_index > rxq_hw_index_max) 1285 rxq_hw_index_max = rxq->hw_index; 1286 } 1287 1288 switch (rss->func) { 1289 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1290 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1291 break; 1292 default: 1293 return -EINVAL; 1294 } 1295 1296 if (rss->level) 1297 return -EINVAL; 1298 1299 if ((rss->types & ~SFC_RSS_OFFLOADS) != 0) 1300 return -EINVAL; 1301 1302 if (rss->key_len) { 1303 if (rss->key_len != sizeof(sa->rss_key)) 1304 return -EINVAL; 1305 1306 rss_key = rss->key; 1307 } else { 1308 rss_key = sa->rss_key; 1309 } 1310 1311 flow->rss = B_TRUE; 1312 1313 sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min; 1314 sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max; 1315 sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss->types); 1316 rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(sa->rss_key)); 1317 1318 for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) { 1319 unsigned int rxq_sw_index = rss->queue[i % rss->queue_num]; 1320 struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq; 1321 1322 sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min; 1323 } 1324 1325 return 0; 1326 } 1327 #endif /* EFSYS_OPT_RX_SCALE */ 1328 1329 static int 1330 sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec, 1331 unsigned int filters_count) 1332 { 1333 unsigned int i; 1334 int ret = 
0; 1335 1336 for (i = 0; i < filters_count; i++) { 1337 int rc; 1338 1339 rc = efx_filter_remove(sa->nic, &spec->filters[i]); 1340 if (ret == 0 && rc != 0) { 1341 sfc_err(sa, "failed to remove filter specification " 1342 "(rc = %d)", rc); 1343 ret = rc; 1344 } 1345 } 1346 1347 return ret; 1348 } 1349 1350 static int 1351 sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec) 1352 { 1353 unsigned int i; 1354 int rc = 0; 1355 1356 for (i = 0; i < spec->count; i++) { 1357 rc = efx_filter_insert(sa->nic, &spec->filters[i]); 1358 if (rc != 0) { 1359 sfc_flow_spec_flush(sa, spec, i); 1360 break; 1361 } 1362 } 1363 1364 return rc; 1365 } 1366 1367 static int 1368 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec) 1369 { 1370 return sfc_flow_spec_flush(sa, spec, spec->count); 1371 } 1372 1373 static int 1374 sfc_flow_filter_insert(struct sfc_adapter *sa, 1375 struct rte_flow *flow) 1376 { 1377 #if EFSYS_OPT_RX_SCALE 1378 struct sfc_flow_rss *rss = &flow->rss_conf; 1379 uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT; 1380 unsigned int i; 1381 int rc = 0; 1382 1383 if (flow->rss) { 1384 unsigned int rss_spread = MIN(rss->rxq_hw_index_max - 1385 rss->rxq_hw_index_min + 1, 1386 EFX_MAXRSS); 1387 1388 rc = efx_rx_scale_context_alloc(sa->nic, 1389 EFX_RX_SCALE_EXCLUSIVE, 1390 rss_spread, 1391 &efs_rss_context); 1392 if (rc != 0) 1393 goto fail_scale_context_alloc; 1394 1395 rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context, 1396 EFX_RX_HASHALG_TOEPLITZ, 1397 rss->rss_hash_types, B_TRUE); 1398 if (rc != 0) 1399 goto fail_scale_mode_set; 1400 1401 rc = efx_rx_scale_key_set(sa->nic, efs_rss_context, 1402 rss->rss_key, 1403 sizeof(sa->rss_key)); 1404 if (rc != 0) 1405 goto fail_scale_key_set; 1406 1407 /* 1408 * At this point, fully elaborated filter specifications 1409 * have been produced from the template. To make sure that 1410 * RSS behaviour is consistent between them, set the same 1411 * RSS context value everywhere. 
1412 */ 1413 for (i = 0; i < flow->spec.count; i++) { 1414 efx_filter_spec_t *spec = &flow->spec.filters[i]; 1415 1416 spec->efs_rss_context = efs_rss_context; 1417 spec->efs_dmaq_id = rss->rxq_hw_index_min; 1418 spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS; 1419 } 1420 } 1421 1422 rc = sfc_flow_spec_insert(sa, &flow->spec); 1423 if (rc != 0) 1424 goto fail_filter_insert; 1425 1426 if (flow->rss) { 1427 /* 1428 * Scale table is set after filter insertion because 1429 * the table entries are relative to the base RxQ ID 1430 * and the latter is submitted to the HW by means of 1431 * inserting a filter, so by the time of the request 1432 * the HW knows all the information needed to verify 1433 * the table entries, and the operation will succeed 1434 */ 1435 rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context, 1436 rss->rss_tbl, RTE_DIM(rss->rss_tbl)); 1437 if (rc != 0) 1438 goto fail_scale_tbl_set; 1439 } 1440 1441 return 0; 1442 1443 fail_scale_tbl_set: 1444 sfc_flow_spec_remove(sa, &flow->spec); 1445 1446 fail_filter_insert: 1447 fail_scale_key_set: 1448 fail_scale_mode_set: 1449 if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT) 1450 efx_rx_scale_context_free(sa->nic, efs_rss_context); 1451 1452 fail_scale_context_alloc: 1453 return rc; 1454 #else /* !EFSYS_OPT_RX_SCALE */ 1455 return sfc_flow_spec_insert(sa, &flow->spec); 1456 #endif /* EFSYS_OPT_RX_SCALE */ 1457 } 1458 1459 static int 1460 sfc_flow_filter_remove(struct sfc_adapter *sa, 1461 struct rte_flow *flow) 1462 { 1463 int rc = 0; 1464 1465 rc = sfc_flow_spec_remove(sa, &flow->spec); 1466 if (rc != 0) 1467 return rc; 1468 1469 #if EFSYS_OPT_RX_SCALE 1470 if (flow->rss) { 1471 /* 1472 * All specifications for a given flow rule have the same RSS 1473 * context, so that RSS context value is taken from the first 1474 * filter specification 1475 */ 1476 efx_filter_spec_t *spec = &flow->spec.filters[0]; 1477 1478 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context); 1479 } 1480 #endif /* EFSYS_OPT_RX_SCALE */ 1481 1482 return rc; 1483 } 1484 1485 static int 1486 sfc_flow_parse_actions(struct sfc_adapter *sa, 1487 const struct rte_flow_action actions[], 1488 struct rte_flow *flow, 1489 struct rte_flow_error *error) 1490 { 1491 int rc; 1492 boolean_t is_specified = B_FALSE; 1493 1494 if (actions == NULL) { 1495 rte_flow_error_set(error, EINVAL, 1496 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL, 1497 "NULL actions"); 1498 return -rte_errno; 1499 } 1500 1501 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 1502 /* This one may appear anywhere multiple times. */ 1503 if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) 1504 continue; 1505 /* Fate-deciding actions may appear exactly once. 
*/ 1506 if (is_specified) { 1507 rte_flow_error_set 1508 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, 1509 actions, 1510 "Cannot combine several fate-deciding actions," 1511 "choose between QUEUE, RSS or DROP"); 1512 return -rte_errno; 1513 } 1514 switch (actions->type) { 1515 case RTE_FLOW_ACTION_TYPE_QUEUE: 1516 rc = sfc_flow_parse_queue(sa, actions->conf, flow); 1517 if (rc != 0) { 1518 rte_flow_error_set(error, EINVAL, 1519 RTE_FLOW_ERROR_TYPE_ACTION, actions, 1520 "Bad QUEUE action"); 1521 return -rte_errno; 1522 } 1523 1524 is_specified = B_TRUE; 1525 break; 1526 1527 #if EFSYS_OPT_RX_SCALE 1528 case RTE_FLOW_ACTION_TYPE_RSS: 1529 rc = sfc_flow_parse_rss(sa, actions->conf, flow); 1530 if (rc != 0) { 1531 rte_flow_error_set(error, rc, 1532 RTE_FLOW_ERROR_TYPE_ACTION, actions, 1533 "Bad RSS action"); 1534 return -rte_errno; 1535 } 1536 1537 is_specified = B_TRUE; 1538 break; 1539 #endif /* EFSYS_OPT_RX_SCALE */ 1540 1541 case RTE_FLOW_ACTION_TYPE_DROP: 1542 flow->spec.template.efs_dmaq_id = 1543 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP; 1544 1545 is_specified = B_TRUE; 1546 break; 1547 1548 default: 1549 rte_flow_error_set(error, ENOTSUP, 1550 RTE_FLOW_ERROR_TYPE_ACTION, actions, 1551 "Action is not supported"); 1552 return -rte_errno; 1553 } 1554 } 1555 1556 /* When fate is unknown, drop traffic. */ 1557 if (!is_specified) { 1558 flow->spec.template.efs_dmaq_id = 1559 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP; 1560 } 1561 1562 return 0; 1563 } 1564 1565 /** 1566 * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST 1567 * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same 1568 * specifications after copying. 1569 * 1570 * @param spec[in, out] 1571 * SFC flow specification to update. 1572 * @param filters_count_for_one_val[in] 1573 * How many specifications should have the same match flag, what is the 1574 * number of specifications before copying. 1575 * @param error[out] 1576 * Perform verbose error reporting if not NULL. 1577 */ 1578 static int 1579 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec, 1580 unsigned int filters_count_for_one_val, 1581 struct rte_flow_error *error) 1582 { 1583 unsigned int i; 1584 static const efx_filter_match_flags_t vals[] = { 1585 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, 1586 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST 1587 }; 1588 1589 if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) { 1590 rte_flow_error_set(error, EINVAL, 1591 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1592 "Number of specifications is incorrect while copying " 1593 "by unknown destination flags"); 1594 return -rte_errno; 1595 } 1596 1597 for (i = 0; i < spec->count; i++) { 1598 /* The check above ensures that divisor can't be zero here */ 1599 spec->filters[i].efs_match_flags |= 1600 vals[i / filters_count_for_one_val]; 1601 } 1602 1603 return 0; 1604 } 1605 1606 /** 1607 * Check that the following conditions are met: 1608 * - the list of supported filters has a filter 1609 * with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of 1610 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also 1611 * be inserted. 1612 * 1613 * @param match[in] 1614 * The match flags of filter. 1615 * @param spec[in] 1616 * Specification to be supplemented. 1617 * @param filter[in] 1618 * SFC filter with list of supported filters. 
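 *
 * For example, if the missing flag set includes
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, this check requires that the same
 * combination with UNKNOWN_MCAST_DST substituted is also in the supported
 * list, because both the unicast and the multicast copies of the
 * specification will be inserted.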
1619 */ 1620 static boolean_t 1621 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match, 1622 __rte_unused efx_filter_spec_t *spec, 1623 struct sfc_filter *filter) 1624 { 1625 unsigned int i; 1626 efx_filter_match_flags_t match_mcast_dst; 1627 1628 match_mcast_dst = 1629 (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) | 1630 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST; 1631 for (i = 0; i < filter->supported_match_num; i++) { 1632 if (match_mcast_dst == filter->supported_match[i]) 1633 return B_TRUE; 1634 } 1635 1636 return B_FALSE; 1637 } 1638 1639 /** 1640 * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and 1641 * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same 1642 * specifications after copying. 1643 * 1644 * @param spec[in, out] 1645 * SFC flow specification to update. 1646 * @param filters_count_for_one_val[in] 1647 * How many specifications should have the same EtherType value, what is the 1648 * number of specifications before copying. 1649 * @param error[out] 1650 * Perform verbose error reporting if not NULL. 1651 */ 1652 static int 1653 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec, 1654 unsigned int filters_count_for_one_val, 1655 struct rte_flow_error *error) 1656 { 1657 unsigned int i; 1658 static const uint16_t vals[] = { 1659 EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6 1660 }; 1661 1662 if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) { 1663 rte_flow_error_set(error, EINVAL, 1664 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1665 "Number of specifications is incorrect " 1666 "while copying by Ethertype"); 1667 return -rte_errno; 1668 } 1669 1670 for (i = 0; i < spec->count; i++) { 1671 spec->filters[i].efs_match_flags |= 1672 EFX_FILTER_MATCH_ETHER_TYPE; 1673 1674 /* 1675 * The check above ensures that 1676 * filters_count_for_one_val is not 0 1677 */ 1678 spec->filters[i].efs_ether_type = 1679 vals[i / filters_count_for_one_val]; 1680 } 1681 1682 return 0; 1683 } 1684 1685 /** 1686 * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and 1687 * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same 1688 * specifications after copying. 1689 * 1690 * @param spec[in, out] 1691 * SFC flow specification to update. 1692 * @param filters_count_for_one_val[in] 1693 * How many specifications should have the same match flag, what is the 1694 * number of specifications before copying. 1695 * @param error[out] 1696 * Perform verbose error reporting if not NULL. 
1697 */ 1698 static int 1699 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec, 1700 unsigned int filters_count_for_one_val, 1701 struct rte_flow_error *error) 1702 { 1703 unsigned int i; 1704 static const efx_filter_match_flags_t vals[] = { 1705 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, 1706 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST 1707 }; 1708 1709 if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) { 1710 rte_flow_error_set(error, EINVAL, 1711 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1712 "Number of specifications is incorrect while copying " 1713 "by inner frame unknown destination flags"); 1714 return -rte_errno; 1715 } 1716 1717 for (i = 0; i < spec->count; i++) { 1718 /* The check above ensures that divisor can't be zero here */ 1719 spec->filters[i].efs_match_flags |= 1720 vals[i / filters_count_for_one_val]; 1721 } 1722 1723 return 0; 1724 } 1725 1726 /** 1727 * Check that the following conditions are met: 1728 * - the specification corresponds to a filter for encapsulated traffic 1729 * - the list of supported filters has a filter 1730 * with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of 1731 * EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also 1732 * be inserted. 1733 * 1734 * @param match[in] 1735 * The match flags of filter. 1736 * @param spec[in] 1737 * Specification to be supplemented. 1738 * @param filter[in] 1739 * SFC filter with list of supported filters. 1740 */ 1741 static boolean_t 1742 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match, 1743 efx_filter_spec_t *spec, 1744 struct sfc_filter *filter) 1745 { 1746 unsigned int i; 1747 efx_tunnel_protocol_t encap_type = spec->efs_encap_type; 1748 efx_filter_match_flags_t match_mcast_dst; 1749 1750 if (encap_type == EFX_TUNNEL_PROTOCOL_NONE) 1751 return B_FALSE; 1752 1753 match_mcast_dst = 1754 (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) | 1755 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST; 1756 for (i = 0; i < filter->supported_match_num; i++) { 1757 if (match_mcast_dst == filter->supported_match[i]) 1758 return B_TRUE; 1759 } 1760 1761 return B_FALSE; 1762 } 1763 1764 /* 1765 * Match flags that can be automatically added to filters. 1766 * Selecting the last minimum when searching for the copy flag ensures that the 1767 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than 1768 * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter 1769 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported 1770 * filters. 
1771 */ 1772 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = { 1773 { 1774 .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, 1775 .vals_count = 2, 1776 .set_vals = sfc_flow_set_unknown_dst_flags, 1777 .spec_check = sfc_flow_check_unknown_dst_flags, 1778 }, 1779 { 1780 .flag = EFX_FILTER_MATCH_ETHER_TYPE, 1781 .vals_count = 2, 1782 .set_vals = sfc_flow_set_ethertypes, 1783 .spec_check = NULL, 1784 }, 1785 { 1786 .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, 1787 .vals_count = 2, 1788 .set_vals = sfc_flow_set_ifrm_unknown_dst_flags, 1789 .spec_check = sfc_flow_check_ifrm_unknown_dst_flags, 1790 }, 1791 }; 1792 1793 /* Get item from array sfc_flow_copy_flags */ 1794 static const struct sfc_flow_copy_flag * 1795 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag) 1796 { 1797 unsigned int i; 1798 1799 for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) { 1800 if (sfc_flow_copy_flags[i].flag == flag) 1801 return &sfc_flow_copy_flags[i]; 1802 } 1803 1804 return NULL; 1805 } 1806 1807 /** 1808 * Make copies of the specifications, set match flag and values 1809 * of the field that corresponds to it. 1810 * 1811 * @param spec[in, out] 1812 * SFC flow specification to update. 1813 * @param flag[in] 1814 * The match flag to add. 1815 * @param error[out] 1816 * Perform verbose error reporting if not NULL. 1817 */ 1818 static int 1819 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec, 1820 efx_filter_match_flags_t flag, 1821 struct rte_flow_error *error) 1822 { 1823 unsigned int i; 1824 unsigned int new_filters_count; 1825 unsigned int filters_count_for_one_val; 1826 const struct sfc_flow_copy_flag *copy_flag; 1827 int rc; 1828 1829 copy_flag = sfc_flow_get_copy_flag(flag); 1830 if (copy_flag == NULL) { 1831 rte_flow_error_set(error, ENOTSUP, 1832 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1833 "Unsupported spec field for copying"); 1834 return -rte_errno; 1835 } 1836 1837 new_filters_count = spec->count * copy_flag->vals_count; 1838 if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) { 1839 rte_flow_error_set(error, EINVAL, 1840 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1841 "Too much EFX specifications in the flow rule"); 1842 return -rte_errno; 1843 } 1844 1845 /* Copy filters specifications */ 1846 for (i = spec->count; i < new_filters_count; i++) 1847 spec->filters[i] = spec->filters[i - spec->count]; 1848 1849 filters_count_for_one_val = spec->count; 1850 spec->count = new_filters_count; 1851 1852 rc = copy_flag->set_vals(spec, filters_count_for_one_val, error); 1853 if (rc != 0) 1854 return rc; 1855 1856 return 0; 1857 } 1858 1859 /** 1860 * Check that the given set of match flags missing in the original filter spec 1861 * could be covered by adding spec copies which specify the corresponding 1862 * flags and packet field values to match. 1863 * 1864 * @param miss_flags[in] 1865 * Flags that are missing until the supported filter. 1866 * @param spec[in] 1867 * Specification to be supplemented. 1868 * @param filter[in] 1869 * SFC filter. 1870 * 1871 * @return 1872 * Number of specifications after copy or 0, if the flags can not be added. 
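 *
 * For example, if both EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and
 * EFX_FILTER_MATCH_ETHER_TYPE are missing and pass their checks, each
 * original specification is covered by 2 * 2 = 4 copies, so 4 is returned.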
1873 */ 1874 static unsigned int 1875 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags, 1876 efx_filter_spec_t *spec, 1877 struct sfc_filter *filter) 1878 { 1879 unsigned int i; 1880 efx_filter_match_flags_t copy_flags = 0; 1881 efx_filter_match_flags_t flag; 1882 efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags; 1883 sfc_flow_spec_check *check; 1884 unsigned int multiplier = 1; 1885 1886 for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) { 1887 flag = sfc_flow_copy_flags[i].flag; 1888 check = sfc_flow_copy_flags[i].spec_check; 1889 if ((flag & miss_flags) == flag) { 1890 if (check != NULL && (!check(match, spec, filter))) 1891 continue; 1892 1893 copy_flags |= flag; 1894 multiplier *= sfc_flow_copy_flags[i].vals_count; 1895 } 1896 } 1897 1898 if (copy_flags == miss_flags) 1899 return multiplier; 1900 1901 return 0; 1902 } 1903 1904 /** 1905 * Attempt to supplement the specification template to the minimally 1906 * supported set of match flags. To do this, it is necessary to copy 1907 * the specifications, filling them with the values of fields that 1908 * correspond to the missing flags. 1909 * The necessary and sufficient filter is built from the fewest number 1910 * of copies which could be made to cover the minimally required set 1911 * of flags. 1912 * 1913 * @param sa[in] 1914 * SFC adapter. 1915 * @param spec[in, out] 1916 * SFC flow specification to update. 1917 * @param error[out] 1918 * Perform verbose error reporting if not NULL. 1919 */ 1920 static int 1921 sfc_flow_spec_filters_complete(struct sfc_adapter *sa, 1922 struct sfc_flow_spec *spec, 1923 struct rte_flow_error *error) 1924 { 1925 struct sfc_filter *filter = &sa->filter; 1926 efx_filter_match_flags_t miss_flags; 1927 efx_filter_match_flags_t min_miss_flags = 0; 1928 efx_filter_match_flags_t match; 1929 unsigned int min_multiplier = UINT_MAX; 1930 unsigned int multiplier; 1931 unsigned int i; 1932 int rc; 1933 1934 match = spec->template.efs_match_flags; 1935 for (i = 0; i < filter->supported_match_num; i++) { 1936 if ((match & filter->supported_match[i]) == match) { 1937 miss_flags = filter->supported_match[i] & (~match); 1938 multiplier = sfc_flow_check_missing_flags(miss_flags, 1939 &spec->template, filter); 1940 if (multiplier > 0) { 1941 if (multiplier <= min_multiplier) { 1942 min_multiplier = multiplier; 1943 min_miss_flags = miss_flags; 1944 } 1945 } 1946 } 1947 } 1948 1949 if (min_multiplier == UINT_MAX) { 1950 rte_flow_error_set(error, ENOTSUP, 1951 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1952 "Flow rule pattern is not supported"); 1953 return -rte_errno; 1954 } 1955 1956 for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) { 1957 efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag; 1958 1959 if ((flag & min_miss_flags) == flag) { 1960 rc = sfc_flow_spec_add_match_flag(spec, flag, error); 1961 if (rc != 0) 1962 return rc; 1963 } 1964 } 1965 1966 return 0; 1967 } 1968 1969 /** 1970 * Check that set of match flags is referred to by a filter. Filter is 1971 * described by match flags with the ability to add OUTER_VID and INNER_VID 1972 * flags. 1973 * 1974 * @param match_flags[in] 1975 * Set of match flags. 1976 * @param flags_pattern[in] 1977 * Pattern of filter match flags. 
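 *
 * For example, EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID
 * matches the pattern EFX_FILTER_MATCH_ETHER_TYPE, while adding any flag
 * other than the VID flags (e.g. EFX_FILTER_MATCH_LOC_MAC) does not.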

/**
 * Check that the set of match flags refers to the given filter pattern:
 * the flags must contain the pattern and may additionally carry the
 * OUTER_VID and INNER_VID flags.
 *
 * @param match_flags[in]
 *   Set of match flags.
 * @param flags_pattern[in]
 *   Pattern of filter match flags.
 */
static boolean_t
sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
			    efx_filter_match_flags_t flags_pattern)
{
	if ((match_flags & flags_pattern) != flags_pattern)
		return B_FALSE;

	switch (match_flags & ~flags_pattern) {
	case 0:
	case EFX_FILTER_MATCH_OUTER_VID:
	case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
		return B_TRUE;
	default:
		return B_FALSE;
	}
}

/**
 * Check whether the spec maps to a hardware filter which is known to be
 * ineffective despite being valid.
 *
 * @param spec[in]
 *   SFC flow specification.
 */
static boolean_t
sfc_flow_is_match_flags_exception(struct sfc_flow_spec *spec)
{
	unsigned int i;
	uint16_t ether_type;
	uint8_t ip_proto;
	efx_filter_match_flags_t match_flags;

	for (i = 0; i < spec->count; i++) {
		match_flags = spec->filters[i].efs_match_flags;

		if (sfc_flow_is_match_with_vids(match_flags,
						EFX_FILTER_MATCH_ETHER_TYPE) ||
		    sfc_flow_is_match_with_vids(match_flags,
						EFX_FILTER_MATCH_ETHER_TYPE |
						EFX_FILTER_MATCH_LOC_MAC)) {
			ether_type = spec->filters[i].efs_ether_type;
			if (ether_type == EFX_ETHER_TYPE_IPV4 ||
			    ether_type == EFX_ETHER_TYPE_IPV6)
				return B_TRUE;
		} else if (sfc_flow_is_match_with_vids(match_flags,
				EFX_FILTER_MATCH_ETHER_TYPE |
				EFX_FILTER_MATCH_IP_PROTO) ||
			   sfc_flow_is_match_with_vids(match_flags,
				EFX_FILTER_MATCH_ETHER_TYPE |
				EFX_FILTER_MATCH_IP_PROTO |
				EFX_FILTER_MATCH_LOC_MAC)) {
			ip_proto = spec->filters[i].efs_ip_proto;
			if (ip_proto == EFX_IPPROTO_TCP ||
			    ip_proto == EFX_IPPROTO_UDP)
				return B_TRUE;
		}
	}

	return B_FALSE;
}

static int
sfc_flow_validate_match_flags(struct sfc_adapter *sa,
			      struct rte_flow *flow,
			      struct rte_flow_error *error)
{
	efx_filter_spec_t *spec_tmpl = &flow->spec.template;
	efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
	int rc;

	/* Initialize the first filter spec with template */
	flow->spec.filters[0] = *spec_tmpl;
	flow->spec.count = 1;

	if (!sfc_filter_is_match_supported(sa, match_flags)) {
		rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
		if (rc != 0)
			return rc;
	}

	if (sfc_flow_is_match_flags_exception(&flow->spec)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "The flow rule pattern is unsupported");
		return -rte_errno;
	}

	return 0;
}

static int
sfc_flow_parse(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	rc = sfc_flow_parse_attr(attr, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_pattern(pattern, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_actions(sa, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_validate_match_flags(sa, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	return 0;

fail_bad_value:
	return rc;
}
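/*
 * For reference, a minimal sketch of a flow rule that exercises this parse
 * path through the generic rte_flow API (port_id, the queue index and the
 * MAC address are illustrative values, not taken from this driver):
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_eth eth_spec = {
 *		.dst.addr_bytes = { 0x00, 0x0f, 0x53, 0x00, 0x00, 0x01 },
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 *
 * rte_flow_create() reaches sfc_flow_create() below through sfc_flow_ops,
 * which in turn calls sfc_flow_parse() on the attribute, pattern and action
 * arrays. If the resulting match is not directly supported by the hardware,
 * the completion logic above may expand it into several filter
 * specifications.
 */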

static int
sfc_flow_validate(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	struct rte_flow flow;

	memset(&flow, 0, sizeof(flow));

	return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
}

static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow = NULL;
	int rc;

	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
	if (flow == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to allocate memory");
		goto fail_no_mem;
	}

	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	sfc_adapter_lock(sa);

	TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = sfc_flow_filter_insert(sa, flow);
		if (rc != 0) {
			rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to insert filter");
			goto fail_filter_insert;
		}
	}

	sfc_adapter_unlock(sa);

	return flow;

fail_filter_insert:
	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	sfc_adapter_unlock(sa);

fail_bad_value:
	rte_free(flow);

fail_no_mem:
	return NULL;
}

static int
sfc_flow_remove(struct sfc_adapter *sa,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int rc = 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = sfc_flow_filter_remove(sa, flow);
		if (rc != 0)
			rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to destroy flow rule");
	}

	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	rte_free(flow);

	return rc;
}

static int
sfc_flow_destroy(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow_ptr;
	int rc = EINVAL;

	sfc_adapter_lock(sa);

	TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
		if (flow_ptr == flow)
			rc = 0;
	}
	if (rc != 0) {
		rte_flow_error_set(error, rc,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to find flow rule to destroy");
		goto fail_bad_value;
	}

	rc = sfc_flow_remove(sa, flow, error);

fail_bad_value:
	sfc_adapter_unlock(sa);

	return -rc;
}

static int
sfc_flow_flush(struct rte_eth_dev *dev,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow;
	int rc = 0;
	int ret = 0;

	sfc_adapter_lock(sa);

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		rc = sfc_flow_remove(sa, flow, error);
		if (rc != 0)
			ret = rc;
	}

	sfc_adapter_unlock(sa);

	return -ret;
}
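/*
 * Note on the sign convention used above: the internal helpers return a
 * positive errno value (rc), while the rte_flow callbacks are expected to
 * report failures as a negative errno together with a populated
 * rte_flow_error, hence the negated return values in sfc_flow_destroy()
 * and sfc_flow_flush().
 */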

static int
sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	int ret = 0;

	sfc_adapter_lock(sa);
	if (sa->state != SFC_ADAPTER_INITIALIZED) {
		rte_flow_error_set(error, EBUSY,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "please close the port first");
		ret = -rte_errno;
	} else {
		port->isolated = (enable) ? B_TRUE : B_FALSE;
	}
	sfc_adapter_unlock(sa);

	return ret;
}

const struct rte_flow_ops sfc_flow_ops = {
	.validate = sfc_flow_validate,
	.create = sfc_flow_create,
	.destroy = sfc_flow_destroy,
	.flush = sfc_flow_flush,
	.query = NULL,
	.isolate = sfc_flow_isolate,
};

void
sfc_flow_init(struct sfc_adapter *sa)
{
	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_INIT(&sa->filter.flow_list);
}

void
sfc_flow_fini(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
		rte_free(flow);
	}
}

void
sfc_flow_stop(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
		sfc_flow_filter_remove(sa, flow);
}

int
sfc_flow_start(struct sfc_adapter *sa)
{
	struct rte_flow *flow;
	int rc = 0;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
		rc = sfc_flow_filter_insert(sa, flow);
		if (rc != 0)
			goto fail_bad_flow;
	}

	sfc_log_init(sa, "done");

fail_bad_flow:
	return rc;
}
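
/*
 * Illustrative sketch (an assumption about the caller side, not copied from
 * the adapter code): sfc_flow_start() and sfc_flow_stop() are intended to be
 * called from the adapter start/stop paths with the adapter lock held,
 * roughly as follows:
 *
 *	sfc_adapter_lock(sa);
 *	rc = sfc_flow_start(sa);	// re-insert filters of remembered flows
 *	if (rc != 0)
 *		goto fail_flow_start;
 *	...
 *	sfc_flow_stop(sa);		// remove filters but keep the flow list
 *	sfc_adapter_unlock(sa);
 *
 * This matches the SFC_ASSERT(sfc_adapter_is_locked(sa)) checks in the
 * helpers above.
 */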