/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"

/*
 * At the moment, the flow API is implemented in such a manner that each
 * flow rule is converted to one or more hardware filters.
 * All elements of a flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that is responsible for the hardware filter.
 * If some required field is unset in the flow rule, then several
 * filter copies will be created to cover all possible values
 * of such a field.
 */

enum sfc_flow_item_layers {
	SFC_FLOW_ITEM_ANY_LAYER,
	SFC_FLOW_ITEM_START_LAYER,
	SFC_FLOW_ITEM_L2,
	SFC_FLOW_ITEM_L3,
	SFC_FLOW_ITEM_L4,
};

typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
				  efx_filter_spec_t *spec,
				  struct rte_flow_error *error);

struct sfc_flow_item {
	enum rte_flow_item_type type;		/* Type of item */
	enum sfc_flow_item_layers layer;	/* Layer of item */
	enum sfc_flow_item_layers prev_layer;	/* Previous layer of item */
	sfc_flow_item_parse *parse;		/* Parsing function */
};

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
				     unsigned int filters_count_for_one_val,
				     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
					efx_filter_spec_t *spec,
					struct sfc_filter *filter);

struct sfc_flow_copy_flag {
	/* EFX filter specification match flag */
	efx_filter_match_flags_t flag;
	/* Number of values of corresponding field */
	unsigned int vals_count;
	/* Function to set values in specifications */
	sfc_flow_spec_set_vals *set_vals;
	/*
	 * Function to check that the specification is suitable
	 * for adding this match flag
	 */
	sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return (sum == 0) ?
B_TRUE : B_FALSE; 107 } 108 109 /* 110 * Validate item and prepare structures spec and mask for parsing 111 */ 112 static int 113 sfc_flow_parse_init(const struct rte_flow_item *item, 114 const void **spec_ptr, 115 const void **mask_ptr, 116 const void *supp_mask, 117 const void *def_mask, 118 unsigned int size, 119 struct rte_flow_error *error) 120 { 121 const uint8_t *spec; 122 const uint8_t *mask; 123 const uint8_t *last; 124 uint8_t supp; 125 unsigned int i; 126 127 if (item == NULL) { 128 rte_flow_error_set(error, EINVAL, 129 RTE_FLOW_ERROR_TYPE_ITEM, NULL, 130 "NULL item"); 131 return -rte_errno; 132 } 133 134 if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) { 135 rte_flow_error_set(error, EINVAL, 136 RTE_FLOW_ERROR_TYPE_ITEM, item, 137 "Mask or last is set without spec"); 138 return -rte_errno; 139 } 140 141 /* 142 * If "mask" is not set, default mask is used, 143 * but if default mask is NULL, "mask" should be set 144 */ 145 if (item->mask == NULL) { 146 if (def_mask == NULL) { 147 rte_flow_error_set(error, EINVAL, 148 RTE_FLOW_ERROR_TYPE_ITEM, NULL, 149 "Mask should be specified"); 150 return -rte_errno; 151 } 152 153 mask = def_mask; 154 } else { 155 mask = item->mask; 156 } 157 158 spec = item->spec; 159 last = item->last; 160 161 if (spec == NULL) 162 goto exit; 163 164 /* 165 * If field values in "last" are either 0 or equal to the corresponding 166 * values in "spec" then they are ignored 167 */ 168 if (last != NULL && 169 !sfc_flow_is_zero(last, size) && 170 memcmp(last, spec, size) != 0) { 171 rte_flow_error_set(error, ENOTSUP, 172 RTE_FLOW_ERROR_TYPE_ITEM, item, 173 "Ranging is not supported"); 174 return -rte_errno; 175 } 176 177 if (supp_mask == NULL) { 178 rte_flow_error_set(error, EINVAL, 179 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 180 "Supported mask for item should be specified"); 181 return -rte_errno; 182 } 183 184 /* Check that mask does not ask for more match than supp_mask */ 185 for (i = 0; i < size; i++) { 186 supp = ((const uint8_t *)supp_mask)[i]; 187 188 if (~supp & mask[i]) { 189 rte_flow_error_set(error, ENOTSUP, 190 RTE_FLOW_ERROR_TYPE_ITEM, item, 191 "Item's field is not supported"); 192 return -rte_errno; 193 } 194 } 195 196 exit: 197 *spec_ptr = spec; 198 *mask_ptr = mask; 199 return 0; 200 } 201 202 /* 203 * Protocol parsers. 204 * Masking is not supported, so masks in items should be either 205 * full or empty (zeroed) and set only for supported fields which 206 * are specified in the supp_mask. 207 */ 208 209 static int 210 sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item, 211 __rte_unused efx_filter_spec_t *efx_spec, 212 __rte_unused struct rte_flow_error *error) 213 { 214 return 0; 215 } 216 217 /** 218 * Convert Ethernet item to EFX filter specification. 219 * 220 * @param item[in] 221 * Item specification. Outer frame specification may only comprise 222 * source/destination addresses and Ethertype field. 223 * Inner frame specification may contain destination address only. 224 * There is support for individual/group mask as well as for empty and full. 225 * If the mask is NULL, default mask will be used. Ranging is not supported. 226 * @param efx_spec[in, out] 227 * EFX filter specification to update. 228 * @param[out] error 229 * Perform verbose error reporting if not NULL. 
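 *
 * For illustration only: a pattern item whose mask covers the whole
 * destination MAC (e.g. eth dst is 00:53:00:00:00:01 in testpmd-style
 * syntax, assumed here purely as an example) is expected to set
 * EFX_FILTER_MATCH_LOC_MAC and copy the address into efs_loc_mac, while
 * the individual/group mask 01:00:00:00:00:00 maps the destination to
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST or EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
 * depending on whether the address is unicast or multicast.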
230 */ 231 static int 232 sfc_flow_parse_eth(const struct rte_flow_item *item, 233 efx_filter_spec_t *efx_spec, 234 struct rte_flow_error *error) 235 { 236 int rc; 237 const struct rte_flow_item_eth *spec = NULL; 238 const struct rte_flow_item_eth *mask = NULL; 239 const struct rte_flow_item_eth supp_mask = { 240 .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 241 .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 242 .type = 0xffff, 243 }; 244 const struct rte_flow_item_eth ifrm_supp_mask = { 245 .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 246 }; 247 const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = { 248 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 249 }; 250 const struct rte_flow_item_eth *supp_mask_p; 251 const struct rte_flow_item_eth *def_mask_p; 252 uint8_t *loc_mac = NULL; 253 boolean_t is_ifrm = (efx_spec->efs_encap_type != 254 EFX_TUNNEL_PROTOCOL_NONE); 255 256 if (is_ifrm) { 257 supp_mask_p = &ifrm_supp_mask; 258 def_mask_p = &ifrm_supp_mask; 259 loc_mac = efx_spec->efs_ifrm_loc_mac; 260 } else { 261 supp_mask_p = &supp_mask; 262 def_mask_p = &rte_flow_item_eth_mask; 263 loc_mac = efx_spec->efs_loc_mac; 264 } 265 266 rc = sfc_flow_parse_init(item, 267 (const void **)&spec, 268 (const void **)&mask, 269 supp_mask_p, def_mask_p, 270 sizeof(struct rte_flow_item_eth), 271 error); 272 if (rc != 0) 273 return rc; 274 275 /* If "spec" is not set, could be any Ethernet */ 276 if (spec == NULL) 277 return 0; 278 279 if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) { 280 efx_spec->efs_match_flags |= is_ifrm ? 281 EFX_FILTER_MATCH_IFRM_LOC_MAC : 282 EFX_FILTER_MATCH_LOC_MAC; 283 rte_memcpy(loc_mac, spec->dst.addr_bytes, 284 EFX_MAC_ADDR_LEN); 285 } else if (memcmp(mask->dst.addr_bytes, ig_mask, 286 EFX_MAC_ADDR_LEN) == 0) { 287 if (is_unicast_ether_addr(&spec->dst)) 288 efx_spec->efs_match_flags |= is_ifrm ? 289 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST : 290 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST; 291 else 292 efx_spec->efs_match_flags |= is_ifrm ? 293 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST : 294 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST; 295 } else if (!is_zero_ether_addr(&mask->dst)) { 296 goto fail_bad_mask; 297 } 298 299 /* 300 * ifrm_supp_mask ensures that the source address and 301 * ethertype masks are equal to zero in inner frame, 302 * so these fields are filled in only for the outer frame 303 */ 304 if (is_same_ether_addr(&mask->src, &supp_mask.src)) { 305 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC; 306 rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes, 307 EFX_MAC_ADDR_LEN); 308 } else if (!is_zero_ether_addr(&mask->src)) { 309 goto fail_bad_mask; 310 } 311 312 /* 313 * Ether type is in big-endian byte order in item and 314 * in little-endian in efx_spec, so byte swap is used 315 */ 316 if (mask->type == supp_mask.type) { 317 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; 318 efx_spec->efs_ether_type = rte_bswap16(spec->type); 319 } else if (mask->type != 0) { 320 goto fail_bad_mask; 321 } 322 323 return 0; 324 325 fail_bad_mask: 326 rte_flow_error_set(error, EINVAL, 327 RTE_FLOW_ERROR_TYPE_ITEM, item, 328 "Bad mask in the ETH pattern item"); 329 return -rte_errno; 330 } 331 332 /** 333 * Convert VLAN item to EFX filter specification. 334 * 335 * @param item[in] 336 * Item specification. Only VID field is supported. 337 * The mask can not be NULL. Ranging is not supported. 338 * @param efx_spec[in, out] 339 * EFX filter specification to update. 340 * @param[out] error 341 * Perform verbose error reporting if not NULL. 
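 *
 * For illustration only: if a pattern carries two VLAN items with the VID
 * bits of TCI fully masked (say VIDs 100 and 200, values assumed purely as
 * an example), the first item is expected to fill efs_outer_vid and the
 * second one efs_inner_vid; a third VLAN item is rejected.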
342 */ 343 static int 344 sfc_flow_parse_vlan(const struct rte_flow_item *item, 345 efx_filter_spec_t *efx_spec, 346 struct rte_flow_error *error) 347 { 348 int rc; 349 uint16_t vid; 350 const struct rte_flow_item_vlan *spec = NULL; 351 const struct rte_flow_item_vlan *mask = NULL; 352 const struct rte_flow_item_vlan supp_mask = { 353 .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX), 354 .inner_type = RTE_BE16(0xffff), 355 }; 356 357 rc = sfc_flow_parse_init(item, 358 (const void **)&spec, 359 (const void **)&mask, 360 &supp_mask, 361 NULL, 362 sizeof(struct rte_flow_item_vlan), 363 error); 364 if (rc != 0) 365 return rc; 366 367 /* 368 * VID is in big-endian byte order in item and 369 * in little-endian in efx_spec, so byte swap is used. 370 * If two VLAN items are included, the first matches 371 * the outer tag and the next matches the inner tag. 372 */ 373 if (mask->tci == supp_mask.tci) { 374 vid = rte_bswap16(spec->tci); 375 376 if (!(efx_spec->efs_match_flags & 377 EFX_FILTER_MATCH_OUTER_VID)) { 378 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID; 379 efx_spec->efs_outer_vid = vid; 380 } else if (!(efx_spec->efs_match_flags & 381 EFX_FILTER_MATCH_INNER_VID)) { 382 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID; 383 efx_spec->efs_inner_vid = vid; 384 } else { 385 rte_flow_error_set(error, EINVAL, 386 RTE_FLOW_ERROR_TYPE_ITEM, item, 387 "More than two VLAN items"); 388 return -rte_errno; 389 } 390 } else { 391 rte_flow_error_set(error, EINVAL, 392 RTE_FLOW_ERROR_TYPE_ITEM, item, 393 "VLAN ID in TCI match is required"); 394 return -rte_errno; 395 } 396 397 if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) { 398 rte_flow_error_set(error, EINVAL, 399 RTE_FLOW_ERROR_TYPE_ITEM, item, 400 "VLAN TPID matching is not supported"); 401 return -rte_errno; 402 } 403 if (mask->inner_type == supp_mask.inner_type) { 404 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; 405 efx_spec->efs_ether_type = rte_bswap16(spec->inner_type); 406 } else if (mask->inner_type) { 407 rte_flow_error_set(error, EINVAL, 408 RTE_FLOW_ERROR_TYPE_ITEM, item, 409 "Bad mask for VLAN inner_type"); 410 return -rte_errno; 411 } 412 413 return 0; 414 } 415 416 /** 417 * Convert IPv4 item to EFX filter specification. 418 * 419 * @param item[in] 420 * Item specification. Only source and destination addresses and 421 * protocol fields are supported. If the mask is NULL, default 422 * mask will be used. Ranging is not supported. 423 * @param efx_spec[in, out] 424 * EFX filter specification to update. 425 * @param[out] error 426 * Perform verbose error reporting if not NULL. 
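 *
 * For illustration only: an item that fully masks the destination address
 * (e.g. ipv4 dst is 192.0.2.1 in testpmd-style syntax, assumed purely as
 * an example) is expected to add EFX_FILTER_MATCH_LOC_HOST and, because
 * filtering by IPv4 addresses needs the matching EtherType, also
 * EFX_FILTER_MATCH_ETHER_TYPE with EFX_ETHER_TYPE_IPV4.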
427 */ 428 static int 429 sfc_flow_parse_ipv4(const struct rte_flow_item *item, 430 efx_filter_spec_t *efx_spec, 431 struct rte_flow_error *error) 432 { 433 int rc; 434 const struct rte_flow_item_ipv4 *spec = NULL; 435 const struct rte_flow_item_ipv4 *mask = NULL; 436 const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4); 437 const struct rte_flow_item_ipv4 supp_mask = { 438 .hdr = { 439 .src_addr = 0xffffffff, 440 .dst_addr = 0xffffffff, 441 .next_proto_id = 0xff, 442 } 443 }; 444 445 rc = sfc_flow_parse_init(item, 446 (const void **)&spec, 447 (const void **)&mask, 448 &supp_mask, 449 &rte_flow_item_ipv4_mask, 450 sizeof(struct rte_flow_item_ipv4), 451 error); 452 if (rc != 0) 453 return rc; 454 455 /* 456 * Filtering by IPv4 source and destination addresses requires 457 * the appropriate ETHER_TYPE in hardware filters 458 */ 459 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) { 460 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; 461 efx_spec->efs_ether_type = ether_type_ipv4; 462 } else if (efx_spec->efs_ether_type != ether_type_ipv4) { 463 rte_flow_error_set(error, EINVAL, 464 RTE_FLOW_ERROR_TYPE_ITEM, item, 465 "Ethertype in pattern with IPV4 item should be appropriate"); 466 return -rte_errno; 467 } 468 469 if (spec == NULL) 470 return 0; 471 472 /* 473 * IPv4 addresses are in big-endian byte order in item and in 474 * efx_spec 475 */ 476 if (mask->hdr.src_addr == supp_mask.hdr.src_addr) { 477 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST; 478 efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr; 479 } else if (mask->hdr.src_addr != 0) { 480 goto fail_bad_mask; 481 } 482 483 if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) { 484 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST; 485 efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr; 486 } else if (mask->hdr.dst_addr != 0) { 487 goto fail_bad_mask; 488 } 489 490 if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) { 491 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 492 efx_spec->efs_ip_proto = spec->hdr.next_proto_id; 493 } else if (mask->hdr.next_proto_id != 0) { 494 goto fail_bad_mask; 495 } 496 497 return 0; 498 499 fail_bad_mask: 500 rte_flow_error_set(error, EINVAL, 501 RTE_FLOW_ERROR_TYPE_ITEM, item, 502 "Bad mask in the IPV4 pattern item"); 503 return -rte_errno; 504 } 505 506 /** 507 * Convert IPv6 item to EFX filter specification. 508 * 509 * @param item[in] 510 * Item specification. Only source and destination addresses and 511 * next header fields are supported. If the mask is NULL, default 512 * mask will be used. Ranging is not supported. 513 * @param efx_spec[in, out] 514 * EFX filter specification to update. 515 * @param[out] error 516 * Perform verbose error reporting if not NULL. 
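 *
 * For illustration only: a fully masked source or destination address is
 * expected to set EFX_FILTER_MATCH_REM_HOST or EFX_FILTER_MATCH_LOC_HOST
 * respectively, with the 16-byte address copied into efs_rem_host or
 * efs_loc_host; EFX_ETHER_TYPE_IPV6 is enforced the same way as the
 * EtherType is enforced for the IPv4 item.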
517 */ 518 static int 519 sfc_flow_parse_ipv6(const struct rte_flow_item *item, 520 efx_filter_spec_t *efx_spec, 521 struct rte_flow_error *error) 522 { 523 int rc; 524 const struct rte_flow_item_ipv6 *spec = NULL; 525 const struct rte_flow_item_ipv6 *mask = NULL; 526 const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6); 527 const struct rte_flow_item_ipv6 supp_mask = { 528 .hdr = { 529 .src_addr = { 0xff, 0xff, 0xff, 0xff, 530 0xff, 0xff, 0xff, 0xff, 531 0xff, 0xff, 0xff, 0xff, 532 0xff, 0xff, 0xff, 0xff }, 533 .dst_addr = { 0xff, 0xff, 0xff, 0xff, 534 0xff, 0xff, 0xff, 0xff, 535 0xff, 0xff, 0xff, 0xff, 536 0xff, 0xff, 0xff, 0xff }, 537 .proto = 0xff, 538 } 539 }; 540 541 rc = sfc_flow_parse_init(item, 542 (const void **)&spec, 543 (const void **)&mask, 544 &supp_mask, 545 &rte_flow_item_ipv6_mask, 546 sizeof(struct rte_flow_item_ipv6), 547 error); 548 if (rc != 0) 549 return rc; 550 551 /* 552 * Filtering by IPv6 source and destination addresses requires 553 * the appropriate ETHER_TYPE in hardware filters 554 */ 555 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) { 556 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; 557 efx_spec->efs_ether_type = ether_type_ipv6; 558 } else if (efx_spec->efs_ether_type != ether_type_ipv6) { 559 rte_flow_error_set(error, EINVAL, 560 RTE_FLOW_ERROR_TYPE_ITEM, item, 561 "Ethertype in pattern with IPV6 item should be appropriate"); 562 return -rte_errno; 563 } 564 565 if (spec == NULL) 566 return 0; 567 568 /* 569 * IPv6 addresses are in big-endian byte order in item and in 570 * efx_spec 571 */ 572 if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr, 573 sizeof(mask->hdr.src_addr)) == 0) { 574 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST; 575 576 RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) != 577 sizeof(spec->hdr.src_addr)); 578 rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr, 579 sizeof(efx_spec->efs_rem_host)); 580 } else if (!sfc_flow_is_zero(mask->hdr.src_addr, 581 sizeof(mask->hdr.src_addr))) { 582 goto fail_bad_mask; 583 } 584 585 if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr, 586 sizeof(mask->hdr.dst_addr)) == 0) { 587 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST; 588 589 RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) != 590 sizeof(spec->hdr.dst_addr)); 591 rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr, 592 sizeof(efx_spec->efs_loc_host)); 593 } else if (!sfc_flow_is_zero(mask->hdr.dst_addr, 594 sizeof(mask->hdr.dst_addr))) { 595 goto fail_bad_mask; 596 } 597 598 if (mask->hdr.proto == supp_mask.hdr.proto) { 599 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 600 efx_spec->efs_ip_proto = spec->hdr.proto; 601 } else if (mask->hdr.proto != 0) { 602 goto fail_bad_mask; 603 } 604 605 return 0; 606 607 fail_bad_mask: 608 rte_flow_error_set(error, EINVAL, 609 RTE_FLOW_ERROR_TYPE_ITEM, item, 610 "Bad mask in the IPV6 pattern item"); 611 return -rte_errno; 612 } 613 614 /** 615 * Convert TCP item to EFX filter specification. 616 * 617 * @param item[in] 618 * Item specification. Only source and destination ports fields 619 * are supported. If the mask is NULL, default mask will be used. 620 * Ranging is not supported. 621 * @param efx_spec[in, out] 622 * EFX filter specification to update. 623 * @param[out] error 624 * Perform verbose error reporting if not NULL. 
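 *
 * For illustration only: fully masked source/destination ports are
 * expected to set EFX_FILTER_MATCH_REM_PORT/EFX_FILTER_MATCH_LOC_PORT
 * (byte-swapped into efs_rem_port/efs_loc_port), and EFX_IPPROTO_TCP is
 * enforced via EFX_FILTER_MATCH_IP_PROTO in the same way as the EtherType
 * is enforced for IPv4/IPv6 items.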
625 */ 626 static int 627 sfc_flow_parse_tcp(const struct rte_flow_item *item, 628 efx_filter_spec_t *efx_spec, 629 struct rte_flow_error *error) 630 { 631 int rc; 632 const struct rte_flow_item_tcp *spec = NULL; 633 const struct rte_flow_item_tcp *mask = NULL; 634 const struct rte_flow_item_tcp supp_mask = { 635 .hdr = { 636 .src_port = 0xffff, 637 .dst_port = 0xffff, 638 } 639 }; 640 641 rc = sfc_flow_parse_init(item, 642 (const void **)&spec, 643 (const void **)&mask, 644 &supp_mask, 645 &rte_flow_item_tcp_mask, 646 sizeof(struct rte_flow_item_tcp), 647 error); 648 if (rc != 0) 649 return rc; 650 651 /* 652 * Filtering by TCP source and destination ports requires 653 * the appropriate IP_PROTO in hardware filters 654 */ 655 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) { 656 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 657 efx_spec->efs_ip_proto = EFX_IPPROTO_TCP; 658 } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) { 659 rte_flow_error_set(error, EINVAL, 660 RTE_FLOW_ERROR_TYPE_ITEM, item, 661 "IP proto in pattern with TCP item should be appropriate"); 662 return -rte_errno; 663 } 664 665 if (spec == NULL) 666 return 0; 667 668 /* 669 * Source and destination ports are in big-endian byte order in item and 670 * in little-endian in efx_spec, so byte swap is used 671 */ 672 if (mask->hdr.src_port == supp_mask.hdr.src_port) { 673 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT; 674 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port); 675 } else if (mask->hdr.src_port != 0) { 676 goto fail_bad_mask; 677 } 678 679 if (mask->hdr.dst_port == supp_mask.hdr.dst_port) { 680 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT; 681 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port); 682 } else if (mask->hdr.dst_port != 0) { 683 goto fail_bad_mask; 684 } 685 686 return 0; 687 688 fail_bad_mask: 689 rte_flow_error_set(error, EINVAL, 690 RTE_FLOW_ERROR_TYPE_ITEM, item, 691 "Bad mask in the TCP pattern item"); 692 return -rte_errno; 693 } 694 695 /** 696 * Convert UDP item to EFX filter specification. 697 * 698 * @param item[in] 699 * Item specification. Only source and destination ports fields 700 * are supported. If the mask is NULL, default mask will be used. 701 * Ranging is not supported. 702 * @param efx_spec[in, out] 703 * EFX filter specification to update. 704 * @param[out] error 705 * Perform verbose error reporting if not NULL. 
706 */ 707 static int 708 sfc_flow_parse_udp(const struct rte_flow_item *item, 709 efx_filter_spec_t *efx_spec, 710 struct rte_flow_error *error) 711 { 712 int rc; 713 const struct rte_flow_item_udp *spec = NULL; 714 const struct rte_flow_item_udp *mask = NULL; 715 const struct rte_flow_item_udp supp_mask = { 716 .hdr = { 717 .src_port = 0xffff, 718 .dst_port = 0xffff, 719 } 720 }; 721 722 rc = sfc_flow_parse_init(item, 723 (const void **)&spec, 724 (const void **)&mask, 725 &supp_mask, 726 &rte_flow_item_udp_mask, 727 sizeof(struct rte_flow_item_udp), 728 error); 729 if (rc != 0) 730 return rc; 731 732 /* 733 * Filtering by UDP source and destination ports requires 734 * the appropriate IP_PROTO in hardware filters 735 */ 736 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) { 737 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 738 efx_spec->efs_ip_proto = EFX_IPPROTO_UDP; 739 } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) { 740 rte_flow_error_set(error, EINVAL, 741 RTE_FLOW_ERROR_TYPE_ITEM, item, 742 "IP proto in pattern with UDP item should be appropriate"); 743 return -rte_errno; 744 } 745 746 if (spec == NULL) 747 return 0; 748 749 /* 750 * Source and destination ports are in big-endian byte order in item and 751 * in little-endian in efx_spec, so byte swap is used 752 */ 753 if (mask->hdr.src_port == supp_mask.hdr.src_port) { 754 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT; 755 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port); 756 } else if (mask->hdr.src_port != 0) { 757 goto fail_bad_mask; 758 } 759 760 if (mask->hdr.dst_port == supp_mask.hdr.dst_port) { 761 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT; 762 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port); 763 } else if (mask->hdr.dst_port != 0) { 764 goto fail_bad_mask; 765 } 766 767 return 0; 768 769 fail_bad_mask: 770 rte_flow_error_set(error, EINVAL, 771 RTE_FLOW_ERROR_TYPE_ITEM, item, 772 "Bad mask in the UDP pattern item"); 773 return -rte_errno; 774 } 775 776 /* 777 * Filters for encapsulated packets match based on the EtherType and IP 778 * protocol in the outer frame. 
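 * For example, a VXLAN or GENEVE item forces the outer IP protocol to
 * EFX_IPPROTO_UDP and an NVGRE item forces it to EFX_IPPROTO_GRE; if an
 * earlier item has already fixed a conflicting protocol or a non-IP outer
 * EtherType, the pattern is rejected.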
779 */ 780 static int 781 sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item, 782 efx_filter_spec_t *efx_spec, 783 uint8_t ip_proto, 784 struct rte_flow_error *error) 785 { 786 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) { 787 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 788 efx_spec->efs_ip_proto = ip_proto; 789 } else if (efx_spec->efs_ip_proto != ip_proto) { 790 switch (ip_proto) { 791 case EFX_IPPROTO_UDP: 792 rte_flow_error_set(error, EINVAL, 793 RTE_FLOW_ERROR_TYPE_ITEM, item, 794 "Outer IP header protocol must be UDP " 795 "in VxLAN/GENEVE pattern"); 796 return -rte_errno; 797 798 case EFX_IPPROTO_GRE: 799 rte_flow_error_set(error, EINVAL, 800 RTE_FLOW_ERROR_TYPE_ITEM, item, 801 "Outer IP header protocol must be GRE " 802 "in NVGRE pattern"); 803 return -rte_errno; 804 805 default: 806 rte_flow_error_set(error, EINVAL, 807 RTE_FLOW_ERROR_TYPE_ITEM, item, 808 "Only VxLAN/GENEVE/NVGRE tunneling patterns " 809 "are supported"); 810 return -rte_errno; 811 } 812 } 813 814 if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE && 815 efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 && 816 efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) { 817 rte_flow_error_set(error, EINVAL, 818 RTE_FLOW_ERROR_TYPE_ITEM, item, 819 "Outer frame EtherType in pattern with tunneling " 820 "must be IPv4 or IPv6"); 821 return -rte_errno; 822 } 823 824 return 0; 825 } 826 827 static int 828 sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec, 829 const uint8_t *vni_or_vsid_val, 830 const uint8_t *vni_or_vsid_mask, 831 const struct rte_flow_item *item, 832 struct rte_flow_error *error) 833 { 834 const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = { 835 0xff, 0xff, 0xff 836 }; 837 838 if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask, 839 EFX_VNI_OR_VSID_LEN) == 0) { 840 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID; 841 rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val, 842 EFX_VNI_OR_VSID_LEN); 843 } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) { 844 rte_flow_error_set(error, EINVAL, 845 RTE_FLOW_ERROR_TYPE_ITEM, item, 846 "Unsupported VNI/VSID mask"); 847 return -rte_errno; 848 } 849 850 return 0; 851 } 852 853 /** 854 * Convert VXLAN item to EFX filter specification. 855 * 856 * @param item[in] 857 * Item specification. Only VXLAN network identifier field is supported. 858 * If the mask is NULL, default mask will be used. 859 * Ranging is not supported. 860 * @param efx_spec[in, out] 861 * EFX filter specification to update. 862 * @param[out] error 863 * Perform verbose error reporting if not NULL. 
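 *
 * For illustration only: a pattern of the form eth / ipv4 / udp / vxlan
 * with a fully masked VNI (testpmd-style syntax, assumed purely as an
 * example) is expected to set EFX_TUNNEL_PROTOCOL_VXLAN together with
 * EFX_FILTER_MATCH_ENCAP_TYPE and EFX_FILTER_MATCH_VNI_OR_VSID.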
864 */ 865 static int 866 sfc_flow_parse_vxlan(const struct rte_flow_item *item, 867 efx_filter_spec_t *efx_spec, 868 struct rte_flow_error *error) 869 { 870 int rc; 871 const struct rte_flow_item_vxlan *spec = NULL; 872 const struct rte_flow_item_vxlan *mask = NULL; 873 const struct rte_flow_item_vxlan supp_mask = { 874 .vni = { 0xff, 0xff, 0xff } 875 }; 876 877 rc = sfc_flow_parse_init(item, 878 (const void **)&spec, 879 (const void **)&mask, 880 &supp_mask, 881 &rte_flow_item_vxlan_mask, 882 sizeof(struct rte_flow_item_vxlan), 883 error); 884 if (rc != 0) 885 return rc; 886 887 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec, 888 EFX_IPPROTO_UDP, error); 889 if (rc != 0) 890 return rc; 891 892 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN; 893 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; 894 895 if (spec == NULL) 896 return 0; 897 898 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni, 899 mask->vni, item, error); 900 901 return rc; 902 } 903 904 /** 905 * Convert GENEVE item to EFX filter specification. 906 * 907 * @param item[in] 908 * Item specification. Only Virtual Network Identifier and protocol type 909 * fields are supported. But protocol type can be only Ethernet (0x6558). 910 * If the mask is NULL, default mask will be used. 911 * Ranging is not supported. 912 * @param efx_spec[in, out] 913 * EFX filter specification to update. 914 * @param[out] error 915 * Perform verbose error reporting if not NULL. 916 */ 917 static int 918 sfc_flow_parse_geneve(const struct rte_flow_item *item, 919 efx_filter_spec_t *efx_spec, 920 struct rte_flow_error *error) 921 { 922 int rc; 923 const struct rte_flow_item_geneve *spec = NULL; 924 const struct rte_flow_item_geneve *mask = NULL; 925 const struct rte_flow_item_geneve supp_mask = { 926 .protocol = RTE_BE16(0xffff), 927 .vni = { 0xff, 0xff, 0xff } 928 }; 929 930 rc = sfc_flow_parse_init(item, 931 (const void **)&spec, 932 (const void **)&mask, 933 &supp_mask, 934 &rte_flow_item_geneve_mask, 935 sizeof(struct rte_flow_item_geneve), 936 error); 937 if (rc != 0) 938 return rc; 939 940 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec, 941 EFX_IPPROTO_UDP, error); 942 if (rc != 0) 943 return rc; 944 945 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE; 946 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; 947 948 if (spec == NULL) 949 return 0; 950 951 if (mask->protocol == supp_mask.protocol) { 952 if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) { 953 rte_flow_error_set(error, EINVAL, 954 RTE_FLOW_ERROR_TYPE_ITEM, item, 955 "GENEVE encap. protocol must be Ethernet " 956 "(0x6558) in the GENEVE pattern item"); 957 return -rte_errno; 958 } 959 } else if (mask->protocol != 0) { 960 rte_flow_error_set(error, EINVAL, 961 RTE_FLOW_ERROR_TYPE_ITEM, item, 962 "Unsupported mask for GENEVE encap. protocol"); 963 return -rte_errno; 964 } 965 966 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni, 967 mask->vni, item, error); 968 969 return rc; 970 } 971 972 /** 973 * Convert NVGRE item to EFX filter specification. 974 * 975 * @param item[in] 976 * Item specification. Only virtual subnet ID field is supported. 977 * If the mask is NULL, default mask will be used. 978 * Ranging is not supported. 979 * @param efx_spec[in, out] 980 * EFX filter specification to update. 981 * @param[out] error 982 * Perform verbose error reporting if not NULL. 
983 */ 984 static int 985 sfc_flow_parse_nvgre(const struct rte_flow_item *item, 986 efx_filter_spec_t *efx_spec, 987 struct rte_flow_error *error) 988 { 989 int rc; 990 const struct rte_flow_item_nvgre *spec = NULL; 991 const struct rte_flow_item_nvgre *mask = NULL; 992 const struct rte_flow_item_nvgre supp_mask = { 993 .tni = { 0xff, 0xff, 0xff } 994 }; 995 996 rc = sfc_flow_parse_init(item, 997 (const void **)&spec, 998 (const void **)&mask, 999 &supp_mask, 1000 &rte_flow_item_nvgre_mask, 1001 sizeof(struct rte_flow_item_nvgre), 1002 error); 1003 if (rc != 0) 1004 return rc; 1005 1006 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec, 1007 EFX_IPPROTO_GRE, error); 1008 if (rc != 0) 1009 return rc; 1010 1011 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE; 1012 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; 1013 1014 if (spec == NULL) 1015 return 0; 1016 1017 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni, 1018 mask->tni, item, error); 1019 1020 return rc; 1021 } 1022 1023 static const struct sfc_flow_item sfc_flow_items[] = { 1024 { 1025 .type = RTE_FLOW_ITEM_TYPE_VOID, 1026 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER, 1027 .layer = SFC_FLOW_ITEM_ANY_LAYER, 1028 .parse = sfc_flow_parse_void, 1029 }, 1030 { 1031 .type = RTE_FLOW_ITEM_TYPE_ETH, 1032 .prev_layer = SFC_FLOW_ITEM_START_LAYER, 1033 .layer = SFC_FLOW_ITEM_L2, 1034 .parse = sfc_flow_parse_eth, 1035 }, 1036 { 1037 .type = RTE_FLOW_ITEM_TYPE_VLAN, 1038 .prev_layer = SFC_FLOW_ITEM_L2, 1039 .layer = SFC_FLOW_ITEM_L2, 1040 .parse = sfc_flow_parse_vlan, 1041 }, 1042 { 1043 .type = RTE_FLOW_ITEM_TYPE_IPV4, 1044 .prev_layer = SFC_FLOW_ITEM_L2, 1045 .layer = SFC_FLOW_ITEM_L3, 1046 .parse = sfc_flow_parse_ipv4, 1047 }, 1048 { 1049 .type = RTE_FLOW_ITEM_TYPE_IPV6, 1050 .prev_layer = SFC_FLOW_ITEM_L2, 1051 .layer = SFC_FLOW_ITEM_L3, 1052 .parse = sfc_flow_parse_ipv6, 1053 }, 1054 { 1055 .type = RTE_FLOW_ITEM_TYPE_TCP, 1056 .prev_layer = SFC_FLOW_ITEM_L3, 1057 .layer = SFC_FLOW_ITEM_L4, 1058 .parse = sfc_flow_parse_tcp, 1059 }, 1060 { 1061 .type = RTE_FLOW_ITEM_TYPE_UDP, 1062 .prev_layer = SFC_FLOW_ITEM_L3, 1063 .layer = SFC_FLOW_ITEM_L4, 1064 .parse = sfc_flow_parse_udp, 1065 }, 1066 { 1067 .type = RTE_FLOW_ITEM_TYPE_VXLAN, 1068 .prev_layer = SFC_FLOW_ITEM_L4, 1069 .layer = SFC_FLOW_ITEM_START_LAYER, 1070 .parse = sfc_flow_parse_vxlan, 1071 }, 1072 { 1073 .type = RTE_FLOW_ITEM_TYPE_GENEVE, 1074 .prev_layer = SFC_FLOW_ITEM_L4, 1075 .layer = SFC_FLOW_ITEM_START_LAYER, 1076 .parse = sfc_flow_parse_geneve, 1077 }, 1078 { 1079 .type = RTE_FLOW_ITEM_TYPE_NVGRE, 1080 .prev_layer = SFC_FLOW_ITEM_L3, 1081 .layer = SFC_FLOW_ITEM_START_LAYER, 1082 .parse = sfc_flow_parse_nvgre, 1083 }, 1084 }; 1085 1086 /* 1087 * Protocol-independent flow API support 1088 */ 1089 static int 1090 sfc_flow_parse_attr(const struct rte_flow_attr *attr, 1091 struct rte_flow *flow, 1092 struct rte_flow_error *error) 1093 { 1094 if (attr == NULL) { 1095 rte_flow_error_set(error, EINVAL, 1096 RTE_FLOW_ERROR_TYPE_ATTR, NULL, 1097 "NULL attribute"); 1098 return -rte_errno; 1099 } 1100 if (attr->group != 0) { 1101 rte_flow_error_set(error, ENOTSUP, 1102 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr, 1103 "Groups are not supported"); 1104 return -rte_errno; 1105 } 1106 if (attr->priority != 0) { 1107 rte_flow_error_set(error, ENOTSUP, 1108 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr, 1109 "Priorities are not supported"); 1110 return -rte_errno; 1111 } 1112 if (attr->egress != 0) { 1113 rte_flow_error_set(error, ENOTSUP, 1114 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, 
attr, 1115 "Egress is not supported"); 1116 return -rte_errno; 1117 } 1118 if (attr->transfer != 0) { 1119 rte_flow_error_set(error, ENOTSUP, 1120 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr, 1121 "Transfer is not supported"); 1122 return -rte_errno; 1123 } 1124 if (attr->ingress == 0) { 1125 rte_flow_error_set(error, ENOTSUP, 1126 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr, 1127 "Only ingress is supported"); 1128 return -rte_errno; 1129 } 1130 1131 flow->spec.template.efs_flags |= EFX_FILTER_FLAG_RX; 1132 flow->spec.template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT; 1133 1134 return 0; 1135 } 1136 1137 /* Get item from array sfc_flow_items */ 1138 static const struct sfc_flow_item * 1139 sfc_flow_get_item(enum rte_flow_item_type type) 1140 { 1141 unsigned int i; 1142 1143 for (i = 0; i < RTE_DIM(sfc_flow_items); i++) 1144 if (sfc_flow_items[i].type == type) 1145 return &sfc_flow_items[i]; 1146 1147 return NULL; 1148 } 1149 1150 static int 1151 sfc_flow_parse_pattern(const struct rte_flow_item pattern[], 1152 struct rte_flow *flow, 1153 struct rte_flow_error *error) 1154 { 1155 int rc; 1156 unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER; 1157 boolean_t is_ifrm = B_FALSE; 1158 const struct sfc_flow_item *item; 1159 1160 if (pattern == NULL) { 1161 rte_flow_error_set(error, EINVAL, 1162 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL, 1163 "NULL pattern"); 1164 return -rte_errno; 1165 } 1166 1167 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) { 1168 item = sfc_flow_get_item(pattern->type); 1169 if (item == NULL) { 1170 rte_flow_error_set(error, ENOTSUP, 1171 RTE_FLOW_ERROR_TYPE_ITEM, pattern, 1172 "Unsupported pattern item"); 1173 return -rte_errno; 1174 } 1175 1176 /* 1177 * Omitting one or several protocol layers at the beginning 1178 * of pattern is supported 1179 */ 1180 if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER && 1181 prev_layer != SFC_FLOW_ITEM_ANY_LAYER && 1182 item->prev_layer != prev_layer) { 1183 rte_flow_error_set(error, ENOTSUP, 1184 RTE_FLOW_ERROR_TYPE_ITEM, pattern, 1185 "Unexpected sequence of pattern items"); 1186 return -rte_errno; 1187 } 1188 1189 /* 1190 * Allow only VOID and ETH pattern items in the inner frame. 1191 * Also check that there is only one tunneling protocol. 
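 * For example, eth / ipv4 / udp / vxlan / eth is an acceptable sequence,
 * whereas a second tunnel item or an inner IPv4/TCP/UDP item after the
 * tunnel item is rejected.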
1192 */ 1193 switch (item->type) { 1194 case RTE_FLOW_ITEM_TYPE_VOID: 1195 case RTE_FLOW_ITEM_TYPE_ETH: 1196 break; 1197 1198 case RTE_FLOW_ITEM_TYPE_VXLAN: 1199 case RTE_FLOW_ITEM_TYPE_GENEVE: 1200 case RTE_FLOW_ITEM_TYPE_NVGRE: 1201 if (is_ifrm) { 1202 rte_flow_error_set(error, EINVAL, 1203 RTE_FLOW_ERROR_TYPE_ITEM, 1204 pattern, 1205 "More than one tunneling protocol"); 1206 return -rte_errno; 1207 } 1208 is_ifrm = B_TRUE; 1209 break; 1210 1211 default: 1212 if (is_ifrm) { 1213 rte_flow_error_set(error, EINVAL, 1214 RTE_FLOW_ERROR_TYPE_ITEM, 1215 pattern, 1216 "There is an unsupported pattern item " 1217 "in the inner frame"); 1218 return -rte_errno; 1219 } 1220 break; 1221 } 1222 1223 rc = item->parse(pattern, &flow->spec.template, error); 1224 if (rc != 0) 1225 return rc; 1226 1227 if (item->layer != SFC_FLOW_ITEM_ANY_LAYER) 1228 prev_layer = item->layer; 1229 } 1230 1231 return 0; 1232 } 1233 1234 static int 1235 sfc_flow_parse_queue(struct sfc_adapter *sa, 1236 const struct rte_flow_action_queue *queue, 1237 struct rte_flow *flow) 1238 { 1239 struct sfc_rxq *rxq; 1240 1241 if (queue->index >= sa->rxq_count) 1242 return -EINVAL; 1243 1244 rxq = sa->rxq_info[queue->index].rxq; 1245 flow->spec.template.efs_dmaq_id = (uint16_t)rxq->hw_index; 1246 1247 return 0; 1248 } 1249 1250 static int 1251 sfc_flow_parse_rss(struct sfc_adapter *sa, 1252 const struct rte_flow_action_rss *action_rss, 1253 struct rte_flow *flow) 1254 { 1255 struct sfc_rss *rss = &sa->rss; 1256 unsigned int rxq_sw_index; 1257 struct sfc_rxq *rxq; 1258 unsigned int rxq_hw_index_min; 1259 unsigned int rxq_hw_index_max; 1260 efx_rx_hash_type_t efx_hash_types; 1261 const uint8_t *rss_key; 1262 struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf; 1263 unsigned int i; 1264 1265 if (action_rss->queue_num == 0) 1266 return -EINVAL; 1267 1268 rxq_sw_index = sa->rxq_count - 1; 1269 rxq = sa->rxq_info[rxq_sw_index].rxq; 1270 rxq_hw_index_min = rxq->hw_index; 1271 rxq_hw_index_max = 0; 1272 1273 for (i = 0; i < action_rss->queue_num; ++i) { 1274 rxq_sw_index = action_rss->queue[i]; 1275 1276 if (rxq_sw_index >= sa->rxq_count) 1277 return -EINVAL; 1278 1279 rxq = sa->rxq_info[rxq_sw_index].rxq; 1280 1281 if (rxq->hw_index < rxq_hw_index_min) 1282 rxq_hw_index_min = rxq->hw_index; 1283 1284 if (rxq->hw_index > rxq_hw_index_max) 1285 rxq_hw_index_max = rxq->hw_index; 1286 } 1287 1288 switch (action_rss->func) { 1289 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1290 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1291 break; 1292 default: 1293 return -EINVAL; 1294 } 1295 1296 if (action_rss->level) 1297 return -EINVAL; 1298 1299 /* 1300 * Dummy RSS action with only one queue and no specific settings 1301 * for hash types and key does not require dedicated RSS context 1302 * and may be simplified to single queue action. 
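 * For example, an RSS action naming a single queue with no hash types and
 * no key (assumed here purely as an illustration) only sets efs_dmaq_id,
 * much like a QUEUE action, and no dedicated RSS context is allocated.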
1303 */ 1304 if (action_rss->queue_num == 1 && action_rss->types == 0 && 1305 action_rss->key_len == 0) { 1306 flow->spec.template.efs_dmaq_id = rxq_hw_index_min; 1307 return 0; 1308 } 1309 1310 if (action_rss->types) { 1311 int rc; 1312 1313 rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types, 1314 &efx_hash_types); 1315 if (rc != 0) 1316 return -rc; 1317 } else { 1318 unsigned int i; 1319 1320 efx_hash_types = 0; 1321 for (i = 0; i < rss->hf_map_nb_entries; ++i) 1322 efx_hash_types |= rss->hf_map[i].efx; 1323 } 1324 1325 if (action_rss->key_len) { 1326 if (action_rss->key_len != sizeof(rss->key)) 1327 return -EINVAL; 1328 1329 rss_key = action_rss->key; 1330 } else { 1331 rss_key = rss->key; 1332 } 1333 1334 flow->rss = B_TRUE; 1335 1336 sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min; 1337 sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max; 1338 sfc_rss_conf->rss_hash_types = efx_hash_types; 1339 rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key)); 1340 1341 for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) { 1342 unsigned int nb_queues = action_rss->queue_num; 1343 unsigned int rxq_sw_index = action_rss->queue[i % nb_queues]; 1344 struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq; 1345 1346 sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min; 1347 } 1348 1349 return 0; 1350 } 1351 1352 static int 1353 sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec, 1354 unsigned int filters_count) 1355 { 1356 unsigned int i; 1357 int ret = 0; 1358 1359 for (i = 0; i < filters_count; i++) { 1360 int rc; 1361 1362 rc = efx_filter_remove(sa->nic, &spec->filters[i]); 1363 if (ret == 0 && rc != 0) { 1364 sfc_err(sa, "failed to remove filter specification " 1365 "(rc = %d)", rc); 1366 ret = rc; 1367 } 1368 } 1369 1370 return ret; 1371 } 1372 1373 static int 1374 sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec) 1375 { 1376 unsigned int i; 1377 int rc = 0; 1378 1379 for (i = 0; i < spec->count; i++) { 1380 rc = efx_filter_insert(sa->nic, &spec->filters[i]); 1381 if (rc != 0) { 1382 sfc_flow_spec_flush(sa, spec, i); 1383 break; 1384 } 1385 } 1386 1387 return rc; 1388 } 1389 1390 static int 1391 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec) 1392 { 1393 return sfc_flow_spec_flush(sa, spec, spec->count); 1394 } 1395 1396 static int 1397 sfc_flow_filter_insert(struct sfc_adapter *sa, 1398 struct rte_flow *flow) 1399 { 1400 struct sfc_rss *rss = &sa->rss; 1401 struct sfc_flow_rss *flow_rss = &flow->rss_conf; 1402 uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT; 1403 unsigned int i; 1404 int rc = 0; 1405 1406 if (flow->rss) { 1407 unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max - 1408 flow_rss->rxq_hw_index_min + 1, 1409 EFX_MAXRSS); 1410 1411 rc = efx_rx_scale_context_alloc(sa->nic, 1412 EFX_RX_SCALE_EXCLUSIVE, 1413 rss_spread, 1414 &efs_rss_context); 1415 if (rc != 0) 1416 goto fail_scale_context_alloc; 1417 1418 rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context, 1419 rss->hash_alg, 1420 flow_rss->rss_hash_types, B_TRUE); 1421 if (rc != 0) 1422 goto fail_scale_mode_set; 1423 1424 rc = efx_rx_scale_key_set(sa->nic, efs_rss_context, 1425 flow_rss->rss_key, 1426 sizeof(rss->key)); 1427 if (rc != 0) 1428 goto fail_scale_key_set; 1429 1430 /* 1431 * At this point, fully elaborated filter specifications 1432 * have been produced from the template. To make sure that 1433 * RSS behaviour is consistent between them, set the same 1434 * RSS context value everywhere. 
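 * The exclusive RSS context allocated above is therefore shared by all
 * filter copies of the rule and is released again in
 * sfc_flow_filter_remove() once the filters are removed.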
1435 */ 1436 for (i = 0; i < flow->spec.count; i++) { 1437 efx_filter_spec_t *spec = &flow->spec.filters[i]; 1438 1439 spec->efs_rss_context = efs_rss_context; 1440 spec->efs_dmaq_id = flow_rss->rxq_hw_index_min; 1441 spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS; 1442 } 1443 } 1444 1445 rc = sfc_flow_spec_insert(sa, &flow->spec); 1446 if (rc != 0) 1447 goto fail_filter_insert; 1448 1449 if (flow->rss) { 1450 /* 1451 * Scale table is set after filter insertion because 1452 * the table entries are relative to the base RxQ ID 1453 * and the latter is submitted to the HW by means of 1454 * inserting a filter, so by the time of the request 1455 * the HW knows all the information needed to verify 1456 * the table entries, and the operation will succeed 1457 */ 1458 rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context, 1459 flow_rss->rss_tbl, 1460 RTE_DIM(flow_rss->rss_tbl)); 1461 if (rc != 0) 1462 goto fail_scale_tbl_set; 1463 } 1464 1465 return 0; 1466 1467 fail_scale_tbl_set: 1468 sfc_flow_spec_remove(sa, &flow->spec); 1469 1470 fail_filter_insert: 1471 fail_scale_key_set: 1472 fail_scale_mode_set: 1473 if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT) 1474 efx_rx_scale_context_free(sa->nic, efs_rss_context); 1475 1476 fail_scale_context_alloc: 1477 return rc; 1478 } 1479 1480 static int 1481 sfc_flow_filter_remove(struct sfc_adapter *sa, 1482 struct rte_flow *flow) 1483 { 1484 int rc = 0; 1485 1486 rc = sfc_flow_spec_remove(sa, &flow->spec); 1487 if (rc != 0) 1488 return rc; 1489 1490 if (flow->rss) { 1491 /* 1492 * All specifications for a given flow rule have the same RSS 1493 * context, so that RSS context value is taken from the first 1494 * filter specification 1495 */ 1496 efx_filter_spec_t *spec = &flow->spec.filters[0]; 1497 1498 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context); 1499 } 1500 1501 return rc; 1502 } 1503 1504 static int 1505 sfc_flow_parse_mark(struct sfc_adapter *sa, 1506 const struct rte_flow_action_mark *mark, 1507 struct rte_flow *flow) 1508 { 1509 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); 1510 1511 if (mark == NULL || mark->id > encp->enc_filter_action_mark_max) 1512 return EINVAL; 1513 1514 flow->spec.template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK; 1515 flow->spec.template.efs_mark = mark->id; 1516 1517 return 0; 1518 } 1519 1520 static int 1521 sfc_flow_parse_actions(struct sfc_adapter *sa, 1522 const struct rte_flow_action actions[], 1523 struct rte_flow *flow, 1524 struct rte_flow_error *error) 1525 { 1526 int rc; 1527 const unsigned int dp_rx_features = sa->dp_rx->features; 1528 uint32_t actions_set = 0; 1529 const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) | 1530 (1UL << RTE_FLOW_ACTION_TYPE_RSS) | 1531 (1UL << RTE_FLOW_ACTION_TYPE_DROP); 1532 const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) | 1533 (1UL << RTE_FLOW_ACTION_TYPE_FLAG); 1534 1535 if (actions == NULL) { 1536 rte_flow_error_set(error, EINVAL, 1537 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL, 1538 "NULL actions"); 1539 return -rte_errno; 1540 } 1541 1542 #define SFC_BUILD_SET_OVERFLOW(_action, _set) \ 1543 RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT) 1544 1545 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 1546 switch (actions->type) { 1547 case RTE_FLOW_ACTION_TYPE_VOID: 1548 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID, 1549 actions_set); 1550 break; 1551 1552 case RTE_FLOW_ACTION_TYPE_QUEUE: 1553 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE, 1554 actions_set); 1555 if ((actions_set & fate_actions_mask) != 
0) 1556 goto fail_fate_actions; 1557 1558 rc = sfc_flow_parse_queue(sa, actions->conf, flow); 1559 if (rc != 0) { 1560 rte_flow_error_set(error, EINVAL, 1561 RTE_FLOW_ERROR_TYPE_ACTION, actions, 1562 "Bad QUEUE action"); 1563 return -rte_errno; 1564 } 1565 break; 1566 1567 case RTE_FLOW_ACTION_TYPE_RSS: 1568 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS, 1569 actions_set); 1570 if ((actions_set & fate_actions_mask) != 0) 1571 goto fail_fate_actions; 1572 1573 rc = sfc_flow_parse_rss(sa, actions->conf, flow); 1574 if (rc != 0) { 1575 rte_flow_error_set(error, -rc, 1576 RTE_FLOW_ERROR_TYPE_ACTION, actions, 1577 "Bad RSS action"); 1578 return -rte_errno; 1579 } 1580 break; 1581 1582 case RTE_FLOW_ACTION_TYPE_DROP: 1583 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP, 1584 actions_set); 1585 if ((actions_set & fate_actions_mask) != 0) 1586 goto fail_fate_actions; 1587 1588 flow->spec.template.efs_dmaq_id = 1589 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP; 1590 break; 1591 1592 case RTE_FLOW_ACTION_TYPE_FLAG: 1593 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG, 1594 actions_set); 1595 if ((actions_set & mark_actions_mask) != 0) 1596 goto fail_actions_overlap; 1597 1598 if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) { 1599 rte_flow_error_set(error, ENOTSUP, 1600 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1601 "FLAG action is not supported on the current Rx datapath"); 1602 return -rte_errno; 1603 } 1604 1605 flow->spec.template.efs_flags |= 1606 EFX_FILTER_FLAG_ACTION_FLAG; 1607 break; 1608 1609 case RTE_FLOW_ACTION_TYPE_MARK: 1610 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK, 1611 actions_set); 1612 if ((actions_set & mark_actions_mask) != 0) 1613 goto fail_actions_overlap; 1614 1615 if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) { 1616 rte_flow_error_set(error, ENOTSUP, 1617 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1618 "MARK action is not supported on the current Rx datapath"); 1619 return -rte_errno; 1620 } 1621 1622 rc = sfc_flow_parse_mark(sa, actions->conf, flow); 1623 if (rc != 0) { 1624 rte_flow_error_set(error, rc, 1625 RTE_FLOW_ERROR_TYPE_ACTION, actions, 1626 "Bad MARK action"); 1627 return -rte_errno; 1628 } 1629 break; 1630 1631 default: 1632 rte_flow_error_set(error, ENOTSUP, 1633 RTE_FLOW_ERROR_TYPE_ACTION, actions, 1634 "Action is not supported"); 1635 return -rte_errno; 1636 } 1637 1638 actions_set |= (1UL << actions->type); 1639 } 1640 #undef SFC_BUILD_SET_OVERFLOW 1641 1642 /* When fate is unknown, drop traffic. */ 1643 if ((actions_set & fate_actions_mask) == 0) { 1644 flow->spec.template.efs_dmaq_id = 1645 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP; 1646 } 1647 1648 return 0; 1649 1650 fail_fate_actions: 1651 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions, 1652 "Cannot combine several fate-deciding actions, " 1653 "choose between QUEUE, RSS or DROP"); 1654 return -rte_errno; 1655 1656 fail_actions_overlap: 1657 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions, 1658 "Overlapping actions are not supported"); 1659 return -rte_errno; 1660 } 1661 1662 /** 1663 * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST 1664 * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same 1665 * specifications after copying. 1666 * 1667 * @param spec[in, out] 1668 * SFC flow specification to update. 1669 * @param filters_count_for_one_val[in] 1670 * How many specifications should have the same match flag, what is the 1671 * number of specifications before copying. 1672 * @param error[out] 1673 * Perform verbose error reporting if not NULL. 
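 *
 * For illustration only: if the rule initially produced one specification,
 * it is duplicated so that the first copy matches
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and the second copy matches
 * EFX_FILTER_MATCH_UNKNOWN_MCAST_DST, covering both destination kinds
 * with a single flow rule.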
1674 */ 1675 static int 1676 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec, 1677 unsigned int filters_count_for_one_val, 1678 struct rte_flow_error *error) 1679 { 1680 unsigned int i; 1681 static const efx_filter_match_flags_t vals[] = { 1682 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, 1683 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST 1684 }; 1685 1686 if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) { 1687 rte_flow_error_set(error, EINVAL, 1688 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1689 "Number of specifications is incorrect while copying " 1690 "by unknown destination flags"); 1691 return -rte_errno; 1692 } 1693 1694 for (i = 0; i < spec->count; i++) { 1695 /* The check above ensures that divisor can't be zero here */ 1696 spec->filters[i].efs_match_flags |= 1697 vals[i / filters_count_for_one_val]; 1698 } 1699 1700 return 0; 1701 } 1702 1703 /** 1704 * Check that the following conditions are met: 1705 * - the list of supported filters has a filter 1706 * with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of 1707 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also 1708 * be inserted. 1709 * 1710 * @param match[in] 1711 * The match flags of filter. 1712 * @param spec[in] 1713 * Specification to be supplemented. 1714 * @param filter[in] 1715 * SFC filter with list of supported filters. 1716 */ 1717 static boolean_t 1718 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match, 1719 __rte_unused efx_filter_spec_t *spec, 1720 struct sfc_filter *filter) 1721 { 1722 unsigned int i; 1723 efx_filter_match_flags_t match_mcast_dst; 1724 1725 match_mcast_dst = 1726 (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) | 1727 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST; 1728 for (i = 0; i < filter->supported_match_num; i++) { 1729 if (match_mcast_dst == filter->supported_match[i]) 1730 return B_TRUE; 1731 } 1732 1733 return B_FALSE; 1734 } 1735 1736 /** 1737 * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and 1738 * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same 1739 * specifications after copying. 1740 * 1741 * @param spec[in, out] 1742 * SFC flow specification to update. 1743 * @param filters_count_for_one_val[in] 1744 * How many specifications should have the same EtherType value, what is the 1745 * number of specifications before copying. 1746 * @param error[out] 1747 * Perform verbose error reporting if not NULL. 1748 */ 1749 static int 1750 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec, 1751 unsigned int filters_count_for_one_val, 1752 struct rte_flow_error *error) 1753 { 1754 unsigned int i; 1755 static const uint16_t vals[] = { 1756 EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6 1757 }; 1758 1759 if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) { 1760 rte_flow_error_set(error, EINVAL, 1761 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1762 "Number of specifications is incorrect " 1763 "while copying by Ethertype"); 1764 return -rte_errno; 1765 } 1766 1767 for (i = 0; i < spec->count; i++) { 1768 spec->filters[i].efs_match_flags |= 1769 EFX_FILTER_MATCH_ETHER_TYPE; 1770 1771 /* 1772 * The check above ensures that 1773 * filters_count_for_one_val is not 0 1774 */ 1775 spec->filters[i].efs_ether_type = 1776 vals[i / filters_count_for_one_val]; 1777 } 1778 1779 return 0; 1780 } 1781 1782 /** 1783 * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and 1784 * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same 1785 * specifications after copying. 1786 * 1787 * @param spec[in, out] 1788 * SFC flow specification to update. 
1789 * @param filters_count_for_one_val[in] 1790 * How many specifications should have the same match flag, what is the 1791 * number of specifications before copying. 1792 * @param error[out] 1793 * Perform verbose error reporting if not NULL. 1794 */ 1795 static int 1796 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec, 1797 unsigned int filters_count_for_one_val, 1798 struct rte_flow_error *error) 1799 { 1800 unsigned int i; 1801 static const efx_filter_match_flags_t vals[] = { 1802 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, 1803 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST 1804 }; 1805 1806 if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) { 1807 rte_flow_error_set(error, EINVAL, 1808 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1809 "Number of specifications is incorrect while copying " 1810 "by inner frame unknown destination flags"); 1811 return -rte_errno; 1812 } 1813 1814 for (i = 0; i < spec->count; i++) { 1815 /* The check above ensures that divisor can't be zero here */ 1816 spec->filters[i].efs_match_flags |= 1817 vals[i / filters_count_for_one_val]; 1818 } 1819 1820 return 0; 1821 } 1822 1823 /** 1824 * Check that the following conditions are met: 1825 * - the specification corresponds to a filter for encapsulated traffic 1826 * - the list of supported filters has a filter 1827 * with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of 1828 * EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also 1829 * be inserted. 1830 * 1831 * @param match[in] 1832 * The match flags of filter. 1833 * @param spec[in] 1834 * Specification to be supplemented. 1835 * @param filter[in] 1836 * SFC filter with list of supported filters. 1837 */ 1838 static boolean_t 1839 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match, 1840 efx_filter_spec_t *spec, 1841 struct sfc_filter *filter) 1842 { 1843 unsigned int i; 1844 efx_tunnel_protocol_t encap_type = spec->efs_encap_type; 1845 efx_filter_match_flags_t match_mcast_dst; 1846 1847 if (encap_type == EFX_TUNNEL_PROTOCOL_NONE) 1848 return B_FALSE; 1849 1850 match_mcast_dst = 1851 (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) | 1852 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST; 1853 for (i = 0; i < filter->supported_match_num; i++) { 1854 if (match_mcast_dst == filter->supported_match[i]) 1855 return B_TRUE; 1856 } 1857 1858 return B_FALSE; 1859 } 1860 1861 /* 1862 * Match flags that can be automatically added to filters. 1863 * Selecting the last minimum when searching for the copy flag ensures that the 1864 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than 1865 * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter 1866 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported 1867 * filters. 
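 * Each entry below doubles the number of filter specifications when it is
 * applied (vals_count is 2 in all cases), so applying several entries to
 * one rule multiplies the specification count accordingly, bounded by
 * SF_FLOW_SPEC_NB_FILTERS_MAX in sfc_flow_spec_add_match_flag().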
 */
static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
	{
		.flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
		.vals_count = 2,
		.set_vals = sfc_flow_set_unknown_dst_flags,
		.spec_check = sfc_flow_check_unknown_dst_flags,
	},
	{
		.flag = EFX_FILTER_MATCH_ETHER_TYPE,
		.vals_count = 2,
		.set_vals = sfc_flow_set_ethertypes,
		.spec_check = NULL,
	},
	{
		.flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
		.vals_count = 2,
		.set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
		.spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
	},
};

/* Get item from array sfc_flow_copy_flags */
static const struct sfc_flow_copy_flag *
sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		if (sfc_flow_copy_flags[i].flag == flag)
			return &sfc_flow_copy_flags[i];
	}

	return NULL;
}

/**
 * Make copies of the specifications and set the match flag and the values
 * of the corresponding field in them.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param flag[in]
 *   The match flag to add.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
			     efx_filter_match_flags_t flag,
			     struct rte_flow_error *error)
{
	unsigned int i;
	unsigned int new_filters_count;
	unsigned int filters_count_for_one_val;
	const struct sfc_flow_copy_flag *copy_flag;
	int rc;

	copy_flag = sfc_flow_get_copy_flag(flag);
	if (copy_flag == NULL) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Unsupported spec field for copying");
		return -rte_errno;
	}

	new_filters_count = spec->count * copy_flag->vals_count;
	if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Too many EFX specifications in the flow rule");
		return -rte_errno;
	}

	/* Copy filter specifications */
	for (i = spec->count; i < new_filters_count; i++)
		spec->filters[i] = spec->filters[i - spec->count];

	filters_count_for_one_val = spec->count;
	spec->count = new_filters_count;

	rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
	if (rc != 0)
		return rc;

	return 0;
}

/**
 * Check that the given set of match flags missing in the original filter spec
 * could be covered by adding spec copies which specify the corresponding
 * flags and packet field values to match.
 *
 * @param miss_flags[in]
 *   Flags that are missing from the specification but present in the
 *   supported filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter.
 *
 * @return
 *   Number of specifications after copy or 0, if the flags cannot be added.
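 *
 * For illustration only: if the template lacks both the EtherType and the
 * unknown-destination flags relative to a supported filter, the returned
 * multiplier is 2 * 2 = 4, i.e. four specification copies would be needed
 * to cover the IPv4/IPv6 and unicast/multicast combinations.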
/**
 * Check that the given set of match flags missing in the original filter spec
 * could be covered by adding spec copies which specify the corresponding
 * flags and packet field values to match.
 *
 * @param miss_flags[in]
 *   Match flags that are present in the supported filter but missing from
 *   the specification.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter.
 *
 * @return
 *   Number of specifications after copying, or 0 if the flags cannot be added.
 */
static unsigned int
sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
                             efx_filter_spec_t *spec,
                             struct sfc_filter *filter)
{
        unsigned int i;
        efx_filter_match_flags_t copy_flags = 0;
        efx_filter_match_flags_t flag;
        efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
        sfc_flow_spec_check *check;
        unsigned int multiplier = 1;

        for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
                flag = sfc_flow_copy_flags[i].flag;
                check = sfc_flow_copy_flags[i].spec_check;
                if ((flag & miss_flags) == flag) {
                        if (check != NULL && !check(match, spec, filter))
                                continue;

                        copy_flags |= flag;
                        multiplier *= sfc_flow_copy_flags[i].vals_count;
                }
        }

        if (copy_flags == miss_flags)
                return multiplier;

        return 0;
}

/**
 * Attempt to supplement the specification template with the match flags that
 * are needed to form a minimal supported set. To do this, the specifications
 * are copied and filled with the values of the fields that correspond to the
 * missing flags.
 * The necessary and sufficient filter set is built from the fewest number of
 * copies that covers the minimal required set of flags.
 *
 * @param sa[in]
 *   SFC adapter.
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
                               struct sfc_flow_spec *spec,
                               struct rte_flow_error *error)
{
        struct sfc_filter *filter = &sa->filter;
        efx_filter_match_flags_t miss_flags;
        efx_filter_match_flags_t min_miss_flags = 0;
        efx_filter_match_flags_t match;
        unsigned int min_multiplier = UINT_MAX;
        unsigned int multiplier;
        unsigned int i;
        int rc;

        match = spec->template.efs_match_flags;
        for (i = 0; i < filter->supported_match_num; i++) {
                if ((match & filter->supported_match[i]) == match) {
                        miss_flags = filter->supported_match[i] & (~match);
                        multiplier = sfc_flow_check_missing_flags(miss_flags,
                                &spec->template, filter);
                        if (multiplier > 0) {
                                if (multiplier <= min_multiplier) {
                                        min_multiplier = multiplier;
                                        min_miss_flags = miss_flags;
                                }
                        }
                }
        }

        if (min_multiplier == UINT_MAX) {
                rte_flow_error_set(error, ENOTSUP,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "The flow rule pattern is unsupported");
                return -rte_errno;
        }

        for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
                efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;

                if ((flag & min_miss_flags) == flag) {
                        rc = sfc_flow_spec_add_match_flag(spec, flag, error);
                        if (rc != 0)
                                return rc;
                }
        }

        return 0;
}
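/*
 * Example of the selection above (hypothetical supported filter list): if the
 * template has no match flags while the hardware supports both an
 * ETHER_TYPE-only filter and an UNKNOWN_UCAST_DST-only filter, each candidate
 * needs a single copy flag and yields multiplier 2. The "<=" comparison keeps
 * the last minimum, i.e. the candidate closer to the end of the supported
 * list, which gives EFX_FILTER_MATCH_UNKNOWN_UCAST_DST priority over
 * EFX_FILTER_MATCH_ETHER_TYPE as described above sfc_flow_copy_flags.
 */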
/**
 * Check whether the set of match flags matches the given pattern of filter
 * match flags. The pattern may additionally be extended with the
 * EFX_FILTER_MATCH_OUTER_VID and EFX_FILTER_MATCH_INNER_VID flags.
 *
 * @param match_flags[in]
 *   Set of match flags.
 * @param flags_pattern[in]
 *   Pattern of filter match flags.
 */
static boolean_t
sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
                            efx_filter_match_flags_t flags_pattern)
{
        if ((match_flags & flags_pattern) != flags_pattern)
                return B_FALSE;

        switch (match_flags & ~flags_pattern) {
        case 0:
        case EFX_FILTER_MATCH_OUTER_VID:
        case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
                return B_TRUE;
        default:
                return B_FALSE;
        }
}

/**
 * Check whether the spec maps to a hardware filter which is known to be
 * ineffective despite being valid.
 *
 * @param spec[in]
 *   SFC flow specification.
 */
static boolean_t
sfc_flow_is_match_flags_exception(struct sfc_flow_spec *spec)
{
        unsigned int i;
        uint16_t ether_type;
        uint8_t ip_proto;
        efx_filter_match_flags_t match_flags;

        for (i = 0; i < spec->count; i++) {
                match_flags = spec->filters[i].efs_match_flags;

                if (sfc_flow_is_match_with_vids(match_flags,
                                EFX_FILTER_MATCH_ETHER_TYPE) ||
                    sfc_flow_is_match_with_vids(match_flags,
                                EFX_FILTER_MATCH_ETHER_TYPE |
                                EFX_FILTER_MATCH_LOC_MAC)) {
                        ether_type = spec->filters[i].efs_ether_type;
                        if (ether_type == EFX_ETHER_TYPE_IPV4 ||
                            ether_type == EFX_ETHER_TYPE_IPV6)
                                return B_TRUE;
                } else if (sfc_flow_is_match_with_vids(match_flags,
                                EFX_FILTER_MATCH_ETHER_TYPE |
                                EFX_FILTER_MATCH_IP_PROTO) ||
                           sfc_flow_is_match_with_vids(match_flags,
                                EFX_FILTER_MATCH_ETHER_TYPE |
                                EFX_FILTER_MATCH_IP_PROTO |
                                EFX_FILTER_MATCH_LOC_MAC)) {
                        ip_proto = spec->filters[i].efs_ip_proto;
                        if (ip_proto == EFX_IPPROTO_TCP ||
                            ip_proto == EFX_IPPROTO_UDP)
                                return B_TRUE;
                }
        }

        return B_FALSE;
}

static int
sfc_flow_validate_match_flags(struct sfc_adapter *sa,
                              struct rte_flow *flow,
                              struct rte_flow_error *error)
{
        efx_filter_spec_t *spec_tmpl = &flow->spec.template;
        efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
        int rc;

        /* Initialize the first filter spec with the template */
        flow->spec.filters[0] = *spec_tmpl;
        flow->spec.count = 1;

        if (!sfc_filter_is_match_supported(sa, match_flags)) {
                rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
                if (rc != 0)
                        return rc;
        }

        if (sfc_flow_is_match_flags_exception(&flow->spec)) {
                rte_flow_error_set(error, ENOTSUP,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "The flow rule pattern is unsupported");
                return -rte_errno;
        }

        return 0;
}

static int
sfc_flow_parse(struct rte_eth_dev *dev,
               const struct rte_flow_attr *attr,
               const struct rte_flow_item pattern[],
               const struct rte_flow_action actions[],
               struct rte_flow *flow,
               struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        int rc;

        rc = sfc_flow_parse_attr(attr, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        rc = sfc_flow_parse_pattern(pattern, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        rc = sfc_flow_parse_actions(sa, actions, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        rc = sfc_flow_validate_match_flags(sa, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        return 0;

fail_bad_value:
        return rc;
}
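/*
 * For reference, a minimal application-level rule that exercises the parser
 * above might look as follows (illustrative sketch only; the MAC address and
 * queue index are arbitrary). It reaches the callbacks below through the
 * generic rte_flow API:
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item_eth eth_spec;  (destination MAC filled by the app)
 *   struct rte_flow_item_eth eth_mask;  (destination MAC bytes set to 0xff)
 *   struct rte_flow_action_queue queue = { .index = 0 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *             .spec = &eth_spec, .mask = &eth_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *
 *   rte_flow_validate(port_id, &attr, pattern, actions, &err);
 *   rte_flow_create(port_id, &attr, pattern, actions, &err);
 */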
static int
sfc_flow_validate(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        struct rte_flow flow;

        memset(&flow, 0, sizeof(flow));

        return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
}

static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct rte_flow *flow = NULL;
        int rc;

        flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
        if (flow == NULL) {
                rte_flow_error_set(error, ENOMEM,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "Failed to allocate memory");
                goto fail_no_mem;
        }

        /* Take the adapter lock before touching the flow list */
        sfc_adapter_lock(sa);

        rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);

        if (sa->state == SFC_ADAPTER_STARTED) {
                rc = sfc_flow_filter_insert(sa, flow);
                if (rc != 0) {
                        rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                "Failed to insert filter");
                        goto fail_filter_insert;
                }
        }

        sfc_adapter_unlock(sa);

        return flow;

fail_filter_insert:
        TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);

fail_bad_value:
        rte_free(flow);
        sfc_adapter_unlock(sa);

fail_no_mem:
        return NULL;
}

static int
sfc_flow_remove(struct sfc_adapter *sa,
                struct rte_flow *flow,
                struct rte_flow_error *error)
{
        int rc = 0;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (sa->state == SFC_ADAPTER_STARTED) {
                rc = sfc_flow_filter_remove(sa, flow);
                if (rc != 0)
                        rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                "Failed to destroy flow rule");
        }

        TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
        rte_free(flow);

        return rc;
}

static int
sfc_flow_destroy(struct rte_eth_dev *dev,
                 struct rte_flow *flow,
                 struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct rte_flow *flow_ptr;
        int rc = EINVAL;

        sfc_adapter_lock(sa);

        TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
                if (flow_ptr == flow)
                        rc = 0;
        }
        if (rc != 0) {
                rte_flow_error_set(error, rc,
                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                        "Failed to find flow rule to destroy");
                goto fail_bad_value;
        }

        rc = sfc_flow_remove(sa, flow, error);

fail_bad_value:
        sfc_adapter_unlock(sa);

        return -rc;
}

static int
sfc_flow_flush(struct rte_eth_dev *dev,
               struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct rte_flow *flow;
        int rc = 0;
        int ret = 0;

        sfc_adapter_lock(sa);

        while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
                rc = sfc_flow_remove(sa, flow, error);
                if (rc != 0)
                        ret = rc;
        }

        sfc_adapter_unlock(sa);

        return -ret;
}
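/*
 * Flow isolated mode (see rte_flow_isolate()): when enabled, the application
 * expects to receive only the traffic explicitly requested by flow rules.
 * The driver records the setting in port->isolated so that other parts of the
 * driver (e.g. default RX filter configuration) can take it into account.
 * The mode may only be changed while the port is closed, hence the adapter
 * state check below.
 */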
static int
sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
                 struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct sfc_port *port = &sa->port;
        int ret = 0;

        sfc_adapter_lock(sa);
        if (sa->state != SFC_ADAPTER_INITIALIZED) {
                rte_flow_error_set(error, EBUSY,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        NULL, "please close the port first");
                ret = -rte_errno;
        } else {
                port->isolated = (enable) ? B_TRUE : B_FALSE;
        }
        sfc_adapter_unlock(sa);

        return ret;
}

const struct rte_flow_ops sfc_flow_ops = {
        .validate = sfc_flow_validate,
        .create = sfc_flow_create,
        .destroy = sfc_flow_destroy,
        .flush = sfc_flow_flush,
        .query = NULL,
        .isolate = sfc_flow_isolate,
};

void
sfc_flow_init(struct sfc_adapter *sa)
{
        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_INIT(&sa->filter.flow_list);
}

void
sfc_flow_fini(struct sfc_adapter *sa)
{
        struct rte_flow *flow;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
                TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
                rte_free(flow);
        }
}

void
sfc_flow_stop(struct sfc_adapter *sa)
{
        struct rte_flow *flow;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
                sfc_flow_filter_remove(sa, flow);
}

int
sfc_flow_start(struct sfc_adapter *sa)
{
        struct rte_flow *flow;
        int rc = 0;

        sfc_log_init(sa, "entry");

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
                rc = sfc_flow_filter_insert(sa, flow);
                if (rc != 0)
                        goto fail_bad_flow;
        }

        sfc_log_init(sa, "done");

fail_bad_flow:
        return rc;
}