1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright(c) 2019-2020 Xilinx, Inc. 4 * Copyright(c) 2017-2019 Solarflare Communications Inc. 5 * 6 * This software was jointly developed between OKTET Labs (under contract 7 * for Solarflare) and Solarflare Communications, Inc. 8 */ 9 10 #include <rte_byteorder.h> 11 #include <rte_tailq.h> 12 #include <rte_common.h> 13 #include <rte_ethdev_driver.h> 14 #include <rte_ether.h> 15 #include <rte_flow.h> 16 #include <rte_flow_driver.h> 17 18 #include "efx.h" 19 20 #include "sfc.h" 21 #include "sfc_debug.h" 22 #include "sfc_rx.h" 23 #include "sfc_filter.h" 24 #include "sfc_flow.h" 25 #include "sfc_log.h" 26 #include "sfc_dp_rx.h" 27 28 struct sfc_flow_ops_by_spec { 29 sfc_flow_parse_cb_t *parse; 30 sfc_flow_verify_cb_t *verify; 31 sfc_flow_cleanup_cb_t *cleanup; 32 sfc_flow_insert_cb_t *insert; 33 sfc_flow_remove_cb_t *remove; 34 }; 35 36 static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter; 37 static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_mae; 38 static sfc_flow_insert_cb_t sfc_flow_filter_insert; 39 static sfc_flow_remove_cb_t sfc_flow_filter_remove; 40 41 static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = { 42 .parse = sfc_flow_parse_rte_to_filter, 43 .verify = NULL, 44 .cleanup = NULL, 45 .insert = sfc_flow_filter_insert, 46 .remove = sfc_flow_filter_remove, 47 }; 48 49 static const struct sfc_flow_ops_by_spec sfc_flow_ops_mae = { 50 .parse = sfc_flow_parse_rte_to_mae, 51 .verify = sfc_mae_flow_verify, 52 .cleanup = sfc_mae_flow_cleanup, 53 .insert = NULL, 54 .remove = NULL, 55 }; 56 57 static const struct sfc_flow_ops_by_spec * 58 sfc_flow_get_ops_by_spec(struct rte_flow *flow) 59 { 60 struct sfc_flow_spec *spec = &flow->spec; 61 const struct sfc_flow_ops_by_spec *ops = NULL; 62 63 switch (spec->type) { 64 case SFC_FLOW_SPEC_FILTER: 65 ops = &sfc_flow_ops_filter; 66 break; 67 case SFC_FLOW_SPEC_MAE: 68 ops = &sfc_flow_ops_mae; 69 break; 70 default: 71 SFC_ASSERT(false); 72 break; 73 } 74 75 return ops; 76 } 77 78 /* 79 * Currently, filter-based (VNIC) flow API is implemented in such a manner 80 * that each flow rule is converted to one or more hardware filters. 81 * All elements of flow rule (attributes, pattern items, actions) 82 * correspond to one or more fields in the efx_filter_spec_s structure 83 * that is responsible for the hardware filter. 84 * If some required field is unset in the flow rule, then a handful 85 * of filter copies will be created to cover all possible values 86 * of such a field. 87 */ 88 89 static sfc_flow_item_parse sfc_flow_parse_void; 90 static sfc_flow_item_parse sfc_flow_parse_eth; 91 static sfc_flow_item_parse sfc_flow_parse_vlan; 92 static sfc_flow_item_parse sfc_flow_parse_ipv4; 93 static sfc_flow_item_parse sfc_flow_parse_ipv6; 94 static sfc_flow_item_parse sfc_flow_parse_tcp; 95 static sfc_flow_item_parse sfc_flow_parse_udp; 96 static sfc_flow_item_parse sfc_flow_parse_vxlan; 97 static sfc_flow_item_parse sfc_flow_parse_geneve; 98 static sfc_flow_item_parse sfc_flow_parse_nvgre; 99 100 typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec, 101 unsigned int filters_count_for_one_val, 102 struct rte_flow_error *error); 103 104 typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match, 105 efx_filter_spec_t *spec, 106 struct sfc_filter *filter); 107 108 struct sfc_flow_copy_flag { 109 /* EFX filter specification match flag */ 110 efx_filter_match_flags_t flag; 111 /* Number of values of corresponding field */ 112 unsigned int vals_count; 113 /* Function to set values in specifications */ 114 sfc_flow_spec_set_vals *set_vals; 115 /* 116 * Function to check that the specification is suitable 117 * for adding this match flag 118 */ 119 sfc_flow_spec_check *spec_check; 120 }; 121 122 static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags; 123 static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags; 124 static sfc_flow_spec_set_vals sfc_flow_set_ethertypes; 125 static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags; 126 static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags; 127 static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag; 128 static sfc_flow_spec_check sfc_flow_check_outer_vid_flag; 129 130 static boolean_t 131 sfc_flow_is_zero(const uint8_t *buf, unsigned int size) 132 { 133 uint8_t sum = 0; 134 unsigned int i; 135 136 for (i = 0; i < size; i++) 137 sum |= buf[i]; 138 139 return (sum == 0) ? B_TRUE : B_FALSE; 140 } 141 142 /* 143 * Validate item and prepare structures spec and mask for parsing 144 */ 145 int 146 sfc_flow_parse_init(const struct rte_flow_item *item, 147 const void **spec_ptr, 148 const void **mask_ptr, 149 const void *supp_mask, 150 const void *def_mask, 151 unsigned int size, 152 struct rte_flow_error *error) 153 { 154 const uint8_t *spec; 155 const uint8_t *mask; 156 const uint8_t *last; 157 uint8_t supp; 158 unsigned int i; 159 160 if (item == NULL) { 161 rte_flow_error_set(error, EINVAL, 162 RTE_FLOW_ERROR_TYPE_ITEM, NULL, 163 "NULL item"); 164 return -rte_errno; 165 } 166 167 if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) { 168 rte_flow_error_set(error, EINVAL, 169 RTE_FLOW_ERROR_TYPE_ITEM, item, 170 "Mask or last is set without spec"); 171 return -rte_errno; 172 } 173 174 /* 175 * If "mask" is not set, default mask is used, 176 * but if default mask is NULL, "mask" should be set 177 */ 178 if (item->mask == NULL) { 179 if (def_mask == NULL) { 180 rte_flow_error_set(error, EINVAL, 181 RTE_FLOW_ERROR_TYPE_ITEM, NULL, 182 "Mask should be specified"); 183 return -rte_errno; 184 } 185 186 mask = def_mask; 187 } else { 188 mask = item->mask; 189 } 190 191 spec = item->spec; 192 last = item->last; 193 194 if (spec == NULL) 195 goto exit; 196 197 /* 198 * If field values in "last" are either 0 or equal to the corresponding 199 * values in "spec" then they are ignored 200 */ 201 if (last != NULL && 202 !sfc_flow_is_zero(last, size) && 203 memcmp(last, spec, size) != 0) { 204 rte_flow_error_set(error, ENOTSUP, 205 RTE_FLOW_ERROR_TYPE_ITEM, item, 206 "Ranging is not supported"); 207 return -rte_errno; 208 } 209 210 if (supp_mask == NULL) { 211 rte_flow_error_set(error, EINVAL, 212 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 213 "Supported mask for item should be specified"); 214 return -rte_errno; 215 } 216 217 /* Check that mask does not ask for more match than supp_mask */ 218 for (i = 0; i < size; i++) { 219 supp = ((const uint8_t *)supp_mask)[i]; 220 221 if (~supp & mask[i]) { 222 rte_flow_error_set(error, ENOTSUP, 223 RTE_FLOW_ERROR_TYPE_ITEM, item, 224 "Item's field is not supported"); 225 return -rte_errno; 226 } 227 } 228 229 exit: 230 *spec_ptr = spec; 231 *mask_ptr = mask; 232 return 0; 233 } 234 235 /* 236 * Protocol parsers. 237 * Masking is not supported, so masks in items should be either 238 * full or empty (zeroed) and set only for supported fields which 239 * are specified in the supp_mask. 240 */ 241 242 static int 243 sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item, 244 __rte_unused struct sfc_flow_parse_ctx *parse_ctx, 245 __rte_unused struct rte_flow_error *error) 246 { 247 return 0; 248 } 249 250 /** 251 * Convert Ethernet item to EFX filter specification. 252 * 253 * @param item[in] 254 * Item specification. Outer frame specification may only comprise 255 * source/destination addresses and Ethertype field. 256 * Inner frame specification may contain destination address only. 257 * There is support for individual/group mask as well as for empty and full. 258 * If the mask is NULL, default mask will be used. Ranging is not supported. 259 * @param efx_spec[in, out] 260 * EFX filter specification to update. 261 * @param[out] error 262 * Perform verbose error reporting if not NULL. 263 */ 264 static int 265 sfc_flow_parse_eth(const struct rte_flow_item *item, 266 struct sfc_flow_parse_ctx *parse_ctx, 267 struct rte_flow_error *error) 268 { 269 int rc; 270 efx_filter_spec_t *efx_spec = parse_ctx->filter; 271 const struct rte_flow_item_eth *spec = NULL; 272 const struct rte_flow_item_eth *mask = NULL; 273 const struct rte_flow_item_eth supp_mask = { 274 .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 275 .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 276 .type = 0xffff, 277 }; 278 const struct rte_flow_item_eth ifrm_supp_mask = { 279 .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 280 }; 281 const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = { 282 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 283 }; 284 const struct rte_flow_item_eth *supp_mask_p; 285 const struct rte_flow_item_eth *def_mask_p; 286 uint8_t *loc_mac = NULL; 287 boolean_t is_ifrm = (efx_spec->efs_encap_type != 288 EFX_TUNNEL_PROTOCOL_NONE); 289 290 if (is_ifrm) { 291 supp_mask_p = &ifrm_supp_mask; 292 def_mask_p = &ifrm_supp_mask; 293 loc_mac = efx_spec->efs_ifrm_loc_mac; 294 } else { 295 supp_mask_p = &supp_mask; 296 def_mask_p = &rte_flow_item_eth_mask; 297 loc_mac = efx_spec->efs_loc_mac; 298 } 299 300 rc = sfc_flow_parse_init(item, 301 (const void **)&spec, 302 (const void **)&mask, 303 supp_mask_p, def_mask_p, 304 sizeof(struct rte_flow_item_eth), 305 error); 306 if (rc != 0) 307 return rc; 308 309 /* If "spec" is not set, could be any Ethernet */ 310 if (spec == NULL) 311 return 0; 312 313 if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) { 314 efx_spec->efs_match_flags |= is_ifrm ? 315 EFX_FILTER_MATCH_IFRM_LOC_MAC : 316 EFX_FILTER_MATCH_LOC_MAC; 317 rte_memcpy(loc_mac, spec->dst.addr_bytes, 318 EFX_MAC_ADDR_LEN); 319 } else if (memcmp(mask->dst.addr_bytes, ig_mask, 320 EFX_MAC_ADDR_LEN) == 0) { 321 if (rte_is_unicast_ether_addr(&spec->dst)) 322 efx_spec->efs_match_flags |= is_ifrm ? 323 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST : 324 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST; 325 else 326 efx_spec->efs_match_flags |= is_ifrm ? 327 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST : 328 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST; 329 } else if (!rte_is_zero_ether_addr(&mask->dst)) { 330 goto fail_bad_mask; 331 } 332 333 /* 334 * ifrm_supp_mask ensures that the source address and 335 * ethertype masks are equal to zero in inner frame, 336 * so these fields are filled in only for the outer frame 337 */ 338 if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) { 339 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC; 340 rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes, 341 EFX_MAC_ADDR_LEN); 342 } else if (!rte_is_zero_ether_addr(&mask->src)) { 343 goto fail_bad_mask; 344 } 345 346 /* 347 * Ether type is in big-endian byte order in item and 348 * in little-endian in efx_spec, so byte swap is used 349 */ 350 if (mask->type == supp_mask.type) { 351 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; 352 efx_spec->efs_ether_type = rte_bswap16(spec->type); 353 } else if (mask->type != 0) { 354 goto fail_bad_mask; 355 } 356 357 return 0; 358 359 fail_bad_mask: 360 rte_flow_error_set(error, EINVAL, 361 RTE_FLOW_ERROR_TYPE_ITEM, item, 362 "Bad mask in the ETH pattern item"); 363 return -rte_errno; 364 } 365 366 /** 367 * Convert VLAN item to EFX filter specification. 368 * 369 * @param item[in] 370 * Item specification. Only VID field is supported. 371 * The mask can not be NULL. Ranging is not supported. 372 * @param efx_spec[in, out] 373 * EFX filter specification to update. 374 * @param[out] error 375 * Perform verbose error reporting if not NULL. 376 */ 377 static int 378 sfc_flow_parse_vlan(const struct rte_flow_item *item, 379 struct sfc_flow_parse_ctx *parse_ctx, 380 struct rte_flow_error *error) 381 { 382 int rc; 383 uint16_t vid; 384 efx_filter_spec_t *efx_spec = parse_ctx->filter; 385 const struct rte_flow_item_vlan *spec = NULL; 386 const struct rte_flow_item_vlan *mask = NULL; 387 const struct rte_flow_item_vlan supp_mask = { 388 .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX), 389 .inner_type = RTE_BE16(0xffff), 390 }; 391 392 rc = sfc_flow_parse_init(item, 393 (const void **)&spec, 394 (const void **)&mask, 395 &supp_mask, 396 NULL, 397 sizeof(struct rte_flow_item_vlan), 398 error); 399 if (rc != 0) 400 return rc; 401 402 /* 403 * VID is in big-endian byte order in item and 404 * in little-endian in efx_spec, so byte swap is used. 405 * If two VLAN items are included, the first matches 406 * the outer tag and the next matches the inner tag. 407 */ 408 if (mask->tci == supp_mask.tci) { 409 /* Apply mask to keep VID only */ 410 vid = rte_bswap16(spec->tci & mask->tci); 411 412 if (!(efx_spec->efs_match_flags & 413 EFX_FILTER_MATCH_OUTER_VID)) { 414 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID; 415 efx_spec->efs_outer_vid = vid; 416 } else if (!(efx_spec->efs_match_flags & 417 EFX_FILTER_MATCH_INNER_VID)) { 418 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID; 419 efx_spec->efs_inner_vid = vid; 420 } else { 421 rte_flow_error_set(error, EINVAL, 422 RTE_FLOW_ERROR_TYPE_ITEM, item, 423 "More than two VLAN items"); 424 return -rte_errno; 425 } 426 } else { 427 rte_flow_error_set(error, EINVAL, 428 RTE_FLOW_ERROR_TYPE_ITEM, item, 429 "VLAN ID in TCI match is required"); 430 return -rte_errno; 431 } 432 433 if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) { 434 rte_flow_error_set(error, EINVAL, 435 RTE_FLOW_ERROR_TYPE_ITEM, item, 436 "VLAN TPID matching is not supported"); 437 return -rte_errno; 438 } 439 if (mask->inner_type == supp_mask.inner_type) { 440 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; 441 efx_spec->efs_ether_type = rte_bswap16(spec->inner_type); 442 } else if (mask->inner_type) { 443 rte_flow_error_set(error, EINVAL, 444 RTE_FLOW_ERROR_TYPE_ITEM, item, 445 "Bad mask for VLAN inner_type"); 446 return -rte_errno; 447 } 448 449 return 0; 450 } 451 452 /** 453 * Convert IPv4 item to EFX filter specification. 454 * 455 * @param item[in] 456 * Item specification. Only source and destination addresses and 457 * protocol fields are supported. If the mask is NULL, default 458 * mask will be used. Ranging is not supported. 459 * @param efx_spec[in, out] 460 * EFX filter specification to update. 461 * @param[out] error 462 * Perform verbose error reporting if not NULL. 463 */ 464 static int 465 sfc_flow_parse_ipv4(const struct rte_flow_item *item, 466 struct sfc_flow_parse_ctx *parse_ctx, 467 struct rte_flow_error *error) 468 { 469 int rc; 470 efx_filter_spec_t *efx_spec = parse_ctx->filter; 471 const struct rte_flow_item_ipv4 *spec = NULL; 472 const struct rte_flow_item_ipv4 *mask = NULL; 473 const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4); 474 const struct rte_flow_item_ipv4 supp_mask = { 475 .hdr = { 476 .src_addr = 0xffffffff, 477 .dst_addr = 0xffffffff, 478 .next_proto_id = 0xff, 479 } 480 }; 481 482 rc = sfc_flow_parse_init(item, 483 (const void **)&spec, 484 (const void **)&mask, 485 &supp_mask, 486 &rte_flow_item_ipv4_mask, 487 sizeof(struct rte_flow_item_ipv4), 488 error); 489 if (rc != 0) 490 return rc; 491 492 /* 493 * Filtering by IPv4 source and destination addresses requires 494 * the appropriate ETHER_TYPE in hardware filters 495 */ 496 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) { 497 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; 498 efx_spec->efs_ether_type = ether_type_ipv4; 499 } else if (efx_spec->efs_ether_type != ether_type_ipv4) { 500 rte_flow_error_set(error, EINVAL, 501 RTE_FLOW_ERROR_TYPE_ITEM, item, 502 "Ethertype in pattern with IPV4 item should be appropriate"); 503 return -rte_errno; 504 } 505 506 if (spec == NULL) 507 return 0; 508 509 /* 510 * IPv4 addresses are in big-endian byte order in item and in 511 * efx_spec 512 */ 513 if (mask->hdr.src_addr == supp_mask.hdr.src_addr) { 514 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST; 515 efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr; 516 } else if (mask->hdr.src_addr != 0) { 517 goto fail_bad_mask; 518 } 519 520 if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) { 521 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST; 522 efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr; 523 } else if (mask->hdr.dst_addr != 0) { 524 goto fail_bad_mask; 525 } 526 527 if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) { 528 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 529 efx_spec->efs_ip_proto = spec->hdr.next_proto_id; 530 } else if (mask->hdr.next_proto_id != 0) { 531 goto fail_bad_mask; 532 } 533 534 return 0; 535 536 fail_bad_mask: 537 rte_flow_error_set(error, EINVAL, 538 RTE_FLOW_ERROR_TYPE_ITEM, item, 539 "Bad mask in the IPV4 pattern item"); 540 return -rte_errno; 541 } 542 543 /** 544 * Convert IPv6 item to EFX filter specification. 545 * 546 * @param item[in] 547 * Item specification. Only source and destination addresses and 548 * next header fields are supported. If the mask is NULL, default 549 * mask will be used. Ranging is not supported. 550 * @param efx_spec[in, out] 551 * EFX filter specification to update. 552 * @param[out] error 553 * Perform verbose error reporting if not NULL. 554 */ 555 static int 556 sfc_flow_parse_ipv6(const struct rte_flow_item *item, 557 struct sfc_flow_parse_ctx *parse_ctx, 558 struct rte_flow_error *error) 559 { 560 int rc; 561 efx_filter_spec_t *efx_spec = parse_ctx->filter; 562 const struct rte_flow_item_ipv6 *spec = NULL; 563 const struct rte_flow_item_ipv6 *mask = NULL; 564 const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6); 565 const struct rte_flow_item_ipv6 supp_mask = { 566 .hdr = { 567 .src_addr = { 0xff, 0xff, 0xff, 0xff, 568 0xff, 0xff, 0xff, 0xff, 569 0xff, 0xff, 0xff, 0xff, 570 0xff, 0xff, 0xff, 0xff }, 571 .dst_addr = { 0xff, 0xff, 0xff, 0xff, 572 0xff, 0xff, 0xff, 0xff, 573 0xff, 0xff, 0xff, 0xff, 574 0xff, 0xff, 0xff, 0xff }, 575 .proto = 0xff, 576 } 577 }; 578 579 rc = sfc_flow_parse_init(item, 580 (const void **)&spec, 581 (const void **)&mask, 582 &supp_mask, 583 &rte_flow_item_ipv6_mask, 584 sizeof(struct rte_flow_item_ipv6), 585 error); 586 if (rc != 0) 587 return rc; 588 589 /* 590 * Filtering by IPv6 source and destination addresses requires 591 * the appropriate ETHER_TYPE in hardware filters 592 */ 593 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) { 594 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; 595 efx_spec->efs_ether_type = ether_type_ipv6; 596 } else if (efx_spec->efs_ether_type != ether_type_ipv6) { 597 rte_flow_error_set(error, EINVAL, 598 RTE_FLOW_ERROR_TYPE_ITEM, item, 599 "Ethertype in pattern with IPV6 item should be appropriate"); 600 return -rte_errno; 601 } 602 603 if (spec == NULL) 604 return 0; 605 606 /* 607 * IPv6 addresses are in big-endian byte order in item and in 608 * efx_spec 609 */ 610 if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr, 611 sizeof(mask->hdr.src_addr)) == 0) { 612 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST; 613 614 RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) != 615 sizeof(spec->hdr.src_addr)); 616 rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr, 617 sizeof(efx_spec->efs_rem_host)); 618 } else if (!sfc_flow_is_zero(mask->hdr.src_addr, 619 sizeof(mask->hdr.src_addr))) { 620 goto fail_bad_mask; 621 } 622 623 if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr, 624 sizeof(mask->hdr.dst_addr)) == 0) { 625 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST; 626 627 RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) != 628 sizeof(spec->hdr.dst_addr)); 629 rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr, 630 sizeof(efx_spec->efs_loc_host)); 631 } else if (!sfc_flow_is_zero(mask->hdr.dst_addr, 632 sizeof(mask->hdr.dst_addr))) { 633 goto fail_bad_mask; 634 } 635 636 if (mask->hdr.proto == supp_mask.hdr.proto) { 637 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 638 efx_spec->efs_ip_proto = spec->hdr.proto; 639 } else if (mask->hdr.proto != 0) { 640 goto fail_bad_mask; 641 } 642 643 return 0; 644 645 fail_bad_mask: 646 rte_flow_error_set(error, EINVAL, 647 RTE_FLOW_ERROR_TYPE_ITEM, item, 648 "Bad mask in the IPV6 pattern item"); 649 return -rte_errno; 650 } 651 652 /** 653 * Convert TCP item to EFX filter specification. 654 * 655 * @param item[in] 656 * Item specification. Only source and destination ports fields 657 * are supported. If the mask is NULL, default mask will be used. 658 * Ranging is not supported. 659 * @param efx_spec[in, out] 660 * EFX filter specification to update. 661 * @param[out] error 662 * Perform verbose error reporting if not NULL. 663 */ 664 static int 665 sfc_flow_parse_tcp(const struct rte_flow_item *item, 666 struct sfc_flow_parse_ctx *parse_ctx, 667 struct rte_flow_error *error) 668 { 669 int rc; 670 efx_filter_spec_t *efx_spec = parse_ctx->filter; 671 const struct rte_flow_item_tcp *spec = NULL; 672 const struct rte_flow_item_tcp *mask = NULL; 673 const struct rte_flow_item_tcp supp_mask = { 674 .hdr = { 675 .src_port = 0xffff, 676 .dst_port = 0xffff, 677 } 678 }; 679 680 rc = sfc_flow_parse_init(item, 681 (const void **)&spec, 682 (const void **)&mask, 683 &supp_mask, 684 &rte_flow_item_tcp_mask, 685 sizeof(struct rte_flow_item_tcp), 686 error); 687 if (rc != 0) 688 return rc; 689 690 /* 691 * Filtering by TCP source and destination ports requires 692 * the appropriate IP_PROTO in hardware filters 693 */ 694 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) { 695 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 696 efx_spec->efs_ip_proto = EFX_IPPROTO_TCP; 697 } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) { 698 rte_flow_error_set(error, EINVAL, 699 RTE_FLOW_ERROR_TYPE_ITEM, item, 700 "IP proto in pattern with TCP item should be appropriate"); 701 return -rte_errno; 702 } 703 704 if (spec == NULL) 705 return 0; 706 707 /* 708 * Source and destination ports are in big-endian byte order in item and 709 * in little-endian in efx_spec, so byte swap is used 710 */ 711 if (mask->hdr.src_port == supp_mask.hdr.src_port) { 712 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT; 713 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port); 714 } else if (mask->hdr.src_port != 0) { 715 goto fail_bad_mask; 716 } 717 718 if (mask->hdr.dst_port == supp_mask.hdr.dst_port) { 719 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT; 720 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port); 721 } else if (mask->hdr.dst_port != 0) { 722 goto fail_bad_mask; 723 } 724 725 return 0; 726 727 fail_bad_mask: 728 rte_flow_error_set(error, EINVAL, 729 RTE_FLOW_ERROR_TYPE_ITEM, item, 730 "Bad mask in the TCP pattern item"); 731 return -rte_errno; 732 } 733 734 /** 735 * Convert UDP item to EFX filter specification. 736 * 737 * @param item[in] 738 * Item specification. Only source and destination ports fields 739 * are supported. If the mask is NULL, default mask will be used. 740 * Ranging is not supported. 741 * @param efx_spec[in, out] 742 * EFX filter specification to update. 743 * @param[out] error 744 * Perform verbose error reporting if not NULL. 745 */ 746 static int 747 sfc_flow_parse_udp(const struct rte_flow_item *item, 748 struct sfc_flow_parse_ctx *parse_ctx, 749 struct rte_flow_error *error) 750 { 751 int rc; 752 efx_filter_spec_t *efx_spec = parse_ctx->filter; 753 const struct rte_flow_item_udp *spec = NULL; 754 const struct rte_flow_item_udp *mask = NULL; 755 const struct rte_flow_item_udp supp_mask = { 756 .hdr = { 757 .src_port = 0xffff, 758 .dst_port = 0xffff, 759 } 760 }; 761 762 rc = sfc_flow_parse_init(item, 763 (const void **)&spec, 764 (const void **)&mask, 765 &supp_mask, 766 &rte_flow_item_udp_mask, 767 sizeof(struct rte_flow_item_udp), 768 error); 769 if (rc != 0) 770 return rc; 771 772 /* 773 * Filtering by UDP source and destination ports requires 774 * the appropriate IP_PROTO in hardware filters 775 */ 776 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) { 777 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 778 efx_spec->efs_ip_proto = EFX_IPPROTO_UDP; 779 } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) { 780 rte_flow_error_set(error, EINVAL, 781 RTE_FLOW_ERROR_TYPE_ITEM, item, 782 "IP proto in pattern with UDP item should be appropriate"); 783 return -rte_errno; 784 } 785 786 if (spec == NULL) 787 return 0; 788 789 /* 790 * Source and destination ports are in big-endian byte order in item and 791 * in little-endian in efx_spec, so byte swap is used 792 */ 793 if (mask->hdr.src_port == supp_mask.hdr.src_port) { 794 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT; 795 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port); 796 } else if (mask->hdr.src_port != 0) { 797 goto fail_bad_mask; 798 } 799 800 if (mask->hdr.dst_port == supp_mask.hdr.dst_port) { 801 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT; 802 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port); 803 } else if (mask->hdr.dst_port != 0) { 804 goto fail_bad_mask; 805 } 806 807 return 0; 808 809 fail_bad_mask: 810 rte_flow_error_set(error, EINVAL, 811 RTE_FLOW_ERROR_TYPE_ITEM, item, 812 "Bad mask in the UDP pattern item"); 813 return -rte_errno; 814 } 815 816 /* 817 * Filters for encapsulated packets match based on the EtherType and IP 818 * protocol in the outer frame. 819 */ 820 static int 821 sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item, 822 efx_filter_spec_t *efx_spec, 823 uint8_t ip_proto, 824 struct rte_flow_error *error) 825 { 826 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) { 827 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; 828 efx_spec->efs_ip_proto = ip_proto; 829 } else if (efx_spec->efs_ip_proto != ip_proto) { 830 switch (ip_proto) { 831 case EFX_IPPROTO_UDP: 832 rte_flow_error_set(error, EINVAL, 833 RTE_FLOW_ERROR_TYPE_ITEM, item, 834 "Outer IP header protocol must be UDP " 835 "in VxLAN/GENEVE pattern"); 836 return -rte_errno; 837 838 case EFX_IPPROTO_GRE: 839 rte_flow_error_set(error, EINVAL, 840 RTE_FLOW_ERROR_TYPE_ITEM, item, 841 "Outer IP header protocol must be GRE " 842 "in NVGRE pattern"); 843 return -rte_errno; 844 845 default: 846 rte_flow_error_set(error, EINVAL, 847 RTE_FLOW_ERROR_TYPE_ITEM, item, 848 "Only VxLAN/GENEVE/NVGRE tunneling patterns " 849 "are supported"); 850 return -rte_errno; 851 } 852 } 853 854 if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE && 855 efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 && 856 efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) { 857 rte_flow_error_set(error, EINVAL, 858 RTE_FLOW_ERROR_TYPE_ITEM, item, 859 "Outer frame EtherType in pattern with tunneling " 860 "must be IPv4 or IPv6"); 861 return -rte_errno; 862 } 863 864 return 0; 865 } 866 867 static int 868 sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec, 869 const uint8_t *vni_or_vsid_val, 870 const uint8_t *vni_or_vsid_mask, 871 const struct rte_flow_item *item, 872 struct rte_flow_error *error) 873 { 874 const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = { 875 0xff, 0xff, 0xff 876 }; 877 878 if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask, 879 EFX_VNI_OR_VSID_LEN) == 0) { 880 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID; 881 rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val, 882 EFX_VNI_OR_VSID_LEN); 883 } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) { 884 rte_flow_error_set(error, EINVAL, 885 RTE_FLOW_ERROR_TYPE_ITEM, item, 886 "Unsupported VNI/VSID mask"); 887 return -rte_errno; 888 } 889 890 return 0; 891 } 892 893 /** 894 * Convert VXLAN item to EFX filter specification. 895 * 896 * @param item[in] 897 * Item specification. Only VXLAN network identifier field is supported. 898 * If the mask is NULL, default mask will be used. 899 * Ranging is not supported. 900 * @param efx_spec[in, out] 901 * EFX filter specification to update. 902 * @param[out] error 903 * Perform verbose error reporting if not NULL. 904 */ 905 static int 906 sfc_flow_parse_vxlan(const struct rte_flow_item *item, 907 struct sfc_flow_parse_ctx *parse_ctx, 908 struct rte_flow_error *error) 909 { 910 int rc; 911 efx_filter_spec_t *efx_spec = parse_ctx->filter; 912 const struct rte_flow_item_vxlan *spec = NULL; 913 const struct rte_flow_item_vxlan *mask = NULL; 914 const struct rte_flow_item_vxlan supp_mask = { 915 .vni = { 0xff, 0xff, 0xff } 916 }; 917 918 rc = sfc_flow_parse_init(item, 919 (const void **)&spec, 920 (const void **)&mask, 921 &supp_mask, 922 &rte_flow_item_vxlan_mask, 923 sizeof(struct rte_flow_item_vxlan), 924 error); 925 if (rc != 0) 926 return rc; 927 928 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec, 929 EFX_IPPROTO_UDP, error); 930 if (rc != 0) 931 return rc; 932 933 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN; 934 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; 935 936 if (spec == NULL) 937 return 0; 938 939 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni, 940 mask->vni, item, error); 941 942 return rc; 943 } 944 945 /** 946 * Convert GENEVE item to EFX filter specification. 947 * 948 * @param item[in] 949 * Item specification. Only Virtual Network Identifier and protocol type 950 * fields are supported. But protocol type can be only Ethernet (0x6558). 951 * If the mask is NULL, default mask will be used. 952 * Ranging is not supported. 953 * @param efx_spec[in, out] 954 * EFX filter specification to update. 955 * @param[out] error 956 * Perform verbose error reporting if not NULL. 957 */ 958 static int 959 sfc_flow_parse_geneve(const struct rte_flow_item *item, 960 struct sfc_flow_parse_ctx *parse_ctx, 961 struct rte_flow_error *error) 962 { 963 int rc; 964 efx_filter_spec_t *efx_spec = parse_ctx->filter; 965 const struct rte_flow_item_geneve *spec = NULL; 966 const struct rte_flow_item_geneve *mask = NULL; 967 const struct rte_flow_item_geneve supp_mask = { 968 .protocol = RTE_BE16(0xffff), 969 .vni = { 0xff, 0xff, 0xff } 970 }; 971 972 rc = sfc_flow_parse_init(item, 973 (const void **)&spec, 974 (const void **)&mask, 975 &supp_mask, 976 &rte_flow_item_geneve_mask, 977 sizeof(struct rte_flow_item_geneve), 978 error); 979 if (rc != 0) 980 return rc; 981 982 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec, 983 EFX_IPPROTO_UDP, error); 984 if (rc != 0) 985 return rc; 986 987 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE; 988 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; 989 990 if (spec == NULL) 991 return 0; 992 993 if (mask->protocol == supp_mask.protocol) { 994 if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) { 995 rte_flow_error_set(error, EINVAL, 996 RTE_FLOW_ERROR_TYPE_ITEM, item, 997 "GENEVE encap. protocol must be Ethernet " 998 "(0x6558) in the GENEVE pattern item"); 999 return -rte_errno; 1000 } 1001 } else if (mask->protocol != 0) { 1002 rte_flow_error_set(error, EINVAL, 1003 RTE_FLOW_ERROR_TYPE_ITEM, item, 1004 "Unsupported mask for GENEVE encap. protocol"); 1005 return -rte_errno; 1006 } 1007 1008 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni, 1009 mask->vni, item, error); 1010 1011 return rc; 1012 } 1013 1014 /** 1015 * Convert NVGRE item to EFX filter specification. 1016 * 1017 * @param item[in] 1018 * Item specification. Only virtual subnet ID field is supported. 1019 * If the mask is NULL, default mask will be used. 1020 * Ranging is not supported. 1021 * @param efx_spec[in, out] 1022 * EFX filter specification to update. 1023 * @param[out] error 1024 * Perform verbose error reporting if not NULL. 1025 */ 1026 static int 1027 sfc_flow_parse_nvgre(const struct rte_flow_item *item, 1028 struct sfc_flow_parse_ctx *parse_ctx, 1029 struct rte_flow_error *error) 1030 { 1031 int rc; 1032 efx_filter_spec_t *efx_spec = parse_ctx->filter; 1033 const struct rte_flow_item_nvgre *spec = NULL; 1034 const struct rte_flow_item_nvgre *mask = NULL; 1035 const struct rte_flow_item_nvgre supp_mask = { 1036 .tni = { 0xff, 0xff, 0xff } 1037 }; 1038 1039 rc = sfc_flow_parse_init(item, 1040 (const void **)&spec, 1041 (const void **)&mask, 1042 &supp_mask, 1043 &rte_flow_item_nvgre_mask, 1044 sizeof(struct rte_flow_item_nvgre), 1045 error); 1046 if (rc != 0) 1047 return rc; 1048 1049 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec, 1050 EFX_IPPROTO_GRE, error); 1051 if (rc != 0) 1052 return rc; 1053 1054 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE; 1055 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; 1056 1057 if (spec == NULL) 1058 return 0; 1059 1060 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni, 1061 mask->tni, item, error); 1062 1063 return rc; 1064 } 1065 1066 static const struct sfc_flow_item sfc_flow_items[] = { 1067 { 1068 .type = RTE_FLOW_ITEM_TYPE_VOID, 1069 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER, 1070 .layer = SFC_FLOW_ITEM_ANY_LAYER, 1071 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, 1072 .parse = sfc_flow_parse_void, 1073 }, 1074 { 1075 .type = RTE_FLOW_ITEM_TYPE_ETH, 1076 .prev_layer = SFC_FLOW_ITEM_START_LAYER, 1077 .layer = SFC_FLOW_ITEM_L2, 1078 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, 1079 .parse = sfc_flow_parse_eth, 1080 }, 1081 { 1082 .type = RTE_FLOW_ITEM_TYPE_VLAN, 1083 .prev_layer = SFC_FLOW_ITEM_L2, 1084 .layer = SFC_FLOW_ITEM_L2, 1085 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, 1086 .parse = sfc_flow_parse_vlan, 1087 }, 1088 { 1089 .type = RTE_FLOW_ITEM_TYPE_IPV4, 1090 .prev_layer = SFC_FLOW_ITEM_L2, 1091 .layer = SFC_FLOW_ITEM_L3, 1092 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, 1093 .parse = sfc_flow_parse_ipv4, 1094 }, 1095 { 1096 .type = RTE_FLOW_ITEM_TYPE_IPV6, 1097 .prev_layer = SFC_FLOW_ITEM_L2, 1098 .layer = SFC_FLOW_ITEM_L3, 1099 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, 1100 .parse = sfc_flow_parse_ipv6, 1101 }, 1102 { 1103 .type = RTE_FLOW_ITEM_TYPE_TCP, 1104 .prev_layer = SFC_FLOW_ITEM_L3, 1105 .layer = SFC_FLOW_ITEM_L4, 1106 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, 1107 .parse = sfc_flow_parse_tcp, 1108 }, 1109 { 1110 .type = RTE_FLOW_ITEM_TYPE_UDP, 1111 .prev_layer = SFC_FLOW_ITEM_L3, 1112 .layer = SFC_FLOW_ITEM_L4, 1113 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, 1114 .parse = sfc_flow_parse_udp, 1115 }, 1116 { 1117 .type = RTE_FLOW_ITEM_TYPE_VXLAN, 1118 .prev_layer = SFC_FLOW_ITEM_L4, 1119 .layer = SFC_FLOW_ITEM_START_LAYER, 1120 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, 1121 .parse = sfc_flow_parse_vxlan, 1122 }, 1123 { 1124 .type = RTE_FLOW_ITEM_TYPE_GENEVE, 1125 .prev_layer = SFC_FLOW_ITEM_L4, 1126 .layer = SFC_FLOW_ITEM_START_LAYER, 1127 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, 1128 .parse = sfc_flow_parse_geneve, 1129 }, 1130 { 1131 .type = RTE_FLOW_ITEM_TYPE_NVGRE, 1132 .prev_layer = SFC_FLOW_ITEM_L3, 1133 .layer = SFC_FLOW_ITEM_START_LAYER, 1134 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, 1135 .parse = sfc_flow_parse_nvgre, 1136 }, 1137 }; 1138 1139 /* 1140 * Protocol-independent flow API support 1141 */ 1142 static int 1143 sfc_flow_parse_attr(struct sfc_adapter *sa, 1144 const struct rte_flow_attr *attr, 1145 struct rte_flow *flow, 1146 struct rte_flow_error *error) 1147 { 1148 struct sfc_flow_spec *spec = &flow->spec; 1149 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 1150 struct sfc_flow_spec_mae *spec_mae = &spec->mae; 1151 struct sfc_mae *mae = &sa->mae; 1152 1153 if (attr == NULL) { 1154 rte_flow_error_set(error, EINVAL, 1155 RTE_FLOW_ERROR_TYPE_ATTR, NULL, 1156 "NULL attribute"); 1157 return -rte_errno; 1158 } 1159 if (attr->group != 0) { 1160 rte_flow_error_set(error, ENOTSUP, 1161 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr, 1162 "Groups are not supported"); 1163 return -rte_errno; 1164 } 1165 if (attr->egress != 0) { 1166 rte_flow_error_set(error, ENOTSUP, 1167 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr, 1168 "Egress is not supported"); 1169 return -rte_errno; 1170 } 1171 if (attr->ingress == 0) { 1172 rte_flow_error_set(error, ENOTSUP, 1173 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr, 1174 "Ingress is compulsory"); 1175 return -rte_errno; 1176 } 1177 if (attr->transfer == 0) { 1178 if (attr->priority != 0) { 1179 rte_flow_error_set(error, ENOTSUP, 1180 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, 1181 attr, "Priorities are unsupported"); 1182 return -rte_errno; 1183 } 1184 spec->type = SFC_FLOW_SPEC_FILTER; 1185 spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX; 1186 spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT; 1187 spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL; 1188 } else { 1189 if (mae->status != SFC_MAE_STATUS_SUPPORTED) { 1190 rte_flow_error_set(error, ENOTSUP, 1191 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, 1192 attr, "Transfer is not supported"); 1193 return -rte_errno; 1194 } 1195 if (attr->priority > mae->nb_action_rule_prios_max) { 1196 rte_flow_error_set(error, ENOTSUP, 1197 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, 1198 attr, "Unsupported priority level"); 1199 return -rte_errno; 1200 } 1201 spec->type = SFC_FLOW_SPEC_MAE; 1202 spec_mae->priority = attr->priority; 1203 spec_mae->match_spec = NULL; 1204 spec_mae->action_set = NULL; 1205 } 1206 1207 return 0; 1208 } 1209 1210 /* Get item from array sfc_flow_items */ 1211 static const struct sfc_flow_item * 1212 sfc_flow_get_item(const struct sfc_flow_item *items, 1213 unsigned int nb_items, 1214 enum rte_flow_item_type type) 1215 { 1216 unsigned int i; 1217 1218 for (i = 0; i < nb_items; i++) 1219 if (items[i].type == type) 1220 return &items[i]; 1221 1222 return NULL; 1223 } 1224 1225 int 1226 sfc_flow_parse_pattern(const struct sfc_flow_item *flow_items, 1227 unsigned int nb_flow_items, 1228 const struct rte_flow_item pattern[], 1229 struct sfc_flow_parse_ctx *parse_ctx, 1230 struct rte_flow_error *error) 1231 { 1232 int rc; 1233 unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER; 1234 boolean_t is_ifrm = B_FALSE; 1235 const struct sfc_flow_item *item; 1236 1237 if (pattern == NULL) { 1238 rte_flow_error_set(error, EINVAL, 1239 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL, 1240 "NULL pattern"); 1241 return -rte_errno; 1242 } 1243 1244 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) { 1245 item = sfc_flow_get_item(flow_items, nb_flow_items, 1246 pattern->type); 1247 if (item == NULL) { 1248 rte_flow_error_set(error, ENOTSUP, 1249 RTE_FLOW_ERROR_TYPE_ITEM, pattern, 1250 "Unsupported pattern item"); 1251 return -rte_errno; 1252 } 1253 1254 /* 1255 * Omitting one or several protocol layers at the beginning 1256 * of pattern is supported 1257 */ 1258 if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER && 1259 prev_layer != SFC_FLOW_ITEM_ANY_LAYER && 1260 item->prev_layer != prev_layer) { 1261 rte_flow_error_set(error, ENOTSUP, 1262 RTE_FLOW_ERROR_TYPE_ITEM, pattern, 1263 "Unexpected sequence of pattern items"); 1264 return -rte_errno; 1265 } 1266 1267 /* 1268 * Allow only VOID and ETH pattern items in the inner frame. 1269 * Also check that there is only one tunneling protocol. 1270 */ 1271 switch (item->type) { 1272 case RTE_FLOW_ITEM_TYPE_VOID: 1273 case RTE_FLOW_ITEM_TYPE_ETH: 1274 break; 1275 1276 case RTE_FLOW_ITEM_TYPE_VXLAN: 1277 case RTE_FLOW_ITEM_TYPE_GENEVE: 1278 case RTE_FLOW_ITEM_TYPE_NVGRE: 1279 if (is_ifrm) { 1280 rte_flow_error_set(error, EINVAL, 1281 RTE_FLOW_ERROR_TYPE_ITEM, 1282 pattern, 1283 "More than one tunneling protocol"); 1284 return -rte_errno; 1285 } 1286 is_ifrm = B_TRUE; 1287 break; 1288 1289 default: 1290 if (is_ifrm) { 1291 rte_flow_error_set(error, EINVAL, 1292 RTE_FLOW_ERROR_TYPE_ITEM, 1293 pattern, 1294 "There is an unsupported pattern item " 1295 "in the inner frame"); 1296 return -rte_errno; 1297 } 1298 break; 1299 } 1300 1301 if (parse_ctx->type != item->ctx_type) { 1302 rte_flow_error_set(error, EINVAL, 1303 RTE_FLOW_ERROR_TYPE_ITEM, pattern, 1304 "Parse context type mismatch"); 1305 return -rte_errno; 1306 } 1307 1308 rc = item->parse(pattern, parse_ctx, error); 1309 if (rc != 0) 1310 return rc; 1311 1312 if (item->layer != SFC_FLOW_ITEM_ANY_LAYER) 1313 prev_layer = item->layer; 1314 } 1315 1316 return 0; 1317 } 1318 1319 static int 1320 sfc_flow_parse_queue(struct sfc_adapter *sa, 1321 const struct rte_flow_action_queue *queue, 1322 struct rte_flow *flow) 1323 { 1324 struct sfc_flow_spec *spec = &flow->spec; 1325 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 1326 struct sfc_rxq *rxq; 1327 struct sfc_rxq_info *rxq_info; 1328 1329 if (queue->index >= sfc_sa2shared(sa)->rxq_count) 1330 return -EINVAL; 1331 1332 rxq = &sa->rxq_ctrl[queue->index]; 1333 spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index; 1334 1335 rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index]; 1336 spec_filter->rss_hash_required = !!(rxq_info->rxq_flags & 1337 SFC_RXQ_FLAG_RSS_HASH); 1338 1339 return 0; 1340 } 1341 1342 static int 1343 sfc_flow_parse_rss(struct sfc_adapter *sa, 1344 const struct rte_flow_action_rss *action_rss, 1345 struct rte_flow *flow) 1346 { 1347 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); 1348 struct sfc_rss *rss = &sas->rss; 1349 unsigned int rxq_sw_index; 1350 struct sfc_rxq *rxq; 1351 unsigned int rxq_hw_index_min; 1352 unsigned int rxq_hw_index_max; 1353 efx_rx_hash_type_t efx_hash_types; 1354 const uint8_t *rss_key; 1355 struct sfc_flow_spec *spec = &flow->spec; 1356 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 1357 struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf; 1358 unsigned int i; 1359 1360 if (action_rss->queue_num == 0) 1361 return -EINVAL; 1362 1363 rxq_sw_index = sfc_sa2shared(sa)->rxq_count - 1; 1364 rxq = &sa->rxq_ctrl[rxq_sw_index]; 1365 rxq_hw_index_min = rxq->hw_index; 1366 rxq_hw_index_max = 0; 1367 1368 for (i = 0; i < action_rss->queue_num; ++i) { 1369 rxq_sw_index = action_rss->queue[i]; 1370 1371 if (rxq_sw_index >= sfc_sa2shared(sa)->rxq_count) 1372 return -EINVAL; 1373 1374 rxq = &sa->rxq_ctrl[rxq_sw_index]; 1375 1376 if (rxq->hw_index < rxq_hw_index_min) 1377 rxq_hw_index_min = rxq->hw_index; 1378 1379 if (rxq->hw_index > rxq_hw_index_max) 1380 rxq_hw_index_max = rxq->hw_index; 1381 } 1382 1383 switch (action_rss->func) { 1384 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1385 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1386 break; 1387 default: 1388 return -EINVAL; 1389 } 1390 1391 if (action_rss->level) 1392 return -EINVAL; 1393 1394 /* 1395 * Dummy RSS action with only one queue and no specific settings 1396 * for hash types and key does not require dedicated RSS context 1397 * and may be simplified to single queue action. 1398 */ 1399 if (action_rss->queue_num == 1 && action_rss->types == 0 && 1400 action_rss->key_len == 0) { 1401 spec_filter->template.efs_dmaq_id = rxq_hw_index_min; 1402 return 0; 1403 } 1404 1405 if (action_rss->types) { 1406 int rc; 1407 1408 rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types, 1409 &efx_hash_types); 1410 if (rc != 0) 1411 return -rc; 1412 } else { 1413 unsigned int i; 1414 1415 efx_hash_types = 0; 1416 for (i = 0; i < rss->hf_map_nb_entries; ++i) 1417 efx_hash_types |= rss->hf_map[i].efx; 1418 } 1419 1420 if (action_rss->key_len) { 1421 if (action_rss->key_len != sizeof(rss->key)) 1422 return -EINVAL; 1423 1424 rss_key = action_rss->key; 1425 } else { 1426 rss_key = rss->key; 1427 } 1428 1429 spec_filter->rss = B_TRUE; 1430 1431 sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min; 1432 sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max; 1433 sfc_rss_conf->rss_hash_types = efx_hash_types; 1434 rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key)); 1435 1436 for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) { 1437 unsigned int nb_queues = action_rss->queue_num; 1438 unsigned int rxq_sw_index = action_rss->queue[i % nb_queues]; 1439 struct sfc_rxq *rxq = &sa->rxq_ctrl[rxq_sw_index]; 1440 1441 sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min; 1442 } 1443 1444 return 0; 1445 } 1446 1447 static int 1448 sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec, 1449 unsigned int filters_count) 1450 { 1451 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 1452 unsigned int i; 1453 int ret = 0; 1454 1455 for (i = 0; i < filters_count; i++) { 1456 int rc; 1457 1458 rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]); 1459 if (ret == 0 && rc != 0) { 1460 sfc_err(sa, "failed to remove filter specification " 1461 "(rc = %d)", rc); 1462 ret = rc; 1463 } 1464 } 1465 1466 return ret; 1467 } 1468 1469 static int 1470 sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec) 1471 { 1472 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 1473 unsigned int i; 1474 int rc = 0; 1475 1476 for (i = 0; i < spec_filter->count; i++) { 1477 rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]); 1478 if (rc != 0) { 1479 sfc_flow_spec_flush(sa, spec, i); 1480 break; 1481 } 1482 } 1483 1484 return rc; 1485 } 1486 1487 static int 1488 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec) 1489 { 1490 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 1491 1492 return sfc_flow_spec_flush(sa, spec, spec_filter->count); 1493 } 1494 1495 static int 1496 sfc_flow_filter_insert(struct sfc_adapter *sa, 1497 struct rte_flow *flow) 1498 { 1499 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); 1500 struct sfc_rss *rss = &sas->rss; 1501 struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter; 1502 struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf; 1503 uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT; 1504 boolean_t create_context; 1505 unsigned int i; 1506 int rc = 0; 1507 1508 create_context = spec_filter->rss || (spec_filter->rss_hash_required && 1509 rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT); 1510 1511 if (create_context) { 1512 unsigned int rss_spread; 1513 unsigned int rss_hash_types; 1514 uint8_t *rss_key; 1515 1516 if (spec_filter->rss) { 1517 rss_spread = MIN(flow_rss->rxq_hw_index_max - 1518 flow_rss->rxq_hw_index_min + 1, 1519 EFX_MAXRSS); 1520 rss_hash_types = flow_rss->rss_hash_types; 1521 rss_key = flow_rss->rss_key; 1522 } else { 1523 /* 1524 * Initialize dummy RSS context parameters to have 1525 * valid RSS hash. Use default RSS hash function and 1526 * key. 1527 */ 1528 rss_spread = 1; 1529 rss_hash_types = rss->hash_types; 1530 rss_key = rss->key; 1531 } 1532 1533 rc = efx_rx_scale_context_alloc(sa->nic, 1534 EFX_RX_SCALE_EXCLUSIVE, 1535 rss_spread, 1536 &efs_rss_context); 1537 if (rc != 0) 1538 goto fail_scale_context_alloc; 1539 1540 rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context, 1541 rss->hash_alg, 1542 rss_hash_types, B_TRUE); 1543 if (rc != 0) 1544 goto fail_scale_mode_set; 1545 1546 rc = efx_rx_scale_key_set(sa->nic, efs_rss_context, 1547 rss_key, sizeof(rss->key)); 1548 if (rc != 0) 1549 goto fail_scale_key_set; 1550 } else { 1551 efs_rss_context = rss->dummy_rss_context; 1552 } 1553 1554 if (spec_filter->rss || spec_filter->rss_hash_required) { 1555 /* 1556 * At this point, fully elaborated filter specifications 1557 * have been produced from the template. To make sure that 1558 * RSS behaviour is consistent between them, set the same 1559 * RSS context value everywhere. 1560 */ 1561 for (i = 0; i < spec_filter->count; i++) { 1562 efx_filter_spec_t *spec = &spec_filter->filters[i]; 1563 1564 spec->efs_rss_context = efs_rss_context; 1565 spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS; 1566 if (spec_filter->rss) 1567 spec->efs_dmaq_id = flow_rss->rxq_hw_index_min; 1568 } 1569 } 1570 1571 rc = sfc_flow_spec_insert(sa, &flow->spec); 1572 if (rc != 0) 1573 goto fail_filter_insert; 1574 1575 if (create_context) { 1576 unsigned int dummy_tbl[RTE_DIM(flow_rss->rss_tbl)] = {0}; 1577 unsigned int *tbl; 1578 1579 tbl = spec_filter->rss ? flow_rss->rss_tbl : dummy_tbl; 1580 1581 /* 1582 * Scale table is set after filter insertion because 1583 * the table entries are relative to the base RxQ ID 1584 * and the latter is submitted to the HW by means of 1585 * inserting a filter, so by the time of the request 1586 * the HW knows all the information needed to verify 1587 * the table entries, and the operation will succeed 1588 */ 1589 rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context, 1590 tbl, RTE_DIM(flow_rss->rss_tbl)); 1591 if (rc != 0) 1592 goto fail_scale_tbl_set; 1593 1594 /* Remember created dummy RSS context */ 1595 if (!spec_filter->rss) 1596 rss->dummy_rss_context = efs_rss_context; 1597 } 1598 1599 return 0; 1600 1601 fail_scale_tbl_set: 1602 sfc_flow_spec_remove(sa, &flow->spec); 1603 1604 fail_filter_insert: 1605 fail_scale_key_set: 1606 fail_scale_mode_set: 1607 if (create_context) 1608 efx_rx_scale_context_free(sa->nic, efs_rss_context); 1609 1610 fail_scale_context_alloc: 1611 return rc; 1612 } 1613 1614 static int 1615 sfc_flow_filter_remove(struct sfc_adapter *sa, 1616 struct rte_flow *flow) 1617 { 1618 struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter; 1619 int rc = 0; 1620 1621 rc = sfc_flow_spec_remove(sa, &flow->spec); 1622 if (rc != 0) 1623 return rc; 1624 1625 if (spec_filter->rss) { 1626 /* 1627 * All specifications for a given flow rule have the same RSS 1628 * context, so that RSS context value is taken from the first 1629 * filter specification 1630 */ 1631 efx_filter_spec_t *spec = &spec_filter->filters[0]; 1632 1633 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context); 1634 } 1635 1636 return rc; 1637 } 1638 1639 static int 1640 sfc_flow_parse_mark(struct sfc_adapter *sa, 1641 const struct rte_flow_action_mark *mark, 1642 struct rte_flow *flow) 1643 { 1644 struct sfc_flow_spec *spec = &flow->spec; 1645 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 1646 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); 1647 1648 if (mark == NULL || mark->id > encp->enc_filter_action_mark_max) 1649 return EINVAL; 1650 1651 spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK; 1652 spec_filter->template.efs_mark = mark->id; 1653 1654 return 0; 1655 } 1656 1657 static int 1658 sfc_flow_parse_actions(struct sfc_adapter *sa, 1659 const struct rte_flow_action actions[], 1660 struct rte_flow *flow, 1661 struct rte_flow_error *error) 1662 { 1663 int rc; 1664 struct sfc_flow_spec *spec = &flow->spec; 1665 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 1666 const unsigned int dp_rx_features = sa->priv.dp_rx->features; 1667 uint32_t actions_set = 0; 1668 const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) | 1669 (1UL << RTE_FLOW_ACTION_TYPE_RSS) | 1670 (1UL << RTE_FLOW_ACTION_TYPE_DROP); 1671 const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) | 1672 (1UL << RTE_FLOW_ACTION_TYPE_FLAG); 1673 1674 if (actions == NULL) { 1675 rte_flow_error_set(error, EINVAL, 1676 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL, 1677 "NULL actions"); 1678 return -rte_errno; 1679 } 1680 1681 #define SFC_BUILD_SET_OVERFLOW(_action, _set) \ 1682 RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT) 1683 1684 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 1685 switch (actions->type) { 1686 case RTE_FLOW_ACTION_TYPE_VOID: 1687 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID, 1688 actions_set); 1689 break; 1690 1691 case RTE_FLOW_ACTION_TYPE_QUEUE: 1692 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE, 1693 actions_set); 1694 if ((actions_set & fate_actions_mask) != 0) 1695 goto fail_fate_actions; 1696 1697 rc = sfc_flow_parse_queue(sa, actions->conf, flow); 1698 if (rc != 0) { 1699 rte_flow_error_set(error, EINVAL, 1700 RTE_FLOW_ERROR_TYPE_ACTION, actions, 1701 "Bad QUEUE action"); 1702 return -rte_errno; 1703 } 1704 break; 1705 1706 case RTE_FLOW_ACTION_TYPE_RSS: 1707 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS, 1708 actions_set); 1709 if ((actions_set & fate_actions_mask) != 0) 1710 goto fail_fate_actions; 1711 1712 rc = sfc_flow_parse_rss(sa, actions->conf, flow); 1713 if (rc != 0) { 1714 rte_flow_error_set(error, -rc, 1715 RTE_FLOW_ERROR_TYPE_ACTION, actions, 1716 "Bad RSS action"); 1717 return -rte_errno; 1718 } 1719 break; 1720 1721 case RTE_FLOW_ACTION_TYPE_DROP: 1722 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP, 1723 actions_set); 1724 if ((actions_set & fate_actions_mask) != 0) 1725 goto fail_fate_actions; 1726 1727 spec_filter->template.efs_dmaq_id = 1728 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP; 1729 break; 1730 1731 case RTE_FLOW_ACTION_TYPE_FLAG: 1732 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG, 1733 actions_set); 1734 if ((actions_set & mark_actions_mask) != 0) 1735 goto fail_actions_overlap; 1736 1737 if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) { 1738 rte_flow_error_set(error, ENOTSUP, 1739 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1740 "FLAG action is not supported on the current Rx datapath"); 1741 return -rte_errno; 1742 } 1743 1744 spec_filter->template.efs_flags |= 1745 EFX_FILTER_FLAG_ACTION_FLAG; 1746 break; 1747 1748 case RTE_FLOW_ACTION_TYPE_MARK: 1749 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK, 1750 actions_set); 1751 if ((actions_set & mark_actions_mask) != 0) 1752 goto fail_actions_overlap; 1753 1754 if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) { 1755 rte_flow_error_set(error, ENOTSUP, 1756 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1757 "MARK action is not supported on the current Rx datapath"); 1758 return -rte_errno; 1759 } 1760 1761 rc = sfc_flow_parse_mark(sa, actions->conf, flow); 1762 if (rc != 0) { 1763 rte_flow_error_set(error, rc, 1764 RTE_FLOW_ERROR_TYPE_ACTION, actions, 1765 "Bad MARK action"); 1766 return -rte_errno; 1767 } 1768 break; 1769 1770 default: 1771 rte_flow_error_set(error, ENOTSUP, 1772 RTE_FLOW_ERROR_TYPE_ACTION, actions, 1773 "Action is not supported"); 1774 return -rte_errno; 1775 } 1776 1777 actions_set |= (1UL << actions->type); 1778 } 1779 #undef SFC_BUILD_SET_OVERFLOW 1780 1781 /* When fate is unknown, drop traffic. */ 1782 if ((actions_set & fate_actions_mask) == 0) { 1783 spec_filter->template.efs_dmaq_id = 1784 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP; 1785 } 1786 1787 return 0; 1788 1789 fail_fate_actions: 1790 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions, 1791 "Cannot combine several fate-deciding actions, " 1792 "choose between QUEUE, RSS or DROP"); 1793 return -rte_errno; 1794 1795 fail_actions_overlap: 1796 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions, 1797 "Overlapping actions are not supported"); 1798 return -rte_errno; 1799 } 1800 1801 /** 1802 * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST 1803 * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same 1804 * specifications after copying. 1805 * 1806 * @param spec[in, out] 1807 * SFC flow specification to update. 1808 * @param filters_count_for_one_val[in] 1809 * How many specifications should have the same match flag, what is the 1810 * number of specifications before copying. 1811 * @param error[out] 1812 * Perform verbose error reporting if not NULL. 1813 */ 1814 static int 1815 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec, 1816 unsigned int filters_count_for_one_val, 1817 struct rte_flow_error *error) 1818 { 1819 unsigned int i; 1820 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 1821 static const efx_filter_match_flags_t vals[] = { 1822 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, 1823 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST 1824 }; 1825 1826 if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) { 1827 rte_flow_error_set(error, EINVAL, 1828 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1829 "Number of specifications is incorrect while copying " 1830 "by unknown destination flags"); 1831 return -rte_errno; 1832 } 1833 1834 for (i = 0; i < spec_filter->count; i++) { 1835 /* The check above ensures that divisor can't be zero here */ 1836 spec_filter->filters[i].efs_match_flags |= 1837 vals[i / filters_count_for_one_val]; 1838 } 1839 1840 return 0; 1841 } 1842 1843 /** 1844 * Check that the following conditions are met: 1845 * - the list of supported filters has a filter 1846 * with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of 1847 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also 1848 * be inserted. 1849 * 1850 * @param match[in] 1851 * The match flags of filter. 1852 * @param spec[in] 1853 * Specification to be supplemented. 1854 * @param filter[in] 1855 * SFC filter with list of supported filters. 1856 */ 1857 static boolean_t 1858 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match, 1859 __rte_unused efx_filter_spec_t *spec, 1860 struct sfc_filter *filter) 1861 { 1862 unsigned int i; 1863 efx_filter_match_flags_t match_mcast_dst; 1864 1865 match_mcast_dst = 1866 (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) | 1867 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST; 1868 for (i = 0; i < filter->supported_match_num; i++) { 1869 if (match_mcast_dst == filter->supported_match[i]) 1870 return B_TRUE; 1871 } 1872 1873 return B_FALSE; 1874 } 1875 1876 /** 1877 * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and 1878 * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same 1879 * specifications after copying. 1880 * 1881 * @param spec[in, out] 1882 * SFC flow specification to update. 1883 * @param filters_count_for_one_val[in] 1884 * How many specifications should have the same EtherType value, what is the 1885 * number of specifications before copying. 1886 * @param error[out] 1887 * Perform verbose error reporting if not NULL. 1888 */ 1889 static int 1890 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec, 1891 unsigned int filters_count_for_one_val, 1892 struct rte_flow_error *error) 1893 { 1894 unsigned int i; 1895 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 1896 static const uint16_t vals[] = { 1897 EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6 1898 }; 1899 1900 if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) { 1901 rte_flow_error_set(error, EINVAL, 1902 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1903 "Number of specifications is incorrect " 1904 "while copying by Ethertype"); 1905 return -rte_errno; 1906 } 1907 1908 for (i = 0; i < spec_filter->count; i++) { 1909 spec_filter->filters[i].efs_match_flags |= 1910 EFX_FILTER_MATCH_ETHER_TYPE; 1911 1912 /* 1913 * The check above ensures that 1914 * filters_count_for_one_val is not 0 1915 */ 1916 spec_filter->filters[i].efs_ether_type = 1917 vals[i / filters_count_for_one_val]; 1918 } 1919 1920 return 0; 1921 } 1922 1923 /** 1924 * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0 1925 * in the same specifications after copying. 1926 * 1927 * @param spec[in, out] 1928 * SFC flow specification to update. 1929 * @param filters_count_for_one_val[in] 1930 * How many specifications should have the same match flag, what is the 1931 * number of specifications before copying. 1932 * @param error[out] 1933 * Perform verbose error reporting if not NULL. 1934 */ 1935 static int 1936 sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec, 1937 unsigned int filters_count_for_one_val, 1938 struct rte_flow_error *error) 1939 { 1940 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 1941 unsigned int i; 1942 1943 if (filters_count_for_one_val != spec_filter->count) { 1944 rte_flow_error_set(error, EINVAL, 1945 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1946 "Number of specifications is incorrect " 1947 "while copying by outer VLAN ID"); 1948 return -rte_errno; 1949 } 1950 1951 for (i = 0; i < spec_filter->count; i++) { 1952 spec_filter->filters[i].efs_match_flags |= 1953 EFX_FILTER_MATCH_OUTER_VID; 1954 1955 spec_filter->filters[i].efs_outer_vid = 0; 1956 } 1957 1958 return 0; 1959 } 1960 1961 /** 1962 * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and 1963 * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same 1964 * specifications after copying. 1965 * 1966 * @param spec[in, out] 1967 * SFC flow specification to update. 1968 * @param filters_count_for_one_val[in] 1969 * How many specifications should have the same match flag, what is the 1970 * number of specifications before copying. 1971 * @param error[out] 1972 * Perform verbose error reporting if not NULL. 1973 */ 1974 static int 1975 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec, 1976 unsigned int filters_count_for_one_val, 1977 struct rte_flow_error *error) 1978 { 1979 unsigned int i; 1980 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 1981 static const efx_filter_match_flags_t vals[] = { 1982 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, 1983 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST 1984 }; 1985 1986 if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) { 1987 rte_flow_error_set(error, EINVAL, 1988 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1989 "Number of specifications is incorrect while copying " 1990 "by inner frame unknown destination flags"); 1991 return -rte_errno; 1992 } 1993 1994 for (i = 0; i < spec_filter->count; i++) { 1995 /* The check above ensures that divisor can't be zero here */ 1996 spec_filter->filters[i].efs_match_flags |= 1997 vals[i / filters_count_for_one_val]; 1998 } 1999 2000 return 0; 2001 } 2002 2003 /** 2004 * Check that the following conditions are met: 2005 * - the specification corresponds to a filter for encapsulated traffic 2006 * - the list of supported filters has a filter 2007 * with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of 2008 * EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also 2009 * be inserted. 2010 * 2011 * @param match[in] 2012 * The match flags of filter. 2013 * @param spec[in] 2014 * Specification to be supplemented. 2015 * @param filter[in] 2016 * SFC filter with list of supported filters. 2017 */ 2018 static boolean_t 2019 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match, 2020 efx_filter_spec_t *spec, 2021 struct sfc_filter *filter) 2022 { 2023 unsigned int i; 2024 efx_tunnel_protocol_t encap_type = spec->efs_encap_type; 2025 efx_filter_match_flags_t match_mcast_dst; 2026 2027 if (encap_type == EFX_TUNNEL_PROTOCOL_NONE) 2028 return B_FALSE; 2029 2030 match_mcast_dst = 2031 (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) | 2032 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST; 2033 for (i = 0; i < filter->supported_match_num; i++) { 2034 if (match_mcast_dst == filter->supported_match[i]) 2035 return B_TRUE; 2036 } 2037 2038 return B_FALSE; 2039 } 2040 2041 /** 2042 * Check that the list of supported filters has a filter that differs 2043 * from @p match in that it has no flag EFX_FILTER_MATCH_OUTER_VID 2044 * in this case that filter will be used and the flag 2045 * EFX_FILTER_MATCH_OUTER_VID is not needed. 2046 * 2047 * @param match[in] 2048 * The match flags of filter. 2049 * @param spec[in] 2050 * Specification to be supplemented. 2051 * @param filter[in] 2052 * SFC filter with list of supported filters. 2053 */ 2054 static boolean_t 2055 sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match, 2056 __rte_unused efx_filter_spec_t *spec, 2057 struct sfc_filter *filter) 2058 { 2059 unsigned int i; 2060 efx_filter_match_flags_t match_without_vid = 2061 match & ~EFX_FILTER_MATCH_OUTER_VID; 2062 2063 for (i = 0; i < filter->supported_match_num; i++) { 2064 if (match_without_vid == filter->supported_match[i]) 2065 return B_FALSE; 2066 } 2067 2068 return B_TRUE; 2069 } 2070 2071 /* 2072 * Match flags that can be automatically added to filters. 2073 * Selecting the last minimum when searching for the copy flag ensures that the 2074 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than 2075 * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter 2076 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported 2077 * filters. 2078 */ 2079 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = { 2080 { 2081 .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, 2082 .vals_count = 2, 2083 .set_vals = sfc_flow_set_unknown_dst_flags, 2084 .spec_check = sfc_flow_check_unknown_dst_flags, 2085 }, 2086 { 2087 .flag = EFX_FILTER_MATCH_ETHER_TYPE, 2088 .vals_count = 2, 2089 .set_vals = sfc_flow_set_ethertypes, 2090 .spec_check = NULL, 2091 }, 2092 { 2093 .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, 2094 .vals_count = 2, 2095 .set_vals = sfc_flow_set_ifrm_unknown_dst_flags, 2096 .spec_check = sfc_flow_check_ifrm_unknown_dst_flags, 2097 }, 2098 { 2099 .flag = EFX_FILTER_MATCH_OUTER_VID, 2100 .vals_count = 1, 2101 .set_vals = sfc_flow_set_outer_vid_flag, 2102 .spec_check = sfc_flow_check_outer_vid_flag, 2103 }, 2104 }; 2105 2106 /* Get item from array sfc_flow_copy_flags */ 2107 static const struct sfc_flow_copy_flag * 2108 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag) 2109 { 2110 unsigned int i; 2111 2112 for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) { 2113 if (sfc_flow_copy_flags[i].flag == flag) 2114 return &sfc_flow_copy_flags[i]; 2115 } 2116 2117 return NULL; 2118 } 2119 2120 /** 2121 * Make copies of the specifications, set match flag and values 2122 * of the field that corresponds to it. 2123 * 2124 * @param spec[in, out] 2125 * SFC flow specification to update. 2126 * @param flag[in] 2127 * The match flag to add. 2128 * @param error[out] 2129 * Perform verbose error reporting if not NULL. 2130 */ 2131 static int 2132 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec, 2133 efx_filter_match_flags_t flag, 2134 struct rte_flow_error *error) 2135 { 2136 unsigned int i; 2137 unsigned int new_filters_count; 2138 unsigned int filters_count_for_one_val; 2139 const struct sfc_flow_copy_flag *copy_flag; 2140 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 2141 int rc; 2142 2143 copy_flag = sfc_flow_get_copy_flag(flag); 2144 if (copy_flag == NULL) { 2145 rte_flow_error_set(error, ENOTSUP, 2146 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2147 "Unsupported spec field for copying"); 2148 return -rte_errno; 2149 } 2150 2151 new_filters_count = spec_filter->count * copy_flag->vals_count; 2152 if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) { 2153 rte_flow_error_set(error, EINVAL, 2154 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2155 "Too much EFX specifications in the flow rule"); 2156 return -rte_errno; 2157 } 2158 2159 /* Copy filters specifications */ 2160 for (i = spec_filter->count; i < new_filters_count; i++) { 2161 spec_filter->filters[i] = 2162 spec_filter->filters[i - spec_filter->count]; 2163 } 2164 2165 filters_count_for_one_val = spec_filter->count; 2166 spec_filter->count = new_filters_count; 2167 2168 rc = copy_flag->set_vals(spec, filters_count_for_one_val, error); 2169 if (rc != 0) 2170 return rc; 2171 2172 return 0; 2173 } 2174 2175 /** 2176 * Check that the given set of match flags missing in the original filter spec 2177 * could be covered by adding spec copies which specify the corresponding 2178 * flags and packet field values to match. 2179 * 2180 * @param miss_flags[in] 2181 * Flags that are missing until the supported filter. 2182 * @param spec[in] 2183 * Specification to be supplemented. 2184 * @param filter[in] 2185 * SFC filter. 2186 * 2187 * @return 2188 * Number of specifications after copy or 0, if the flags can not be added. 2189 */ 2190 static unsigned int 2191 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags, 2192 efx_filter_spec_t *spec, 2193 struct sfc_filter *filter) 2194 { 2195 unsigned int i; 2196 efx_filter_match_flags_t copy_flags = 0; 2197 efx_filter_match_flags_t flag; 2198 efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags; 2199 sfc_flow_spec_check *check; 2200 unsigned int multiplier = 1; 2201 2202 for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) { 2203 flag = sfc_flow_copy_flags[i].flag; 2204 check = sfc_flow_copy_flags[i].spec_check; 2205 if ((flag & miss_flags) == flag) { 2206 if (check != NULL && (!check(match, spec, filter))) 2207 continue; 2208 2209 copy_flags |= flag; 2210 multiplier *= sfc_flow_copy_flags[i].vals_count; 2211 } 2212 } 2213 2214 if (copy_flags == miss_flags) 2215 return multiplier; 2216 2217 return 0; 2218 } 2219 2220 /** 2221 * Attempt to supplement the specification template to the minimally 2222 * supported set of match flags. To do this, it is necessary to copy 2223 * the specifications, filling them with the values of fields that 2224 * correspond to the missing flags. 2225 * The necessary and sufficient filter is built from the fewest number 2226 * of copies which could be made to cover the minimally required set 2227 * of flags. 2228 * 2229 * @param sa[in] 2230 * SFC adapter. 2231 * @param spec[in, out] 2232 * SFC flow specification to update. 2233 * @param error[out] 2234 * Perform verbose error reporting if not NULL. 2235 */ 2236 static int 2237 sfc_flow_spec_filters_complete(struct sfc_adapter *sa, 2238 struct sfc_flow_spec *spec, 2239 struct rte_flow_error *error) 2240 { 2241 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 2242 struct sfc_filter *filter = &sa->filter; 2243 efx_filter_match_flags_t miss_flags; 2244 efx_filter_match_flags_t min_miss_flags = 0; 2245 efx_filter_match_flags_t match; 2246 unsigned int min_multiplier = UINT_MAX; 2247 unsigned int multiplier; 2248 unsigned int i; 2249 int rc; 2250 2251 match = spec_filter->template.efs_match_flags; 2252 for (i = 0; i < filter->supported_match_num; i++) { 2253 if ((match & filter->supported_match[i]) == match) { 2254 miss_flags = filter->supported_match[i] & (~match); 2255 multiplier = sfc_flow_check_missing_flags(miss_flags, 2256 &spec_filter->template, filter); 2257 if (multiplier > 0) { 2258 if (multiplier <= min_multiplier) { 2259 min_multiplier = multiplier; 2260 min_miss_flags = miss_flags; 2261 } 2262 } 2263 } 2264 } 2265 2266 if (min_multiplier == UINT_MAX) { 2267 rte_flow_error_set(error, ENOTSUP, 2268 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2269 "The flow rule pattern is unsupported"); 2270 return -rte_errno; 2271 } 2272 2273 for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) { 2274 efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag; 2275 2276 if ((flag & min_miss_flags) == flag) { 2277 rc = sfc_flow_spec_add_match_flag(spec, flag, error); 2278 if (rc != 0) 2279 return rc; 2280 } 2281 } 2282 2283 return 0; 2284 } 2285 2286 /** 2287 * Check that set of match flags is referred to by a filter. Filter is 2288 * described by match flags with the ability to add OUTER_VID and INNER_VID 2289 * flags. 2290 * 2291 * @param match_flags[in] 2292 * Set of match flags. 2293 * @param flags_pattern[in] 2294 * Pattern of filter match flags. 2295 */ 2296 static boolean_t 2297 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags, 2298 efx_filter_match_flags_t flags_pattern) 2299 { 2300 if ((match_flags & flags_pattern) != flags_pattern) 2301 return B_FALSE; 2302 2303 switch (match_flags & ~flags_pattern) { 2304 case 0: 2305 case EFX_FILTER_MATCH_OUTER_VID: 2306 case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID: 2307 return B_TRUE; 2308 default: 2309 return B_FALSE; 2310 } 2311 } 2312 2313 /** 2314 * Check whether the spec maps to a hardware filter which is known to be 2315 * ineffective despite being valid. 2316 * 2317 * @param filter[in] 2318 * SFC filter with list of supported filters. 2319 * @param spec[in] 2320 * SFC flow specification. 2321 */ 2322 static boolean_t 2323 sfc_flow_is_match_flags_exception(struct sfc_filter *filter, 2324 struct sfc_flow_spec *spec) 2325 { 2326 unsigned int i; 2327 uint16_t ether_type; 2328 uint8_t ip_proto; 2329 efx_filter_match_flags_t match_flags; 2330 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 2331 2332 for (i = 0; i < spec_filter->count; i++) { 2333 match_flags = spec_filter->filters[i].efs_match_flags; 2334 2335 if (sfc_flow_is_match_with_vids(match_flags, 2336 EFX_FILTER_MATCH_ETHER_TYPE) || 2337 sfc_flow_is_match_with_vids(match_flags, 2338 EFX_FILTER_MATCH_ETHER_TYPE | 2339 EFX_FILTER_MATCH_LOC_MAC)) { 2340 ether_type = spec_filter->filters[i].efs_ether_type; 2341 if (filter->supports_ip_proto_or_addr_filter && 2342 (ether_type == EFX_ETHER_TYPE_IPV4 || 2343 ether_type == EFX_ETHER_TYPE_IPV6)) 2344 return B_TRUE; 2345 } else if (sfc_flow_is_match_with_vids(match_flags, 2346 EFX_FILTER_MATCH_ETHER_TYPE | 2347 EFX_FILTER_MATCH_IP_PROTO) || 2348 sfc_flow_is_match_with_vids(match_flags, 2349 EFX_FILTER_MATCH_ETHER_TYPE | 2350 EFX_FILTER_MATCH_IP_PROTO | 2351 EFX_FILTER_MATCH_LOC_MAC)) { 2352 ip_proto = spec_filter->filters[i].efs_ip_proto; 2353 if (filter->supports_rem_or_local_port_filter && 2354 (ip_proto == EFX_IPPROTO_TCP || 2355 ip_proto == EFX_IPPROTO_UDP)) 2356 return B_TRUE; 2357 } 2358 } 2359 2360 return B_FALSE; 2361 } 2362 2363 static int 2364 sfc_flow_validate_match_flags(struct sfc_adapter *sa, 2365 struct rte_flow *flow, 2366 struct rte_flow_error *error) 2367 { 2368 struct sfc_flow_spec *spec = &flow->spec; 2369 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 2370 efx_filter_spec_t *spec_tmpl = &spec_filter->template; 2371 efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags; 2372 int rc; 2373 2374 /* Initialize the first filter spec with template */ 2375 spec_filter->filters[0] = *spec_tmpl; 2376 spec_filter->count = 1; 2377 2378 if (!sfc_filter_is_match_supported(sa, match_flags)) { 2379 rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error); 2380 if (rc != 0) 2381 return rc; 2382 } 2383 2384 if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) { 2385 rte_flow_error_set(error, ENOTSUP, 2386 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2387 "The flow rule pattern is unsupported"); 2388 return -rte_errno; 2389 } 2390 2391 return 0; 2392 } 2393 2394 static int 2395 sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev, 2396 const struct rte_flow_item pattern[], 2397 const struct rte_flow_action actions[], 2398 struct rte_flow *flow, 2399 struct rte_flow_error *error) 2400 { 2401 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2402 struct sfc_flow_spec *spec = &flow->spec; 2403 struct sfc_flow_spec_filter *spec_filter = &spec->filter; 2404 struct sfc_flow_parse_ctx ctx; 2405 int rc; 2406 2407 ctx.type = SFC_FLOW_PARSE_CTX_FILTER; 2408 ctx.filter = &spec_filter->template; 2409 2410 rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items), 2411 pattern, &ctx, error); 2412 if (rc != 0) 2413 goto fail_bad_value; 2414 2415 rc = sfc_flow_parse_actions(sa, actions, flow, error); 2416 if (rc != 0) 2417 goto fail_bad_value; 2418 2419 rc = sfc_flow_validate_match_flags(sa, flow, error); 2420 if (rc != 0) 2421 goto fail_bad_value; 2422 2423 return 0; 2424 2425 fail_bad_value: 2426 return rc; 2427 } 2428 2429 static int 2430 sfc_flow_parse_rte_to_mae(struct rte_eth_dev *dev, 2431 const struct rte_flow_item pattern[], 2432 const struct rte_flow_action actions[], 2433 struct rte_flow *flow, 2434 struct rte_flow_error *error) 2435 { 2436 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2437 struct sfc_flow_spec *spec = &flow->spec; 2438 struct sfc_flow_spec_mae *spec_mae = &spec->mae; 2439 int rc; 2440 2441 rc = sfc_mae_rule_parse_pattern(sa, pattern, spec_mae, error); 2442 if (rc != 0) 2443 return rc; 2444 2445 rc = sfc_mae_rule_parse_actions(sa, actions, &spec_mae->action_set, 2446 error); 2447 if (rc != 0) 2448 return rc; 2449 2450 return 0; 2451 } 2452 2453 static int 2454 sfc_flow_parse(struct rte_eth_dev *dev, 2455 const struct rte_flow_attr *attr, 2456 const struct rte_flow_item pattern[], 2457 const struct rte_flow_action actions[], 2458 struct rte_flow *flow, 2459 struct rte_flow_error *error) 2460 { 2461 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2462 const struct sfc_flow_ops_by_spec *ops; 2463 int rc; 2464 2465 rc = sfc_flow_parse_attr(sa, attr, flow, error); 2466 if (rc != 0) 2467 return rc; 2468 2469 ops = sfc_flow_get_ops_by_spec(flow); 2470 if (ops == NULL || ops->parse == NULL) { 2471 rte_flow_error_set(error, ENOTSUP, 2472 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2473 "No backend to handle this flow"); 2474 return -rte_errno; 2475 } 2476 2477 return ops->parse(dev, pattern, actions, flow, error); 2478 } 2479 2480 static struct rte_flow * 2481 sfc_flow_zmalloc(struct rte_flow_error *error) 2482 { 2483 struct rte_flow *flow; 2484 2485 flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0); 2486 if (flow == NULL) { 2487 rte_flow_error_set(error, ENOMEM, 2488 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2489 "Failed to allocate memory"); 2490 } 2491 2492 return flow; 2493 } 2494 2495 static void 2496 sfc_flow_free(struct sfc_adapter *sa, struct rte_flow *flow) 2497 { 2498 const struct sfc_flow_ops_by_spec *ops; 2499 2500 ops = sfc_flow_get_ops_by_spec(flow); 2501 if (ops != NULL && ops->cleanup != NULL) 2502 ops->cleanup(sa, flow); 2503 2504 rte_free(flow); 2505 } 2506 2507 static int 2508 sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow, 2509 struct rte_flow_error *error) 2510 { 2511 const struct sfc_flow_ops_by_spec *ops; 2512 int rc; 2513 2514 ops = sfc_flow_get_ops_by_spec(flow); 2515 if (ops == NULL || ops->insert == NULL) { 2516 rte_flow_error_set(error, ENOTSUP, 2517 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2518 "No backend to handle this flow"); 2519 return rte_errno; 2520 } 2521 2522 rc = ops->insert(sa, flow); 2523 if (rc != 0) { 2524 rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2525 NULL, "Failed to insert the flow rule"); 2526 } 2527 2528 return rc; 2529 } 2530 2531 static int 2532 sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow, 2533 struct rte_flow_error *error) 2534 { 2535 const struct sfc_flow_ops_by_spec *ops; 2536 int rc; 2537 2538 ops = sfc_flow_get_ops_by_spec(flow); 2539 if (ops == NULL || ops->remove == NULL) { 2540 rte_flow_error_set(error, ENOTSUP, 2541 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2542 "No backend to handle this flow"); 2543 return rte_errno; 2544 } 2545 2546 rc = ops->remove(sa, flow); 2547 if (rc != 0) { 2548 rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2549 NULL, "Failed to remove the flow rule"); 2550 } 2551 2552 return rc; 2553 } 2554 2555 static int 2556 sfc_flow_verify(struct sfc_adapter *sa, struct rte_flow *flow, 2557 struct rte_flow_error *error) 2558 { 2559 const struct sfc_flow_ops_by_spec *ops; 2560 int rc = 0; 2561 2562 ops = sfc_flow_get_ops_by_spec(flow); 2563 if (ops == NULL) { 2564 rte_flow_error_set(error, ENOTSUP, 2565 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2566 "No backend to handle this flow"); 2567 return -rte_errno; 2568 } 2569 2570 if (ops->verify != NULL) { 2571 /* 2572 * Use locking since verify method may need to 2573 * access the list of already created rules. 2574 */ 2575 sfc_adapter_lock(sa); 2576 rc = ops->verify(sa, flow); 2577 sfc_adapter_unlock(sa); 2578 } 2579 2580 if (rc != 0) { 2581 rte_flow_error_set(error, rc, 2582 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2583 "Failed to verify flow validity with FW"); 2584 return -rte_errno; 2585 } 2586 2587 return 0; 2588 } 2589 2590 static int 2591 sfc_flow_validate(struct rte_eth_dev *dev, 2592 const struct rte_flow_attr *attr, 2593 const struct rte_flow_item pattern[], 2594 const struct rte_flow_action actions[], 2595 struct rte_flow_error *error) 2596 { 2597 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2598 struct rte_flow *flow; 2599 int rc; 2600 2601 flow = sfc_flow_zmalloc(error); 2602 if (flow == NULL) 2603 return -rte_errno; 2604 2605 rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error); 2606 if (rc == 0) 2607 rc = sfc_flow_verify(sa, flow, error); 2608 2609 sfc_flow_free(sa, flow); 2610 2611 return rc; 2612 } 2613 2614 static struct rte_flow * 2615 sfc_flow_create(struct rte_eth_dev *dev, 2616 const struct rte_flow_attr *attr, 2617 const struct rte_flow_item pattern[], 2618 const struct rte_flow_action actions[], 2619 struct rte_flow_error *error) 2620 { 2621 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2622 struct rte_flow *flow = NULL; 2623 int rc; 2624 2625 flow = sfc_flow_zmalloc(error); 2626 if (flow == NULL) 2627 goto fail_no_mem; 2628 2629 rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error); 2630 if (rc != 0) 2631 goto fail_bad_value; 2632 2633 sfc_adapter_lock(sa); 2634 2635 TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries); 2636 2637 if (sa->state == SFC_ADAPTER_STARTED) { 2638 rc = sfc_flow_insert(sa, flow, error); 2639 if (rc != 0) 2640 goto fail_flow_insert; 2641 } 2642 2643 sfc_adapter_unlock(sa); 2644 2645 return flow; 2646 2647 fail_flow_insert: 2648 TAILQ_REMOVE(&sa->flow_list, flow, entries); 2649 2650 fail_bad_value: 2651 sfc_flow_free(sa, flow); 2652 sfc_adapter_unlock(sa); 2653 2654 fail_no_mem: 2655 return NULL; 2656 } 2657 2658 static int 2659 sfc_flow_destroy(struct rte_eth_dev *dev, 2660 struct rte_flow *flow, 2661 struct rte_flow_error *error) 2662 { 2663 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2664 struct rte_flow *flow_ptr; 2665 int rc = EINVAL; 2666 2667 sfc_adapter_lock(sa); 2668 2669 TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) { 2670 if (flow_ptr == flow) 2671 rc = 0; 2672 } 2673 if (rc != 0) { 2674 rte_flow_error_set(error, rc, 2675 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 2676 "Failed to find flow rule to destroy"); 2677 goto fail_bad_value; 2678 } 2679 2680 if (sa->state == SFC_ADAPTER_STARTED) 2681 rc = sfc_flow_remove(sa, flow, error); 2682 2683 TAILQ_REMOVE(&sa->flow_list, flow, entries); 2684 sfc_flow_free(sa, flow); 2685 2686 fail_bad_value: 2687 sfc_adapter_unlock(sa); 2688 2689 return -rc; 2690 } 2691 2692 static int 2693 sfc_flow_flush(struct rte_eth_dev *dev, 2694 struct rte_flow_error *error) 2695 { 2696 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2697 struct rte_flow *flow; 2698 int ret = 0; 2699 2700 sfc_adapter_lock(sa); 2701 2702 while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) { 2703 if (sa->state == SFC_ADAPTER_STARTED) { 2704 int rc; 2705 2706 rc = sfc_flow_remove(sa, flow, error); 2707 if (rc != 0) 2708 ret = rc; 2709 } 2710 2711 TAILQ_REMOVE(&sa->flow_list, flow, entries); 2712 sfc_flow_free(sa, flow); 2713 } 2714 2715 sfc_adapter_unlock(sa); 2716 2717 return -ret; 2718 } 2719 2720 static int 2721 sfc_flow_isolate(struct rte_eth_dev *dev, int enable, 2722 struct rte_flow_error *error) 2723 { 2724 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2725 int ret = 0; 2726 2727 sfc_adapter_lock(sa); 2728 if (sa->state != SFC_ADAPTER_INITIALIZED) { 2729 rte_flow_error_set(error, EBUSY, 2730 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2731 NULL, "please close the port first"); 2732 ret = -rte_errno; 2733 } else { 2734 sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE; 2735 } 2736 sfc_adapter_unlock(sa); 2737 2738 return ret; 2739 } 2740 2741 const struct rte_flow_ops sfc_flow_ops = { 2742 .validate = sfc_flow_validate, 2743 .create = sfc_flow_create, 2744 .destroy = sfc_flow_destroy, 2745 .flush = sfc_flow_flush, 2746 .query = NULL, 2747 .isolate = sfc_flow_isolate, 2748 }; 2749 2750 void 2751 sfc_flow_init(struct sfc_adapter *sa) 2752 { 2753 SFC_ASSERT(sfc_adapter_is_locked(sa)); 2754 2755 TAILQ_INIT(&sa->flow_list); 2756 } 2757 2758 void 2759 sfc_flow_fini(struct sfc_adapter *sa) 2760 { 2761 struct rte_flow *flow; 2762 2763 SFC_ASSERT(sfc_adapter_is_locked(sa)); 2764 2765 while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) { 2766 TAILQ_REMOVE(&sa->flow_list, flow, entries); 2767 sfc_flow_free(sa, flow); 2768 } 2769 } 2770 2771 void 2772 sfc_flow_stop(struct sfc_adapter *sa) 2773 { 2774 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); 2775 struct sfc_rss *rss = &sas->rss; 2776 struct rte_flow *flow; 2777 2778 SFC_ASSERT(sfc_adapter_is_locked(sa)); 2779 2780 TAILQ_FOREACH(flow, &sa->flow_list, entries) 2781 sfc_flow_remove(sa, flow, NULL); 2782 2783 if (rss->dummy_rss_context != EFX_RSS_CONTEXT_DEFAULT) { 2784 efx_rx_scale_context_free(sa->nic, rss->dummy_rss_context); 2785 rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT; 2786 } 2787 } 2788 2789 int 2790 sfc_flow_start(struct sfc_adapter *sa) 2791 { 2792 struct rte_flow *flow; 2793 int rc = 0; 2794 2795 sfc_log_init(sa, "entry"); 2796 2797 SFC_ASSERT(sfc_adapter_is_locked(sa)); 2798 2799 TAILQ_FOREACH(flow, &sa->flow_list, entries) { 2800 rc = sfc_flow_insert(sa, flow, NULL); 2801 if (rc != 0) 2802 goto fail_bad_flow; 2803 } 2804 2805 sfc_log_init(sa, "done"); 2806 2807 fail_bad_flow: 2808 return rc; 2809 } 2810