/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdbool.h>

#include <rte_bitops.h>
#include <rte_common.h>
#include <rte_vxlan.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_mae_counter.h"
#include "sfc_log.h"
#include "sfc_switch.h"
#include "sfc_service.h"

static int
sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
			    efx_mport_sel_t *mportp)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

	return efx_mae_mport_by_pcie_function(encp->enc_pf, encp->enc_vf,
					      mportp);
}

static int
sfc_mae_counter_registry_init(struct sfc_mae_counter_registry *registry,
			      uint32_t nb_counters_max)
{
	return sfc_mae_counters_init(&registry->counters, nb_counters_max);
}

static void
sfc_mae_counter_registry_fini(struct sfc_mae_counter_registry *registry)
{
	sfc_mae_counters_fini(&registry->counters);
}

int
sfc_mae_attach(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_mae_switch_port_request switch_port_request = {0};
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	efx_mport_sel_t entity_mport;
	struct sfc_mae *mae = &sa->mae;
	struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
	efx_mae_limits_t limits;
	int rc;

	sfc_log_init(sa, "entry");

	if (!encp->enc_mae_supported) {
		mae->status = SFC_MAE_STATUS_UNSUPPORTED;
		return 0;
	}

	sfc_log_init(sa, "init MAE");
	rc = efx_mae_init(sa->nic);
	if (rc != 0)
		goto fail_mae_init;

	sfc_log_init(sa, "get MAE limits");
	rc = efx_mae_get_limits(sa->nic, &limits);
	if (rc != 0)
		goto fail_mae_get_limits;

	sfc_log_init(sa, "init MAE counter registry");
	rc = sfc_mae_counter_registry_init(&mae->counter_registry,
					   limits.eml_max_n_counters);
	if (rc != 0) {
		sfc_err(sa, "failed to init MAE counters registry for %u entries: %s",
			limits.eml_max_n_counters, rte_strerror(rc));
		goto fail_counter_registry_init;
	}

	sfc_log_init(sa, "assign entity MPORT");
	rc = sfc_mae_assign_entity_mport(sa, &entity_mport);
	if (rc != 0)
		goto fail_mae_assign_entity_mport;

	sfc_log_init(sa, "assign RTE switch domain");
	rc = sfc_mae_assign_switch_domain(sa, &mae->switch_domain_id);
	if (rc != 0)
		goto fail_mae_assign_switch_domain;

	sfc_log_init(sa, "assign RTE switch port");
	switch_port_request.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
	switch_port_request.entity_mportp = &entity_mport;
	/*
	 * As of now, the driver does not support representors, so
	 * RTE ethdev MPORT simply matches that of the entity.
	 */
	switch_port_request.ethdev_mportp = &entity_mport;
	switch_port_request.ethdev_port_id = sas->port_id;
	rc = sfc_mae_assign_switch_port(mae->switch_domain_id,
					&switch_port_request,
					&mae->switch_port_id);
	if (rc != 0)
		goto fail_mae_assign_switch_port;

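	/*
	 * An encap. header under construction is kept in this bounce buffer
	 * before it is copied into a driver-level entry by
	 * sfc_mae_encap_header_add().
	 */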
	sfc_log_init(sa, "allocate encap. header bounce buffer");
	bounce_eh->buf_size = limits.eml_encap_header_size_limit;
	bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh",
				    bounce_eh->buf_size, 0);
	if (bounce_eh->buf == NULL) {
		rc = ENOMEM;
		goto fail_mae_alloc_bounce_eh;
	}

	mae->status = SFC_MAE_STATUS_SUPPORTED;
	mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
	mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
	mae->encap_types_supported = limits.eml_encap_types_supported;
	TAILQ_INIT(&mae->outer_rules);
	TAILQ_INIT(&mae->encap_headers);
	TAILQ_INIT(&mae->action_sets);

	sfc_log_init(sa, "done");

	return 0;

fail_mae_alloc_bounce_eh:
fail_mae_assign_switch_port:
fail_mae_assign_switch_domain:
fail_mae_assign_entity_mport:
	sfc_mae_counter_registry_fini(&mae->counter_registry);

fail_counter_registry_init:
fail_mae_get_limits:
	efx_mae_fini(sa->nic);

fail_mae_init:
	sfc_log_init(sa, "failed %d", rc);

	return rc;
}

void
sfc_mae_detach(struct sfc_adapter *sa)
{
	struct sfc_mae *mae = &sa->mae;
	enum sfc_mae_status status_prev = mae->status;

	sfc_log_init(sa, "entry");

	mae->nb_action_rule_prios_max = 0;
	mae->status = SFC_MAE_STATUS_UNKNOWN;

	if (status_prev != SFC_MAE_STATUS_SUPPORTED)
		return;

	rte_free(mae->bounce_eh.buf);
	sfc_mae_counter_registry_fini(&mae->counter_registry);

	efx_mae_fini(sa->nic);

	sfc_log_init(sa, "done");
}

static struct sfc_mae_outer_rule *
sfc_mae_outer_rule_attach(struct sfc_adapter *sa,
			  const efx_mae_match_spec_t *match_spec,
			  efx_tunnel_protocol_t encap_type)
{
	struct sfc_mae_outer_rule *rule;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(rule, &mae->outer_rules, entries) {
		if (efx_mae_match_specs_equal(rule->match_spec, match_spec) &&
		    rule->encap_type == encap_type) {
			sfc_dbg(sa, "attaching to outer_rule=%p", rule);
			++(rule->refcnt);
			return rule;
		}
	}

	return NULL;
}

static int
sfc_mae_outer_rule_add(struct sfc_adapter *sa,
		       efx_mae_match_spec_t *match_spec,
		       efx_tunnel_protocol_t encap_type,
		       struct sfc_mae_outer_rule **rulep)
{
	struct sfc_mae_outer_rule *rule;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	rule = rte_zmalloc("sfc_mae_outer_rule", sizeof(*rule), 0);
	if (rule == NULL)
		return ENOMEM;

	rule->refcnt = 1;
	rule->match_spec = match_spec;
	rule->encap_type = encap_type;

	rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID;

	TAILQ_INSERT_TAIL(&mae->outer_rules, rule, entries);

	*rulep = rule;

	sfc_dbg(sa, "added outer_rule=%p", rule);

	return 0;
}

static void
sfc_mae_outer_rule_del(struct sfc_adapter *sa,
		       struct sfc_mae_outer_rule *rule)
{
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(rule->refcnt != 0);

	--(rule->refcnt);

	if (rule->refcnt != 0)
		return;

	if (rule->fw_rsrc.rule_id.id != EFX_MAE_RSRC_ID_INVALID ||
	    rule->fw_rsrc.refcnt != 0) {
		sfc_err(sa, "deleting outer_rule=%p abandons its FW resource: OR_ID=0x%08x, refcnt=%u",
			rule, rule->fw_rsrc.rule_id.id, rule->fw_rsrc.refcnt);
	}

	efx_mae_match_spec_fini(sa->nic, rule->match_spec);

	TAILQ_REMOVE(&mae->outer_rules, rule, entries);
	rte_free(rule);

	sfc_dbg(sa, "deleted outer_rule=%p", rule);
}

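/*
 * Driver-level outer rule, encap. header and action set entries carry two
 * reference counts: 'refcnt' tracks users of the entry itself, whilst
 * 'fw_rsrc.refcnt' tracks how many times the entry has been enabled, that
 * is, how many users currently share the allocated FW resource.
 */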
static int
sfc_mae_outer_rule_enable(struct sfc_adapter *sa,
			  struct sfc_mae_outer_rule *rule,
			  efx_mae_match_spec_t *match_spec_action)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (fw_rsrc->refcnt == 0) {
		SFC_ASSERT(fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
		SFC_ASSERT(rule->match_spec != NULL);

		rc = efx_mae_outer_rule_insert(sa->nic, rule->match_spec,
					       rule->encap_type,
					       &fw_rsrc->rule_id);
		if (rc != 0) {
			sfc_err(sa, "failed to enable outer_rule=%p: %s",
				rule, strerror(rc));
			return rc;
		}
	}

	rc = efx_mae_match_spec_outer_rule_id_set(match_spec_action,
						  &fw_rsrc->rule_id);
	if (rc != 0) {
		if (fw_rsrc->refcnt == 0) {
			(void)efx_mae_outer_rule_remove(sa->nic,
							&fw_rsrc->rule_id);
			fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
		}

		sfc_err(sa, "can't match on outer rule ID: %s", strerror(rc));

		return rc;
	}

	if (fw_rsrc->refcnt == 0) {
		sfc_dbg(sa, "enabled outer_rule=%p: OR_ID=0x%08x",
			rule, fw_rsrc->rule_id.id);
	}

	++(fw_rsrc->refcnt);

	return 0;
}

static void
sfc_mae_outer_rule_disable(struct sfc_adapter *sa,
			   struct sfc_mae_outer_rule *rule)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID ||
	    fw_rsrc->refcnt == 0) {
		sfc_err(sa, "failed to disable outer_rule=%p: already disabled; OR_ID=0x%08x, refcnt=%u",
			rule, fw_rsrc->rule_id.id, fw_rsrc->refcnt);
		return;
	}

	if (fw_rsrc->refcnt == 1) {
		rc = efx_mae_outer_rule_remove(sa->nic, &fw_rsrc->rule_id);
		if (rc == 0) {
			sfc_dbg(sa, "disabled outer_rule=%p with OR_ID=0x%08x",
				rule, fw_rsrc->rule_id.id);
		} else {
			sfc_err(sa, "failed to disable outer_rule=%p with OR_ID=0x%08x: %s",
				rule, fw_rsrc->rule_id.id, strerror(rc));
		}
		fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
	}

	--(fw_rsrc->refcnt);
}

static struct sfc_mae_encap_header *
sfc_mae_encap_header_attach(struct sfc_adapter *sa,
			    const struct sfc_mae_bounce_eh *bounce_eh)
{
	struct sfc_mae_encap_header *encap_header;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(encap_header, &mae->encap_headers, entries) {
		if (encap_header->size == bounce_eh->size &&
		    memcmp(encap_header->buf, bounce_eh->buf,
			   bounce_eh->size) == 0) {
			sfc_dbg(sa, "attaching to encap_header=%p",
				encap_header);
			++(encap_header->refcnt);
			return encap_header;
		}
	}

	return NULL;
}

static int
sfc_mae_encap_header_add(struct sfc_adapter *sa,
			 const struct sfc_mae_bounce_eh *bounce_eh,
			 struct sfc_mae_encap_header **encap_headerp)
{
	struct sfc_mae_encap_header *encap_header;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	encap_header = rte_zmalloc("sfc_mae_encap_header",
				   sizeof(*encap_header), 0);
	if (encap_header == NULL)
		return ENOMEM;

	encap_header->size = bounce_eh->size;

	encap_header->buf = rte_malloc("sfc_mae_encap_header_buf",
				       encap_header->size, 0);
	if (encap_header->buf == NULL) {
		rte_free(encap_header);
		return ENOMEM;
	}

	rte_memcpy(encap_header->buf, bounce_eh->buf, bounce_eh->size);

	encap_header->refcnt = 1;
	encap_header->type = bounce_eh->type;
	encap_header->fw_rsrc.eh_id.id = EFX_MAE_RSRC_ID_INVALID;

	TAILQ_INSERT_TAIL(&mae->encap_headers, encap_header, entries);

	*encap_headerp = encap_header;

	sfc_dbg(sa, "added encap_header=%p", encap_header);

	return 0;
}

static void
sfc_mae_encap_header_del(struct sfc_adapter *sa,
			 struct sfc_mae_encap_header *encap_header)
{
	struct sfc_mae *mae = &sa->mae;

	if (encap_header == NULL)
		return;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(encap_header->refcnt != 0);

	--(encap_header->refcnt);

	if (encap_header->refcnt != 0)
		return;

	if (encap_header->fw_rsrc.eh_id.id != EFX_MAE_RSRC_ID_INVALID ||
	    encap_header->fw_rsrc.refcnt != 0) {
		sfc_err(sa, "deleting encap_header=%p abandons its FW resource: EH_ID=0x%08x, refcnt=%u",
			encap_header, encap_header->fw_rsrc.eh_id.id,
			encap_header->fw_rsrc.refcnt);
	}

	TAILQ_REMOVE(&mae->encap_headers, encap_header, entries);
	rte_free(encap_header->buf);
	rte_free(encap_header);

	sfc_dbg(sa, "deleted encap_header=%p", encap_header);
}

static int
sfc_mae_encap_header_enable(struct sfc_adapter *sa,
			    struct sfc_mae_encap_header *encap_header,
			    efx_mae_actions_t *action_set_spec)
{
	struct sfc_mae_fw_rsrc *fw_rsrc;
	int rc;

	if (encap_header == NULL)
		return 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	fw_rsrc = &encap_header->fw_rsrc;

	if (fw_rsrc->refcnt == 0) {
		SFC_ASSERT(fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID);
		SFC_ASSERT(encap_header->buf != NULL);
		SFC_ASSERT(encap_header->size != 0);

		rc = efx_mae_encap_header_alloc(sa->nic, encap_header->type,
						encap_header->buf,
						encap_header->size,
						&fw_rsrc->eh_id);
		if (rc != 0) {
			sfc_err(sa, "failed to enable encap_header=%p: %s",
				encap_header, strerror(rc));
			return rc;
		}
	}

	rc = efx_mae_action_set_fill_in_eh_id(action_set_spec,
					      &fw_rsrc->eh_id);
	if (rc != 0) {
		if (fw_rsrc->refcnt == 0) {
			(void)efx_mae_encap_header_free(sa->nic,
							&fw_rsrc->eh_id);
			fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
		}

		sfc_err(sa, "can't fill in encap. header ID: %s", strerror(rc));

		return rc;
	}

	if (fw_rsrc->refcnt == 0) {
		sfc_dbg(sa, "enabled encap_header=%p: EH_ID=0x%08x",
			encap_header, fw_rsrc->eh_id.id);
	}

	++(fw_rsrc->refcnt);

	return 0;
}

static void
sfc_mae_encap_header_disable(struct sfc_adapter *sa,
			     struct sfc_mae_encap_header *encap_header)
{
	struct sfc_mae_fw_rsrc *fw_rsrc;
	int rc;

	if (encap_header == NULL)
		return;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	fw_rsrc = &encap_header->fw_rsrc;

	if (fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID ||
	    fw_rsrc->refcnt == 0) {
		sfc_err(sa, "failed to disable encap_header=%p: already disabled; EH_ID=0x%08x, refcnt=%u",
			encap_header, fw_rsrc->eh_id.id, fw_rsrc->refcnt);
		return;
	}

	if (fw_rsrc->refcnt == 1) {
		rc = efx_mae_encap_header_free(sa->nic, &fw_rsrc->eh_id);
		if (rc == 0) {
			sfc_dbg(sa, "disabled encap_header=%p with EH_ID=0x%08x",
				encap_header, fw_rsrc->eh_id.id);
		} else {
			sfc_err(sa, "failed to disable encap_header=%p with EH_ID=0x%08x: %s",
				encap_header, fw_rsrc->eh_id.id, strerror(rc));
		}
		fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
	}

	--(fw_rsrc->refcnt);
}

static int
sfc_mae_counters_enable(struct sfc_adapter *sa,
			struct sfc_mae_counter_id *counters,
			unsigned int n_counters,
			efx_mae_actions_t *action_set_spec)
{
	int rc;

	sfc_log_init(sa, "entry");

	if (n_counters == 0) {
		sfc_log_init(sa, "no counters - skip");
		return 0;
	}

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(n_counters == 1);

	rc = sfc_mae_counter_enable(sa, &counters[0]);
	if (rc != 0) {
		sfc_err(sa, "failed to enable MAE counter %u: %s",
			counters[0].mae_id.id, rte_strerror(rc));
		goto fail_counter_add;
	}

	rc = efx_mae_action_set_fill_in_counter_id(action_set_spec,
						   &counters[0].mae_id);
	if (rc != 0) {
		sfc_err(sa, "failed to fill in MAE counter %u in action set: %s",
			counters[0].mae_id.id, rte_strerror(rc));
		goto fail_fill_in_id;
	}

	return 0;

fail_fill_in_id:
	(void)sfc_mae_counter_disable(sa, &counters[0]);

fail_counter_add:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));
	return rc;
}

static int
sfc_mae_counters_disable(struct sfc_adapter *sa,
			 struct sfc_mae_counter_id *counters,
			 unsigned int n_counters)
{
	if (n_counters == 0)
		return 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(n_counters == 1);

	if (counters[0].mae_id.id == EFX_MAE_RSRC_ID_INVALID) {
		sfc_err(sa, "failed to disable: already disabled");
		return EALREADY;
	}

	return sfc_mae_counter_disable(sa, &counters[0]);
}

static struct sfc_mae_action_set *
sfc_mae_action_set_attach(struct sfc_adapter *sa,
			  const struct sfc_mae_encap_header *encap_header,
			  unsigned int n_count,
			  const efx_mae_actions_t *spec)
{
	struct sfc_mae_action_set *action_set;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
		/*
		 * Shared counters are not supported, hence action sets with
		 * COUNT are not attachable.
		 */
		if (action_set->encap_header == encap_header &&
		    n_count == 0 &&
		    efx_mae_action_set_specs_equal(action_set->spec, spec)) {
			sfc_dbg(sa, "attaching to action_set=%p", action_set);
			++(action_set->refcnt);
			return action_set;
		}
	}

	return NULL;
}

static int
sfc_mae_action_set_add(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       efx_mae_actions_t *spec,
		       struct sfc_mae_encap_header *encap_header,
		       unsigned int n_counters,
		       struct sfc_mae_action_set **action_setp)
{
	struct sfc_mae_action_set *action_set;
	struct sfc_mae *mae = &sa->mae;
	unsigned int i;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
	if (action_set == NULL) {
		sfc_err(sa, "failed to alloc action set");
		return ENOMEM;
	}

	if (n_counters > 0) {
		const struct rte_flow_action *action;

		action_set->counters = rte_malloc("sfc_mae_counter_ids",
			sizeof(action_set->counters[0]) * n_counters, 0);
		if (action_set->counters == NULL) {
			rte_free(action_set);
			sfc_err(sa, "failed to alloc counters");
			return ENOMEM;
		}

		for (action = actions, i = 0;
		     action->type != RTE_FLOW_ACTION_TYPE_END && i < n_counters;
		     ++action) {
			const struct rte_flow_action_count *conf;

			if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
				continue;

			conf = action->conf;

			action_set->counters[i].mae_id.id =
				EFX_MAE_RSRC_ID_INVALID;
			action_set->counters[i].rte_id = conf->id;
			i++;
		}
		action_set->n_counters = n_counters;
	}

	action_set->refcnt = 1;
	action_set->spec = spec;
	action_set->encap_header = encap_header;

	action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;

	TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);

	*action_setp = action_set;

	sfc_dbg(sa, "added action_set=%p", action_set);

	return 0;
}

static void
sfc_mae_action_set_del(struct sfc_adapter *sa,
		       struct sfc_mae_action_set *action_set)
{
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(action_set->refcnt != 0);

	--(action_set->refcnt);

	if (action_set->refcnt != 0)
		return;

	if (action_set->fw_rsrc.aset_id.id != EFX_MAE_RSRC_ID_INVALID ||
	    action_set->fw_rsrc.refcnt != 0) {
		sfc_err(sa, "deleting action_set=%p abandons its FW resource: AS_ID=0x%08x, refcnt=%u",
			action_set, action_set->fw_rsrc.aset_id.id,
			action_set->fw_rsrc.refcnt);
	}

	efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
	sfc_mae_encap_header_del(sa, action_set->encap_header);
	if (action_set->n_counters > 0) {
		SFC_ASSERT(action_set->n_counters == 1);
		SFC_ASSERT(action_set->counters[0].mae_id.id ==
			   EFX_MAE_RSRC_ID_INVALID);
		rte_free(action_set->counters);
	}
	TAILQ_REMOVE(&mae->action_sets, action_set, entries);
	rte_free(action_set);

	sfc_dbg(sa, "deleted action_set=%p", action_set);
}

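/*
 * Enabling an action set allocates its dependencies in a strict order:
 * the encap. header first, then the counters, then the action set itself.
 * Disabling tears the FW resources down in the reverse order.
 */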
static int
sfc_mae_action_set_enable(struct sfc_adapter *sa,
			  struct sfc_mae_action_set *action_set)
{
	struct sfc_mae_encap_header *encap_header = action_set->encap_header;
	struct sfc_mae_counter_id *counters = action_set->counters;
	struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (fw_rsrc->refcnt == 0) {
		SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
		SFC_ASSERT(action_set->spec != NULL);

		rc = sfc_mae_encap_header_enable(sa, encap_header,
						 action_set->spec);
		if (rc != 0)
			return rc;

		rc = sfc_mae_counters_enable(sa, counters,
					     action_set->n_counters,
					     action_set->spec);
		if (rc != 0) {
			sfc_err(sa, "failed to enable %u MAE counters: %s",
				action_set->n_counters, rte_strerror(rc));

			sfc_mae_encap_header_disable(sa, encap_header);
			return rc;
		}

		rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
					      &fw_rsrc->aset_id);
		if (rc != 0) {
			sfc_err(sa, "failed to enable action_set=%p: %s",
				action_set, strerror(rc));

			(void)sfc_mae_counters_disable(sa, counters,
						       action_set->n_counters);
			sfc_mae_encap_header_disable(sa, encap_header);
			return rc;
		}

		sfc_dbg(sa, "enabled action_set=%p: AS_ID=0x%08x",
			action_set, fw_rsrc->aset_id.id);
	}

	++(fw_rsrc->refcnt);

	return 0;
}

static void
sfc_mae_action_set_disable(struct sfc_adapter *sa,
			   struct sfc_mae_action_set *action_set)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID ||
	    fw_rsrc->refcnt == 0) {
		sfc_err(sa, "failed to disable action_set=%p: already disabled; AS_ID=0x%08x, refcnt=%u",
			action_set, fw_rsrc->aset_id.id, fw_rsrc->refcnt);
		return;
	}

	if (fw_rsrc->refcnt == 1) {
		rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
		if (rc == 0) {
			sfc_dbg(sa, "disabled action_set=%p with AS_ID=0x%08x",
				action_set, fw_rsrc->aset_id.id);
		} else {
			sfc_err(sa, "failed to disable action_set=%p with AS_ID=0x%08x: %s",
				action_set, fw_rsrc->aset_id.id, strerror(rc));
		}
		fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;

		rc = sfc_mae_counters_disable(sa, action_set->counters,
					      action_set->n_counters);
		if (rc != 0) {
			sfc_err(sa, "failed to disable %u MAE counters: %s",
				action_set->n_counters, rte_strerror(rc));
		}

		sfc_mae_encap_header_disable(sa, action_set->encap_header);
	}

	--(fw_rsrc->refcnt);
}

void
sfc_mae_flow_cleanup(struct sfc_adapter *sa,
		     struct rte_flow *flow)
{
	struct sfc_flow_spec *spec;
	struct sfc_flow_spec_mae *spec_mae;

	if (flow == NULL)
		return;

	spec = &flow->spec;

	if (spec == NULL)
		return;

	spec_mae = &spec->mae;

	SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);

	if (spec_mae->outer_rule != NULL)
		sfc_mae_outer_rule_del(sa, spec_mae->outer_rule);

	if (spec_mae->action_set != NULL)
		sfc_mae_action_set_del(sa, spec_mae->action_set);

	if (spec_mae->match_spec != NULL)
		efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
}

static int
sfc_mae_set_ethertypes(struct sfc_mae_parse_ctx *ctx)
{
	struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
	const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
	const efx_mae_field_id_t field_ids[] = {
		EFX_MAE_FIELD_VLAN0_PROTO_BE,
		EFX_MAE_FIELD_VLAN1_PROTO_BE,
	};
	const struct sfc_mae_ethertype *et;
	unsigned int i;
	int rc;

	/*
	 * In accordance with RTE flow API convention, the innermost L2
	 * item's "type" ("inner_type") is a L3 EtherType. If there is
	 * no L3 item, it's 0x0000/0x0000.
	 */
	et = &pdata->ethertypes[pdata->nb_vlan_tags];
	rc = efx_mae_match_spec_field_set(ctx->match_spec,
					  fremap[EFX_MAE_FIELD_ETHER_TYPE_BE],
					  sizeof(et->value),
					  (const uint8_t *)&et->value,
					  sizeof(et->mask),
					  (const uint8_t *)&et->mask);
	if (rc != 0)
		return rc;

	/*
	 * sfc_mae_rule_parse_item_vlan() has already made sure
	 * that pdata->nb_vlan_tags does not exceed this figure.
	 */
	RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

	for (i = 0; i < pdata->nb_vlan_tags; ++i) {
		et = &pdata->ethertypes[i];

		rc = efx_mae_match_spec_field_set(ctx->match_spec,
						  fremap[field_ids[i]],
						  sizeof(et->value),
						  (const uint8_t *)&et->value,
						  sizeof(et->mask),
						  (const uint8_t *)&et->mask);
		if (rc != 0)
			return rc;
	}

	return 0;
}

static int
sfc_mae_rule_process_pattern_data(struct sfc_mae_parse_ctx *ctx,
				  struct rte_flow_error *error)
{
	const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
	struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
	struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
	const rte_be16_t supported_tpids[] = {
		/* VLAN standard TPID (always the first element) */
		RTE_BE16(RTE_ETHER_TYPE_VLAN),

		/* Double-tagging TPIDs */
		RTE_BE16(RTE_ETHER_TYPE_QINQ),
		RTE_BE16(RTE_ETHER_TYPE_QINQ1),
		RTE_BE16(RTE_ETHER_TYPE_QINQ2),
		RTE_BE16(RTE_ETHER_TYPE_QINQ3),
	};
	bool enforce_tag_presence[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {0};
	unsigned int nb_supported_tpids = RTE_DIM(supported_tpids);
	unsigned int ethertype_idx;
	const uint8_t *valuep;
	const uint8_t *maskp;
	int rc;

	if (pdata->innermost_ethertype_restriction.mask != 0 &&
	    pdata->nb_vlan_tags < SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
		/*
		 * If a single item VLAN is followed by a L3 item, value
		 * of "type" in item ETH can't be a double-tagging TPID.
		 */
		nb_supported_tpids = 1;
	}

	/*
	 * sfc_mae_rule_parse_item_vlan() has already made sure
	 * that pdata->nb_vlan_tags does not exceed this figure.
	 */
	RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

	for (ethertype_idx = 0;
	     ethertype_idx < pdata->nb_vlan_tags; ++ethertype_idx) {
		rte_be16_t tpid_v = ethertypes[ethertype_idx].value;
		rte_be16_t tpid_m = ethertypes[ethertype_idx].mask;
		unsigned int tpid_idx;

		/*
		 * This loop can have only two iterations. On the second one,
		 * drop outer tag presence enforcement bit because the inner
		 * tag presence automatically assumes that for the outer tag.
		 */
		enforce_tag_presence[0] = B_FALSE;

		if (tpid_m == RTE_BE16(0)) {
			if (pdata->tci_masks[ethertype_idx] == RTE_BE16(0))
				enforce_tag_presence[ethertype_idx] = B_TRUE;

			/* No match on this field, and no value check. */
			nb_supported_tpids = 1;
			continue;
		}

		/* Exact match is supported only. */
		if (tpid_m != RTE_BE16(0xffff)) {
			sfc_err(ctx->sa, "TPID mask must be 0x0 or 0xffff; got 0x%04x",
				rte_be_to_cpu_16(tpid_m));
			rc = EINVAL;
			goto fail;
		}

		for (tpid_idx = pdata->nb_vlan_tags - ethertype_idx - 1;
		     tpid_idx < nb_supported_tpids; ++tpid_idx) {
			if (tpid_v == supported_tpids[tpid_idx])
				break;
		}

		if (tpid_idx == nb_supported_tpids) {
			sfc_err(ctx->sa, "TPID 0x%04x is unsupported",
				rte_be_to_cpu_16(tpid_v));
			rc = EINVAL;
			goto fail;
		}

		nb_supported_tpids = 1;
	}

	if (pdata->innermost_ethertype_restriction.mask == RTE_BE16(0xffff)) {
		struct sfc_mae_ethertype *et = &ethertypes[ethertype_idx];
		rte_be16_t enforced_et;

		enforced_et = pdata->innermost_ethertype_restriction.value;

		if (et->mask == 0) {
			et->mask = RTE_BE16(0xffff);
			et->value = enforced_et;
		} else if (et->mask != RTE_BE16(0xffff) ||
			   et->value != enforced_et) {
			sfc_err(ctx->sa, "L3 EtherType must be 0x0/0x0 or 0x%04x/0xffff; got 0x%04x/0x%04x",
				rte_be_to_cpu_16(enforced_et),
				rte_be_to_cpu_16(et->value),
				rte_be_to_cpu_16(et->mask));
			rc = EINVAL;
			goto fail;
		}
	}

	/*
	 * Now, when the number of VLAN tags is known, set fields
	 * ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO so that the first
	 * one is either a valid L3 EtherType (or 0x0000/0x0000),
	 * and the last two are valid TPIDs (or 0x0000/0x0000).
	 */
	rc = sfc_mae_set_ethertypes(ctx);
	if (rc != 0)
		goto fail;

	if (pdata->l3_next_proto_restriction_mask == 0xff) {
		if (pdata->l3_next_proto_mask == 0) {
			pdata->l3_next_proto_mask = 0xff;
			pdata->l3_next_proto_value =
				pdata->l3_next_proto_restriction_value;
		} else if (pdata->l3_next_proto_mask != 0xff ||
			   pdata->l3_next_proto_value !=
			   pdata->l3_next_proto_restriction_value) {
			sfc_err(ctx->sa, "L3 next protocol must be 0x0/0x0 or 0x%02x/0xff; got 0x%02x/0x%02x",
				pdata->l3_next_proto_restriction_value,
				pdata->l3_next_proto_value,
				pdata->l3_next_proto_mask);
			rc = EINVAL;
			goto fail;
		}
	}

	if (enforce_tag_presence[0] || pdata->has_ovlan_mask) {
		rc = efx_mae_match_spec_bit_set(ctx->match_spec,
						fremap[EFX_MAE_FIELD_HAS_OVLAN],
						enforce_tag_presence[0] ||
						pdata->has_ovlan_value);
		if (rc != 0)
			goto fail;
	}

	if (enforce_tag_presence[1] || pdata->has_ivlan_mask) {
		rc = efx_mae_match_spec_bit_set(ctx->match_spec,
						fremap[EFX_MAE_FIELD_HAS_IVLAN],
						enforce_tag_presence[1] ||
						pdata->has_ivlan_value);
		if (rc != 0)
			goto fail;
	}

	valuep = (const uint8_t *)&pdata->l3_next_proto_value;
	maskp = (const uint8_t *)&pdata->l3_next_proto_mask;
	rc = efx_mae_match_spec_field_set(ctx->match_spec,
					  fremap[EFX_MAE_FIELD_IP_PROTO],
					  sizeof(pdata->l3_next_proto_value),
					  valuep,
					  sizeof(pdata->l3_next_proto_mask),
					  maskp);
	if (rc != 0)
		goto fail;

	return 0;

fail:
	return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				  "Failed to process pattern data");
}

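/*
 * The pattern items below (PORT_ID, PHY_PORT, PF, VF) each select the
 * traffic source m-port; only one such item may appear in a pattern,
 * which is what the 'match_mport_set' flag enforces.
 */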
static int
sfc_mae_rule_parse_item_port_id(const struct rte_flow_item *item,
				struct sfc_flow_parse_ctx *ctx,
				struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const struct rte_flow_item_port_id supp_mask = {
		.id = 0xffffffff,
	};
	const void *def_mask = &rte_flow_item_port_id_mask;
	const struct rte_flow_item_port_id *spec = NULL;
	const struct rte_flow_item_port_id *mask = NULL;
	efx_mport_sel_t mport_sel;
	int rc;

	if (ctx_mae->match_mport_set) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't handle multiple traffic source items");
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask, def_mask,
				 sizeof(struct rte_flow_item_port_id), error);
	if (rc != 0)
		return rc;

	if (mask->id != supp_mask.id) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Bad mask in the PORT_ID pattern item");
	}

	/* If "spec" is not set, could be any port ID */
	if (spec == NULL)
		return 0;

	if (spec->id > UINT16_MAX) {
		return rte_flow_error_set(error, EOVERFLOW,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "The port ID is too large");
	}

	rc = sfc_mae_switch_port_by_ethdev(ctx_mae->sa->mae.switch_domain_id,
					   spec->id, &mport_sel);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't find RTE ethdev by the port ID");
	}

	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
					  &mport_sel, NULL);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to set MPORT for the port ID");
	}

	ctx_mae->match_mport_set = B_TRUE;

	return 0;
}

static int
sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
				 struct sfc_flow_parse_ctx *ctx,
				 struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const struct rte_flow_item_phy_port supp_mask = {
		.index = 0xffffffff,
	};
	const void *def_mask = &rte_flow_item_phy_port_mask;
	const struct rte_flow_item_phy_port *spec = NULL;
	const struct rte_flow_item_phy_port *mask = NULL;
	efx_mport_sel_t mport_v;
	int rc;

	if (ctx_mae->match_mport_set) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't handle multiple traffic source items");
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask, def_mask,
				 sizeof(struct rte_flow_item_phy_port), error);
	if (rc != 0)
		return rc;

	if (mask->index != supp_mask.index) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Bad mask in the PHY_PORT pattern item");
	}

	/* If "spec" is not set, could be any physical port */
	if (spec == NULL)
		return 0;

	rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to convert the PHY_PORT index");
	}

	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to set MPORT for the PHY_PORT");
	}

	ctx_mae->match_mport_set = B_TRUE;

	return 0;
}

static int
sfc_mae_rule_parse_item_pf(const struct rte_flow_item *item,
			   struct sfc_flow_parse_ctx *ctx,
			   struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
	efx_mport_sel_t mport_v;
	int rc;

	if (ctx_mae->match_mport_set) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't handle multiple traffic source items");
	}

	rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
					    &mport_v);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to convert the PF ID");
	}

	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to set MPORT for the PF");
	}

	ctx_mae->match_mport_set = B_TRUE;

	return 0;
}

static int
sfc_mae_rule_parse_item_vf(const struct rte_flow_item *item,
			   struct sfc_flow_parse_ctx *ctx,
			   struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
	const struct rte_flow_item_vf supp_mask = {
		.id = 0xffffffff,
	};
	const void *def_mask = &rte_flow_item_vf_mask;
	const struct rte_flow_item_vf *spec = NULL;
	const struct rte_flow_item_vf *mask = NULL;
	efx_mport_sel_t mport_v;
	int rc;

	if (ctx_mae->match_mport_set) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't handle multiple traffic source items");
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask, def_mask,
				 sizeof(struct rte_flow_item_vf), error);
	if (rc != 0)
		return rc;

	if (mask->id != supp_mask.id) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Bad mask in the VF pattern item");
	}

	/*
	 * If "spec" is not set, the item requests any VF related to the
	 * PF of the current DPDK port (but not the PF itself).
	 * Reject this match criterion as unsupported.
	 */
	if (spec == NULL) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Bad spec in the VF pattern item");
	}

	rc = efx_mae_mport_by_pcie_function(encp->enc_pf, spec->id, &mport_v);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to convert the PF + VF IDs");
	}

	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to set MPORT for the PF + VF");
	}

	ctx_mae->match_mport_set = B_TRUE;

	return 0;
}

/*
 * Having this field ID in a field locator means that this
 * locator cannot be used to actually set the field at the
 * time when the corresponding item gets encountered. Such
 * fields get stashed in the parsing context instead. This
 * is required to resolve dependencies between the stashed
 * fields. See sfc_mae_rule_process_pattern_data().
 */
#define SFC_MAE_FIELD_HANDLING_DEFERRED	EFX_MAE_FIELD_NIDS

struct sfc_mae_field_locator {
	efx_mae_field_id_t		field_id;
	size_t				size;
	/* Field offset in the corresponding rte_flow_item_ struct */
	size_t				ofst;
};

static void
sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
			     unsigned int nb_field_locators, void *mask_ptr,
			     size_t mask_size)
{
	unsigned int i;

	memset(mask_ptr, 0, mask_size);

	for (i = 0; i < nb_field_locators; ++i) {
		const struct sfc_mae_field_locator *fl = &field_locators[i];

		SFC_ASSERT(fl->ofst + fl->size <= mask_size);
		memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
	}
}

static int
sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
		   unsigned int nb_field_locators, const uint8_t *spec,
		   const uint8_t *mask, struct sfc_mae_parse_ctx *ctx,
		   struct rte_flow_error *error)
{
	const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
	unsigned int i;
	int rc = 0;

	for (i = 0; i < nb_field_locators; ++i) {
		const struct sfc_mae_field_locator *fl = &field_locators[i];

		if (fl->field_id == SFC_MAE_FIELD_HANDLING_DEFERRED)
			continue;

		rc = efx_mae_match_spec_field_set(ctx->match_spec,
						  fremap[fl->field_id],
						  fl->size, spec + fl->ofst,
						  fl->size, mask + fl->ofst);
		if (rc != 0)
			break;
	}

	if (rc != 0) {
		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "Failed to process item fields");
	}

	return rc;
}

static const struct sfc_mae_field_locator flocs_eth[] = {
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
		offsetof(struct rte_flow_item_eth, type),
	},
	{
		EFX_MAE_FIELD_ETH_DADDR_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
		offsetof(struct rte_flow_item_eth, dst),
	},
	{
		EFX_MAE_FIELD_ETH_SADDR_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
		offsetof(struct rte_flow_item_eth, src),
	},
};

static int
sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
			    struct sfc_flow_parse_ctx *ctx,
			    struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct rte_flow_item_eth supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
				     &supp_mask, sizeof(supp_mask));
	supp_mask.has_vlan = 1;

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_eth_mask,
				 sizeof(struct rte_flow_item_eth), error);
	if (rc != 0)
		return rc;

	if (spec != NULL) {
		struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
		struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
		const struct rte_flow_item_eth *item_spec;
		const struct rte_flow_item_eth *item_mask;

		item_spec = (const struct rte_flow_item_eth *)spec;
		item_mask = (const struct rte_flow_item_eth *)mask;

		/*
		 * Remember various match criteria in the parsing context.
		 * sfc_mae_rule_process_pattern_data() will consider them
		 * altogether when the rest of the items have been parsed.
		 */
		ethertypes[0].value = item_spec->type;
		ethertypes[0].mask = item_mask->type;
		if (item_mask->has_vlan) {
			pdata->has_ovlan_mask = B_TRUE;
			if (item_spec->has_vlan)
				pdata->has_ovlan_value = B_TRUE;
		}
	} else {
		/*
		 * The specification is empty. The overall pattern
		 * validity will be enforced at the end of parsing.
		 * See sfc_mae_rule_process_pattern_data().
		 */
		return 0;
	}

	return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
				  ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_vlan[] = {
	/* Outermost tag */
	{
		EFX_MAE_FIELD_VLAN0_TCI_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
		offsetof(struct rte_flow_item_vlan, tci),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
		offsetof(struct rte_flow_item_vlan, inner_type),
	},

	/* Innermost tag */
	{
		EFX_MAE_FIELD_VLAN1_TCI_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
		offsetof(struct rte_flow_item_vlan, tci),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
		offsetof(struct rte_flow_item_vlan, inner_type),
	},
};

static int
sfc_mae_rule_parse_item_vlan(const struct rte_flow_item *item,
			     struct sfc_flow_parse_ctx *ctx,
			     struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	boolean_t *has_vlan_mp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
		&pdata->has_ovlan_mask,
		&pdata->has_ivlan_mask,
	};
	boolean_t *has_vlan_vp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
		&pdata->has_ovlan_value,
		&pdata->has_ivlan_value,
	};
	boolean_t *cur_tag_presence_bit_mp;
	boolean_t *cur_tag_presence_bit_vp;
	const struct sfc_mae_field_locator *flocs;
	struct rte_flow_item_vlan supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	unsigned int nb_flocs;
	int rc;

	RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

	if (pdata->nb_vlan_tags == SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't match that many VLAN tags");
	}

	cur_tag_presence_bit_mp = has_vlan_mp_by_nb_tags[pdata->nb_vlan_tags];
	cur_tag_presence_bit_vp = has_vlan_vp_by_nb_tags[pdata->nb_vlan_tags];

	if (*cur_tag_presence_bit_mp == B_TRUE &&
	    *cur_tag_presence_bit_vp == B_FALSE) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"The previous item enforces no (more) VLAN, "
				"so the current item (VLAN) must not exist");
	}

	nb_flocs = RTE_DIM(flocs_vlan) / SFC_MAE_MATCH_VLAN_MAX_NTAGS;
	flocs = flocs_vlan + pdata->nb_vlan_tags * nb_flocs;

	sfc_mae_item_build_supp_mask(flocs, nb_flocs,
				     &supp_mask, sizeof(supp_mask));
	/*
	 * This only means that the field is supported by the driver and libefx.
	 * Support on NIC level will be checked when all items have been parsed.
	 */
	supp_mask.has_more_vlan = 1;

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_vlan_mask,
				 sizeof(struct rte_flow_item_vlan), error);
	if (rc != 0)
		return rc;

	if (spec != NULL) {
		struct sfc_mae_ethertype *et = pdata->ethertypes;
		const struct rte_flow_item_vlan *item_spec;
		const struct rte_flow_item_vlan *item_mask;

		item_spec = (const struct rte_flow_item_vlan *)spec;
		item_mask = (const struct rte_flow_item_vlan *)mask;

		/*
		 * Remember various match criteria in the parsing context.
		 * sfc_mae_rule_process_pattern_data() will consider them
		 * altogether when the rest of the items have been parsed.
		 */
		et[pdata->nb_vlan_tags + 1].value = item_spec->inner_type;
		et[pdata->nb_vlan_tags + 1].mask = item_mask->inner_type;
		pdata->tci_masks[pdata->nb_vlan_tags] = item_mask->tci;
		if (item_mask->has_more_vlan) {
			if (pdata->nb_vlan_tags ==
			    SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
				return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ITEM, item,
					"Can't use 'has_more_vlan' in "
					"the second item VLAN");
			}
			pdata->has_ivlan_mask = B_TRUE;
			if (item_spec->has_more_vlan)
				pdata->has_ivlan_value = B_TRUE;
		}

		/* Convert TCI to MAE representation right now. */
		rc = sfc_mae_parse_item(flocs, nb_flocs, spec, mask,
					ctx_mae, error);
		if (rc != 0)
			return rc;
	}

	++(pdata->nb_vlan_tags);

	return 0;
}

static const struct sfc_mae_field_locator flocs_ipv4[] = {
	{
		EFX_MAE_FIELD_SRC_IP4_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.src_addr),
		offsetof(struct rte_flow_item_ipv4, hdr.src_addr),
	},
	{
		EFX_MAE_FIELD_DST_IP4_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.dst_addr),
		offsetof(struct rte_flow_item_ipv4, hdr.dst_addr),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.next_proto_id),
		offsetof(struct rte_flow_item_ipv4, hdr.next_proto_id),
	},
	{
		EFX_MAE_FIELD_IP_TOS,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4,
				 hdr.type_of_service),
		offsetof(struct rte_flow_item_ipv4, hdr.type_of_service),
	},
	{
		EFX_MAE_FIELD_IP_TTL,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.time_to_live),
		offsetof(struct rte_flow_item_ipv4, hdr.time_to_live),
	},
};

static int
sfc_mae_rule_parse_item_ipv4(const struct rte_flow_item *item,
			     struct sfc_flow_parse_ctx *ctx,
			     struct rte_flow_error *error)
{
	rte_be16_t ethertype_ipv4_be = RTE_BE16(RTE_ETHER_TYPE_IPV4);
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_ipv4 supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_ipv4, RTE_DIM(flocs_ipv4),
				     &supp_mask, sizeof(supp_mask));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4), error);
	if (rc != 0)
		return rc;

	pdata->innermost_ethertype_restriction.value = ethertype_ipv4_be;
	pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);

	if (spec != NULL) {
		const struct rte_flow_item_ipv4 *item_spec;
		const struct rte_flow_item_ipv4 *item_mask;

		item_spec = (const struct rte_flow_item_ipv4 *)spec;
		item_mask = (const struct rte_flow_item_ipv4 *)mask;

		pdata->l3_next_proto_value = item_spec->hdr.next_proto_id;
		pdata->l3_next_proto_mask = item_mask->hdr.next_proto_id;
	} else {
		return 0;
	}

	return sfc_mae_parse_item(flocs_ipv4, RTE_DIM(flocs_ipv4), spec, mask,
				  ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_ipv6[] = {
	{
		EFX_MAE_FIELD_SRC_IP6_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.src_addr),
		offsetof(struct rte_flow_item_ipv6, hdr.src_addr),
	},
	{
		EFX_MAE_FIELD_DST_IP6_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.dst_addr),
		offsetof(struct rte_flow_item_ipv6, hdr.dst_addr),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.proto),
		offsetof(struct rte_flow_item_ipv6, hdr.proto),
	},
	{
		EFX_MAE_FIELD_IP_TTL,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.hop_limits),
		offsetof(struct rte_flow_item_ipv6, hdr.hop_limits),
	},
};

static int
sfc_mae_rule_parse_item_ipv6(const struct rte_flow_item *item,
			     struct sfc_flow_parse_ctx *ctx,
			     struct rte_flow_error *error)
{
	rte_be16_t ethertype_ipv6_be = RTE_BE16(RTE_ETHER_TYPE_IPV6);
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const efx_mae_field_id_t *fremap = ctx_mae->field_ids_remap;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_ipv6 supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	rte_be32_t vtc_flow_be;
	uint32_t vtc_flow;
	uint8_t tc_value;
	uint8_t tc_mask;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_ipv6, RTE_DIM(flocs_ipv6),
				     &supp_mask, sizeof(supp_mask));

	vtc_flow_be = RTE_BE32(RTE_IPV6_HDR_TC_MASK);
	memcpy(&supp_mask, &vtc_flow_be, sizeof(vtc_flow_be));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6), error);
	if (rc != 0)
		return rc;

	pdata->innermost_ethertype_restriction.value = ethertype_ipv6_be;
	pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);

	if (spec != NULL) {
		const struct rte_flow_item_ipv6 *item_spec;
		const struct rte_flow_item_ipv6 *item_mask;

		item_spec = (const struct rte_flow_item_ipv6 *)spec;
		item_mask = (const struct rte_flow_item_ipv6 *)mask;

		pdata->l3_next_proto_value = item_spec->hdr.proto;
		pdata->l3_next_proto_mask = item_mask->hdr.proto;
	} else {
		return 0;
	}

	rc = sfc_mae_parse_item(flocs_ipv6, RTE_DIM(flocs_ipv6), spec, mask,
				ctx_mae, error);
	if (rc != 0)
		return rc;

	memcpy(&vtc_flow_be, spec, sizeof(vtc_flow_be));
	vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
	tc_value = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;

	memcpy(&vtc_flow_be, mask, sizeof(vtc_flow_be));
	vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
	tc_mask = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;

	rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
					  fremap[EFX_MAE_FIELD_IP_TOS],
					  sizeof(tc_value), &tc_value,
					  sizeof(tc_mask), &tc_mask);
	if (rc != 0) {
		return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "Failed to process item fields");
	}

	return 0;
}

static const struct sfc_mae_field_locator flocs_tcp[] = {
	{
		EFX_MAE_FIELD_L4_SPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.src_port),
		offsetof(struct rte_flow_item_tcp, hdr.src_port),
	},
	{
		EFX_MAE_FIELD_L4_DPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.dst_port),
		offsetof(struct rte_flow_item_tcp, hdr.dst_port),
	},
	{
		EFX_MAE_FIELD_TCP_FLAGS_BE,
		/*
		 * The values have been picked intentionally since the
		 * target MAE field is oversize (16 bit). This mapping
		 * relies on the fact that the MAE field is big-endian.
		 */
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.data_off) +
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.tcp_flags),
		offsetof(struct rte_flow_item_tcp, hdr.data_off),
	},
};

static int
sfc_mae_rule_parse_item_tcp(const struct rte_flow_item *item,
			    struct sfc_flow_parse_ctx *ctx,
			    struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_tcp supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	/*
	 * When encountered among outermost items, item TCP is invalid.
	 * Check which match specification is being constructed now.
	 */
	if (ctx_mae->match_spec != ctx_mae->match_spec_action) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "TCP in outer frame is invalid");
	}

	sfc_mae_item_build_supp_mask(flocs_tcp, RTE_DIM(flocs_tcp),
				     &supp_mask, sizeof(supp_mask));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_tcp_mask,
				 sizeof(struct rte_flow_item_tcp), error);
	if (rc != 0)
		return rc;

	pdata->l3_next_proto_restriction_value = IPPROTO_TCP;
	pdata->l3_next_proto_restriction_mask = 0xff;

	if (spec == NULL)
		return 0;

	return sfc_mae_parse_item(flocs_tcp, RTE_DIM(flocs_tcp), spec, mask,
				  ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_udp[] = {
	{
		EFX_MAE_FIELD_L4_SPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.src_port),
		offsetof(struct rte_flow_item_udp, hdr.src_port),
	},
	{
		EFX_MAE_FIELD_L4_DPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.dst_port),
		offsetof(struct rte_flow_item_udp, hdr.dst_port),
	},
};

static int
sfc_mae_rule_parse_item_udp(const struct rte_flow_item *item,
			    struct sfc_flow_parse_ctx *ctx,
			    struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_udp supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_udp, RTE_DIM(flocs_udp),
				     &supp_mask, sizeof(supp_mask));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_udp_mask,
				 sizeof(struct rte_flow_item_udp), error);
	if (rc != 0)
		return rc;

	pdata->l3_next_proto_restriction_value = IPPROTO_UDP;
	pdata->l3_next_proto_restriction_mask = 0xff;

	if (spec == NULL)
		return 0;

	return sfc_mae_parse_item(flocs_udp, RTE_DIM(flocs_udp), spec, mask,
				  ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_tunnel[] = {
	{
		/*
		 * The size and offset values are relevant
		 * for Geneve and NVGRE, too.
		 */
		.size = RTE_SIZEOF_FIELD(struct rte_flow_item_vxlan, vni),
		.ofst = offsetof(struct rte_flow_item_vxlan, vni),
	},
};

/*
 * An auxiliary registry which allows using non-encap. field IDs
 * directly when building a match specification of type ACTION.
 *
 * See sfc_mae_rule_parse_pattern() and sfc_mae_rule_parse_item_tunnel().
 */
static const efx_mae_field_id_t field_ids_no_remap[] = {
#define FIELD_ID_NO_REMAP(_field) \
	[EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_##_field

	FIELD_ID_NO_REMAP(ETHER_TYPE_BE),
	FIELD_ID_NO_REMAP(ETH_SADDR_BE),
	FIELD_ID_NO_REMAP(ETH_DADDR_BE),
	FIELD_ID_NO_REMAP(VLAN0_TCI_BE),
	FIELD_ID_NO_REMAP(VLAN0_PROTO_BE),
	FIELD_ID_NO_REMAP(VLAN1_TCI_BE),
	FIELD_ID_NO_REMAP(VLAN1_PROTO_BE),
	FIELD_ID_NO_REMAP(SRC_IP4_BE),
	FIELD_ID_NO_REMAP(DST_IP4_BE),
	FIELD_ID_NO_REMAP(IP_PROTO),
	FIELD_ID_NO_REMAP(IP_TOS),
	FIELD_ID_NO_REMAP(IP_TTL),
	FIELD_ID_NO_REMAP(SRC_IP6_BE),
	FIELD_ID_NO_REMAP(DST_IP6_BE),
	FIELD_ID_NO_REMAP(L4_SPORT_BE),
	FIELD_ID_NO_REMAP(L4_DPORT_BE),
	FIELD_ID_NO_REMAP(TCP_FLAGS_BE),
	FIELD_ID_NO_REMAP(HAS_OVLAN),
	FIELD_ID_NO_REMAP(HAS_IVLAN),

#undef FIELD_ID_NO_REMAP
};

/*
 * An auxiliary registry which allows using "ENC" field IDs
 * when building a match specification of type OUTER.
 *
 * See sfc_mae_rule_encap_parse_init().
 */
static const efx_mae_field_id_t field_ids_remap_to_encap[] = {
#define FIELD_ID_REMAP_TO_ENCAP(_field) \
	[EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_ENC_##_field

	FIELD_ID_REMAP_TO_ENCAP(ETHER_TYPE_BE),
	FIELD_ID_REMAP_TO_ENCAP(ETH_SADDR_BE),
	FIELD_ID_REMAP_TO_ENCAP(ETH_DADDR_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN0_TCI_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN0_PROTO_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN1_TCI_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN1_PROTO_BE),
	FIELD_ID_REMAP_TO_ENCAP(SRC_IP4_BE),
	FIELD_ID_REMAP_TO_ENCAP(DST_IP4_BE),
	FIELD_ID_REMAP_TO_ENCAP(IP_PROTO),
	FIELD_ID_REMAP_TO_ENCAP(IP_TOS),
	FIELD_ID_REMAP_TO_ENCAP(IP_TTL),
	FIELD_ID_REMAP_TO_ENCAP(SRC_IP6_BE),
	FIELD_ID_REMAP_TO_ENCAP(DST_IP6_BE),
	FIELD_ID_REMAP_TO_ENCAP(L4_SPORT_BE),
	FIELD_ID_REMAP_TO_ENCAP(L4_DPORT_BE),
	FIELD_ID_REMAP_TO_ENCAP(HAS_OVLAN),
	FIELD_ID_REMAP_TO_ENCAP(HAS_IVLAN),

#undef FIELD_ID_REMAP_TO_ENCAP
};

static int
sfc_mae_rule_parse_item_tunnel(const struct rte_flow_item *item,
			       struct sfc_flow_parse_ctx *ctx,
			       struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	uint8_t vnet_id_v[sizeof(uint32_t)] = {0};
	uint8_t vnet_id_m[sizeof(uint32_t)] = {0};
	const struct rte_flow_item_vxlan *vxp;
	uint8_t supp_mask[sizeof(uint64_t)];
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	/*
	 * We're about to start processing inner frame items.
	 * Process pattern data that has been deferred so far
	 * and reset pattern data storage.
	 */
	rc = sfc_mae_rule_process_pattern_data(ctx_mae, error);
	if (rc != 0)
		return rc;

	memset(&ctx_mae->pattern_data, 0, sizeof(ctx_mae->pattern_data));

	sfc_mae_item_build_supp_mask(flocs_tunnel, RTE_DIM(flocs_tunnel),
				     &supp_mask, sizeof(supp_mask));

	/*
	 * This tunnel item was preliminarily detected by
	 * sfc_mae_rule_encap_parse_init(). Default mask
	 * was also picked by that helper. Use it here.
	 */
	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 ctx_mae->tunnel_def_mask,
				 ctx_mae->tunnel_def_mask_size, error);
	if (rc != 0)
		return rc;

	/*
	 * This item and later ones comprise a
	 * match specification of type ACTION.
1953 */ 1954 ctx_mae->match_spec = ctx_mae->match_spec_action; 1955 1956 /* This item and later ones use non-encap. EFX MAE field IDs. */ 1957 ctx_mae->field_ids_remap = field_ids_no_remap; 1958 1959 if (spec == NULL) 1960 return 0; 1961 1962 /* 1963 * Field EFX_MAE_FIELD_ENC_VNET_ID_BE is a 32-bit one. 1964 * Copy 24-bit VNI, which is BE, at offset 1 in it. 1965 * The extra byte is 0 both in the mask and in the value. 1966 */ 1967 vxp = (const struct rte_flow_item_vxlan *)spec; 1968 memcpy(vnet_id_v + 1, &vxp->vni, sizeof(vxp->vni)); 1969 1970 vxp = (const struct rte_flow_item_vxlan *)mask; 1971 memcpy(vnet_id_m + 1, &vxp->vni, sizeof(vxp->vni)); 1972 1973 rc = efx_mae_match_spec_field_set(ctx_mae->match_spec, 1974 EFX_MAE_FIELD_ENC_VNET_ID_BE, 1975 sizeof(vnet_id_v), vnet_id_v, 1976 sizeof(vnet_id_m), vnet_id_m); 1977 if (rc != 0) { 1978 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, 1979 item, "Failed to set VXLAN VNI"); 1980 } 1981 1982 return rc; 1983 } 1984 1985 static const struct sfc_flow_item sfc_flow_items[] = { 1986 { 1987 .type = RTE_FLOW_ITEM_TYPE_PORT_ID, 1988 .name = "PORT_ID", 1989 /* 1990 * In terms of RTE flow, this item is a META one, 1991 * and its position in the pattern is don't care. 1992 */ 1993 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER, 1994 .layer = SFC_FLOW_ITEM_ANY_LAYER, 1995 .ctx_type = SFC_FLOW_PARSE_CTX_MAE, 1996 .parse = sfc_mae_rule_parse_item_port_id, 1997 }, 1998 { 1999 .type = RTE_FLOW_ITEM_TYPE_PHY_PORT, 2000 .name = "PHY_PORT", 2001 /* 2002 * In terms of RTE flow, this item is a META one, 2003 * and its position in the pattern is don't care. 2004 */ 2005 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER, 2006 .layer = SFC_FLOW_ITEM_ANY_LAYER, 2007 .ctx_type = SFC_FLOW_PARSE_CTX_MAE, 2008 .parse = sfc_mae_rule_parse_item_phy_port, 2009 }, 2010 { 2011 .type = RTE_FLOW_ITEM_TYPE_PF, 2012 .name = "PF", 2013 /* 2014 * In terms of RTE flow, this item is a META one, 2015 * and its position in the pattern is don't care. 2016 */ 2017 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER, 2018 .layer = SFC_FLOW_ITEM_ANY_LAYER, 2019 .ctx_type = SFC_FLOW_PARSE_CTX_MAE, 2020 .parse = sfc_mae_rule_parse_item_pf, 2021 }, 2022 { 2023 .type = RTE_FLOW_ITEM_TYPE_VF, 2024 .name = "VF", 2025 /* 2026 * In terms of RTE flow, this item is a META one, 2027 * and its position in the pattern is don't care. 
2028 */ 2029 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER, 2030 .layer = SFC_FLOW_ITEM_ANY_LAYER, 2031 .ctx_type = SFC_FLOW_PARSE_CTX_MAE, 2032 .parse = sfc_mae_rule_parse_item_vf, 2033 }, 2034 { 2035 .type = RTE_FLOW_ITEM_TYPE_ETH, 2036 .name = "ETH", 2037 .prev_layer = SFC_FLOW_ITEM_START_LAYER, 2038 .layer = SFC_FLOW_ITEM_L2, 2039 .ctx_type = SFC_FLOW_PARSE_CTX_MAE, 2040 .parse = sfc_mae_rule_parse_item_eth, 2041 }, 2042 { 2043 .type = RTE_FLOW_ITEM_TYPE_VLAN, 2044 .name = "VLAN", 2045 .prev_layer = SFC_FLOW_ITEM_L2, 2046 .layer = SFC_FLOW_ITEM_L2, 2047 .ctx_type = SFC_FLOW_PARSE_CTX_MAE, 2048 .parse = sfc_mae_rule_parse_item_vlan, 2049 }, 2050 { 2051 .type = RTE_FLOW_ITEM_TYPE_IPV4, 2052 .name = "IPV4", 2053 .prev_layer = SFC_FLOW_ITEM_L2, 2054 .layer = SFC_FLOW_ITEM_L3, 2055 .ctx_type = SFC_FLOW_PARSE_CTX_MAE, 2056 .parse = sfc_mae_rule_parse_item_ipv4, 2057 }, 2058 { 2059 .type = RTE_FLOW_ITEM_TYPE_IPV6, 2060 .name = "IPV6", 2061 .prev_layer = SFC_FLOW_ITEM_L2, 2062 .layer = SFC_FLOW_ITEM_L3, 2063 .ctx_type = SFC_FLOW_PARSE_CTX_MAE, 2064 .parse = sfc_mae_rule_parse_item_ipv6, 2065 }, 2066 { 2067 .type = RTE_FLOW_ITEM_TYPE_TCP, 2068 .name = "TCP", 2069 .prev_layer = SFC_FLOW_ITEM_L3, 2070 .layer = SFC_FLOW_ITEM_L4, 2071 .ctx_type = SFC_FLOW_PARSE_CTX_MAE, 2072 .parse = sfc_mae_rule_parse_item_tcp, 2073 }, 2074 { 2075 .type = RTE_FLOW_ITEM_TYPE_UDP, 2076 .name = "UDP", 2077 .prev_layer = SFC_FLOW_ITEM_L3, 2078 .layer = SFC_FLOW_ITEM_L4, 2079 .ctx_type = SFC_FLOW_PARSE_CTX_MAE, 2080 .parse = sfc_mae_rule_parse_item_udp, 2081 }, 2082 { 2083 .type = RTE_FLOW_ITEM_TYPE_VXLAN, 2084 .name = "VXLAN", 2085 .prev_layer = SFC_FLOW_ITEM_L4, 2086 .layer = SFC_FLOW_ITEM_START_LAYER, 2087 .ctx_type = SFC_FLOW_PARSE_CTX_MAE, 2088 .parse = sfc_mae_rule_parse_item_tunnel, 2089 }, 2090 { 2091 .type = RTE_FLOW_ITEM_TYPE_GENEVE, 2092 .name = "GENEVE", 2093 .prev_layer = SFC_FLOW_ITEM_L4, 2094 .layer = SFC_FLOW_ITEM_START_LAYER, 2095 .ctx_type = SFC_FLOW_PARSE_CTX_MAE, 2096 .parse = sfc_mae_rule_parse_item_tunnel, 2097 }, 2098 { 2099 .type = RTE_FLOW_ITEM_TYPE_NVGRE, 2100 .name = "NVGRE", 2101 .prev_layer = SFC_FLOW_ITEM_L3, 2102 .layer = SFC_FLOW_ITEM_START_LAYER, 2103 .ctx_type = SFC_FLOW_PARSE_CTX_MAE, 2104 .parse = sfc_mae_rule_parse_item_tunnel, 2105 }, 2106 }; 2107 2108 static int 2109 sfc_mae_rule_process_outer(struct sfc_adapter *sa, 2110 struct sfc_mae_parse_ctx *ctx, 2111 struct sfc_mae_outer_rule **rulep, 2112 struct rte_flow_error *error) 2113 { 2114 efx_mae_rule_id_t invalid_rule_id = { .id = EFX_MAE_RSRC_ID_INVALID }; 2115 int rc; 2116 2117 if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE) { 2118 *rulep = NULL; 2119 goto no_or_id; 2120 } 2121 2122 SFC_ASSERT(ctx->match_spec_outer != NULL); 2123 2124 if (!efx_mae_match_spec_is_valid(sa->nic, ctx->match_spec_outer)) { 2125 return rte_flow_error_set(error, ENOTSUP, 2126 RTE_FLOW_ERROR_TYPE_ITEM, NULL, 2127 "Inconsistent pattern (outer)"); 2128 } 2129 2130 *rulep = sfc_mae_outer_rule_attach(sa, ctx->match_spec_outer, 2131 ctx->encap_type); 2132 if (*rulep != NULL) { 2133 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer); 2134 } else { 2135 rc = sfc_mae_outer_rule_add(sa, ctx->match_spec_outer, 2136 ctx->encap_type, rulep); 2137 if (rc != 0) { 2138 return rte_flow_error_set(error, rc, 2139 RTE_FLOW_ERROR_TYPE_ITEM, NULL, 2140 "Failed to process the pattern"); 2141 } 2142 } 2143 2144 /* The spec has now been tracked by the outer rule entry. 
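 * Drop the context reference so that sfc_mae_rule_encap_parse_fini()
 * does not finalise the specification for a second time.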
*/ 2145 ctx->match_spec_outer = NULL; 2146 2147 no_or_id: 2148 /* 2149 * In MAE, lookup sequence comprises outer parse, outer rule lookup, 2150 * inner parse (when some outer rule is hit) and action rule lookup. 2151 * If the currently processed flow does not come with an outer rule, 2152 * its action rule must be available only for packets which miss in 2153 * outer rule table. Set OR_ID match field to 0xffffffff/0xffffffff 2154 * in the action rule specification; this ensures correct behaviour. 2155 * 2156 * If, on the other hand, this flow does have an outer rule, its ID 2157 * may be unknown at the moment (not yet allocated), but OR_ID mask 2158 * has to be set to 0xffffffff anyway for correct class comparisons. 2159 * When the outer rule has been allocated, this match field will be 2160 * overridden by sfc_mae_outer_rule_enable() to use the right value. 2161 */ 2162 rc = efx_mae_match_spec_outer_rule_id_set(ctx->match_spec_action, 2163 &invalid_rule_id); 2164 if (rc != 0) { 2165 if (*rulep != NULL) 2166 sfc_mae_outer_rule_del(sa, *rulep); 2167 2168 *rulep = NULL; 2169 2170 return rte_flow_error_set(error, rc, 2171 RTE_FLOW_ERROR_TYPE_ITEM, NULL, 2172 "Failed to process the pattern"); 2173 } 2174 2175 return 0; 2176 } 2177 2178 static int 2179 sfc_mae_rule_encap_parse_init(struct sfc_adapter *sa, 2180 const struct rte_flow_item pattern[], 2181 struct sfc_mae_parse_ctx *ctx, 2182 struct rte_flow_error *error) 2183 { 2184 struct sfc_mae *mae = &sa->mae; 2185 int rc; 2186 2187 if (pattern == NULL) { 2188 rte_flow_error_set(error, EINVAL, 2189 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL, 2190 "NULL pattern"); 2191 return -rte_errno; 2192 } 2193 2194 for (;;) { 2195 switch (pattern->type) { 2196 case RTE_FLOW_ITEM_TYPE_VXLAN: 2197 ctx->encap_type = EFX_TUNNEL_PROTOCOL_VXLAN; 2198 ctx->tunnel_def_mask = &rte_flow_item_vxlan_mask; 2199 ctx->tunnel_def_mask_size = 2200 sizeof(rte_flow_item_vxlan_mask); 2201 break; 2202 case RTE_FLOW_ITEM_TYPE_GENEVE: 2203 ctx->encap_type = EFX_TUNNEL_PROTOCOL_GENEVE; 2204 ctx->tunnel_def_mask = &rte_flow_item_geneve_mask; 2205 ctx->tunnel_def_mask_size = 2206 sizeof(rte_flow_item_geneve_mask); 2207 break; 2208 case RTE_FLOW_ITEM_TYPE_NVGRE: 2209 ctx->encap_type = EFX_TUNNEL_PROTOCOL_NVGRE; 2210 ctx->tunnel_def_mask = &rte_flow_item_nvgre_mask; 2211 ctx->tunnel_def_mask_size = 2212 sizeof(rte_flow_item_nvgre_mask); 2213 break; 2214 case RTE_FLOW_ITEM_TYPE_END: 2215 break; 2216 default: 2217 ++pattern; 2218 continue; 2219 }; 2220 2221 break; 2222 } 2223 2224 if (pattern->type == RTE_FLOW_ITEM_TYPE_END) 2225 return 0; 2226 2227 if ((mae->encap_types_supported & (1U << ctx->encap_type)) == 0) { 2228 return rte_flow_error_set(error, ENOTSUP, 2229 RTE_FLOW_ERROR_TYPE_ITEM, 2230 pattern, "Unsupported tunnel item"); 2231 } 2232 2233 if (ctx->priority >= mae->nb_outer_rule_prios_max) { 2234 return rte_flow_error_set(error, ENOTSUP, 2235 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, 2236 NULL, "Unsupported priority level"); 2237 } 2238 2239 rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_OUTER, ctx->priority, 2240 &ctx->match_spec_outer); 2241 if (rc != 0) { 2242 return rte_flow_error_set(error, rc, 2243 RTE_FLOW_ERROR_TYPE_ITEM, pattern, 2244 "Failed to initialise outer rule match specification"); 2245 } 2246 2247 /* Outermost items comprise a match specification of type OUTER. */ 2248 ctx->match_spec = ctx->match_spec_outer; 2249 2250 /* Outermost items use "ENC" EFX MAE field IDs. 
*/ 2251 ctx->field_ids_remap = field_ids_remap_to_encap; 2252 2253 return 0; 2254 } 2255 2256 static void 2257 sfc_mae_rule_encap_parse_fini(struct sfc_adapter *sa, 2258 struct sfc_mae_parse_ctx *ctx) 2259 { 2260 if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE) 2261 return; 2262 2263 if (ctx->match_spec_outer != NULL) 2264 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer); 2265 } 2266 2267 int 2268 sfc_mae_rule_parse_pattern(struct sfc_adapter *sa, 2269 const struct rte_flow_item pattern[], 2270 struct sfc_flow_spec_mae *spec, 2271 struct rte_flow_error *error) 2272 { 2273 struct sfc_mae_parse_ctx ctx_mae; 2274 struct sfc_flow_parse_ctx ctx; 2275 int rc; 2276 2277 memset(&ctx_mae, 0, sizeof(ctx_mae)); 2278 ctx_mae.priority = spec->priority; 2279 ctx_mae.sa = sa; 2280 2281 rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION, 2282 spec->priority, 2283 &ctx_mae.match_spec_action); 2284 if (rc != 0) { 2285 rc = rte_flow_error_set(error, rc, 2286 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2287 "Failed to initialise action rule match specification"); 2288 goto fail_init_match_spec_action; 2289 } 2290 2291 /* 2292 * As a preliminary setting, assume that there is no encapsulation 2293 * in the pattern. That is, pattern items are about to comprise a 2294 * match specification of type ACTION and use non-encap. field IDs. 2295 * 2296 * sfc_mae_rule_encap_parse_init() below may override this. 2297 */ 2298 ctx_mae.encap_type = EFX_TUNNEL_PROTOCOL_NONE; 2299 ctx_mae.match_spec = ctx_mae.match_spec_action; 2300 ctx_mae.field_ids_remap = field_ids_no_remap; 2301 2302 ctx.type = SFC_FLOW_PARSE_CTX_MAE; 2303 ctx.mae = &ctx_mae; 2304 2305 rc = sfc_mae_rule_encap_parse_init(sa, pattern, &ctx_mae, error); 2306 if (rc != 0) 2307 goto fail_encap_parse_init; 2308 2309 rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items), 2310 pattern, &ctx, error); 2311 if (rc != 0) 2312 goto fail_parse_pattern; 2313 2314 rc = sfc_mae_rule_process_pattern_data(&ctx_mae, error); 2315 if (rc != 0) 2316 goto fail_process_pattern_data; 2317 2318 rc = sfc_mae_rule_process_outer(sa, &ctx_mae, &spec->outer_rule, error); 2319 if (rc != 0) 2320 goto fail_process_outer; 2321 2322 if (!efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) { 2323 rc = rte_flow_error_set(error, ENOTSUP, 2324 RTE_FLOW_ERROR_TYPE_ITEM, NULL, 2325 "Inconsistent pattern"); 2326 goto fail_validate_match_spec_action; 2327 } 2328 2329 spec->match_spec = ctx_mae.match_spec_action; 2330 2331 return 0; 2332 2333 fail_validate_match_spec_action: 2334 fail_process_outer: 2335 fail_process_pattern_data: 2336 fail_parse_pattern: 2337 sfc_mae_rule_encap_parse_fini(sa, &ctx_mae); 2338 2339 fail_encap_parse_init: 2340 efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action); 2341 2342 fail_init_match_spec_action: 2343 return rc; 2344 } 2345 2346 /* 2347 * An action supported by MAE may correspond to a bundle of RTE flow actions, 2348 * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_SET_VLAN_VID + OF_SET_VLAN_PCP. 2349 * That is, related RTE flow actions need to be tracked as parts of a whole 2350 * so that they can be combined into a single action and submitted to the MAE 2351 * representation of a given rule's action set. 2352 * 2353 * Each RTE flow action provided by an application gets classified as 2354 * one belonging to some bundle type. If an action is not supposed to 2355 * belong to any bundle, or if this action is END, it is described as 2356 * one belonging to a dummy bundle of type EMPTY.
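 *
 * For example, the application-level sequence
 *     OF_PUSH_VLAN (ethertype) -> OF_SET_VLAN_VID (vid) -> OF_SET_VLAN_PCP (pcp)
 * is accumulated in a single VLAN_PUSH bundle and is submitted by one
 * efx_mae_action_set_populate_vlan_push() invocation.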
2357 * 2358 * A currently tracked bundle will be submitted if a repeating 2359 * action or an action of different bundle type follows. 2360 */ 2361 2362 enum sfc_mae_actions_bundle_type { 2363 SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0, 2364 SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH, 2365 }; 2366 2367 struct sfc_mae_actions_bundle { 2368 enum sfc_mae_actions_bundle_type type; 2369 2370 /* Indicates actions already tracked by the current bundle */ 2371 uint64_t actions_mask; 2372 2373 /* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */ 2374 rte_be16_t vlan_push_tpid; 2375 rte_be16_t vlan_push_tci; 2376 }; 2377 2378 /* 2379 * Combine configuration of RTE flow actions tracked by the bundle into a 2380 * single action and submit the result to MAE action set specification. 2381 * Do nothing in the case of dummy action bundle. 2382 */ 2383 static int 2384 sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle, 2385 efx_mae_actions_t *spec) 2386 { 2387 int rc = 0; 2388 2389 switch (bundle->type) { 2390 case SFC_MAE_ACTIONS_BUNDLE_EMPTY: 2391 break; 2392 case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH: 2393 rc = efx_mae_action_set_populate_vlan_push( 2394 spec, bundle->vlan_push_tpid, bundle->vlan_push_tci); 2395 break; 2396 default: 2397 SFC_ASSERT(B_FALSE); 2398 break; 2399 } 2400 2401 return rc; 2402 } 2403 2404 /* 2405 * Given the type of the next RTE flow action in the line, decide 2406 * whether a new bundle is about to start, and, if this is the case, 2407 * submit and reset the current bundle. 2408 */ 2409 static int 2410 sfc_mae_actions_bundle_sync(const struct rte_flow_action *action, 2411 struct sfc_mae_actions_bundle *bundle, 2412 efx_mae_actions_t *spec, 2413 struct rte_flow_error *error) 2414 { 2415 enum sfc_mae_actions_bundle_type bundle_type_new; 2416 int rc; 2417 2418 switch (action->type) { 2419 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 2420 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 2421 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: 2422 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH; 2423 break; 2424 default: 2425 /* 2426 * Self-sufficient actions, including END, are handled in this 2427 * case. No checks for unsupported actions are needed here 2428 * because parsing doesn't occur at this point. 
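 * Unsupported actions are turned down later, by sfc_mae_rule_parse_action().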
2429 */ 2430 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY; 2431 break; 2432 } 2433 2434 if (bundle_type_new != bundle->type || 2435 (bundle->actions_mask & (1ULL << action->type)) != 0) { 2436 rc = sfc_mae_actions_bundle_submit(bundle, spec); 2437 if (rc != 0) 2438 goto fail_submit; 2439 2440 memset(bundle, 0, sizeof(*bundle)); 2441 } 2442 2443 bundle->type = bundle_type_new; 2444 2445 return 0; 2446 2447 fail_submit: 2448 return rte_flow_error_set(error, rc, 2449 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2450 "Failed to request the (group of) action(s)"); 2451 } 2452 2453 static void 2454 sfc_mae_rule_parse_action_of_push_vlan( 2455 const struct rte_flow_action_of_push_vlan *conf, 2456 struct sfc_mae_actions_bundle *bundle) 2457 { 2458 bundle->vlan_push_tpid = conf->ethertype; 2459 } 2460 2461 static void 2462 sfc_mae_rule_parse_action_of_set_vlan_vid( 2463 const struct rte_flow_action_of_set_vlan_vid *conf, 2464 struct sfc_mae_actions_bundle *bundle) 2465 { 2466 bundle->vlan_push_tci |= (conf->vlan_vid & 2467 rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t))); 2468 } 2469 2470 static void 2471 sfc_mae_rule_parse_action_of_set_vlan_pcp( 2472 const struct rte_flow_action_of_set_vlan_pcp *conf, 2473 struct sfc_mae_actions_bundle *bundle) 2474 { 2475 uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp & 2476 RTE_LEN2MASK(3, uint8_t)) << 13; 2477 2478 bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp); 2479 } 2480 2481 struct sfc_mae_parsed_item { 2482 const struct rte_flow_item *item; 2483 size_t proto_header_ofst; 2484 size_t proto_header_size; 2485 }; 2486 2487 /* 2488 * For each 16-bit word of the given header, override 2489 * bits enforced by the corresponding 16-bit mask. 2490 */ 2491 static void 2492 sfc_mae_header_force_item_masks(uint8_t *header_buf, 2493 const struct sfc_mae_parsed_item *parsed_items, 2494 unsigned int nb_parsed_items) 2495 { 2496 unsigned int item_idx; 2497 2498 for (item_idx = 0; item_idx < nb_parsed_items; ++item_idx) { 2499 const struct sfc_mae_parsed_item *parsed_item; 2500 const struct rte_flow_item *item; 2501 size_t proto_header_size; 2502 size_t ofst; 2503 2504 parsed_item = &parsed_items[item_idx]; 2505 proto_header_size = parsed_item->proto_header_size; 2506 item = parsed_item->item; 2507 2508 for (ofst = 0; ofst < proto_header_size; 2509 ofst += sizeof(rte_be16_t)) { 2510 rte_be16_t *wp = RTE_PTR_ADD(header_buf, ofst); 2511 const rte_be16_t *w_maskp; 2512 const rte_be16_t *w_specp; 2513 2514 w_maskp = RTE_PTR_ADD(item->mask, ofst); 2515 w_specp = RTE_PTR_ADD(item->spec, ofst); 2516 2517 *wp &= ~(*w_maskp); 2518 *wp |= (*w_specp & *w_maskp); 2519 } 2520 2521 header_buf += proto_header_size; 2522 } 2523 } 2524 2525 #define SFC_IPV4_TTL_DEF 0x40 2526 #define SFC_IPV6_VTC_FLOW_DEF 0x60000000 2527 #define SFC_IPV6_HOP_LIMITS_DEF 0xff 2528 #define SFC_VXLAN_FLAGS_DEF 0x08000000 2529 2530 static int 2531 sfc_mae_rule_parse_action_vxlan_encap( 2532 struct sfc_mae *mae, 2533 const struct rte_flow_action_vxlan_encap *conf, 2534 efx_mae_actions_t *spec, 2535 struct rte_flow_error *error) 2536 { 2537 struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh; 2538 struct rte_flow_item *pattern = conf->definition; 2539 uint8_t *buf = bounce_eh->buf; 2540 2541 /* This array will keep track of non-VOID pattern items. 
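 * The masks these items carry are enforced on the assembled header
 * afterwards, by sfc_mae_header_force_item_masks().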
*/ 2542 struct sfc_mae_parsed_item parsed_items[1 /* Ethernet */ + 2543 2 /* VLAN tags */ + 2544 1 /* IPv4 or IPv6 */ + 2545 1 /* UDP */ + 2546 1 /* VXLAN */]; 2547 unsigned int nb_parsed_items = 0; 2548 2549 size_t eth_ethertype_ofst = offsetof(struct rte_ether_hdr, ether_type); 2550 uint8_t dummy_buf[RTE_MAX(sizeof(struct rte_ipv4_hdr), 2551 sizeof(struct rte_ipv6_hdr))]; 2552 struct rte_ipv4_hdr *ipv4 = (void *)dummy_buf; 2553 struct rte_ipv6_hdr *ipv6 = (void *)dummy_buf; 2554 struct rte_vxlan_hdr *vxlan = NULL; 2555 struct rte_udp_hdr *udp = NULL; 2556 unsigned int nb_vlan_tags = 0; 2557 size_t next_proto_ofst = 0; 2558 size_t ethertype_ofst = 0; 2559 uint64_t exp_items; 2560 int rc; 2561 2562 if (pattern == NULL) { 2563 return rte_flow_error_set(error, EINVAL, 2564 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 2565 "The encap. header definition is NULL"); 2566 } 2567 2568 bounce_eh->type = EFX_TUNNEL_PROTOCOL_VXLAN; 2569 bounce_eh->size = 0; 2570 2571 /* 2572 * Process pattern items and remember non-VOID ones. 2573 * Defer applying masks until after the complete header 2574 * has been built from the pattern items. 2575 */ 2576 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_ETH); 2577 2578 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; ++pattern) { 2579 struct sfc_mae_parsed_item *parsed_item; 2580 const uint64_t exp_items_extra_vlan[] = { 2581 RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN), 0 2582 }; 2583 size_t proto_header_size; 2584 rte_be16_t *ethertypep; 2585 uint8_t *next_protop; 2586 uint8_t *buf_cur; 2587 2588 if (pattern->spec == NULL) { 2589 return rte_flow_error_set(error, EINVAL, 2590 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 2591 "NULL item spec in the encap. header"); 2592 } 2593 2594 if (pattern->mask == NULL) { 2595 return rte_flow_error_set(error, EINVAL, 2596 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 2597 "NULL item mask in the encap. header"); 2598 } 2599 2600 if (pattern->last != NULL) { 2601 /* This is not a match pattern, so disallow range. */ 2602 return rte_flow_error_set(error, EINVAL, 2603 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 2604 "Range item in the encap. header"); 2605 } 2606 2607 if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID) { 2608 /* Handle VOID separately, for clarity. */ 2609 continue; 2610 } 2611 2612 if ((exp_items & RTE_BIT64(pattern->type)) == 0) { 2613 return rte_flow_error_set(error, ENOTSUP, 2614 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 2615 "Unexpected item in the encap. 
header"); 2616 } 2617 2618 parsed_item = &parsed_items[nb_parsed_items]; 2619 buf_cur = buf + bounce_eh->size; 2620 2621 switch (pattern->type) { 2622 case RTE_FLOW_ITEM_TYPE_ETH: 2623 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_ETH, 2624 exp_items); 2625 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_eth, 2626 hdr) != 0); 2627 2628 proto_header_size = sizeof(struct rte_ether_hdr); 2629 2630 ethertype_ofst = eth_ethertype_ofst; 2631 2632 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN) | 2633 RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) | 2634 RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6); 2635 break; 2636 case RTE_FLOW_ITEM_TYPE_VLAN: 2637 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VLAN, 2638 exp_items); 2639 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vlan, 2640 hdr) != 0); 2641 2642 proto_header_size = sizeof(struct rte_vlan_hdr); 2643 2644 ethertypep = RTE_PTR_ADD(buf, eth_ethertype_ofst); 2645 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_QINQ); 2646 2647 ethertypep = RTE_PTR_ADD(buf, ethertype_ofst); 2648 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_VLAN); 2649 2650 ethertype_ofst = 2651 bounce_eh->size + 2652 offsetof(struct rte_vlan_hdr, eth_proto); 2653 2654 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) | 2655 RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6); 2656 exp_items |= exp_items_extra_vlan[nb_vlan_tags]; 2657 2658 ++nb_vlan_tags; 2659 break; 2660 case RTE_FLOW_ITEM_TYPE_IPV4: 2661 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV4, 2662 exp_items); 2663 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv4, 2664 hdr) != 0); 2665 2666 proto_header_size = sizeof(struct rte_ipv4_hdr); 2667 2668 ethertypep = RTE_PTR_ADD(buf, ethertype_ofst); 2669 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV4); 2670 2671 next_proto_ofst = 2672 bounce_eh->size + 2673 offsetof(struct rte_ipv4_hdr, next_proto_id); 2674 2675 ipv4 = (struct rte_ipv4_hdr *)buf_cur; 2676 2677 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP); 2678 break; 2679 case RTE_FLOW_ITEM_TYPE_IPV6: 2680 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV6, 2681 exp_items); 2682 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv6, 2683 hdr) != 0); 2684 2685 proto_header_size = sizeof(struct rte_ipv6_hdr); 2686 2687 ethertypep = RTE_PTR_ADD(buf, ethertype_ofst); 2688 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV6); 2689 2690 next_proto_ofst = bounce_eh->size + 2691 offsetof(struct rte_ipv6_hdr, proto); 2692 2693 ipv6 = (struct rte_ipv6_hdr *)buf_cur; 2694 2695 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP); 2696 break; 2697 case RTE_FLOW_ITEM_TYPE_UDP: 2698 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_UDP, 2699 exp_items); 2700 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_udp, 2701 hdr) != 0); 2702 2703 proto_header_size = sizeof(struct rte_udp_hdr); 2704 2705 next_protop = RTE_PTR_ADD(buf, next_proto_ofst); 2706 *next_protop = IPPROTO_UDP; 2707 2708 udp = (struct rte_udp_hdr *)buf_cur; 2709 2710 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VXLAN); 2711 break; 2712 case RTE_FLOW_ITEM_TYPE_VXLAN: 2713 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VXLAN, 2714 exp_items); 2715 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vxlan, 2716 hdr) != 0); 2717 2718 proto_header_size = sizeof(struct rte_vxlan_hdr); 2719 2720 vxlan = (struct rte_vxlan_hdr *)buf_cur; 2721 2722 udp->dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT); 2723 udp->dgram_len = RTE_BE16(sizeof(*udp) + 2724 sizeof(*vxlan)); 2725 udp->dgram_cksum = 0; 2726 2727 exp_items = 0; 2728 break; 2729 default: 2730 return rte_flow_error_set(error, ENOTSUP, 2731 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 2732 "Unknown item in the encap. 
header"); 2733 } 2734 2735 if (bounce_eh->size + proto_header_size > bounce_eh->buf_size) { 2736 return rte_flow_error_set(error, E2BIG, 2737 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 2738 "The encap. header is too big"); 2739 } 2740 2741 if ((proto_header_size & 1) != 0) { 2742 return rte_flow_error_set(error, EINVAL, 2743 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 2744 "Odd layer size in the encap. header"); 2745 } 2746 2747 rte_memcpy(buf_cur, pattern->spec, proto_header_size); 2748 bounce_eh->size += proto_header_size; 2749 2750 parsed_item->item = pattern; 2751 parsed_item->proto_header_size = proto_header_size; 2752 ++nb_parsed_items; 2753 } 2754 2755 if (exp_items != 0) { 2756 /* Parsing item VXLAN would have reset exp_items to 0. */ 2757 return rte_flow_error_set(error, ENOTSUP, 2758 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 2759 "No item VXLAN in the encap. header"); 2760 } 2761 2762 /* One of the pointers (ipv4, ipv6) refers to a dummy area. */ 2763 ipv4->version_ihl = RTE_IPV4_VHL_DEF; 2764 ipv4->time_to_live = SFC_IPV4_TTL_DEF; 2765 ipv4->total_length = RTE_BE16(sizeof(*ipv4) + sizeof(*udp) + 2766 sizeof(*vxlan)); 2767 /* The HW cannot compute this checksum. */ 2768 ipv4->hdr_checksum = 0; 2769 ipv4->hdr_checksum = rte_ipv4_cksum(ipv4); 2770 2771 ipv6->vtc_flow = RTE_BE32(SFC_IPV6_VTC_FLOW_DEF); 2772 ipv6->hop_limits = SFC_IPV6_HOP_LIMITS_DEF; 2773 ipv6->payload_len = udp->dgram_len; 2774 2775 vxlan->vx_flags = RTE_BE32(SFC_VXLAN_FLAGS_DEF); 2776 2777 /* Take care of the masks. */ 2778 sfc_mae_header_force_item_masks(buf, parsed_items, nb_parsed_items); 2779 2780 rc = efx_mae_action_set_populate_encap(spec); 2781 if (rc != 0) { 2782 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION, 2783 NULL, "failed to request action ENCAP"); 2784 } 2785 2786 return rc; 2787 } 2788 2789 static int 2790 sfc_mae_rule_parse_action_mark(struct sfc_adapter *sa, 2791 const struct rte_flow_action_mark *conf, 2792 efx_mae_actions_t *spec) 2793 { 2794 int rc; 2795 2796 rc = efx_mae_action_set_populate_mark(spec, conf->id); 2797 if (rc != 0) 2798 sfc_err(sa, "failed to request action MARK: %s", strerror(rc)); 2799 2800 return rc; 2801 } 2802 2803 static int 2804 sfc_mae_rule_parse_action_count(struct sfc_adapter *sa, 2805 const struct rte_flow_action_count *conf, 2806 efx_mae_actions_t *spec) 2807 { 2808 int rc; 2809 2810 if (conf->shared) { 2811 rc = ENOTSUP; 2812 goto fail_counter_shared; 2813 } 2814 2815 if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0) { 2816 sfc_err(sa, 2817 "counter queue is not configured for COUNT action"); 2818 rc = EINVAL; 2819 goto fail_counter_queue_uninit; 2820 } 2821 2822 if (sfc_get_service_lcore(SOCKET_ID_ANY) == RTE_MAX_LCORE) { 2823 rc = EINVAL; 2824 goto fail_no_service_core; 2825 } 2826 2827 rc = efx_mae_action_set_populate_count(spec); 2828 if (rc != 0) { 2829 sfc_err(sa, 2830 "failed to populate counters in MAE action set: %s", 2831 rte_strerror(rc)); 2832 goto fail_populate_count; 2833 } 2834 2835 return 0; 2836 2837 fail_populate_count: 2838 fail_no_service_core: 2839 fail_counter_queue_uninit: 2840 fail_counter_shared: 2841 2842 return rc; 2843 } 2844 2845 static int 2846 sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa, 2847 const struct rte_flow_action_phy_port *conf, 2848 efx_mae_actions_t *spec) 2849 { 2850 efx_mport_sel_t mport; 2851 uint32_t phy_port; 2852 int rc; 2853 2854 if (conf->original != 0) 2855 phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port; 2856 else 2857 phy_port = conf->index; 2858 2859 rc = 
efx_mae_mport_by_phy_port(phy_port, &mport); 2860 if (rc != 0) { 2861 sfc_err(sa, "failed to convert phys. port ID %u to m-port selector: %s", 2862 phy_port, strerror(rc)); 2863 return rc; 2864 } 2865 2866 rc = efx_mae_action_set_populate_deliver(spec, &mport); 2867 if (rc != 0) { 2868 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s", 2869 mport.sel, strerror(rc)); 2870 } 2871 2872 return rc; 2873 } 2874 2875 static int 2876 sfc_mae_rule_parse_action_pf_vf(struct sfc_adapter *sa, 2877 const struct rte_flow_action_vf *vf_conf, 2878 efx_mae_actions_t *spec) 2879 { 2880 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); 2881 efx_mport_sel_t mport; 2882 uint32_t vf; 2883 int rc; 2884 2885 if (vf_conf == NULL) 2886 vf = EFX_PCI_VF_INVALID; 2887 else if (vf_conf->original != 0) 2888 vf = encp->enc_vf; 2889 else 2890 vf = vf_conf->id; 2891 2892 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, vf, &mport); 2893 if (rc != 0) { 2894 sfc_err(sa, "failed to convert PF %u VF %d to m-port: %s", 2895 encp->enc_pf, (vf != EFX_PCI_VF_INVALID) ? (int)vf : -1, 2896 strerror(rc)); 2897 return rc; 2898 } 2899 2900 rc = efx_mae_action_set_populate_deliver(spec, &mport); 2901 if (rc != 0) { 2902 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s", 2903 mport.sel, strerror(rc)); 2904 } 2905 2906 return rc; 2907 } 2908 2909 static int 2910 sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa, 2911 const struct rte_flow_action_port_id *conf, 2912 efx_mae_actions_t *spec) 2913 { 2914 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); 2915 struct sfc_mae *mae = &sa->mae; 2916 efx_mport_sel_t mport; 2917 uint16_t port_id; 2918 int rc; 2919 2920 if (conf->id > UINT16_MAX) 2921 return EOVERFLOW; 2922 2923 port_id = (conf->original != 0) ? 
sas->port_id : conf->id; 2924 2925 rc = sfc_mae_switch_port_by_ethdev(mae->switch_domain_id, 2926 port_id, &mport); 2927 if (rc != 0) { 2928 sfc_err(sa, "failed to find MAE switch port SW entry for RTE ethdev port %u: %s", 2929 port_id, strerror(rc)); 2930 return rc; 2931 } 2932 2933 rc = efx_mae_action_set_populate_deliver(spec, &mport); 2934 if (rc != 0) { 2935 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s", 2936 mport.sel, strerror(rc)); 2937 } 2938 2939 return rc; 2940 } 2941 2942 static const char * const action_names[] = { 2943 [RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] = "VXLAN_DECAP", 2944 [RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = "OF_POP_VLAN", 2945 [RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = "OF_PUSH_VLAN", 2946 [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] = "OF_SET_VLAN_VID", 2947 [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] = "OF_SET_VLAN_PCP", 2948 [RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = "VXLAN_ENCAP", 2949 [RTE_FLOW_ACTION_TYPE_FLAG] = "FLAG", 2950 [RTE_FLOW_ACTION_TYPE_MARK] = "MARK", 2951 [RTE_FLOW_ACTION_TYPE_PHY_PORT] = "PHY_PORT", 2952 [RTE_FLOW_ACTION_TYPE_PF] = "PF", 2953 [RTE_FLOW_ACTION_TYPE_VF] = "VF", 2954 [RTE_FLOW_ACTION_TYPE_PORT_ID] = "PORT_ID", 2955 [RTE_FLOW_ACTION_TYPE_DROP] = "DROP", 2956 }; 2957 2958 static int 2959 sfc_mae_rule_parse_action(struct sfc_adapter *sa, 2960 const struct rte_flow_action *action, 2961 const struct sfc_mae_outer_rule *outer_rule, 2962 struct sfc_mae_actions_bundle *bundle, 2963 efx_mae_actions_t *spec, 2964 struct rte_flow_error *error) 2965 { 2966 bool custom_error = B_FALSE; 2967 int rc = 0; 2968 2969 switch (action->type) { 2970 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: 2971 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP, 2972 bundle->actions_mask); 2973 if (outer_rule == NULL || 2974 outer_rule->encap_type != EFX_TUNNEL_PROTOCOL_VXLAN) 2975 rc = EINVAL; 2976 else 2977 rc = efx_mae_action_set_populate_decap(spec); 2978 break; 2979 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: 2980 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN, 2981 bundle->actions_mask); 2982 rc = efx_mae_action_set_populate_vlan_pop(spec); 2983 break; 2984 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 2985 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, 2986 bundle->actions_mask); 2987 sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle); 2988 break; 2989 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 2990 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, 2991 bundle->actions_mask); 2992 sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf, bundle); 2993 break; 2994 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: 2995 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, 2996 bundle->actions_mask); 2997 sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf, bundle); 2998 break; 2999 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 3000 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP, 3001 bundle->actions_mask); 3002 rc = sfc_mae_rule_parse_action_vxlan_encap(&sa->mae, 3003 action->conf, 3004 spec, error); 3005 custom_error = B_TRUE; 3006 break; 3007 case RTE_FLOW_ACTION_TYPE_COUNT: 3008 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_COUNT, 3009 bundle->actions_mask); 3010 rc = sfc_mae_rule_parse_action_count(sa, action->conf, spec); 3011 break; 3012 case RTE_FLOW_ACTION_TYPE_FLAG: 3013 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG, 3014 bundle->actions_mask); 3015 rc = efx_mae_action_set_populate_flag(spec); 3016 break; 3017 case RTE_FLOW_ACTION_TYPE_MARK: 3018 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK, 3019 
bundle->actions_mask); 3020 rc = sfc_mae_rule_parse_action_mark(sa, action->conf, spec); 3021 break; 3022 case RTE_FLOW_ACTION_TYPE_PHY_PORT: 3023 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT, 3024 bundle->actions_mask); 3025 rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec); 3026 break; 3027 case RTE_FLOW_ACTION_TYPE_PF: 3028 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF, 3029 bundle->actions_mask); 3030 rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec); 3031 break; 3032 case RTE_FLOW_ACTION_TYPE_VF: 3033 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF, 3034 bundle->actions_mask); 3035 rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec); 3036 break; 3037 case RTE_FLOW_ACTION_TYPE_PORT_ID: 3038 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID, 3039 bundle->actions_mask); 3040 rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec); 3041 break; 3042 case RTE_FLOW_ACTION_TYPE_DROP: 3043 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP, 3044 bundle->actions_mask); 3045 rc = efx_mae_action_set_populate_drop(spec); 3046 break; 3047 default: 3048 return rte_flow_error_set(error, ENOTSUP, 3049 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3050 "Unsupported action"); 3051 } 3052 3053 if (rc == 0) { 3054 bundle->actions_mask |= (1ULL << action->type); 3055 } else if (!custom_error) { 3056 if (action->type < RTE_DIM(action_names)) { 3057 const char *action_name = action_names[action->type]; 3058 3059 if (action_name != NULL) { 3060 sfc_err(sa, "action %s was rejected: %s", 3061 action_name, strerror(rc)); 3062 } 3063 } 3064 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION, 3065 NULL, "Failed to request the action"); 3066 } 3067 3068 return rc; 3069 } 3070 3071 static void 3072 sfc_mae_bounce_eh_invalidate(struct sfc_mae_bounce_eh *bounce_eh) 3073 { 3074 bounce_eh->type = EFX_TUNNEL_PROTOCOL_NONE; 3075 } 3076 3077 static int 3078 sfc_mae_process_encap_header(struct sfc_adapter *sa, 3079 const struct sfc_mae_bounce_eh *bounce_eh, 3080 struct sfc_mae_encap_header **encap_headerp) 3081 { 3082 if (bounce_eh->type == EFX_TUNNEL_PROTOCOL_NONE) { 3083 *encap_headerp = NULL; 3084 return 0; 3085 } 3086 3087 *encap_headerp = sfc_mae_encap_header_attach(sa, bounce_eh); 3088 if (*encap_headerp != NULL) 3089 return 0; 3090 3091 return sfc_mae_encap_header_add(sa, bounce_eh, encap_headerp); 3092 } 3093 3094 int 3095 sfc_mae_rule_parse_actions(struct sfc_adapter *sa, 3096 const struct rte_flow_action actions[], 3097 struct sfc_flow_spec_mae *spec_mae, 3098 struct rte_flow_error *error) 3099 { 3100 struct sfc_mae_encap_header *encap_header = NULL; 3101 struct sfc_mae_actions_bundle bundle = {0}; 3102 const struct rte_flow_action *action; 3103 struct sfc_mae *mae = &sa->mae; 3104 efx_mae_actions_t *spec; 3105 unsigned int n_count; 3106 int rc; 3107 3108 rte_errno = 0; 3109 3110 if (actions == NULL) { 3111 return rte_flow_error_set(error, EINVAL, 3112 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL, 3113 "NULL actions"); 3114 } 3115 3116 rc = efx_mae_action_set_spec_init(sa->nic, &spec); 3117 if (rc != 0) 3118 goto fail_action_set_spec_init; 3119 3120 /* Cleanup after previous encap. header bounce buffer usage.
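 * The buffer gets filled in by VXLAN_ENCAP action parsing (if that action
 * is present) and is consumed by sfc_mae_process_encap_header() below.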
*/ 3121 sfc_mae_bounce_eh_invalidate(&mae->bounce_eh); 3122 3123 for (action = actions; 3124 action->type != RTE_FLOW_ACTION_TYPE_END; ++action) { 3125 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error); 3126 if (rc != 0) 3127 goto fail_rule_parse_action; 3128 3129 rc = sfc_mae_rule_parse_action(sa, action, spec_mae->outer_rule, 3130 &bundle, spec, error); 3131 if (rc != 0) 3132 goto fail_rule_parse_action; 3133 } 3134 3135 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error); 3136 if (rc != 0) 3137 goto fail_rule_parse_action; 3138 3139 rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh, &encap_header); 3140 if (rc != 0) 3141 goto fail_process_encap_header; 3142 3143 n_count = efx_mae_action_set_get_nb_count(spec); 3144 if (n_count > 1) { 3145 rc = ENOTSUP; 3146 sfc_err(sa, "too many count actions requested: %u", n_count); 3147 goto fail_nb_count; 3148 } 3149 3150 spec_mae->action_set = sfc_mae_action_set_attach(sa, encap_header, 3151 n_count, spec); 3152 if (spec_mae->action_set != NULL) { 3153 sfc_mae_encap_header_del(sa, encap_header); 3154 efx_mae_action_set_spec_fini(sa->nic, spec); 3155 return 0; 3156 } 3157 3158 rc = sfc_mae_action_set_add(sa, actions, spec, encap_header, n_count, 3159 &spec_mae->action_set); 3160 if (rc != 0) 3161 goto fail_action_set_add; 3162 3163 return 0; 3164 3165 fail_action_set_add: 3166 fail_nb_count: 3167 sfc_mae_encap_header_del(sa, encap_header); 3168 3169 fail_process_encap_header: 3170 fail_rule_parse_action: 3171 efx_mae_action_set_spec_fini(sa->nic, spec); 3172 3173 fail_action_set_spec_init: 3174 if (rc > 0 && rte_errno == 0) { 3175 rc = rte_flow_error_set(error, rc, 3176 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 3177 NULL, "Failed to process the action"); 3178 } 3179 return rc; 3180 } 3181 3182 static bool 3183 sfc_mae_rules_class_cmp(struct sfc_adapter *sa, 3184 const efx_mae_match_spec_t *left, 3185 const efx_mae_match_spec_t *right) 3186 { 3187 bool have_same_class; 3188 int rc; 3189 3190 rc = efx_mae_match_specs_class_cmp(sa->nic, left, right, 3191 &have_same_class); 3192 3193 return (rc == 0) ? have_same_class : false; 3194 } 3195 3196 static int 3197 sfc_mae_outer_rule_class_verify(struct sfc_adapter *sa, 3198 struct sfc_mae_outer_rule *rule) 3199 { 3200 struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc; 3201 struct sfc_mae_outer_rule *entry; 3202 struct sfc_mae *mae = &sa->mae; 3203 3204 if (fw_rsrc->rule_id.id != EFX_MAE_RSRC_ID_INVALID) { 3205 /* An active rule is reused. Its class is known to be valid.
*/ 3206 return 0; 3207 } 3208 3209 TAILQ_FOREACH_REVERSE(entry, &mae->outer_rules, 3210 sfc_mae_outer_rules, entries) { 3211 const efx_mae_match_spec_t *left = entry->match_spec; 3212 const efx_mae_match_spec_t *right = rule->match_spec; 3213 3214 if (entry == rule) 3215 continue; 3216 3217 if (sfc_mae_rules_class_cmp(sa, left, right)) 3218 return 0; 3219 } 3220 3221 sfc_info(sa, "for now, the HW doesn't support rule validation, and HW " 3222 "support for outer frame pattern items is not guaranteed; " 3223 "other than that, the items are valid from SW standpoint"); 3224 return 0; 3225 } 3226 3227 static int 3228 sfc_mae_action_rule_class_verify(struct sfc_adapter *sa, 3229 struct sfc_flow_spec_mae *spec) 3230 { 3231 const struct rte_flow *entry; 3232 3233 TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) { 3234 const struct sfc_flow_spec *entry_spec = &entry->spec; 3235 const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae; 3236 const efx_mae_match_spec_t *left = es_mae->match_spec; 3237 const efx_mae_match_spec_t *right = spec->match_spec; 3238 3239 switch (entry_spec->type) { 3240 case SFC_FLOW_SPEC_FILTER: 3241 /* Ignore VNIC-level flows */ 3242 break; 3243 case SFC_FLOW_SPEC_MAE: 3244 if (sfc_mae_rules_class_cmp(sa, left, right)) 3245 return 0; 3246 break; 3247 default: 3248 SFC_ASSERT(false); 3249 } 3250 } 3251 3252 sfc_info(sa, "for now, the HW doesn't support rule validation, and HW " 3253 "support for inner frame pattern items is not guaranteed; " 3254 "other than that, the items are valid from SW standpoint"); 3255 return 0; 3256 } 3257 3258 /** 3259 * Confirm that a given flow can be accepted by the FW. 3260 * 3261 * @param sa 3262 * Software adapter context 3263 * @param flow 3264 * Flow to be verified 3265 * @return 3266 * Zero on success and non-zero in the case of error. 3267 * A special value of EAGAIN indicates that the adapter is 3268 * not in started state. This state is compulsory because 3269 * it only makes sense to compare the rule class of the flow 3270 * being validated with classes of the active rules. 3271 * Such classes are wittingly supported by the FW. 
3272 */ 3273 int 3274 sfc_mae_flow_verify(struct sfc_adapter *sa, 3275 struct rte_flow *flow) 3276 { 3277 struct sfc_flow_spec *spec = &flow->spec; 3278 struct sfc_flow_spec_mae *spec_mae = &spec->mae; 3279 struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule; 3280 int rc; 3281 3282 SFC_ASSERT(sfc_adapter_is_locked(sa)); 3283 3284 if (sa->state != SFC_ADAPTER_STARTED) 3285 return EAGAIN; 3286 3287 if (outer_rule != NULL) { 3288 rc = sfc_mae_outer_rule_class_verify(sa, outer_rule); 3289 if (rc != 0) 3290 return rc; 3291 } 3292 3293 return sfc_mae_action_rule_class_verify(sa, spec_mae); 3294 } 3295 3296 int 3297 sfc_mae_flow_insert(struct sfc_adapter *sa, 3298 struct rte_flow *flow) 3299 { 3300 struct sfc_flow_spec *spec = &flow->spec; 3301 struct sfc_flow_spec_mae *spec_mae = &spec->mae; 3302 struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule; 3303 struct sfc_mae_action_set *action_set = spec_mae->action_set; 3304 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc; 3305 int rc; 3306 3307 SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID); 3308 SFC_ASSERT(action_set != NULL); 3309 3310 if (outer_rule != NULL) { 3311 rc = sfc_mae_outer_rule_enable(sa, outer_rule, 3312 spec_mae->match_spec); 3313 if (rc != 0) 3314 goto fail_outer_rule_enable; 3315 } 3316 3317 rc = sfc_mae_action_set_enable(sa, action_set); 3318 if (rc != 0) 3319 goto fail_action_set_enable; 3320 3321 if (action_set->n_counters > 0) { 3322 rc = sfc_mae_counter_start(sa); 3323 if (rc != 0) { 3324 sfc_err(sa, "failed to start MAE counters support: %s", 3325 rte_strerror(rc)); 3326 goto fail_mae_counter_start; 3327 } 3328 } 3329 3330 rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec, 3331 NULL, &fw_rsrc->aset_id, 3332 &spec_mae->rule_id); 3333 if (rc != 0) 3334 goto fail_action_rule_insert; 3335 3336 sfc_dbg(sa, "enabled flow=%p: AR_ID=0x%08x", 3337 flow, spec_mae->rule_id.id); 3338 3339 return 0; 3340 3341 fail_action_rule_insert: 3342 fail_mae_counter_start: 3343 sfc_mae_action_set_disable(sa, action_set); 3344 3345 fail_action_set_enable: 3346 if (outer_rule != NULL) 3347 sfc_mae_outer_rule_disable(sa, outer_rule); 3348 3349 fail_outer_rule_enable: 3350 return rc; 3351 } 3352 3353 int 3354 sfc_mae_flow_remove(struct sfc_adapter *sa, 3355 struct rte_flow *flow) 3356 { 3357 struct sfc_flow_spec *spec = &flow->spec; 3358 struct sfc_flow_spec_mae *spec_mae = &spec->mae; 3359 struct sfc_mae_action_set *action_set = spec_mae->action_set; 3360 struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule; 3361 int rc; 3362 3363 SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID); 3364 SFC_ASSERT(action_set != NULL); 3365 3366 rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id); 3367 if (rc != 0) { 3368 sfc_err(sa, "failed to disable flow=%p with AR_ID=0x%08x: %s", 3369 flow, spec_mae->rule_id.id, strerror(rc)); 3370 } 3371 sfc_dbg(sa, "disabled flow=%p with AR_ID=0x%08x", 3372 flow, spec_mae->rule_id.id); 3373 spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID; 3374 3375 sfc_mae_action_set_disable(sa, action_set); 3376 3377 if (outer_rule != NULL) 3378 sfc_mae_outer_rule_disable(sa, outer_rule); 3379 3380 return 0; 3381 } 3382 3383 static int 3384 sfc_mae_query_counter(struct sfc_adapter *sa, 3385 struct sfc_flow_spec_mae *spec, 3386 const struct rte_flow_action *action, 3387 struct rte_flow_query_count *data, 3388 struct rte_flow_error *error) 3389 { 3390 struct sfc_mae_action_set *action_set = spec->action_set; 3391 const struct rte_flow_action_count *conf = action->conf; 3392 
unsigned int i; 3393 int rc; 3394 3395 if (action_set->n_counters == 0) { 3396 return rte_flow_error_set(error, EINVAL, 3397 RTE_FLOW_ERROR_TYPE_ACTION, action, 3398 "Queried flow rule does not have count actions"); 3399 } 3400 3401 for (i = 0; i < action_set->n_counters; i++) { 3402 /* 3403 * Get the first available counter of the flow rule if 3404 * counter ID is not specified. 3405 */ 3406 if (conf != NULL && action_set->counters[i].rte_id != conf->id) 3407 continue; 3408 3409 rc = sfc_mae_counter_get(&sa->mae.counter_registry.counters, 3410 &action_set->counters[i], data); 3411 if (rc != 0) { 3412 return rte_flow_error_set(error, EINVAL, 3413 RTE_FLOW_ERROR_TYPE_ACTION, action, 3414 "Queried flow rule counter action is invalid"); 3415 } 3416 3417 return 0; 3418 } 3419 3420 return rte_flow_error_set(error, ENOENT, 3421 RTE_FLOW_ERROR_TYPE_ACTION, action, 3422 "No such flow rule action count ID"); 3423 } 3424 3425 int 3426 sfc_mae_flow_query(struct rte_eth_dev *dev, 3427 struct rte_flow *flow, 3428 const struct rte_flow_action *action, 3429 void *data, 3430 struct rte_flow_error *error) 3431 { 3432 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 3433 struct sfc_flow_spec *spec = &flow->spec; 3434 struct sfc_flow_spec_mae *spec_mae = &spec->mae; 3435 3436 switch (action->type) { 3437 case RTE_FLOW_ACTION_TYPE_COUNT: 3438 return sfc_mae_query_counter(sa, spec_mae, action, 3439 data, error); 3440 default: 3441 return rte_flow_error_set(error, ENOTSUP, 3442 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3443 "Query for action of this type is not supported"); 3444 } 3445 } 3446