1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2016 6WIND S.A. 3 * Copyright 2016 Mellanox Technologies, Ltd 4 */ 5 6 #include <stdalign.h> 7 #include <stdint.h> 8 #include <string.h> 9 #include <stdbool.h> 10 #include <sys/queue.h> 11 12 #include <rte_common.h> 13 #include <rte_ether.h> 14 #include <ethdev_driver.h> 15 #include <rte_eal_paging.h> 16 #include <rte_flow.h> 17 #include <rte_cycles.h> 18 #include <rte_flow_driver.h> 19 #include <rte_malloc.h> 20 #include <rte_ip.h> 21 22 #include <mlx5_glue.h> 23 #include <mlx5_devx_cmds.h> 24 #include <mlx5_prm.h> 25 #include <mlx5_malloc.h> 26 27 #include "mlx5_defs.h" 28 #include "mlx5.h" 29 #include "mlx5_flow.h" 30 #include "mlx5_flow_os.h" 31 #include "mlx5_rx.h" 32 #include "mlx5_tx.h" 33 #include "mlx5_common_os.h" 34 #include "rte_pmd_mlx5.h" 35 36 /* 37 * Shared array for quick translation between port_id and vport mask/values 38 * used for HWS rules. 39 */ 40 struct flow_hw_port_info mlx5_flow_hw_port_infos[RTE_MAX_ETHPORTS]; 41 42 struct tunnel_default_miss_ctx { 43 uint16_t *queue; 44 __extension__ 45 union { 46 struct rte_flow_action_rss action_rss; 47 struct rte_flow_action_queue miss_queue; 48 struct rte_flow_action_jump miss_jump; 49 uint8_t raw[0]; 50 }; 51 }; 52 53 void 54 mlx5_indirect_list_handles_release(struct rte_eth_dev *dev) 55 { 56 struct mlx5_priv *priv = dev->data->dev_private; 57 #ifdef HAVE_MLX5_HWS_SUPPORT 58 struct rte_flow_error error; 59 #endif 60 61 while (!LIST_EMPTY(&priv->indirect_list_head)) { 62 struct mlx5_indirect_list *e = 63 LIST_FIRST(&priv->indirect_list_head); 64 65 LIST_REMOVE(e, entry); 66 switch (e->type) { 67 #ifdef HAVE_MLX5_HWS_SUPPORT 68 case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR: 69 mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e); 70 break; 71 case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY: 72 mlx5_destroy_legacy_indirect(dev, e); 73 break; 74 case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT: 75 mlx5_reformat_action_destroy(dev, 76 (struct rte_flow_action_list_handle *)e, &error); 77 break; 78 #endif 79 default: 80 DRV_LOG(ERR, "invalid indirect list type"); 81 MLX5_ASSERT(false); 82 break; 83 } 84 } 85 } 86 87 static int 88 flow_tunnel_add_default_miss(struct rte_eth_dev *dev, 89 struct rte_flow *flow, 90 const struct rte_flow_attr *attr, 91 const struct rte_flow_action *app_actions, 92 uint32_t flow_idx, 93 const struct mlx5_flow_tunnel *tunnel, 94 struct tunnel_default_miss_ctx *ctx, 95 struct rte_flow_error *error); 96 static struct mlx5_flow_tunnel * 97 mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id); 98 static void 99 mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel); 100 static uint32_t 101 tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev, 102 const struct mlx5_flow_tunnel *tunnel, 103 uint32_t group, uint32_t *table, 104 struct rte_flow_error *error); 105 106 /** Device flow drivers. 
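 * The flow_drv_ops[] table below routes the generic rte_flow entry
 * points to the Verbs, DV or HWS backend according to build-time
 * support; slots without a backend fall back to mlx5_flow_null_drv_ops.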
*/ 107 extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops; 108 109 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops; 110 111 const struct mlx5_flow_driver_ops *flow_drv_ops[] = { 112 [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops, 113 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) 114 [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops, 115 #endif 116 #ifdef HAVE_MLX5_HWS_SUPPORT 117 [MLX5_FLOW_TYPE_HW] = &mlx5_flow_hw_drv_ops, 118 #endif 119 [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops, 120 [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops 121 }; 122 123 /** Helper macro to build input graph for mlx5_flow_expand_rss(). */ 124 #define MLX5_FLOW_EXPAND_RSS_NEXT(...) \ 125 (const int []){ \ 126 __VA_ARGS__, 0, \ 127 } 128 129 /** Node object of input graph for mlx5_flow_expand_rss(). */ 130 struct mlx5_flow_expand_node { 131 const int *const next; 132 /**< 133 * List of next node indexes. Index 0 is interpreted as a terminator. 134 */ 135 const enum rte_flow_item_type type; 136 /**< Pattern item type of current node. */ 137 uint64_t rss_types; 138 /**< 139 * RSS types bit-field associated with this node 140 * (see RTE_ETH_RSS_* definitions). 141 */ 142 uint64_t node_flags; 143 /**< 144 * Bit-fields that define how the node is used in the expansion. 145 * (see MLX5_EXPANSION_NODE_* definitions). 146 */ 147 }; 148 149 /** Keep same format with mlx5_flow_expand_rss to share the buffer for expansion. */ 150 struct mlx5_flow_expand_sqn { 151 uint32_t entries; /** Number of entries */ 152 struct { 153 struct rte_flow_item *pattern; /**< Expanded pattern array. */ 154 uint32_t priority; /**< Priority offset for each expansion. */ 155 } entry[]; 156 }; 157 158 /* Optional expand field. The expansion alg will not go deeper. */ 159 #define MLX5_EXPANSION_NODE_OPTIONAL (UINT64_C(1) << 0) 160 161 /* The node is not added implicitly as expansion to the flow pattern. 162 * If the node type does not match the flow pattern item type, the 163 * expansion alg will go deeper to its next items. 164 * In the current implementation, the list of next nodes indexes can 165 * have up to one node with this flag set and it has to be the last 166 * node index (before the list terminator). 167 */ 168 #define MLX5_EXPANSION_NODE_EXPLICIT (UINT64_C(1) << 1) 169 170 /** Object returned by mlx5_flow_expand_rss(). */ 171 struct mlx5_flow_expand_rss { 172 uint32_t entries; 173 /**< Number of entries @p patterns and @p priorities. */ 174 struct { 175 struct rte_flow_item *pattern; /**< Expanded pattern array. */ 176 uint32_t priority; /**< Priority offset for each expansion. */ 177 } entry[]; 178 }; 179 180 static void 181 mlx5_dbg__print_pattern(const struct rte_flow_item *item); 182 183 static const struct mlx5_flow_expand_node * 184 mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern, 185 unsigned int item_idx, 186 const struct mlx5_flow_expand_node graph[], 187 const struct mlx5_flow_expand_node *node); 188 189 static __rte_always_inline int 190 mlx5_need_cache_flow(const struct mlx5_priv *priv, 191 const struct rte_flow_attr *attr) 192 { 193 return priv->isolated && priv->sh->config.dv_flow_en == 1 && 194 (attr ? 
!attr->group : true) &&
195 	       priv->mode_info.mode == RTE_PMD_MLX5_FLOW_ENGINE_MODE_STANDBY &&
196 	       (!priv->sh->config.dv_esw_en || !priv->sh->config.fdb_def_rule);
197 }
198 
199 static bool
200 mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
201 {
202 	switch (item->type) {
203 	case RTE_FLOW_ITEM_TYPE_ETH:
204 	case RTE_FLOW_ITEM_TYPE_VLAN:
205 	case RTE_FLOW_ITEM_TYPE_IPV4:
206 	case RTE_FLOW_ITEM_TYPE_IPV6:
207 	case RTE_FLOW_ITEM_TYPE_UDP:
208 	case RTE_FLOW_ITEM_TYPE_TCP:
209 	case RTE_FLOW_ITEM_TYPE_ESP:
210 	case RTE_FLOW_ITEM_TYPE_ICMP:
211 	case RTE_FLOW_ITEM_TYPE_ICMP6:
212 	case RTE_FLOW_ITEM_TYPE_VXLAN:
213 	case RTE_FLOW_ITEM_TYPE_NVGRE:
214 	case RTE_FLOW_ITEM_TYPE_GRE:
215 	case RTE_FLOW_ITEM_TYPE_GENEVE:
216 	case RTE_FLOW_ITEM_TYPE_MPLS:
217 	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
218 	case RTE_FLOW_ITEM_TYPE_GRE_KEY:
219 	case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
220 	case RTE_FLOW_ITEM_TYPE_GTP:
221 		return true;
222 	default:
223 		break;
224 	}
225 	return false;
226 }
227 
228 /**
229  * Network Service Header (NSH) and its next protocol values
230  * are described in RFC-8393.
231  */
232 static enum rte_flow_item_type
233 mlx5_nsh_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
234 {
235 	enum rte_flow_item_type type;
236 
237 	switch (proto_mask & proto_spec) {
238 	case 0:
239 		type = RTE_FLOW_ITEM_TYPE_VOID;
240 		break;
241 	case RTE_VXLAN_GPE_TYPE_IPV4:
242 		type = RTE_FLOW_ITEM_TYPE_IPV4;
243 		break;
244 	case RTE_VXLAN_GPE_TYPE_IPV6:
245 		type = RTE_FLOW_ITEM_TYPE_IPV6;
246 		break;
247 	case RTE_VXLAN_GPE_TYPE_ETH:
248 		type = RTE_FLOW_ITEM_TYPE_ETH;
249 		break;
250 	default:
251 		type = RTE_FLOW_ITEM_TYPE_END;
252 	}
253 	return type;
254 }
255 
256 static enum rte_flow_item_type
257 mlx5_inet_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
258 {
259 	enum rte_flow_item_type type;
260 
261 	switch (proto_mask & proto_spec) {
262 	case 0:
263 		type = RTE_FLOW_ITEM_TYPE_VOID;
264 		break;
265 	case IPPROTO_UDP:
266 		type = RTE_FLOW_ITEM_TYPE_UDP;
267 		break;
268 	case IPPROTO_TCP:
269 		type = RTE_FLOW_ITEM_TYPE_TCP;
270 		break;
271 	case IPPROTO_IPIP:
272 		type = RTE_FLOW_ITEM_TYPE_IPV4;
273 		break;
274 	case IPPROTO_IPV6:
275 		type = RTE_FLOW_ITEM_TYPE_IPV6;
276 		break;
277 	case IPPROTO_ESP:
278 		type = RTE_FLOW_ITEM_TYPE_ESP;
279 		break;
280 	default:
281 		type = RTE_FLOW_ITEM_TYPE_END;
282 	}
283 	return type;
284 }
285 
286 static enum rte_flow_item_type
287 mlx5_ethertype_to_item_type(rte_be16_t type_spec,
288 			    rte_be16_t type_mask, bool is_tunnel)
289 {
290 	enum rte_flow_item_type type;
291 
292 	switch (rte_be_to_cpu_16(type_spec & type_mask)) {
293 	case 0:
294 		type = RTE_FLOW_ITEM_TYPE_VOID;
295 		break;
296 	case RTE_ETHER_TYPE_TEB:
297 		type = is_tunnel ?
298 		       RTE_FLOW_ITEM_TYPE_ETH : RTE_FLOW_ITEM_TYPE_END;
299 		break;
300 	case RTE_ETHER_TYPE_VLAN:
301 		type = !is_tunnel ?
302 		       RTE_FLOW_ITEM_TYPE_VLAN : RTE_FLOW_ITEM_TYPE_END;
303 		break;
304 	case RTE_ETHER_TYPE_IPV4:
305 		type = RTE_FLOW_ITEM_TYPE_IPV4;
306 		break;
307 	case RTE_ETHER_TYPE_IPV6:
308 		type = RTE_FLOW_ITEM_TYPE_IPV6;
309 		break;
310 	default:
311 		type = RTE_FLOW_ITEM_TYPE_END;
312 	}
313 	return type;
314 }
315 
316 static enum rte_flow_item_type
317 mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
318 {
319 #define MLX5_XSET_ITEM_MASK_SPEC(type, fld) \
320 	do { \
321 		const void *m = item->mask; \
322 		const void *s = item->spec; \
323 		mask = m ?
\ 324 ((const struct rte_flow_item_##type *)m)->fld : \ 325 rte_flow_item_##type##_mask.fld; \ 326 spec = ((const struct rte_flow_item_##type *)s)->fld; \ 327 } while (0) 328 329 enum rte_flow_item_type ret; 330 uint16_t spec, mask; 331 332 if (item == NULL || item->spec == NULL) 333 return RTE_FLOW_ITEM_TYPE_VOID; 334 switch (item->type) { 335 case RTE_FLOW_ITEM_TYPE_ETH: 336 MLX5_XSET_ITEM_MASK_SPEC(eth, hdr.ether_type); 337 if (!mask) 338 return RTE_FLOW_ITEM_TYPE_VOID; 339 ret = mlx5_ethertype_to_item_type(spec, mask, false); 340 break; 341 case RTE_FLOW_ITEM_TYPE_VLAN: 342 MLX5_XSET_ITEM_MASK_SPEC(vlan, hdr.eth_proto); 343 if (!mask) 344 return RTE_FLOW_ITEM_TYPE_VOID; 345 ret = mlx5_ethertype_to_item_type(spec, mask, false); 346 break; 347 case RTE_FLOW_ITEM_TYPE_IPV4: 348 MLX5_XSET_ITEM_MASK_SPEC(ipv4, hdr.next_proto_id); 349 if (!mask) 350 return RTE_FLOW_ITEM_TYPE_VOID; 351 ret = mlx5_inet_proto_to_item_type(spec, mask); 352 break; 353 case RTE_FLOW_ITEM_TYPE_IPV6: 354 MLX5_XSET_ITEM_MASK_SPEC(ipv6, hdr.proto); 355 if (!mask) 356 return RTE_FLOW_ITEM_TYPE_VOID; 357 ret = mlx5_inet_proto_to_item_type(spec, mask); 358 break; 359 case RTE_FLOW_ITEM_TYPE_GENEVE: 360 MLX5_XSET_ITEM_MASK_SPEC(geneve, protocol); 361 ret = mlx5_ethertype_to_item_type(spec, mask, true); 362 break; 363 case RTE_FLOW_ITEM_TYPE_GRE: 364 MLX5_XSET_ITEM_MASK_SPEC(gre, protocol); 365 ret = mlx5_ethertype_to_item_type(spec, mask, true); 366 break; 367 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: 368 MLX5_XSET_ITEM_MASK_SPEC(vxlan_gpe, hdr.proto); 369 ret = mlx5_nsh_proto_to_item_type(spec, mask); 370 break; 371 default: 372 ret = RTE_FLOW_ITEM_TYPE_VOID; 373 break; 374 } 375 return ret; 376 #undef MLX5_XSET_ITEM_MASK_SPEC 377 } 378 379 static const int * 380 mlx5_flow_expand_rss_skip_explicit(const struct mlx5_flow_expand_node graph[], 381 const int *next_node) 382 { 383 const struct mlx5_flow_expand_node *node = NULL; 384 const int *next = next_node; 385 386 while (next && *next) { 387 /* 388 * Skip the nodes with the MLX5_EXPANSION_NODE_EXPLICIT 389 * flag set, because they were not found in the flow pattern. 390 */ 391 node = &graph[*next]; 392 if (!(node->node_flags & MLX5_EXPANSION_NODE_EXPLICIT)) 393 break; 394 next = node->next; 395 } 396 return next; 397 } 398 399 #define MLX5_RSS_EXP_ELT_N 32 400 401 /** 402 * Expand RSS flows into several possible flows according to the RSS hash 403 * fields requested and the driver capabilities. 404 * 405 * @param[out] buf 406 * Buffer to store the result expansion. 407 * @param[in] size 408 * Buffer size in bytes. If 0, @p buf can be NULL. 409 * @param[in] pattern 410 * User flow pattern. 411 * @param[in] types 412 * RSS types to expand (see RTE_ETH_RSS_* definitions). 413 * @param[in] graph 414 * Input graph to expand @p pattern according to @p types. 415 * @param[in] graph_root_index 416 * Index of root node in @p graph, typically 0. 417 * 418 * @return 419 * A positive value representing the size of @p buf in bytes regardless of 420 * @p size on success, a negative errno value otherwise and rte_errno is 421 * set, the following errors are defined: 422 * 423 * -E2BIG: graph-depth @p graph is too deep. 424 * -EINVAL: @p size has not enough space for expanded pattern. 
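 *
 * A minimal usage sketch (illustrative only; the buffer size and RSS
 * type set are arbitrary here, not an API contract):
 *
 *   uint8_t raw[4096];
 *   struct mlx5_flow_expand_rss *exp = (void *)raw;
 *   const struct rte_flow_item pat[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *       { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   int ret = mlx5_flow_expand_rss(exp, sizeof(raw), pat,
 *                                  RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
 *                                  RTE_ETH_RSS_TCP,
 *                                  mlx5_support_expansion,
 *                                  MLX5_EXPANSION_ROOT);
 *
 * On success, exp->entries covers the user pattern plus expansions
 * such as ETH/IPV4/UDP/END and ETH/IPV4/TCP/END, each with its own
 * priority offset.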
425 */ 426 static int 427 mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, 428 const struct rte_flow_item *pattern, uint64_t types, 429 const struct mlx5_flow_expand_node graph[], 430 int graph_root_index) 431 { 432 const struct rte_flow_item *item; 433 const struct mlx5_flow_expand_node *node = &graph[graph_root_index]; 434 const int *next_node; 435 const int *stack[MLX5_RSS_EXP_ELT_N]; 436 int stack_pos = 0; 437 struct rte_flow_item flow_items[MLX5_RSS_EXP_ELT_N]; 438 unsigned int i, item_idx, last_expand_item_idx = 0; 439 size_t lsize; 440 size_t user_pattern_size = 0; 441 void *addr = NULL; 442 const struct mlx5_flow_expand_node *next = NULL; 443 struct rte_flow_item missed_item; 444 int missed = 0; 445 int elt = 0; 446 const struct rte_flow_item *last_expand_item = NULL; 447 448 memset(&missed_item, 0, sizeof(missed_item)); 449 lsize = offsetof(struct mlx5_flow_expand_rss, entry) + 450 MLX5_RSS_EXP_ELT_N * sizeof(buf->entry[0]); 451 if (lsize > size) 452 return -EINVAL; 453 buf->entry[0].priority = 0; 454 buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N]; 455 buf->entries = 0; 456 addr = buf->entry[0].pattern; 457 for (item = pattern, item_idx = 0; 458 item->type != RTE_FLOW_ITEM_TYPE_END; 459 item++, item_idx++) { 460 if (!mlx5_flow_is_rss_expandable_item(item)) { 461 user_pattern_size += sizeof(*item); 462 continue; 463 } 464 last_expand_item = item; 465 last_expand_item_idx = item_idx; 466 i = 0; 467 while (node->next && node->next[i]) { 468 next = &graph[node->next[i]]; 469 if (next->type == item->type) 470 break; 471 if (next->node_flags & MLX5_EXPANSION_NODE_EXPLICIT) { 472 node = next; 473 i = 0; 474 } else { 475 ++i; 476 } 477 } 478 if (next) 479 node = next; 480 user_pattern_size += sizeof(*item); 481 } 482 user_pattern_size += sizeof(*item); /* Handle END item. */ 483 lsize += user_pattern_size; 484 if (lsize > size) 485 return -EINVAL; 486 /* Copy the user pattern in the first entry of the buffer. */ 487 rte_memcpy(addr, pattern, user_pattern_size); 488 addr = (void *)(((uintptr_t)addr) + user_pattern_size); 489 buf->entries = 1; 490 /* Start expanding. */ 491 memset(flow_items, 0, sizeof(flow_items)); 492 user_pattern_size -= sizeof(*item); 493 /* 494 * Check if the last valid item has spec set, need complete pattern, 495 * and the pattern can be used for expansion. 496 */ 497 missed_item.type = mlx5_flow_expand_rss_item_complete(last_expand_item); 498 if (missed_item.type == RTE_FLOW_ITEM_TYPE_END) { 499 /* Item type END indicates expansion is not required. */ 500 return lsize; 501 } 502 if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) { 503 next = NULL; 504 missed = 1; 505 i = 0; 506 while (node->next && node->next[i]) { 507 next = &graph[node->next[i]]; 508 if (next->type == missed_item.type) { 509 flow_items[0].type = missed_item.type; 510 flow_items[1].type = RTE_FLOW_ITEM_TYPE_END; 511 break; 512 } 513 if (next->node_flags & MLX5_EXPANSION_NODE_EXPLICIT) { 514 node = next; 515 i = 0; 516 } else { 517 ++i; 518 } 519 next = NULL; 520 } 521 } 522 if (next && missed) { 523 elt = 2; /* missed item + item end. 
*/
524 		node = next;
525 		lsize += elt * sizeof(*item) + user_pattern_size;
526 		if (lsize > size)
527 			return -EINVAL;
528 		if (node->rss_types & types) {
529 			buf->entry[buf->entries].priority = 1;
530 			buf->entry[buf->entries].pattern = addr;
531 			buf->entries++;
532 			rte_memcpy(addr, buf->entry[0].pattern,
533 				   user_pattern_size);
534 			addr = (void *)(((uintptr_t)addr) + user_pattern_size);
535 			rte_memcpy(addr, flow_items, elt * sizeof(*item));
536 			addr = (void *)(((uintptr_t)addr) +
537 					elt * sizeof(*item));
538 		}
539 	} else if (last_expand_item != NULL) {
540 		node = mlx5_flow_expand_rss_adjust_node(pattern,
541 				last_expand_item_idx, graph, node);
542 	}
543 	memset(flow_items, 0, sizeof(flow_items));
544 	next_node = mlx5_flow_expand_rss_skip_explicit(graph,
545 			node->next);
546 	stack[stack_pos] = next_node;
547 	node = next_node ? &graph[*next_node] : NULL;
548 	while (node) {
549 		flow_items[stack_pos].type = node->type;
550 		if (node->rss_types & types) {
551 			size_t n;
552 			/*
553 			 * Compute the number of items to copy from the
554 			 * expansion and copy them.
555 			 * When stack_pos is 0, there is one element in it,
556 			 * plus the additional END item.
557 			 */
558 			elt = stack_pos + 2;
559 			flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
560 			lsize += elt * sizeof(*item) + user_pattern_size;
561 			if (lsize > size)
562 				return -EINVAL;
563 			n = elt * sizeof(*item);
564 			MLX5_ASSERT((buf->entries) < MLX5_RSS_EXP_ELT_N);
565 			buf->entry[buf->entries].priority =
566 				stack_pos + 1 + missed;
567 			buf->entry[buf->entries].pattern = addr;
568 			buf->entries++;
569 			rte_memcpy(addr, buf->entry[0].pattern,
570 				   user_pattern_size);
571 			addr = (void *)(((uintptr_t)addr) +
572 					user_pattern_size);
573 			rte_memcpy(addr, &missed_item,
574 				   missed * sizeof(*item));
575 			addr = (void *)(((uintptr_t)addr) +
576 				missed * sizeof(*item));
577 			rte_memcpy(addr, flow_items, n);
578 			addr = (void *)(((uintptr_t)addr) + n);
579 		}
580 		/* Go deeper. */
581 		if (!(node->node_flags & MLX5_EXPANSION_NODE_OPTIONAL) &&
582 				node->next) {
583 			next_node = mlx5_flow_expand_rss_skip_explicit(graph,
584 					node->next);
585 			if (stack_pos++ == MLX5_RSS_EXP_ELT_N) {
586 				rte_errno = E2BIG;
587 				return -rte_errno;
588 			}
589 			stack[stack_pos] = next_node;
590 		} else if (*(next_node + 1)) {
591 			/* Follow up with the next possibility. */
592 			next_node = mlx5_flow_expand_rss_skip_explicit(graph,
593 					++next_node);
594 		} else if (!stack_pos) {
595 			/*
596 			 * Completing the traversal over the different paths.
597 			 * The next_node is advanced to the terminator.
598 			 */
599 			++next_node;
600 		} else {
601 			/* Move to the next path. */
602 			while (stack_pos) {
603 				next_node = stack[--stack_pos];
604 				next_node++;
605 				if (*next_node)
606 					break;
607 			}
608 			next_node = mlx5_flow_expand_rss_skip_explicit(graph,
609 					next_node);
610 			stack[stack_pos] = next_node;
611 		}
612 		node = next_node && *next_node ? &graph[*next_node] : NULL;
613 	}
614 	return lsize;
615 }
616 
617 /**
618  * Expand SQN flows into several possible flows according to the Tx queue
619  * number.
620  *
621  * @param[out] buf
622  *   Buffer to store the result expansion.
623  * @param[in] size
624  *   Buffer size in bytes. If 0, @p buf can be NULL.
625  * @param[in] pattern
626  *   User flow pattern.
627  * @param[in] sq_specs
628  *   Buffer to store sq spec.
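 *   (One entry is produced per Tx queue: as a sketch, with 4 Tx queues
 *   configured, a pattern containing PORT_REPRESENTOR is copied four
 *   times and each copy is extended with an MLX5_RTE_FLOW_ITEM_TYPE_SQ
 *   item carrying that queue's SQ number, so buf->entries ends up as 4.)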
629 * 630 * @return 631 * 0 for success and negative value for failure 632 * 633 */ 634 static int 635 mlx5_flow_expand_sqn(struct mlx5_flow_expand_sqn *buf, size_t size, 636 const struct rte_flow_item *pattern, 637 struct mlx5_rte_flow_item_sq *sq_specs) 638 { 639 const struct rte_flow_item *item; 640 bool port_representor = false; 641 size_t user_pattern_size = 0; 642 struct rte_eth_dev *dev; 643 struct mlx5_priv *priv; 644 void *addr = NULL; 645 uint16_t port_id; 646 size_t lsize; 647 int elt = 2; 648 uint16_t i; 649 650 buf->entries = 0; 651 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { 652 if (item->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR) { 653 const struct rte_flow_item_ethdev *pid_v = item->spec; 654 655 if (!pid_v) 656 return 0; 657 port_id = pid_v->port_id; 658 port_representor = true; 659 } 660 user_pattern_size += sizeof(*item); 661 } 662 if (!port_representor) 663 return 0; 664 dev = &rte_eth_devices[port_id]; 665 priv = dev->data->dev_private; 666 buf->entry[0].pattern = (void *)&buf->entry[priv->txqs_n]; 667 lsize = offsetof(struct mlx5_flow_expand_sqn, entry) + 668 sizeof(buf->entry[0]) * priv->txqs_n; 669 if (lsize + (user_pattern_size + sizeof(struct rte_flow_item) * elt) * priv->txqs_n > size) 670 return -EINVAL; 671 addr = buf->entry[0].pattern; 672 for (i = 0; i != priv->txqs_n; ++i) { 673 struct rte_flow_item pattern_add[] = { 674 { 675 .type = (enum rte_flow_item_type) 676 MLX5_RTE_FLOW_ITEM_TYPE_SQ, 677 .spec = &sq_specs[i], 678 }, 679 { 680 .type = RTE_FLOW_ITEM_TYPE_END, 681 }, 682 }; 683 struct mlx5_txq_ctrl *txq = mlx5_txq_get(dev, i); 684 685 if (txq == NULL) 686 return -EINVAL; 687 buf->entry[i].pattern = addr; 688 sq_specs[i].queue = mlx5_txq_get_sqn(txq); 689 mlx5_txq_release(dev, i); 690 rte_memcpy(addr, pattern, user_pattern_size); 691 addr = (void *)(((uintptr_t)addr) + user_pattern_size); 692 rte_memcpy(addr, pattern_add, sizeof(struct rte_flow_item) * elt); 693 addr = (void *)(((uintptr_t)addr) + sizeof(struct rte_flow_item) * elt); 694 buf->entries++; 695 } 696 return 0; 697 } 698 699 enum mlx5_expansion { 700 MLX5_EXPANSION_ROOT, 701 MLX5_EXPANSION_ROOT_OUTER, 702 MLX5_EXPANSION_OUTER_ETH, 703 MLX5_EXPANSION_OUTER_VLAN, 704 MLX5_EXPANSION_OUTER_IPV4, 705 MLX5_EXPANSION_OUTER_IPV4_UDP, 706 MLX5_EXPANSION_OUTER_IPV4_TCP, 707 MLX5_EXPANSION_OUTER_IPV4_ESP, 708 MLX5_EXPANSION_OUTER_IPV4_ICMP, 709 MLX5_EXPANSION_OUTER_IPV6, 710 MLX5_EXPANSION_OUTER_IPV6_UDP, 711 MLX5_EXPANSION_OUTER_IPV6_TCP, 712 MLX5_EXPANSION_OUTER_IPV6_ESP, 713 MLX5_EXPANSION_OUTER_IPV6_ICMP6, 714 MLX5_EXPANSION_VXLAN, 715 MLX5_EXPANSION_STD_VXLAN, 716 MLX5_EXPANSION_L3_VXLAN, 717 MLX5_EXPANSION_VXLAN_GPE, 718 MLX5_EXPANSION_GRE, 719 MLX5_EXPANSION_NVGRE, 720 MLX5_EXPANSION_GRE_KEY, 721 MLX5_EXPANSION_MPLS, 722 MLX5_EXPANSION_ETH, 723 MLX5_EXPANSION_VLAN, 724 MLX5_EXPANSION_IPV4, 725 MLX5_EXPANSION_IPV4_UDP, 726 MLX5_EXPANSION_IPV4_TCP, 727 MLX5_EXPANSION_IPV4_ESP, 728 MLX5_EXPANSION_IPV4_ICMP, 729 MLX5_EXPANSION_IPV6, 730 MLX5_EXPANSION_IPV6_UDP, 731 MLX5_EXPANSION_IPV6_TCP, 732 MLX5_EXPANSION_IPV6_ESP, 733 MLX5_EXPANSION_IPV6_ICMP6, 734 MLX5_EXPANSION_IPV6_FRAG_EXT, 735 MLX5_EXPANSION_GTP, 736 MLX5_EXPANSION_GENEVE, 737 }; 738 739 /** Supported expansion of items. 
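 * Each entry below is one node of the expansion graph: the pattern
 * item type it represents, the RSS type bits that select it, and a
 * zero-terminated list of child node indexes built with
 * MLX5_FLOW_EXPAND_RSS_NEXT().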
*/ 740 static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { 741 [MLX5_EXPANSION_ROOT] = { 742 .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, 743 MLX5_EXPANSION_IPV4, 744 MLX5_EXPANSION_IPV6), 745 .type = RTE_FLOW_ITEM_TYPE_END, 746 }, 747 [MLX5_EXPANSION_ROOT_OUTER] = { 748 .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH, 749 MLX5_EXPANSION_OUTER_IPV4, 750 MLX5_EXPANSION_OUTER_IPV6), 751 .type = RTE_FLOW_ITEM_TYPE_END, 752 }, 753 [MLX5_EXPANSION_OUTER_ETH] = { 754 .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN), 755 .type = RTE_FLOW_ITEM_TYPE_ETH, 756 .rss_types = 0, 757 }, 758 [MLX5_EXPANSION_OUTER_VLAN] = { 759 .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, 760 MLX5_EXPANSION_OUTER_IPV6), 761 .type = RTE_FLOW_ITEM_TYPE_VLAN, 762 .node_flags = MLX5_EXPANSION_NODE_EXPLICIT, 763 }, 764 [MLX5_EXPANSION_OUTER_IPV4] = { 765 .next = MLX5_FLOW_EXPAND_RSS_NEXT 766 (MLX5_EXPANSION_OUTER_IPV4_UDP, 767 MLX5_EXPANSION_OUTER_IPV4_TCP, 768 MLX5_EXPANSION_OUTER_IPV4_ESP, 769 MLX5_EXPANSION_OUTER_IPV4_ICMP, 770 MLX5_EXPANSION_GRE, 771 MLX5_EXPANSION_NVGRE, 772 MLX5_EXPANSION_IPV4, 773 MLX5_EXPANSION_IPV6), 774 .type = RTE_FLOW_ITEM_TYPE_IPV4, 775 .rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | 776 RTE_ETH_RSS_NONFRAG_IPV4_OTHER, 777 }, 778 [MLX5_EXPANSION_OUTER_IPV4_UDP] = { 779 .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, 780 MLX5_EXPANSION_VXLAN_GPE, 781 MLX5_EXPANSION_MPLS, 782 MLX5_EXPANSION_GENEVE, 783 MLX5_EXPANSION_GTP), 784 .type = RTE_FLOW_ITEM_TYPE_UDP, 785 .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP, 786 }, 787 [MLX5_EXPANSION_OUTER_IPV4_TCP] = { 788 .type = RTE_FLOW_ITEM_TYPE_TCP, 789 .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP, 790 }, 791 [MLX5_EXPANSION_OUTER_IPV4_ESP] = { 792 .type = RTE_FLOW_ITEM_TYPE_ESP, 793 .rss_types = RTE_ETH_RSS_ESP, 794 }, 795 [MLX5_EXPANSION_OUTER_IPV4_ICMP] = { 796 .type = RTE_FLOW_ITEM_TYPE_ICMP, 797 }, 798 [MLX5_EXPANSION_OUTER_IPV6] = { 799 .next = MLX5_FLOW_EXPAND_RSS_NEXT 800 (MLX5_EXPANSION_OUTER_IPV6_UDP, 801 MLX5_EXPANSION_OUTER_IPV6_TCP, 802 MLX5_EXPANSION_OUTER_IPV6_ESP, 803 MLX5_EXPANSION_OUTER_IPV6_ICMP6, 804 MLX5_EXPANSION_IPV4, 805 MLX5_EXPANSION_IPV6, 806 MLX5_EXPANSION_GRE, 807 MLX5_EXPANSION_NVGRE), 808 .type = RTE_FLOW_ITEM_TYPE_IPV6, 809 .rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | 810 RTE_ETH_RSS_NONFRAG_IPV6_OTHER, 811 }, 812 [MLX5_EXPANSION_OUTER_IPV6_UDP] = { 813 .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, 814 MLX5_EXPANSION_VXLAN_GPE, 815 MLX5_EXPANSION_MPLS, 816 MLX5_EXPANSION_GENEVE, 817 MLX5_EXPANSION_GTP), 818 .type = RTE_FLOW_ITEM_TYPE_UDP, 819 .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP, 820 }, 821 [MLX5_EXPANSION_OUTER_IPV6_TCP] = { 822 .type = RTE_FLOW_ITEM_TYPE_TCP, 823 .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP, 824 }, 825 [MLX5_EXPANSION_OUTER_IPV6_ESP] = { 826 .type = RTE_FLOW_ITEM_TYPE_ESP, 827 .rss_types = RTE_ETH_RSS_ESP, 828 }, 829 [MLX5_EXPANSION_OUTER_IPV6_ICMP6] = { 830 .type = RTE_FLOW_ITEM_TYPE_ICMP6, 831 }, 832 [MLX5_EXPANSION_VXLAN] = { 833 .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, 834 MLX5_EXPANSION_IPV4, 835 MLX5_EXPANSION_IPV6), 836 .type = RTE_FLOW_ITEM_TYPE_VXLAN, 837 }, 838 [MLX5_EXPANSION_STD_VXLAN] = { 839 .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH), 840 .type = RTE_FLOW_ITEM_TYPE_VXLAN, 841 }, 842 [MLX5_EXPANSION_L3_VXLAN] = { 843 .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 844 MLX5_EXPANSION_IPV6), 845 .type = RTE_FLOW_ITEM_TYPE_VXLAN, 846 }, 847 [MLX5_EXPANSION_VXLAN_GPE] = { 
848 .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, 849 MLX5_EXPANSION_IPV4, 850 MLX5_EXPANSION_IPV6), 851 .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE, 852 }, 853 [MLX5_EXPANSION_GRE] = { 854 .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, 855 MLX5_EXPANSION_IPV4, 856 MLX5_EXPANSION_IPV6, 857 MLX5_EXPANSION_GRE_KEY, 858 MLX5_EXPANSION_MPLS), 859 .type = RTE_FLOW_ITEM_TYPE_GRE, 860 }, 861 [MLX5_EXPANSION_GRE_KEY] = { 862 .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 863 MLX5_EXPANSION_IPV6, 864 MLX5_EXPANSION_MPLS), 865 .type = RTE_FLOW_ITEM_TYPE_GRE_KEY, 866 .node_flags = MLX5_EXPANSION_NODE_OPTIONAL, 867 }, 868 [MLX5_EXPANSION_NVGRE] = { 869 .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH), 870 .type = RTE_FLOW_ITEM_TYPE_NVGRE, 871 }, 872 [MLX5_EXPANSION_MPLS] = { 873 .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 874 MLX5_EXPANSION_IPV6, 875 MLX5_EXPANSION_ETH), 876 .type = RTE_FLOW_ITEM_TYPE_MPLS, 877 .node_flags = MLX5_EXPANSION_NODE_OPTIONAL, 878 }, 879 [MLX5_EXPANSION_ETH] = { 880 .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN), 881 .type = RTE_FLOW_ITEM_TYPE_ETH, 882 }, 883 [MLX5_EXPANSION_VLAN] = { 884 .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 885 MLX5_EXPANSION_IPV6), 886 .type = RTE_FLOW_ITEM_TYPE_VLAN, 887 .node_flags = MLX5_EXPANSION_NODE_EXPLICIT, 888 }, 889 [MLX5_EXPANSION_IPV4] = { 890 .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP, 891 MLX5_EXPANSION_IPV4_TCP, 892 MLX5_EXPANSION_IPV4_ESP, 893 MLX5_EXPANSION_IPV4_ICMP), 894 .type = RTE_FLOW_ITEM_TYPE_IPV4, 895 .rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | 896 RTE_ETH_RSS_NONFRAG_IPV4_OTHER, 897 }, 898 [MLX5_EXPANSION_IPV4_UDP] = { 899 .type = RTE_FLOW_ITEM_TYPE_UDP, 900 .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP, 901 }, 902 [MLX5_EXPANSION_IPV4_TCP] = { 903 .type = RTE_FLOW_ITEM_TYPE_TCP, 904 .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP, 905 }, 906 [MLX5_EXPANSION_IPV4_ESP] = { 907 .type = RTE_FLOW_ITEM_TYPE_ESP, 908 .rss_types = RTE_ETH_RSS_ESP, 909 }, 910 [MLX5_EXPANSION_IPV4_ICMP] = { 911 .type = RTE_FLOW_ITEM_TYPE_ICMP, 912 }, 913 [MLX5_EXPANSION_IPV6] = { 914 .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP, 915 MLX5_EXPANSION_IPV6_TCP, 916 MLX5_EXPANSION_IPV6_ESP, 917 MLX5_EXPANSION_IPV6_ICMP6, 918 MLX5_EXPANSION_IPV6_FRAG_EXT), 919 .type = RTE_FLOW_ITEM_TYPE_IPV6, 920 .rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | 921 RTE_ETH_RSS_NONFRAG_IPV6_OTHER, 922 }, 923 [MLX5_EXPANSION_IPV6_UDP] = { 924 .type = RTE_FLOW_ITEM_TYPE_UDP, 925 .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP, 926 }, 927 [MLX5_EXPANSION_IPV6_TCP] = { 928 .type = RTE_FLOW_ITEM_TYPE_TCP, 929 .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP, 930 }, 931 [MLX5_EXPANSION_IPV6_ESP] = { 932 .type = RTE_FLOW_ITEM_TYPE_ESP, 933 .rss_types = RTE_ETH_RSS_ESP, 934 }, 935 [MLX5_EXPANSION_IPV6_FRAG_EXT] = { 936 .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT, 937 }, 938 [MLX5_EXPANSION_IPV6_ICMP6] = { 939 .type = RTE_FLOW_ITEM_TYPE_ICMP6, 940 }, 941 [MLX5_EXPANSION_GTP] = { 942 .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 943 MLX5_EXPANSION_IPV6), 944 .type = RTE_FLOW_ITEM_TYPE_GTP, 945 }, 946 [MLX5_EXPANSION_GENEVE] = { 947 .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, 948 MLX5_EXPANSION_IPV4, 949 MLX5_EXPANSION_IPV6), 950 .type = RTE_FLOW_ITEM_TYPE_GENEVE, 951 }, 952 }; 953 954 static struct rte_flow_action_handle * 955 mlx5_action_handle_create(struct rte_eth_dev *dev, 956 const struct rte_flow_indir_action_conf *conf, 957 const struct rte_flow_action *action, 958 struct 
rte_flow_error *error); 959 static int mlx5_action_handle_destroy 960 (struct rte_eth_dev *dev, 961 struct rte_flow_action_handle *handle, 962 struct rte_flow_error *error); 963 static int mlx5_action_handle_update 964 (struct rte_eth_dev *dev, 965 struct rte_flow_action_handle *handle, 966 const void *update, 967 struct rte_flow_error *error); 968 static int mlx5_action_handle_query 969 (struct rte_eth_dev *dev, 970 const struct rte_flow_action_handle *handle, 971 void *data, 972 struct rte_flow_error *error); 973 static int 974 mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev, 975 struct rte_flow_tunnel *app_tunnel, 976 struct rte_flow_action **actions, 977 uint32_t *num_of_actions, 978 struct rte_flow_error *error); 979 static int 980 mlx5_flow_tunnel_match(struct rte_eth_dev *dev, 981 struct rte_flow_tunnel *app_tunnel, 982 struct rte_flow_item **items, 983 uint32_t *num_of_items, 984 struct rte_flow_error *error); 985 static int 986 mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev, 987 struct rte_flow_item *pmd_items, 988 uint32_t num_items, struct rte_flow_error *err); 989 static int 990 mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev, 991 struct rte_flow_action *pmd_actions, 992 uint32_t num_actions, 993 struct rte_flow_error *err); 994 static int 995 mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev, 996 struct rte_mbuf *m, 997 struct rte_flow_restore_info *info, 998 struct rte_flow_error *err); 999 static struct rte_flow_item_flex_handle * 1000 mlx5_flow_flex_item_create(struct rte_eth_dev *dev, 1001 const struct rte_flow_item_flex_conf *conf, 1002 struct rte_flow_error *error); 1003 static int 1004 mlx5_flow_flex_item_release(struct rte_eth_dev *dev, 1005 const struct rte_flow_item_flex_handle *handle, 1006 struct rte_flow_error *error); 1007 static int 1008 mlx5_flow_info_get(struct rte_eth_dev *dev, 1009 struct rte_flow_port_info *port_info, 1010 struct rte_flow_queue_info *queue_info, 1011 struct rte_flow_error *error); 1012 static int 1013 mlx5_flow_port_configure(struct rte_eth_dev *dev, 1014 const struct rte_flow_port_attr *port_attr, 1015 uint16_t nb_queue, 1016 const struct rte_flow_queue_attr *queue_attr[], 1017 struct rte_flow_error *err); 1018 1019 static struct rte_flow_pattern_template * 1020 mlx5_flow_pattern_template_create(struct rte_eth_dev *dev, 1021 const struct rte_flow_pattern_template_attr *attr, 1022 const struct rte_flow_item items[], 1023 struct rte_flow_error *error); 1024 1025 static int 1026 mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev, 1027 struct rte_flow_pattern_template *template, 1028 struct rte_flow_error *error); 1029 static struct rte_flow_actions_template * 1030 mlx5_flow_actions_template_create(struct rte_eth_dev *dev, 1031 const struct rte_flow_actions_template_attr *attr, 1032 const struct rte_flow_action actions[], 1033 const struct rte_flow_action masks[], 1034 struct rte_flow_error *error); 1035 static int 1036 mlx5_flow_actions_template_destroy(struct rte_eth_dev *dev, 1037 struct rte_flow_actions_template *template, 1038 struct rte_flow_error *error); 1039 1040 static struct rte_flow_template_table * 1041 mlx5_flow_table_create(struct rte_eth_dev *dev, 1042 const struct rte_flow_template_table_attr *attr, 1043 struct rte_flow_pattern_template *item_templates[], 1044 uint8_t nb_item_templates, 1045 struct rte_flow_actions_template *action_templates[], 1046 uint8_t nb_action_templates, 1047 struct rte_flow_error *error); 1048 static int 1049 mlx5_flow_table_destroy(struct rte_eth_dev *dev, 1050 struct 
rte_flow_template_table *table, 1051 struct rte_flow_error *error); 1052 static int 1053 mlx5_flow_group_set_miss_actions(struct rte_eth_dev *dev, 1054 uint32_t group_id, 1055 const struct rte_flow_group_attr *attr, 1056 const struct rte_flow_action actions[], 1057 struct rte_flow_error *error); 1058 1059 static int 1060 mlx5_action_handle_query_update(struct rte_eth_dev *dev, 1061 struct rte_flow_action_handle *handle, 1062 const void *update, void *query, 1063 enum rte_flow_query_update_mode qu_mode, 1064 struct rte_flow_error *error); 1065 1066 static struct rte_flow_action_list_handle * 1067 mlx5_action_list_handle_create(struct rte_eth_dev *dev, 1068 const struct rte_flow_indir_action_conf *conf, 1069 const struct rte_flow_action *actions, 1070 struct rte_flow_error *error); 1071 1072 static int 1073 mlx5_action_list_handle_destroy(struct rte_eth_dev *dev, 1074 struct rte_flow_action_list_handle *handle, 1075 struct rte_flow_error *error); 1076 1077 static int 1078 mlx5_flow_action_list_handle_query_update(struct rte_eth_dev *dev, 1079 const 1080 struct rte_flow_action_list_handle *handle, 1081 const void **update, void **query, 1082 enum rte_flow_query_update_mode mode, 1083 struct rte_flow_error *error); 1084 1085 static int 1086 mlx5_flow_calc_table_hash(struct rte_eth_dev *dev, 1087 const struct rte_flow_template_table *table, 1088 const struct rte_flow_item pattern[], 1089 uint8_t pattern_template_index, 1090 uint32_t *hash, struct rte_flow_error *error); 1091 static int 1092 mlx5_flow_calc_encap_hash(struct rte_eth_dev *dev, 1093 const struct rte_flow_item pattern[], 1094 enum rte_flow_encap_hash_field dest_field, 1095 uint8_t *hash, 1096 struct rte_flow_error *error); 1097 1098 static int 1099 mlx5_template_table_resize(struct rte_eth_dev *dev, 1100 struct rte_flow_template_table *table, 1101 uint32_t nb_rules, struct rte_flow_error *error); 1102 static int 1103 mlx5_flow_async_update_resized(struct rte_eth_dev *dev, uint32_t queue, 1104 const struct rte_flow_op_attr *attr, 1105 struct rte_flow *rule, void *user_data, 1106 struct rte_flow_error *error); 1107 static int 1108 mlx5_table_resize_complete(struct rte_eth_dev *dev, 1109 struct rte_flow_template_table *table, 1110 struct rte_flow_error *error); 1111 1112 static const struct rte_flow_ops mlx5_flow_ops = { 1113 .validate = mlx5_flow_validate, 1114 .create = mlx5_flow_create, 1115 .destroy = mlx5_flow_destroy, 1116 .flush = mlx5_flow_flush, 1117 .isolate = mlx5_flow_isolate, 1118 .query = mlx5_flow_query, 1119 .dev_dump = mlx5_flow_dev_dump, 1120 .get_q_aged_flows = mlx5_flow_get_q_aged_flows, 1121 .get_aged_flows = mlx5_flow_get_aged_flows, 1122 .action_handle_create = mlx5_action_handle_create, 1123 .action_handle_destroy = mlx5_action_handle_destroy, 1124 .action_handle_update = mlx5_action_handle_update, 1125 .action_handle_query = mlx5_action_handle_query, 1126 .action_handle_query_update = mlx5_action_handle_query_update, 1127 .action_list_handle_create = mlx5_action_list_handle_create, 1128 .action_list_handle_destroy = mlx5_action_list_handle_destroy, 1129 .tunnel_decap_set = mlx5_flow_tunnel_decap_set, 1130 .tunnel_match = mlx5_flow_tunnel_match, 1131 .tunnel_action_decap_release = mlx5_flow_tunnel_action_release, 1132 .tunnel_item_release = mlx5_flow_tunnel_item_release, 1133 .get_restore_info = mlx5_flow_tunnel_get_restore_info, 1134 .flex_item_create = mlx5_flow_flex_item_create, 1135 .flex_item_release = mlx5_flow_flex_item_release, 1136 .info_get = mlx5_flow_info_get, 1137 .pick_transfer_proxy = 
mlx5_flow_pick_transfer_proxy,
1138 	.configure = mlx5_flow_port_configure,
1139 	.pattern_template_create = mlx5_flow_pattern_template_create,
1140 	.pattern_template_destroy = mlx5_flow_pattern_template_destroy,
1141 	.actions_template_create = mlx5_flow_actions_template_create,
1142 	.actions_template_destroy = mlx5_flow_actions_template_destroy,
1143 	.template_table_create = mlx5_flow_table_create,
1144 	.template_table_destroy = mlx5_flow_table_destroy,
1145 	.group_set_miss_actions = mlx5_flow_group_set_miss_actions,
1146 	.action_list_handle_query_update =
1147 		mlx5_flow_action_list_handle_query_update,
1148 	.flow_calc_table_hash = mlx5_flow_calc_table_hash,
1149 	.flow_calc_encap_hash = mlx5_flow_calc_encap_hash,
1150 	.flow_template_table_resize = mlx5_template_table_resize,
1151 	.flow_update_resized = mlx5_flow_async_update_resized,
1152 	.flow_template_table_resize_complete = mlx5_table_resize_complete,
1153 };
1154 
1155 /* Tunnel information. */
1156 struct mlx5_flow_tunnel_info {
1157 	uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
1158 	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
1159 };
1160 
1161 static struct mlx5_flow_tunnel_info tunnels_info[] = {
1162 	{
1163 		.tunnel = MLX5_FLOW_LAYER_VXLAN,
1164 		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
1165 	},
1166 	{
1167 		.tunnel = MLX5_FLOW_LAYER_GENEVE,
1168 		.ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP,
1169 	},
1170 	{
1171 		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
1172 		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
1173 	},
1174 	{
1175 		.tunnel = MLX5_FLOW_LAYER_GRE,
1176 		.ptype = RTE_PTYPE_TUNNEL_GRE,
1177 	},
1178 	{
1179 		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
1180 		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
1181 	},
1182 	{
1183 		.tunnel = MLX5_FLOW_LAYER_MPLS,
1184 		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
1185 	},
1186 	{
1187 		.tunnel = MLX5_FLOW_LAYER_NVGRE,
1188 		.ptype = RTE_PTYPE_TUNNEL_NVGRE,
1189 	},
1190 	{
1191 		.tunnel = MLX5_FLOW_LAYER_IPIP,
1192 		.ptype = RTE_PTYPE_TUNNEL_IP,
1193 	},
1194 	{
1195 		.tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
1196 		.ptype = RTE_PTYPE_TUNNEL_IP,
1197 	},
1198 	{
1199 		.tunnel = MLX5_FLOW_LAYER_GTP,
1200 		.ptype = RTE_PTYPE_TUNNEL_GTPU,
1201 	},
1202 };
1203 
1204 
1205 
1206 /**
1207  * Translate tag ID to register.
1208  *
1209  * @param[in] dev
1210  *   Pointer to the Ethernet device structure.
1211  * @param[in] feature
1212  *   The feature that requests the register.
1213  * @param[in] id
1214  *   The requested register ID.
1215  * @param[out] error
1216  *   Error description in case of failure.
1217  *
1218  * @return
1219  *   The requested register on success, a negative errno
1220  *   value otherwise and rte_errno is set.
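 *
 * As a sketch (the mapping depends on dv_xmeta_en, values here are
 * illustrative): with dv_xmeta_en == MLX5_XMETA_MODE_META16,
 * MLX5_METADATA_RX resolves to REG_C_0 and MLX5_FLOW_MARK to REG_C_1;
 * in legacy mode metadata uses REG_B/REG_A and mark has no register
 * (REG_NON).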
1221  */
1222 int
1223 mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
1224 		     enum mlx5_feature_name feature,
1225 		     uint32_t id,
1226 		     struct rte_flow_error *error)
1227 {
1228 	struct mlx5_priv *priv = dev->data->dev_private;
1229 	struct mlx5_sh_config *config = &priv->sh->config;
1230 	struct mlx5_dev_registers *reg = &priv->sh->registers;
1231 	enum modify_reg start_reg;
1232 	bool skip_mtr_reg = false;
1233 
1234 	switch (feature) {
1235 	case MLX5_HAIRPIN_RX:
1236 		return REG_B;
1237 	case MLX5_HAIRPIN_TX:
1238 		return REG_A;
1239 	case MLX5_METADATA_RX:
1240 		switch (config->dv_xmeta_en) {
1241 		case MLX5_XMETA_MODE_LEGACY:
1242 			return REG_B;
1243 		case MLX5_XMETA_MODE_META16:
1244 			return REG_C_0;
1245 		case MLX5_XMETA_MODE_META32:
1246 			return REG_C_1;
1247 		case MLX5_XMETA_MODE_META32_HWS:
1248 			return REG_C_1;
1249 		}
1250 		break;
1251 	case MLX5_METADATA_TX:
1252 		if (config->dv_flow_en == 2 && config->dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
1253 			return REG_C_1;
1254 		} else {
1255 			return REG_A;
1256 		}
1257 	case MLX5_METADATA_FDB:
1258 		switch (config->dv_xmeta_en) {
1259 		case MLX5_XMETA_MODE_LEGACY:
1260 			return REG_NON;
1261 		case MLX5_XMETA_MODE_META16:
1262 			return REG_C_0;
1263 		case MLX5_XMETA_MODE_META32:
1264 			return REG_C_1;
1265 		case MLX5_XMETA_MODE_META32_HWS:
1266 			return REG_C_1;
1267 		}
1268 		break;
1269 	case MLX5_FLOW_MARK:
1270 		switch (config->dv_xmeta_en) {
1271 		case MLX5_XMETA_MODE_LEGACY:
1272 		case MLX5_XMETA_MODE_META32_HWS:
1273 			return REG_NON;
1274 		case MLX5_XMETA_MODE_META16:
1275 			return REG_C_1;
1276 		case MLX5_XMETA_MODE_META32:
1277 			return REG_C_0;
1278 		}
1279 		break;
1280 	case MLX5_MTR_ID:
1281 		/*
1282 		 * If meter color and meter id share one register, flow match
1283 		 * should use the meter color register for match.
1284 		 */
1285 		if (priv->mtr_reg_share)
1286 			return reg->aso_reg;
1287 		else
1288 			return reg->aso_reg != REG_C_2 ? REG_C_2 :
1289 			       REG_C_3;
1290 	case MLX5_MTR_COLOR:
1291 	case MLX5_ASO_FLOW_HIT:
1292 	case MLX5_ASO_CONNTRACK:
1293 	case MLX5_SAMPLE_ID:
1294 		/* All features use the same REG_C. */
1295 		MLX5_ASSERT(reg->aso_reg != REG_NON);
1296 		return reg->aso_reg;
1297 	case MLX5_COPY_MARK:
1298 		/*
1299 		 * The metadata COPY_MARK register is used in the meter suffix
1300 		 * sub-flow when a meter is present. It's safe to share the
1301 		 * same register.
1302 		 */
1302 		return reg->aso_reg != REG_C_2 ? REG_C_2 : REG_C_3;
1303 	case MLX5_APP_TAG:
1304 		/*
1305 		 * If the meter is enabled, it engages a register for color
1306 		 * match and flow match. If the meter color match does not use
1307 		 * REG_C_2, the REG_C_x used by the meter color match must be
1308 		 * skipped.
1309 		 * If the meter is disabled, all available registers are free
1310 		 * to use.
1310 		 */
1311 		start_reg = reg->aso_reg != REG_C_2 ? REG_C_2 :
1312 			    (priv->mtr_reg_share ? REG_C_3 : REG_C_4);
1313 		skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
1314 		if (id > (uint32_t)(REG_C_7 - start_reg))
1315 			return rte_flow_error_set(error, EINVAL,
1316 						  RTE_FLOW_ERROR_TYPE_ITEM,
1317 						  NULL, "invalid tag id");
1318 		if (priv->sh->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
1319 			return rte_flow_error_set(error, ENOTSUP,
1320 						  RTE_FLOW_ERROR_TYPE_ITEM,
1321 						  NULL, "unsupported tag id");
1322 		/*
1323 		 * This case means the meter is using a REG_C_x greater than 2.
1324 		 * Take care not to conflict with the meter color REG_C_x.
1325 		 * If the available index REG_C_y >= REG_C_x, skip the
1326 		 * color register.
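 * For example (illustrative): with the meter color in REG_C_3 and
 * start_reg == REG_C_2, a flow_mreg_c[] candidate >= REG_C_3 is
 * skipped and the next available entry is returned instead.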
1327 		 */
1328 		if (skip_mtr_reg && priv->sh->flow_mreg_c
1329 		    [id + start_reg - REG_C_0] >= reg->aso_reg) {
1330 			if (id >= (uint32_t)(REG_C_7 - start_reg))
1331 				return rte_flow_error_set(error, EINVAL,
1332 						RTE_FLOW_ERROR_TYPE_ITEM,
1333 						NULL, "invalid tag id");
1334 			if (priv->sh->flow_mreg_c
1335 			    [id + 1 + start_reg - REG_C_0] != REG_NON)
1336 				return priv->sh->flow_mreg_c
1337 				       [id + 1 + start_reg - REG_C_0];
1338 			return rte_flow_error_set(error, ENOTSUP,
1339 						  RTE_FLOW_ERROR_TYPE_ITEM,
1340 						  NULL, "unsupported tag id");
1341 		}
1342 		return priv->sh->flow_mreg_c[id + start_reg - REG_C_0];
1343 	}
1344 	MLX5_ASSERT(false);
1345 	return rte_flow_error_set(error, EINVAL,
1346 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1347 				  NULL, "invalid feature name");
1348 }
1349 
1350 /**
1351  * Check extensive flow metadata register support.
1352  *
1353  * @param dev
1354  *   Pointer to rte_eth_dev structure.
1355  *
1356  * @return
1357  *   True if device supports extensive flow metadata register, otherwise false.
1358  */
1359 bool
1360 mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
1361 {
1362 	struct mlx5_priv *priv = dev->data->dev_private;
1363 
1364 	/*
1365 	 * Having an available reg_c can be regarded as support for the
1366 	 * extensive flow metadata registers, which means:
1367 	 * - metadata register copy action by modify header.
1368 	 * - 16 modify header actions are supported.
1369 	 * - reg_c's are preserved across different domains (FDB and NIC) on
1370 	 *   packet loopback by flow lookup miss.
1371 	 */
1372 	return priv->sh->flow_mreg_c[2] != REG_NON;
1373 }
1374 
1375 /**
1376  * Get the lowest priority.
1377  *
1378  * @param[in] dev
1379  *   Pointer to the Ethernet device structure.
1380  * @param[in] attr
1381  *   Pointer to device flow rule attributes.
1382  *
1383  * @return
1384  *   The lowest priority value of the flow.
1385  */
1386 uint32_t
1387 mlx5_get_lowest_priority(struct rte_eth_dev *dev,
1388 			 const struct rte_flow_attr *attr)
1389 {
1390 	struct mlx5_priv *priv = dev->data->dev_private;
1391 
1392 	if (!attr->group && !(attr->transfer && priv->fdb_def_rule))
1393 		return priv->sh->flow_max_priority - 2;
1394 	return MLX5_NON_ROOT_FLOW_MAX_PRIO - 1;
1395 }
1396 
1397 /**
1398  * Calculate matcher priority of the flow.
1399  *
1400  * @param[in] dev
1401  *   Pointer to the Ethernet device structure.
1402  * @param[in] attr
1403  *   Pointer to device flow rule attributes.
1404  * @param[in] subpriority
1405  *   The priority based on the items.
1406  * @param[in] external
1407  *   True if the flow is a user flow.
1408  * @return
1409  *   The matcher priority of the flow.
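 *
 * Sketch of the non-root case (values illustrative): a flow in group 1
 * with attr->priority == 2 and subpriority == 1 maps to matcher
 * priority 2 * 3 + 1 == 7, since each user priority spans three
 * matcher priority slots.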
1410  */
1411 uint16_t
1412 mlx5_get_matcher_priority(struct rte_eth_dev *dev,
1413 			  const struct rte_flow_attr *attr,
1414 			  uint32_t subpriority, bool external)
1415 {
1416 	uint16_t priority = (uint16_t)attr->priority;
1417 	struct mlx5_priv *priv = dev->data->dev_private;
1418 
1419 	/* NIC root rules */
1420 	if (!attr->group && !attr->transfer) {
1421 		if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
1422 			priority = priv->sh->flow_max_priority - 1;
1423 		return mlx5_os_flow_adjust_priority(dev, priority, subpriority);
1424 	/* FDB root rules */
1425 	} else if (attr->transfer && (!external || !priv->fdb_def_rule) &&
1426 		   attr->group == 0 &&
1427 		   attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR) {
1428 		return (priv->sh->flow_max_priority - 1) * 3;
1429 	}
1430 	if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
1431 		priority = MLX5_NON_ROOT_FLOW_MAX_PRIO;
1432 	return priority * 3 + subpriority;
1433 }
1434 
1435 /**
1436  * Verify the @p item specifications (spec, last, mask) are compatible with the
1437  * NIC capabilities.
1438  *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
1439  * @param[in] item
1440  *   Item specification.
1441  * @param[in] mask
1442  *   @p item->mask or flow default bit-masks.
1443  * @param[in] nic_mask
1444  *   Bit-masks covering supported fields by the NIC to compare with user mask.
1445  * @param[in] size
1446  *   Bit-masks size in bytes.
1447  * @param[in] range_accepted
1448  *   True if range of values is accepted for specific fields, false otherwise.
1449  * @param[out] error
1450  *   Pointer to error structure.
1451  *
1452  * @return
1453  *   0 on success, a negative errno value otherwise and rte_errno is set.
1454  */
1455 int
1456 mlx5_flow_item_acceptable(const struct rte_eth_dev *dev,
1457 			  const struct rte_flow_item *item,
1458 			  const uint8_t *mask,
1459 			  const uint8_t *nic_mask,
1460 			  unsigned int size,
1461 			  bool range_accepted,
1462 			  struct rte_flow_error *error)
1463 {
1464 	unsigned int i;
1465 
1466 	MLX5_ASSERT(nic_mask);
1467 	for (i = 0; i < size; ++i)
1468 		if ((nic_mask[i] | mask[i]) != nic_mask[i])
1469 			return rte_flow_error_set(error, ENOTSUP,
1470 						  RTE_FLOW_ERROR_TYPE_ITEM,
1471 						  item,
1472 						  "mask enables non-supported"
1473 						  " bits");
1474 	if (mlx5_hws_active(dev))
1475 		return 0;
1476 	if (!item->spec && (item->mask || item->last))
1477 		return rte_flow_error_set(error, EINVAL,
1478 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1479 					  "mask/last without a spec is not"
1480 					  " supported");
1481 	if (item->spec && item->last && !range_accepted) {
1482 		uint8_t spec[size];
1483 		uint8_t last[size];
1484 		unsigned int i;
1485 		int ret;
1486 
1487 		for (i = 0; i < size; ++i) {
1488 			spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
1489 			last[i] = ((const uint8_t *)item->last)[i] & mask[i];
1490 		}
1491 		ret = memcmp(spec, last, size);
1492 		if (ret != 0)
1493 			return rte_flow_error_set(error, EINVAL,
1494 						  RTE_FLOW_ERROR_TYPE_ITEM,
1495 						  item,
1496 						  "range is not valid");
1497 	}
1498 	return 0;
1499 }
1500 
1501 /**
1502  * Adjust the hash fields according to the @p flow information.
1503  *
1504  * @param[in] rss_desc
1505  *   Pointer to the RSS descriptor (struct mlx5_flow_rss_desc).
1506  * @param[in] tunnel
1507  *   1 when the hash field is for a tunnel item.
1508  * @param[in] layer_types
1509  *   RTE_ETH_RSS_* types.
1510  * @param[in] hash_fields
1511  *   Item hash fields.
1512  *
1513  * @return
1514  *   The hash fields that should be used.
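 *
 * For instance (illustrative): with rss_desc->level == 2, a tunnel
 * layer gets hash_fields | IBV_RX_HASH_INNER; a mismatch between the
 * tunnel flag and the requested level, or layer_types not intersecting
 * rss_desc->types, yields 0.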
1515  */
1516 uint64_t
1517 mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
1518 			    int tunnel __rte_unused, uint64_t layer_types,
1519 			    uint64_t hash_fields)
1520 {
1521 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1522 	int rss_request_inner = rss_desc->level >= 2;
1523 
1524 	/* Check RSS hash level for tunnel. */
1525 	if (tunnel && rss_request_inner)
1526 		hash_fields |= IBV_RX_HASH_INNER;
1527 	else if (tunnel || rss_request_inner)
1528 		return 0;
1529 #endif
1530 	/* Check if requested layer matches RSS hash fields. */
1531 	if (!(rss_desc->types & layer_types))
1532 		return 0;
1533 	return hash_fields;
1534 }
1535 
1536 /**
1537  * Look up and set the tunnel ptype in the Rx queue data. Only a single
1538  * ptype can be used; if several tunnel rules are used on this queue,
1539  * the tunnel ptype will be cleared.
1540  *
1541  * @param rxq_ctrl
1542  *   Rx queue to update.
1543  */
1544 static void
1545 flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
1546 {
1547 	unsigned int i;
1548 	uint32_t tunnel_ptype = 0;
1549 
1550 	/* Look up for the ptype to use. */
1551 	for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
1552 		if (!rxq_ctrl->flow_tunnels_n[i])
1553 			continue;
1554 		if (!tunnel_ptype) {
1555 			tunnel_ptype = tunnels_info[i].ptype;
1556 		} else {
1557 			tunnel_ptype = 0;
1558 			break;
1559 		}
1560 	}
1561 	rxq_ctrl->rxq.tunnel = tunnel_ptype;
1562 }
1563 
1564 /**
1565  * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
1566  * flow.
1567  *
1568  * @param[in] dev
1569  *   Pointer to the Ethernet device structure.
1570  * @param[in] dev_handle
1571  *   Pointer to device flow handle structure.
1572  */
1573 void
1574 flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
1575 		       struct mlx5_flow_handle *dev_handle)
1576 {
1577 	struct mlx5_priv *priv = dev->data->dev_private;
1578 	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
1579 	struct mlx5_ind_table_obj *ind_tbl = NULL;
1580 	unsigned int i;
1581 
1582 	if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
1583 		struct mlx5_hrxq *hrxq;
1584 
1585 		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
1586 				      dev_handle->rix_hrxq);
1587 		if (hrxq)
1588 			ind_tbl = hrxq->ind_table;
1589 	} else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
1590 		struct mlx5_shared_action_rss *shared_rss;
1591 
1592 		shared_rss = mlx5_ipool_get
1593 			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
1594 			 dev_handle->rix_srss);
1595 		if (shared_rss)
1596 			ind_tbl = shared_rss->ind_tbl;
1597 	}
1598 	if (!ind_tbl)
1599 		return;
1600 	for (i = 0; i != ind_tbl->queues_n; ++i) {
1601 		int idx = ind_tbl->queues[i];
1602 		struct mlx5_rxq_ctrl *rxq_ctrl;
1603 
1604 		if (mlx5_is_external_rxq(dev, idx))
1605 			continue;
1606 		rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
1607 		MLX5_ASSERT(rxq_ctrl != NULL);
1608 		if (rxq_ctrl == NULL)
1609 			continue;
1610 		/*
1611 		 * To support metadata register copy on Tx loopback,
1612 		 * this must always be enabled (metadata may arrive
1613 		 * from another port, not only from local flows).
1614 		 */
1615 		if (tunnel) {
1616 			unsigned int j;
1617 
1618 			/* Increase the counter matching the flow.
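			 * One counter per tunnel type is kept here;
			 * flow_rxq_tunnel_ptype_update() keeps rxq.tunnel set
			 * only while a single tunnel type owns the queue.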
*/ 1619 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { 1620 if ((tunnels_info[j].tunnel & 1621 dev_handle->layers) == 1622 tunnels_info[j].tunnel) { 1623 rxq_ctrl->flow_tunnels_n[j]++; 1624 break; 1625 } 1626 } 1627 flow_rxq_tunnel_ptype_update(rxq_ctrl); 1628 } 1629 } 1630 } 1631 1632 static void 1633 flow_rxq_mark_flag_set(struct rte_eth_dev *dev) 1634 { 1635 struct mlx5_priv *priv = dev->data->dev_private; 1636 struct mlx5_rxq_ctrl *rxq_ctrl; 1637 uint16_t port_id; 1638 1639 if (priv->sh->shared_mark_enabled) 1640 return; 1641 if (priv->master || priv->representor) { 1642 MLX5_ETH_FOREACH_DEV(port_id, dev->device) { 1643 struct mlx5_priv *opriv = 1644 rte_eth_devices[port_id].data->dev_private; 1645 1646 if (!opriv || 1647 opriv->sh != priv->sh || 1648 opriv->domain_id != priv->domain_id || 1649 opriv->mark_enabled) 1650 continue; 1651 LIST_FOREACH(rxq_ctrl, &opriv->rxqsctrl, next) { 1652 rxq_ctrl->rxq.mark = 1; 1653 } 1654 opriv->mark_enabled = 1; 1655 } 1656 } else { 1657 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) { 1658 rxq_ctrl->rxq.mark = 1; 1659 } 1660 priv->mark_enabled = 1; 1661 } 1662 priv->sh->shared_mark_enabled = 1; 1663 } 1664 1665 /** 1666 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow 1667 * 1668 * @param[in] dev 1669 * Pointer to the Ethernet device structure. 1670 * @param[in] flow 1671 * Pointer to flow structure. 1672 */ 1673 static void 1674 flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) 1675 { 1676 struct mlx5_priv *priv = dev->data->dev_private; 1677 uint32_t handle_idx; 1678 struct mlx5_flow_handle *dev_handle; 1679 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); 1680 1681 MLX5_ASSERT(wks); 1682 if (wks->mark) 1683 flow_rxq_mark_flag_set(dev); 1684 SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, 1685 handle_idx, dev_handle, next) 1686 flow_drv_rxq_flags_set(dev, dev_handle); 1687 } 1688 1689 /** 1690 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the 1691 * device flow if no other flow uses it with the same kind of request. 1692 * 1693 * @param dev 1694 * Pointer to Ethernet device. 1695 * @param[in] dev_handle 1696 * Pointer to the device flow handle structure. 
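 *
 * This is the inverse of flow_drv_rxq_flags_set(): per-tunnel counters
 * are decremented and the queue tunnel ptype is recomputed.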
1697 */ 1698 static void 1699 flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, 1700 struct mlx5_flow_handle *dev_handle) 1701 { 1702 struct mlx5_priv *priv = dev->data->dev_private; 1703 const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL); 1704 struct mlx5_ind_table_obj *ind_tbl = NULL; 1705 unsigned int i; 1706 1707 if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) { 1708 struct mlx5_hrxq *hrxq; 1709 1710 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], 1711 dev_handle->rix_hrxq); 1712 if (hrxq) 1713 ind_tbl = hrxq->ind_table; 1714 } else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) { 1715 struct mlx5_shared_action_rss *shared_rss; 1716 1717 shared_rss = mlx5_ipool_get 1718 (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], 1719 dev_handle->rix_srss); 1720 if (shared_rss) 1721 ind_tbl = shared_rss->ind_tbl; 1722 } 1723 if (!ind_tbl) 1724 return; 1725 MLX5_ASSERT(dev->data->dev_started); 1726 for (i = 0; i != ind_tbl->queues_n; ++i) { 1727 int idx = ind_tbl->queues[i]; 1728 struct mlx5_rxq_ctrl *rxq_ctrl; 1729 1730 if (mlx5_is_external_rxq(dev, idx)) 1731 continue; 1732 rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx); 1733 MLX5_ASSERT(rxq_ctrl != NULL); 1734 if (rxq_ctrl == NULL) 1735 continue; 1736 if (tunnel) { 1737 unsigned int j; 1738 1739 /* Decrease the counter matching the flow. */ 1740 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { 1741 if ((tunnels_info[j].tunnel & 1742 dev_handle->layers) == 1743 tunnels_info[j].tunnel) { 1744 rxq_ctrl->flow_tunnels_n[j]--; 1745 break; 1746 } 1747 } 1748 flow_rxq_tunnel_ptype_update(rxq_ctrl); 1749 } 1750 } 1751 } 1752 1753 /** 1754 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the 1755 * @p flow if no other flow uses it with the same kind of request. 1756 * 1757 * @param dev 1758 * Pointer to Ethernet device. 1759 * @param[in] flow 1760 * Pointer to the flow. 1761 */ 1762 static void 1763 flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) 1764 { 1765 struct mlx5_priv *priv = dev->data->dev_private; 1766 uint32_t handle_idx; 1767 struct mlx5_flow_handle *dev_handle; 1768 1769 SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, 1770 handle_idx, dev_handle, next) 1771 flow_drv_rxq_flags_trim(dev, dev_handle); 1772 } 1773 1774 /** 1775 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues. 1776 * 1777 * @param dev 1778 * Pointer to Ethernet device. 
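 *
 * (Resets rxq.mark, all per-tunnel counters and rxq.tunnel for every
 * configured Rx queue in a single pass.)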
1779 */ 1780 static void 1781 flow_rxq_flags_clear(struct rte_eth_dev *dev) 1782 { 1783 struct mlx5_priv *priv = dev->data->dev_private; 1784 unsigned int i; 1785 1786 for (i = 0; i != priv->rxqs_n; ++i) { 1787 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i); 1788 unsigned int j; 1789 1790 if (rxq == NULL || rxq->ctrl == NULL) 1791 continue; 1792 rxq->ctrl->rxq.mark = 0; 1793 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) 1794 rxq->ctrl->flow_tunnels_n[j] = 0; 1795 rxq->ctrl->rxq.tunnel = 0; 1796 } 1797 priv->mark_enabled = 0; 1798 priv->sh->shared_mark_enabled = 0; 1799 } 1800 1801 static uint64_t mlx5_restore_info_dynflag; 1802 1803 int 1804 mlx5_flow_rx_metadata_negotiate(struct rte_eth_dev *dev, uint64_t *features) 1805 { 1806 struct mlx5_priv *priv = dev->data->dev_private; 1807 uint64_t supported = 0; 1808 1809 if (!is_tunnel_offload_active(dev)) { 1810 supported |= RTE_ETH_RX_METADATA_USER_FLAG; 1811 supported |= RTE_ETH_RX_METADATA_USER_MARK; 1812 if ((*features & RTE_ETH_RX_METADATA_TUNNEL_ID) != 0) { 1813 DRV_LOG(DEBUG, 1814 "tunnel offload was not activated, consider setting dv_xmeta_en=%d", 1815 MLX5_XMETA_MODE_MISS_INFO); 1816 } 1817 } else { 1818 supported |= RTE_ETH_RX_METADATA_TUNNEL_ID; 1819 if ((*features & RTE_ETH_RX_METADATA_TUNNEL_ID) != 0 && 1820 mlx5_restore_info_dynflag == 0) 1821 mlx5_restore_info_dynflag = rte_flow_restore_info_dynflag(); 1822 } 1823 1824 if (((*features & supported) & RTE_ETH_RX_METADATA_TUNNEL_ID) != 0) 1825 priv->tunnel_enabled = 1; 1826 else 1827 priv->tunnel_enabled = 0; 1828 1829 *features &= supported; 1830 return 0; 1831 } 1832 1833 /** 1834 * Set the Rx queue dynamic metadata (mask and offset) for a flow 1835 * 1836 * @param[in] dev 1837 * Pointer to the Ethernet device structure. 1838 */ 1839 void 1840 mlx5_flow_rxq_dynf_set(struct rte_eth_dev *dev) 1841 { 1842 struct mlx5_priv *priv = dev->data->dev_private; 1843 uint64_t mark_flag = RTE_MBUF_F_RX_FDIR_ID; 1844 unsigned int i; 1845 1846 if (priv->tunnel_enabled) 1847 mark_flag |= mlx5_restore_info_dynflag; 1848 1849 for (i = 0; i != priv->rxqs_n; ++i) { 1850 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i); 1851 struct mlx5_rxq_data *data; 1852 1853 if (rxq == NULL || rxq->ctrl == NULL) 1854 continue; 1855 data = &rxq->ctrl->rxq; 1856 if (!data->shared || !rxq->ctrl->started) { 1857 if (!rte_flow_dynf_metadata_avail()) { 1858 data->dynf_meta = 0; 1859 data->flow_meta_mask = 0; 1860 data->flow_meta_offset = -1; 1861 data->flow_meta_port_mask = 0; 1862 } else { 1863 data->dynf_meta = 1; 1864 data->flow_meta_mask = rte_flow_dynf_metadata_mask; 1865 data->flow_meta_offset = rte_flow_dynf_metadata_offs; 1866 data->flow_meta_port_mask = priv->sh->dv_meta_mask; 1867 } 1868 data->mark_flag = mark_flag; 1869 } 1870 } 1871 } 1872 1873 /* 1874 * return a pointer to the desired action in the list of actions. 1875 * 1876 * @param[in] actions 1877 * The list of actions to search the action in. 1878 * @param[in] action 1879 * The action to find. 1880 * 1881 * @return 1882 * Pointer to the action in the list, if found. NULL otherwise. 1883 */ 1884 const struct rte_flow_action * 1885 mlx5_flow_find_action(const struct rte_flow_action *actions, 1886 enum rte_flow_action_type action) 1887 { 1888 if (actions == NULL) 1889 return NULL; 1890 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) 1891 if (actions->type == action) 1892 return actions; 1893 return NULL; 1894 } 1895 1896 /* 1897 * Validate the flag action. 
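 * FLAG is mutually exclusive with MARK and with a second FLAG action,
 * and is not supported on egress; the checks below enforce this.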
1898 * 1899 * @param[in] action_flags 1900 * Bit-fields that holds the actions detected until now. 1901 * @param[in] attr 1902 * Attributes of flow that includes this action. 1903 * @param[out] error 1904 * Pointer to error structure. 1905 * 1906 * @return 1907 * 0 on success, a negative errno value otherwise and rte_errno is set. 1908 */ 1909 int 1910 mlx5_flow_validate_action_flag(uint64_t action_flags, 1911 const struct rte_flow_attr *attr, 1912 struct rte_flow_error *error) 1913 { 1914 if (action_flags & MLX5_FLOW_ACTION_MARK) 1915 return rte_flow_error_set(error, EINVAL, 1916 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1917 "can't mark and flag in same flow"); 1918 if (action_flags & MLX5_FLOW_ACTION_FLAG) 1919 return rte_flow_error_set(error, EINVAL, 1920 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1921 "can't have 2 flag" 1922 " actions in same flow"); 1923 if (attr->egress) 1924 return rte_flow_error_set(error, ENOTSUP, 1925 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1926 "flag action not supported for " 1927 "egress"); 1928 return 0; 1929 } 1930 1931 /* 1932 * Validate the mark action. 1933 * 1934 * @param[in] action 1935 * Pointer to the mark action. 1936 * @param[in] action_flags 1937 * Bit-fields that holds the actions detected until now. 1938 * @param[in] attr 1939 * Attributes of flow that includes this action. 1940 * @param[out] error 1941 * Pointer to error structure. 1942 * 1943 * @return 1944 * 0 on success, a negative errno value otherwise and rte_errno is set. 1945 */ 1946 int 1947 mlx5_flow_validate_action_mark(struct rte_eth_dev *dev, 1948 const struct rte_flow_action *action, 1949 uint64_t action_flags, 1950 const struct rte_flow_attr *attr, 1951 struct rte_flow_error *error) 1952 { 1953 const struct rte_flow_action_mark *mark = action->conf; 1954 1955 if (!mark) 1956 return rte_flow_error_set(error, EINVAL, 1957 RTE_FLOW_ERROR_TYPE_ACTION, 1958 action, 1959 "configuration cannot be null"); 1960 if (mark->id >= MLX5_FLOW_MARK_MAX) 1961 return rte_flow_error_set(error, EINVAL, 1962 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1963 &mark->id, 1964 "mark id must be in range 0 <= id < " 1965 RTE_STR(MLX5_FLOW_MARK_MAX)); 1966 if (action_flags & MLX5_FLOW_ACTION_FLAG) 1967 return rte_flow_error_set(error, EINVAL, 1968 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1969 "can't flag and mark in same flow"); 1970 if (action_flags & MLX5_FLOW_ACTION_MARK) 1971 return rte_flow_error_set(error, EINVAL, 1972 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1973 "can't have 2 mark actions in same" 1974 " flow"); 1975 if (attr->egress) 1976 return rte_flow_error_set(error, ENOTSUP, 1977 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1978 "mark action not supported for " 1979 "egress"); 1980 if (attr->transfer && mlx5_hws_active(dev)) 1981 return rte_flow_error_set(error, ENOTSUP, 1982 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL, 1983 "non-template mark action not supported for transfer"); 1984 return 0; 1985 } 1986 1987 /* 1988 * Validate the drop action. 1989 * 1990 * @param[in] dev 1991 * Pointer to the Ethernet device structure. 1992 * @param[in] is_root 1993 * True if flow is validated for root table. False otherwise. 1994 * @param[in] attr 1995 * Attributes of flow that includes this action. 1996 * @param[out] error 1997 * Pointer to error structure. 1998 * 1999 * @return 2000 * 0 on success, a negative errno value otherwise and rte_errno is set.
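 *
 * A minimal usage sketch (illustrative only; treating group 0 as the root
 * table is a simplification made for the example):
 *
 * @code
 *	struct rte_flow_error err;
 *	const struct rte_flow_attr attr = { .transfer = 1 };
 *	bool is_root = (attr.group == 0);
 *
 *	if (mlx5_flow_validate_action_drop(dev, is_root, &attr, &err))
 *		DRV_LOG(DEBUG, "DROP rejected: %s", err.message);
 * @endcode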
2001 */ 2002 int 2003 mlx5_flow_validate_action_drop(struct rte_eth_dev *dev, 2004 bool is_root, 2005 const struct rte_flow_attr *attr, 2006 struct rte_flow_error *error) 2007 { 2008 struct mlx5_priv *priv = dev->data->dev_private; 2009 2010 if (priv->sh->config.dv_flow_en == 0 && attr->egress) 2011 return rte_flow_error_set(error, ENOTSUP, 2012 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 2013 "drop action not supported for " 2014 "egress"); 2015 if (priv->sh->config.dv_flow_en == 1 && is_root && (attr->egress || attr->transfer) && 2016 !priv->sh->dr_root_drop_action_en) { 2017 return rte_flow_error_set(error, ENOTSUP, 2018 RTE_FLOW_ERROR_TYPE_ATTR, NULL, 2019 "drop action not supported for " 2020 "egress and transfer on group 0"); 2021 } 2022 return 0; 2023 } 2024 2025 /* 2026 * Check if a queue specified in the queue action is valid. 2027 * 2028 * @param[in] dev 2029 * Pointer to the Ethernet device structure. 2030 * @param[in] action 2031 * Pointer to the queue action. 2032 * @param[out] error 2033 * Pointer to error structure. 2034 * 2035 * @return 2036 * 0 on success, a negative errno value otherwise and rte_errno is set. 2037 */ 2038 int 2039 mlx5_flow_validate_target_queue(struct rte_eth_dev *dev, 2040 const struct rte_flow_action *action, 2041 struct rte_flow_error *error) 2042 { 2043 const struct rte_flow_action_queue *queue = action->conf; 2044 struct mlx5_priv *priv = dev->data->dev_private; 2045 2046 if (mlx5_is_external_rxq(dev, queue->index)) 2047 return 0; 2048 if (!priv->rxqs_n) 2049 return rte_flow_error_set(error, EINVAL, 2050 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2051 NULL, "No Rx queues configured"); 2052 if (queue->index >= priv->rxqs_n) 2053 return rte_flow_error_set(error, EINVAL, 2054 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2055 &queue->index, 2056 "queue index out of range"); 2057 if (mlx5_rxq_get(dev, queue->index) == NULL) 2058 return rte_flow_error_set(error, EINVAL, 2059 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2060 &queue->index, 2061 "queue is not configured"); 2062 return 0; 2063 } 2064 2065 /* 2066 * Validate the queue action. 2067 * 2068 * @param[in] action 2069 * Pointer to the queue action. 2070 * @param[in] action_flags 2071 * Bit-fields that holds the actions detected until now. 2072 * @param[in] dev 2073 * Pointer to the Ethernet device structure. 2074 * @param[in] attr 2075 * Attributes of flow that includes this action. 2076 * @param[out] error 2077 * Pointer to error structure. 2078 * 2079 * @return 2080 * 0 on success, a negative errno value otherwise and rte_errno is set. 2081 */ 2082 int 2083 mlx5_flow_validate_action_queue(const struct rte_flow_action *action, 2084 uint64_t action_flags, 2085 struct rte_eth_dev *dev, 2086 const struct rte_flow_attr *attr, 2087 struct rte_flow_error *error) 2088 { 2089 const struct rte_flow_action_queue *queue = action->conf; 2090 2091 if (!queue) 2092 return rte_flow_error_set(error, EINVAL, 2093 RTE_FLOW_ERROR_TYPE_ACTION, action, 2094 "no QUEUE action configuration"); 2095 if (action_flags & MLX5_FLOW_FATE_ACTIONS) 2096 return rte_flow_error_set(error, EINVAL, 2097 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2098 "can't have 2 fate actions in" 2099 " same flow"); 2100 if (attr->egress) 2101 return rte_flow_error_set(error, ENOTSUP, 2102 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 2103 "queue action not supported for egress."); 2104 return mlx5_flow_validate_target_queue(dev, action, error); 2105 } 2106 2107 /** 2108 * Validate queue numbers for device RSS. 2109 * 2110 * @param[in] dev 2111 * Configured device. 
2112 * @param[in] queues 2113 * Array of queue numbers. 2114 * @param[in] queues_n 2115 * Size of the @p queues array. 2116 * @param[out] error 2117 * On error, filled with a textual error description. 2118 * @param[out] queue_idx 2119 * On error, filled with an offending queue index in @p queues array. 2120 * 2121 * @return 2122 * 0 on success, a negative errno code on error. 2123 */ 2124 static int 2125 mlx5_validate_rss_queues(struct rte_eth_dev *dev, 2126 const uint16_t *queues, uint32_t queues_n, 2127 const char **error, uint32_t *queue_idx) 2128 { 2129 const struct mlx5_priv *priv = dev->data->dev_private; 2130 bool is_hairpin = false; 2131 bool is_ext_rss = false; 2132 uint32_t i; 2133 2134 for (i = 0; i != queues_n; ++i) { 2135 struct mlx5_rxq_ctrl *rxq_ctrl; 2136 2137 if (mlx5_is_external_rxq(dev, queues[i])) { 2138 is_ext_rss = true; 2139 continue; 2140 } 2141 if (is_ext_rss) { 2142 *error = "combining external and regular RSS queues is not supported"; 2143 *queue_idx = i; 2144 return -ENOTSUP; 2145 } 2146 if (queues[i] >= priv->rxqs_n) { 2147 *error = "queue index out of range"; 2148 *queue_idx = i; 2149 return -EINVAL; 2150 } 2151 rxq_ctrl = mlx5_rxq_ctrl_get(dev, queues[i]); 2152 if (rxq_ctrl == NULL) { 2153 *error = "queue is not configured"; 2154 *queue_idx = i; 2155 return -EINVAL; 2156 } 2157 if (i == 0 && rxq_ctrl->is_hairpin) 2158 is_hairpin = true; 2159 if (is_hairpin != rxq_ctrl->is_hairpin) { 2160 *error = "combining hairpin and regular RSS queues is not supported"; 2161 *queue_idx = i; 2162 return -ENOTSUP; 2163 } 2164 } 2165 return 0; 2166 } 2167 2168 /* 2169 * Validate the RSS action. 2170 * 2171 * @param[in] dev 2172 * Pointer to the Ethernet device structure. 2173 * @param[in] action 2174 * Pointer to the RSS action. 2175 * @param[out] error 2176 * Pointer to error structure. 2177 * 2178 * @return 2179 * 0 on success, a negative errno value otherwise and rte_errno is set. 2180 */ 2181 int 2182 mlx5_validate_action_rss(struct rte_eth_dev *dev, 2183 const struct rte_flow_action *action, 2184 struct rte_flow_error *error) 2185 { 2186 struct mlx5_priv *priv = dev->data->dev_private; 2187 const struct rte_flow_action_rss *rss = action->conf; 2188 int ret; 2189 const char *message; 2190 uint32_t queue_idx; 2191 2192 if (!rss) 2193 return rte_flow_error_set 2194 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, 2195 action, "no RSS action configuration"); 2196 if (rss->func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) { 2197 DRV_LOG(WARNING, "port %u symmetric RSS is supported with SORT", 2198 dev->data->port_id); 2199 } else if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT && 2200 rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) 2201 return rte_flow_error_set(error, ENOTSUP, 2202 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2203 &rss->func, 2204 "RSS hash function not supported"); 2205 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 2206 if (rss->level > 2) 2207 #else 2208 if (rss->level > 1) 2209 #endif 2210 return rte_flow_error_set(error, ENOTSUP, 2211 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2212 &rss->level, 2213 "tunnel RSS is not supported"); 2214 /* Allow RSS key_len 0 in case of NULL (default) RSS key.
*/ 2215 if (rss->key_len == 0 && rss->key != NULL) 2216 return rte_flow_error_set(error, ENOTSUP, 2217 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2218 &rss->key_len, 2219 "RSS hash key length 0"); 2220 if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN) 2221 return rte_flow_error_set(error, ENOTSUP, 2222 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2223 &rss->key_len, 2224 "RSS hash key too small"); 2225 if (rss->key_len > MLX5_RSS_HASH_KEY_LEN) 2226 return rte_flow_error_set(error, ENOTSUP, 2227 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2228 &rss->key_len, 2229 "RSS hash key too large"); 2230 if (rss->queue_num > priv->sh->dev_cap.ind_table_max_size) 2231 return rte_flow_error_set(error, ENOTSUP, 2232 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2233 &rss->queue_num, 2234 "number of queues too large"); 2235 if (rss->types & MLX5_RSS_HF_MASK) 2236 return rte_flow_error_set(error, ENOTSUP, 2237 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2238 &rss->types, 2239 "some RSS protocols are not" 2240 " supported"); 2241 if ((rss->types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) && 2242 !(rss->types & RTE_ETH_RSS_IP)) 2243 return rte_flow_error_set(error, EINVAL, 2244 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 2245 "L3 partial RSS requested but L3 RSS" 2246 " type not specified"); 2247 if ((rss->types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) && 2248 !(rss->types & (RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP))) 2249 return rte_flow_error_set(error, EINVAL, 2250 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 2251 "L4 partial RSS requested but L4 RSS" 2252 " type not specified"); 2253 if (!priv->rxqs_n && priv->ext_rxqs == NULL) 2254 return rte_flow_error_set(error, EINVAL, 2255 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2256 NULL, "No Rx queues configured"); 2257 if (!rss->queue_num) 2258 return rte_flow_error_set(error, EINVAL, 2259 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2260 NULL, "No queues configured"); 2261 ret = mlx5_validate_rss_queues(dev, rss->queue, rss->queue_num, 2262 &message, &queue_idx); 2263 if (ret != 0) { 2264 return rte_flow_error_set(error, -ret, 2265 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2266 &rss->queue[queue_idx], message); 2267 } 2268 return 0; 2269 } 2270 2271 /* 2272 * Validate the RSS action. 2273 * 2274 * @param[in] action 2275 * Pointer to the RSS action. 2276 * @param[in] action_flags 2277 * Bit-fields that holds the actions detected until now. 2278 * @param[in] dev 2279 * Pointer to the Ethernet device structure. 2280 * @param[in] attr 2281 * Attributes of flow that includes this action. 2282 * @param[in] item_flags 2283 * Items that were detected. 2284 * @param[out] error 2285 * Pointer to error structure. 2286 * 2287 * @return 2288 * 0 on success, a negative errno value otherwise and rte_errno is set.
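 *
 * A minimal usage sketch (illustrative only; dev, attr and item_flags are
 * assumed to come from the caller, as in the validation path below):
 *
 * @code
 *	uint16_t queues[2] = { 0, 1 };
 *	struct rte_flow_action_rss conf = {
 *		.types = RTE_ETH_RSS_IP,
 *		.queue_num = 2,
 *		.queue = queues,
 *	};
 *	struct rte_flow_action act = {
 *		.type = RTE_FLOW_ACTION_TYPE_RSS,
 *		.conf = &conf,
 *	};
 *	struct rte_flow_error err;
 *
 *	if (mlx5_flow_validate_action_rss(&act, 0, dev, attr,
 *					  item_flags, &err))
 *		DRV_LOG(DEBUG, "RSS rejected: %s", err.message);
 * @endcode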
2289 */ 2290 int 2291 mlx5_flow_validate_action_rss(const struct rte_flow_action *action, 2292 uint64_t action_flags, 2293 struct rte_eth_dev *dev, 2294 const struct rte_flow_attr *attr, 2295 uint64_t item_flags, 2296 struct rte_flow_error *error) 2297 { 2298 const struct rte_flow_action_rss *rss = action->conf; 2299 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 2300 int ret; 2301 2302 if (action_flags & MLX5_FLOW_FATE_ACTIONS) 2303 return rte_flow_error_set(error, EINVAL, 2304 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2305 "can't have 2 fate actions" 2306 " in same flow"); 2307 ret = mlx5_validate_action_rss(dev, action, error); 2308 if (ret) 2309 return ret; 2310 if (attr->egress) 2311 return rte_flow_error_set(error, ENOTSUP, 2312 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 2313 "rss action not supported for " 2314 "egress"); 2315 if (rss->level > 1 && !tunnel) 2316 return rte_flow_error_set(error, EINVAL, 2317 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 2318 "inner RSS is not supported for " 2319 "non-tunnel flows"); 2320 if ((item_flags & MLX5_FLOW_LAYER_ECPRI) && 2321 !(item_flags & MLX5_FLOW_LAYER_INNER_L4_UDP)) { 2322 return rte_flow_error_set(error, EINVAL, 2323 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 2324 "RSS on eCPRI is not supported now"); 2325 } 2326 if ((item_flags & MLX5_FLOW_LAYER_MPLS) && 2327 !(item_flags & 2328 (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3)) && 2329 rss->level > 1) 2330 return rte_flow_error_set(error, EINVAL, 2331 RTE_FLOW_ERROR_TYPE_ITEM, NULL, 2332 "MPLS inner RSS needs to specify inner L2/L3 items after MPLS in pattern"); 2333 return 0; 2334 } 2335 2336 /* 2337 * Validate the default miss action. 2338 * 2339 * @param[in] action_flags 2340 * Bit-fields that holds the actions detected until now. 2341 * @param[out] error 2342 * Pointer to error structure. 2343 * 2344 * @return 2345 * 0 on success, a negative errno value otherwise and rte_errno is set. 2346 */ 2347 int 2348 mlx5_flow_validate_action_default_miss(uint64_t action_flags, 2349 const struct rte_flow_attr *attr, 2350 struct rte_flow_error *error) 2351 { 2352 if (action_flags & MLX5_FLOW_FATE_ACTIONS) 2353 return rte_flow_error_set(error, EINVAL, 2354 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2355 "can't have 2 fate actions in" 2356 " same flow"); 2357 if (attr->egress) 2358 return rte_flow_error_set(error, ENOTSUP, 2359 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 2360 "default miss action not supported " 2361 "for egress"); 2362 if (attr->group) 2363 return rte_flow_error_set(error, ENOTSUP, 2364 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL, 2365 "only group 0 is supported"); 2366 if (attr->transfer) 2367 return rte_flow_error_set(error, ENOTSUP, 2368 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, 2369 NULL, "transfer is not supported"); 2370 return 0; 2371 } 2372 2373 /* 2374 * Validate the count action. 2375 * 2376 * @param[in] dev 2377 * Pointer to the Ethernet device structure. 2378 * @param[in] attr 2379 * Attributes of flow that includes this action. 2380 * @param[out] error 2381 * Pointer to error structure. 2382 * 2383 * @return 2384 * 0 on success, a negative errno value otherwise and rte_errno is set. 2385 */ 2386 int 2387 mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused, 2388 const struct rte_flow_attr *attr, 2389 struct rte_flow_error *error) 2390 { 2391 if (attr->egress) 2392 return rte_flow_error_set(error, ENOTSUP, 2393 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 2394 "count action not supported for " 2395 "egress"); 2396 return 0; 2397 } 2398 2399 /* 2400 * Validate the ASO CT action. 
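 *
 * A minimal usage sketch (illustrative only; the profile values are
 * arbitrary but valid with respect to the checks below):
 *
 * @code
 *	struct rte_flow_action_conntrack profile = {
 *		.state = RTE_FLOW_CONNTRACK_STATE_ESTABLISHED,
 *		.last_index = RTE_FLOW_CONNTRACK_FLAG_ACK,
 *	};
 *	struct rte_flow_error err;
 *
 *	if (mlx5_validate_action_ct(dev, &profile, &err))
 *		DRV_LOG(DEBUG, "CT profile rejected: %s", err.message);
 * @endcode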
2401 * 2402 * @param[in] dev 2403 * Pointer to the Ethernet device structure. 2404 * @param[in] conntrack 2405 * Pointer to the CT action profile. 2406 * @param[out] error 2407 * Pointer to error structure. 2408 * 2409 * @return 2410 * 0 on success, a negative errno value otherwise and rte_errno is set. 2411 */ 2412 int 2413 mlx5_validate_action_ct(struct rte_eth_dev *dev, 2414 const struct rte_flow_action_conntrack *conntrack, 2415 struct rte_flow_error *error) 2416 { 2417 RTE_SET_USED(dev); 2418 2419 if (conntrack->state > RTE_FLOW_CONNTRACK_STATE_TIME_WAIT) 2420 return rte_flow_error_set(error, EINVAL, 2421 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2422 "Invalid CT state"); 2423 if (conntrack->last_index > RTE_FLOW_CONNTRACK_FLAG_RST) 2424 return rte_flow_error_set(error, EINVAL, 2425 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2426 "Invalid last TCP packet flag"); 2427 return 0; 2428 } 2429 2430 /** 2431 * Validate the level value for modify field action. 2432 * 2433 * @param[in] data 2434 * Pointer to the rte_flow_field_data structure, either src or dst. 2435 * @param[out] error 2436 * Pointer to error structure. 2437 * 2438 * @return 2439 * 0 on success, a negative errno value otherwise and rte_errno is set. 2440 */ 2441 int 2442 flow_validate_modify_field_level(const struct rte_flow_field_data *data, 2443 struct rte_flow_error *error) 2444 { 2445 if (data->level == 0 || data->field == RTE_FLOW_FIELD_FLEX_ITEM) 2446 return 0; 2447 if (data->field != RTE_FLOW_FIELD_TAG && 2448 data->field != (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) { 2449 if (data->level > 1) 2450 return rte_flow_error_set(error, ENOTSUP, 2451 RTE_FLOW_ERROR_TYPE_ACTION, 2452 NULL, 2453 "inner header fields modification is not supported"); 2454 return 0; 2455 } 2456 if (data->tag_index != 0) 2457 return rte_flow_error_set(error, EINVAL, 2458 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2459 "tag array can be provided using 'level' or 'tag_index' fields, not both"); 2460 /* 2461 * The tag array for RTE_FLOW_FIELD_TAG type is provided using 2462 * 'tag_index' field. In the old API, it was provided using 'level' field 2463 * and it is still supported for backwards compatibility. 2464 */ 2465 DRV_LOG(DEBUG, "tag array provided in 'level' field instead of 'tag_index' field."); 2466 return 0; 2467 } 2468 2469 /** 2470 * Validate ICMP6 item. 2471 * 2472 * @param[in] item 2473 * Item specification. 2474 * @param[in] item_flags 2475 * Bit-fields that holds the items detected until now. 2476 * @param[in] target_protocol 2477 * The next protocol in the previous item. 2478 * @param[out] error 2479 * Pointer to error structure. 2480 * 2481 * @return 2482 * 0 on success, a negative errno value otherwise and rte_errno is set. 2483 */ 2484 int 2485 mlx5_flow_validate_item_icmp6(const struct rte_eth_dev *dev, 2486 const struct rte_flow_item *item, 2487 uint64_t item_flags, 2488 uint8_t target_protocol, 2489 struct rte_flow_error *error) 2490 { 2491 const struct rte_flow_item_icmp6 *mask = item->mask; 2492 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 2493 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : 2494 MLX5_FLOW_LAYER_OUTER_L3_IPV6; 2495 const uint64_t l4m = tunnel ?
MLX5_FLOW_LAYER_INNER_L4 : 2496 MLX5_FLOW_LAYER_OUTER_L4; 2497 int ret; 2498 2499 if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6) 2500 return rte_flow_error_set(error, EINVAL, 2501 RTE_FLOW_ERROR_TYPE_ITEM, item, 2502 "protocol filtering not compatible" 2503 " with ICMP6 layer"); 2504 if (!mlx5_hws_active(dev)) { 2505 if (!(item_flags & l3m)) 2506 return rte_flow_error_set(error, EINVAL, 2507 RTE_FLOW_ERROR_TYPE_ITEM, 2508 item, "IPv6 is mandatory to filter on ICMP6"); 2509 } 2510 if (item_flags & l4m) 2511 return rte_flow_error_set(error, EINVAL, 2512 RTE_FLOW_ERROR_TYPE_ITEM, item, 2513 "multiple L4 layers not supported"); 2514 if (!mask) 2515 mask = &rte_flow_item_icmp6_mask; 2516 ret = mlx5_flow_item_acceptable 2517 (dev, item, (const uint8_t *)mask, 2518 (const uint8_t *)&rte_flow_item_icmp6_mask, 2519 sizeof(struct rte_flow_item_icmp6), 2520 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 2521 if (ret < 0) 2522 return ret; 2523 return 0; 2524 } 2525 2526 /** 2527 * Validate ICMP6 echo request/reply item. 2528 * 2529 * @param[in] item 2530 * Item specification. 2531 * @param[in] item_flags 2532 * Bit-fields that holds the items detected until now. 2533 * @param[in] target_protocol 2534 * The next protocol in the previous item. 2535 * @param[out] error 2536 * Pointer to error structure. 2537 * 2538 * @return 2539 * 0 on success, a negative errno value otherwise and rte_errno is set. 2540 */ 2541 int 2542 mlx5_flow_validate_item_icmp6_echo(const struct rte_eth_dev *dev, 2543 const struct rte_flow_item *item, 2544 uint64_t item_flags, 2545 uint8_t target_protocol, 2546 struct rte_flow_error *error) 2547 { 2548 const struct rte_flow_item_icmp6_echo *mask = item->mask; 2549 const struct rte_flow_item_icmp6_echo nic_mask = { 2550 .hdr.base.type = 0xff, 2551 .hdr.base.code = 0xff, 2552 .hdr.identifier = RTE_BE16(0xffff), 2553 .hdr.sequence = RTE_BE16(0xffff), 2554 }; 2555 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 2556 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : 2557 MLX5_FLOW_LAYER_OUTER_L3_IPV6; 2558 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 2559 MLX5_FLOW_LAYER_OUTER_L4; 2560 int ret; 2561 2562 if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6) 2563 return rte_flow_error_set(error, EINVAL, 2564 RTE_FLOW_ERROR_TYPE_ITEM, item, 2565 "protocol filtering not compatible" 2566 " with ICMP6 layer"); 2567 if (!mlx5_hws_active(dev)) { 2568 if (!(item_flags & l3m)) 2569 return rte_flow_error_set(error, EINVAL, 2570 RTE_FLOW_ERROR_TYPE_ITEM, 2571 item, "IPv6 is mandatory to filter on ICMP6"); 2572 } 2573 if (item_flags & l4m) 2574 return rte_flow_error_set(error, EINVAL, 2575 RTE_FLOW_ERROR_TYPE_ITEM, item, 2576 "multiple L4 layers not supported"); 2577 if (!mask) 2578 mask = &nic_mask; 2579 ret = mlx5_flow_item_acceptable 2580 (dev, item, (const uint8_t *)mask, 2581 (const uint8_t *)&nic_mask, 2582 sizeof(struct rte_flow_item_icmp6_echo), 2583 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 2584 if (ret < 0) 2585 return ret; 2586 return 0; 2587 } 2588 2589 /** 2590 * Validate ICMP item. 2591 * 2592 * @param[in] item 2593 * Item specification. 2594 * @param[in] item_flags 2595 * Bit-fields that holds the items detected until now. 2596 * @param[out] error 2597 * Pointer to error structure. 2598 * 2599 * @return 2600 * 0 on success, a negative errno value otherwise and rte_errno is set.
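 *
 * A minimal usage sketch (illustrative only; an outer IPv4 layer is
 * assumed to have been matched already, as the checks below require):
 *
 * @code
 *	struct rte_flow_item_icmp spec = {
 *		.hdr.icmp_type = 8,	// ICMP echo request
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_ICMP,
 *		.spec = &spec,
 *		.mask = &rte_flow_item_icmp_mask,
 *	};
 *	struct rte_flow_error err;
 *	uint64_t item_flags = MLX5_FLOW_LAYER_OUTER_L3_IPV4;
 *
 *	if (mlx5_flow_validate_item_icmp(dev, &item, item_flags,
 *					 IPPROTO_ICMP, &err))
 *		DRV_LOG(DEBUG, "ICMP item rejected: %s", err.message);
 * @endcode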
2601 */ 2602 int 2603 mlx5_flow_validate_item_icmp(const struct rte_eth_dev *dev, 2604 const struct rte_flow_item *item, 2605 uint64_t item_flags, 2606 uint8_t target_protocol, 2607 struct rte_flow_error *error) 2608 { 2609 const struct rte_flow_item_icmp *mask = item->mask; 2610 const struct rte_flow_item_icmp nic_mask = { 2611 .hdr.icmp_type = 0xff, 2612 .hdr.icmp_code = 0xff, 2613 .hdr.icmp_ident = RTE_BE16(0xffff), 2614 .hdr.icmp_seq_nb = RTE_BE16(0xffff), 2615 }; 2616 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 2617 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : 2618 MLX5_FLOW_LAYER_OUTER_L3_IPV4; 2619 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 2620 MLX5_FLOW_LAYER_OUTER_L4; 2621 int ret; 2622 2623 if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP) 2624 return rte_flow_error_set(error, EINVAL, 2625 RTE_FLOW_ERROR_TYPE_ITEM, item, 2626 "protocol filtering not compatible" 2627 " with ICMP layer"); 2628 if (!mlx5_hws_active(dev)) { 2629 if (!(item_flags & l3m)) 2630 return rte_flow_error_set(error, EINVAL, 2631 RTE_FLOW_ERROR_TYPE_ITEM, 2632 item, "IPv4 is mandatory to filter on ICMP"); 2633 } 2634 if (item_flags & l4m) 2635 return rte_flow_error_set(error, EINVAL, 2636 RTE_FLOW_ERROR_TYPE_ITEM, item, 2637 "multiple L4 layers not supported"); 2638 if (!mask) 2639 mask = &nic_mask; 2640 ret = mlx5_flow_item_acceptable 2641 (dev, item, (const uint8_t *)mask, 2642 (const uint8_t *)&nic_mask, 2643 sizeof(struct rte_flow_item_icmp), 2644 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 2645 if (ret < 0) 2646 return ret; 2647 return 0; 2648 } 2649 2650 /** 2651 * Validate Ethernet item. 2652 * 2653 * @param[in] item 2654 * Item specification. 2655 * @param[in] item_flags 2656 * Bit-fields that holds the items detected until now. 2657 * @param[out] error 2658 * Pointer to error structure. 2659 * 2660 * @return 2661 * 0 on success, a negative errno value otherwise and rte_errno is set. 2662 */ 2663 int 2664 mlx5_flow_validate_item_eth(const struct rte_eth_dev *dev, 2665 const struct rte_flow_item *item, 2666 uint64_t item_flags, bool ext_vlan_sup, 2667 struct rte_flow_error *error) 2668 { 2669 const struct rte_flow_item_eth *mask = item->mask; 2670 const struct rte_flow_item_eth nic_mask = { 2671 .hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 2672 .hdr.src_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 2673 .hdr.ether_type = RTE_BE16(0xffff), 2674 .has_vlan = ext_vlan_sup ? 1 : 0, 2675 }; 2676 int ret; 2677 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 2678 const uint64_t ethm = tunnel ? 
MLX5_FLOW_LAYER_INNER_L2 : 2679 MLX5_FLOW_LAYER_OUTER_L2; 2680 2681 if (item_flags & ethm) 2682 return rte_flow_error_set(error, ENOTSUP, 2683 RTE_FLOW_ERROR_TYPE_ITEM, item, 2684 "multiple L2 layers not supported"); 2685 if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) || 2686 (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3))) 2687 return rte_flow_error_set(error, EINVAL, 2688 RTE_FLOW_ERROR_TYPE_ITEM, item, 2689 "L2 layer should not follow " 2690 "L3 layers"); 2691 if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) || 2692 (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN))) 2693 return rte_flow_error_set(error, EINVAL, 2694 RTE_FLOW_ERROR_TYPE_ITEM, item, 2695 "L2 layer should not follow VLAN"); 2696 if (item_flags & MLX5_FLOW_LAYER_GTP) 2697 return rte_flow_error_set(error, EINVAL, 2698 RTE_FLOW_ERROR_TYPE_ITEM, item, 2699 "L2 layer should not follow GTP"); 2700 if (!mask) 2701 mask = &rte_flow_item_eth_mask; 2702 ret = mlx5_flow_item_acceptable(dev, item, (const uint8_t *)mask, 2703 (const uint8_t *)&nic_mask, 2704 sizeof(struct rte_flow_item_eth), 2705 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 2706 return ret; 2707 } 2708 2709 /** 2710 * Validate VLAN item. 2711 * 2712 * @param[in] item 2713 * Item specification. 2714 * @param[in] item_flags 2715 * Bit-fields that holds the items detected until now. 2716 * @param[in] dev 2717 * Ethernet device flow is being created on. 2718 * @param[out] error 2719 * Pointer to error structure. 2720 * 2721 * @return 2722 * 0 on success, a negative errno value otherwise and rte_errno is set. 2723 */ 2724 int 2725 mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, 2726 uint64_t item_flags, 2727 struct rte_eth_dev *dev, 2728 struct rte_flow_error *error) 2729 { 2730 const struct rte_flow_item_vlan *spec = item->spec; 2731 const struct rte_flow_item_vlan *mask = item->mask; 2732 const struct rte_flow_item_vlan nic_mask = { 2733 .hdr.vlan_tci = RTE_BE16(UINT16_MAX), 2734 .hdr.eth_proto = RTE_BE16(UINT16_MAX), 2735 }; 2736 uint16_t vlan_tag = 0; 2737 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 2738 int ret; 2739 const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 | 2740 MLX5_FLOW_LAYER_INNER_L4) : 2741 (MLX5_FLOW_LAYER_OUTER_L3 | 2742 MLX5_FLOW_LAYER_OUTER_L4); 2743 const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN : 2744 MLX5_FLOW_LAYER_OUTER_VLAN; 2745 2746 if (item_flags & vlanm) 2747 return rte_flow_error_set(error, EINVAL, 2748 RTE_FLOW_ERROR_TYPE_ITEM, item, 2749 "multiple VLAN layers not supported"); 2750 else if ((item_flags & l34m) != 0) 2751 return rte_flow_error_set(error, EINVAL, 2752 RTE_FLOW_ERROR_TYPE_ITEM, item, 2753 "VLAN cannot follow L3/L4 layer"); 2754 if (!mask) 2755 mask = &rte_flow_item_vlan_mask; 2756 ret = mlx5_flow_item_acceptable(dev, item, (const uint8_t *)mask, 2757 (const uint8_t *)&nic_mask, 2758 sizeof(struct rte_flow_item_vlan), 2759 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 2760 if (ret) 2761 return ret; 2762 if (!tunnel && mask->hdr.vlan_tci != RTE_BE16(0x0fff)) { 2763 struct mlx5_priv *priv = dev->data->dev_private; 2764 2765 if (priv->vmwa_context) { 2766 /* 2767 * A non-NULL context means we run in a virtual machine 2768 * with SR-IOV enabled, and a VLAN interface has to be 2769 * created to make the hypervisor set up the E-Switch 2770 * vport context correctly. We avoid creating multiple 2771 * VLAN interfaces, so we cannot support a VLAN tag mask.
2772 */ 2773 return rte_flow_error_set(error, EINVAL, 2774 RTE_FLOW_ERROR_TYPE_ITEM, 2775 item, 2776 "VLAN tag mask is not" 2777 " supported in virtual" 2778 " environment"); 2779 } 2780 } 2781 if (spec) { 2782 vlan_tag = spec->hdr.vlan_tci; 2783 vlan_tag &= mask->hdr.vlan_tci; 2784 } 2785 /* 2786 * From the verbs perspective, an empty VLAN is equivalent 2787 * to a packet without a VLAN layer. 2788 */ 2789 if (!vlan_tag) 2790 return rte_flow_error_set(error, EINVAL, 2791 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, 2792 item->spec, 2793 "VLAN cannot be empty"); 2794 return 0; 2795 } 2796 2797 /** 2798 * Validate IPV4 item. 2799 * 2800 * @param[in] item 2801 * Item specification. 2802 * @param[in] item_flags 2803 * Bit-fields that holds the items detected until now. 2804 * @param[in] last_item 2805 * Previous validated item in the pattern items. 2806 * @param[in] ether_type 2807 * Type in the Ethernet layer header (including dot1q). 2808 * @param[in] acc_mask 2809 * Acceptable mask, if NULL the default internal mask 2810 * will be used to check whether item fields are supported. 2811 * @param[in] range_accepted 2812 * True if range of values is accepted for specific fields, false otherwise. 2813 * @param[out] error 2814 * Pointer to error structure. 2815 * 2816 * @return 2817 * 0 on success, a negative errno value otherwise and rte_errno is set. 2818 */ 2819 int 2820 mlx5_flow_validate_item_ipv4(const struct rte_eth_dev *dev, 2821 const struct rte_flow_item *item, 2822 uint64_t item_flags, 2823 uint64_t last_item, 2824 uint16_t ether_type, 2825 const struct rte_flow_item_ipv4 *acc_mask, 2826 bool range_accepted, 2827 struct rte_flow_error *error) 2828 { 2829 const struct rte_flow_item_ipv4 *mask = item->mask; 2830 const struct rte_flow_item_ipv4 *spec = item->spec; 2831 const struct rte_flow_item_ipv4 nic_mask = { 2832 .hdr = { 2833 .src_addr = RTE_BE32(0xffffffff), 2834 .dst_addr = RTE_BE32(0xffffffff), 2835 .type_of_service = 0xff, 2836 .next_proto_id = 0xff, 2837 }, 2838 }; 2839 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 2840 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 2841 MLX5_FLOW_LAYER_OUTER_L3; 2842 const uint64_t l4m = tunnel ?
MLX5_FLOW_LAYER_INNER_L4 : 2843 MLX5_FLOW_LAYER_OUTER_L4; 2844 int ret; 2845 uint8_t next_proto = 0xFF; 2846 const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 | 2847 MLX5_FLOW_LAYER_OUTER_VLAN | 2848 MLX5_FLOW_LAYER_INNER_VLAN); 2849 2850 if ((last_item & l2_vlan) && ether_type && 2851 ether_type != RTE_ETHER_TYPE_IPV4) 2852 return rte_flow_error_set(error, EINVAL, 2853 RTE_FLOW_ERROR_TYPE_ITEM, item, 2854 "IPv4 cannot follow an L2/VLAN layer " 2855 "whose ether type is not IPv4"); 2856 if (item_flags & MLX5_FLOW_LAYER_IPIP) { 2857 if (mask && spec) 2858 next_proto = mask->hdr.next_proto_id & 2859 spec->hdr.next_proto_id; 2860 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) 2861 return rte_flow_error_set(error, EINVAL, 2862 RTE_FLOW_ERROR_TYPE_ITEM, 2863 item, 2864 "multiple tunnel " 2865 "layers not supported"); 2866 } 2867 if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) 2868 return rte_flow_error_set(error, EINVAL, 2869 RTE_FLOW_ERROR_TYPE_ITEM, item, 2870 "wrong tunnel type - IPv6 specified " 2871 "but IPv4 item provided"); 2872 if (item_flags & l3m) 2873 return rte_flow_error_set(error, ENOTSUP, 2874 RTE_FLOW_ERROR_TYPE_ITEM, item, 2875 "multiple L3 layers not supported"); 2876 else if (item_flags & l4m) 2877 return rte_flow_error_set(error, EINVAL, 2878 RTE_FLOW_ERROR_TYPE_ITEM, item, 2879 "L3 cannot follow an L4 layer."); 2880 else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && 2881 !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) 2882 return rte_flow_error_set(error, EINVAL, 2883 RTE_FLOW_ERROR_TYPE_ITEM, item, 2884 "L3 cannot follow an NVGRE layer."); 2885 if (!mask) 2886 mask = &rte_flow_item_ipv4_mask; 2887 else if (mask->hdr.next_proto_id != 0 && 2888 mask->hdr.next_proto_id != 0xff) 2889 return rte_flow_error_set(error, EINVAL, 2890 RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask, 2891 "partial mask is not supported" 2892 " for protocol"); 2893 ret = mlx5_flow_item_acceptable(dev, item, (const uint8_t *)mask, 2894 acc_mask ? (const uint8_t *)acc_mask 2895 : (const uint8_t *)&nic_mask, 2896 sizeof(struct rte_flow_item_ipv4), 2897 range_accepted, error); 2898 if (ret < 0) 2899 return ret; 2900 return 0; 2901 } 2902 2903 /** 2904 * Validate IPV6 item. 2905 * 2906 * @param[in] item 2907 * Item specification. 2908 * @param[in] item_flags 2909 * Bit-fields that holds the items detected until now. 2910 * @param[in] last_item 2911 * Previous validated item in the pattern items. 2912 * @param[in] ether_type 2913 * Type in the Ethernet layer header (including dot1q). 2914 * @param[in] acc_mask 2915 * Acceptable mask, if NULL the default internal mask 2916 * will be used to check whether item fields are supported. 2917 * @param[out] error 2918 * Pointer to error structure. 2919 * 2920 * @return 2921 * 0 on success, a negative errno value otherwise and rte_errno is set.
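 *
 * A minimal usage sketch (illustrative only; last_item/ether_type mirror
 * what the pattern walk would have collected for an Ethernet/IPv6 flow):
 *
 * @code
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_IPV6,
 *		.mask = &rte_flow_item_ipv6_mask,	// default mask
 *	};
 *	struct rte_flow_error err;
 *	uint64_t item_flags = MLX5_FLOW_LAYER_OUTER_L2;
 *
 *	if (mlx5_flow_validate_item_ipv6(dev, &item, item_flags,
 *					 MLX5_FLOW_LAYER_OUTER_L2,
 *					 RTE_ETHER_TYPE_IPV6, NULL, &err))
 *		DRV_LOG(DEBUG, "IPv6 item rejected: %s", err.message);
 * @endcode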
2922 */ 2923 int 2924 mlx5_flow_validate_item_ipv6(const struct rte_eth_dev *dev, 2925 const struct rte_flow_item *item, 2926 uint64_t item_flags, 2927 uint64_t last_item, 2928 uint16_t ether_type, 2929 const struct rte_flow_item_ipv6 *acc_mask, 2930 struct rte_flow_error *error) 2931 { 2932 const struct rte_flow_item_ipv6 *mask = item->mask; 2933 const struct rte_flow_item_ipv6 *spec = item->spec; 2934 const struct rte_flow_item_ipv6 nic_mask = { 2935 .hdr = { 2936 .src_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 2937 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 2938 .dst_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 2939 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 2940 .vtc_flow = RTE_BE32(0xffffffff), 2941 .proto = 0xff, 2942 }, 2943 }; 2944 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 2945 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 2946 MLX5_FLOW_LAYER_OUTER_L3; 2947 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 2948 MLX5_FLOW_LAYER_OUTER_L4; 2949 int ret; 2950 uint8_t next_proto = 0xFF; 2951 const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 | 2952 MLX5_FLOW_LAYER_OUTER_VLAN | 2953 MLX5_FLOW_LAYER_INNER_VLAN); 2954 2955 if ((last_item & l2_vlan) && ether_type && 2956 ether_type != RTE_ETHER_TYPE_IPV6) 2957 return rte_flow_error_set(error, EINVAL, 2958 RTE_FLOW_ERROR_TYPE_ITEM, item, 2959 "IPv6 cannot follow L2/VLAN layer " 2960 "which ether type is not IPv6"); 2961 if (mask && mask->hdr.proto == UINT8_MAX && spec) 2962 next_proto = spec->hdr.proto; 2963 if (item_flags & MLX5_FLOW_LAYER_IPIP) { 2964 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) 2965 return rte_flow_error_set(error, EINVAL, 2966 RTE_FLOW_ERROR_TYPE_ITEM, 2967 item, 2968 "multiple tunnel " 2969 "not supported"); 2970 } 2971 if (next_proto == IPPROTO_HOPOPTS || 2972 next_proto == IPPROTO_ROUTING || 2973 next_proto == IPPROTO_FRAGMENT || 2974 next_proto == IPPROTO_AH || 2975 next_proto == IPPROTO_DSTOPTS || 2976 (!mlx5_hws_active(dev) && next_proto == IPPROTO_ESP)) 2977 return rte_flow_error_set(error, EINVAL, 2978 RTE_FLOW_ERROR_TYPE_ITEM, item, 2979 "IPv6 proto (next header) should " 2980 "not be set as extension header"); 2981 if (item_flags & MLX5_FLOW_LAYER_IPIP) 2982 return rte_flow_error_set(error, EINVAL, 2983 RTE_FLOW_ERROR_TYPE_ITEM, item, 2984 "wrong tunnel type - IPv4 specified " 2985 "but IPv6 item provided"); 2986 if (item_flags & l3m) 2987 return rte_flow_error_set(error, ENOTSUP, 2988 RTE_FLOW_ERROR_TYPE_ITEM, item, 2989 "multiple L3 layers not supported"); 2990 else if (item_flags & l4m) 2991 return rte_flow_error_set(error, EINVAL, 2992 RTE_FLOW_ERROR_TYPE_ITEM, item, 2993 "L3 cannot follow an L4 layer."); 2994 else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && 2995 !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) 2996 return rte_flow_error_set(error, EINVAL, 2997 RTE_FLOW_ERROR_TYPE_ITEM, item, 2998 "L3 cannot follow an NVGRE layer."); 2999 if (!mask) 3000 mask = &rte_flow_item_ipv6_mask; 3001 ret = mlx5_flow_item_acceptable(dev, item, (const uint8_t *)mask, 3002 acc_mask ? (const uint8_t *)acc_mask 3003 : (const uint8_t *)&nic_mask, 3004 sizeof(struct rte_flow_item_ipv6), 3005 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 3006 if (ret < 0) 3007 return ret; 3008 return 0; 3009 } 3010 3011 /** 3012 * Validate UDP item. 3013 * 3014 * @param[in] item 3015 * Item specification. 3016 * @param[in] item_flags 3017 * Bit-fields that holds the items detected until now. 
3018 * @param[in] target_protocol 3019 * The next protocol in the previous item. 3022 * @param[out] error 3023 * Pointer to error structure. 3024 * 3025 * @return 3026 * 0 on success, a negative errno value otherwise and rte_errno is set. 3027 */ 3028 int 3029 mlx5_flow_validate_item_udp(const struct rte_eth_dev *dev, 3030 const struct rte_flow_item *item, 3031 uint64_t item_flags, 3032 uint8_t target_protocol, 3033 struct rte_flow_error *error) 3034 { 3035 const struct rte_flow_item_udp *mask = item->mask; 3036 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 3037 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 3038 MLX5_FLOW_LAYER_OUTER_L3; 3039 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 3040 MLX5_FLOW_LAYER_OUTER_L4; 3041 int ret; 3042 3043 if (!mlx5_hws_active(dev)) { 3044 if (target_protocol != 0xff && target_protocol != IPPROTO_UDP) 3045 return rte_flow_error_set(error, EINVAL, 3046 RTE_FLOW_ERROR_TYPE_ITEM, 3047 item, "protocol filtering not compatible with UDP layer"); 3048 if (!(item_flags & l3m)) 3049 return rte_flow_error_set(error, EINVAL, 3050 RTE_FLOW_ERROR_TYPE_ITEM, 3051 item, 3052 "L3 is mandatory to filter on L4"); 3053 } 3054 if (item_flags & l4m) 3055 return rte_flow_error_set(error, EINVAL, 3056 RTE_FLOW_ERROR_TYPE_ITEM, item, 3057 "multiple L4 layers not supported"); 3058 if (!mask) 3059 mask = &rte_flow_item_udp_mask; 3060 ret = mlx5_flow_item_acceptable 3061 (dev, item, (const uint8_t *)mask, 3062 (const uint8_t *)&rte_flow_item_udp_mask, 3063 sizeof(struct rte_flow_item_udp), MLX5_ITEM_RANGE_NOT_ACCEPTED, 3064 error); 3065 if (ret < 0) 3066 return ret; 3067 return 0; 3068 } 3069 3070 /** 3071 * Validate TCP item. 3072 * 3073 * @param[in] item 3074 * Item specification. 3075 * @param[in] item_flags 3076 * Bit-fields that holds the items detected until now. 3077 * @param[in] target_protocol 3078 * The next protocol in the previous item. * @param[in] flow_mask * mlx5 flow-specific (DV, verbs, etc.) supported header fields mask. 3079 * @param[out] error 3080 * Pointer to error structure. 3081 * 3082 * @return 3083 * 0 on success, a negative errno value otherwise and rte_errno is set. 3084 */ 3085 int 3086 mlx5_flow_validate_item_tcp(const struct rte_eth_dev *dev, 3087 const struct rte_flow_item *item, 3088 uint64_t item_flags, 3089 uint8_t target_protocol, 3090 const struct rte_flow_item_tcp *flow_mask, 3091 struct rte_flow_error *error) 3092 { 3093 const struct rte_flow_item_tcp *mask = item->mask; 3094 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 3095 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 3096 MLX5_FLOW_LAYER_OUTER_L3; 3097 const uint64_t l4m = tunnel ?
MLX5_FLOW_LAYER_INNER_L4 : 3098 MLX5_FLOW_LAYER_OUTER_L4; 3099 int ret; 3100 3101 MLX5_ASSERT(flow_mask); 3102 if (!mlx5_hws_active(dev)) { 3103 if (target_protocol != 0xff && target_protocol != IPPROTO_TCP) 3104 return rte_flow_error_set(error, EINVAL, 3105 RTE_FLOW_ERROR_TYPE_ITEM, 3106 item, "protocol filtering not compatible with TCP layer"); 3107 if (!(item_flags & l3m)) 3108 return rte_flow_error_set(error, EINVAL, 3109 RTE_FLOW_ERROR_TYPE_ITEM, 3110 item, "L3 is mandatory to filter on L4"); 3111 } 3112 if (item_flags & l4m) 3113 return rte_flow_error_set(error, EINVAL, 3114 RTE_FLOW_ERROR_TYPE_ITEM, item, 3115 "multiple L4 layers not supported"); 3116 if (!mask) 3117 mask = &rte_flow_item_tcp_mask; 3118 ret = mlx5_flow_item_acceptable 3119 (dev, item, (const uint8_t *)mask, 3120 (const uint8_t *)flow_mask, 3121 sizeof(struct rte_flow_item_tcp), MLX5_ITEM_RANGE_NOT_ACCEPTED, 3122 error); 3123 if (ret < 0) 3124 return ret; 3125 return 0; 3126 } 3127 3128 /** 3129 * Validate VXLAN item. 3130 * 3131 * @param[in] dev 3132 * Pointer to the Ethernet device structure. 3133 * @param[in] udp_dport 3134 * UDP destination port 3135 * @param[in] item 3136 * Item specification. 3137 * @param[in] item_flags 3138 * Bit-fields that holds the items detected until now. 3139 * @param root 3140 * Whether action is on root table. 3141 * @param[out] error 3142 * Pointer to error structure. 3143 * 3144 * @return 3145 * 0 on success, a negative errno value otherwise and rte_errno is set. 3146 */ 3147 int 3148 mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev, 3149 uint16_t udp_dport, 3150 const struct rte_flow_item *item, 3151 uint64_t item_flags, 3152 bool root, 3153 struct rte_flow_error *error) 3154 { 3155 const struct rte_flow_item_vxlan *spec = item->spec; 3156 const struct rte_flow_item_vxlan *mask = item->mask; 3157 int ret; 3158 struct mlx5_priv *priv = dev->data->dev_private; 3159 union vni { 3160 uint32_t vlan_id; 3161 uint8_t vni[4]; 3162 } id = { .vlan_id = 0, }; 3163 const struct rte_flow_item_vxlan nic_mask = { 3164 .hdr.vni = { 0xff, 0xff, 0xff }, 3165 .hdr.rsvd1 = 0xff, 3166 }; 3167 const struct rte_flow_item_vxlan *valid_mask; 3168 3169 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 3170 return rte_flow_error_set(error, ENOTSUP, 3171 RTE_FLOW_ERROR_TYPE_ITEM, item, 3172 "multiple tunnel layers not" 3173 " supported"); 3174 /* HWS can match entire VXLAN, VXLAN-GBP and VXLAN-GPE headers */ 3175 if (mlx5_hws_active(dev)) 3176 return 0; 3177 valid_mask = &rte_flow_item_vxlan_mask; 3178 /* 3179 * Verify only UDPv4 is present as defined in 3180 * https://tools.ietf.org/html/rfc7348 3181 */ 3182 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 3183 return rte_flow_error_set(error, EINVAL, 3184 RTE_FLOW_ERROR_TYPE_ITEM, 3185 item, "no outer UDP layer found"); 3186 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 3187 return rte_flow_error_set(error, ENOTSUP, 3188 RTE_FLOW_ERROR_TYPE_ITEM, item, 3189 "VXLAN tunnel must be fully defined"); 3190 if (!mask) 3191 mask = &rte_flow_item_vxlan_mask; 3192 3193 if (priv->sh->steering_format_version != 3194 MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 || 3195 !udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN) { 3196 /* non-root table */ 3197 if (!root && priv->sh->misc5_cap) 3198 valid_mask = &nic_mask; 3199 /* Group zero in NIC domain */ 3200 if (!root && priv->sh->tunnel_header_0_1) 3201 valid_mask = &nic_mask; 3202 } 3203 ret = mlx5_flow_item_acceptable 3204 (dev, item, (const uint8_t *)mask, 3205 (const uint8_t *)valid_mask, 3206 sizeof(struct rte_flow_item_vxlan), 3207 
MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 3208 if (ret < 0) 3209 return ret; 3210 if (spec) { 3211 memcpy(&id.vni[1], spec->hdr.vni, 3); 3212 memcpy(&id.vni[1], mask->hdr.vni, 3); 3213 } 3214 return 0; 3215 } 3216 3217 /** 3218 * Validate VXLAN_GPE item. 3219 * 3220 * @param[in] item 3221 * Item specification. 3222 * @param[in] item_flags 3223 * Bit-fields that holds the items detected until now. 3224 * @param[in] dev 3225 * Pointer to the Ethernet device structure. 3228 * @param[out] error 3229 * Pointer to error structure. 3230 * 3231 * @return 3232 * 0 on success, a negative errno value otherwise and rte_errno is set. 3233 */ 3234 int 3235 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, 3236 uint64_t item_flags, 3237 struct rte_eth_dev *dev, 3238 struct rte_flow_error *error) 3239 { 3240 struct mlx5_priv *priv = dev->data->dev_private; 3241 const struct rte_flow_item_vxlan_gpe *spec = item->spec; 3242 const struct rte_flow_item_vxlan_gpe *mask = item->mask; 3243 int ret; 3244 union vni { 3245 uint32_t vlan_id; 3246 uint8_t vni[4]; 3247 } id = { .vlan_id = 0, }; 3248 3249 struct rte_flow_item_vxlan_gpe nic_mask = { 3250 .vni = { 0xff, 0xff, 0xff }, 3251 .protocol = 0xff, 3252 .flags = 0xff, 3253 }; 3254 3255 if (!priv->sh->config.l3_vxlan_en) 3256 return rte_flow_error_set(error, ENOTSUP, 3257 RTE_FLOW_ERROR_TYPE_ITEM, item, 3258 "L3 VXLAN is not enabled by device" 3259 " parameter and/or not configured in" 3260 " firmware"); 3261 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 3262 return rte_flow_error_set(error, ENOTSUP, 3263 RTE_FLOW_ERROR_TYPE_ITEM, item, 3264 "multiple tunnel layers not" 3265 " supported"); 3266 /* 3267 * Verify only UDPv4 is present as required by the 3268 * VXLAN-GPE specification (draft-ietf-nvo3-vxlan-gpe). 3269 */ 3270 if (!mlx5_hws_active(dev)) { 3271 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 3272 return rte_flow_error_set(error, EINVAL, 3273 RTE_FLOW_ERROR_TYPE_ITEM, 3274 item, "no outer UDP layer found"); 3275 } 3276 if (!mask) 3277 mask = &rte_flow_item_vxlan_gpe_mask; 3278 if (mlx5_hws_active(dev) || 3279 (priv->sh->misc5_cap && priv->sh->tunnel_header_0_1)) { 3280 nic_mask.rsvd0[0] = 0xff; 3281 nic_mask.rsvd0[1] = 0xff; 3282 nic_mask.rsvd1 = 0xff; 3283 } 3284 ret = mlx5_flow_item_acceptable 3285 (dev, item, (const uint8_t *)mask, 3286 (const uint8_t *)&nic_mask, 3287 sizeof(struct rte_flow_item_vxlan_gpe), 3288 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 3289 if (ret < 0) 3290 return ret; 3291 if (spec) { 3292 memcpy(&id.vni[1], spec->hdr.vni, 3); 3293 memcpy(&id.vni[1], mask->hdr.vni, 3); 3294 } 3295 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 3296 return rte_flow_error_set(error, ENOTSUP, 3297 RTE_FLOW_ERROR_TYPE_ITEM, item, 3298 "VXLAN-GPE tunnel must be fully" 3299 " defined"); 3300 return 0; 3301 } 3302 /** 3303 * Validate GRE Key item. 3304 * 3305 * @param[in] item 3306 * Item specification. 3307 * @param[in] item_flags 3308 * Bit flags to mark detected items. 3309 * @param[in] gre_item 3310 * Pointer to the GRE item. 3311 * @param[out] error 3312 * Pointer to error structure. 3313 * 3314 * @return 3315 * 0 on success, a negative errno value otherwise and rte_errno is set.
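 *
 * A minimal usage sketch (illustrative only; the GRE item shown here
 * stands for the one previously validated in the same pattern):
 *
 * @code
 *	struct rte_flow_item_gre gre_spec = {
 *		.c_rsvd0_ver = RTE_BE16(0x2000),	// K bit set
 *	};
 *	struct rte_flow_item gre_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_GRE,
 *		.spec = &gre_spec,
 *	};
 *	rte_be32_t key = RTE_BE32(0x1234);
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_GRE_KEY,
 *		.spec = &key,	// NULL mask falls back to all-ones below
 *	};
 *	struct rte_flow_error err;
 *	uint64_t item_flags = MLX5_FLOW_LAYER_GRE;
 *
 *	if (mlx5_flow_validate_item_gre_key(dev, &item, item_flags,
 *					    &gre_item, &err))
 *		DRV_LOG(DEBUG, "GRE key rejected: %s", err.message);
 * @endcode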
3316 */ 3317 int 3318 mlx5_flow_validate_item_gre_key(const struct rte_eth_dev *dev, 3319 const struct rte_flow_item *item, 3320 uint64_t item_flags, 3321 const struct rte_flow_item *gre_item, 3322 struct rte_flow_error *error) 3323 { 3324 const rte_be32_t *mask = item->mask; 3325 int ret = 0; 3326 rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX); 3327 const struct rte_flow_item_gre *gre_spec; 3328 const struct rte_flow_item_gre *gre_mask; 3329 3330 if (item_flags & MLX5_FLOW_LAYER_GRE_KEY) 3331 return rte_flow_error_set(error, ENOTSUP, 3332 RTE_FLOW_ERROR_TYPE_ITEM, item, 3333 "Multiple GRE key items not supported"); 3334 if (!(item_flags & MLX5_FLOW_LAYER_GRE)) 3335 return rte_flow_error_set(error, ENOTSUP, 3336 RTE_FLOW_ERROR_TYPE_ITEM, item, 3337 "No preceding GRE header"); 3338 if (item_flags & MLX5_FLOW_LAYER_INNER) 3339 return rte_flow_error_set(error, ENOTSUP, 3340 RTE_FLOW_ERROR_TYPE_ITEM, item, 3341 "GRE key following a wrong item"); 3342 gre_mask = gre_item->mask; 3343 if (!gre_mask) 3344 gre_mask = &rte_flow_item_gre_mask; 3345 gre_spec = gre_item->spec; 3346 if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) && 3347 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000))) 3348 return rte_flow_error_set(error, EINVAL, 3349 RTE_FLOW_ERROR_TYPE_ITEM, item, 3350 "Key bit must be on"); 3351 3352 if (!mask) 3353 mask = &gre_key_default_mask; 3354 ret = mlx5_flow_item_acceptable 3355 (dev, item, (const uint8_t *)mask, 3356 (const uint8_t *)&gre_key_default_mask, 3357 sizeof(rte_be32_t), MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 3358 return ret; 3359 } 3360 3361 /** 3362 * Validate GRE optional item. 3363 * 3364 * @param[in] dev 3365 * Pointer to the Ethernet device structure. 3366 * @param[in] item 3367 * Item specification. 3368 * @param[in] item_flags 3369 * Bit flags to mark detected items. 3370 * @param[in] attr 3371 * Flow rule attributes. 3372 * @param[in] gre_item 3373 * Pointer to the GRE item. 3374 * @param[out] error 3375 * Pointer to error structure. 3376 * 3377 * @return 3378 * 0 on success, a negative errno value otherwise and rte_errno is set.
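 *
 * A minimal usage sketch (illustrative only; attr and gre_item are assumed
 * to come from the surrounding validation context):
 *
 * @code
 *	struct rte_flow_item_gre_opt opt_spec = {
 *		.key.key = RTE_BE32(0x1234),
 *	};
 *	struct rte_flow_item_gre_opt opt_mask = {
 *		.key.key = RTE_BE32(UINT32_MAX),
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_GRE_OPTION,
 *		.spec = &opt_spec,
 *		.mask = &opt_mask,
 *	};
 *	struct rte_flow_error err;
 *	uint64_t item_flags = MLX5_FLOW_LAYER_GRE;
 *
 *	if (mlx5_flow_validate_item_gre_option(dev, &item, item_flags,
 *					       attr, &gre_item, &err))
 *		DRV_LOG(DEBUG, "GRE option rejected: %s", err.message);
 * @endcode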
3379 */ 3380 int 3381 mlx5_flow_validate_item_gre_option(struct rte_eth_dev *dev, 3382 const struct rte_flow_item *item, 3383 uint64_t item_flags, 3384 const struct rte_flow_attr *attr, 3385 const struct rte_flow_item *gre_item, 3386 struct rte_flow_error *error) 3387 { 3388 const struct rte_flow_item_gre *gre_spec = gre_item->spec; 3389 const struct rte_flow_item_gre *gre_mask = gre_item->mask; 3390 const struct rte_flow_item_gre_opt *spec = item->spec; 3391 const struct rte_flow_item_gre_opt *mask = item->mask; 3392 struct mlx5_priv *priv = dev->data->dev_private; 3393 int ret = 0; 3394 struct rte_flow_item_gre_opt nic_mask = { 3395 .checksum_rsvd = { 3396 .checksum = RTE_BE16(UINT16_MAX), 3397 .reserved1 = 0x0, 3398 }, 3399 .key = { 3400 .key = RTE_BE32(UINT32_MAX), 3401 }, 3402 .sequence = { 3403 .sequence = RTE_BE32(UINT32_MAX), 3404 }, 3405 }; 3406 3407 if (!(item_flags & MLX5_FLOW_LAYER_GRE)) 3408 return rte_flow_error_set(error, ENOTSUP, 3409 RTE_FLOW_ERROR_TYPE_ITEM, item, 3410 "No preceding GRE header"); 3411 if (item_flags & MLX5_FLOW_LAYER_INNER) 3412 return rte_flow_error_set(error, ENOTSUP, 3413 RTE_FLOW_ERROR_TYPE_ITEM, item, 3414 "GRE option following a wrong item"); 3415 if ((!spec && !mlx5_hws_active(dev)) || !mask) 3416 return rte_flow_error_set(error, EINVAL, 3417 RTE_FLOW_ERROR_TYPE_ITEM, item, 3418 "At least one field gre_option(checksum/key/sequence) must be specified"); 3419 if (!gre_mask) 3420 gre_mask = &rte_flow_item_gre_mask; 3421 if (mask->checksum_rsvd.checksum) 3422 if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x8000)) && 3423 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x8000))) 3424 return rte_flow_error_set(error, EINVAL, 3425 RTE_FLOW_ERROR_TYPE_ITEM, 3426 item, 3427 "Checksum bit must be on"); 3428 if (mask->key.key) 3429 if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) && 3430 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000))) 3431 return rte_flow_error_set(error, EINVAL, 3432 RTE_FLOW_ERROR_TYPE_ITEM, 3433 item, "Key bit must be on"); 3434 if (mask->sequence.sequence) 3435 if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x1000)) && 3436 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x1000))) 3437 return rte_flow_error_set(error, EINVAL, 3438 RTE_FLOW_ERROR_TYPE_ITEM, 3439 item, 3440 "Sequence bit must be on"); 3441 if (!mlx5_hws_active(dev)) { 3442 if (mask->checksum_rsvd.checksum || mask->sequence.sequence) { 3443 if (priv->sh->steering_format_version == 3444 MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 || 3445 ((attr->group || 3446 (attr->transfer && priv->fdb_def_rule)) && 3447 !priv->sh->misc5_cap) || 3448 (!(priv->sh->tunnel_header_0_1 && 3449 priv->sh->tunnel_header_2_3) && 3450 !attr->group && 3451 (!attr->transfer || !priv->fdb_def_rule))) 3452 return rte_flow_error_set 3453 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, 3454 item, "Checksum/Sequence not supported"); 3455 } 3456 } 3457 ret = mlx5_flow_item_acceptable 3458 (dev, item, (const uint8_t *)mask, 3459 (const uint8_t *)&nic_mask, 3460 sizeof(struct rte_flow_item_gre_opt), 3461 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 3462 return ret; 3463 } 3464 3465 /** 3466 * Validate GRE item. 3467 * 3468 * @param[in] item 3469 * Item specification. 3470 * @param[in] item_flags 3471 * Bit flags to mark detected items. 3472 * @param[in] target_protocol 3473 * The next protocol in the previous item. 3474 * @param[out] error 3475 * Pointer to error structure. 3476 * 3477 * @return 3478 * 0 on success, a negative errno value otherwise and rte_errno is set. 
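 *
 * A minimal usage sketch (illustrative only; the outer L3 layer flag
 * mirrors what the pattern walk collects before reaching GRE):
 *
 * @code
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_GRE,
 *		.mask = &rte_flow_item_gre_mask,
 *	};
 *	struct rte_flow_error err;
 *	uint64_t item_flags = MLX5_FLOW_LAYER_OUTER_L3_IPV4;
 *
 *	if (mlx5_flow_validate_item_gre(dev, &item, item_flags,
 *					IPPROTO_GRE, &err))
 *		DRV_LOG(DEBUG, "GRE item rejected: %s", err.message);
 * @endcode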
*/ 3480 int 3481 mlx5_flow_validate_item_gre(const struct rte_eth_dev *dev, 3482 const struct rte_flow_item *item, 3483 uint64_t item_flags, 3484 uint8_t target_protocol, 3485 struct rte_flow_error *error) 3486 { 3487 const struct rte_flow_item_gre *spec __rte_unused = item->spec; 3488 const struct rte_flow_item_gre *mask = item->mask; 3489 int ret; 3490 const struct rte_flow_item_gre nic_mask = { 3491 .c_rsvd0_ver = RTE_BE16(0xB000), 3492 .protocol = RTE_BE16(UINT16_MAX), 3493 }; 3494 3495 if (target_protocol != 0xff && target_protocol != IPPROTO_GRE) 3496 return rte_flow_error_set(error, EINVAL, 3497 RTE_FLOW_ERROR_TYPE_ITEM, item, 3498 "protocol filtering not compatible" 3499 " with this GRE layer"); 3500 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 3501 return rte_flow_error_set(error, ENOTSUP, 3502 RTE_FLOW_ERROR_TYPE_ITEM, item, 3503 "multiple tunnel layers not" 3504 " supported"); 3505 if (!mlx5_hws_active(dev)) { 3506 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3)) 3507 return rte_flow_error_set(error, ENOTSUP, 3508 RTE_FLOW_ERROR_TYPE_ITEM, 3509 item, "L3 layer is missing"); 3510 } 3511 if (!mask) 3512 mask = &rte_flow_item_gre_mask; 3513 ret = mlx5_flow_item_acceptable 3514 (dev, item, (const uint8_t *)mask, 3515 (const uint8_t *)&nic_mask, 3516 sizeof(struct rte_flow_item_gre), MLX5_ITEM_RANGE_NOT_ACCEPTED, 3517 error); 3518 if (ret < 0) 3519 return ret; 3520 #ifndef HAVE_MLX5DV_DR 3521 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT 3522 if (spec && (spec->protocol & mask->protocol)) 3523 return rte_flow_error_set(error, ENOTSUP, 3524 RTE_FLOW_ERROR_TYPE_ITEM, item, 3525 "without MPLS support the" 3526 " specification cannot be used for" 3527 " filtering"); 3528 #endif 3529 #endif 3530 return 0; 3531 } 3532 3533 /** 3534 * Validate Geneve item. 3535 * 3536 * @param[in] item 3537 * Item specification. 3538 * @param[in] item_flags 3539 * Bit-fields that holds the items detected until now. 3540 * @param[in] dev 3541 * Pointer to the Ethernet device structure. 3542 * @param[out] error 3543 * Pointer to error structure. 3544 * 3545 * @return 3546 * 0 on success, a negative errno value otherwise and rte_errno is set. 3547 */ 3548 3549 int 3550 mlx5_flow_validate_item_geneve(const struct rte_flow_item *item, 3551 uint64_t item_flags, 3552 struct rte_eth_dev *dev, 3553 struct rte_flow_error *error) 3554 { 3555 struct mlx5_priv *priv = dev->data->dev_private; 3556 const struct rte_flow_item_geneve *spec = item->spec; 3557 const struct rte_flow_item_geneve *mask = item->mask; 3558 int ret; 3559 uint16_t gbhdr; 3560 uint8_t opt_len = priv->sh->cdev->config.hca_attr.geneve_max_opt_len ?
3561 MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0; 3562 const struct rte_flow_item_geneve nic_mask = { 3563 .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80), 3564 .vni = { 0xff, 0xff, 0xff }, 3565 .protocol = RTE_BE16(UINT16_MAX), 3566 }; 3567 3568 if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_geneve_rx) 3569 return rte_flow_error_set(error, ENOTSUP, 3570 RTE_FLOW_ERROR_TYPE_ITEM, item, 3571 "L3 Geneve is not enabled by device" 3572 " parameter and/or not configured in" 3573 " firmware"); 3574 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 3575 return rte_flow_error_set(error, ENOTSUP, 3576 RTE_FLOW_ERROR_TYPE_ITEM, item, 3577 "multiple tunnel layers not" 3578 " supported"); 3579 /* 3580 * Verify an outer UDP layer is present as required by the 3581 * Geneve specification (RFC 8926). 3582 */ 3583 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 3584 return rte_flow_error_set(error, EINVAL, 3585 RTE_FLOW_ERROR_TYPE_ITEM, item, 3586 "no outer UDP layer found"); 3587 if (!mask) 3588 mask = &rte_flow_item_geneve_mask; 3589 ret = mlx5_flow_item_acceptable 3590 (dev, item, (const uint8_t *)mask, 3591 (const uint8_t *)&nic_mask, 3592 sizeof(struct rte_flow_item_geneve), 3593 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 3594 if (ret) 3595 return ret; 3596 if (spec) { 3597 gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0); 3598 if (MLX5_GENEVE_VER_VAL(gbhdr) || 3599 MLX5_GENEVE_CRITO_VAL(gbhdr) || 3600 MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1) 3601 return rte_flow_error_set(error, ENOTSUP, 3602 RTE_FLOW_ERROR_TYPE_ITEM, 3603 item, 3604 "Geneve protocol unsupported" 3605 " fields are being used"); 3606 if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len) 3607 return rte_flow_error_set 3608 (error, ENOTSUP, 3609 RTE_FLOW_ERROR_TYPE_ITEM, 3610 item, 3611 "Unsupported Geneve options length"); 3612 } 3613 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 3614 return rte_flow_error_set 3615 (error, ENOTSUP, 3616 RTE_FLOW_ERROR_TYPE_ITEM, item, 3617 "Geneve tunnel must be fully defined"); 3618 return 0; 3619 } 3620 3621 /** 3622 * Validate Geneve TLV option item. 3623 * 3624 * @param[in] item 3625 * Item specification. 3626 * @param[in] last_item 3627 * Previous validated item in the pattern items. 3628 * @param[in] geneve_item 3629 * Previous GENEVE item specification. 3630 * @param[in] dev 3631 * Pointer to the rte_eth_dev structure. 3632 * @param[out] error 3633 * Pointer to error structure. 3634 * 3635 * @return 3636 * 0 on success, a negative errno value otherwise and rte_errno is set.
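 *
 * A minimal usage sketch (illustrative only; the class/type values are
 * arbitrary and geneve_item is the previously validated GENEVE item):
 *
 * @code
 *	rte_be32_t opt_data = RTE_BE32(0xdeadbeef);
 *	rte_be32_t opt_data_mask = RTE_BE32(UINT32_MAX);
 *	struct rte_flow_item_geneve_opt opt_spec = {
 *		.option_class = RTE_BE16(0x0102),
 *		.option_type = 0x42,
 *		.option_len = 1,	// in 4-byte words
 *		.data = &opt_data,
 *	};
 *	struct rte_flow_item_geneve_opt opt_mask = {
 *		.option_class = RTE_BE16(0xffff),
 *		.option_type = 0xff,
 *		.option_len = 0x1f,
 *		.data = &opt_data_mask,
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_GENEVE_OPT,
 *		.spec = &opt_spec,
 *		.mask = &opt_mask,
 *	};
 *	struct rte_flow_error err;
 *
 *	if (mlx5_flow_validate_item_geneve_opt(&item, MLX5_FLOW_LAYER_GENEVE,
 *					       geneve_item, dev, &err))
 *		DRV_LOG(DEBUG, "GENEVE option rejected: %s", err.message);
 * @endcode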
3637 */ 3638 int 3639 mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item, 3640 uint64_t last_item, 3641 const struct rte_flow_item *geneve_item, 3642 struct rte_eth_dev *dev, 3643 struct rte_flow_error *error) 3644 { 3645 struct mlx5_priv *priv = dev->data->dev_private; 3646 struct mlx5_dev_ctx_shared *sh = priv->sh; 3647 struct mlx5_geneve_tlv_option_resource *geneve_opt_resource; 3648 struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr; 3649 uint8_t data_max_supported = 3650 hca_attr->max_geneve_tlv_option_data_len * 4; 3651 const struct rte_flow_item_geneve *geneve_spec; 3652 const struct rte_flow_item_geneve *geneve_mask; 3653 const struct rte_flow_item_geneve_opt *spec = item->spec; 3654 const struct rte_flow_item_geneve_opt *mask = item->mask; 3655 unsigned int i; 3656 unsigned int data_len; 3657 uint8_t tlv_option_len; 3658 uint16_t optlen_m, optlen_v; 3659 const struct rte_flow_item_geneve_opt full_mask = { 3660 .option_class = RTE_BE16(0xffff), 3661 .option_type = 0xff, 3662 .option_len = 0x1f, 3663 }; 3664 3665 if (!mask) 3666 mask = &rte_flow_item_geneve_opt_mask; 3667 if (!spec) 3668 return rte_flow_error_set 3669 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 3670 "Geneve TLV opt class/type/length must be specified"); 3671 if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK) 3672 return rte_flow_error_set 3673 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 3674 "Geneve TLV opt length exceeds the limit (31)"); 3675 /* Check if class type and length masks are full. */ 3676 if (full_mask.option_class != mask->option_class || 3677 full_mask.option_type != mask->option_type || 3678 full_mask.option_len != (mask->option_len & full_mask.option_len)) 3679 return rte_flow_error_set 3680 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 3681 "Geneve TLV opt class/type/length masks must be full"); 3682 /* Check if length is supported */ 3683 if ((uint32_t)spec->option_len > 3684 hca_attr->max_geneve_tlv_option_data_len) 3685 return rte_flow_error_set 3686 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 3687 "Geneve TLV opt length not supported"); 3688 if (hca_attr->max_geneve_tlv_options > 1) 3689 DRV_LOG(DEBUG, 3690 "max_geneve_tlv_options supports more than 1 option"); 3691 /* Check GENEVE item preceding. */ 3692 if (!geneve_item || !(last_item & MLX5_FLOW_LAYER_GENEVE)) 3693 return rte_flow_error_set 3694 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 3695 "Geneve opt item must be preceded with Geneve item"); 3696 geneve_spec = geneve_item->spec; 3697 geneve_mask = geneve_item->mask ? geneve_item->mask : 3698 &rte_flow_item_geneve_mask; 3699 /* Check if GENEVE TLV option size doesn't exceed option length */ 3700 if (geneve_spec && (geneve_mask->ver_opt_len_o_c_rsvd0 || 3701 geneve_spec->ver_opt_len_o_c_rsvd0)) { 3702 tlv_option_len = spec->option_len & mask->option_len; 3703 optlen_v = rte_be_to_cpu_16(geneve_spec->ver_opt_len_o_c_rsvd0); 3704 optlen_v = MLX5_GENEVE_OPTLEN_VAL(optlen_v); 3705 optlen_m = rte_be_to_cpu_16(geneve_mask->ver_opt_len_o_c_rsvd0); 3706 optlen_m = MLX5_GENEVE_OPTLEN_VAL(optlen_m); 3707 if ((optlen_v & optlen_m) <= tlv_option_len) 3708 return rte_flow_error_set 3709 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 3710 "GENEVE TLV option length exceeds optlen"); 3711 } 3712 /* Check if length is 0 or data is 0. 
*/ 3713 if (spec->data == NULL || spec->option_len == 0) 3714 return rte_flow_error_set 3715 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 3716 "Geneve TLV opt with zero data/length not supported"); 3717 /* Check not all data & mask are 0. */ 3718 data_len = spec->option_len * 4; 3719 if (mask->data == NULL) { 3720 for (i = 0; i < data_len; i++) 3721 if (spec->data[i]) 3722 break; 3723 if (i == data_len) 3724 return rte_flow_error_set(error, ENOTSUP, 3725 RTE_FLOW_ERROR_TYPE_ITEM, item, 3726 "Can't match on Geneve option data 0"); 3727 } else { 3728 for (i = 0; i < data_len; i++) 3729 if (spec->data[i] & mask->data[i]) 3730 break; 3731 if (i == data_len) 3732 return rte_flow_error_set(error, ENOTSUP, 3733 RTE_FLOW_ERROR_TYPE_ITEM, item, 3734 "Can't match on Geneve option data and mask 0"); 3735 /* Check data mask supported. */ 3736 for (i = data_max_supported; i < data_len ; i++) 3737 if (mask->data[i]) 3738 return rte_flow_error_set(error, ENOTSUP, 3739 RTE_FLOW_ERROR_TYPE_ITEM, item, 3740 "Data mask is of unsupported size"); 3741 } 3742 /* Check GENEVE option is supported in NIC. */ 3743 if (!hca_attr->geneve_tlv_opt) 3744 return rte_flow_error_set 3745 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 3746 "Geneve TLV opt not supported"); 3747 /* Check if we already have geneve option with different type/class. */ 3748 rte_spinlock_lock(&sh->geneve_tlv_opt_sl); 3749 geneve_opt_resource = sh->geneve_tlv_option_resource; 3750 if (geneve_opt_resource != NULL) 3751 if (geneve_opt_resource->option_class != spec->option_class || 3752 geneve_opt_resource->option_type != spec->option_type || 3753 geneve_opt_resource->length != spec->option_len) { 3754 rte_spinlock_unlock(&sh->geneve_tlv_opt_sl); 3755 return rte_flow_error_set(error, ENOTSUP, 3756 RTE_FLOW_ERROR_TYPE_ITEM, item, 3757 "Only one Geneve TLV option supported"); 3758 } 3759 rte_spinlock_unlock(&sh->geneve_tlv_opt_sl); 3760 return 0; 3761 } 3762 3763 /** 3764 * Validate MPLS item. 3765 * 3766 * @param[in] dev 3767 * Pointer to the rte_eth_dev structure. 3768 * @param[in] item 3769 * Item specification. 3770 * @param[in] item_flags 3771 * Bit-fields that holds the items detected until now. 3772 * @param[in] prev_layer 3773 * The protocol layer indicated in previous item. 3774 * @param[out] error 3775 * Pointer to error structure. 3776 * 3777 * @return 3778 * 0 on success, a negative errno value otherwise and rte_errno is set. 3779 */ 3780 int 3781 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, 3782 const struct rte_flow_item *item __rte_unused, 3783 uint64_t item_flags __rte_unused, 3784 uint64_t prev_layer __rte_unused, 3785 struct rte_flow_error *error) 3786 { 3787 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT 3788 const struct rte_flow_item_mpls hws_nic_mask = { 3789 .label_tc_s = {0xff, 0xff, 0xff}, 3790 .ttl = 0xff 3791 }; 3792 const struct rte_flow_item_mpls *nic_mask = !mlx5_hws_active(dev) ? 
3793 &rte_flow_item_mpls_mask : &hws_nic_mask; 3794 const struct rte_flow_item_mpls *mask = item->mask; 3795 struct mlx5_priv *priv = dev->data->dev_private; 3796 int ret; 3797 3798 if (!mlx5_hws_active(dev)) { 3799 /* MPLS has HW support in HWS */ 3800 if (!priv->sh->dev_cap.mpls_en) 3801 return rte_flow_error_set(error, ENOTSUP, 3802 RTE_FLOW_ERROR_TYPE_ITEM, 3803 item, "MPLS not supported or disabled in firmware configuration."); 3804 /* MPLS over UDP, GRE is allowed */ 3805 if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L4_UDP | 3806 MLX5_FLOW_LAYER_GRE | 3807 MLX5_FLOW_LAYER_GRE_KEY))) 3808 return rte_flow_error_set(error, EINVAL, 3809 RTE_FLOW_ERROR_TYPE_ITEM, 3810 item, "protocol filtering not compatible with MPLS layer"); 3811 /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */ 3812 if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) && 3813 !(item_flags & MLX5_FLOW_LAYER_GRE)) 3814 return rte_flow_error_set(error, ENOTSUP, 3815 RTE_FLOW_ERROR_TYPE_ITEM, item, 3816 "multiple tunnel layers not supported"); 3817 } else { 3818 /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */ 3819 if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) && 3820 !(item_flags & MLX5_FLOW_LAYER_MPLS)) 3821 return rte_flow_error_set(error, ENOTSUP, 3822 RTE_FLOW_ERROR_TYPE_ITEM, item, 3823 "multiple tunnel layers not supported"); 3824 } 3825 if (!mask) 3826 mask = nic_mask; 3827 ret = mlx5_flow_item_acceptable 3828 (dev, item, (const uint8_t *)mask, 3829 (const uint8_t *)nic_mask, 3830 sizeof(struct rte_flow_item_mpls), 3831 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 3832 if (ret < 0) 3833 return ret; 3834 return 0; 3835 #else 3836 return rte_flow_error_set(error, ENOTSUP, 3837 RTE_FLOW_ERROR_TYPE_ITEM, item, 3838 "MPLS is not supported by Verbs, please" 3839 " update."); 3840 #endif 3841 } 3842 3843 /** 3844 * Validate NVGRE item. 3845 * 3846 * @param[in] item 3847 * Item specification. 3848 * @param[in] item_flags 3849 * Bit flags to mark detected items. 3850 * @param[in] target_protocol 3851 * The next protocol in the previous item. 3852 * @param[out] error 3853 * Pointer to error structure. 3854 * 3855 * @return 3856 * 0 on success, a negative errno value otherwise and rte_errno is set. 3857 */ 3858 int 3859 mlx5_flow_validate_item_nvgre(const struct rte_eth_dev *dev, 3860 const struct rte_flow_item *item, 3861 uint64_t item_flags, 3862 uint8_t target_protocol, 3863 struct rte_flow_error *error) 3864 { 3865 const struct rte_flow_item_nvgre *mask = item->mask; 3866 int ret; 3867 3868 if (target_protocol != 0xff && target_protocol != IPPROTO_GRE) 3869 return rte_flow_error_set(error, EINVAL, 3870 RTE_FLOW_ERROR_TYPE_ITEM, item, 3871 "protocol filtering not compatible" 3872 " with this GRE layer"); 3873 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 3874 return rte_flow_error_set(error, ENOTSUP, 3875 RTE_FLOW_ERROR_TYPE_ITEM, item, 3876 "multiple tunnel layers not" 3877 " supported"); 3878 if (!mlx5_hws_active(dev)) { 3879 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3)) 3880 return rte_flow_error_set(error, ENOTSUP, 3881 RTE_FLOW_ERROR_TYPE_ITEM, 3882 item, "L3 Layer is missing"); 3883 } 3884 if (!mask) 3885 mask = &rte_flow_item_nvgre_mask; 3886 ret = mlx5_flow_item_acceptable 3887 (dev, item, (const uint8_t *)mask, 3888 (const uint8_t *)&rte_flow_item_nvgre_mask, 3889 sizeof(struct rte_flow_item_nvgre), 3890 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 3891 if (ret < 0) 3892 return ret; 3893 return 0; 3894 } 3895 3896 /** 3897 * Validate eCPRI item. 3898 * 3899 * @param[in] item 3900 * Item specification. 
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the ethernet layer header (including dot1q).
 * @param[in] acc_mask
 *   Acceptable mask; if NULL, the default internal mask
 *   will be used to check whether item fields are supported.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_ecpri(const struct rte_eth_dev *dev,
			      const struct rte_flow_item *item,
			      uint64_t item_flags,
			      uint64_t last_item,
			      uint16_t ether_type,
			      const struct rte_flow_item_ecpri *acc_mask,
			      struct rte_flow_error *error)
{
	const struct rte_flow_item_ecpri *mask = item->mask;
	const struct rte_flow_item_ecpri nic_mask = {
		.hdr = {
			.common = {
				.u32 =
				RTE_BE32(((const struct rte_ecpri_common_hdr) {
					.type = 0xFF,
					}).u32),
			},
			.dummy[0] = 0xFFFFFFFF,
		},
	};
	const uint64_t outer_l2_vlan = (MLX5_FLOW_LAYER_OUTER_L2 |
					MLX5_FLOW_LAYER_OUTER_VLAN);
	struct rte_flow_item_ecpri mask_lo;

	if (!(last_item & outer_l2_vlan) &&
	    last_item != MLX5_FLOW_LAYER_OUTER_L4_UDP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "eCPRI can only follow L2/VLAN layer or UDP layer");
	if ((last_item & outer_l2_vlan) && ether_type &&
	    ether_type != RTE_ETHER_TYPE_ECPRI)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "eCPRI cannot follow an L2/VLAN layer whose ether type is not 0xAEFE");
	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "eCPRI with tunnel is not supported right now");
	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L3 layers not supported");
	else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "eCPRI cannot coexist with a TCP layer");
	/* Per the specification, eCPRI may be carried over UDP. */
	else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "eCPRI over UDP layer is not yet supported");
	/* Mask for type field in common header could be zero. */
	if (!mask)
		mask = &rte_flow_item_ecpri_mask;
	mask_lo.hdr.common.u32 = rte_be_to_cpu_32(mask->hdr.common.u32);
	/* Input mask is in big-endian format. */
	if (mask_lo.hdr.common.type != 0 && mask_lo.hdr.common.type != 0xff)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
					  "partial mask is not supported for protocol");
	else if (mask_lo.hdr.common.type == 0 && mask->hdr.dummy[0] != 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
					  "message header mask must be after a type mask");
	return mlx5_flow_item_acceptable(dev, item, (const uint8_t *)mask,
					 acc_mask ? (const uint8_t *)acc_mask
						  : (const uint8_t *)&nic_mask,
					 sizeof(struct rte_flow_item_ecpri),
					 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
}

/**
 * Validate the NSH item.
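 *
 * Only the presence of NSH can be matched; matching on NSH header fields is
 * not supported. Both the DV flow engine (dv_flow_en) and firmware support
 * for VXLAN-GPE/NSH are required, as checked below.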
 *
 * @param[in] dev
 *   Pointer to the Ethernet device on which the flow rule is being created.
 * @param[in] item
 *   Item specification.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_nsh(struct rte_eth_dev *dev,
			    const struct rte_flow_item *item,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (item->mask) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "NSH fields matching is not supported");
	}

	if (!priv->sh->config.dv_flow_en) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "NSH support requires DV flow interface");
	}

	if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_vxlan_gpe_nsh) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Current FW does not support matching on NSH");
	}

	return 0;
}

static int
flow_null_validate(struct rte_eth_dev *dev __rte_unused,
		   const struct rte_flow_attr *attr __rte_unused,
		   const struct rte_flow_item items[] __rte_unused,
		   const struct rte_flow_action actions[] __rte_unused,
		   bool external __rte_unused,
		   int hairpin __rte_unused,
		   struct rte_flow_error *error)
{
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
}

static struct mlx5_flow *
flow_null_prepare(struct rte_eth_dev *dev __rte_unused,
		  const struct rte_flow_attr *attr __rte_unused,
		  const struct rte_flow_item items[] __rte_unused,
		  const struct rte_flow_action actions[] __rte_unused,
		  struct rte_flow_error *error)
{
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
	return NULL;
}

static int
flow_null_translate(struct rte_eth_dev *dev __rte_unused,
		    struct mlx5_flow *dev_flow __rte_unused,
		    const struct rte_flow_attr *attr __rte_unused,
		    const struct rte_flow_item items[] __rte_unused,
		    const struct rte_flow_action actions[] __rte_unused,
		    struct rte_flow_error *error)
{
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
}

static int
flow_null_apply(struct rte_eth_dev *dev __rte_unused,
		struct rte_flow *flow __rte_unused,
		struct rte_flow_error *error)
{
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
}

static void
flow_null_remove(struct rte_eth_dev *dev __rte_unused,
		 struct rte_flow *flow __rte_unused)
{
}

static void
flow_null_destroy(struct rte_eth_dev *dev __rte_unused,
		  struct rte_flow *flow __rte_unused)
{
}

static int
flow_null_query(struct rte_eth_dev *dev __rte_unused,
		struct rte_flow *flow __rte_unused,
		const struct rte_flow_action *actions __rte_unused,
		void *data __rte_unused,
		struct rte_flow_error *error)
{
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
}

static int
flow_null_sync_domain(struct rte_eth_dev *dev __rte_unused,
		      uint32_t domains __rte_unused,
		      uint32_t flags __rte_unused)
{
	return 0;
}

int
flow_null_get_aged_flows(struct rte_eth_dev *dev,
void **context __rte_unused, 4107 uint32_t nb_contexts __rte_unused, 4108 struct rte_flow_error *error __rte_unused) 4109 { 4110 DRV_LOG(ERR, "port %u get aged flows is not supported.", 4111 dev->data->port_id); 4112 return -ENOTSUP; 4113 } 4114 4115 uint32_t 4116 flow_null_counter_allocate(struct rte_eth_dev *dev) 4117 { 4118 DRV_LOG(ERR, "port %u counter allocate is not supported.", 4119 dev->data->port_id); 4120 return 0; 4121 } 4122 4123 void 4124 flow_null_counter_free(struct rte_eth_dev *dev, 4125 uint32_t counter __rte_unused) 4126 { 4127 DRV_LOG(ERR, "port %u counter free is not supported.", 4128 dev->data->port_id); 4129 } 4130 4131 int 4132 flow_null_counter_query(struct rte_eth_dev *dev, 4133 uint32_t counter __rte_unused, 4134 bool clear __rte_unused, 4135 uint64_t *pkts __rte_unused, 4136 uint64_t *bytes __rte_unused, 4137 void **action __rte_unused) 4138 { 4139 DRV_LOG(ERR, "port %u counter query is not supported.", 4140 dev->data->port_id); 4141 return -ENOTSUP; 4142 } 4143 4144 /* Void driver to protect from null pointer reference. */ 4145 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = { 4146 .validate = flow_null_validate, 4147 .prepare = flow_null_prepare, 4148 .translate = flow_null_translate, 4149 .apply = flow_null_apply, 4150 .remove = flow_null_remove, 4151 .destroy = flow_null_destroy, 4152 .query = flow_null_query, 4153 .sync_domain = flow_null_sync_domain, 4154 .get_aged_flows = flow_null_get_aged_flows, 4155 .counter_alloc = flow_null_counter_allocate, 4156 .counter_free = flow_null_counter_free, 4157 .counter_query = flow_null_counter_query 4158 }; 4159 4160 /** 4161 * Select flow driver type according to flow attributes and device 4162 * configuration. 4163 * 4164 * @param[in] dev 4165 * Pointer to the dev structure. 4166 * @param[in] attr 4167 * Pointer to the flow attributes. 4168 * 4169 * @return 4170 * flow driver type, MLX5_FLOW_TYPE_MAX otherwise. 4171 */ 4172 static enum mlx5_flow_drv_type 4173 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) 4174 { 4175 struct mlx5_priv *priv = dev->data->dev_private; 4176 /* The OS can determine first a specific flow type (DV, VERBS) */ 4177 enum mlx5_flow_drv_type type = mlx5_flow_os_get_type(); 4178 4179 if (type != MLX5_FLOW_TYPE_MAX) 4180 return type; 4181 /* 4182 * Currently when dv_flow_en == 2, only HW steering engine is 4183 * supported. New engines can also be chosen here if ready. 4184 */ 4185 if (priv->sh->config.dv_flow_en == 2) 4186 return MLX5_FLOW_TYPE_HW; 4187 if (!attr) 4188 return MLX5_FLOW_TYPE_MIN; 4189 /* If no OS specific type - continue with DV/VERBS selection */ 4190 if (attr->transfer && priv->sh->config.dv_esw_en) 4191 type = MLX5_FLOW_TYPE_DV; 4192 if (!attr->transfer) 4193 type = priv->sh->config.dv_flow_en ? MLX5_FLOW_TYPE_DV : 4194 MLX5_FLOW_TYPE_VERBS; 4195 return type; 4196 } 4197 4198 #define flow_get_drv_ops(type) flow_drv_ops[type] 4199 4200 /** 4201 * Flow driver validation API. This abstracts calling driver specific functions. 4202 * The type of flow driver is determined according to flow attributes. 4203 * 4204 * @param[in] dev 4205 * Pointer to the dev structure. 4206 * @param[in] attr 4207 * Pointer to the flow attributes. 4208 * @param[in] items 4209 * Pointer to the list of items. 4210 * @param[in] actions 4211 * Pointer to the list of actions. 4212 * @param[in] external 4213 * This flow rule is created by request external to PMD. 4214 * @param[in] hairpin 4215 * Number of hairpin TX actions, 0 means classic flow. 
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static inline int
flow_drv_validate(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item items[],
		  const struct rte_flow_action actions[],
		  bool external, int hairpin, struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops;
	enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);

	fops = flow_get_drv_ops(type);
	return fops->validate(dev, attr, items, actions, external,
			      hairpin, error);
}

/**
 * Flow driver preparation API. This abstracts calling driver specific
 * functions. Parent flow (rte_flow) should have driver type (drv_type). It
 * calculates the size of memory required for device flow, allocates the
 * memory, initializes the device flow and returns the pointer.
 *
 * @note
 *   This function initializes the device flow structure, such as dv or verbs
 *   in struct mlx5_flow. However, it is the caller's responsibility to
 *   initialize the rest. For example, adding the returned device flow to the
 *   flow->dev_flow list and setting a backward reference to the flow should
 *   be done outside of this function. The layers field is not filled either.
 *
 * @param[in] dev
 *   Pointer to the dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[in] flow_idx
 *   Memory pool index of this flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
 */
static inline struct mlx5_flow *
flow_drv_prepare(struct rte_eth_dev *dev,
		 const struct rte_flow *flow,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 uint32_t flow_idx,
		 struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops;
	enum mlx5_flow_drv_type type = flow->drv_type;
	struct mlx5_flow *mlx5_flow = NULL;

	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
	fops = flow_get_drv_ops(type);
	mlx5_flow = fops->prepare(dev, attr, items, actions, error);
	if (mlx5_flow)
		mlx5_flow->flow_idx = flow_idx;
	return mlx5_flow;
}

/**
 * Flow driver translation API. This abstracts calling driver specific
 * functions. Parent flow (rte_flow) should have driver type (drv_type). It
 * translates a generic flow into a driver flow. flow_drv_prepare() must
 * precede.
 *
 * @note
 *   dev_flow->layers could be filled as a result of parsing during
 *   translation if needed by flow_drv_apply(). dev_flow->flow->actions can
 *   also be filled if necessary. As a flow can have multiple dev_flows by RSS
 *   flow expansion, flow->actions could be overwritten even though all the
 *   expanded dev_flows have the same actions.
 *
 * @param[in] dev
 *   Pointer to the rte dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5 flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
4308 * @param[in] actions 4309 * Pointer to the list of actions. 4310 * @param[out] error 4311 * Pointer to the error structure. 4312 * 4313 * @return 4314 * 0 on success, a negative errno value otherwise and rte_errno is set. 4315 */ 4316 static inline int 4317 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow, 4318 const struct rte_flow_attr *attr, 4319 const struct rte_flow_item items[], 4320 const struct rte_flow_action actions[], 4321 struct rte_flow_error *error) 4322 { 4323 const struct mlx5_flow_driver_ops *fops; 4324 enum mlx5_flow_drv_type type = dev_flow->flow->drv_type; 4325 4326 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 4327 fops = flow_get_drv_ops(type); 4328 return fops->translate(dev, dev_flow, attr, items, actions, error); 4329 } 4330 4331 /** 4332 * Flow driver apply API. This abstracts calling driver specific functions. 4333 * Parent flow (rte_flow) should have driver type (drv_type). It applies 4334 * translated driver flows on to device. flow_drv_translate() must precede. 4335 * 4336 * @param[in] dev 4337 * Pointer to Ethernet device structure. 4338 * @param[in, out] flow 4339 * Pointer to flow structure. 4340 * @param[out] error 4341 * Pointer to error structure. 4342 * 4343 * @return 4344 * 0 on success, a negative errno value otherwise and rte_errno is set. 4345 */ 4346 static inline int 4347 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, 4348 struct rte_flow_error *error) 4349 { 4350 const struct mlx5_flow_driver_ops *fops; 4351 enum mlx5_flow_drv_type type = flow->drv_type; 4352 4353 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 4354 fops = flow_get_drv_ops(type); 4355 return fops->apply(dev, flow, error); 4356 } 4357 4358 /** 4359 * Flow driver destroy API. This abstracts calling driver specific functions. 4360 * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow 4361 * on device and releases resources of the flow. 4362 * 4363 * @param[in] dev 4364 * Pointer to Ethernet device. 4365 * @param[in, out] flow 4366 * Pointer to flow structure. 4367 */ 4368 static inline void 4369 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) 4370 { 4371 const struct mlx5_flow_driver_ops *fops; 4372 enum mlx5_flow_drv_type type = flow->drv_type; 4373 4374 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 4375 fops = flow_get_drv_ops(type); 4376 fops->destroy(dev, flow); 4377 } 4378 4379 /** 4380 * Flow driver find RSS policy tbl API. This abstracts calling driver 4381 * specific functions. Parent flow (rte_flow) should have driver 4382 * type (drv_type). It will find the RSS policy table that has the rss_desc. 4383 * 4384 * @param[in] dev 4385 * Pointer to Ethernet device. 4386 * @param[in, out] flow 4387 * Pointer to flow structure. 4388 * @param[in] policy 4389 * Pointer to meter policy table. 
 * @param[in] rss_desc
 *   Pointer to the RSS descriptors (one per meter color).
 */
static struct mlx5_flow_meter_sub_policy *
flow_drv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct mlx5_flow_meter_policy *policy,
		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
{
	const struct mlx5_flow_driver_ops *fops;
	enum mlx5_flow_drv_type type = flow->drv_type;

	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
	fops = flow_get_drv_ops(type);
	return fops->meter_sub_policy_rss_prepare(dev, policy, rss_desc);
}

/**
 * Flow driver color tag rule API. This abstracts calling driver
 * specific functions. Parent flow (rte_flow) should have driver
 * type (drv_type). It will create the color tag rules in hierarchy meter.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] fm
 *   Pointer to flow meter structure.
 * @param[in] src_port
 *   The src port this extra rule should use.
 * @param[in] item
 *   The src port id match item.
 * @param[out] error
 *   Pointer to error structure.
 */
static int
flow_drv_mtr_hierarchy_rule_create(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct mlx5_flow_meter_info *fm,
		int32_t src_port,
		const struct rte_flow_item *item,
		struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops;
	enum mlx5_flow_drv_type type = flow->drv_type;

	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
	fops = flow_get_drv_ops(type);
	return fops->meter_hierarchy_rule_create(dev, fm,
						 src_port, item, error);
}

/**
 * Get RSS action from the action list.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] actions
 *   Pointer to the list of actions.
 *
 * @return
 *   Pointer to the RSS action if it exists, NULL otherwise.
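 *
 * Note that the RSS action is also searched for inside SAMPLE actions and,
 * for non-default meter policies, inside the policy's green/yellow fate
 * actions (shared RSS), as implemented below.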
4454 */ 4455 static const struct rte_flow_action_rss* 4456 flow_get_rss_action(struct rte_eth_dev *dev, 4457 const struct rte_flow_action actions[]) 4458 { 4459 struct mlx5_priv *priv = dev->data->dev_private; 4460 const struct rte_flow_action_rss *rss = NULL; 4461 struct mlx5_meter_policy_action_container *acg; 4462 struct mlx5_meter_policy_action_container *acy; 4463 4464 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 4465 switch (actions->type) { 4466 case RTE_FLOW_ACTION_TYPE_RSS: 4467 rss = actions->conf; 4468 break; 4469 case RTE_FLOW_ACTION_TYPE_SAMPLE: 4470 { 4471 const struct rte_flow_action_sample *sample = 4472 actions->conf; 4473 const struct rte_flow_action *act = sample->actions; 4474 for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) 4475 if (act->type == RTE_FLOW_ACTION_TYPE_RSS) 4476 rss = act->conf; 4477 break; 4478 } 4479 case RTE_FLOW_ACTION_TYPE_METER: 4480 { 4481 uint32_t mtr_idx; 4482 struct mlx5_flow_meter_info *fm; 4483 struct mlx5_flow_meter_policy *policy; 4484 const struct rte_flow_action_meter *mtr = actions->conf; 4485 4486 fm = mlx5_flow_meter_find(priv, mtr->mtr_id, &mtr_idx); 4487 if (fm && !fm->def_policy) { 4488 policy = mlx5_flow_meter_policy_find(dev, 4489 fm->policy_id, NULL); 4490 MLX5_ASSERT(policy); 4491 if (policy->is_hierarchy) { 4492 policy = 4493 mlx5_flow_meter_hierarchy_get_final_policy(dev, 4494 policy); 4495 if (!policy) 4496 return NULL; 4497 } 4498 if (policy->is_rss) { 4499 acg = 4500 &policy->act_cnt[RTE_COLOR_GREEN]; 4501 acy = 4502 &policy->act_cnt[RTE_COLOR_YELLOW]; 4503 if (acg->fate_action == 4504 MLX5_FLOW_FATE_SHARED_RSS) 4505 rss = acg->rss->conf; 4506 else if (acy->fate_action == 4507 MLX5_FLOW_FATE_SHARED_RSS) 4508 rss = acy->rss->conf; 4509 } 4510 } 4511 break; 4512 } 4513 default: 4514 break; 4515 } 4516 } 4517 return rss; 4518 } 4519 4520 /** 4521 * Get ASO age action by index. 4522 * 4523 * @param[in] dev 4524 * Pointer to the Ethernet device structure. 4525 * @param[in] age_idx 4526 * Index to the ASO age action. 4527 * 4528 * @return 4529 * The specified ASO age action. 4530 */ 4531 struct mlx5_aso_age_action* 4532 flow_aso_age_get_by_idx(struct rte_eth_dev *dev, uint32_t age_idx) 4533 { 4534 uint16_t pool_idx = age_idx & UINT16_MAX; 4535 uint16_t offset = (age_idx >> 16) & UINT16_MAX; 4536 struct mlx5_priv *priv = dev->data->dev_private; 4537 struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; 4538 struct mlx5_aso_age_pool *pool; 4539 4540 rte_rwlock_read_lock(&mng->resize_rwl); 4541 pool = mng->pools[pool_idx]; 4542 rte_rwlock_read_unlock(&mng->resize_rwl); 4543 return &pool->actions[offset - 1]; 4544 } 4545 4546 /* maps indirect action to translated direct in some actions array */ 4547 struct mlx5_translated_action_handle { 4548 struct rte_flow_action_handle *action; /**< Indirect action handle. */ 4549 int index; /**< Index in related array of rte_flow_action. */ 4550 }; 4551 4552 /** 4553 * Translates actions of type RTE_FLOW_ACTION_TYPE_INDIRECT to related 4554 * direct action if translation possible. 4555 * This functionality used to run same execution path for both direct and 4556 * indirect actions on flow create. All necessary preparations for indirect 4557 * action handling should be performed on *handle* actions list returned 4558 * from this call. 4559 * 4560 * @param[in] dev 4561 * Pointer to Ethernet device. 4562 * @param[in] actions 4563 * List of actions to translate. 4564 * @param[out] handle 4565 * List to store translated indirect action object handles. 
4566 * @param[in, out] indir_n 4567 * Size of *handle* array. On return should be updated with number of 4568 * indirect actions retrieved from the *actions* list. 4569 * @param[out] translated_actions 4570 * List of actions where all indirect actions were translated to direct 4571 * if possible. NULL if no translation took place. 4572 * @param[out] error 4573 * Pointer to the error structure. 4574 * 4575 * @return 4576 * 0 on success, a negative errno value otherwise and rte_errno is set. 4577 */ 4578 static int 4579 flow_action_handles_translate(struct rte_eth_dev *dev, 4580 const struct rte_flow_action actions[], 4581 struct mlx5_translated_action_handle *handle, 4582 int *indir_n, 4583 struct rte_flow_action **translated_actions, 4584 struct rte_flow_error *error) 4585 { 4586 struct mlx5_priv *priv = dev->data->dev_private; 4587 struct rte_flow_action *translated = NULL; 4588 size_t actions_size; 4589 int n; 4590 int copied_n = 0; 4591 struct mlx5_translated_action_handle *handle_end = NULL; 4592 4593 for (n = 0; actions[n].type != RTE_FLOW_ACTION_TYPE_END; n++) { 4594 if (actions[n].type != RTE_FLOW_ACTION_TYPE_INDIRECT) 4595 continue; 4596 if (copied_n == *indir_n) { 4597 return rte_flow_error_set 4598 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_NUM, 4599 NULL, "too many shared actions"); 4600 } 4601 rte_memcpy(&handle[copied_n].action, &actions[n].conf, 4602 sizeof(actions[n].conf)); 4603 handle[copied_n].index = n; 4604 copied_n++; 4605 } 4606 n++; 4607 *indir_n = copied_n; 4608 if (!copied_n) 4609 return 0; 4610 actions_size = sizeof(struct rte_flow_action) * n; 4611 translated = mlx5_malloc(MLX5_MEM_ZERO, actions_size, 0, SOCKET_ID_ANY); 4612 if (!translated) { 4613 rte_errno = ENOMEM; 4614 return -ENOMEM; 4615 } 4616 memcpy(translated, actions, actions_size); 4617 for (handle_end = handle + copied_n; handle < handle_end; handle++) { 4618 struct mlx5_shared_action_rss *shared_rss; 4619 uint32_t act_idx = (uint32_t)(uintptr_t)handle->action; 4620 uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET; 4621 uint32_t idx = act_idx & 4622 ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1); 4623 4624 switch (type) { 4625 case MLX5_INDIRECT_ACTION_TYPE_RSS: 4626 shared_rss = mlx5_ipool_get 4627 (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx); 4628 translated[handle->index].type = 4629 RTE_FLOW_ACTION_TYPE_RSS; 4630 translated[handle->index].conf = 4631 &shared_rss->origin; 4632 break; 4633 case MLX5_INDIRECT_ACTION_TYPE_COUNT: 4634 translated[handle->index].type = 4635 (enum rte_flow_action_type) 4636 MLX5_RTE_FLOW_ACTION_TYPE_COUNT; 4637 translated[handle->index].conf = (void *)(uintptr_t)idx; 4638 break; 4639 case MLX5_INDIRECT_ACTION_TYPE_METER_MARK: 4640 translated[handle->index].type = 4641 (enum rte_flow_action_type) 4642 MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK; 4643 translated[handle->index].conf = (void *)(uintptr_t)idx; 4644 break; 4645 case MLX5_INDIRECT_ACTION_TYPE_AGE: 4646 if (priv->sh->flow_hit_aso_en) { 4647 translated[handle->index].type = 4648 (enum rte_flow_action_type) 4649 MLX5_RTE_FLOW_ACTION_TYPE_AGE; 4650 translated[handle->index].conf = 4651 (void *)(uintptr_t)idx; 4652 break; 4653 } 4654 /* Fall-through */ 4655 case MLX5_INDIRECT_ACTION_TYPE_CT: 4656 if (priv->sh->ct_aso_en) { 4657 translated[handle->index].type = 4658 RTE_FLOW_ACTION_TYPE_CONNTRACK; 4659 translated[handle->index].conf = 4660 (void *)(uintptr_t)idx; 4661 break; 4662 } 4663 /* Fall-through */ 4664 default: 4665 mlx5_free(translated); 4666 return rte_flow_error_set 4667 (error, EINVAL, 
				 RTE_FLOW_ERROR_TYPE_ACTION,
				 NULL, "invalid indirect action type");
		}
	}
	*translated_actions = translated;
	return 0;
}

/**
 * Get Shared RSS action from the action list.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] handle
 *   Pointer to the list of translated action handles.
 * @param[in] shared_n
 *   Actions list length.
 *
 * @return
 *   The shared RSS action ID if it exists, 0 otherwise.
 */
static uint32_t
flow_get_shared_rss_action(struct rte_eth_dev *dev,
			   struct mlx5_translated_action_handle *handle,
			   int shared_n)
{
	struct mlx5_translated_action_handle *handle_end;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_shared_action_rss *shared_rss;

	for (handle_end = handle + shared_n; handle < handle_end; handle++) {
		uint32_t act_idx = (uint32_t)(uintptr_t)handle->action;
		uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
		uint32_t idx = act_idx &
			       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
		switch (type) {
		case MLX5_INDIRECT_ACTION_TYPE_RSS:
			shared_rss = mlx5_ipool_get
				(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
				 idx);
			rte_atomic_fetch_add_explicit(&shared_rss->refcnt, 1,
						      rte_memory_order_relaxed);
			return idx;
		default:
			break;
		}
	}
	return 0;
}

static unsigned int
find_graph_root(uint32_t rss_level)
{
	return rss_level < 2 ? MLX5_EXPANSION_ROOT :
			       MLX5_EXPANSION_ROOT_OUTER;
}

/**
 * Get layer flags from the prefix flow.
 *
 * Some flows may be split into several subflows: the prefix subflow gets the
 * match items and the suffix subflow gets the actions.
 * Some actions need the layer flags detected from the user-defined match
 * items to fill in their details.
 * This function lets the suffix flow obtain the item layer flags from the
 * prefix subflow.
 *
 * @param[in] dev_flow
 *   Pointer to the created prefix subflow.
 *
 * @return
 *   The layers obtained from the prefix subflow.
 */
static inline uint64_t
flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow)
{
	uint64_t layers = 0;

	/*
	 * Layer bits could be cached locally, but usually the compiler
	 * optimizes such accesses on its own.
	 * If no decap actions, use the layers directly.
	 */
	if (!(dev_flow->act_flags & MLX5_FLOW_ACTION_DECAP))
		return dev_flow->handle->layers;
	/* Convert L3 layers with decap action. */
	if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
	else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
	/* Convert L4 layers with decap action. */
	if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
		layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
	else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
		layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
	return layers;
}

/**
 * Get metadata split action information.
 *
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] qrss
 *   Pointer to the return pointer; set to the QUEUE/RSS action if one is
 *   found in the list.
4776 * @param[out] encap_idx 4777 * Pointer to the index of the encap action if exists, otherwise the last 4778 * action index. 4779 * 4780 * @return 4781 * Total number of actions. 4782 */ 4783 static int 4784 flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[], 4785 const struct rte_flow_action **qrss, 4786 int *encap_idx) 4787 { 4788 const struct rte_flow_action_raw_encap *raw_encap; 4789 int actions_n = 0; 4790 int raw_decap_idx = -1; 4791 4792 *encap_idx = -1; 4793 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 4794 switch (actions->type) { 4795 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 4796 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 4797 *encap_idx = actions_n; 4798 break; 4799 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 4800 raw_decap_idx = actions_n; 4801 break; 4802 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 4803 raw_encap = actions->conf; 4804 if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) 4805 *encap_idx = raw_decap_idx != -1 ? 4806 raw_decap_idx : actions_n; 4807 break; 4808 case RTE_FLOW_ACTION_TYPE_QUEUE: 4809 case RTE_FLOW_ACTION_TYPE_RSS: 4810 *qrss = actions; 4811 break; 4812 default: 4813 break; 4814 } 4815 actions_n++; 4816 } 4817 if (*encap_idx == -1) 4818 *encap_idx = actions_n; 4819 /* Count RTE_FLOW_ACTION_TYPE_END. */ 4820 return actions_n + 1; 4821 } 4822 4823 /** 4824 * Check if the action will change packet. 4825 * 4826 * @param dev 4827 * Pointer to Ethernet device. 4828 * @param[in] type 4829 * action type. 4830 * 4831 * @return 4832 * true if action will change packet, false otherwise. 4833 */ 4834 static bool flow_check_modify_action_type(struct rte_eth_dev *dev, 4835 enum rte_flow_action_type type) 4836 { 4837 struct mlx5_priv *priv = dev->data->dev_private; 4838 4839 switch (type) { 4840 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: 4841 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: 4842 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: 4843 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: 4844 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: 4845 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: 4846 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: 4847 case RTE_FLOW_ACTION_TYPE_SET_TP_DST: 4848 case RTE_FLOW_ACTION_TYPE_DEC_TTL: 4849 case RTE_FLOW_ACTION_TYPE_SET_TTL: 4850 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ: 4851 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ: 4852 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK: 4853 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK: 4854 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP: 4855 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: 4856 case RTE_FLOW_ACTION_TYPE_SET_META: 4857 case RTE_FLOW_ACTION_TYPE_SET_TAG: 4858 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: 4859 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 4860 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 4861 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: 4862 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 4863 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: 4864 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 4865 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: 4866 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 4867 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 4868 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD: 4869 return true; 4870 case RTE_FLOW_ACTION_TYPE_FLAG: 4871 case RTE_FLOW_ACTION_TYPE_MARK: 4872 if (priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && 4873 priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_META32_HWS) 4874 return true; 4875 else 4876 return false; 4877 default: 4878 return false; 4879 } 4880 } 4881 4882 /** 4883 * Check meter action from the action list. 4884 * 4885 * @param dev 4886 * Pointer to Ethernet device. 4887 * @param[in] actions 4888 * Pointer to the list of actions. 
4889 * @param[out] has_mtr 4890 * Pointer to the meter exist flag. 4891 * @param[out] has_modify 4892 * Pointer to the flag showing there's packet change action. 4893 * @param[out] meter_id 4894 * Pointer to the meter id. 4895 * 4896 * @return 4897 * Total number of actions. 4898 */ 4899 static int 4900 flow_check_meter_action(struct rte_eth_dev *dev, 4901 const struct rte_flow_action actions[], 4902 bool *has_mtr, bool *has_modify, uint32_t *meter_id) 4903 { 4904 const struct rte_flow_action_meter *mtr = NULL; 4905 int actions_n = 0; 4906 4907 MLX5_ASSERT(has_mtr); 4908 *has_mtr = false; 4909 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 4910 switch (actions->type) { 4911 case RTE_FLOW_ACTION_TYPE_METER: 4912 mtr = actions->conf; 4913 *meter_id = mtr->mtr_id; 4914 *has_mtr = true; 4915 break; 4916 default: 4917 break; 4918 } 4919 if (!*has_mtr) 4920 *has_modify |= flow_check_modify_action_type(dev, 4921 actions->type); 4922 actions_n++; 4923 } 4924 /* Count RTE_FLOW_ACTION_TYPE_END. */ 4925 return actions_n + 1; 4926 } 4927 4928 /** 4929 * Check if the flow should be split due to hairpin. 4930 * The reason for the split is that in current HW we can't 4931 * support encap and push-vlan on Rx, so if a flow contains 4932 * these actions we move it to Tx. 4933 * 4934 * @param dev 4935 * Pointer to Ethernet device. 4936 * @param[in] attr 4937 * Flow rule attributes. 4938 * @param[in] actions 4939 * Associated actions (list terminated by the END action). 4940 * 4941 * @return 4942 * > 0 the number of actions and the flow should be split, 4943 * 0 when no split required. 4944 */ 4945 static int 4946 flow_check_hairpin_split(struct rte_eth_dev *dev, 4947 const struct rte_flow_attr *attr, 4948 const struct rte_flow_action actions[]) 4949 { 4950 int queue_action = 0; 4951 int action_n = 0; 4952 int split = 0; 4953 int push_vlan = 0; 4954 const struct rte_flow_action_queue *queue; 4955 const struct rte_flow_action_rss *rss; 4956 const struct rte_flow_action_raw_encap *raw_encap; 4957 const struct rte_eth_hairpin_conf *conf; 4958 4959 if (!attr->ingress) 4960 return 0; 4961 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 4962 if (actions->type == RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN) 4963 push_vlan = 1; 4964 switch (actions->type) { 4965 case RTE_FLOW_ACTION_TYPE_QUEUE: 4966 queue = actions->conf; 4967 if (queue == NULL) 4968 return 0; 4969 conf = mlx5_rxq_get_hairpin_conf(dev, queue->index); 4970 if (conf == NULL || conf->tx_explicit != 0) 4971 return 0; 4972 queue_action = 1; 4973 action_n++; 4974 break; 4975 case RTE_FLOW_ACTION_TYPE_RSS: 4976 rss = actions->conf; 4977 if (rss == NULL || rss->queue_num == 0) 4978 return 0; 4979 conf = mlx5_rxq_get_hairpin_conf(dev, rss->queue[0]); 4980 if (conf == NULL || conf->tx_explicit != 0) 4981 return 0; 4982 queue_action = 1; 4983 action_n++; 4984 break; 4985 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 4986 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 4987 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 4988 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: 4989 split++; 4990 action_n++; 4991 break; 4992 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 4993 if (push_vlan) 4994 split++; 4995 action_n++; 4996 break; 4997 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 4998 raw_encap = actions->conf; 4999 if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) 5000 split++; 5001 action_n++; 5002 break; 5003 default: 5004 action_n++; 5005 break; 5006 } 5007 } 5008 if (split && queue_action) 5009 return action_n; 5010 return 0; 5011 } 5012 5013 int 5014 
flow_dv_mreg_match_cb(void *tool_ctx __rte_unused, 5015 struct mlx5_list_entry *entry, void *cb_ctx) 5016 { 5017 struct mlx5_flow_cb_ctx *ctx = cb_ctx; 5018 struct mlx5_flow_mreg_copy_resource *mcp_res = 5019 container_of(entry, typeof(*mcp_res), hlist_ent); 5020 5021 return mcp_res->mark_id != *(uint32_t *)(ctx->data); 5022 } 5023 5024 struct mlx5_list_entry * 5025 flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx) 5026 { 5027 struct rte_eth_dev *dev = tool_ctx; 5028 struct mlx5_priv *priv = dev->data->dev_private; 5029 struct mlx5_flow_cb_ctx *ctx = cb_ctx; 5030 struct mlx5_flow_mreg_copy_resource *mcp_res; 5031 struct rte_flow_error *error = ctx->error; 5032 uint32_t idx = 0; 5033 int ret; 5034 uint32_t mark_id = *(uint32_t *)(ctx->data); 5035 struct rte_flow_attr attr = { 5036 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, 5037 .ingress = 1, 5038 }; 5039 struct mlx5_rte_flow_item_tag tag_spec = { 5040 .data = mark_id, 5041 }; 5042 struct rte_flow_item items[] = { 5043 [1] = { .type = RTE_FLOW_ITEM_TYPE_END, }, 5044 }; 5045 struct rte_flow_action_mark ftag = { 5046 .id = mark_id, 5047 }; 5048 struct mlx5_flow_action_copy_mreg cp_mreg = { 5049 .dst = REG_B, 5050 .src = REG_NON, 5051 }; 5052 struct rte_flow_action_jump jump = { 5053 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, 5054 }; 5055 struct rte_flow_action actions[] = { 5056 [3] = { .type = RTE_FLOW_ACTION_TYPE_END, }, 5057 }; 5058 5059 /* Fill the register fields in the flow. */ 5060 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); 5061 if (ret < 0) 5062 return NULL; 5063 tag_spec.id = ret; 5064 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error); 5065 if (ret < 0) 5066 return NULL; 5067 cp_mreg.src = ret; 5068 /* Provide the full width of FLAG specific value. */ 5069 if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT)) 5070 tag_spec.data = MLX5_FLOW_MARK_DEFAULT; 5071 /* Build a new flow. */ 5072 if (mark_id != MLX5_DEFAULT_COPY_ID) { 5073 items[0] = (struct rte_flow_item){ 5074 .type = (enum rte_flow_item_type) 5075 MLX5_RTE_FLOW_ITEM_TYPE_TAG, 5076 .spec = &tag_spec, 5077 }; 5078 items[1] = (struct rte_flow_item){ 5079 .type = RTE_FLOW_ITEM_TYPE_END, 5080 }; 5081 actions[0] = (struct rte_flow_action){ 5082 .type = (enum rte_flow_action_type) 5083 MLX5_RTE_FLOW_ACTION_TYPE_MARK, 5084 .conf = &ftag, 5085 }; 5086 actions[1] = (struct rte_flow_action){ 5087 .type = (enum rte_flow_action_type) 5088 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 5089 .conf = &cp_mreg, 5090 }; 5091 actions[2] = (struct rte_flow_action){ 5092 .type = RTE_FLOW_ACTION_TYPE_JUMP, 5093 .conf = &jump, 5094 }; 5095 actions[3] = (struct rte_flow_action){ 5096 .type = RTE_FLOW_ACTION_TYPE_END, 5097 }; 5098 } else { 5099 /* Default rule, wildcard match. */ 5100 attr.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR; 5101 items[0] = (struct rte_flow_item){ 5102 .type = RTE_FLOW_ITEM_TYPE_END, 5103 }; 5104 actions[0] = (struct rte_flow_action){ 5105 .type = (enum rte_flow_action_type) 5106 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 5107 .conf = &cp_mreg, 5108 }; 5109 actions[1] = (struct rte_flow_action){ 5110 .type = RTE_FLOW_ACTION_TYPE_JUMP, 5111 .conf = &jump, 5112 }; 5113 actions[2] = (struct rte_flow_action){ 5114 .type = RTE_FLOW_ACTION_TYPE_END, 5115 }; 5116 } 5117 /* Build a new entry. */ 5118 mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx); 5119 if (!mcp_res) { 5120 rte_errno = ENOMEM; 5121 return NULL; 5122 } 5123 mcp_res->idx = idx; 5124 mcp_res->mark_id = mark_id; 5125 /* 5126 * The copy Flows are not included in any list. 
 They are
	 * referenced from other flows and cannot be applied,
	 * removed or deleted in arbitrary order by traversing
	 * the list.
	 */
	mcp_res->rix_flow = mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_MCP,
					&attr, items, actions, false, error);
	if (!mcp_res->rix_flow) {
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], idx);
		return NULL;
	}
	return &mcp_res->hlist_ent;
}

struct mlx5_list_entry *
flow_dv_mreg_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
		      void *cb_ctx __rte_unused)
{
	struct rte_eth_dev *dev = tool_ctx;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_mreg_copy_resource *mcp_res;
	uint32_t idx = 0;

	mcp_res = mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
	if (!mcp_res) {
		rte_errno = ENOMEM;
		return NULL;
	}
	memcpy(mcp_res, oentry, sizeof(*mcp_res));
	mcp_res->idx = idx;
	return &mcp_res->hlist_ent;
}

void
flow_dv_mreg_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
	struct mlx5_flow_mreg_copy_resource *mcp_res =
			container_of(entry, typeof(*mcp_res), hlist_ent);
	struct rte_eth_dev *dev = tool_ctx;
	struct mlx5_priv *priv = dev->data->dev_private;

	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
}

/**
 * Add a flow of copying flow metadata registers in RX_CP_TBL.
 *
 * As mark_id is unique, if there's already a registered flow for the mark_id,
 * return the resource after increasing its reference counter. Otherwise,
 * create the resource (mcp_res) and flow.
 *
 * The flow looks like:
 * - If ingress port is ANY and reg_c[1] is mark_id,
 *   flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
 *
 * For the default flow (zero mark_id), the flow looks like:
 * - If ingress port is ANY,
 *   reg_b := reg_c[0] and jump to RX_ACT_TBL.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mark_id
 *   ID of MARK action, zero means default flow for META.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   Associated resource on success, NULL otherwise and rte_errno is set.
 */
static struct mlx5_flow_mreg_copy_resource *
flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
			  struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_list_entry *entry;
	struct mlx5_flow_cb_ctx ctx = {
		.dev = dev,
		.error = error,
		.data = &mark_id,
	};

	/* Check if already registered. */
	MLX5_ASSERT(priv->sh->mreg_cp_tbl);
	entry = mlx5_hlist_register(priv->sh->mreg_cp_tbl, mark_id, &ctx);
	if (!entry)
		return NULL;
	return container_of(entry, struct mlx5_flow_mreg_copy_resource,
			    hlist_ent);
}

void
flow_dv_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
	struct mlx5_flow_mreg_copy_resource *mcp_res =
			container_of(entry, typeof(*mcp_res), hlist_ent);
	struct rte_eth_dev *dev = tool_ctx;
	struct mlx5_priv *priv = dev->data->dev_private;

	MLX5_ASSERT(mcp_res->rix_flow);
	mlx5_flow_list_destroy(dev, MLX5_FLOW_TYPE_MCP, mcp_res->rix_flow);
	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
}

/**
 * Release flow in RX_CP_TBL.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param flow
 *   Parent flow for which the copying is provided.
 */
static void
flow_mreg_del_copy_action(struct rte_eth_dev *dev,
			  struct rte_flow *flow)
{
	struct mlx5_flow_mreg_copy_resource *mcp_res;
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!flow->rix_mreg_copy)
		return;
	mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
				 flow->rix_mreg_copy);
	if (!mcp_res || !priv->sh->mreg_cp_tbl)
		return;
	MLX5_ASSERT(mcp_res->rix_flow);
	mlx5_hlist_unregister(priv->sh->mreg_cp_tbl, &mcp_res->hlist_ent);
	flow->rix_mreg_copy = 0;
}

/**
 * Remove the default copy action from RX_CP_TBL.
 *
 * This function is called from mlx5_dev_start(). Thread safety is not
 * guaranteed.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
{
	struct mlx5_list_entry *entry;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_cb_ctx ctx;
	uint32_t mark_id;

	/* Check if default flow is registered. */
	if (!priv->sh->mreg_cp_tbl)
		return;
	mark_id = MLX5_DEFAULT_COPY_ID;
	ctx.data = &mark_id;
	entry = mlx5_hlist_lookup(priv->sh->mreg_cp_tbl, mark_id, &ctx);
	if (!entry)
		return;
	mlx5_hlist_unregister(priv->sh->mreg_cp_tbl, entry);
}

/**
 * Add the default copy action in RX_CP_TBL.
 *
 * This function is called from mlx5_dev_start(). Thread safety is not
 * guaranteed.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 for success, negative value otherwise and rte_errno is set.
 */
static int
flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
				  struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_mreg_copy_resource *mcp_res;
	struct mlx5_flow_cb_ctx ctx;
	uint32_t mark_id;

	/* Check whether extensive metadata feature is engaged. */
	if (!priv->sh->config.dv_flow_en ||
	    priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
	    !mlx5_flow_ext_mreg_supported(dev) ||
	    !priv->sh->dv_regc0_mask)
		return 0;
	/*
	 * Adding the default mreg copy flow may happen multiple times,
	 * but it is removed only once on stop, so avoid registering
	 * it twice.
	 */
	mark_id = MLX5_DEFAULT_COPY_ID;
	ctx.data = &mark_id;
	if (mlx5_hlist_lookup(priv->sh->mreg_cp_tbl, mark_id, &ctx))
		return 0;
	mcp_res = flow_mreg_add_copy_action(dev, mark_id, error);
	if (!mcp_res)
		return -rte_errno;
	return 0;
}

/**
 * Add a flow of copying flow metadata registers in RX_CP_TBL.
 *
 * All flows having a Q/RSS action should be split by
 * flow_mreg_split_qrss_prep() so that they pass through RX_CP_TBL. A flow in
 * the RX_CP_TBL performs the following:
 * - CQE->flow_tag := reg_c[1] (MARK)
 * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
 * As the CQE's flow_tag is not a register, it cannot simply be copied from
 * reg_c[1]; instead, there should be a flow per each MARK ID set by the MARK
 * action.
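 *
 * For example (illustrative values only), if an application rule carries
 * MARK id 0xCAFE, the companion RX_CP_TBL rule is conceptually:
 *   match reg_c[1] == 0xCAFE ->
 *     flow_tag := 0xCAFE, reg_b := reg_c[0], jump to RX_ACT_TBL.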
 *
 * For the aforementioned reason, if there's a MARK action in the flow's action
 * list, a corresponding flow should be added to the RX_CP_TBL in order to copy
 * the MARK ID to CQE's flow_tag like,
 * - If reg_c[1] is mark_id,
 *   flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
 *
 * For SET_META action which stores value in reg_c[0], as the destination is
 * also a flow metadata register (reg_b), adding a default flow is enough. Zero
 * MARK ID means the default flow. The default flow looks like,
 * - For all flows, reg_b := reg_c[0] and jump to RX_ACT_TBL.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param flow
 *   Pointer to flow structure.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static int
flow_mreg_update_copy_table(struct rte_eth_dev *dev,
			    struct rte_flow *flow,
			    const struct rte_flow_action *actions,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_sh_config *config = &priv->sh->config;
	struct mlx5_flow_mreg_copy_resource *mcp_res;
	const struct rte_flow_action_mark *mark;

	/* Check whether extensive metadata feature is engaged. */
	if (!config->dv_flow_en ||
	    config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
	    !mlx5_flow_ext_mreg_supported(dev) ||
	    !priv->sh->dv_regc0_mask)
		return 0;
	/* Find MARK action. */
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_FLAG:
			mcp_res = flow_mreg_add_copy_action
				(dev, MLX5_FLOW_MARK_DEFAULT, error);
			if (!mcp_res)
				return -rte_errno;
			flow->rix_mreg_copy = mcp_res->idx;
			return 0;
		case RTE_FLOW_ACTION_TYPE_MARK:
			mark = (const struct rte_flow_action_mark *)
				actions->conf;
			mcp_res =
				flow_mreg_add_copy_action(dev, mark->id, error);
			if (!mcp_res)
				return -rte_errno;
			flow->rix_mreg_copy = mcp_res->idx;
			return 0;
		default:
			break;
		}
	}
	return 0;
}

#define MLX5_MAX_SPLIT_ACTIONS 24
#define MLX5_MAX_SPLIT_ITEMS 24

/**
 * Split the hairpin flow.
 * Since HW can't support encap and push-vlan on Rx, we move these
 * actions to Tx.
 * If the count action is after the encap, then we also move the
 * count action; in this case the count also measures the outer bytes.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] actions_rx
 *   Rx flow actions.
 * @param[out] actions_tx
 *   Tx flow actions.
 * @param[out] pattern_tx
 *   The pattern items for the Tx flow.
 * @param[out] flow_id
 *   The flow ID connected to this flow.
 *
 * @return
 *   0 on success.
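 *
 * A schematic example (action names illustrative only): given hairpin
 * flow actions
 *   RAW_ENCAP / COUNT / QUEUE / END
 * the split below produces
 *   actions_rx: QUEUE / TAG(set flow_id) / END
 *   actions_tx: RAW_ENCAP / COUNT / END
 * and pattern_tx matches on the TAG item carrying flow_id.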
5429 */ 5430 static int 5431 flow_hairpin_split(struct rte_eth_dev *dev, 5432 const struct rte_flow_action actions[], 5433 struct rte_flow_action actions_rx[], 5434 struct rte_flow_action actions_tx[], 5435 struct rte_flow_item pattern_tx[], 5436 uint32_t flow_id) 5437 { 5438 const struct rte_flow_action_raw_encap *raw_encap; 5439 const struct rte_flow_action_raw_decap *raw_decap; 5440 struct mlx5_rte_flow_action_set_tag *set_tag; 5441 struct rte_flow_action *tag_action; 5442 struct mlx5_rte_flow_item_tag *tag_item; 5443 struct rte_flow_item *item; 5444 char *addr; 5445 int push_vlan = 0; 5446 int encap = 0; 5447 5448 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 5449 if (actions->type == RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN) 5450 push_vlan = 1; 5451 switch (actions->type) { 5452 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 5453 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 5454 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 5455 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: 5456 rte_memcpy(actions_tx, actions, 5457 sizeof(struct rte_flow_action)); 5458 actions_tx++; 5459 break; 5460 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 5461 if (push_vlan) { 5462 rte_memcpy(actions_tx, actions, 5463 sizeof(struct rte_flow_action)); 5464 actions_tx++; 5465 } else { 5466 rte_memcpy(actions_rx, actions, 5467 sizeof(struct rte_flow_action)); 5468 actions_rx++; 5469 } 5470 break; 5471 case RTE_FLOW_ACTION_TYPE_COUNT: 5472 case RTE_FLOW_ACTION_TYPE_AGE: 5473 if (encap) { 5474 rte_memcpy(actions_tx, actions, 5475 sizeof(struct rte_flow_action)); 5476 actions_tx++; 5477 } else { 5478 rte_memcpy(actions_rx, actions, 5479 sizeof(struct rte_flow_action)); 5480 actions_rx++; 5481 } 5482 break; 5483 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 5484 raw_encap = actions->conf; 5485 if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) { 5486 memcpy(actions_tx, actions, 5487 sizeof(struct rte_flow_action)); 5488 actions_tx++; 5489 encap = 1; 5490 } else { 5491 rte_memcpy(actions_rx, actions, 5492 sizeof(struct rte_flow_action)); 5493 actions_rx++; 5494 } 5495 break; 5496 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 5497 raw_decap = actions->conf; 5498 if (raw_decap->size < MLX5_ENCAPSULATION_DECISION_SIZE) { 5499 memcpy(actions_tx, actions, 5500 sizeof(struct rte_flow_action)); 5501 actions_tx++; 5502 } else { 5503 rte_memcpy(actions_rx, actions, 5504 sizeof(struct rte_flow_action)); 5505 actions_rx++; 5506 } 5507 break; 5508 default: 5509 rte_memcpy(actions_rx, actions, 5510 sizeof(struct rte_flow_action)); 5511 actions_rx++; 5512 break; 5513 } 5514 } 5515 /* Add set meta action and end action for the Rx flow. */ 5516 tag_action = actions_rx; 5517 tag_action->type = (enum rte_flow_action_type) 5518 MLX5_RTE_FLOW_ACTION_TYPE_TAG; 5519 actions_rx++; 5520 rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action)); 5521 actions_rx++; 5522 set_tag = (void *)actions_rx; 5523 *set_tag = (struct mlx5_rte_flow_action_set_tag) { 5524 .id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL), 5525 .data = flow_id, 5526 }; 5527 MLX5_ASSERT(set_tag->id > REG_NON); 5528 tag_action->conf = set_tag; 5529 /* Create Tx item list. 
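	 * Note: the caller-provided pattern_tx[] array doubles as scratch
	 * space: pattern_tx[0] is the TAG item, pattern_tx[1] the END item,
	 * and the memory starting at pattern_tx[2] holds the
	 * mlx5_rte_flow_item_tag spec and mask the TAG item points to.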
*/ 5530 rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action)); 5531 addr = (void *)&pattern_tx[2]; 5532 item = pattern_tx; 5533 item->type = (enum rte_flow_item_type) 5534 MLX5_RTE_FLOW_ITEM_TYPE_TAG; 5535 tag_item = (void *)addr; 5536 tag_item->data = flow_id; 5537 tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL); 5538 MLX5_ASSERT(set_tag->id > REG_NON); 5539 item->spec = tag_item; 5540 addr += sizeof(struct mlx5_rte_flow_item_tag); 5541 tag_item = (void *)addr; 5542 tag_item->data = UINT32_MAX; 5543 tag_item->id = UINT16_MAX; 5544 item->mask = tag_item; 5545 item->last = NULL; 5546 item++; 5547 item->type = RTE_FLOW_ITEM_TYPE_END; 5548 return 0; 5549 } 5550 5551 /** 5552 * The last stage of splitting chain, just creates the subflow 5553 * without any modification. 5554 * 5555 * @param[in] dev 5556 * Pointer to Ethernet device. 5557 * @param[in] flow 5558 * Parent flow structure pointer. 5559 * @param[in, out] sub_flow 5560 * Pointer to return the created subflow, may be NULL. 5561 * @param[in] attr 5562 * Flow rule attributes. 5563 * @param[in] items 5564 * Pattern specification (list terminated by the END pattern item). 5565 * @param[in] actions 5566 * Associated actions (list terminated by the END action). 5567 * @param[in] flow_split_info 5568 * Pointer to flow split info structure. 5569 * @param[out] error 5570 * Perform verbose error reporting if not NULL. 5571 * @return 5572 * 0 on success, negative value otherwise 5573 */ 5574 static int 5575 flow_create_split_inner(struct rte_eth_dev *dev, 5576 struct rte_flow *flow, 5577 struct mlx5_flow **sub_flow, 5578 const struct rte_flow_attr *attr, 5579 const struct rte_flow_item items[], 5580 const struct rte_flow_action actions[], 5581 struct mlx5_flow_split_info *flow_split_info, 5582 struct rte_flow_error *error) 5583 { 5584 struct mlx5_flow *dev_flow; 5585 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); 5586 5587 dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, 5588 flow_split_info->flow_idx, error); 5589 if (!dev_flow) 5590 return -rte_errno; 5591 dev_flow->flow = flow; 5592 dev_flow->external = flow_split_info->external; 5593 dev_flow->skip_scale = flow_split_info->skip_scale; 5594 /* Subflow object was created, we must include one in the list. */ 5595 SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx, 5596 dev_flow->handle, next); 5597 /* 5598 * If dev_flow is as one of the suffix flow, some actions in suffix 5599 * flow may need some user defined item layer flags, and pass the 5600 * Metadata rxq mark flag to suffix flow as well. 5601 */ 5602 if (flow_split_info->prefix_layers) 5603 dev_flow->handle->layers = flow_split_info->prefix_layers; 5604 if (flow_split_info->prefix_mark) { 5605 MLX5_ASSERT(wks); 5606 wks->mark = 1; 5607 } 5608 if (sub_flow) 5609 *sub_flow = dev_flow; 5610 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 5611 dev_flow->dv.table_id = flow_split_info->table_id; 5612 #endif 5613 return flow_drv_translate(dev, dev_flow, attr, items, actions, error); 5614 } 5615 5616 /** 5617 * Get the sub policy of a meter. 5618 * 5619 * @param[in] dev 5620 * Pointer to Ethernet device. 5621 * @param[in] flow 5622 * Parent flow structure pointer. 5623 * @param wks 5624 * Pointer to thread flow work space. 5625 * @param[in] attr 5626 * Flow rule attributes. 5627 * @param[in] items 5628 * Pattern specification (list terminated by the END pattern item). 5629 * @param[out] error 5630 * Perform verbose error reporting if not NULL. 
5631 * 5632 * @return 5633 * Pointer to the meter sub policy, NULL otherwise and rte_errno is set. 5634 */ 5635 static struct mlx5_flow_meter_sub_policy * 5636 get_meter_sub_policy(struct rte_eth_dev *dev, 5637 struct rte_flow *flow, 5638 struct mlx5_flow_workspace *wks, 5639 const struct rte_flow_attr *attr, 5640 const struct rte_flow_item items[], 5641 struct rte_flow_error *error) 5642 { 5643 struct mlx5_flow_meter_policy *policy; 5644 struct mlx5_flow_meter_policy *final_policy; 5645 struct mlx5_flow_meter_sub_policy *sub_policy = NULL; 5646 5647 policy = wks->policy; 5648 final_policy = policy->is_hierarchy ? wks->final_policy : policy; 5649 if (final_policy->is_rss || final_policy->is_queue) { 5650 struct mlx5_flow_rss_desc rss_desc_v[MLX5_MTR_RTE_COLORS]; 5651 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS] = {0}; 5652 uint32_t i; 5653 5654 /* 5655 * This is a tmp dev_flow, 5656 * no need to register any matcher for it in translate. 5657 */ 5658 wks->skip_matcher_reg = 1; 5659 for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) { 5660 struct mlx5_flow dev_flow = {0}; 5661 struct mlx5_flow_handle dev_handle = { {0} }; 5662 uint8_t fate = final_policy->act_cnt[i].fate_action; 5663 5664 if (fate == MLX5_FLOW_FATE_SHARED_RSS) { 5665 const struct rte_flow_action_rss *rss_act = 5666 final_policy->act_cnt[i].rss->conf; 5667 struct rte_flow_action rss_actions[2] = { 5668 [0] = { 5669 .type = RTE_FLOW_ACTION_TYPE_RSS, 5670 .conf = rss_act, 5671 }, 5672 [1] = { 5673 .type = RTE_FLOW_ACTION_TYPE_END, 5674 .conf = NULL, 5675 } 5676 }; 5677 5678 dev_flow.handle = &dev_handle; 5679 dev_flow.ingress = attr->ingress; 5680 dev_flow.flow = flow; 5681 dev_flow.external = 0; 5682 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 5683 dev_flow.dv.transfer = attr->transfer; 5684 #endif 5685 /** 5686 * Translate RSS action to get rss hash fields. 5687 */ 5688 if (flow_drv_translate(dev, &dev_flow, attr, 5689 items, rss_actions, error)) 5690 goto exit; 5691 rss_desc_v[i] = wks->rss_desc; 5692 rss_desc_v[i].symmetric_hash_function = 5693 dev_flow.symmetric_hash_function; 5694 rss_desc_v[i].key_len = MLX5_RSS_HASH_KEY_LEN; 5695 rss_desc_v[i].hash_fields = 5696 dev_flow.hash_fields; 5697 rss_desc_v[i].queue_num = 5698 rss_desc_v[i].hash_fields ? 5699 rss_desc_v[i].queue_num : 1; 5700 rss_desc_v[i].tunnel = 5701 !!(dev_flow.handle->layers & 5702 MLX5_FLOW_LAYER_TUNNEL); 5703 /* Use the RSS queues in the containers. */ 5704 rss_desc_v[i].queue = 5705 (uint16_t *)(uintptr_t)rss_act->queue; 5706 rss_desc[i] = &rss_desc_v[i]; 5707 } else if (fate == MLX5_FLOW_FATE_QUEUE) { 5708 /* This is queue action. */ 5709 rss_desc_v[i] = wks->rss_desc; 5710 rss_desc_v[i].key_len = 0; 5711 rss_desc_v[i].hash_fields = 0; 5712 rss_desc_v[i].queue = 5713 &final_policy->act_cnt[i].queue; 5714 rss_desc_v[i].queue_num = 1; 5715 rss_desc[i] = &rss_desc_v[i]; 5716 } else { 5717 rss_desc[i] = NULL; 5718 } 5719 } 5720 sub_policy = flow_drv_meter_sub_policy_rss_prepare(dev, 5721 flow, policy, rss_desc); 5722 } else { 5723 enum mlx5_meter_domain mtr_domain = 5724 attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER : 5725 (attr->egress ? MLX5_MTR_DOMAIN_EGRESS : 5726 MLX5_MTR_DOMAIN_INGRESS); 5727 sub_policy = policy->sub_policys[mtr_domain][0]; 5728 } 5729 if (!sub_policy) 5730 rte_flow_error_set(error, EINVAL, 5731 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 5732 "Failed to get meter sub-policy."); 5733 exit: 5734 return sub_policy; 5735 } 5736 5737 /** 5738 * Split the meter flow. 
 *
 * As the meter flow is split into three sub-flows, the actions other
 * than the meter action make sense only if the meter accepts the
 * packet. If it is to be dropped, no additional actions should be
 * taken.
 *
 * One kind of special action which decapsulates the L3 tunnel
 * header will be put in the prefix sub-flow, so as not to take the
 * L3 tunnel header into account.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Parent flow structure pointer.
 * @param wks
 *   Pointer to thread flow work space.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[out] sfx_items
 *   Suffix flow match items (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] actions_sfx
 *   Suffix flow actions.
 * @param[out] actions_pre
 *   Prefix flow actions.
 * @param[out] mtr_flow_id
 *   Pointer to meter flow id.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_meter_split_prep(struct rte_eth_dev *dev,
		      struct rte_flow *flow,
		      struct mlx5_flow_workspace *wks,
		      const struct rte_flow_attr *attr,
		      const struct rte_flow_item items[],
		      struct rte_flow_item sfx_items[],
		      const struct rte_flow_action actions[],
		      struct rte_flow_action actions_sfx[],
		      struct rte_flow_action actions_pre[],
		      uint32_t *mtr_flow_id,
		      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_meter_info *fm = wks->fm;
	struct rte_flow_action *tag_action = NULL;
	struct rte_flow_item *tag_item;
	struct mlx5_rte_flow_action_set_tag *set_tag;
	const struct rte_flow_action_raw_encap *raw_encap;
	const struct rte_flow_action_raw_decap *raw_decap;
	struct mlx5_rte_flow_item_tag *tag_item_spec;
	struct mlx5_rte_flow_item_tag *tag_item_mask;
	uint32_t tag_id = 0;
	bool vlan_actions;
	struct rte_flow_item *orig_sfx_items = sfx_items;
	const struct rte_flow_item *orig_items = items;
	struct rte_flow_action *hw_mtr_action;
	struct rte_flow_action *action_pre_head = NULL;
	uint16_t flow_src_port = priv->representor_id;
	bool mtr_first;
	uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
	uint8_t mtr_reg_bits = priv->mtr_reg_share ?
			       MLX5_MTR_IDLE_BITS_IN_COLOR_REG : MLX5_REG_BITS;
	uint32_t flow_id = 0;
	uint32_t flow_id_reversed = 0;
	uint8_t flow_id_bits = 0;
	bool after_meter = false;
	int shift;

	/* Prepare the suffix subflow items.
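	 * Layout of the sfx_items[] buffer built below: slot 0 is reserved
	 * for the TAG item, followed by any port and VLAN items copied from
	 * the original pattern and the END item; the mlx5_rte_flow_item_tag
	 * spec and mask the TAG item points to are placed right after the
	 * END slot.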
*/ 5815 tag_item = sfx_items++; 5816 tag_item->type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG; 5817 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { 5818 int item_type = items->type; 5819 5820 switch (item_type) { 5821 case RTE_FLOW_ITEM_TYPE_PORT_ID: 5822 case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT: 5823 case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR: 5824 if (mlx5_flow_get_item_vport_id(dev, items, &flow_src_port, NULL, error)) 5825 return -rte_errno; 5826 if (!fm->def_policy && wks->policy->hierarchy_match_port && 5827 flow_src_port != priv->representor_id) { 5828 if (flow_drv_mtr_hierarchy_rule_create(dev, 5829 flow, fm, 5830 flow_src_port, 5831 items, 5832 error)) 5833 return -rte_errno; 5834 } 5835 memcpy(sfx_items, items, sizeof(*sfx_items)); 5836 sfx_items++; 5837 break; 5838 case RTE_FLOW_ITEM_TYPE_VLAN: 5839 /* 5840 * Copy VLAN items in case VLAN actions are performed. 5841 * If there are no VLAN actions, these items will be VOID. 5842 */ 5843 memcpy(sfx_items, items, sizeof(*sfx_items)); 5844 sfx_items->type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN; 5845 sfx_items++; 5846 break; 5847 default: 5848 break; 5849 } 5850 } 5851 sfx_items->type = RTE_FLOW_ITEM_TYPE_END; 5852 sfx_items++; 5853 mtr_first = priv->sh->meter_aso_en && 5854 (attr->egress || (attr->transfer && flow_src_port != UINT16_MAX)); 5855 /* For ASO meter, meter must be before tag in TX direction. */ 5856 if (mtr_first) { 5857 action_pre_head = actions_pre++; 5858 /* Leave space for tag action. */ 5859 tag_action = actions_pre++; 5860 } 5861 /* Prepare the actions for prefix and suffix flow. */ 5862 vlan_actions = false; 5863 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 5864 struct rte_flow_action *action_cur = NULL; 5865 5866 switch (actions->type) { 5867 case RTE_FLOW_ACTION_TYPE_METER: 5868 if (mtr_first) { 5869 action_cur = action_pre_head; 5870 } else { 5871 /* Leave space for tag action. */ 5872 tag_action = actions_pre++; 5873 action_cur = actions_pre++; 5874 } 5875 after_meter = true; 5876 break; 5877 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: 5878 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: 5879 action_cur = actions_pre++; 5880 break; 5881 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 5882 raw_encap = actions->conf; 5883 if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE) 5884 action_cur = actions_pre++; 5885 break; 5886 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 5887 raw_decap = actions->conf; 5888 if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE) 5889 action_cur = actions_pre++; 5890 break; 5891 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 5892 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 5893 vlan_actions = true; 5894 break; 5895 case RTE_FLOW_ACTION_TYPE_COUNT: 5896 if (fm->def_policy) 5897 action_cur = after_meter ? 5898 actions_sfx++ : actions_pre++; 5899 break; 5900 default: 5901 break; 5902 } 5903 if (!action_cur) 5904 action_cur = (fm->def_policy) ? 5905 actions_sfx++ : actions_pre++; 5906 memcpy(action_cur, actions, sizeof(struct rte_flow_action)); 5907 } 5908 /* If there are no VLAN actions, convert VLAN items to VOID in suffix flow items. */ 5909 if (!vlan_actions) { 5910 struct rte_flow_item *it = orig_sfx_items; 5911 5912 for (; it->type != RTE_FLOW_ITEM_TYPE_END; it++) 5913 if (it->type == (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN) 5914 it->type = RTE_FLOW_ITEM_TYPE_VOID; 5915 } 5916 /* Add end action to the actions. 
*/ 5917 actions_sfx->type = RTE_FLOW_ACTION_TYPE_END; 5918 if (priv->sh->meter_aso_en) { 5919 /** 5920 * For ASO meter, need to add an extra jump action explicitly, 5921 * to jump from meter to policer table. 5922 */ 5923 struct mlx5_flow_meter_sub_policy *sub_policy; 5924 struct mlx5_flow_tbl_data_entry *tbl_data; 5925 5926 if (!fm->def_policy) { 5927 sub_policy = get_meter_sub_policy(dev, flow, wks, 5928 attr, orig_items, 5929 error); 5930 if (!sub_policy) 5931 return -rte_errno; 5932 } else { 5933 enum mlx5_meter_domain mtr_domain = 5934 attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER : 5935 (attr->egress ? MLX5_MTR_DOMAIN_EGRESS : 5936 MLX5_MTR_DOMAIN_INGRESS); 5937 5938 sub_policy = 5939 &priv->sh->mtrmng->def_policy[mtr_domain]->sub_policy; 5940 } 5941 tbl_data = container_of(sub_policy->tbl_rsc, 5942 struct mlx5_flow_tbl_data_entry, tbl); 5943 hw_mtr_action = actions_pre++; 5944 hw_mtr_action->type = (enum rte_flow_action_type) 5945 MLX5_RTE_FLOW_ACTION_TYPE_JUMP; 5946 hw_mtr_action->conf = tbl_data->jump.action; 5947 } 5948 actions_pre->type = RTE_FLOW_ACTION_TYPE_END; 5949 actions_pre++; 5950 if (!tag_action) 5951 return rte_flow_error_set(error, ENOMEM, 5952 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 5953 NULL, "No tag action space."); 5954 if (!mtr_flow_id) { 5955 tag_action->type = RTE_FLOW_ACTION_TYPE_VOID; 5956 goto exit; 5957 } 5958 /* Only default-policy Meter creates mtr flow id. */ 5959 if (fm->def_policy) { 5960 mlx5_ipool_malloc(fm->flow_ipool, &tag_id); 5961 if (!tag_id) 5962 return rte_flow_error_set(error, ENOMEM, 5963 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 5964 "Failed to allocate meter flow id."); 5965 flow_id = tag_id - 1; 5966 flow_id_bits = (!flow_id) ? 1 : 5967 (MLX5_REG_BITS - rte_clz32(flow_id)); 5968 if ((flow_id_bits + priv->sh->mtrmng->max_mtr_bits) > 5969 mtr_reg_bits) { 5970 mlx5_ipool_free(fm->flow_ipool, tag_id); 5971 return rte_flow_error_set(error, EINVAL, 5972 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 5973 "Meter flow id exceeds max limit."); 5974 } 5975 if (flow_id_bits > priv->sh->mtrmng->max_mtr_flow_bits) 5976 priv->sh->mtrmng->max_mtr_flow_bits = flow_id_bits; 5977 } 5978 /* Build tag actions and items for meter_id/meter flow_id. */ 5979 set_tag = (struct mlx5_rte_flow_action_set_tag *)actions_pre; 5980 tag_item_spec = (struct mlx5_rte_flow_item_tag *)sfx_items; 5981 tag_item_mask = tag_item_spec + 1; 5982 /* Both flow_id and meter_id share the same register. */ 5983 *set_tag = (struct mlx5_rte_flow_action_set_tag) { 5984 .id = (enum modify_reg)mlx5_flow_get_reg_id(dev, MLX5_MTR_ID, 5985 0, error), 5986 .offset = mtr_id_offset, 5987 .length = mtr_reg_bits, 5988 .data = flow->meter, 5989 }; 5990 /* 5991 * The color Reg bits used by flow_id are growing from 5992 * msb to lsb, so must do bit reverse for flow_id val in RegC. 
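	 * A worked example (mtr_reg_bits value illustrative): flow_id 0x6
	 * (0b110, flow_id_bits = 3) is reversed bit by bit to 0b011 and,
	 * with mtr_reg_bits = 8, shifted left by 8 - 3 = 5 so that the id
	 * lands in the most significant bits of the register share.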
5993 */ 5994 for (shift = 0; shift < flow_id_bits; shift++) 5995 flow_id_reversed = (flow_id_reversed << 1) | 5996 ((flow_id >> shift) & 0x1); 5997 set_tag->data |= 5998 flow_id_reversed << (mtr_reg_bits - flow_id_bits); 5999 tag_item_spec->id = set_tag->id; 6000 tag_item_spec->data = set_tag->data << mtr_id_offset; 6001 tag_item_mask->data = UINT32_MAX << mtr_id_offset; 6002 tag_action->type = (enum rte_flow_action_type) 6003 MLX5_RTE_FLOW_ACTION_TYPE_TAG; 6004 tag_action->conf = set_tag; 6005 tag_item->spec = tag_item_spec; 6006 tag_item->last = NULL; 6007 tag_item->mask = tag_item_mask; 6008 exit: 6009 if (mtr_flow_id) 6010 *mtr_flow_id = tag_id; 6011 return 0; 6012 } 6013 6014 /** 6015 * Split action list having QUEUE/RSS for metadata register copy. 6016 * 6017 * Once Q/RSS action is detected in user's action list, the flow action 6018 * should be split in order to copy metadata registers, which will happen in 6019 * RX_CP_TBL like, 6020 * - CQE->flow_tag := reg_c[1] (MARK) 6021 * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META) 6022 * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL. 6023 * This is because the last action of each flow must be a terminal action 6024 * (QUEUE, RSS or DROP). 6025 * 6026 * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is 6027 * stored and kept in the mlx5_flow structure per each sub_flow. 6028 * 6029 * The Q/RSS action is replaced with, 6030 * - SET_TAG, setting the allocated flow ID to reg_c[2]. 6031 * And the following JUMP action is added at the end, 6032 * - JUMP, to RX_CP_TBL. 6033 * 6034 * A flow to perform remained Q/RSS action will be created in RX_ACT_TBL by 6035 * flow_create_split_metadata() routine. The flow will look like, 6036 * - If flow ID matches (reg_c[2]), perform Q/RSS. 6037 * 6038 * @param dev 6039 * Pointer to Ethernet device. 6040 * @param[out] split_actions 6041 * Pointer to store split actions to jump to CP_TBL. 6042 * @param[in] actions 6043 * Pointer to the list of original flow actions. 6044 * @param[in] qrss 6045 * Pointer to the Q/RSS action. 6046 * @param[in] actions_n 6047 * Number of original actions. 6048 * @param[in] mtr_sfx 6049 * Check if it is in meter suffix table. 6050 * @param[out] error 6051 * Perform verbose error reporting if not NULL. 6052 * 6053 * @return 6054 * non-zero unique flow_id on success, otherwise 0 and 6055 * error/rte_error are set. 6056 */ 6057 static uint32_t 6058 flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, 6059 struct rte_flow_action *split_actions, 6060 const struct rte_flow_action *actions, 6061 const struct rte_flow_action *qrss, 6062 int actions_n, int mtr_sfx, 6063 struct rte_flow_error *error) 6064 { 6065 struct mlx5_priv *priv = dev->data->dev_private; 6066 struct mlx5_rte_flow_action_set_tag *set_tag; 6067 struct rte_flow_action_jump *jump; 6068 const int qrss_idx = qrss - actions; 6069 uint32_t flow_id = 0; 6070 int ret = 0; 6071 6072 /* 6073 * Given actions will be split 6074 * - Replace QUEUE/RSS action with SET_TAG to set flow ID. 6075 * - Add jump to mreg CP_TBL. 6076 * As a result, there will be one more action. 6077 */ 6078 memcpy(split_actions, actions, sizeof(*split_actions) * actions_n); 6079 /* Count MLX5_RTE_FLOW_ACTION_TYPE_TAG. */ 6080 ++actions_n; 6081 set_tag = (void *)(split_actions + actions_n); 6082 /* 6083 * If we are not the meter suffix flow, add the tag action. 6084 * Since meter suffix flow already has the tag added. 6085 */ 6086 if (!mtr_sfx) { 6087 /* 6088 * Allocate the new subflow ID. 
This one is unique within 6089 * device and not shared with representors. Otherwise, 6090 * we would have to resolve multi-thread access synch 6091 * issue. Each flow on the shared device is appended 6092 * with source vport identifier, so the resulting 6093 * flows will be unique in the shared (by master and 6094 * representors) domain even if they have coinciding 6095 * IDs. 6096 */ 6097 mlx5_ipool_malloc(priv->sh->ipool 6098 [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &flow_id); 6099 if (!flow_id) 6100 return rte_flow_error_set(error, ENOMEM, 6101 RTE_FLOW_ERROR_TYPE_ACTION, 6102 NULL, "can't allocate id " 6103 "for split Q/RSS subflow"); 6104 /* Internal SET_TAG action to set flow ID. */ 6105 *set_tag = (struct mlx5_rte_flow_action_set_tag){ 6106 .data = flow_id, 6107 }; 6108 ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error); 6109 if (ret < 0) 6110 return ret; 6111 set_tag->id = ret; 6112 /* Construct new actions array. */ 6113 /* Replace QUEUE/RSS action. */ 6114 split_actions[qrss_idx] = (struct rte_flow_action){ 6115 .type = (enum rte_flow_action_type) 6116 MLX5_RTE_FLOW_ACTION_TYPE_TAG, 6117 .conf = set_tag, 6118 }; 6119 } else { 6120 /* 6121 * If we are the suffix flow of meter, tag already exist. 6122 * Set the QUEUE/RSS action to void. 6123 */ 6124 split_actions[qrss_idx].type = RTE_FLOW_ACTION_TYPE_VOID; 6125 } 6126 /* JUMP action to jump to mreg copy table (CP_TBL). */ 6127 jump = (void *)(set_tag + 1); 6128 *jump = (struct rte_flow_action_jump){ 6129 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, 6130 }; 6131 split_actions[actions_n - 2] = (struct rte_flow_action){ 6132 .type = RTE_FLOW_ACTION_TYPE_JUMP, 6133 .conf = jump, 6134 }; 6135 split_actions[actions_n - 1] = (struct rte_flow_action){ 6136 .type = RTE_FLOW_ACTION_TYPE_END, 6137 }; 6138 return flow_id; 6139 } 6140 6141 /** 6142 * Extend the given action list for Tx metadata copy. 6143 * 6144 * Copy the given action list to the ext_actions and add flow metadata register 6145 * copy action in order to copy reg_a set by WQE to reg_c[0]. 6146 * 6147 * @param[out] ext_actions 6148 * Pointer to the extended action list. 6149 * @param[in] actions 6150 * Pointer to the list of actions. 6151 * @param[in] actions_n 6152 * Number of actions in the list. 6153 * @param[out] error 6154 * Perform verbose error reporting if not NULL. 6155 * @param[in] encap_idx 6156 * The encap action index. 
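 *   For example (action names illustrative), with actions
 *   A / RAW_ENCAP / END (actions_n = 3) and encap_idx = 1, the function
 *   builds ext_actions = A / COPY_MREG / RAW_ENCAP / END, i.e. the copy
 *   action is inserted just before the encap action; when encap_idx is
 *   actions_n - 1 (no encap), it is appended right before the END action.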
 *
 * @return
 *   0 on success, negative value otherwise.
 */
static int
flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
		       struct rte_flow_action *ext_actions,
		       const struct rte_flow_action *actions,
		       int actions_n, struct rte_flow_error *error,
		       int encap_idx)
{
	struct mlx5_flow_action_copy_mreg *cp_mreg =
		(struct mlx5_flow_action_copy_mreg *)
			(ext_actions + actions_n + 1);
	int ret;

	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
	if (ret < 0)
		return ret;
	cp_mreg->dst = ret;
	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error);
	if (ret < 0)
		return ret;
	cp_mreg->src = ret;
	if (encap_idx != 0)
		memcpy(ext_actions, actions, sizeof(*ext_actions) * encap_idx);
	if (encap_idx == actions_n - 1) {
		ext_actions[actions_n - 1] = (struct rte_flow_action){
			.type = (enum rte_flow_action_type)
				MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
			.conf = cp_mreg,
		};
		ext_actions[actions_n] = (struct rte_flow_action){
			.type = RTE_FLOW_ACTION_TYPE_END,
		};
	} else {
		ext_actions[encap_idx] = (struct rte_flow_action){
			.type = (enum rte_flow_action_type)
				MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
			.conf = cp_mreg,
		};
		memcpy(ext_actions + encap_idx + 1, actions + encap_idx,
		       sizeof(*ext_actions) * (actions_n - encap_idx));
	}
	return 0;
}

/**
 * Check the match action from the action list.
 *
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] action
 *   The action to be checked for existence.
 * @param[out] match_action_pos
 *   Pointer to the position of the matched action if it exists, otherwise -1.
 * @param[out] qrss_action_pos
 *   Pointer to the position of the Queue/RSS action if it exists, otherwise -1.
 * @param[out] modify_after_mirror
 *   Pointer to the flag of modify action after FDB mirroring.
 *
 * @return
 *   > 0 the total number of actions.
 *   0 if the match action is not found in the action list.
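 *
 * Typical usage (mirrors the call in flow_create_split_sample() below):
 *   actions_n = flow_check_match_action(actions, attr,
 *                                       RTE_FLOW_ACTION_TYPE_SAMPLE,
 *                                       &sample_action_pos,
 *                                       &qrss_action_pos,
 *                                       &modify_after_mirror);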
6223 */ 6224 static int 6225 flow_check_match_action(const struct rte_flow_action actions[], 6226 const struct rte_flow_attr *attr, 6227 enum rte_flow_action_type action, 6228 int *match_action_pos, int *qrss_action_pos, 6229 int *modify_after_mirror) 6230 { 6231 const struct rte_flow_action_sample *sample; 6232 const struct rte_flow_action_raw_decap *decap; 6233 const struct rte_flow_action *action_cur = NULL; 6234 int actions_n = 0; 6235 uint32_t ratio = 0; 6236 int sub_type = 0; 6237 int flag = 0; 6238 int fdb_mirror = 0; 6239 6240 *match_action_pos = -1; 6241 *qrss_action_pos = -1; 6242 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 6243 if (actions->type == action) { 6244 flag = 1; 6245 *match_action_pos = actions_n; 6246 } 6247 switch (actions->type) { 6248 case RTE_FLOW_ACTION_TYPE_QUEUE: 6249 case RTE_FLOW_ACTION_TYPE_RSS: 6250 *qrss_action_pos = actions_n; 6251 break; 6252 case RTE_FLOW_ACTION_TYPE_SAMPLE: 6253 sample = actions->conf; 6254 ratio = sample->ratio; 6255 sub_type = ((const struct rte_flow_action *) 6256 (sample->actions))->type; 6257 if (ratio == 1 && attr->transfer && 6258 sub_type != RTE_FLOW_ACTION_TYPE_END) 6259 fdb_mirror = 1; 6260 break; 6261 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: 6262 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: 6263 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: 6264 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: 6265 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: 6266 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: 6267 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: 6268 case RTE_FLOW_ACTION_TYPE_SET_TP_DST: 6269 case RTE_FLOW_ACTION_TYPE_DEC_TTL: 6270 case RTE_FLOW_ACTION_TYPE_SET_TTL: 6271 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ: 6272 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ: 6273 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK: 6274 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK: 6275 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP: 6276 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: 6277 case RTE_FLOW_ACTION_TYPE_FLAG: 6278 case RTE_FLOW_ACTION_TYPE_MARK: 6279 case RTE_FLOW_ACTION_TYPE_SET_META: 6280 case RTE_FLOW_ACTION_TYPE_SET_TAG: 6281 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: 6282 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 6283 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 6284 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: 6285 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: 6286 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: 6287 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD: 6288 case RTE_FLOW_ACTION_TYPE_METER: 6289 if (fdb_mirror) 6290 *modify_after_mirror = 1; 6291 break; 6292 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 6293 decap = actions->conf; 6294 action_cur = actions; 6295 while ((++action_cur)->type == RTE_FLOW_ACTION_TYPE_VOID) 6296 ; 6297 if (action_cur->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { 6298 const struct rte_flow_action_raw_encap *encap = 6299 action_cur->conf; 6300 if (decap->size <= 6301 MLX5_ENCAPSULATION_DECISION_SIZE && 6302 encap->size > 6303 MLX5_ENCAPSULATION_DECISION_SIZE) 6304 /* L3 encap. */ 6305 break; 6306 } 6307 if (fdb_mirror) 6308 *modify_after_mirror = 1; 6309 break; 6310 default: 6311 break; 6312 } 6313 actions_n++; 6314 } 6315 if (flag && fdb_mirror && !*modify_after_mirror) { 6316 /* FDB mirroring uses the destination array to implement 6317 * instead of FLOW_SAMPLER object. 6318 */ 6319 if (sub_type != RTE_FLOW_ACTION_TYPE_END) 6320 flag = 0; 6321 } 6322 /* Count RTE_FLOW_ACTION_TYPE_END. */ 6323 return flag ? actions_n + 1 : 0; 6324 } 6325 6326 #define SAMPLE_SUFFIX_ITEM 3 6327 6328 /** 6329 * Split the sample flow. 
 *
 * As the sample flow is split into two sub-flows, the prefix sub-flow
 * holds the sample action while the other actions are moved to a new
 * suffix sub-flow.
 *
 * A tag action with a unique tag id is also added to the prefix
 * sub-flow, and the same tag id is used as match in the suffix
 * sub-flow.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] add_tag
 *   Add extra tag action flag.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[out] sfx_items
 *   Suffix flow match items (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] actions_sfx
 *   Suffix flow actions.
 * @param[out] actions_pre
 *   Prefix flow actions.
 * @param[in] actions_n
 *   The total number of actions.
 * @param[in] sample_action_pos
 *   The sample action position.
 * @param[in] qrss_action_pos
 *   The Queue/RSS action position.
 * @param[in] jump_table
 *   Add extra jump action flag.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 or a unique flow_id on success, a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
flow_sample_split_prep(struct rte_eth_dev *dev,
		       int add_tag,
		       const struct rte_flow_item items[],
		       struct rte_flow_item sfx_items[],
		       const struct rte_flow_action actions[],
		       struct rte_flow_action actions_sfx[],
		       struct rte_flow_action actions_pre[],
		       int actions_n,
		       int sample_action_pos,
		       int qrss_action_pos,
		       int jump_table,
		       struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rte_flow_action_set_tag *set_tag;
	struct mlx5_rte_flow_item_tag *tag_spec;
	struct mlx5_rte_flow_item_tag *tag_mask;
	struct rte_flow_action_jump *jump_action;
	uint32_t tag_id = 0;
	int append_index = 0;
	int set_tag_idx = -1;
	int index;
	int ret;

	if (sample_action_pos < 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "invalid position of sample "
					  "action in list");
	/* Prepare the actions for prefix and suffix flow. */
	if (add_tag) {
		/* Update the newly added tag action index so that it
		 * precedes the PUSH_VLAN or ENCAP action.
		 */
		const struct rte_flow_action_raw_encap *raw_encap;
		const struct rte_flow_action *action = actions;
		int encap_idx;
		int action_idx = 0;
		int raw_decap_idx = -1;
		int push_vlan_idx = -1;

		for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
			switch (action->type) {
			case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
				raw_decap_idx = action_idx;
				break;
			case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
				raw_encap = action->conf;
				if (raw_encap->size >
				    MLX5_ENCAPSULATION_DECISION_SIZE) {
					encap_idx = raw_decap_idx != -1 ?
6415 raw_decap_idx : action_idx; 6416 if (encap_idx < sample_action_pos && 6417 push_vlan_idx == -1) 6418 set_tag_idx = encap_idx; 6419 } 6420 break; 6421 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 6422 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 6423 encap_idx = action_idx; 6424 if (encap_idx < sample_action_pos && 6425 push_vlan_idx == -1) 6426 set_tag_idx = encap_idx; 6427 break; 6428 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 6429 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 6430 if (action_idx < sample_action_pos && 6431 push_vlan_idx == -1) { 6432 set_tag_idx = action_idx; 6433 push_vlan_idx = action_idx; 6434 } 6435 break; 6436 default: 6437 break; 6438 } 6439 action_idx++; 6440 } 6441 } 6442 /* Prepare the actions for prefix and suffix flow. */ 6443 if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) { 6444 index = qrss_action_pos; 6445 /* Put the preceding the Queue/RSS action into prefix flow. */ 6446 if (index != 0) 6447 memcpy(actions_pre, actions, 6448 sizeof(struct rte_flow_action) * index); 6449 /* Put others preceding the sample action into prefix flow. */ 6450 if (sample_action_pos > index + 1) 6451 memcpy(actions_pre + index, actions + index + 1, 6452 sizeof(struct rte_flow_action) * 6453 (sample_action_pos - index - 1)); 6454 index = sample_action_pos - 1; 6455 /* Put Queue/RSS action into Suffix flow. */ 6456 memcpy(actions_sfx, actions + qrss_action_pos, 6457 sizeof(struct rte_flow_action)); 6458 actions_sfx++; 6459 } else if (add_tag && set_tag_idx >= 0) { 6460 if (set_tag_idx > 0) 6461 memcpy(actions_pre, actions, 6462 sizeof(struct rte_flow_action) * set_tag_idx); 6463 memcpy(actions_pre + set_tag_idx + 1, actions + set_tag_idx, 6464 sizeof(struct rte_flow_action) * 6465 (sample_action_pos - set_tag_idx)); 6466 index = sample_action_pos; 6467 } else { 6468 index = sample_action_pos; 6469 if (index != 0) 6470 memcpy(actions_pre, actions, 6471 sizeof(struct rte_flow_action) * index); 6472 } 6473 /* For CX5, add an extra tag action for NIC-RX and E-Switch ingress. 6474 * For CX6DX and above, metadata registers Cx preserve their value, 6475 * add an extra tag action for NIC-RX and E-Switch Domain. 6476 */ 6477 if (add_tag) { 6478 /* Prepare the prefix tag action. */ 6479 append_index++; 6480 set_tag = (void *)(actions_pre + actions_n + append_index); 6481 /* Trust VF/SF on CX5 not supported meter so that the reserved 6482 * metadata regC is REG_NON, back to use application tag 6483 * index 0. 6484 */ 6485 if (unlikely(priv->sh->registers.aso_reg == REG_NON)) 6486 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error); 6487 else 6488 ret = mlx5_flow_get_reg_id(dev, MLX5_SAMPLE_ID, 0, error); 6489 if (ret < 0) 6490 return ret; 6491 mlx5_ipool_malloc(priv->sh->ipool 6492 [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &tag_id); 6493 *set_tag = (struct mlx5_rte_flow_action_set_tag) { 6494 .id = ret, 6495 .data = tag_id, 6496 }; 6497 /* Prepare the suffix subflow items. 
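		 * Layout: an optional port item copied from the original
		 * pattern, then the TAG item, then END (at most
		 * SAMPLE_SUFFIX_ITEM entries); the tag spec and mask are
		 * stored right past those slots, at
		 * sfx_items + SAMPLE_SUFFIX_ITEM.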
*/ 6498 tag_spec = (void *)(sfx_items + SAMPLE_SUFFIX_ITEM); 6499 tag_spec->data = tag_id; 6500 tag_spec->id = set_tag->id; 6501 tag_mask = tag_spec + 1; 6502 tag_mask->data = UINT32_MAX; 6503 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { 6504 if (items->type == RTE_FLOW_ITEM_TYPE_PORT_ID || 6505 items->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR || 6506 items->type == RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT) { 6507 memcpy(sfx_items, items, sizeof(*sfx_items)); 6508 sfx_items++; 6509 break; 6510 } 6511 } 6512 sfx_items[0] = (struct rte_flow_item){ 6513 .type = (enum rte_flow_item_type) 6514 MLX5_RTE_FLOW_ITEM_TYPE_TAG, 6515 .spec = tag_spec, 6516 .last = NULL, 6517 .mask = tag_mask, 6518 }; 6519 sfx_items[1] = (struct rte_flow_item){ 6520 .type = (enum rte_flow_item_type) 6521 RTE_FLOW_ITEM_TYPE_END, 6522 }; 6523 /* Prepare the tag action in prefix subflow. */ 6524 set_tag_idx = (set_tag_idx == -1) ? index : set_tag_idx; 6525 actions_pre[set_tag_idx] = 6526 (struct rte_flow_action){ 6527 .type = (enum rte_flow_action_type) 6528 MLX5_RTE_FLOW_ACTION_TYPE_TAG, 6529 .conf = set_tag, 6530 }; 6531 /* Update next sample position due to add one tag action */ 6532 index += 1; 6533 } 6534 /* Copy the sample action into prefix flow. */ 6535 memcpy(actions_pre + index, actions + sample_action_pos, 6536 sizeof(struct rte_flow_action)); 6537 index += 1; 6538 /* For the modify action after the sample action in E-Switch mirroring, 6539 * Add the extra jump action in prefix subflow and jump into the next 6540 * table, then do the modify action in the new table. 6541 */ 6542 if (jump_table) { 6543 /* Prepare the prefix jump action. */ 6544 append_index++; 6545 jump_action = (void *)(actions_pre + actions_n + append_index); 6546 jump_action->group = jump_table; 6547 actions_pre[index++] = 6548 (struct rte_flow_action){ 6549 .type = (enum rte_flow_action_type) 6550 RTE_FLOW_ACTION_TYPE_JUMP, 6551 .conf = jump_action, 6552 }; 6553 } 6554 actions_pre[index] = (struct rte_flow_action){ 6555 .type = (enum rte_flow_action_type) 6556 RTE_FLOW_ACTION_TYPE_END, 6557 }; 6558 /* Put the actions after sample into Suffix flow. */ 6559 memcpy(actions_sfx, actions + sample_action_pos + 1, 6560 sizeof(struct rte_flow_action) * 6561 (actions_n - sample_action_pos - 1)); 6562 return tag_id; 6563 } 6564 6565 /** 6566 * The splitting for metadata feature. 6567 * 6568 * - Q/RSS action on NIC Rx should be split in order to pass by 6569 * the mreg copy table (RX_CP_TBL) and then it jumps to the 6570 * action table (RX_ACT_TBL) which has the split Q/RSS action. 6571 * 6572 * - All the actions on NIC Tx should have a mreg copy action to 6573 * copy reg_a from WQE to reg_c[0]. 6574 * 6575 * @param dev 6576 * Pointer to Ethernet device. 6577 * @param[in] flow 6578 * Parent flow structure pointer. 6579 * @param[in] attr 6580 * Flow rule attributes. 6581 * @param[in] items 6582 * Pattern specification (list terminated by the END pattern item). 6583 * @param[in] actions 6584 * Associated actions (list terminated by the END action). 6585 * @param[in] flow_split_info 6586 * Pointer to flow split info structure. 6587 * @param[out] error 6588 * Perform verbose error reporting if not NULL. 
6589 * @return 6590 * 0 on success, negative value otherwise 6591 */ 6592 static int 6593 flow_create_split_metadata(struct rte_eth_dev *dev, 6594 struct rte_flow *flow, 6595 const struct rte_flow_attr *attr, 6596 const struct rte_flow_item items[], 6597 const struct rte_flow_action actions[], 6598 struct mlx5_flow_split_info *flow_split_info, 6599 struct rte_flow_error *error) 6600 { 6601 struct mlx5_priv *priv = dev->data->dev_private; 6602 struct mlx5_sh_config *config = &priv->sh->config; 6603 const struct rte_flow_action *qrss = NULL; 6604 struct rte_flow_action *ext_actions = NULL; 6605 struct mlx5_flow *dev_flow = NULL; 6606 uint32_t qrss_id = 0; 6607 int mtr_sfx = 0; 6608 size_t act_size; 6609 int actions_n; 6610 int encap_idx; 6611 int ret; 6612 6613 /* Check whether extensive metadata feature is engaged. */ 6614 if (!config->dv_flow_en || 6615 config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 6616 !mlx5_flow_ext_mreg_supported(dev)) 6617 return flow_create_split_inner(dev, flow, NULL, attr, items, 6618 actions, flow_split_info, error); 6619 actions_n = flow_parse_metadata_split_actions_info(actions, &qrss, 6620 &encap_idx); 6621 if (qrss) { 6622 /* Exclude hairpin flows from splitting. */ 6623 if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) { 6624 const struct rte_flow_action_queue *queue; 6625 6626 queue = qrss->conf; 6627 if (mlx5_rxq_is_hairpin(dev, queue->index)) 6628 qrss = NULL; 6629 } else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) { 6630 const struct rte_flow_action_rss *rss; 6631 6632 rss = qrss->conf; 6633 if (mlx5_rxq_is_hairpin(dev, rss->queue[0])) 6634 qrss = NULL; 6635 } 6636 } 6637 if (qrss) { 6638 /* Check if it is in meter suffix table. */ 6639 mtr_sfx = attr->group == 6640 ((attr->transfer && priv->fdb_def_rule) ? 6641 (MLX5_FLOW_TABLE_LEVEL_METER - 1) : 6642 MLX5_FLOW_TABLE_LEVEL_METER); 6643 /* 6644 * Q/RSS action on NIC Rx should be split in order to pass by 6645 * the mreg copy table (RX_CP_TBL) and then it jumps to the 6646 * action table (RX_ACT_TBL) which has the split Q/RSS action. 6647 */ 6648 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) + 6649 sizeof(struct rte_flow_action_set_tag) + 6650 sizeof(struct rte_flow_action_jump); 6651 ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0, 6652 SOCKET_ID_ANY); 6653 if (!ext_actions) 6654 return rte_flow_error_set(error, ENOMEM, 6655 RTE_FLOW_ERROR_TYPE_ACTION, 6656 NULL, "no memory to split " 6657 "metadata flow"); 6658 /* 6659 * Create the new actions list with removed Q/RSS action 6660 * and appended set tag and jump to register copy table 6661 * (RX_CP_TBL). We should preallocate unique tag ID here 6662 * in advance, because it is needed for set tag action. 6663 */ 6664 qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions, 6665 qrss, actions_n, 6666 mtr_sfx, error); 6667 if (!mtr_sfx && !qrss_id) { 6668 ret = -rte_errno; 6669 goto exit; 6670 } 6671 } else if (attr->egress) { 6672 /* 6673 * All the actions on NIC Tx should have a metadata register 6674 * copy action to copy reg_a from WQE to reg_c[meta] 6675 */ 6676 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) + 6677 sizeof(struct mlx5_flow_action_copy_mreg); 6678 ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0, 6679 SOCKET_ID_ANY); 6680 if (!ext_actions) 6681 return rte_flow_error_set(error, ENOMEM, 6682 RTE_FLOW_ERROR_TYPE_ACTION, 6683 NULL, "no memory to split " 6684 "metadata flow"); 6685 /* Create the action list appended with copy register. 
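		 * The cp_mreg configuration itself lives in the same
		 * allocation, right after the actions_n + 1 action entries
		 * (see act_size above and the cp_mreg pointer arithmetic in
		 * flow_mreg_tx_copy_prep()).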
*/ 6686 ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions, 6687 actions_n, error, encap_idx); 6688 if (ret < 0) 6689 goto exit; 6690 } 6691 /* Add the unmodified original or prefix subflow. */ 6692 ret = flow_create_split_inner(dev, flow, &dev_flow, attr, 6693 items, ext_actions ? ext_actions : 6694 actions, flow_split_info, error); 6695 if (ret < 0) 6696 goto exit; 6697 MLX5_ASSERT(dev_flow); 6698 if (qrss) { 6699 const struct rte_flow_attr q_attr = { 6700 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, 6701 .ingress = 1, 6702 }; 6703 /* Internal PMD action to set register. */ 6704 struct mlx5_rte_flow_item_tag q_tag_spec = { 6705 .data = qrss_id, 6706 .id = REG_NON, 6707 }; 6708 struct rte_flow_item q_items[] = { 6709 { 6710 .type = (enum rte_flow_item_type) 6711 MLX5_RTE_FLOW_ITEM_TYPE_TAG, 6712 .spec = &q_tag_spec, 6713 .last = NULL, 6714 .mask = NULL, 6715 }, 6716 { 6717 .type = RTE_FLOW_ITEM_TYPE_END, 6718 }, 6719 }; 6720 struct rte_flow_action q_actions[] = { 6721 { 6722 .type = qrss->type, 6723 .conf = qrss->conf, 6724 }, 6725 { 6726 .type = RTE_FLOW_ACTION_TYPE_END, 6727 }, 6728 }; 6729 uint64_t layers = flow_get_prefix_layer_flags(dev_flow); 6730 6731 /* 6732 * Configure the tag item only if there is no meter subflow. 6733 * Since tag is already marked in the meter suffix subflow 6734 * we can just use the meter suffix items as is. 6735 */ 6736 if (qrss_id) { 6737 /* Not meter subflow. */ 6738 MLX5_ASSERT(!mtr_sfx); 6739 /* 6740 * Put unique id in prefix flow due to it is destroyed 6741 * after suffix flow and id will be freed after there 6742 * is no actual flows with this id and identifier 6743 * reallocation becomes possible (for example, for 6744 * other flows in other threads). 6745 */ 6746 dev_flow->handle->split_flow_id = qrss_id; 6747 ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, 6748 error); 6749 if (ret < 0) 6750 goto exit; 6751 q_tag_spec.id = ret; 6752 } 6753 dev_flow = NULL; 6754 /* Add suffix subflow to execute Q/RSS. */ 6755 flow_split_info->prefix_layers = layers; 6756 flow_split_info->prefix_mark = 0; 6757 flow_split_info->table_id = 0; 6758 ret = flow_create_split_inner(dev, flow, &dev_flow, 6759 &q_attr, mtr_sfx ? items : 6760 q_items, q_actions, 6761 flow_split_info, error); 6762 if (ret < 0) 6763 goto exit; 6764 /* qrss ID should be freed if failed. */ 6765 qrss_id = 0; 6766 MLX5_ASSERT(dev_flow); 6767 } 6768 6769 exit: 6770 /* 6771 * We do not destroy the partially created sub_flows in case of error. 6772 * These ones are included into parent flow list and will be destroyed 6773 * by flow_drv_destroy. 6774 */ 6775 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], 6776 qrss_id); 6777 mlx5_free(ext_actions); 6778 return ret; 6779 } 6780 6781 /** 6782 * Create meter internal drop flow with the original pattern. 6783 * 6784 * @param dev 6785 * Pointer to Ethernet device. 6786 * @param[in] flow 6787 * Parent flow structure pointer. 6788 * @param[in] attr 6789 * Flow rule attributes. 6790 * @param[in] items 6791 * Pattern specification (list terminated by the END pattern item). 6792 * @param[in] flow_split_info 6793 * Pointer to flow split info structure. 6794 * @param[in] fm 6795 * Pointer to flow meter structure. 6796 * @param[out] error 6797 * Perform verbose error reporting if not NULL. 
 * @return
 *   0 on success, negative value otherwise.
 */
static uint32_t
flow_meter_create_drop_flow_with_org_pattern(struct rte_eth_dev *dev,
					struct rte_flow *flow,
					const struct rte_flow_attr *attr,
					const struct rte_flow_item items[],
					struct mlx5_flow_split_info *flow_split_info,
					struct mlx5_flow_meter_info *fm,
					struct rte_flow_error *error)
{
	struct mlx5_flow *dev_flow = NULL;
	struct rte_flow_attr drop_attr = *attr;
	struct rte_flow_action drop_actions[3];
	struct mlx5_flow_split_info drop_split_info = *flow_split_info;

	MLX5_ASSERT(fm->drop_cnt);
	drop_actions[0].type =
		(enum rte_flow_action_type)MLX5_RTE_FLOW_ACTION_TYPE_COUNT;
	drop_actions[0].conf = (void *)(uintptr_t)fm->drop_cnt;
	drop_actions[1].type = RTE_FLOW_ACTION_TYPE_DROP;
	drop_actions[1].conf = NULL;
	drop_actions[2].type = RTE_FLOW_ACTION_TYPE_END;
	drop_actions[2].conf = NULL;
	drop_split_info.external = false;
	drop_split_info.skip_scale |= 1 << MLX5_SCALE_FLOW_GROUP_BIT;
	drop_split_info.table_id = MLX5_MTR_TABLE_ID_DROP;
	drop_attr.group = MLX5_FLOW_TABLE_LEVEL_METER;
	return flow_create_split_inner(dev, flow, &dev_flow,
				       &drop_attr, items, drop_actions,
				       &drop_split_info, error);
}

static int
flow_count_vlan_items(const struct rte_flow_item items[])
{
	int items_n = 0;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		if (items->type == RTE_FLOW_ITEM_TYPE_VLAN ||
		    items->type == (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
			items_n++;
	}
	return items_n;
}

/**
 * The splitting for meter feature.
 *
 * - The meter flow will be split into two flows, prefix and suffix.
 *   The packets are meaningful only if they pass the prefix meter
 *   action.
 *
 * - Reg_C_5 is used for the packet to match between the prefix and
 *   suffix flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Parent flow structure pointer.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[in] flow_split_info
 *   Pointer to flow split info structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
6869 * @return 6870 * 0 on success, negative value otherwise 6871 */ 6872 static int 6873 flow_create_split_meter(struct rte_eth_dev *dev, 6874 struct rte_flow *flow, 6875 const struct rte_flow_attr *attr, 6876 const struct rte_flow_item items[], 6877 const struct rte_flow_action actions[], 6878 struct mlx5_flow_split_info *flow_split_info, 6879 struct rte_flow_error *error) 6880 { 6881 struct mlx5_priv *priv = dev->data->dev_private; 6882 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); 6883 struct rte_flow_action *sfx_actions = NULL; 6884 struct rte_flow_action *pre_actions = NULL; 6885 struct rte_flow_item *sfx_items = NULL; 6886 struct mlx5_flow *dev_flow = NULL; 6887 struct rte_flow_attr sfx_attr = *attr; 6888 struct mlx5_flow_meter_info *fm = NULL; 6889 uint8_t skip_scale_restore; 6890 bool has_mtr = false; 6891 bool has_modify = false; 6892 bool set_mtr_reg = true; 6893 bool is_mtr_hierarchy = false; 6894 uint32_t meter_id = 0; 6895 uint32_t mtr_idx = 0; 6896 uint32_t mtr_flow_id = 0; 6897 size_t act_size; 6898 size_t item_size; 6899 int actions_n = 0; 6900 int vlan_items_n = 0; 6901 int ret = 0; 6902 6903 if (priv->mtr_en) 6904 actions_n = flow_check_meter_action(dev, actions, &has_mtr, 6905 &has_modify, &meter_id); 6906 if (has_mtr) { 6907 if (flow->meter) { 6908 fm = flow_dv_meter_find_by_idx(priv, flow->meter); 6909 if (!fm) 6910 return rte_flow_error_set(error, EINVAL, 6911 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 6912 NULL, "Meter not found."); 6913 } else { 6914 fm = mlx5_flow_meter_find(priv, meter_id, &mtr_idx); 6915 if (!fm) 6916 return rte_flow_error_set(error, EINVAL, 6917 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 6918 NULL, "Meter not found."); 6919 ret = mlx5_flow_meter_attach(priv, fm, 6920 &sfx_attr, error); 6921 if (ret) 6922 return -rte_errno; 6923 flow->meter = mtr_idx; 6924 } 6925 MLX5_ASSERT(wks); 6926 wks->fm = fm; 6927 if (!fm->def_policy) { 6928 wks->policy = mlx5_flow_meter_policy_find(dev, 6929 fm->policy_id, 6930 NULL); 6931 MLX5_ASSERT(wks->policy); 6932 if (wks->policy->mark) 6933 wks->mark = 1; 6934 if (wks->policy->is_hierarchy) { 6935 wks->final_policy = 6936 mlx5_flow_meter_hierarchy_get_final_policy(dev, 6937 wks->policy); 6938 if (!wks->final_policy) 6939 return rte_flow_error_set(error, 6940 EINVAL, 6941 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 6942 "Failed to find terminal policy of hierarchy."); 6943 is_mtr_hierarchy = true; 6944 } 6945 } 6946 /* 6947 * If it isn't default-policy Meter, and 6948 * 1. Not meter hierarchy and there's no action in flow to change 6949 * packet (modify/encap/decap etc.), OR 6950 * 2. No drop count needed for this meter. 6951 * Then no need to use regC to save meter id anymore. 6952 */ 6953 if (!fm->def_policy && ((!has_modify && !is_mtr_hierarchy) || !fm->drop_cnt)) 6954 set_mtr_reg = false; 6955 /* Prefix actions: meter, decap, encap, tag, jump, end, cnt. */ 6956 #define METER_PREFIX_ACTION 7 6957 act_size = (sizeof(struct rte_flow_action) * 6958 (actions_n + METER_PREFIX_ACTION)) + 6959 sizeof(struct mlx5_rte_flow_action_set_tag); 6960 /* Flow can have multiple VLAN items. Account for them in suffix items. */ 6961 vlan_items_n = flow_count_vlan_items(items); 6962 /* Suffix items: tag, [vlans], port id, end. 
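		 * For example, a pattern with two VLAN items gives
		 *   item_size = sizeof(struct rte_flow_item) *
		 *               (METER_SUFFIX_ITEM + 2) +
		 *               sizeof(struct mlx5_rte_flow_item_tag) * 2,
		 * the trailing pair holding the tag spec and mask.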
		 */
#define METER_SUFFIX_ITEM 3
		item_size = sizeof(struct rte_flow_item) *
			    (METER_SUFFIX_ITEM + vlan_items_n) +
			    sizeof(struct mlx5_rte_flow_item_tag) * 2;
		sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + item_size),
					  0, SOCKET_ID_ANY);
		if (!sfx_actions)
			return rte_flow_error_set(error, ENOMEM,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "no memory to split "
						  "meter flow");
		sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
						     act_size);
		/* There's no suffix flow for a meter with non-default policy. */
		if (!fm->def_policy)
			pre_actions = sfx_actions + 1;
		else
			pre_actions = sfx_actions + actions_n;
		ret = flow_meter_split_prep(dev, flow, wks, &sfx_attr,
					    items, sfx_items, actions,
					    sfx_actions, pre_actions,
					    (set_mtr_reg ? &mtr_flow_id : NULL),
					    error);
		if (ret) {
			ret = -rte_errno;
			goto exit;
		}
		/* Add the prefix subflow. */
		skip_scale_restore = flow_split_info->skip_scale;
		flow_split_info->skip_scale |=
			1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT;
		ret = flow_create_split_inner(dev, flow, &dev_flow,
					      attr, items, pre_actions,
					      flow_split_info, error);
		flow_split_info->skip_scale = skip_scale_restore;
		if (ret) {
			if (mtr_flow_id)
				mlx5_ipool_free(fm->flow_ipool, mtr_flow_id);
			ret = -rte_errno;
			goto exit;
		}
		if (mtr_flow_id) {
			dev_flow->handle->split_flow_id = mtr_flow_id;
			dev_flow->handle->is_meter_flow_id = 1;
		}
		if (!fm->def_policy) {
			if (!set_mtr_reg && fm->drop_cnt)
				ret =
			flow_meter_create_drop_flow_with_org_pattern(dev, flow,
							&sfx_attr, items,
							flow_split_info,
							fm, error);
			goto exit;
		}
		/* Set the sfx group attr. */
		sfx_attr.group = sfx_attr.transfer ?
				 (MLX5_FLOW_TABLE_LEVEL_METER - 1) :
				 MLX5_FLOW_TABLE_LEVEL_METER;
		flow_split_info->prefix_layers =
			flow_get_prefix_layer_flags(dev_flow);
		flow_split_info->prefix_mark |= wks->mark;
		flow_split_info->table_id = MLX5_MTR_TABLE_ID_SUFFIX;
	}
	/* Add the suffix subflow (or the unsplit flow when there is no meter). */
	ret = flow_create_split_metadata(dev, flow,
					 &sfx_attr, sfx_items ?
					 sfx_items : items,
					 sfx_actions ? sfx_actions : actions,
					 flow_split_info, error);
exit:
	if (sfx_actions)
		mlx5_free(sfx_actions);
	return ret;
}

/**
 * The splitting for sample feature.
 *
 * Once the Sample action is detected in the action list, the flow actions
 * should be split into a prefix sub flow and a suffix sub flow.
 *
 * The original items remain in the prefix sub flow; all actions preceding the
 * sample action and the sample action itself will be copied to the prefix
 * sub flow, the actions following the sample action will be copied to the
 * suffix sub flow, and the Queue action is always located in the suffix
 * sub flow.
 *
 * In order to make the packet from the prefix sub flow match the suffix sub
 * flow, an extra tag action is added into the prefix sub flow, and the suffix
 * sub flow uses a tag item with the unique flow id.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Parent flow structure pointer.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[in] flow_split_info
 *   Pointer to flow split info structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @return
 *   0 on success, negative value otherwise
 */
static int
flow_create_split_sample(struct rte_eth_dev *dev,
			 struct rte_flow *flow,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_item items[],
			 const struct rte_flow_action actions[],
			 struct mlx5_flow_split_info *flow_split_info,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_action *sfx_actions = NULL;
	struct rte_flow_action *pre_actions = NULL;
	struct rte_flow_item *sfx_items = NULL;
	struct mlx5_flow *dev_flow = NULL;
	struct rte_flow_attr sfx_attr = *attr;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5_flow_dv_sample_resource *sample_res;
	struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
	struct mlx5_flow_tbl_resource *sfx_tbl;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
#endif
	size_t act_size;
	size_t item_size;
	uint32_t fdb_tx = 0;
	int32_t tag_id = 0;
	int actions_n = 0;
	int sample_action_pos;
	int qrss_action_pos;
	int add_tag = 0;
	int modify_after_mirror = 0;
	uint16_t jump_table = 0;
	const uint32_t next_ft_step = 1;
	int ret = 0;
	struct mlx5_priv *item_port_priv = NULL;
	const struct rte_flow_item *item;

	if (priv->sampler_en)
		actions_n = flow_check_match_action(actions, attr,
					RTE_FLOW_ACTION_TYPE_SAMPLE,
					&sample_action_pos, &qrss_action_pos,
					&modify_after_mirror);
	if (actions_n) {
		/* The prefix actions must include sample, tag, end. */
		act_size = sizeof(struct rte_flow_action) * (actions_n * 2 + 1)
			   + sizeof(struct mlx5_rte_flow_action_set_tag);
		item_size = sizeof(struct rte_flow_item) * SAMPLE_SUFFIX_ITEM +
			    sizeof(struct mlx5_rte_flow_item_tag) * 2;
		sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size +
					  item_size), 0, SOCKET_ID_ANY);
		if (!sfx_actions)
			return rte_flow_error_set(error, ENOMEM,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "no memory to split "
						  "sample flow");
		for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
			if (item->type == RTE_FLOW_ITEM_TYPE_PORT_ID) {
				const struct rte_flow_item_port_id *spec;

				spec = (const struct rte_flow_item_port_id *)item->spec;
				if (spec)
					item_port_priv =
						mlx5_port_to_eswitch_info(spec->id, true);
				break;
			} else if (item->type == RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT) {
				const struct rte_flow_item_ethdev *spec;

				spec = (const struct rte_flow_item_ethdev *)item->spec;
				if (spec)
					item_port_priv =
						mlx5_port_to_eswitch_info(spec->port_id, true);
				break;
			} else if (item->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR) {
				const struct rte_flow_item_ethdev *spec;

				spec = (const struct rte_flow_item_ethdev *)item->spec;
				if (spec)
					item_port_priv =
						mlx5_port_to_eswitch_info(spec->port_id, true);
				break;
			}
		}
		/* The representor_id is UINT16_MAX for uplink. */
		fdb_tx = (attr->transfer &&
			  flow_source_vport_representor(priv, item_port_priv));
		/*
		 * When reg_c_preserve is set, metadata registers Cx preserve
		 * their value even through packet duplication.
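		 * A unique tag can therefore be added in the prefix subflow
		 * even for FDB Tx mirroring, see the add_tag computation
		 * below.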
		 */
		add_tag = (!fdb_tx ||
			   priv->sh->cdev->config.hca_attr.reg_c_preserve);
		if (add_tag)
			sfx_items = (struct rte_flow_item *)((char *)sfx_actions
					+ act_size);
		if (modify_after_mirror)
			jump_table = attr->group * MLX5_FLOW_TABLE_FACTOR +
				     next_ft_step;
		pre_actions = sfx_actions + actions_n;
		tag_id = flow_sample_split_prep(dev, add_tag, items, sfx_items,
						actions, sfx_actions,
						pre_actions, actions_n,
						sample_action_pos,
						qrss_action_pos, jump_table,
						error);
		if (tag_id < 0 || (add_tag && !tag_id)) {
			ret = -rte_errno;
			goto exit;
		}
		if (modify_after_mirror)
			flow_split_info->skip_scale =
					1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT;
		/* Add the prefix subflow. */
		ret = flow_create_split_inner(dev, flow, &dev_flow, attr,
					      items, pre_actions,
					      flow_split_info, error);
		if (ret) {
			ret = -rte_errno;
			goto exit;
		}
		dev_flow->handle->split_flow_id = tag_id;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
		if (!modify_after_mirror) {
			/* Set the sfx group attr. */
			sample_res = (struct mlx5_flow_dv_sample_resource *)
						dev_flow->dv.sample_res;
			sfx_tbl = (struct mlx5_flow_tbl_resource *)
						sample_res->normal_path_tbl;
			sfx_tbl_data = container_of(sfx_tbl,
						struct mlx5_flow_tbl_data_entry,
						tbl);
			sfx_attr.group = sfx_attr.transfer ?
				(sfx_tbl_data->level - 1) : sfx_tbl_data->level;
		} else {
			MLX5_ASSERT(attr->transfer);
			sfx_attr.group = jump_table;
		}
		flow_split_info->prefix_layers =
				flow_get_prefix_layer_flags(dev_flow);
		MLX5_ASSERT(wks);
		flow_split_info->prefix_mark |= wks->mark;
		/* The suffix group level has already been scaled with the
		 * factor; set MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1
		 * to avoid scaling it again in translation.
		 */
		flow_split_info->skip_scale = 1 << MLX5_SCALE_FLOW_GROUP_BIT;
#endif
	}
	/* Add the suffix subflow. */
	ret = flow_create_split_meter(dev, flow, &sfx_attr,
				      sfx_items ? sfx_items : items,
				      sfx_actions ? sfx_actions : actions,
				      flow_split_info, error);
exit:
	if (sfx_actions)
		mlx5_free(sfx_actions);
	return ret;
}

/**
 * Split the flow to a subflow set. The splitters might be linked
 * in the chain, like this:
 * flow_create_split_outer() calls:
 *   flow_create_split_meter() calls:
 *     flow_create_split_metadata(meter_subflow_0) calls:
 *       flow_create_split_inner(metadata_subflow_0)
 *       flow_create_split_inner(metadata_subflow_1)
 *       flow_create_split_inner(metadata_subflow_2)
 *     flow_create_split_metadata(meter_subflow_1) calls:
 *       flow_create_split_inner(metadata_subflow_0)
 *       flow_create_split_inner(metadata_subflow_1)
 *       flow_create_split_inner(metadata_subflow_2)
 *
 * This provides a flexible way to add new levels of flow splitting.
 * All successfully created subflows are included in the parent flow
 * dev_flow list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Parent flow structure pointer.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[in] flow_split_info
 *   Pointer to flow split info structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @return
 *   0 on success, negative value otherwise
 */
static int
flow_create_split_outer(struct rte_eth_dev *dev,
			struct rte_flow *flow,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item items[],
			const struct rte_flow_action actions[],
			struct mlx5_flow_split_info *flow_split_info,
			struct rte_flow_error *error)
{
	int ret;

	ret = flow_create_split_sample(dev, flow, attr, items,
				       actions, flow_split_info, error);
	MLX5_ASSERT(ret <= 0);
	return ret;
}

static inline struct mlx5_flow_tunnel *
flow_tunnel_from_rule(const struct mlx5_flow *flow)
{
	struct mlx5_flow_tunnel *tunnel;

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
	tunnel = (typeof(tunnel))flow->tunnel;
#pragma GCC diagnostic pop

	return tunnel;
}

/**
 * Create a flow and add it to the flow list of the given type.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param type
 *   Flow type, selects the flow list (indexed pool) the created flow is
 *   inserted into. It is the caller's responsibility to track the created
 *   flow through the returned index.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[in] external
 *   This flow rule is created by a request external to the PMD.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow index on success, 0 otherwise and rte_errno is set.
 */
uintptr_t
flow_legacy_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item items[],
			const struct rte_flow_action original_actions[],
			bool external, struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow *flow = NULL;
	struct mlx5_flow *dev_flow;
	const struct rte_flow_action_rss *rss = NULL;
	struct mlx5_translated_action_handle
		indir_actions[MLX5_MAX_INDIRECT_ACTIONS];
	int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS;
	union {
		struct mlx5_flow_expand_rss buf;
		uint8_t buffer[8192];
	} expand_buffer;
	union {
		struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
		uint8_t buffer[2048];
	} actions_rx;
	union {
		struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
		uint8_t buffer[2048];
	} actions_hairpin_tx;
	union {
		struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS];
		uint8_t buffer[2048];
	} items_tx;
	struct mlx5_rte_flow_item_sq sq_specs[RTE_MAX_QUEUES_PER_PORT];
	struct mlx5_flow_expand_rss *buf = &expand_buffer.buf;
	struct mlx5_flow_rss_desc *rss_desc;
	const struct rte_flow_action *p_actions_rx;
	uint32_t i;
	uint32_t idx = 0;
	int hairpin_flow;
	struct rte_flow_attr attr_tx = { .priority = 0 };
	const struct rte_flow_action *actions;
	struct rte_flow_action *translated_actions = NULL;
	struct mlx5_flow_tunnel *tunnel;
	struct tunnel_default_miss_ctx default_miss_ctx = { 0, };
	struct mlx5_flow_workspace *wks = mlx5_flow_push_thread_workspace();
	struct mlx5_flow_split_info flow_split_info = {
		.external = !!external,
		.skip_scale = 0,
		.flow_idx = 0,
		.prefix_mark = 0,
		.prefix_layers = 0,
		.table_id = 0
	};
	int ret;
	struct mlx5_shared_action_rss *shared_rss_action;

	if (!wks)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "failed to push flow workspace");
	memset(indir_actions, 0, sizeof(indir_actions));
	rss_desc = &wks->rss_desc;
	ret = flow_action_handles_translate(dev, original_actions,
					    indir_actions,
					    &indir_actions_n,
					    &translated_actions, error);
	if (ret < 0) {
		MLX5_ASSERT(translated_actions == NULL);
		return 0;
	}
	actions = translated_actions ? translated_actions : original_actions;
	p_actions_rx = actions;
	hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
	ret = flow_drv_validate(dev, attr, items, p_actions_rx,
				external, hairpin_flow, error);
	if (ret < 0)
		goto error_before_hairpin_split;
	flow = mlx5_ipool_zmalloc(priv->flows[type], &idx);
	if (!flow) {
		rte_errno = ENOMEM;
		goto error_before_hairpin_split;
	}
	if (hairpin_flow > 0) {
		if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
			rte_errno = EINVAL;
			goto error_before_hairpin_split;
		}
		flow_hairpin_split(dev, actions, actions_rx.actions,
				   actions_hairpin_tx.actions, items_tx.items,
				   idx);
		p_actions_rx = actions_rx.actions;
	}
	flow_split_info.flow_idx = idx;
	flow->drv_type = flow_get_drv_type(dev, attr);
	MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
		    flow->drv_type < MLX5_FLOW_TYPE_MAX);
	memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue));
	/* RSS Action only works on NIC RX domain */
	if (attr->ingress)
		rss = flow_get_rss_action(dev, p_actions_rx);
	if (rss) {
		MLX5_ASSERT(rss->queue_num <= RTE_ETH_RSS_RETA_SIZE_512);
		rss_desc->symmetric_hash_function = MLX5_RSS_IS_SYMM(rss->func);
		/*
		 * The following information is required by
		 * mlx5_flow_hashfields_adjust() in advance.
		 */
		rss_desc->level = rss->level;
		/* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
		rss_desc->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
	}
	flow->dev_handles = 0;
	if (rss && rss->types) {
		unsigned int graph_root;

		graph_root = find_graph_root(rss->level);
		ret = mlx5_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
					   items, rss->types,
					   mlx5_support_expansion, graph_root);
		MLX5_ASSERT(ret > 0 &&
			    (unsigned int)ret < sizeof(expand_buffer.buffer));
		if (rte_log_can_log(mlx5_logtype, RTE_LOG_DEBUG)) {
			for (i = 0; i < buf->entries; ++i)
				mlx5_dbg__print_pattern(buf->entry[i].pattern);
		}
	} else {
		ret = mlx5_flow_expand_sqn((struct mlx5_flow_expand_sqn *)buf,
					   sizeof(expand_buffer.buffer),
					   items, sq_specs);
		if (ret) {
			rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
					   NULL, "not enough memory for rte_flow");
			goto error;
		}
		if (buf->entries == 0) {
			buf->entries = 1;
			buf->entry[0].pattern = (void *)(uintptr_t)items;
		}
	}
	rss_desc->shared_rss = flow_get_shared_rss_action(dev, indir_actions,
							  indir_actions_n);
	for (i = 0; i < buf->entries; ++i) {
		/* Initialize flow split data. */
		flow_split_info.prefix_layers = 0;
		flow_split_info.prefix_mark = 0;
		flow_split_info.skip_scale = 0;
		/*
		 * The splitter may create multiple dev_flows,
		 * depending on configuration.
		 * In the simplest case it just creates the unmodified
		 * original flow.
		 */
		ret = flow_create_split_outer(dev, flow, attr,
					      buf->entry[i].pattern,
					      p_actions_rx, &flow_split_info,
					      error);
		if (ret < 0)
			goto error;
		if (is_flow_tunnel_steer_rule(wks->flows[0].tof_type)) {
			ret = flow_tunnel_add_default_miss(dev, flow, attr,
							   p_actions_rx,
							   idx,
							   wks->flows[0].tunnel,
							   &default_miss_ctx,
							   error);
			if (ret < 0) {
				mlx5_free(default_miss_ctx.queue);
				goto error;
			}
		}
	}
	/* Create the tx flow. */
	if (hairpin_flow) {
		attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
		attr_tx.ingress = 0;
		attr_tx.egress = 1;
		dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items,
					    actions_hairpin_tx.actions,
					    idx, error);
		if (!dev_flow)
			goto error;
		dev_flow->flow = flow;
		dev_flow->external = 0;
		SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
			      dev_flow->handle, next);
		ret = flow_drv_translate(dev, dev_flow, &attr_tx,
					 items_tx.items,
					 actions_hairpin_tx.actions, error);
		if (ret < 0)
			goto error;
	}
	/*
	 * Update the metadata register copy table. If the extensive
	 * metadata feature is enabled and registers are supported
	 * we might create an extra rte_flow for each unique
	 * MARK/FLAG action ID.
	 *
	 * The table is updated for ingress and transfer flows only, because
	 * the egress flows belong to a different device and the copy table
	 * should be updated in the peer NIC Rx domain.
	 */
	if ((attr->ingress || attr->transfer) &&
	    (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) {
		ret = flow_mreg_update_copy_table(dev, flow, actions, error);
		if (ret)
			goto error;
	}
	/*
	 * Apply the flow immediately if it is external (created by the
	 * application), or the device is already started, or this is an
	 * mreg discover flow.
	 */
	if (external || dev->data->dev_started ||
	    (attr->group == MLX5_FLOW_MREG_CP_TABLE_GROUP &&
	     attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)) {
		ret = flow_drv_apply(dev, flow, error);
		if (ret < 0)
			goto error;
	}
	flow->type = type;
	flow_rxq_flags_set(dev, flow);
	rte_free(translated_actions);
	tunnel = flow_tunnel_from_rule(wks->flows);
	if (tunnel) {
		flow->tunnel = 1;
		flow->tunnel_id = tunnel->tunnel_id;
		rte_atomic_fetch_add_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed);
		mlx5_free(default_miss_ctx.queue);
	}
	mlx5_flow_pop_thread_workspace();
	return idx;
error:
	MLX5_ASSERT(flow);
	ret = rte_errno; /* Save rte_errno before cleanup. */
	flow_mreg_del_copy_action(dev, flow);
	flow_drv_destroy(dev, flow);

	if (rss_desc->shared_rss) {
		shared_rss_action = (struct mlx5_shared_action_rss *)
			mlx5_ipool_get
			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
			 rss_desc->shared_rss);
		if (shared_rss_action)
			rte_atomic_fetch_sub_explicit(&shared_rss_action->refcnt, 1,
						      rte_memory_order_relaxed);
	}
	mlx5_ipool_free(priv->flows[type], idx);
	rte_errno = ret; /* Restore rte_errno. */
error_before_hairpin_split:
	mlx5_flow_pop_thread_workspace();
	rte_free(translated_actions);
	return 0;
}

/**
 * Create a dedicated flow rule on e-switch table 0 (root table), to direct all
 * incoming packets to table 1.
 *
 * Other flow rules, requested for group n, will be created in
 * e-switch table n+1.
 * A jump action targeting e-switch group n will be created as a jump to
 * table n+1 (e.g. a rule requested for group 2 is created in table 3).
 *
 * Used when working in switchdev mode, to utilise advantages of table 1
 * and above.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   Pointer to flow on success, NULL otherwise and rte_errno is set.
 */
struct rte_flow *
mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
{
	const struct rte_flow_attr attr = {
		.group = 0,
		.priority = 0,
		.ingress = 0,
		.egress = 0,
		.transfer = 1,
	};
	const struct rte_flow_item pattern = {
		.type = RTE_FLOW_ITEM_TYPE_END,
	};
	struct rte_flow_action_jump jump = {
		.group = 1,
	};
	const struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_JUMP,
			.conf = &jump,
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		},
	};
	struct rte_flow_error error;

	return (void *)(uintptr_t)mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
							&attr, &pattern,
							actions, false, &error);
}

/**
 * Create a dedicated flow rule on e-switch table 1, which matches the ESW
 * manager and the SQ number, and directs all packets to the peer vport.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param sq_num
 *   SQ number.
 *
 * @return
 *   Flow ID on success, 0 otherwise and rte_errno is set.
 */
uint32_t
mlx5_flow_create_devx_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sq_num)
{
	struct rte_flow_attr attr = {
		.group = 0,
		.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
		.ingress = 0,
		.egress = 0,
		.transfer = 1,
	};
	struct rte_flow_item_port_id port_spec = {
		.id = MLX5_PORT_ESW_MGR,
	};
	struct mlx5_rte_flow_item_sq sq_spec = {
		.queue = sq_num,
	};
	struct rte_flow_item pattern[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_PORT_ID,
			.spec = &port_spec,
		},
		{
			.type = (enum rte_flow_item_type)
				MLX5_RTE_FLOW_ITEM_TYPE_SQ,
			.spec = &sq_spec,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
	};
	struct rte_flow_action_jump jump = {
		.group = 1,
	};
	struct rte_flow_action_port_id port = {
		.id = dev->data->port_id,
	};
	struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_JUMP,
			.conf = &jump,
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		},
	};
	struct rte_flow_error error;

	/*
	 * Creates group 0, highest priority jump flow.
	 * Matches txq to bypass kernel packets.
	 */
	if (mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern, actions,
				  false, &error) == 0)
		return 0;
	/* Create group 1, lowest priority redirect flow for txq. */
	attr.group = 1;
	actions[0].conf = &port;
	actions[0].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
	return mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern,
				     actions, false, &error);
}

/**
 * Validate a flow supported by the NIC.
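 *
 * An illustrative application-side check before creation (a sketch;
 * port_id, attr, pattern and actions are the caller's, error handling
 * elided):
 *
 * @code
 * struct rte_flow_error err;
 * struct rte_flow *f = NULL;
 *
 * if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *	f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 * @endcode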
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
int
mlx5_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action original_actions[],
		   struct rte_flow_error *error)
{
	int hairpin_flow;
	struct mlx5_translated_action_handle
		indir_actions[MLX5_MAX_INDIRECT_ACTIONS];
	int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS;
	const struct rte_flow_action *actions;
	struct rte_flow_action *translated_actions = NULL;
	int ret = flow_action_handles_translate(dev, original_actions,
						indir_actions,
						&indir_actions_n,
						&translated_actions, error);

	if (ret)
		return ret;
	actions = translated_actions ? translated_actions : original_actions;
	hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
	ret = flow_drv_validate(dev, attr, items, actions,
				true, hairpin_flow, error);
	rte_free(translated_actions);
	return ret;
}

static int
mlx5_flow_cache_flow_info(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const uint32_t orig_prio,
			  const struct rte_flow_item *items,
			  const struct rte_flow_action *actions,
			  uint32_t flow_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_pmd_mlx5_flow_engine_mode_info *mode_info = &priv->mode_info;
	struct mlx5_dv_flow_info *flow_info, *tmp_info;
	struct rte_flow_error error;
	int len, ret;

	flow_info = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*flow_info), 0, SOCKET_ID_ANY);
	if (!flow_info) {
		DRV_LOG(ERR, "Not enough memory for flow_info caching.");
		return -1;
	}
	flow_info->orig_prio = orig_prio;
	flow_info->attr = *attr;
	/* A standby mode rule is always saved in the low priority entry. */
	flow_info->flow_idx_low_prio = flow_idx;

	/* Store matching items. */
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, NULL, 0, items, &error);
	if (ret <= 0) {
		DRV_LOG(ERR, "Can't get items length.");
		goto end;
	}
	len = RTE_ALIGN(ret, 16);
	flow_info->items = mlx5_malloc(MLX5_MEM_ZERO, len, 0, SOCKET_ID_ANY);
	if (!flow_info->items) {
		DRV_LOG(ERR, "Not enough memory for items caching.");
		goto end;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, flow_info->items, ret, items, &error);
	if (ret <= 0) {
		DRV_LOG(ERR, "Can't duplicate items.");
		goto end;
	}

	/* Store flow actions. */
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, actions, &error);
	if (ret <= 0) {
		DRV_LOG(ERR, "Can't get actions length.");
		goto end;
	}
	len = RTE_ALIGN(ret, 16);
	flow_info->actions = mlx5_malloc(MLX5_MEM_ZERO, len, 0, SOCKET_ID_ANY);
	if (!flow_info->actions) {
		DRV_LOG(ERR, "Not enough memory for actions caching.");
		goto end;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, flow_info->actions, ret, actions, &error);
	if (ret <= 0) {
		DRV_LOG(ERR, "Can't duplicate actions.");
		goto end;
	}

	/* Insert at the list end.
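	 * The sys/queue.h LIST has no tail pointer, so walk to the last
	 * entry before inserting.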
	 */
	if (LIST_EMPTY(&mode_info->hot_upgrade)) {
		LIST_INSERT_HEAD(&mode_info->hot_upgrade, flow_info, next);
	} else {
		tmp_info = LIST_FIRST(&mode_info->hot_upgrade);
		while (LIST_NEXT(tmp_info, next))
			tmp_info = LIST_NEXT(tmp_info, next);
		LIST_INSERT_AFTER(tmp_info, flow_info, next);
	}
	return 0;
end:
	if (flow_info->items)
		mlx5_free(flow_info->items);
	if (flow_info->actions)
		mlx5_free(flow_info->actions);
	mlx5_free(flow_info);
	return -1;
}

static int
mlx5_flow_cache_flow_toggle(struct rte_eth_dev *dev, bool orig_prio)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_pmd_mlx5_flow_engine_mode_info *mode_info = &priv->mode_info;
	struct mlx5_dv_flow_info *flow_info;
	struct rte_flow_attr attr;
	struct rte_flow_error error;
	struct rte_flow *high, *low;

	flow_info = LIST_FIRST(&mode_info->hot_upgrade);
	while (flow_info) {
		/* A DUP flow may have the same priority. */
		if (flow_info->orig_prio != flow_info->attr.priority) {
			attr = flow_info->attr;
			if (orig_prio)
				attr.priority = flow_info->orig_prio;
			flow_info->flow_idx_high_prio = mlx5_flow_list_create(dev,
					MLX5_FLOW_TYPE_GEN, &attr,
					flow_info->items, flow_info->actions,
					true, &error);
			if (!flow_info->flow_idx_high_prio) {
				DRV_LOG(ERR, "Priority toggle failed internally.");
				goto err;
			}
		}
		flow_info = LIST_NEXT(flow_info, next);
	}
	/* Delete the low priority rules and swap the flow handles. */
	flow_info = LIST_FIRST(&mode_info->hot_upgrade);
	while (flow_info) {
		MLX5_ASSERT(flow_info->flow_idx_low_prio);
		if (flow_info->orig_prio != flow_info->attr.priority) {
			high = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
					      flow_info->flow_idx_high_prio);
			low = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
					     flow_info->flow_idx_low_prio);
			if (high && low) {
				RTE_SWAP(*low, *high);
				mlx5_flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
						       flow_info->flow_idx_low_prio);
				flow_info->flow_idx_high_prio = 0;
			}
		}
		flow_info = LIST_NEXT(flow_info, next);
	}
	return 0;
err:
	/* Destroy preceding successful high priority rules. */
	flow_info = LIST_FIRST(&mode_info->hot_upgrade);
	while (flow_info) {
		if (flow_info->orig_prio != flow_info->attr.priority) {
			if (flow_info->flow_idx_high_prio)
				mlx5_flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
						       flow_info->flow_idx_high_prio);
			else
				break;
			flow_info->flow_idx_high_prio = 0;
		}
		flow_info = LIST_NEXT(flow_info, next);
	}
	return -1;
}

/**
 * Set the mode of the flow engine of a process to active or standby during live migration.
 *
 * @param[in] mode
 *   MLX5 flow engine mode, @see `enum rte_pmd_mlx5_flow_engine_mode`.
 * @param[in] flags
 *   Flow engine mode specific flags.
 *
 * @return
 *   Negative value on error, positive on success.
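 *
 * An illustrative hot-upgrade sequence from the application side (a
 * sketch; error handling elided, the flag choice is an example):
 *
 * @code
 * // New process starts in standby: its rules are cached and inserted
 * // at low priority while the old process still owns the traffic.
 * rte_pmd_mlx5_flow_engine_set_mode(RTE_PMD_MLX5_FLOW_ENGINE_MODE_STANDBY,
 *		RTE_PMD_MLX5_FLOW_ENGINE_FLAG_STANDBY_DUP_INGRESS);
 * // ... create the flow rules of the new process ...
 * // Take over: cached rules are toggled back to their original priority.
 * rte_pmd_mlx5_flow_engine_set_mode(RTE_PMD_MLX5_FLOW_ENGINE_MODE_ACTIVE, 0);
 * @endcode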
 */
int
rte_pmd_mlx5_flow_engine_set_mode(enum rte_pmd_mlx5_flow_engine_mode mode, uint32_t flags)
{
	struct mlx5_priv *priv;
	struct rte_pmd_mlx5_flow_engine_mode_info *mode_info;
	struct mlx5_dv_flow_info *flow_info, *tmp_info;
	uint16_t port, port_id;
	uint16_t toggle_num = 0;
	struct rte_eth_dev *dev;
	enum rte_pmd_mlx5_flow_engine_mode orig_mode;
	uint32_t orig_flags;
	bool need_toggle = false;

	/* Check if the flag combination is supported. */
	if (flags && flags != RTE_PMD_MLX5_FLOW_ENGINE_FLAG_STANDBY_DUP_INGRESS) {
		DRV_LOG(ERR, "Unsupported flags %u", flags);
		return -1;
	}
	MLX5_ETH_FOREACH_DEV(port, NULL) {
		dev = &rte_eth_devices[port];
		priv = dev->data->dev_private;
		mode_info = &priv->mode_info;
		/* No mode change. Assume all devices hold the same mode. */
		if (mode_info->mode == mode) {
			DRV_LOG(INFO, "Process flow engine is already in mode %u", mode);
			if (mode_info->mode_flag != flags &&
			    !LIST_EMPTY(&mode_info->hot_upgrade)) {
				DRV_LOG(ERR, "Port %u has a rule cache with different flag %u\n",
					port, mode_info->mode_flag);
				orig_mode = mode_info->mode;
				orig_flags = mode_info->mode_flag;
				goto err;
			}
			mode_info->mode_flag = flags;
			toggle_num++;
			continue;
		}
		/* Active -> standby. */
		if (mode == RTE_PMD_MLX5_FLOW_ENGINE_MODE_STANDBY) {
			if (!LIST_EMPTY(&mode_info->hot_upgrade)) {
				DRV_LOG(ERR, "Cached rules exist");
				orig_mode = mode_info->mode;
				orig_flags = mode_info->mode_flag;
				goto err;
			}
			mode_info->mode_flag = flags;
			mode_info->mode = mode;
			toggle_num++;
		/* Standby -> active. */
		} else if (mode == RTE_PMD_MLX5_FLOW_ENGINE_MODE_ACTIVE) {
			if (LIST_EMPTY(&mode_info->hot_upgrade)) {
				DRV_LOG(INFO, "No cached rule exists");
			} else {
				if (mlx5_flow_cache_flow_toggle(dev, true)) {
					orig_mode = mode_info->mode;
					orig_flags = mode_info->mode_flag;
					need_toggle = true;
					goto err;
				}
			}
			toggle_num++;
		}
	}
	if (mode == RTE_PMD_MLX5_FLOW_ENGINE_MODE_ACTIVE) {
		/* Clear cached flow rules. */
		MLX5_ETH_FOREACH_DEV(port, NULL) {
			priv = rte_eth_devices[port].data->dev_private;
			mode_info = &priv->mode_info;
			flow_info = LIST_FIRST(&mode_info->hot_upgrade);
			while (flow_info) {
				tmp_info = LIST_NEXT(flow_info, next);
				LIST_REMOVE(flow_info, next);
				mlx5_free(flow_info->actions);
				mlx5_free(flow_info->items);
				mlx5_free(flow_info);
				flow_info = tmp_info;
			}
			MLX5_ASSERT(LIST_EMPTY(&mode_info->hot_upgrade));
		}
	}
	return toggle_num;
err:
	/* Rollback all preceding successful ports. */
	MLX5_ETH_FOREACH_DEV(port_id, NULL) {
		if (port_id == port)
			break;
		priv = rte_eth_devices[port_id].data->dev_private;
		mode_info = &priv->mode_info;
		if (need_toggle && !LIST_EMPTY(&mode_info->hot_upgrade) &&
		    mlx5_flow_cache_flow_toggle(&rte_eth_devices[port_id], false))
			return -EPERM;
		mode_info->mode = orig_mode;
		mode_info->mode_flag = orig_flags;
	}
	return -EINVAL;
}
/**
 * Create a flow.
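 *
 * An illustrative call from an application (a sketch; port_id is a
 * started port, pattern and actions are END-terminated arrays):
 *
 * @code
 * struct rte_flow_error err;
 * struct rte_flow *f;
 *
 * f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 * if (f == NULL)
 *	printf("flow creation failed: %s\n",
 *	       err.message ? err.message : "(no stated reason)");
 * @endcode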
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
struct rte_flow *
mlx5_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_attr *new_attr = (void *)(uintptr_t)attr;
	uint32_t prio = attr->priority;
	uintptr_t flow_idx;

	/*
	 * If the device is not started yet, an application is not allowed
	 * to create a flow. PMD default flows and traffic control flows
	 * are not affected.
	 */
	if (unlikely(!dev->data->dev_started)) {
		DRV_LOG(DEBUG, "port %u is not started when "
			"inserting a flow", dev->data->port_id);
		rte_flow_error_set(error, ENODEV,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "port not started");
		return NULL;
	}
	if (unlikely(mlx5_need_cache_flow(priv, attr))) {
		if (attr->transfer ||
		    (attr->ingress && !(priv->mode_info.mode_flag &
		     RTE_PMD_MLX5_FLOW_ENGINE_FLAG_STANDBY_DUP_INGRESS)))
			new_attr->priority += 1;
	}
	flow_idx = mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_GEN, attr, items, actions,
					 true, error);
	if (!flow_idx)
		return NULL;
	if (unlikely(mlx5_need_cache_flow(priv, attr))) {
		if (mlx5_flow_cache_flow_info(dev, attr, prio, items, actions, flow_idx)) {
			mlx5_flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx);
			flow_idx = 0;
		}
	}
	return (void *)(uintptr_t)flow_idx;
}

uintptr_t
mlx5_flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
		      const struct rte_flow_attr *attr,
		      const struct rte_flow_item items[],
		      const struct rte_flow_action actions[],
		      bool external, struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops;
	enum mlx5_flow_drv_type drv_type = flow_get_drv_type(dev, attr);

	fops = flow_get_drv_ops(drv_type);
	return fops->list_create(dev, type, attr, items, actions, external,
				 error);
}

/**
 * Destroy a flow in a list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] type
 *   Flow type of the flow to destroy.
 * @param[in] flow_idx
 *   Index of flow to destroy.
 */
void
flow_legacy_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
			 uintptr_t flow_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow *flow = mlx5_ipool_get(priv->flows[type], (uint32_t)flow_idx);

	if (!flow)
		return;
	MLX5_ASSERT((type >= MLX5_FLOW_TYPE_CTL) && (type < MLX5_FLOW_TYPE_MAXI));
	MLX5_ASSERT(flow->type == type);
	/*
	 * Update RX queue flags only if port is started, otherwise it is
	 * already clean.
	 */
	if (dev->data->dev_started)
		flow_rxq_flags_trim(dev, flow);
	flow_drv_destroy(dev, flow);
	if (flow->tunnel) {
		struct mlx5_flow_tunnel *tunnel;

		tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
		RTE_VERIFY(tunnel);
		if (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1,
						    rte_memory_order_relaxed) - 1))
			mlx5_flow_tunnel_free(dev, tunnel);
	}
	flow_mreg_del_copy_action(dev, flow);
	mlx5_ipool_free(priv->flows[type], flow_idx);
}

void
mlx5_flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
		       uintptr_t flow_idx)
{
	const struct mlx5_flow_driver_ops *fops;
	struct rte_flow_attr attr = { .transfer = 0 };
	enum mlx5_flow_drv_type drv_type = flow_get_drv_type(dev, &attr);

	fops = flow_get_drv_ops(drv_type);
	fops->list_destroy(dev, type, flow_idx);
}

/**
 * Destroy all flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param type
 *   Flow type to be flushed.
 * @param active
 *   If true, flushing is invoked actively (on port stop) and the number
 *   of flushed flows is logged.
 */
void
mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
		     bool active)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t num_flushed = 0, fidx = 1;
	struct rte_flow *flow;
	struct rte_pmd_mlx5_flow_engine_mode_info *mode_info = &priv->mode_info;
	struct mlx5_dv_flow_info *flow_info;

#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	if (priv->sh->config.dv_flow_en == 2 &&
	    type == MLX5_FLOW_TYPE_GEN) {
		flow_hw_q_flow_flush(dev, NULL);
	}
#endif
	MLX5_IPOOL_FOREACH(priv->flows[type], fidx, flow) {
		if (priv->sh->config.dv_flow_en == 2) {
			mlx5_flow_list_destroy(dev, type, (uintptr_t)flow);
		} else {
			mlx5_flow_list_destroy(dev, type, fidx);
		}
		if (unlikely(mlx5_need_cache_flow(priv, NULL) && type == MLX5_FLOW_TYPE_GEN)) {
			flow_info = LIST_FIRST(&mode_info->hot_upgrade);
			while (flow_info) {
				/* Remove the cached flow info. */
				if (flow_info->flow_idx_low_prio == (uint32_t)(uintptr_t)fidx) {
					MLX5_ASSERT(!flow_info->flow_idx_high_prio);
					LIST_REMOVE(flow_info, next);
					mlx5_free(flow_info->items);
					mlx5_free(flow_info->actions);
					mlx5_free(flow_info);
					break;
				}
				flow_info = LIST_NEXT(flow_info, next);
			}
		}
		num_flushed++;
	}
	if (active) {
		DRV_LOG(INFO, "port %u: %u flows flushed before stopping",
			dev->data->port_id, num_flushed);
	}
}

/**
 * Stop all default actions for flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_flow_stop_default(struct rte_eth_dev *dev)
{
#ifdef HAVE_MLX5_HWS_SUPPORT
	struct mlx5_priv *priv = dev->data->dev_private;

	if (priv->sh->config.dv_flow_en == 2) {
		mlx5_flow_nta_del_default_copy_action(dev);
		if (!rte_atomic_load_explicit(&priv->hws_mark_refcnt,
					      rte_memory_order_relaxed))
			flow_hw_rxq_flag_set(dev, false);
		return;
	}
#endif
	flow_mreg_del_default_copy_action(dev);
	flow_rxq_flags_clear(dev);
}

/**
 * Set the rxq mark flag.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] enable
 *   Flag to enable or not.
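 *
 * All Rx queues have their rxq.mark flag toggled together; the current
 * state is tracked in priv->mark_enabled, which makes repeated calls
 * with the same value a no-op.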
 */
void
flow_hw_rxq_flag_set(struct rte_eth_dev *dev, bool enable)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	if ((!priv->mark_enabled && !enable) ||
	    (priv->mark_enabled && enable))
		return;
	for (i = 0; i < priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);

		/* With RXQ start/stop feature, RXQ might be stopped. */
		if (!rxq_ctrl)
			continue;
		rxq_ctrl->rxq.mark = enable;
	}
	priv->mark_enabled = enable;
}

/**
 * Start all default actions for flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_start_default(struct rte_eth_dev *dev)
{
	struct rte_flow_error error;
#ifdef HAVE_MLX5_HWS_SUPPORT
	struct mlx5_priv *priv = dev->data->dev_private;

	if (priv->sh->config.dv_flow_en == 2)
		return mlx5_flow_nta_add_default_copy_action(dev, &error);
#endif
	/* Make sure default copy action (reg_c[0] -> reg_b) is created. */
	return flow_mreg_add_default_copy_action(dev, &error);
}

/**
 * Release the thread specific flow workspace data.
 */
void
flow_release_workspace(void *data)
{
	struct mlx5_flow_workspace *wks = data;
	struct mlx5_flow_workspace *next;

	while (wks) {
		next = wks->next;
		free(wks);
		wks = next;
	}
}

/**
 * Get the thread specific current flow workspace.
 *
 * @return pointer to thread specific flow workspace data, NULL on error.
 */
struct mlx5_flow_workspace*
mlx5_flow_get_thread_workspace(void)
{
	struct mlx5_flow_workspace *data;

	data = mlx5_flow_os_get_specific_workspace();
	MLX5_ASSERT(data && data->inuse);
	if (!data || !data->inuse)
		DRV_LOG(ERR, "flow workspace not initialized.");
	return data;
}

/**
 * Allocate and init a new flow workspace.
 *
 * @return pointer to flow workspace data, NULL on error.
 */
static struct mlx5_flow_workspace*
flow_alloc_thread_workspace(void)
{
	size_t data_size = RTE_ALIGN(sizeof(struct mlx5_flow_workspace), sizeof(long));
	size_t rss_queue_array_size = sizeof(uint16_t) * RTE_ETH_RSS_RETA_SIZE_512;
	struct mlx5_flow_workspace *data = calloc(1, data_size +
						     rss_queue_array_size);

	if (!data) {
		DRV_LOG(ERR, "Failed to allocate flow workspace memory.");
		return NULL;
	}
	data->rss_desc.queue = RTE_PTR_ADD(data, data_size);
	return data;
}

/**
 * Get a new thread specific flow workspace.
 *
 * If the current workspace is in use, create a new one and set it as current.
 *
 * @return pointer to thread specific flow workspace data, NULL on error.
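 *
 * The workspaces form a per-thread stack; every successful push must be
 * paired with a pop, e.g. (a sketch of the internal usage pattern):
 *
 * @code
 * struct mlx5_flow_workspace *wks = mlx5_flow_push_thread_workspace();
 *
 * if (!wks)
 *	return -ENOMEM;
 * // ... translate and apply device flows using wks ...
 * mlx5_flow_pop_thread_workspace();
 * @endcode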
 */
struct mlx5_flow_workspace*
mlx5_flow_push_thread_workspace(void)
{
	struct mlx5_flow_workspace *curr;
	struct mlx5_flow_workspace *data;

	curr = mlx5_flow_os_get_specific_workspace();
	if (!curr) {
		data = flow_alloc_thread_workspace();
		if (!data)
			return NULL;
		mlx5_flow_os_workspace_gc_add(data);
	} else if (!curr->inuse) {
		data = curr;
	} else if (curr->next) {
		data = curr->next;
	} else {
		data = flow_alloc_thread_workspace();
		if (!data)
			return NULL;
		curr->next = data;
		data->prev = curr;
	}
	data->inuse = 1;
	data->flow_idx = 0;
	/* Set as current workspace */
	if (mlx5_flow_os_set_specific_workspace(data))
		DRV_LOG(ERR, "Failed to set flow workspace to thread.");
	return data;
}

/**
 * Close the current thread specific flow workspace.
 *
 * If a previous workspace is available, set it as current.
 */
void
mlx5_flow_pop_thread_workspace(void)
{
	struct mlx5_flow_workspace *data = mlx5_flow_get_thread_workspace();

	if (!data)
		return;
	if (!data->inuse) {
		DRV_LOG(ERR, "Failed to close unused flow workspace.");
		return;
	}
	data->inuse = 0;
	if (!data->prev)
		return;
	if (mlx5_flow_os_set_specific_workspace(data->prev))
		DRV_LOG(ERR, "Failed to set flow workspace to thread.");
}

/**
 * Verify that the flow list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return the number of flows not released.
 */
int
mlx5_flow_verify(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow *flow;
	uint32_t idx = 0;
	int ret = 0, i;

	for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
		MLX5_IPOOL_FOREACH(priv->flows[i], idx, flow) {
			DRV_LOG(DEBUG, "port %u flow %p still referenced",
				dev->data->port_id, (void *)flow);
			ret++;
		}
	}
	return ret;
}

/**
 * Enable the default hairpin egress flow.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param sq_num
 *   The SQ hw number.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
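 *
 * The rule matches the given SQ with the internal
 * MLX5_RTE_FLOW_ITEM_TYPE_SQ item and jumps to the hairpin TX table
 * (MLX5_HAIRPIN_TX_TABLE), see the pattern and actions built below.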
 */
int
mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
			    uint32_t sq_num)
{
	const struct rte_flow_attr attr = {
		.egress = 1,
		.priority = 0,
	};
	struct mlx5_rte_flow_item_sq queue_spec = {
		.queue = sq_num,
	};
	struct mlx5_rte_flow_item_sq queue_mask = {
		.queue = UINT32_MAX,
	};
	struct rte_flow_item items[] = {
		{
			.type = (enum rte_flow_item_type)
				MLX5_RTE_FLOW_ITEM_TYPE_SQ,
			.spec = &queue_spec,
			.last = NULL,
			.mask = &queue_mask,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
	};
	struct rte_flow_action_jump jump = {
		.group = MLX5_HAIRPIN_TX_TABLE,
	};
	struct rte_flow_action actions[2];
	uint32_t flow_idx;
	struct rte_flow_error error;

	actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
	actions[0].conf = &jump;
	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
	flow_idx = mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
					 &attr, items, actions, false, &error);
	if (!flow_idx) {
		DRV_LOG(DEBUG,
			"Failed to create ctrl flow: rte_errno(%d),"
			" type(%d), message(%s)",
			rte_errno, error.type,
			error.message ? error.message : " (no stated reason)");
		return -rte_errno;
	}
	return 0;
}

/**
 * Enable a control flow configured from the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param eth_spec
 *   An Ethernet flow spec to apply.
 * @param eth_mask
 *   An Ethernet flow mask to apply.
 * @param vlan_spec
 *   A VLAN flow spec to apply.
 * @param vlan_mask
 *   A VLAN flow mask to apply.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
		    struct rte_flow_item_eth *eth_spec,
		    struct rte_flow_item_eth *eth_mask,
		    struct rte_flow_item_vlan *vlan_spec,
		    struct rte_flow_item_vlan *vlan_mask)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_attr attr = {
		.ingress = 1,
		.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
	};
	struct rte_flow_item items[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_ETH,
			.spec = eth_spec,
			.last = NULL,
			.mask = eth_mask,
		},
		{
			.type = (vlan_spec) ?
				RTE_FLOW_ITEM_TYPE_VLAN :
				RTE_FLOW_ITEM_TYPE_END,
			.spec = vlan_spec,
			.last = NULL,
			.mask = vlan_mask,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
	};
	uint16_t queue[priv->reta_idx_n];
	struct rte_flow_action_rss action_rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0,
		.types = priv->rss_conf.rss_hf,
		.key_len = priv->rss_conf.rss_key_len,
		.queue_num = priv->reta_idx_n,
		.key = priv->rss_conf.rss_key,
		.queue = queue,
	};
	struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_RSS,
			.conf = &action_rss,
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		},
	};
	uint32_t flow_idx;
	struct rte_flow_error error;
	unsigned int i;

	if (!priv->reta_idx_n || !priv->rxqs_n)
		return 0;
	if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
		action_rss.types = 0;
	for (i = 0; i != priv->reta_idx_n; ++i)
		queue[i] = (*priv->reta_idx)[i];
	flow_idx = mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
					 &attr, items, actions, false, &error);
	if (!flow_idx)
		return -rte_errno;
	return 0;
}

/**
 * Enable a control flow configured from the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param eth_spec
 *   An Ethernet flow spec to apply.
 * @param eth_mask
 *   An Ethernet flow mask to apply.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_ctrl_flow(struct rte_eth_dev *dev,
	       struct rte_flow_item_eth *eth_spec,
	       struct rte_flow_item_eth *eth_mask)
{
	return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
}

/**
 * Create a default miss flow rule matching LACP traffic.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_lacp_miss(struct rte_eth_dev *dev)
{
	/*
	 * The LACP matching is done only by using the ether type, since using
	 * a multicast dst mac causes the kernel to give low priority to this
	 * flow.
	 */
	static const struct rte_flow_item_eth lacp_spec = {
		.hdr.ether_type = RTE_BE16(0x8809),
	};
	static const struct rte_flow_item_eth lacp_mask = {
		.hdr.ether_type = 0xffff,
	};
	const struct rte_flow_attr attr = {
		.ingress = 1,
	};
	struct rte_flow_item items[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_ETH,
			.spec = &lacp_spec,
			.mask = &lacp_mask,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
	};
	struct rte_flow_action actions[] = {
		{
			.type = (enum rte_flow_action_type)
				MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		},
	};
	struct rte_flow_error error;
	uint32_t flow_idx = mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
						  &attr, items, actions,
						  false, &error);

	if (!flow_idx)
		return -rte_errno;
	return 0;
}

/**
 * Destroy a flow.
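 *
 * Illustrative application-side teardown (a sketch; error handling
 * elided):
 *
 * @code
 * struct rte_flow_error err;
 *
 * rte_flow_destroy(port_id, flow, &err);  // one rule
 * rte_flow_flush(port_id, &err);          // or all rules of the port
 * @endcode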
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
int
mlx5_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_pmd_mlx5_flow_engine_mode_info *mode_info = &priv->mode_info;
	struct mlx5_dv_flow_info *flow_info;

	mlx5_flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
			       (uintptr_t)(void *)flow);
	if (unlikely(mlx5_need_cache_flow(priv, NULL))) {
		flow_info = LIST_FIRST(&mode_info->hot_upgrade);
		while (flow_info) {
			/* Remove the cached flow info. */
			if (flow_info->flow_idx_low_prio == (uint32_t)(uintptr_t)flow) {
				MLX5_ASSERT(!flow_info->flow_idx_high_prio);
				LIST_REMOVE(flow_info, next);
				mlx5_free(flow_info->items);
				mlx5_free(flow_info->actions);
				mlx5_free(flow_info);
				break;
			}
			flow_info = LIST_NEXT(flow_info, next);
		}
	}
	return 0;
}

/**
 * Destroy all flows.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
int
mlx5_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error __rte_unused)
{
	mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, false);
	return 0;
}

/**
 * Isolated mode.
 *
 * @see rte_flow_isolate()
 * @see rte_flow_ops
 */
int
mlx5_flow_isolate(struct rte_eth_dev *dev,
		  int enable,
		  struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (dev->data->dev_started) {
		rte_flow_error_set(error, EBUSY,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "port must be stopped first");
		return -rte_errno;
	}
	if (!enable && !priv->sh->config.repr_matching)
		return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "isolated mode cannot be disabled when "
					  "representor matching is disabled");
	priv->isolated = !!enable;
	if (enable)
		dev->dev_ops = &mlx5_dev_ops_isolate;
	else
		dev->dev_ops = &mlx5_dev_ops;

	dev->rx_descriptor_status = mlx5_rx_descriptor_status;
	dev->tx_descriptor_status = mlx5_tx_descriptor_status;

	return 0;
}

/**
 * Query a flow.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_drv_query(struct rte_eth_dev *dev,
	       struct rte_flow *eflow,
	       const struct rte_flow_action *actions,
	       void *data,
	       struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct mlx5_flow_driver_ops *fops;
	struct rte_flow *flow = NULL;
	enum mlx5_flow_drv_type ftype = MLX5_FLOW_TYPE_MIN;

	if (priv->sh->config.dv_flow_en == 2) {
#ifdef HAVE_MLX5_HWS_SUPPORT
		flow = eflow;
		ftype = MLX5_FLOW_TYPE_HW;
#endif
	} else {
		flow = (struct rte_flow *)mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
							 (uintptr_t)(void *)eflow);
	}
	if (!flow) {
		return rte_flow_error_set(error, ENOENT,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "invalid flow handle");
	}
	if (ftype == MLX5_FLOW_TYPE_MIN)
		ftype = flow->drv_type;
	MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
	fops = flow_get_drv_ops(ftype);

	return fops->query(dev, flow, actions, data, error);
}

/**
 * Query a flow.
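 *
 * An illustrative counter read from an application (a sketch; the rule
 * must have been created with a COUNT action, error handling elided):
 *
 * @code
 * struct rte_flow_error err;
 * struct rte_flow_query_count cnt = { .reset = 1 };
 * const struct rte_flow_action query[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 *
 * if (rte_flow_query(port_id, flow, query, &cnt, &err) == 0)
 *	printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n", cnt.hits, cnt.bytes);
 * @endcode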
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
int
mlx5_flow_query(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		const struct rte_flow_action *actions,
		void *data,
		struct rte_flow_error *error)
{
	int ret;

	ret = flow_drv_query(dev, flow, actions, data,
			     error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Get rte_flow callbacks.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param ops
 *   Pointer to operation-specific structure.
 *
 * @return 0
 */
int
mlx5_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
		  const struct rte_flow_ops **ops)
{
	*ops = &mlx5_flow_ops;
	return 0;
}

/**
 * Validate meter policy actions.
 * Dispatcher for action type specific validation.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] actions
 *   The meter policy actions (per color) to validate.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] is_rss
 *   Is RSS or not.
 * @param[out] domain_bitmap
 *   Domain bitmap.
 * @param[out] policy_mode
 *   Meter policy mode.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
int
mlx5_flow_validate_mtr_acts(struct rte_eth_dev *dev,
			    const struct rte_flow_action *actions[RTE_COLORS],
			    struct rte_flow_attr *attr,
			    bool *is_rss,
			    uint8_t *domain_bitmap,
			    uint8_t *policy_mode,
			    struct rte_mtr_error *error)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	return fops->validate_mtr_acts(dev, actions, attr, is_rss,
				       domain_bitmap, policy_mode, error);
}

/**
 * Destroy the meter policy actions.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] mtr_policy
 *   Meter policy struct.
 */
void
mlx5_flow_destroy_mtr_acts(struct rte_eth_dev *dev,
			   struct mlx5_flow_meter_policy *mtr_policy)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	fops->destroy_mtr_acts(dev, mtr_policy);
}

/**
 * Create policy actions, lock free
 * (the mutex should be acquired by the caller).
 * Dispatcher for action type specific call.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] mtr_policy
 *   Meter policy struct.
 * @param[in] actions
 *   Action specification used to create meter actions.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
int
mlx5_flow_create_mtr_acts(struct rte_eth_dev *dev,
			  struct mlx5_flow_meter_policy *mtr_policy,
			  const struct rte_flow_action *actions[RTE_COLORS],
			  struct rte_flow_attr *attr,
			  struct rte_mtr_error *error)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	return fops->create_mtr_acts(dev, mtr_policy, actions, attr, error);
}

/**
 * Create policy rules, lock free
 * (the mutex should be acquired by the caller).
 * Dispatcher for action type specific call.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] mtr_policy
 *   Meter policy struct.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
int
mlx5_flow_create_policy_rules(struct rte_eth_dev *dev,
			      struct mlx5_flow_meter_policy *mtr_policy)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	return fops->create_policy_rules(dev, mtr_policy);
}

/**
 * Destroy policy rules, lock free
 * (the mutex should be acquired by the caller).
 * Dispatcher for action type specific call.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] mtr_policy
 *   Meter policy struct.
 */
void
mlx5_flow_destroy_policy_rules(struct rte_eth_dev *dev,
			       struct mlx5_flow_meter_policy *mtr_policy)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	fops->destroy_policy_rules(dev, mtr_policy);
}

/**
 * Destroy the default policy table set.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 */
void
mlx5_flow_destroy_def_policy(struct rte_eth_dev *dev)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	fops->destroy_def_policy(dev);
}

/**
 * Create the default policy table set.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
int
mlx5_flow_create_def_policy(struct rte_eth_dev *dev)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	return fops->create_def_policy(dev);
}

/**
 * Create the needed meter and suffix tables.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
int
mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
			  struct mlx5_flow_meter_info *fm,
			  uint32_t mtr_idx,
			  uint8_t domain_bitmap)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	return fops->create_mtr_tbls(dev, fm, mtr_idx, domain_bitmap);
}

/**
 * Destroy the meter table set.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] fm
 *   Pointer to the meter info.
 */
void
mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
			   struct mlx5_flow_meter_info *fm)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	fops->destroy_mtr_tbls(dev, fm);
}

/**
 * Destroy the global meter drop table.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
8960  */
8961 void
8962 mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
8963 {
8964 	const struct mlx5_flow_driver_ops *fops;
8965 
8966 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8967 	fops->destroy_mtr_drop_tbls(dev);
8968 }
8969 
8970 /**
8971  * Destroy the sub-policy table with RX queue.
8972  *
8973  * @param[in] dev
8974  *   Pointer to Ethernet device.
8975  * @param[in] mtr_policy
8976  *   Pointer to meter policy table.
8977  */
8978 void
8979 mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
8980 			struct mlx5_flow_meter_policy *mtr_policy)
8981 {
8982 	const struct mlx5_flow_driver_ops *fops;
8983 
8984 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8985 	fops->destroy_sub_policy_with_rxq(dev, mtr_policy);
8986 }
8987 
8988 /**
8989  * Allocate the needed ASO flow meter id.
8990  *
8991  * @param[in] dev
8992  *   Pointer to Ethernet device.
8993  *
8994  * @return
8995  *   Index to the ASO flow meter on success, 0 otherwise.
8996  */
8997 uint32_t
8998 mlx5_flow_mtr_alloc(struct rte_eth_dev *dev)
8999 {
9000 	const struct mlx5_flow_driver_ops *fops;
9001 
9002 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
9003 	return fops->create_meter(dev);
9004 }
9005 
9006 /**
9007  * Free the ASO flow meter id.
9008  *
9009  * @param[in] dev
9010  *   Pointer to Ethernet device.
9011  * @param[in] mtr_idx
9012  *   Index to the ASO flow meter to be freed.
9013  */
9017 void
9018 mlx5_flow_mtr_free(struct rte_eth_dev *dev, uint32_t mtr_idx)
9019 {
9020 	const struct mlx5_flow_driver_ops *fops;
9021 
9022 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
9023 	fops->free_meter(dev, mtr_idx);
9024 }
9025 
9026 /**
9027  * Allocate a counter.
9028  *
9029  * @param[in] dev
9030  *   Pointer to Ethernet device structure.
9031  *
9032  * @return
9033  *   Index to allocated counter on success, 0 otherwise.
9034  */
9035 uint32_t
9036 mlx5_counter_alloc(struct rte_eth_dev *dev)
9037 {
9038 	struct rte_flow_attr attr = { .transfer = 0 };
9039 
9040 	return flow_get_drv_ops(flow_get_drv_type(dev, &attr))->counter_alloc
9041 		(dev);
9042 }
9043 
9044 /**
9045  * Free a counter.
9046  *
9047  * @param[in] dev
9048  *   Pointer to Ethernet device structure.
9049  * @param[in] cnt
9050  *   Index to the counter to be freed.
9051  */
9052 void
9053 mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
9054 {
9055 	struct rte_flow_attr attr = { .transfer = 0 };
9056 
9057 	flow_get_drv_ops(flow_get_drv_type(dev, &attr))->counter_free(dev, cnt);
9058 }
9059 
9060 /**
9061  * Query counter statistics.
9062  *
9063  * @param[in] dev
9064  *   Pointer to Ethernet device structure.
9065  * @param[in] cnt
9066  *   Index to the counter to query.
9067  * @param[in] clear
9068  *   Set to clear counter statistics.
9069  * @param[out] pkts
9070  *   Where to store the number of packet hits.
9071  * @param[out] bytes
9072  *   Where to store the number of byte hits.
9073  * @param[out] action
9074  *   Where to store the counter's associated action pointer, if any.
9075  * @return 0 on success, a negative errno value otherwise.
9076  */
9077 int
9078 mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
9079 		   bool clear, uint64_t *pkts, uint64_t *bytes, void **action)
9080 {
9081 	struct rte_flow_attr attr = { .transfer = 0 };
9082 
9083 	return flow_get_drv_ops(flow_get_drv_type(dev, &attr))->counter_query
9084 		(dev, cnt, clear, pkts, bytes, action);
9085 }
9086 
9087 /**
9088  * Get information about HWS pre-configurable resources.
9089  *
9090  * @param[in] dev
9091  *   Pointer to the rte_eth_dev structure.
9092  * @param[out] port_info
9093  *   Pointer to port information.
9094  * @param[out] queue_info
9095  *   Pointer to queue information.
9096  * @param[out] error
9097  *   Pointer to error structure.
9098  *
9099  * @return
9100  *   0 on success, a negative errno value otherwise and rte_errno is set.
9101  */
9102 static int
9103 mlx5_flow_info_get(struct rte_eth_dev *dev,
9104 		   struct rte_flow_port_info *port_info,
9105 		   struct rte_flow_queue_info *queue_info,
9106 		   struct rte_flow_error *error)
9107 {
9108 	const struct mlx5_flow_driver_ops *fops;
9109 	struct rte_flow_attr attr = {0};
9110 
9111 	if (flow_get_drv_type(dev, &attr) != MLX5_FLOW_TYPE_HW)
9112 		return rte_flow_error_set(error, ENOTSUP,
9113 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9114 				NULL,
9115 				"info get with incorrect steering mode");
9116 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9117 	return fops->info_get(dev, port_info, queue_info, error);
9118 }
9119 
9120 /**
9121  * Configure port HWS resources.
9122  *
9123  * @param[in] dev
9124  *   Pointer to the rte_eth_dev structure.
9125  * @param[in] port_attr
9126  *   Port configuration attributes.
9127  * @param[in] nb_queue
9128  *   Number of flow queues.
9129  * @param[in] queue_attr
9130  *   Array that holds attributes for each flow queue.
9131  * @param[out] error
9132  *   Pointer to error structure.
9133  *
9134  * @return
9135  *   0 on success, a negative errno value otherwise and rte_errno is set.
9136  */
9137 static int
9138 mlx5_flow_port_configure(struct rte_eth_dev *dev,
9139 			 const struct rte_flow_port_attr *port_attr,
9140 			 uint16_t nb_queue,
9141 			 const struct rte_flow_queue_attr *queue_attr[],
9142 			 struct rte_flow_error *error)
9143 {
9144 	const struct mlx5_flow_driver_ops *fops;
9145 	struct rte_flow_attr attr = {0};
9146 
9147 	if (flow_get_drv_type(dev, &attr) != MLX5_FLOW_TYPE_HW)
9148 		return rte_flow_error_set(error, ENOTSUP,
9149 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9150 				NULL,
9151 				"port configure with incorrect steering mode");
9152 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9153 	return fops->configure(dev, port_attr, nb_queue, queue_attr, error);
9154 }
9155 
9156 /**
9157  * Validate item template.
9158  *
9159  * @param[in] dev
9160  *   Pointer to the rte_eth_dev structure.
9161  * @param[in] attr
9162  *   Pointer to the item template attributes.
9163  * @param[in] items
9164  *   The template item pattern.
9165  * @param[out] error
9166  *   Pointer to error structure.
9167  *
9168  * @return
9169  *   0 on success, a negative errno value otherwise and rte_errno is set.
9170  */
9171 int
9172 mlx5_flow_pattern_validate(struct rte_eth_dev *dev,
9173 		const struct rte_flow_pattern_template_attr *attr,
9174 		const struct rte_flow_item items[],
9175 		struct rte_flow_error *error)
9176 {
9177 	const struct mlx5_flow_driver_ops *fops;
9178 	struct rte_flow_attr fattr = {0};
9179 	uint64_t item_flags = 0;
9180 
9181 	if (flow_get_drv_type(dev, &fattr) != MLX5_FLOW_TYPE_HW) {
9182 		rte_flow_error_set(error, ENOTSUP,
9183 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9184 				"pattern validate with incorrect steering mode");
9185 		return -ENOTSUP;
9186 	}
9187 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9188 	return fops->pattern_validate(dev, attr, items, &item_flags, error);
9189 }
9190 
9191 /**
9192  * Create flow pattern template.
9193  *
9194  * @param[in] dev
9195  *   Pointer to the rte_eth_dev structure.
9196  * @param[in] attr
9197  *   Pointer to the item template attributes.
9198  * @param[in] items
9199  *   The template item pattern.
9200  * @param[out] error
9201  *   Pointer to error structure.
9202  *
9203  * @return
9204  *   Template pointer on success, NULL otherwise and rte_errno is set.
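 *
 * Illustrative usage through the public template API (a sketch; assumes
 * an HWS-configured port, pt/err declared by the caller, error handling
 * omitted):
 *
 * @code
 * const struct rte_flow_pattern_template_attr pt_attr = {
 *	.relaxed_matching = 0,
 *	.ingress = 1,
 * };
 * const struct rte_flow_item items[] = {
 *	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *	{ .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * pt = rte_flow_pattern_template_create(port_id, &pt_attr, items, &err);
 * @endcode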
9205  */
9206 static struct rte_flow_pattern_template *
9207 mlx5_flow_pattern_template_create(struct rte_eth_dev *dev,
9208 		const struct rte_flow_pattern_template_attr *attr,
9209 		const struct rte_flow_item items[],
9210 		struct rte_flow_error *error)
9211 {
9212 	const struct mlx5_flow_driver_ops *fops;
9213 	struct rte_flow_attr fattr = {0};
9214 
9215 	if (flow_get_drv_type(dev, &fattr) != MLX5_FLOW_TYPE_HW) {
9216 		rte_flow_error_set(error, ENOTSUP,
9217 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9218 				NULL,
9219 				"pattern create with incorrect steering mode");
9220 		return NULL;
9221 	}
9222 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9223 	return fops->pattern_template_create(dev, attr, items, error);
9224 }
9225 
9226 /**
9227  * Destroy flow pattern template.
9228  *
9229  * @param[in] dev
9230  *   Pointer to the rte_eth_dev structure.
9231  * @param[in] template
9232  *   Pointer to the item template to be destroyed.
9233  * @param[out] error
9234  *   Pointer to error structure.
9235  *
9236  * @return
9237  *   0 on success, a negative errno value otherwise and rte_errno is set.
9238  */
9239 static int
9240 mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev,
9241 		struct rte_flow_pattern_template *template,
9242 		struct rte_flow_error *error)
9243 {
9244 	const struct mlx5_flow_driver_ops *fops;
9245 	struct rte_flow_attr attr = {0};
9246 
9247 	if (flow_get_drv_type(dev, &attr) != MLX5_FLOW_TYPE_HW)
9248 		return rte_flow_error_set(error, ENOTSUP,
9249 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9250 				NULL,
9251 				"pattern destroy with incorrect steering mode");
9252 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9253 	return fops->pattern_template_destroy(dev, template, error);
9254 }
9255 
9256 /**
9257  * Validate flow actions template.
9258  *
9259  * @param[in] dev
9260  *   Pointer to the rte_eth_dev structure.
9261  * @param[in] attr
9262  *   Pointer to the action template attributes.
9263  * @param[in] actions
9264  *   Associated actions (list terminated by the END action).
9265  * @param[in] masks
9266  *   List of actions marking which members of each action are constant.
9267  * @param[out] error
9268  *   Pointer to error structure.
9269  *
9270  * @return
9271  *   0 on success, a negative errno value otherwise and rte_errno is set.
9272  */
9273 int
9274 mlx5_flow_actions_validate(struct rte_eth_dev *dev,
9275 			const struct rte_flow_actions_template_attr *attr,
9276 			const struct rte_flow_action actions[],
9277 			const struct rte_flow_action masks[],
9278 			struct rte_flow_error *error)
9279 {
9280 	const struct mlx5_flow_driver_ops *fops;
9281 	struct rte_flow_attr fattr = {0};
9282 
9283 	if (flow_get_drv_type(dev, &fattr) != MLX5_FLOW_TYPE_HW) {
9284 		rte_flow_error_set(error, ENOTSUP,
9285 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9286 				"actions validate with incorrect steering mode");
9287 		return -ENOTSUP;
9288 	}
9289 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9290 	return fops->actions_validate(dev, attr, actions, masks, error);
9291 }
9292 
9293 /**
9294  * Create flow actions template.
9295  *
9296  * @param[in] dev
9297  *   Pointer to the rte_eth_dev structure.
9298  * @param[in] attr
9299  *   Pointer to the action template attributes.
9300  * @param[in] actions
9301  *   Associated actions (list terminated by the END action).
9302  * @param[in] masks
9303  *   List of actions marking which members of each action are constant.
9304  * @param[out] error
9305  *   Pointer to error structure.
9306  *
9307  * @return
9308  *   Template pointer on success, NULL otherwise and rte_errno is set.
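 *
 * Illustrative sketch through the public API (queue_conf, queue_mask and
 * at_attr are assumed to be prepared by the caller; a non-zero mask
 * member marks that member as constant across all rules):
 *
 * @code
 * const struct rte_flow_action acts[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * const struct rte_flow_action masks[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_mask },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * at = rte_flow_actions_template_create(port_id, &at_attr, acts, masks, &err);
 * @endcode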
9309  */
9310 static struct rte_flow_actions_template *
9311 mlx5_flow_actions_template_create(struct rte_eth_dev *dev,
9312 			const struct rte_flow_actions_template_attr *attr,
9313 			const struct rte_flow_action actions[],
9314 			const struct rte_flow_action masks[],
9315 			struct rte_flow_error *error)
9316 {
9317 	const struct mlx5_flow_driver_ops *fops;
9318 	struct rte_flow_attr fattr = {0};
9319 
9320 	if (flow_get_drv_type(dev, &fattr) != MLX5_FLOW_TYPE_HW) {
9321 		rte_flow_error_set(error, ENOTSUP,
9322 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9323 				NULL,
9324 				"action create with incorrect steering mode");
9325 		return NULL;
9326 	}
9327 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9328 	return fops->actions_template_create(dev, attr, actions, masks, error);
9329 }
9330 
9331 /**
9332  * Destroy flow actions template.
9333  *
9334  * @param[in] dev
9335  *   Pointer to the rte_eth_dev structure.
9336  * @param[in] template
9337  *   Pointer to the action template to be destroyed.
9338  * @param[out] error
9339  *   Pointer to error structure.
9340  *
9341  * @return
9342  *   0 on success, a negative errno value otherwise and rte_errno is set.
9343  */
9344 static int
9345 mlx5_flow_actions_template_destroy(struct rte_eth_dev *dev,
9346 			struct rte_flow_actions_template *template,
9347 			struct rte_flow_error *error)
9348 {
9349 	const struct mlx5_flow_driver_ops *fops;
9350 	struct rte_flow_attr attr = {0};
9351 
9352 	if (flow_get_drv_type(dev, &attr) != MLX5_FLOW_TYPE_HW)
9353 		return rte_flow_error_set(error, ENOTSUP,
9354 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9355 				NULL,
9356 				"action destroy with incorrect steering mode");
9357 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9358 	return fops->actions_template_destroy(dev, template, error);
9359 }
9360 
9361 /**
9362  * Create flow table.
9363  *
9364  * @param[in] dev
9365  *   Pointer to the rte_eth_dev structure.
9366  * @param[in] attr
9367  *   Pointer to the table attributes.
9368  * @param[in] item_templates
9369  *   Item template array to be bound to the table.
9370  * @param[in] nb_item_templates
9371  *   Number of item templates.
9372  * @param[in] action_templates
9373  *   Action template array to be bound to the table.
9374  * @param[in] nb_action_templates
9375  *   Number of action templates.
9376  * @param[out] error
9377  *   Pointer to error structure.
9378  *
9379  * @return
9380  *   Table on success, NULL otherwise and rte_errno is set.
9381  */
9382 static struct rte_flow_template_table *
9383 mlx5_flow_table_create(struct rte_eth_dev *dev,
9384 		       const struct rte_flow_template_table_attr *attr,
9385 		       struct rte_flow_pattern_template *item_templates[],
9386 		       uint8_t nb_item_templates,
9387 		       struct rte_flow_actions_template *action_templates[],
9388 		       uint8_t nb_action_templates,
9389 		       struct rte_flow_error *error)
9390 {
9391 	const struct mlx5_flow_driver_ops *fops;
9392 	struct rte_flow_attr fattr = {0};
9393 
9394 	if (flow_get_drv_type(dev, &fattr) != MLX5_FLOW_TYPE_HW) {
9395 		rte_flow_error_set(error, ENOTSUP,
9396 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9397 				NULL,
9398 				"table create with incorrect steering mode");
9399 		return NULL;
9400 	}
9401 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9402 	return fops->template_table_create(dev,
9403 					   attr,
9404 					   item_templates,
9405 					   nb_item_templates,
9406 					   action_templates,
9407 					   nb_action_templates,
9408 					   error);
9409 }
9410 
9411 /**
9412  * Destroy flow table.
9413  *
9414  * @param[in] dev
9415  *   Pointer to the rte_eth_dev structure.
9416  * @param[in] table
9417  *   Pointer to the table to be destroyed.
9418  * @param[out] error
9419  *   Pointer to error structure.
9420  *
9421  * @return
9422  *   0 on success, a negative errno value otherwise and rte_errno is set.
9423  */
9424 static int
9425 mlx5_flow_table_destroy(struct rte_eth_dev *dev,
9426 			struct rte_flow_template_table *table,
9427 			struct rte_flow_error *error)
9428 {
9429 	const struct mlx5_flow_driver_ops *fops;
9430 	struct rte_flow_attr attr = {0};
9431 
9432 	if (flow_get_drv_type(dev, &attr) != MLX5_FLOW_TYPE_HW)
9433 		return rte_flow_error_set(error, ENOTSUP,
9434 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9435 				NULL,
9436 				"table destroy with incorrect steering mode");
9437 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9438 	return fops->template_table_destroy(dev, table, error);
9439 }
9440 
9441 /**
9442  * Set group miss actions (PMD callback).
9443  *
9444  * @param[in] dev
9445  *   Pointer to the rte_eth_dev structure.
9446  * @param[in] group_id Group to set the miss actions for.
9447  * @param[in] attr Pointer to group attributes.
9448  * @param[in] actions
9449  *   Array of actions, terminated by the END action.
9450  * @param[out] error
9451  *   Pointer to error structure.
9452  *
9453  * @return
9454  *   0 on success, a negative errno value otherwise and rte_errno is set.
9455  */
9456 static int
9457 mlx5_flow_group_set_miss_actions(struct rte_eth_dev *dev,
9458 				 uint32_t group_id,
9459 				 const struct rte_flow_group_attr *attr,
9460 				 const struct rte_flow_action actions[],
9461 				 struct rte_flow_error *error)
9462 {
9463 	const struct mlx5_flow_driver_ops *fops;
9464 	struct rte_flow_attr fattr = {0};
9465 
9466 	if (flow_get_drv_type(dev, &fattr) != MLX5_FLOW_TYPE_HW)
9467 		return rte_flow_error_set(error, ENOTSUP,
9468 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9469 				NULL,
9470 				"group set miss actions with incorrect steering mode");
9471 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9472 	return fops->group_set_miss_actions(dev, group_id, attr, actions, error);
9473 }
9474 
9475 /**
9476  * Allocate new memory for the counter values wrapped by all the needed
9477  * management.
9478  *
9479  * @param[in] sh
9480  *   Pointer to mlx5_dev_ctx_shared object.
9481  *
9482  * @return
9483  *   0 on success, a negative errno value otherwise.
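 *
 * Memory layout sketch (illustrative): a single allocation holds the raw
 * statistics first, then the per-block raw descriptors, and finally the
 * management header itself at the very end:
 *
 * @code
 * | stats[0 .. raws_n * MLX5_COUNTERS_PER_POOL - 1] | raws[0 .. raws_n - 1] | mem_mng |
 * @endcode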
9484 */ 9485 static int 9486 mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) 9487 { 9488 struct mlx5_counter_stats_mem_mng *mem_mng; 9489 volatile struct flow_counter_stats *raw_data; 9490 int raws_n = MLX5_CNT_MR_ALLOC_BULK + MLX5_MAX_PENDING_QUERIES; 9491 int size = (sizeof(struct flow_counter_stats) * 9492 MLX5_COUNTERS_PER_POOL + 9493 sizeof(struct mlx5_counter_stats_raw)) * raws_n + 9494 sizeof(struct mlx5_counter_stats_mem_mng); 9495 size_t pgsize = rte_mem_page_size(); 9496 uint8_t *mem; 9497 int ret; 9498 int i; 9499 9500 if (pgsize == (size_t)-1) { 9501 DRV_LOG(ERR, "Failed to get mem page size"); 9502 rte_errno = ENOMEM; 9503 return -ENOMEM; 9504 } 9505 mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize, SOCKET_ID_ANY); 9506 if (!mem) { 9507 rte_errno = ENOMEM; 9508 return -ENOMEM; 9509 } 9510 mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1; 9511 size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n; 9512 ret = mlx5_os_wrapped_mkey_create(sh->cdev->ctx, sh->cdev->pd, 9513 sh->cdev->pdn, mem, size, 9514 &mem_mng->wm); 9515 if (ret) { 9516 rte_errno = errno; 9517 mlx5_free(mem); 9518 return -rte_errno; 9519 } 9520 mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size); 9521 raw_data = (volatile struct flow_counter_stats *)mem; 9522 for (i = 0; i < raws_n; ++i) { 9523 mem_mng->raws[i].mem_mng = mem_mng; 9524 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL; 9525 } 9526 for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i) 9527 LIST_INSERT_HEAD(&sh->sws_cmng.free_stat_raws, 9528 mem_mng->raws + MLX5_CNT_MR_ALLOC_BULK + i, 9529 next); 9530 LIST_INSERT_HEAD(&sh->sws_cmng.mem_mngs, mem_mng, next); 9531 sh->sws_cmng.mem_mng = mem_mng; 9532 return 0; 9533 } 9534 9535 /** 9536 * Set the statistic memory to the new counter pool. 9537 * 9538 * @param[in] sh 9539 * Pointer to mlx5_dev_ctx_shared object. 9540 * @param[in] pool 9541 * Pointer to the pool to set the statistic memory. 9542 * 9543 * @return 9544 * 0 on success, a negative errno value otherwise. 9545 */ 9546 static int 9547 mlx5_flow_set_counter_stat_mem(struct mlx5_dev_ctx_shared *sh, 9548 struct mlx5_flow_counter_pool *pool) 9549 { 9550 struct mlx5_flow_counter_mng *cmng = &sh->sws_cmng; 9551 /* Resize statistic memory once used out. */ 9552 if (!(pool->index % MLX5_CNT_MR_ALLOC_BULK) && 9553 mlx5_flow_create_counter_stat_mem_mng(sh)) { 9554 DRV_LOG(ERR, "Cannot resize counter stat mem."); 9555 return -1; 9556 } 9557 rte_spinlock_lock(&pool->sl); 9558 pool->raw = cmng->mem_mng->raws + pool->index % MLX5_CNT_MR_ALLOC_BULK; 9559 rte_spinlock_unlock(&pool->sl); 9560 pool->raw_hw = NULL; 9561 return 0; 9562 } 9563 9564 #define MLX5_POOL_QUERY_FREQ_US 1000000 9565 9566 /** 9567 * Set the periodic procedure for triggering asynchronous batch queries for all 9568 * the counter pools. 9569 * 9570 * @param[in] sh 9571 * Pointer to mlx5_dev_ctx_shared object. 
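 *
 * The MLX5_POOL_QUERY_FREQ_US budget is spread evenly across the valid
 * pools, so with e.g. 4 pools the alarm fires every 1000000 / 4 = 250000
 * microseconds and each pool is still queried about once per second.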
9572 */ 9573 void 9574 mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh) 9575 { 9576 uint32_t pools_n, us; 9577 9578 pools_n = rte_atomic_load_explicit(&sh->sws_cmng.n_valid, rte_memory_order_relaxed); 9579 us = MLX5_POOL_QUERY_FREQ_US / pools_n; 9580 DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us); 9581 if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) { 9582 sh->sws_cmng.query_thread_on = 0; 9583 DRV_LOG(ERR, "Cannot reinitialize query alarm"); 9584 } else { 9585 sh->sws_cmng.query_thread_on = 1; 9586 } 9587 } 9588 9589 /** 9590 * The periodic procedure for triggering asynchronous batch queries for all the 9591 * counter pools. This function is probably called by the host thread. 9592 * 9593 * @param[in] arg 9594 * The parameter for the alarm process. 9595 */ 9596 void 9597 mlx5_flow_query_alarm(void *arg) 9598 { 9599 struct mlx5_dev_ctx_shared *sh = arg; 9600 struct mlx5_flow_counter_mng *cmng = &sh->sws_cmng; 9601 uint16_t pool_index = cmng->pool_index; 9602 struct mlx5_flow_counter_pool *pool; 9603 uint16_t n_valid; 9604 int ret; 9605 9606 if (cmng->pending_queries >= MLX5_MAX_PENDING_QUERIES) 9607 goto set_alarm; 9608 rte_spinlock_lock(&cmng->pool_update_sl); 9609 pool = cmng->pools[pool_index]; 9610 n_valid = cmng->n_valid; 9611 rte_spinlock_unlock(&cmng->pool_update_sl); 9612 /* Set the statistic memory to the new created pool. */ 9613 if ((!pool->raw && mlx5_flow_set_counter_stat_mem(sh, pool))) 9614 goto set_alarm; 9615 if (pool->raw_hw) 9616 /* There is a pool query in progress. */ 9617 goto set_alarm; 9618 pool->raw_hw = LIST_FIRST(&cmng->free_stat_raws); 9619 if (!pool->raw_hw) 9620 /* No free counter statistics raw memory. */ 9621 goto set_alarm; 9622 /* 9623 * Identify the counters released between query trigger and query 9624 * handle more efficiently. The counter released in this gap period 9625 * should wait for a new round of query as the new arrived packets 9626 * will not be taken into account. 9627 */ 9628 pool->query_gen++; 9629 ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0, 9630 MLX5_COUNTERS_PER_POOL, 9631 NULL, NULL, 9632 pool->raw_hw->mem_mng->wm.lkey, 9633 (void *)(uintptr_t) 9634 pool->raw_hw->data, 9635 sh->devx_comp, 9636 (uint64_t)(uintptr_t)pool); 9637 if (ret) { 9638 DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID" 9639 " %d", pool->min_dcs->id); 9640 pool->raw_hw = NULL; 9641 goto set_alarm; 9642 } 9643 LIST_REMOVE(pool->raw_hw, next); 9644 cmng->pending_queries++; 9645 pool_index++; 9646 if (pool_index >= n_valid) 9647 pool_index = 0; 9648 set_alarm: 9649 cmng->pool_index = pool_index; 9650 mlx5_set_query_alarm(sh); 9651 } 9652 9653 /** 9654 * Check and callback event for new aged flow in the counter pool 9655 * 9656 * @param[in] sh 9657 * Pointer to mlx5_dev_ctx_shared object. 9658 * @param[in] pool 9659 * Pointer to Current counter pool. 
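 *
 * Ageing sketch: a counter whose hit count is unchanged since the last
 * query accumulates the elapsed seconds in sec_since_last_hit; once the
 * accumulated idle time exceeds age_param->timeout the counter is moved
 * to the aged list and an ageing event is prepared for the port.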
9660  */
9661 static void
9662 mlx5_flow_aging_check(struct mlx5_dev_ctx_shared *sh,
9663 		      struct mlx5_flow_counter_pool *pool)
9664 {
9665 	struct mlx5_priv *priv;
9666 	struct mlx5_flow_counter *cnt;
9667 	struct mlx5_age_info *age_info;
9668 	struct mlx5_age_param *age_param;
9669 	struct mlx5_counter_stats_raw *cur = pool->raw_hw;
9670 	struct mlx5_counter_stats_raw *prev = pool->raw;
9671 	const uint64_t curr_time = MLX5_CURR_TIME_SEC;
9672 	const uint32_t time_delta = curr_time - pool->time_of_last_age_check;
9673 	uint16_t expected = AGE_CANDIDATE;
9674 	uint32_t i;
9675 
9676 	pool->time_of_last_age_check = curr_time;
9677 	for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
9678 		cnt = MLX5_POOL_GET_CNT(pool, i);
9679 		age_param = MLX5_CNT_TO_AGE(cnt);
9680 		if (rte_atomic_load_explicit(&age_param->state,
9681 				rte_memory_order_relaxed) != AGE_CANDIDATE)
9682 			continue;
9683 		if (cur->data[i].hits != prev->data[i].hits) {
9684 			rte_atomic_store_explicit(&age_param->sec_since_last_hit, 0,
9685 					rte_memory_order_relaxed);
9686 			continue;
9687 		}
9688 		if (rte_atomic_fetch_add_explicit(&age_param->sec_since_last_hit,
9689 				time_delta,
9690 				rte_memory_order_relaxed) + time_delta <= age_param->timeout)
9691 			continue;
9692 		/*
9693 		 * Take the lock first; otherwise, if the counter is released
9694 		 * between the AGE_TMOUT state switch and the tailq insertion,
9695 		 * the release procedure could try to delete a tailq node that
9696 		 * was never inserted.
9697 		 */
9698 		priv = rte_eth_devices[age_param->port_id].data->dev_private;
9699 		age_info = GET_PORT_AGE_INFO(priv);
9700 		rte_spinlock_lock(&age_info->aged_sl);
9701 		if (rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
9702 				AGE_TMOUT,
9703 				rte_memory_order_relaxed,
9704 				rte_memory_order_relaxed)) {
9705 			TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
9706 			MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
9707 		}
9708 		rte_spinlock_unlock(&age_info->aged_sl);
9709 	}
9710 	mlx5_age_event_prepare(sh);
9711 }
9712 
9713 /**
9714  * Handler for the HW response with the ready values from an asynchronous
9715  * batch query. This function is called from the host thread.
9716  *
9717  * @param[in] sh
9718  *   The pointer to the shared device context.
9719  * @param[in] async_id
9720  *   The Devx async ID.
9721  * @param[in] status
9722  *   The status of the completion.
9723  */
9724 void
9725 mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
9726 				  uint64_t async_id, int status)
9727 {
9728 	struct mlx5_flow_counter_pool *pool =
9729 		(struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
9730 	struct mlx5_counter_stats_raw *raw_to_free;
9731 	uint8_t query_gen = pool->query_gen ^ 1;
9732 	struct mlx5_flow_counter_mng *cmng = &sh->sws_cmng;
9733 	enum mlx5_counter_type cnt_type =
9734 		pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
9735 				MLX5_COUNTER_TYPE_ORIGIN;
9736 
9737 	if (unlikely(status)) {
9738 		raw_to_free = pool->raw_hw;
9739 	} else {
9740 		raw_to_free = pool->raw;
9741 		if (pool->is_aged)
9742 			mlx5_flow_aging_check(sh, pool);
9743 		rte_spinlock_lock(&pool->sl);
9744 		pool->raw = pool->raw_hw;
9745 		rte_spinlock_unlock(&pool->sl);
9746 		/* Be sure the new raw counters data is updated in memory.
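		 * The rte_io_wmb() below makes sure the DMA'd statistics are
		 * visible in memory before any reader observes the updated
		 * pool->raw pointer swapped in above.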
*/ 9747 rte_io_wmb(); 9748 if (!TAILQ_EMPTY(&pool->counters[query_gen])) { 9749 rte_spinlock_lock(&cmng->csl[cnt_type]); 9750 TAILQ_CONCAT(&cmng->counters[cnt_type], 9751 &pool->counters[query_gen], next); 9752 rte_spinlock_unlock(&cmng->csl[cnt_type]); 9753 } 9754 } 9755 LIST_INSERT_HEAD(&sh->sws_cmng.free_stat_raws, raw_to_free, next); 9756 pool->raw_hw = NULL; 9757 sh->sws_cmng.pending_queries--; 9758 } 9759 9760 static int 9761 flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table, 9762 const struct flow_grp_info *grp_info, 9763 struct rte_flow_error *error) 9764 { 9765 if (grp_info->transfer && grp_info->external && 9766 grp_info->fdb_def_rule) { 9767 if (group == UINT32_MAX) 9768 return rte_flow_error_set 9769 (error, EINVAL, 9770 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, 9771 NULL, 9772 "group index not supported"); 9773 *table = group + 1; 9774 } else { 9775 *table = group; 9776 } 9777 DRV_LOG(DEBUG, "port %u group=%#x table=%#x", port_id, group, *table); 9778 return 0; 9779 } 9780 9781 /** 9782 * Translate the rte_flow group index to HW table value. 9783 * 9784 * If tunnel offload is disabled, all group ids converted to flow table 9785 * id using the standard method. 9786 * If tunnel offload is enabled, group id can be converted using the 9787 * standard or tunnel conversion method. Group conversion method 9788 * selection depends on flags in `grp_info` parameter: 9789 * - Internal (grp_info.external == 0) groups conversion uses the 9790 * standard method. 9791 * - Group ids in JUMP action converted with the tunnel conversion. 9792 * - Group id in rule attribute conversion depends on a rule type and 9793 * group id value: 9794 * ** non zero group attributes converted with the tunnel method 9795 * ** zero group attribute in non-tunnel rule is converted using the 9796 * standard method - there's only one root table 9797 * ** zero group attribute in steer tunnel rule is converted with the 9798 * standard method - single root table 9799 * ** zero group attribute in match tunnel rule is a special OvS 9800 * case: that value is used for portability reasons. That group 9801 * id is converted with the tunnel conversion method. 9802 * 9803 * @param[in] dev 9804 * Port device 9805 * @param[in] tunnel 9806 * PMD tunnel offload object 9807 * @param[in] group 9808 * rte_flow group index value. 9809 * @param[out] table 9810 * HW table value. 9811 * @param[in] grp_info 9812 * flags used for conversion 9813 * @param[out] error 9814 * Pointer to error structure. 9815 * 9816 * @return 9817 * 0 on success, a negative errno value otherwise and rte_errno is set. 9818 */ 9819 int 9820 mlx5_flow_group_to_table(struct rte_eth_dev *dev, 9821 const struct mlx5_flow_tunnel *tunnel, 9822 uint32_t group, uint32_t *table, 9823 const struct flow_grp_info *grp_info, 9824 struct rte_flow_error *error) 9825 { 9826 int ret; 9827 bool standard_translation; 9828 9829 if (!grp_info->skip_scale && grp_info->external && 9830 group < MLX5_MAX_TABLES_EXTERNAL) 9831 group *= MLX5_FLOW_TABLE_FACTOR; 9832 if (is_tunnel_offload_active(dev)) { 9833 standard_translation = !grp_info->external || 9834 grp_info->std_tbl_fix; 9835 } else { 9836 standard_translation = true; 9837 } 9838 DRV_LOG(DEBUG, 9839 "port %u group=%u transfer=%d external=%d fdb_def_rule=%d translate=%s", 9840 dev->data->port_id, group, grp_info->transfer, 9841 grp_info->external, grp_info->fdb_def_rule, 9842 standard_translation ? 
"STANDARD" : "TUNNEL"); 9843 if (standard_translation) 9844 ret = flow_group_to_table(dev->data->port_id, group, table, 9845 grp_info, error); 9846 else 9847 ret = tunnel_flow_group_to_flow_table(dev, tunnel, group, 9848 table, error); 9849 9850 return ret; 9851 } 9852 9853 /** 9854 * Discover availability of metadata reg_c's. 9855 * 9856 * Iteratively use test flows to check availability. 9857 * 9858 * @param[in] dev 9859 * Pointer to the Ethernet device structure. 9860 * 9861 * @return 9862 * 0 on success, a negative errno value otherwise and rte_errno is set. 9863 */ 9864 int 9865 mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev) 9866 { 9867 struct mlx5_priv *priv = dev->data->dev_private; 9868 enum modify_reg idx; 9869 int n = 0; 9870 9871 /* reg_c[0] and reg_c[1] are reserved. */ 9872 priv->sh->flow_mreg_c[n++] = REG_C_0; 9873 priv->sh->flow_mreg_c[n++] = REG_C_1; 9874 /* Discover availability of other reg_c's. */ 9875 for (idx = REG_C_2; idx <= REG_C_7; ++idx) { 9876 struct rte_flow_attr attr = { 9877 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, 9878 .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR, 9879 .ingress = 1, 9880 }; 9881 struct rte_flow_item items[] = { 9882 [0] = { 9883 .type = RTE_FLOW_ITEM_TYPE_END, 9884 }, 9885 }; 9886 struct rte_flow_action actions[] = { 9887 [0] = { 9888 .type = (enum rte_flow_action_type) 9889 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 9890 .conf = &(struct mlx5_flow_action_copy_mreg){ 9891 .src = REG_C_1, 9892 .dst = idx, 9893 }, 9894 }, 9895 [1] = { 9896 .type = RTE_FLOW_ACTION_TYPE_JUMP, 9897 .conf = &(struct rte_flow_action_jump){ 9898 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, 9899 }, 9900 }, 9901 [2] = { 9902 .type = RTE_FLOW_ACTION_TYPE_END, 9903 }, 9904 }; 9905 uint32_t flow_idx; 9906 struct rte_flow *flow; 9907 struct rte_flow_error error; 9908 9909 if (!priv->sh->config.dv_flow_en) 9910 break; 9911 /* Create internal flow, validation skips copy action. */ 9912 flow_idx = mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr, 9913 items, actions, false, &error); 9914 flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN], 9915 flow_idx); 9916 if (!flow) 9917 continue; 9918 priv->sh->flow_mreg_c[n++] = idx; 9919 mlx5_flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx); 9920 } 9921 for (; n < MLX5_MREG_C_NUM; ++n) 9922 priv->sh->flow_mreg_c[n] = REG_NON; 9923 priv->sh->metadata_regc_check_flag = 1; 9924 return 0; 9925 } 9926 9927 int 9928 save_dump_file(const uint8_t *data, uint32_t size, 9929 uint32_t type, uint64_t id, void *arg, FILE *file) 9930 { 9931 char line[BUF_SIZE]; 9932 uint32_t out = 0; 9933 uint32_t k; 9934 uint32_t actions_num; 9935 struct rte_flow_query_count *count; 9936 9937 memset(line, 0, BUF_SIZE); 9938 switch (type) { 9939 case DR_DUMP_REC_TYPE_PMD_MODIFY_HDR: 9940 actions_num = *(uint32_t *)(arg); 9941 out += snprintf(line + out, BUF_SIZE - out, "%d,0x%" PRIx64 ",%d,", 9942 type, id, actions_num); 9943 break; 9944 case DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT: 9945 out += snprintf(line + out, BUF_SIZE - out, "%d,0x%" PRIx64 ",", 9946 type, id); 9947 break; 9948 case DR_DUMP_REC_TYPE_PMD_COUNTER: 9949 count = (struct rte_flow_query_count *)arg; 9950 fprintf(file, 9951 "%d,0x%" PRIx64 ",%" PRIu64 ",%" PRIu64 "\n", 9952 type, id, count->hits, count->bytes); 9953 return 0; 9954 default: 9955 return -1; 9956 } 9957 9958 for (k = 0; k < size; k++) { 9959 /* Make sure we do not overrun the line buffer length. 
*/ 9960 if (out >= BUF_SIZE - 4) { 9961 line[out] = '\0'; 9962 break; 9963 } 9964 out += snprintf(line + out, BUF_SIZE - out, "%02x", 9965 (data[k]) & 0xff); 9966 } 9967 fprintf(file, "%s\n", line); 9968 return 0; 9969 } 9970 9971 int 9972 mlx5_flow_query_counter(struct rte_eth_dev *dev, struct rte_flow *flow, 9973 struct rte_flow_query_count *count, struct rte_flow_error *error) 9974 { 9975 struct rte_flow_action action[2]; 9976 enum mlx5_flow_drv_type ftype; 9977 const struct mlx5_flow_driver_ops *fops; 9978 9979 if (!flow) { 9980 return rte_flow_error_set(error, ENOENT, 9981 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 9982 NULL, 9983 "invalid flow handle"); 9984 } 9985 action[0].type = RTE_FLOW_ACTION_TYPE_COUNT; 9986 action[1].type = RTE_FLOW_ACTION_TYPE_END; 9987 if (flow->counter) { 9988 memset(count, 0, sizeof(struct rte_flow_query_count)); 9989 ftype = (enum mlx5_flow_drv_type)(flow->drv_type); 9990 MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && 9991 ftype < MLX5_FLOW_TYPE_MAX); 9992 fops = flow_get_drv_ops(ftype); 9993 return fops->query(dev, flow, action, count, error); 9994 } 9995 return -1; 9996 } 9997 9998 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 9999 /** 10000 * Dump flow ipool data to file 10001 * 10002 * @param[in] dev 10003 * The pointer to Ethernet device. 10004 * @param[in] file 10005 * A pointer to a file for output. 10006 * @param[out] error 10007 * Perform verbose error reporting if not NULL. PMDs initialize this 10008 * structure in case of error only. 10009 * @return 10010 * 0 on success, a negative value otherwise. 10011 */ 10012 int 10013 mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev, 10014 struct rte_flow *flow, FILE *file, 10015 struct rte_flow_error *error) 10016 { 10017 struct mlx5_priv *priv = dev->data->dev_private; 10018 struct mlx5_flow_dv_modify_hdr_resource *modify_hdr; 10019 struct mlx5_flow_dv_encap_decap_resource *encap_decap; 10020 uint32_t handle_idx; 10021 struct mlx5_flow_handle *dh; 10022 struct rte_flow_query_count count; 10023 uint32_t actions_num; 10024 const uint8_t *data; 10025 size_t size; 10026 uint64_t id; 10027 uint32_t type; 10028 void *action = NULL; 10029 10030 if (!flow) { 10031 return rte_flow_error_set(error, ENOENT, 10032 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 10033 NULL, 10034 "invalid flow handle"); 10035 } 10036 handle_idx = flow->dev_handles; 10037 /* query counter */ 10038 if (flow->counter && 10039 (!mlx5_counter_query(dev, flow->counter, false, 10040 &count.hits, &count.bytes, &action)) && action) { 10041 id = (uint64_t)(uintptr_t)action; 10042 type = DR_DUMP_REC_TYPE_PMD_COUNTER; 10043 save_dump_file(NULL, 0, type, 10044 id, (void *)&count, file); 10045 } 10046 10047 while (handle_idx) { 10048 dh = mlx5_ipool_get(priv->sh->ipool 10049 [MLX5_IPOOL_MLX5_FLOW], handle_idx); 10050 if (!dh) 10051 continue; 10052 handle_idx = dh->next.next; 10053 10054 /* Get modify_hdr and encap_decap buf from ipools. 
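		 * A device handle references at most one modify header and
		 * one encap/decap resource; missing references simply leave
		 * the corresponding pointer NULL below.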
*/ 10055 encap_decap = NULL; 10056 modify_hdr = dh->dvh.modify_hdr; 10057 10058 if (dh->dvh.rix_encap_decap) { 10059 encap_decap = mlx5_ipool_get(priv->sh->ipool 10060 [MLX5_IPOOL_DECAP_ENCAP], 10061 dh->dvh.rix_encap_decap); 10062 } 10063 if (modify_hdr) { 10064 data = (const uint8_t *)modify_hdr->actions; 10065 size = (size_t)(modify_hdr->actions_num) * 8; 10066 id = (uint64_t)(uintptr_t)modify_hdr->action; 10067 actions_num = modify_hdr->actions_num; 10068 type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR; 10069 save_dump_file(data, size, type, id, 10070 (void *)(&actions_num), file); 10071 } 10072 if (encap_decap) { 10073 data = encap_decap->buf; 10074 size = encap_decap->size; 10075 id = (uint64_t)(uintptr_t)encap_decap->action; 10076 type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT; 10077 save_dump_file(data, size, type, 10078 id, NULL, file); 10079 } 10080 } 10081 return 0; 10082 } 10083 10084 /** 10085 * Dump all flow's encap_decap/modify_hdr/counter data to file 10086 * 10087 * @param[in] dev 10088 * The pointer to Ethernet device. 10089 * @param[in] file 10090 * A pointer to a file for output. 10091 * @param[out] error 10092 * Perform verbose error reporting if not NULL. PMDs initialize this 10093 * structure in case of error only. 10094 * @return 10095 * 0 on success, a negative value otherwise. 10096 */ 10097 static int 10098 mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, 10099 FILE *file, struct rte_flow_error *error __rte_unused) 10100 { 10101 struct mlx5_priv *priv = dev->data->dev_private; 10102 struct mlx5_dev_ctx_shared *sh = priv->sh; 10103 struct mlx5_hlist *h; 10104 struct mlx5_flow_dv_modify_hdr_resource *modify_hdr; 10105 struct mlx5_flow_dv_encap_decap_resource *encap_decap; 10106 struct rte_flow_query_count count; 10107 uint32_t actions_num; 10108 const uint8_t *data; 10109 size_t size; 10110 uint64_t id; 10111 uint32_t type; 10112 uint32_t i; 10113 uint32_t j; 10114 struct mlx5_list_inconst *l_inconst; 10115 struct mlx5_list_entry *e; 10116 int lcore_index; 10117 struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng; 10118 uint32_t max; 10119 void *action; 10120 10121 /* encap_decap hlist is lcore_share, get global core cache. 
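	 * For lcore-shared lists every insertion is registered in the
	 * MLX5_LIST_GLOBAL cache, so scanning that single cache is
	 * sufficient here.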
*/ 10122 i = MLX5_LIST_GLOBAL; 10123 h = sh->encaps_decaps; 10124 if (h) { 10125 for (j = 0; j <= h->mask; j++) { 10126 l_inconst = &h->buckets[j].l; 10127 if (!l_inconst || !l_inconst->cache[i]) 10128 continue; 10129 10130 e = LIST_FIRST(&l_inconst->cache[i]->h); 10131 while (e) { 10132 encap_decap = 10133 (struct mlx5_flow_dv_encap_decap_resource *)e; 10134 data = encap_decap->buf; 10135 size = encap_decap->size; 10136 id = (uint64_t)(uintptr_t)encap_decap->action; 10137 type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT; 10138 save_dump_file(data, size, type, 10139 id, NULL, file); 10140 e = LIST_NEXT(e, next); 10141 } 10142 } 10143 } 10144 10145 /* get modify_hdr */ 10146 h = sh->modify_cmds; 10147 if (h) { 10148 lcore_index = rte_lcore_index(rte_lcore_id()); 10149 if (unlikely(lcore_index == -1)) { 10150 lcore_index = MLX5_LIST_NLCORE; 10151 rte_spinlock_lock(&h->l_const.lcore_lock); 10152 } 10153 i = lcore_index; 10154 10155 if (lcore_index == MLX5_LIST_NLCORE) { 10156 for (i = 0; i <= (uint32_t)lcore_index; i++) { 10157 for (j = 0; j <= h->mask; j++) { 10158 l_inconst = &h->buckets[j].l; 10159 if (!l_inconst || !l_inconst->cache[i]) 10160 continue; 10161 10162 e = LIST_FIRST(&l_inconst->cache[i]->h); 10163 while (e) { 10164 modify_hdr = 10165 (struct mlx5_flow_dv_modify_hdr_resource *)e; 10166 data = (const uint8_t *)modify_hdr->actions; 10167 size = (size_t)(modify_hdr->actions_num) * 8; 10168 actions_num = modify_hdr->actions_num; 10169 id = (uint64_t)(uintptr_t)modify_hdr->action; 10170 type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR; 10171 save_dump_file(data, size, type, id, 10172 (void *)(&actions_num), file); 10173 e = LIST_NEXT(e, next); 10174 } 10175 } 10176 } 10177 } else { 10178 for (j = 0; j <= h->mask; j++) { 10179 l_inconst = &h->buckets[j].l; 10180 if (!l_inconst || !l_inconst->cache[i]) 10181 continue; 10182 10183 e = LIST_FIRST(&l_inconst->cache[i]->h); 10184 while (e) { 10185 modify_hdr = 10186 (struct mlx5_flow_dv_modify_hdr_resource *)e; 10187 data = (const uint8_t *)modify_hdr->actions; 10188 size = (size_t)(modify_hdr->actions_num) * 8; 10189 actions_num = modify_hdr->actions_num; 10190 id = (uint64_t)(uintptr_t)modify_hdr->action; 10191 type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR; 10192 save_dump_file(data, size, type, id, 10193 (void *)(&actions_num), file); 10194 e = LIST_NEXT(e, next); 10195 } 10196 } 10197 } 10198 10199 if (unlikely(lcore_index == MLX5_LIST_NLCORE)) 10200 rte_spinlock_unlock(&h->l_const.lcore_lock); 10201 } 10202 10203 /* get counter */ 10204 MLX5_ASSERT(cmng->n_valid <= MLX5_COUNTER_POOLS_MAX_NUM); 10205 max = MLX5_COUNTERS_PER_POOL * cmng->n_valid; 10206 for (j = 1; j <= max; j++) { 10207 action = NULL; 10208 if ((!mlx5_counter_query(dev, j, false, &count.hits, 10209 &count.bytes, &action)) && action) { 10210 id = (uint64_t)(uintptr_t)action; 10211 type = DR_DUMP_REC_TYPE_PMD_COUNTER; 10212 save_dump_file(NULL, 0, type, 10213 id, (void *)&count, file); 10214 } 10215 } 10216 return 0; 10217 } 10218 #endif 10219 10220 /** 10221 * Dump flow raw hw data to file 10222 * 10223 * @param[in] dev 10224 * The pointer to Ethernet device. 10225 * @param[in] file 10226 * A pointer to a file for output. 10227 * @param[out] error 10228 * Perform verbose error reporting if not NULL. PMDs initialize this 10229 * structure in case of error only. 10230 * @return 10231 * 0 on success, a negative value otherwise. 
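 *
 * Illustrative trigger from an application (the NULL flow handle selects
 * the dump-all path below; the file path is only an example):
 *
 * @code
 * FILE *f = fopen("/tmp/mlx5_flow_dump.txt", "w");
 * ret = rte_flow_dev_dump(port_id, NULL, f, &err);
 * @endcode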
10232  */
10233 int
10234 mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
10235 		   FILE *file,
10236 		   struct rte_flow_error *error __rte_unused)
10237 {
10238 	struct mlx5_priv *priv = dev->data->dev_private;
10239 	struct mlx5_dev_ctx_shared *sh = priv->sh;
10240 	uint32_t handle_idx;
10241 	int ret;
10242 	struct mlx5_flow_handle *dh;
10243 	struct rte_flow *flow;
10244 
10245 	if (!sh->config.dv_flow_en) {
10246 		if (fputs("device dv flow disabled\n", file) <= 0)
10247 			return -errno;
10248 		return -ENOTSUP;
10249 	}
10250 
10251 	/* dump all */
10252 	if (!flow_idx) {
10253 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
10254 		if (mlx5_flow_dev_dump_sh_all(dev, file, error))
10255 			return -EINVAL;
10256 
10257 		if (sh->config.dv_flow_en == 2)
10258 			return mlx5dr_debug_dump(priv->dr_ctx, file);
10259 #endif
10260 		return mlx5_devx_cmd_flow_dump(sh->fdb_domain,
10261 					       sh->rx_domain,
10262 					       sh->tx_domain, file);
10263 	}
10264 	/* dump one */
10265 	flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
10266 			(uintptr_t)(void *)flow_idx);
10267 	if (!flow)
10268 		return -EINVAL;
10269 
10270 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
10271 	mlx5_flow_dev_dump_ipool(dev, flow, file, error);
10272 #endif
10273 	handle_idx = flow->dev_handles;
10274 	while (handle_idx) {
10275 		dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
10276 				handle_idx);
10277 		if (!dh)
10278 			return -ENOENT;
10279 		if (dh->drv_flow) {
10280 			if (sh->config.dv_flow_en == 2)
10281 				return -ENOTSUP;
10282 
10283 			ret = mlx5_devx_cmd_flow_single_dump(dh->drv_flow,
10284 							     file);
10285 			if (ret)
10286 				return -ENOENT;
10287 		}
10288 		handle_idx = dh->next.next;
10289 	}
10290 	return 0;
10291 }
10292 
10293 /**
10294  * Get aged-out flows.
10295  *
10296  * @param[in] dev
10297  *   Pointer to the Ethernet device structure.
10298  * @param[in] contexts
10299  *   The address of an array of pointers to the aged-out flow contexts.
10300  * @param[in] nb_contexts
10301  *   The length of the context array.
10302  * @param[out] error
10303  *   Perform verbose error reporting if not NULL. Initialized in case of
10304  *   error only.
10305  *
10306  * @return
10307  *   The number of reported contexts on success, a negative errno value
10308  *   otherwise. If nb_contexts is 0, the total number of aged contexts
10309  *   is returned; otherwise, the number of aged flows reported in the
10310  *   context array is returned.
10311  */
10312 int
10313 mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
10314 			uint32_t nb_contexts, struct rte_flow_error *error)
10315 {
10316 	struct rte_flow_attr attr = { .transfer = 0 };
10317 
10318 	return flow_get_drv_ops(flow_get_drv_type(dev, &attr))->get_aged_flows
10319 		(dev, contexts, nb_contexts, error);
10320 }
10321 
10322 /**
10323  * Get aged-out flows per HWS queue.
10324  *
10325  * @param[in] dev
10326  *   Pointer to the Ethernet device structure.
10327  * @param[in] queue_id
10328  *   Flow queue to query.
10329  * @param[in] contexts
10330  *   The address of an array of pointers to the aged-out flow contexts.
10331  * @param[in] nb_contexts
10332  *   The length of the context array.
10333  * @param[out] error
10334  *   Perform verbose error reporting if not NULL. Initialized in case of
10335  *   error only.
10336  *
10337  * @return
10338  *   The number of reported contexts on success, a negative errno value
10339  *   otherwise. If nb_contexts is 0, the total number of aged contexts
10340  *   is returned; otherwise, the number of aged flows reported in the
10341  *   context array is returned.
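 *
 * Polling sketch through the public API (illustrative; assumes an HWS
 * port and that aged flow rules carry user contexts):
 *
 * @code
 * void *ctx[64];
 * int n = rte_flow_get_q_aged_flows(port_id, queue_id, ctx, 64, &err);
 * @endcode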
10342  */
10343 int
10344 mlx5_flow_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id,
10345 			   void **contexts, uint32_t nb_contexts,
10346 			   struct rte_flow_error *error)
10347 {
10348 	const struct mlx5_flow_driver_ops *fops;
10349 	struct rte_flow_attr attr = { 0 };
10350 
10351 	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_HW) {
10352 		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
10353 		return fops->get_q_aged_flows(dev, queue_id, contexts,
10354 					      nb_contexts, error);
10355 	}
10356 	DRV_LOG(ERR, "port %u queue %u get aged flows is not supported.",
10357 		dev->data->port_id, queue_id);
10358 	return rte_flow_error_set(error, ENOTSUP,
10359 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10360 				  "get Q aged flows with incorrect steering mode");
10361 }
10362 
10363 /* Wrapper for driver action_validate op callback */
10364 static int
10365 flow_drv_action_validate(struct rte_eth_dev *dev,
10366 			 const struct rte_flow_indir_action_conf *conf,
10367 			 const struct rte_flow_action *action,
10368 			 const struct mlx5_flow_driver_ops *fops,
10369 			 struct rte_flow_error *error)
10370 {
10371 	static const char err_msg[] = "indirect action validation unsupported";
10372 
10373 	if (!fops->action_validate) {
10374 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
10375 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
10376 				   NULL, err_msg);
10377 		return -rte_errno;
10378 	}
10379 	return fops->action_validate(dev, conf, action, error);
10380 }
10381 
10382 /**
10383  * Destroys the shared action by handle.
10384  *
10385  * @param dev
10386  *   Pointer to Ethernet device structure.
10387  * @param[in] handle
10388  *   Handle for the indirect action object to be destroyed.
10389  * @param[out] error
10390  *   Perform verbose error reporting if not NULL. PMDs initialize this
10391  *   structure in case of error only.
10392  *
10393  * @return
10394  *   0 on success, a negative errno value otherwise and rte_errno is set.
10395  *
10396  * @note: wrapper for driver action_destroy op callback.
10397  */
10398 static int
10399 mlx5_action_handle_destroy(struct rte_eth_dev *dev,
10400 			   struct rte_flow_action_handle *handle,
10401 			   struct rte_flow_error *error)
10402 {
10403 	static const char err_msg[] = "indirect action destruction unsupported";
10404 	struct rte_flow_attr attr = { .transfer = 0 };
10405 	const struct mlx5_flow_driver_ops *fops =
10406 			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
10407 
10408 	if (!fops->action_destroy) {
10409 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
10410 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
10411 				   NULL, err_msg);
10412 		return -rte_errno;
10413 	}
10414 	return fops->action_destroy(dev, handle, error);
10415 }
10416 
10417 /* Wrapper for driver action_update op callback */
10418 static int
10419 flow_drv_action_update(struct rte_eth_dev *dev,
10420 		       struct rte_flow_action_handle *handle,
10421 		       const void *update,
10422 		       const struct mlx5_flow_driver_ops *fops,
10423 		       struct rte_flow_error *error)
10424 {
10425 	static const char err_msg[] = "indirect action update unsupported";
10426 
10427 	if (!fops->action_update) {
10428 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
10429 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
10430 				   NULL, err_msg);
10431 		return -rte_errno;
10432 	}
10433 	return fops->action_update(dev, handle, update, error);
10434 }
10435 
10436 /* Wrapper for driver action_query op callback */
10437 static int
10438 flow_drv_action_query(struct rte_eth_dev *dev,
10439 		      const struct rte_flow_action_handle *handle,
10440 		      void *data,
10441 		      const struct mlx5_flow_driver_ops *fops,
10442 		      struct rte_flow_error *error)
10443 {
10444 	static const char err_msg[] = "indirect action query unsupported";
10445 
10446 	if (!fops->action_query) {
10447 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
10448 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
10449 				   NULL, err_msg);
10450 		return -rte_errno;
10451 	}
10452 	return fops->action_query(dev, handle, data, error);
10453 }
10454 
10455 /**
10456  * Create indirect action for reuse in multiple flow rules.
10457  *
10458  * @param dev
10459  *   Pointer to Ethernet device structure.
10460  * @param conf
10461  *   Pointer to indirect action object configuration.
10462  * @param[in] action
10463  *   Action configuration for indirect action object creation.
10464  * @param[out] error
10465  *   Perform verbose error reporting if not NULL. PMDs initialize this
10466  *   structure in case of error only.
10467  * @return
10468  *   A valid handle in case of success, NULL otherwise and rte_errno is set.
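 *
 * Illustrative sketch through the public API (a shared counter; error
 * handling omitted):
 *
 * @code
 * const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 * const struct rte_flow_action act = {
 *	.type = RTE_FLOW_ACTION_TYPE_COUNT,
 * };
 * handle = rte_flow_action_handle_create(port_id, &conf, &act, &err);
 * @endcode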
10469 */ 10470 static struct rte_flow_action_handle * 10471 mlx5_action_handle_create(struct rte_eth_dev *dev, 10472 const struct rte_flow_indir_action_conf *conf, 10473 const struct rte_flow_action *action, 10474 struct rte_flow_error *error) 10475 { 10476 static const char err_msg[] = "indirect action creation unsupported"; 10477 struct rte_flow_attr attr = { .transfer = 0 }; 10478 const struct mlx5_flow_driver_ops *fops = 10479 flow_get_drv_ops(flow_get_drv_type(dev, &attr)); 10480 10481 if (flow_drv_action_validate(dev, conf, action, fops, error)) 10482 return NULL; 10483 if (!fops->action_create) { 10484 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg); 10485 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, 10486 NULL, err_msg); 10487 return NULL; 10488 } 10489 return fops->action_create(dev, conf, action, error); 10490 } 10491 10492 /** 10493 * Updates inplace the indirect action configuration pointed by *handle* 10494 * with the configuration provided as *update* argument. 10495 * The update of the indirect action configuration effects all flow rules 10496 * reusing the action via handle. 10497 * 10498 * @param dev 10499 * Pointer to Ethernet device structure. 10500 * @param[in] handle 10501 * Handle for the indirect action to be updated. 10502 * @param[in] update 10503 * Action specification used to modify the action pointed by handle. 10504 * *update* could be of same type with the action pointed by the *handle* 10505 * handle argument, or some other structures like a wrapper, depending on 10506 * the indirect action type. 10507 * @param[out] error 10508 * Perform verbose error reporting if not NULL. PMDs initialize this 10509 * structure in case of error only. 10510 * 10511 * @return 10512 * 0 on success, a negative errno value otherwise and rte_errno is set. 10513 */ 10514 static int 10515 mlx5_action_handle_update(struct rte_eth_dev *dev, 10516 struct rte_flow_action_handle *handle, 10517 const void *update, 10518 struct rte_flow_error *error) 10519 { 10520 struct rte_flow_attr attr = { .transfer = 0 }; 10521 const struct mlx5_flow_driver_ops *fops = 10522 flow_get_drv_ops(flow_get_drv_type(dev, &attr)); 10523 int ret; 10524 uint32_t act_idx = (uint32_t)(uintptr_t)handle; 10525 uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET; 10526 10527 switch (type) { 10528 case MLX5_INDIRECT_ACTION_TYPE_CT: 10529 case MLX5_INDIRECT_ACTION_TYPE_METER_MARK: 10530 ret = 0; 10531 break; 10532 default: 10533 ret = flow_drv_action_validate(dev, NULL, 10534 (const struct rte_flow_action *)update, 10535 fops, error); 10536 } 10537 if (ret) 10538 return ret; 10539 return flow_drv_action_update(dev, handle, update, fops, 10540 error); 10541 } 10542 10543 /** 10544 * Query the indirect action by handle. 10545 * 10546 * This function allows retrieving action-specific data such as counters. 10547 * Data is gathered by special action which may be present/referenced in 10548 * more than one flow rule definition. 10549 * 10550 * see @RTE_FLOW_ACTION_TYPE_COUNT 10551 * 10552 * @param dev 10553 * Pointer to Ethernet device structure. 10554 * @param[in] handle 10555 * Handle for the indirect action to query. 10556 * @param[in, out] data 10557 * Pointer to storage for the associated query data type. 10558 * @param[out] error 10559 * Perform verbose error reporting if not NULL. PMDs initialize this 10560 * structure in case of error only. 10561 * 10562 * @return 10563 * 0 on success, a negative errno value otherwise and rte_errno is set. 
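 *
 * Query sketch for a shared counter handle (illustrative):
 *
 * @code
 * struct rte_flow_query_count cnt;
 * memset(&cnt, 0, sizeof(cnt));
 * ret = rte_flow_action_handle_query(port_id, handle, &cnt, &err);
 * @endcode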
10564 */ 10565 static int 10566 mlx5_action_handle_query(struct rte_eth_dev *dev, 10567 const struct rte_flow_action_handle *handle, 10568 void *data, 10569 struct rte_flow_error *error) 10570 { 10571 struct rte_flow_attr attr = { .transfer = 0 }; 10572 const struct mlx5_flow_driver_ops *fops = 10573 flow_get_drv_ops(flow_get_drv_type(dev, &attr)); 10574 10575 return flow_drv_action_query(dev, handle, data, fops, error); 10576 } 10577 10578 static int 10579 mlx5_action_handle_query_update(struct rte_eth_dev *dev, 10580 struct rte_flow_action_handle *handle, 10581 const void *update, void *query, 10582 enum rte_flow_query_update_mode qu_mode, 10583 struct rte_flow_error *error) 10584 { 10585 struct rte_flow_attr attr = { .transfer = 0 }; 10586 enum mlx5_flow_drv_type drv_type = flow_get_drv_type(dev, &attr); 10587 const struct mlx5_flow_driver_ops *fops; 10588 10589 if (drv_type == MLX5_FLOW_TYPE_MIN || drv_type == MLX5_FLOW_TYPE_MAX) 10590 return rte_flow_error_set(error, ENOTSUP, 10591 RTE_FLOW_ERROR_TYPE_ACTION, 10592 NULL, "invalid driver type"); 10593 fops = flow_get_drv_ops(drv_type); 10594 if (!fops || !fops->action_query_update) 10595 return rte_flow_error_set(error, ENOTSUP, 10596 RTE_FLOW_ERROR_TYPE_ACTION, 10597 NULL, "no query_update handler"); 10598 return fops->action_query_update(dev, handle, update, 10599 query, qu_mode, error); 10600 } 10601 10602 10603 #define MLX5_DRV_FOPS_OR_ERR(dev, fops, drv_cb, ret) \ 10604 { \ 10605 struct rte_flow_attr attr = { .transfer = 0 }; \ 10606 enum mlx5_flow_drv_type drv_type = flow_get_drv_type((dev), &attr); \ 10607 if (drv_type == MLX5_FLOW_TYPE_MIN || \ 10608 drv_type == MLX5_FLOW_TYPE_MAX) { \ 10609 rte_flow_error_set(error, ENOTSUP, \ 10610 RTE_FLOW_ERROR_TYPE_ACTION, \ 10611 NULL, "invalid driver type"); \ 10612 return ret; \ 10613 } \ 10614 (fops) = flow_get_drv_ops(drv_type); \ 10615 if (!(fops) || !(fops)->drv_cb) { \ 10616 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, \ 10617 NULL, "no action_list handler"); \ 10618 return ret; \ 10619 } \ 10620 } 10621 10622 static struct rte_flow_action_list_handle * 10623 mlx5_action_list_handle_create(struct rte_eth_dev *dev, 10624 const struct rte_flow_indir_action_conf *conf, 10625 const struct rte_flow_action *actions, 10626 struct rte_flow_error *error) 10627 { 10628 const struct mlx5_flow_driver_ops *fops; 10629 10630 MLX5_DRV_FOPS_OR_ERR(dev, fops, action_list_handle_create, NULL); 10631 return fops->action_list_handle_create(dev, conf, actions, error); 10632 } 10633 10634 static int 10635 mlx5_action_list_handle_destroy(struct rte_eth_dev *dev, 10636 struct rte_flow_action_list_handle *handle, 10637 struct rte_flow_error *error) 10638 { 10639 const struct mlx5_flow_driver_ops *fops; 10640 10641 MLX5_DRV_FOPS_OR_ERR(dev, fops, action_list_handle_destroy, ENOTSUP); 10642 return fops->action_list_handle_destroy(dev, handle, error); 10643 } 10644 10645 static int 10646 mlx5_flow_action_list_handle_query_update(struct rte_eth_dev *dev, 10647 const 10648 struct rte_flow_action_list_handle *handle, 10649 const void **update, void **query, 10650 enum rte_flow_query_update_mode mode, 10651 struct rte_flow_error *error) 10652 { 10653 const struct mlx5_flow_driver_ops *fops; 10654 10655 MLX5_DRV_FOPS_OR_ERR(dev, fops, 10656 action_list_handle_query_update, ENOTSUP); 10657 return fops->action_list_handle_query_update(dev, handle, update, query, 10658 mode, error); 10659 } 10660 static int 10661 mlx5_flow_calc_table_hash(struct rte_eth_dev *dev, 10662 const struct 
rte_flow_template_table *table,
10663 			  const struct rte_flow_item pattern[],
10664 			  uint8_t pattern_template_index,
10665 			  uint32_t *hash, struct rte_flow_error *error)
10666 {
10667 	struct rte_flow_attr attr = { .transfer = 0 };
10668 	enum mlx5_flow_drv_type drv_type = flow_get_drv_type(dev, &attr);
10669 	const struct mlx5_flow_driver_ops *fops;
10670 
10671 	if (drv_type == MLX5_FLOW_TYPE_MIN || drv_type == MLX5_FLOW_TYPE_MAX)
10672 		return rte_flow_error_set(error, ENOTSUP,
10673 					  RTE_FLOW_ERROR_TYPE_ACTION,
10674 					  NULL, "invalid driver type");
10675 	fops = flow_get_drv_ops(drv_type);
10676 	if (!fops || !fops->flow_calc_table_hash)
10677 		return rte_flow_error_set(error, ENOTSUP,
10678 					  RTE_FLOW_ERROR_TYPE_ACTION,
10679 					  NULL, "no calc table hash handler");
10680 	return fops->flow_calc_table_hash(dev, table, pattern, pattern_template_index,
10681 					  hash, error);
10682 }
10683 
10684 static int
10685 mlx5_flow_calc_encap_hash(struct rte_eth_dev *dev,
10686 			  const struct rte_flow_item pattern[],
10687 			  enum rte_flow_encap_hash_field dest_field,
10688 			  uint8_t *hash,
10689 			  struct rte_flow_error *error)
10690 {
10691 	enum mlx5_flow_drv_type drv_type = flow_get_drv_type(dev, NULL);
10692 	const struct mlx5_flow_driver_ops *fops;
10693 
10694 	if (drv_type == MLX5_FLOW_TYPE_MIN || drv_type == MLX5_FLOW_TYPE_MAX)
10695 		return rte_flow_error_set(error, ENOTSUP,
10696 					  RTE_FLOW_ERROR_TYPE_ACTION,
10697 					  NULL, "invalid driver type");
10698 	fops = flow_get_drv_ops(drv_type);
10699 	if (!fops || !fops->flow_calc_encap_hash)
10700 		return rte_flow_error_set(error, ENOTSUP,
10701 					  RTE_FLOW_ERROR_TYPE_ACTION,
10702 					  NULL, "no calc encap hash handler");
10703 	return fops->flow_calc_encap_hash(dev, pattern, dest_field, hash, error);
10704 }
10705 
10706 static int
10707 mlx5_template_table_resize(struct rte_eth_dev *dev,
10708 			   struct rte_flow_template_table *table,
10709 			   uint32_t nb_rules, struct rte_flow_error *error)
10710 {
10711 	const struct mlx5_flow_driver_ops *fops;
10712 
10713 	MLX5_DRV_FOPS_OR_ERR(dev, fops, table_resize, ENOTSUP);
10714 	return fops->table_resize(dev, table, nb_rules, error);
10715 }
10716 
10717 static int
10718 mlx5_table_resize_complete(struct rte_eth_dev *dev,
10719 			   struct rte_flow_template_table *table,
10720 			   struct rte_flow_error *error)
10721 {
10722 	const struct mlx5_flow_driver_ops *fops;
10723 
10724 	MLX5_DRV_FOPS_OR_ERR(dev, fops, table_resize_complete, ENOTSUP);
10725 	return fops->table_resize_complete(dev, table, error);
10726 }
10727 
10728 static int
10729 mlx5_flow_async_update_resized(struct rte_eth_dev *dev, uint32_t queue,
10730 			       const struct rte_flow_op_attr *op_attr,
10731 			       struct rte_flow *rule, void *user_data,
10732 			       struct rte_flow_error *error)
10733 {
10734 	const struct mlx5_flow_driver_ops *fops;
10735 
10736 	MLX5_DRV_FOPS_OR_ERR(dev, fops, flow_update_resized, ENOTSUP);
10737 	return fops->flow_update_resized(dev, queue, op_attr, rule, user_data, error);
10738 }
10739 
10740 /**
10741  * Destroy all indirect actions (shared RSS).
10742  *
10743  * @param dev
10744  *   Pointer to Ethernet device.
10745  *
10746  * @return
10747  *   0 on success, a negative errno value otherwise and rte_errno is set.
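 *
 * Failures are accumulated with bitwise OR over all shared RSS entries,
 * so a non-zero return means at least one handle could not be destroyed
 * while the loop still visited every entry.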
10748 */ 10749 int 10750 mlx5_action_handle_flush(struct rte_eth_dev *dev) 10751 { 10752 struct rte_flow_error error; 10753 struct mlx5_priv *priv = dev->data->dev_private; 10754 struct mlx5_shared_action_rss *shared_rss; 10755 int ret = 0; 10756 uint32_t idx; 10757 10758 ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], 10759 priv->rss_shared_actions, idx, shared_rss, next) { 10760 ret |= mlx5_action_handle_destroy(dev, 10761 (struct rte_flow_action_handle *)(uintptr_t)idx, &error); 10762 } 10763 return ret; 10764 } 10765 10766 /** 10767 * Validate existing indirect actions against current device configuration 10768 * and attach them to device resources. 10769 * 10770 * @param dev 10771 * Pointer to Ethernet device. 10772 * 10773 * @return 10774 * 0 on success, a negative errno value otherwise and rte_errno is set. 10775 */ 10776 int 10777 mlx5_action_handle_attach(struct rte_eth_dev *dev) 10778 { 10779 struct mlx5_priv *priv = dev->data->dev_private; 10780 int ret = 0; 10781 struct mlx5_ind_table_obj *ind_tbl, *ind_tbl_last; 10782 10783 LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) { 10784 const char *message; 10785 uint32_t queue_idx; 10786 10787 ret = mlx5_validate_rss_queues(dev, ind_tbl->queues, 10788 ind_tbl->queues_n, 10789 &message, &queue_idx); 10790 if (ret != 0) { 10791 DRV_LOG(ERR, "Port %u cannot use queue %u in RSS: %s", 10792 dev->data->port_id, ind_tbl->queues[queue_idx], 10793 message); 10794 break; 10795 } 10796 } 10797 if (ret != 0) 10798 return ret; 10799 LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) { 10800 ret = mlx5_ind_table_obj_attach(dev, ind_tbl); 10801 if (ret != 0) { 10802 DRV_LOG(ERR, "Port %u could not attach " 10803 "indirection table obj %p", 10804 dev->data->port_id, (void *)ind_tbl); 10805 goto error; 10806 } 10807 } 10808 10809 return 0; 10810 error: 10811 ind_tbl_last = ind_tbl; 10812 LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) { 10813 if (ind_tbl == ind_tbl_last) 10814 break; 10815 if (mlx5_ind_table_obj_detach(dev, ind_tbl) != 0) 10816 DRV_LOG(CRIT, "Port %u could not detach " 10817 "indirection table obj %p on rollback", 10818 dev->data->port_id, (void *)ind_tbl); 10819 } 10820 return ret; 10821 } 10822 10823 /** 10824 * Detach indirect actions of the device from its resources. 10825 * 10826 * @param dev 10827 * Pointer to Ethernet device. 10828 * 10829 * @return 10830 * 0 on success, a negative errno value otherwise and rte_errno is set. 
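 *
 * A minimal ordering sketch (hypothetical caller, illustration only):
 * indirect actions are detached while the port is stopped and re-attached,
 * after re-validation, when it is started again:
 *
 * @code
 *	mlx5_action_handle_detach(dev);	// on stop: release queue resources
 *	// ... port reconfiguration ...
 *	ret = mlx5_action_handle_attach(dev);	// on start: validate and attach
 * @endcode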
 */
int
mlx5_action_handle_detach(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret = 0;
	struct mlx5_ind_table_obj *ind_tbl, *ind_tbl_last;

	LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
		ret = mlx5_ind_table_obj_detach(dev, ind_tbl);
		if (ret != 0) {
			DRV_LOG(ERR, "Port %u could not detach "
				"indirection table obj %p",
				dev->data->port_id, (void *)ind_tbl);
			goto error;
		}
	}
	return 0;
error:
	ind_tbl_last = ind_tbl;
	LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
		if (ind_tbl == ind_tbl_last)
			break;
		if (mlx5_ind_table_obj_attach(dev, ind_tbl) != 0)
			DRV_LOG(CRIT, "Port %u could not attach "
				"indirection table obj %p on rollback",
				dev->data->port_id, (void *)ind_tbl);
	}
	return ret;
}

#ifndef HAVE_MLX5DV_DR
#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
#else
#define MLX5_DOMAIN_SYNC_FLOW \
	(MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
#endif

int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct mlx5_flow_driver_ops *fops;
	int ret;
	struct rte_flow_attr attr = { .transfer = 0 };

	fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
	ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
	if (ret > 0)
		ret = -ret;
	return ret;
}

const struct mlx5_flow_tunnel *
mlx5_get_tof(const struct rte_flow_item *item,
	     const struct rte_flow_action *action,
	     enum mlx5_tof_rule_type *rule_type)
{
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type == (typeof(item->type))
				  MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL) {
			*rule_type = MLX5_TUNNEL_OFFLOAD_MATCH_RULE;
			return flow_items_to_tunnel(item);
		}
	}
	/* Compare the action type, not the conf pointer, with the enum. */
	for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
		if (action->type == (typeof(action->type))
				    MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
			*rule_type = MLX5_TUNNEL_OFFLOAD_SET_RULE;
			return flow_actions_to_tunnel(action);
		}
	}
	return NULL;
}

/**
 * Tunnel offload functionality is defined for the DV environment only.
 */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
__extension__
union tunnel_offload_mark {
	uint32_t val;
	struct {
		uint32_t app_reserve:8; /**< Bits reserved for application use. */
		uint32_t table_id:15; /**< Tunnel flow table ID. */
		uint32_t transfer:1; /**< 1 if the rule is in the FDB domain. */
		uint32_t _unused_:8;
	};
};

static bool
mlx5_access_tunnel_offload_db
	(struct rte_eth_dev *dev,
	 bool (*match)(struct rte_eth_dev *,
		       struct mlx5_flow_tunnel *, const void *),
	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
	 void (*miss)(struct rte_eth_dev *, void *),
	 void *ctx, bool lock_op);

static int
flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
			     struct rte_flow *flow,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_action *app_actions,
			     uint32_t flow_idx,
			     const struct mlx5_flow_tunnel *tunnel,
			     struct tunnel_default_miss_ctx *ctx,
			     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow *dev_flow;
	struct rte_flow_attr miss_attr = *attr;
	const struct rte_flow_item miss_items[2] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_ETH,
			.spec = NULL,
			.last = NULL,
			.mask = NULL
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
			.spec = NULL,
			.last = NULL,
			.mask = NULL
		}
	};
	union tunnel_offload_mark mark_id;
	struct rte_flow_action_mark miss_mark;
	struct rte_flow_action miss_actions[3] = {
		[0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
		[2] = { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL }
	};
	const struct rte_flow_action_jump *jump_data;
	uint32_t i, flow_table = 0; /* prevent compilation warning */
	struct flow_grp_info grp_info = {
		.external = 1,
		.transfer = attr->transfer,
		.fdb_def_rule = !!priv->fdb_def_rule,
		.std_tbl_fix = 0,
	};
	int ret;

	if (!attr->transfer) {
		uint32_t q_size;

		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
		q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
		ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
					 0, SOCKET_ID_ANY);
		if (!ctx->queue)
			return rte_flow_error_set
				(error, ENOMEM,
				 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 NULL, "invalid default miss RSS");
		ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT;
		ctx->action_rss.level = 0;
		ctx->action_rss.types = priv->rss_conf.rss_hf;
		ctx->action_rss.key_len = priv->rss_conf.rss_key_len;
		ctx->action_rss.queue_num = priv->reta_idx_n;
		ctx->action_rss.key = priv->rss_conf.rss_key;
		ctx->action_rss.queue = ctx->queue;
		if (!priv->reta_idx_n || !priv->rxqs_n)
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 NULL, "invalid port configuration");
		if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
			ctx->action_rss.types = 0;
		for (i = 0; i != priv->reta_idx_n; ++i)
			ctx->queue[i] = (*priv->reta_idx)[i];
	} else {
		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
		ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
	}
	miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
	for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
	jump_data = app_actions->conf;
	miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
	miss_attr.group = jump_data->group;
	ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
				       &flow_table, &grp_info, error);
	if (ret)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "invalid tunnel id");
	mark_id.app_reserve = 0;
	mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
	mark_id.transfer = !!attr->transfer;
	mark_id._unused_ = 0;
	miss_mark.id = mark_id.val;
	dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
				    miss_items, miss_actions, flow_idx, error);
	if (!dev_flow)
		return -rte_errno;
	dev_flow->flow = flow;
	dev_flow->external = true;
	dev_flow->tunnel = tunnel;
	dev_flow->tof_type = MLX5_TUNNEL_OFFLOAD_MISS_RULE;
	/* Subflow object was created, we must include it in the list.
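	 * Linking the handle into flow->dev_handles lets the generic flow
	 * release path destroy this default-miss subflow together with the
	 * application flow that owns it.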
*/ 11029 SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx, 11030 dev_flow->handle, next); 11031 DRV_LOG(DEBUG, 11032 "port %u tunnel type=%d id=%u miss rule priority=%u group=%u", 11033 dev->data->port_id, tunnel->app_tunnel.type, 11034 tunnel->tunnel_id, miss_attr.priority, miss_attr.group); 11035 ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items, 11036 miss_actions, error); 11037 if (!ret) 11038 ret = flow_mreg_update_copy_table(dev, flow, miss_actions, 11039 error); 11040 11041 return ret; 11042 } 11043 11044 static const struct mlx5_flow_tbl_data_entry * 11045 tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark) 11046 { 11047 struct mlx5_priv *priv = dev->data->dev_private; 11048 struct mlx5_dev_ctx_shared *sh = priv->sh; 11049 struct mlx5_list_entry *he; 11050 union tunnel_offload_mark mbits = { .val = mark }; 11051 union mlx5_flow_tbl_key table_key = { 11052 { 11053 .level = tunnel_id_to_flow_tbl(mbits.table_id), 11054 .id = 0, 11055 .reserved = 0, 11056 .dummy = 0, 11057 .is_fdb = !!mbits.transfer, 11058 .is_egress = 0, 11059 } 11060 }; 11061 struct mlx5_flow_cb_ctx ctx = { 11062 .data = &table_key.v64, 11063 }; 11064 11065 he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, &ctx); 11066 return he ? 11067 container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL; 11068 } 11069 11070 static void 11071 mlx5_flow_tunnel_grp2tbl_remove_cb(void *tool_ctx, 11072 struct mlx5_list_entry *entry) 11073 { 11074 struct mlx5_dev_ctx_shared *sh = tool_ctx; 11075 struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash); 11076 11077 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID], 11078 tunnel_flow_tbl_to_id(tte->flow_table)); 11079 mlx5_free(tte); 11080 } 11081 11082 static int 11083 mlx5_flow_tunnel_grp2tbl_match_cb(void *tool_ctx __rte_unused, 11084 struct mlx5_list_entry *entry, void *cb_ctx) 11085 { 11086 struct mlx5_flow_cb_ctx *ctx = cb_ctx; 11087 union tunnel_tbl_key tbl = { 11088 .val = *(uint64_t *)(ctx->data), 11089 }; 11090 struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash); 11091 11092 return tbl.tunnel_id != tte->tunnel_id || tbl.group != tte->group; 11093 } 11094 11095 static struct mlx5_list_entry * 11096 mlx5_flow_tunnel_grp2tbl_create_cb(void *tool_ctx, void *cb_ctx) 11097 { 11098 struct mlx5_dev_ctx_shared *sh = tool_ctx; 11099 struct mlx5_flow_cb_ctx *ctx = cb_ctx; 11100 struct tunnel_tbl_entry *tte; 11101 union tunnel_tbl_key tbl = { 11102 .val = *(uint64_t *)(ctx->data), 11103 }; 11104 11105 tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, 11106 sizeof(*tte), 0, 11107 SOCKET_ID_ANY); 11108 if (!tte) 11109 goto err; 11110 mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID], 11111 &tte->flow_table); 11112 if (tte->flow_table >= MLX5_MAX_TABLES) { 11113 DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.", 11114 tte->flow_table); 11115 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID], 11116 tte->flow_table); 11117 goto err; 11118 } else if (!tte->flow_table) { 11119 goto err; 11120 } 11121 tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table); 11122 tte->tunnel_id = tbl.tunnel_id; 11123 tte->group = tbl.group; 11124 return &tte->hash; 11125 err: 11126 if (tte) 11127 mlx5_free(tte); 11128 return NULL; 11129 } 11130 11131 static struct mlx5_list_entry * 11132 mlx5_flow_tunnel_grp2tbl_clone_cb(void *tool_ctx __rte_unused, 11133 struct mlx5_list_entry *oentry, 11134 void *cb_ctx __rte_unused) 11135 { 11136 struct tunnel_tbl_entry *tte = mlx5_malloc(MLX5_MEM_SYS, sizeof(*tte), 11137 0, SOCKET_ID_ANY); 11138 11139 if 
(!tte) 11140 return NULL; 11141 memcpy(tte, oentry, sizeof(*tte)); 11142 return &tte->hash; 11143 } 11144 11145 static void 11146 mlx5_flow_tunnel_grp2tbl_clone_free_cb(void *tool_ctx __rte_unused, 11147 struct mlx5_list_entry *entry) 11148 { 11149 struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash); 11150 11151 mlx5_free(tte); 11152 } 11153 11154 static uint32_t 11155 tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev, 11156 const struct mlx5_flow_tunnel *tunnel, 11157 uint32_t group, uint32_t *table, 11158 struct rte_flow_error *error) 11159 { 11160 struct mlx5_list_entry *he; 11161 struct tunnel_tbl_entry *tte; 11162 union tunnel_tbl_key key = { 11163 .tunnel_id = tunnel ? tunnel->tunnel_id : 0, 11164 .group = group 11165 }; 11166 struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); 11167 struct mlx5_hlist *group_hash; 11168 struct mlx5_flow_cb_ctx ctx = { 11169 .data = &key.val, 11170 }; 11171 11172 group_hash = tunnel ? tunnel->groups : thub->groups; 11173 he = mlx5_hlist_register(group_hash, key.val, &ctx); 11174 if (!he) 11175 return rte_flow_error_set(error, EINVAL, 11176 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, 11177 NULL, 11178 "tunnel group index not supported"); 11179 tte = container_of(he, typeof(*tte), hash); 11180 *table = tte->flow_table; 11181 DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x", 11182 dev->data->port_id, key.tunnel_id, group, *table); 11183 return 0; 11184 } 11185 11186 static void 11187 mlx5_flow_tunnel_free(struct rte_eth_dev *dev, 11188 struct mlx5_flow_tunnel *tunnel) 11189 { 11190 struct mlx5_priv *priv = dev->data->dev_private; 11191 struct mlx5_indexed_pool *ipool; 11192 11193 DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x", 11194 dev->data->port_id, tunnel->tunnel_id); 11195 LIST_REMOVE(tunnel, chain); 11196 mlx5_hlist_destroy(tunnel->groups); 11197 ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID]; 11198 mlx5_ipool_free(ipool, tunnel->tunnel_id); 11199 } 11200 11201 static bool 11202 mlx5_access_tunnel_offload_db 11203 (struct rte_eth_dev *dev, 11204 bool (*match)(struct rte_eth_dev *, 11205 struct mlx5_flow_tunnel *, const void *), 11206 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *), 11207 void (*miss)(struct rte_eth_dev *, void *), 11208 void *ctx, bool lock_op) 11209 { 11210 bool verdict = false; 11211 struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); 11212 struct mlx5_flow_tunnel *tunnel; 11213 11214 rte_spinlock_lock(&thub->sl); 11215 LIST_FOREACH(tunnel, &thub->tunnels, chain) { 11216 verdict = match(dev, tunnel, (const void *)ctx); 11217 if (verdict) 11218 break; 11219 } 11220 if (!lock_op) 11221 rte_spinlock_unlock(&thub->sl); 11222 if (verdict && hit) 11223 hit(dev, tunnel, ctx); 11224 if (!verdict && miss) 11225 miss(dev, ctx); 11226 if (lock_op) 11227 rte_spinlock_unlock(&thub->sl); 11228 11229 return verdict; 11230 } 11231 11232 struct tunnel_db_find_tunnel_id_ctx { 11233 uint32_t tunnel_id; 11234 struct mlx5_flow_tunnel *tunnel; 11235 }; 11236 11237 static bool 11238 find_tunnel_id_match(struct rte_eth_dev *dev, 11239 struct mlx5_flow_tunnel *tunnel, const void *x) 11240 { 11241 const struct tunnel_db_find_tunnel_id_ctx *ctx = x; 11242 11243 RTE_SET_USED(dev); 11244 return tunnel->tunnel_id == ctx->tunnel_id; 11245 } 11246 11247 static void 11248 find_tunnel_id_hit(struct rte_eth_dev *dev, 11249 struct mlx5_flow_tunnel *tunnel, void *x) 11250 { 11251 struct tunnel_db_find_tunnel_id_ctx *ctx = x; 11252 RTE_SET_USED(dev); 11253 ctx->tunnel = tunnel; 11254 } 11255 11256 static struct 
mlx5_flow_tunnel * 11257 mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id) 11258 { 11259 struct tunnel_db_find_tunnel_id_ctx ctx = { 11260 .tunnel_id = id, 11261 }; 11262 11263 mlx5_access_tunnel_offload_db(dev, find_tunnel_id_match, 11264 find_tunnel_id_hit, NULL, &ctx, true); 11265 11266 return ctx.tunnel; 11267 } 11268 11269 static struct mlx5_flow_tunnel * 11270 mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev, 11271 const struct rte_flow_tunnel *app_tunnel) 11272 { 11273 struct mlx5_priv *priv = dev->data->dev_private; 11274 struct mlx5_indexed_pool *ipool; 11275 struct mlx5_flow_tunnel *tunnel; 11276 uint32_t id; 11277 11278 ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID]; 11279 tunnel = mlx5_ipool_zmalloc(ipool, &id); 11280 if (!tunnel) 11281 return NULL; 11282 if (id >= MLX5_MAX_TUNNELS) { 11283 mlx5_ipool_free(ipool, id); 11284 DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id); 11285 return NULL; 11286 } 11287 tunnel->groups = mlx5_hlist_create("tunnel groups", 64, false, true, 11288 priv->sh, 11289 mlx5_flow_tunnel_grp2tbl_create_cb, 11290 mlx5_flow_tunnel_grp2tbl_match_cb, 11291 mlx5_flow_tunnel_grp2tbl_remove_cb, 11292 mlx5_flow_tunnel_grp2tbl_clone_cb, 11293 mlx5_flow_tunnel_grp2tbl_clone_free_cb); 11294 if (!tunnel->groups) { 11295 mlx5_ipool_free(ipool, id); 11296 return NULL; 11297 } 11298 /* initiate new PMD tunnel */ 11299 memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel)); 11300 tunnel->tunnel_id = id; 11301 tunnel->action.type = (typeof(tunnel->action.type)) 11302 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET; 11303 tunnel->action.conf = tunnel; 11304 tunnel->item.type = (typeof(tunnel->item.type)) 11305 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL; 11306 tunnel->item.spec = tunnel; 11307 tunnel->item.last = NULL; 11308 tunnel->item.mask = NULL; 11309 11310 DRV_LOG(DEBUG, "port %u new pmd tunnel id=0x%x", 11311 dev->data->port_id, tunnel->tunnel_id); 11312 11313 return tunnel; 11314 } 11315 11316 struct tunnel_db_get_tunnel_ctx { 11317 const struct rte_flow_tunnel *app_tunnel; 11318 struct mlx5_flow_tunnel *tunnel; 11319 }; 11320 11321 static bool get_tunnel_match(struct rte_eth_dev *dev, 11322 struct mlx5_flow_tunnel *tunnel, const void *x) 11323 { 11324 const struct tunnel_db_get_tunnel_ctx *ctx = x; 11325 11326 RTE_SET_USED(dev); 11327 return !memcmp(ctx->app_tunnel, &tunnel->app_tunnel, 11328 sizeof(*ctx->app_tunnel)); 11329 } 11330 11331 static void get_tunnel_hit(struct rte_eth_dev *dev, 11332 struct mlx5_flow_tunnel *tunnel, void *x) 11333 { 11334 /* called under tunnel spinlock protection */ 11335 struct tunnel_db_get_tunnel_ctx *ctx = x; 11336 11337 RTE_SET_USED(dev); 11338 tunnel->refctn++; 11339 ctx->tunnel = tunnel; 11340 } 11341 11342 static void get_tunnel_miss(struct rte_eth_dev *dev, void *x) 11343 { 11344 /* called under tunnel spinlock protection */ 11345 struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); 11346 struct tunnel_db_get_tunnel_ctx *ctx = x; 11347 11348 rte_spinlock_unlock(&thub->sl); 11349 ctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel); 11350 rte_spinlock_lock(&thub->sl); 11351 if (ctx->tunnel) { 11352 ctx->tunnel->refctn = 1; 11353 LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain); 11354 } 11355 } 11356 11357 11358 static int 11359 mlx5_get_flow_tunnel(struct rte_eth_dev *dev, 11360 const struct rte_flow_tunnel *app_tunnel, 11361 struct mlx5_flow_tunnel **tunnel) 11362 { 11363 struct tunnel_db_get_tunnel_ctx ctx = { 11364 .app_tunnel = app_tunnel, 11365 }; 11366 11367 mlx5_access_tunnel_offload_db(dev, get_tunnel_match, 
get_tunnel_hit, 11368 get_tunnel_miss, &ctx, true); 11369 *tunnel = ctx.tunnel; 11370 return ctx.tunnel ? 0 : -ENOMEM; 11371 } 11372 11373 void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id) 11374 { 11375 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub; 11376 11377 if (!thub) 11378 return; 11379 if (!LIST_EMPTY(&thub->tunnels)) 11380 DRV_LOG(WARNING, "port %u tunnels present", port_id); 11381 mlx5_hlist_destroy(thub->groups); 11382 mlx5_free(thub); 11383 } 11384 11385 int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh) 11386 { 11387 int err; 11388 struct mlx5_flow_tunnel_hub *thub; 11389 11390 thub = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*thub), 11391 0, SOCKET_ID_ANY); 11392 if (!thub) 11393 return -ENOMEM; 11394 LIST_INIT(&thub->tunnels); 11395 rte_spinlock_init(&thub->sl); 11396 thub->groups = mlx5_hlist_create("flow groups", 64, 11397 false, true, sh, 11398 mlx5_flow_tunnel_grp2tbl_create_cb, 11399 mlx5_flow_tunnel_grp2tbl_match_cb, 11400 mlx5_flow_tunnel_grp2tbl_remove_cb, 11401 mlx5_flow_tunnel_grp2tbl_clone_cb, 11402 mlx5_flow_tunnel_grp2tbl_clone_free_cb); 11403 if (!thub->groups) { 11404 err = -rte_errno; 11405 goto err; 11406 } 11407 sh->tunnel_hub = thub; 11408 11409 return 0; 11410 11411 err: 11412 if (thub->groups) 11413 mlx5_hlist_destroy(thub->groups); 11414 if (thub) 11415 mlx5_free(thub); 11416 return err; 11417 } 11418 11419 static inline int 11420 mlx5_flow_tunnel_validate(struct rte_eth_dev *dev, 11421 struct rte_flow_tunnel *tunnel, 11422 struct rte_flow_error *error) 11423 { 11424 struct mlx5_priv *priv = dev->data->dev_private; 11425 11426 if (!priv->sh->config.dv_flow_en) 11427 return rte_flow_error_set(error, ENOTSUP, 11428 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 11429 "flow DV interface is off"); 11430 if (!is_tunnel_offload_active(dev)) 11431 return rte_flow_error_set(error, ENOTSUP, 11432 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 11433 "tunnel offload was not activated, consider setting dv_xmeta_en=3"); 11434 if (!tunnel) 11435 return rte_flow_error_set(error, EINVAL, 11436 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 11437 "no application tunnel"); 11438 switch (tunnel->type) { 11439 default: 11440 return rte_flow_error_set(error, EINVAL, 11441 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 11442 "unsupported tunnel type"); 11443 case RTE_FLOW_ITEM_TYPE_VXLAN: 11444 case RTE_FLOW_ITEM_TYPE_GRE: 11445 case RTE_FLOW_ITEM_TYPE_NVGRE: 11446 case RTE_FLOW_ITEM_TYPE_GENEVE: 11447 break; 11448 } 11449 return 0; 11450 } 11451 11452 static int 11453 mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev, 11454 struct rte_flow_tunnel *app_tunnel, 11455 struct rte_flow_action **actions, 11456 uint32_t *num_of_actions, 11457 struct rte_flow_error *error) 11458 { 11459 struct mlx5_flow_tunnel *tunnel; 11460 int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error); 11461 11462 if (ret) 11463 return ret; 11464 ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel); 11465 if (ret < 0) { 11466 return rte_flow_error_set(error, ret, 11467 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 11468 "failed to initialize pmd tunnel"); 11469 } 11470 *actions = &tunnel->action; 11471 *num_of_actions = 1; 11472 return 0; 11473 } 11474 11475 static int 11476 mlx5_flow_tunnel_match(struct rte_eth_dev *dev, 11477 struct rte_flow_tunnel *app_tunnel, 11478 struct rte_flow_item **items, 11479 uint32_t *num_of_items, 11480 struct rte_flow_error *error) 11481 { 11482 struct mlx5_flow_tunnel *tunnel; 11483 int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error); 11484 11485 if 
(ret) 11486 return ret; 11487 ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel); 11488 if (ret < 0) { 11489 return rte_flow_error_set(error, ret, 11490 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 11491 "failed to initialize pmd tunnel"); 11492 } 11493 *items = &tunnel->item; 11494 *num_of_items = 1; 11495 return 0; 11496 } 11497 11498 struct tunnel_db_element_release_ctx { 11499 struct rte_flow_item *items; 11500 struct rte_flow_action *actions; 11501 uint32_t num_elements; 11502 struct rte_flow_error *error; 11503 int ret; 11504 }; 11505 11506 static bool 11507 tunnel_element_release_match(struct rte_eth_dev *dev, 11508 struct mlx5_flow_tunnel *tunnel, const void *x) 11509 { 11510 const struct tunnel_db_element_release_ctx *ctx = x; 11511 11512 RTE_SET_USED(dev); 11513 if (ctx->num_elements != 1) 11514 return false; 11515 else if (ctx->items) 11516 return ctx->items == &tunnel->item; 11517 else if (ctx->actions) 11518 return ctx->actions == &tunnel->action; 11519 11520 return false; 11521 } 11522 11523 static void 11524 tunnel_element_release_hit(struct rte_eth_dev *dev, 11525 struct mlx5_flow_tunnel *tunnel, void *x) 11526 { 11527 struct tunnel_db_element_release_ctx *ctx = x; 11528 ctx->ret = 0; 11529 if (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed) - 1)) 11530 mlx5_flow_tunnel_free(dev, tunnel); 11531 } 11532 11533 static void 11534 tunnel_element_release_miss(struct rte_eth_dev *dev, void *x) 11535 { 11536 struct tunnel_db_element_release_ctx *ctx = x; 11537 RTE_SET_USED(dev); 11538 ctx->ret = rte_flow_error_set(ctx->error, EINVAL, 11539 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 11540 "invalid argument"); 11541 } 11542 11543 static int 11544 mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev, 11545 struct rte_flow_item *pmd_items, 11546 uint32_t num_items, struct rte_flow_error *err) 11547 { 11548 struct tunnel_db_element_release_ctx ctx = { 11549 .items = pmd_items, 11550 .actions = NULL, 11551 .num_elements = num_items, 11552 .error = err, 11553 }; 11554 11555 mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match, 11556 tunnel_element_release_hit, 11557 tunnel_element_release_miss, &ctx, false); 11558 11559 return ctx.ret; 11560 } 11561 11562 static int 11563 mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev, 11564 struct rte_flow_action *pmd_actions, 11565 uint32_t num_actions, struct rte_flow_error *err) 11566 { 11567 struct tunnel_db_element_release_ctx ctx = { 11568 .items = NULL, 11569 .actions = pmd_actions, 11570 .num_elements = num_actions, 11571 .error = err, 11572 }; 11573 11574 mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match, 11575 tunnel_element_release_hit, 11576 tunnel_element_release_miss, &ctx, false); 11577 11578 return ctx.ret; 11579 } 11580 11581 static int 11582 mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev, 11583 struct rte_mbuf *m, 11584 struct rte_flow_restore_info *info, 11585 struct rte_flow_error *err) 11586 { 11587 uint64_t ol_flags = m->ol_flags; 11588 const struct mlx5_flow_tbl_data_entry *tble; 11589 const uint64_t mask = RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID; 11590 struct mlx5_priv *priv = dev->data->dev_private; 11591 11592 if (priv->tunnel_enabled == 0) 11593 goto err; 11594 if ((ol_flags & mask) != mask) 11595 goto err; 11596 tble = tunnel_mark_decode(dev, m->hash.fdir.hi); 11597 if (!tble) { 11598 DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x", 11599 dev->data->port_id, m->hash.fdir.hi); 11600 goto err; 11601 } 11602 MLX5_ASSERT(tble->tunnel); 11603 memcpy(&info->tunnel, 
&tble->tunnel->app_tunnel, sizeof(info->tunnel)); 11604 info->group_id = tble->group_id; 11605 info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL | 11606 RTE_FLOW_RESTORE_INFO_GROUP_ID | 11607 RTE_FLOW_RESTORE_INFO_ENCAPSULATED; 11608 11609 return 0; 11610 11611 err: 11612 return rte_flow_error_set(err, EINVAL, 11613 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 11614 "failed to get restore info"); 11615 } 11616 11617 #else /* HAVE_IBV_FLOW_DV_SUPPORT */ 11618 static int 11619 mlx5_flow_tunnel_decap_set(__rte_unused struct rte_eth_dev *dev, 11620 __rte_unused struct rte_flow_tunnel *app_tunnel, 11621 __rte_unused struct rte_flow_action **actions, 11622 __rte_unused uint32_t *num_of_actions, 11623 __rte_unused struct rte_flow_error *error) 11624 { 11625 return -ENOTSUP; 11626 } 11627 11628 static int 11629 mlx5_flow_tunnel_match(__rte_unused struct rte_eth_dev *dev, 11630 __rte_unused struct rte_flow_tunnel *app_tunnel, 11631 __rte_unused struct rte_flow_item **items, 11632 __rte_unused uint32_t *num_of_items, 11633 __rte_unused struct rte_flow_error *error) 11634 { 11635 return -ENOTSUP; 11636 } 11637 11638 static int 11639 mlx5_flow_tunnel_item_release(__rte_unused struct rte_eth_dev *dev, 11640 __rte_unused struct rte_flow_item *pmd_items, 11641 __rte_unused uint32_t num_items, 11642 __rte_unused struct rte_flow_error *err) 11643 { 11644 return -ENOTSUP; 11645 } 11646 11647 static int 11648 mlx5_flow_tunnel_action_release(__rte_unused struct rte_eth_dev *dev, 11649 __rte_unused struct rte_flow_action *pmd_action, 11650 __rte_unused uint32_t num_actions, 11651 __rte_unused struct rte_flow_error *err) 11652 { 11653 return -ENOTSUP; 11654 } 11655 11656 static int 11657 mlx5_flow_tunnel_get_restore_info(__rte_unused struct rte_eth_dev *dev, 11658 __rte_unused struct rte_mbuf *m, 11659 __rte_unused struct rte_flow_restore_info *i, 11660 __rte_unused struct rte_flow_error *err) 11661 { 11662 return -ENOTSUP; 11663 } 11664 11665 static int 11666 flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev, 11667 __rte_unused struct rte_flow *flow, 11668 __rte_unused const struct rte_flow_attr *attr, 11669 __rte_unused const struct rte_flow_action *actions, 11670 __rte_unused uint32_t flow_idx, 11671 __rte_unused const struct mlx5_flow_tunnel *tunnel, 11672 __rte_unused struct tunnel_default_miss_ctx *ctx, 11673 __rte_unused struct rte_flow_error *error) 11674 { 11675 return -ENOTSUP; 11676 } 11677 11678 static struct mlx5_flow_tunnel * 11679 mlx5_find_tunnel_id(__rte_unused struct rte_eth_dev *dev, 11680 __rte_unused uint32_t id) 11681 { 11682 return NULL; 11683 } 11684 11685 static void 11686 mlx5_flow_tunnel_free(__rte_unused struct rte_eth_dev *dev, 11687 __rte_unused struct mlx5_flow_tunnel *tunnel) 11688 { 11689 } 11690 11691 static uint32_t 11692 tunnel_flow_group_to_flow_table(__rte_unused struct rte_eth_dev *dev, 11693 __rte_unused const struct mlx5_flow_tunnel *t, 11694 __rte_unused uint32_t group, 11695 __rte_unused uint32_t *table, 11696 struct rte_flow_error *error) 11697 { 11698 return rte_flow_error_set(error, ENOTSUP, 11699 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 11700 "tunnel offload requires DV support"); 11701 } 11702 11703 void 11704 mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh, 11705 __rte_unused uint16_t port_id) 11706 { 11707 } 11708 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */ 11709 11710 /* Flex flow item API */ 11711 static struct rte_flow_item_flex_handle * 11712 mlx5_flow_flex_item_create(struct rte_eth_dev *dev, 11713 const struct rte_flow_item_flex_conf *conf, 
11714 struct rte_flow_error *error) 11715 { 11716 static const char err_msg[] = "flex item creation unsupported"; 11717 struct mlx5_priv *priv = dev->data->dev_private; 11718 struct rte_flow_attr attr = { .transfer = 0 }; 11719 const struct mlx5_flow_driver_ops *fops = 11720 flow_get_drv_ops(flow_get_drv_type(dev, &attr)); 11721 11722 if (!priv->pci_dev) { 11723 rte_flow_error_set(error, ENOTSUP, 11724 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 11725 "create flex item on PF only"); 11726 return NULL; 11727 } 11728 switch (priv->pci_dev->id.device_id) { 11729 case PCI_DEVICE_ID_MELLANOX_BLUEFIELD2: 11730 case PCI_DEVICE_ID_MELLANOX_BLUEFIELD3: 11731 break; 11732 default: 11733 rte_flow_error_set(error, ENOTSUP, 11734 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 11735 "flex item available on BlueField ports only"); 11736 return NULL; 11737 } 11738 if (!fops->item_create) { 11739 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg); 11740 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, 11741 NULL, err_msg); 11742 return NULL; 11743 } 11744 return fops->item_create(dev, conf, error); 11745 } 11746 11747 static int 11748 mlx5_flow_flex_item_release(struct rte_eth_dev *dev, 11749 const struct rte_flow_item_flex_handle *handle, 11750 struct rte_flow_error *error) 11751 { 11752 static const char err_msg[] = "flex item release unsupported"; 11753 struct rte_flow_attr attr = { .transfer = 0 }; 11754 const struct mlx5_flow_driver_ops *fops = 11755 flow_get_drv_ops(flow_get_drv_type(dev, &attr)); 11756 11757 if (!fops->item_release) { 11758 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg); 11759 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, 11760 NULL, err_msg); 11761 return -rte_errno; 11762 } 11763 return fops->item_release(dev, handle, error); 11764 } 11765 11766 static void 11767 mlx5_dbg__print_pattern(const struct rte_flow_item *item) 11768 { 11769 int ret; 11770 struct rte_flow_error error; 11771 11772 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { 11773 char *item_name; 11774 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, &item_name, 11775 sizeof(item_name), 11776 (void *)(uintptr_t)item->type, &error); 11777 if (ret > 0) 11778 printf("%s ", item_name); 11779 else 11780 printf("%d\n", (int)item->type); 11781 } 11782 printf("END\n"); 11783 } 11784 11785 static int 11786 mlx5_flow_is_std_vxlan_port(const struct rte_flow_item *udp_item) 11787 { 11788 const struct rte_flow_item_udp *spec = udp_item->spec; 11789 const struct rte_flow_item_udp *mask = udp_item->mask; 11790 uint16_t udp_dport = 0; 11791 11792 if (spec != NULL) { 11793 if (!mask) 11794 mask = &rte_flow_item_udp_mask; 11795 udp_dport = rte_be_to_cpu_16(spec->hdr.dst_port & 11796 mask->hdr.dst_port); 11797 } 11798 return (!udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN); 11799 } 11800 11801 static const struct mlx5_flow_expand_node * 11802 mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern, 11803 unsigned int item_idx, 11804 const struct mlx5_flow_expand_node graph[], 11805 const struct mlx5_flow_expand_node *node) 11806 { 11807 const struct rte_flow_item *item = pattern + item_idx, *prev_item; 11808 11809 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN && 11810 node != NULL && 11811 node->type == RTE_FLOW_ITEM_TYPE_VXLAN) { 11812 /* 11813 * The expansion node is VXLAN and it is also the last 11814 * expandable item in the pattern, so need to continue 11815 * expansion of the inner tunnel. 
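 * For example, ETH / IPV4 / UDP dport=4789 / VXLAN (or a pattern that
 * leaves the UDP destination port unmasked) keeps the standard VXLAN
 * expansion, while an explicitly matched non-standard port selects the
 * L3 VXLAN expansion node instead.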
11816 */ 11817 MLX5_ASSERT(item_idx > 0); 11818 prev_item = pattern + item_idx - 1; 11819 MLX5_ASSERT(prev_item->type == RTE_FLOW_ITEM_TYPE_UDP); 11820 if (mlx5_flow_is_std_vxlan_port(prev_item)) 11821 return &graph[MLX5_EXPANSION_STD_VXLAN]; 11822 return &graph[MLX5_EXPANSION_L3_VXLAN]; 11823 } 11824 return node; 11825 } 11826 11827 /* Map of Verbs to Flow priority with 8 Verbs priorities. */ 11828 static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = { 11829 { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 }, 11830 }; 11831 11832 /* Map of Verbs to Flow priority with 16 Verbs priorities. */ 11833 static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = { 11834 { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 }, 11835 { 9, 10, 11 }, { 12, 13, 14 }, 11836 }; 11837 11838 /** 11839 * Discover the number of available flow priorities. 11840 * 11841 * @param dev 11842 * Ethernet device. 11843 * 11844 * @return 11845 * On success, number of available flow priorities. 11846 * On failure, a negative errno-style code and rte_errno is set. 11847 */ 11848 int 11849 mlx5_flow_discover_priorities(struct rte_eth_dev *dev) 11850 { 11851 static const uint16_t vprio[] = {8, 16}; 11852 const struct mlx5_priv *priv = dev->data->dev_private; 11853 const struct mlx5_flow_driver_ops *fops; 11854 enum mlx5_flow_drv_type type; 11855 int ret; 11856 11857 type = mlx5_flow_os_get_type(); 11858 if (type == MLX5_FLOW_TYPE_MAX) { 11859 type = MLX5_FLOW_TYPE_VERBS; 11860 if (priv->sh->cdev->config.devx && priv->sh->config.dv_flow_en) 11861 type = MLX5_FLOW_TYPE_DV; 11862 } 11863 fops = flow_get_drv_ops(type); 11864 if (fops->discover_priorities == NULL) { 11865 DRV_LOG(ERR, "Priority discovery not supported"); 11866 rte_errno = ENOTSUP; 11867 return -rte_errno; 11868 } 11869 ret = fops->discover_priorities(dev, vprio, RTE_DIM(vprio)); 11870 if (ret < 0) 11871 return ret; 11872 switch (ret) { 11873 case 8: 11874 ret = RTE_DIM(priority_map_3); 11875 break; 11876 case 16: 11877 ret = RTE_DIM(priority_map_5); 11878 break; 11879 default: 11880 rte_errno = ENOTSUP; 11881 DRV_LOG(ERR, 11882 "port %u maximum priority: %d expected 8/16", 11883 dev->data->port_id, ret); 11884 return -rte_errno; 11885 } 11886 DRV_LOG(INFO, "port %u supported flow priorities:" 11887 " 0-%d for ingress or egress root table," 11888 " 0-%d for non-root table or transfer root table.", 11889 dev->data->port_id, ret - 2, 11890 MLX5_NON_ROOT_FLOW_MAX_PRIO - 1); 11891 return ret; 11892 } 11893 11894 /** 11895 * Adjust flow priority based on the highest layer and the request priority. 11896 * 11897 * @param[in] dev 11898 * Pointer to the Ethernet device structure. 11899 * @param[in] priority 11900 * The rule base priority. 11901 * @param[in] subpriority 11902 * The priority based on the items. 11903 * 11904 * @return 11905 * The new priority. 11906 */ 11907 uint32_t 11908 mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, 11909 uint32_t subpriority) 11910 { 11911 uint32_t res = 0; 11912 struct mlx5_priv *priv = dev->data->dev_private; 11913 11914 switch (priv->sh->flow_max_priority) { 11915 case RTE_DIM(priority_map_3): 11916 res = priority_map_3[priority][subpriority]; 11917 break; 11918 case RTE_DIM(priority_map_5): 11919 res = priority_map_5[priority][subpriority]; 11920 break; 11921 } 11922 return res; 11923 } 11924 11925 /** 11926 * Get the priority for sending traffic to kernel table. 11927 * 11928 * @param[in] dev 11929 * Pointer to the Ethernet device structure. 
11930 * 11931 * @return 11932 * On success: the value of priority for sending traffic to kernel table 11933 * On failure: -1 11934 */ 11935 uint32_t 11936 mlx5_get_send_to_kernel_priority(struct rte_eth_dev *dev) 11937 { 11938 struct mlx5_priv *priv = dev->data->dev_private; 11939 uint32_t res; 11940 11941 switch (priv->sh->flow_max_priority) { 11942 case RTE_DIM(priority_map_5): 11943 res = 15; 11944 break; 11945 case RTE_DIM(priority_map_3): 11946 res = 7; 11947 break; 11948 default: 11949 DRV_LOG(ERR, 11950 "port %u maximum priority: %d expected 8/16", 11951 dev->data->port_id, priv->sh->flow_max_priority); 11952 res = (uint32_t)-1; 11953 } 11954 return res; 11955 } 11956 11957 /** 11958 * Get the E-Switch Manager vport id. 11959 * 11960 * @param[in] dev 11961 * Pointer to the Ethernet device structure. 11962 * 11963 * @return 11964 * The vport id. 11965 */ 11966 int16_t mlx5_flow_get_esw_manager_vport_id(struct rte_eth_dev *dev) 11967 { 11968 struct mlx5_priv *priv = dev->data->dev_private; 11969 struct mlx5_common_device *cdev = priv->sh->cdev; 11970 11971 /* New FW exposes E-Switch Manager vport ID, can use it directly. */ 11972 if (cdev->config.hca_attr.esw_mgr_vport_id_valid) 11973 return (int16_t)cdev->config.hca_attr.esw_mgr_vport_id; 11974 11975 if (priv->pci_dev == NULL) 11976 return 0; 11977 switch (priv->pci_dev->id.device_id) { 11978 case PCI_DEVICE_ID_MELLANOX_BLUEFIELD: 11979 case PCI_DEVICE_ID_MELLANOX_BLUEFIELD2: 11980 case PCI_DEVICE_ID_MELLANOX_BLUEFIELD3: 11981 /* 11982 * In old FW which doesn't expose the E-Switch Manager vport ID in the capability, 11983 * only the BF embedded CPUs control the E-Switch Manager port. Hence, 11984 * ECPF vport ID is selected and not the host port (0) in any BF case. 11985 */ 11986 return (int16_t)MLX5_ECPF_VPORT_ID; 11987 default: 11988 return MLX5_PF_VPORT_ID; 11989 } 11990 } 11991 11992 /** 11993 * Parse item to get the vport id. 11994 * 11995 * @param[in] dev 11996 * Pointer to the Ethernet device structure. 11997 * @param[in] item 11998 * The src port id match item. 11999 * @param[out] vport_id 12000 * Pointer to put the vport id. 12001 * @param[out] all_ports 12002 * Indicate if the item matches all ports. 12003 * @param[out] error 12004 * Pointer to error structure. 12005 * 12006 * @return 12007 * 0 on success, a negative errno value otherwise and rte_errno is set. 
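 *
 * A usage sketch (illustration only; the matched port value is made up):
 *
 * @code
 *	const struct rte_flow_item_ethdev spec = { .port_id = 1 };
 *	const struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
 *		.spec = &spec,
 *	};
 *	uint16_t vport_id;
 *	bool all_ports;
 *
 *	ret = mlx5_flow_get_item_vport_id(dev, &item, &vport_id,
 *					  &all_ports, &error);
 * @endcode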
12008 */ 12009 int mlx5_flow_get_item_vport_id(struct rte_eth_dev *dev, 12010 const struct rte_flow_item *item, 12011 uint16_t *vport_id, 12012 bool *all_ports, 12013 struct rte_flow_error *error) 12014 { 12015 struct mlx5_priv *port_priv; 12016 const struct rte_flow_item_port_id *pid_v = NULL; 12017 const struct rte_flow_item_ethdev *dev_v = NULL; 12018 uint32_t esw_mgr_port; 12019 uint32_t src_port; 12020 12021 if (all_ports) 12022 *all_ports = false; 12023 switch (item->type) { 12024 case RTE_FLOW_ITEM_TYPE_PORT_ID: 12025 pid_v = item->spec; 12026 if (!pid_v) 12027 return 0; 12028 src_port = pid_v->id; 12029 esw_mgr_port = MLX5_PORT_ESW_MGR; 12030 break; 12031 case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT: 12032 dev_v = item->spec; 12033 if (!dev_v) { 12034 if (all_ports) 12035 *all_ports = true; 12036 return 0; 12037 } 12038 src_port = dev_v->port_id; 12039 esw_mgr_port = MLX5_REPRESENTED_PORT_ESW_MGR; 12040 break; 12041 case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR: 12042 src_port = MLX5_REPRESENTED_PORT_ESW_MGR; 12043 esw_mgr_port = MLX5_REPRESENTED_PORT_ESW_MGR; 12044 break; 12045 default: 12046 return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC, 12047 NULL, "Incorrect item type."); 12048 } 12049 if (src_port == esw_mgr_port) { 12050 *vport_id = mlx5_flow_get_esw_manager_vport_id(dev); 12051 } else { 12052 port_priv = mlx5_port_to_eswitch_info(src_port, false); 12053 if (!port_priv) 12054 return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC, 12055 NULL, "Failed to get port info."); 12056 *vport_id = port_priv->representor_id; 12057 } 12058 12059 return 0; 12060 } 12061 12062 int 12063 mlx5_flow_pick_transfer_proxy(struct rte_eth_dev *dev, 12064 uint16_t *proxy_port_id, 12065 struct rte_flow_error *error) 12066 { 12067 const struct mlx5_priv *priv = dev->data->dev_private; 12068 uint16_t port_id; 12069 12070 if (!priv->sh->config.dv_esw_en) 12071 return rte_flow_error_set(error, EINVAL, 12072 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 12073 NULL, 12074 "unable to provide a proxy port" 12075 " without E-Switch configured"); 12076 if (!priv->master && !priv->representor) 12077 return rte_flow_error_set(error, EINVAL, 12078 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 12079 NULL, 12080 "unable to provide a proxy port" 12081 " for port which is not a master" 12082 " or a representor port"); 12083 if (priv->master) { 12084 *proxy_port_id = dev->data->port_id; 12085 return 0; 12086 } 12087 MLX5_ETH_FOREACH_DEV(port_id, dev->device) { 12088 const struct rte_eth_dev *port_dev = &rte_eth_devices[port_id]; 12089 const struct mlx5_priv *port_priv = port_dev->data->dev_private; 12090 12091 if (port_priv->master && 12092 port_priv->domain_id == priv->domain_id) { 12093 *proxy_port_id = port_id; 12094 return 0; 12095 } 12096 } 12097 return rte_flow_error_set(error, ENODEV, 12098 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 12099 NULL, "unable to find a proxy port"); 12100 } 12101 12102 /** 12103 * Discover IPv6 traffic class ID support in rdma-core and firmware. 12104 * 12105 * @param dev 12106 * Ethernet device. 12107 * 12108 * @return 12109 * 0, rdma-core is good to work with firmware. 12110 * -EOPNOTSUPP, rdma-core could not work with new IPv6 TC ID. 
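 *
 * The probe builds a transient egress flow on group 1 matching
 * ETH / IPV6 with a SET_IPV6_DSCP action and destroys it immediately;
 * failure to create it is interpreted as missing rdma-core support.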
12111 */ 12112 int 12113 mlx5_flow_discover_ipv6_tc_support(struct rte_eth_dev *dev) 12114 { 12115 struct rte_flow_action_set_dscp set_dscp; 12116 struct rte_flow_attr attr; 12117 struct rte_flow_action actions[2]; 12118 struct rte_flow_item items[3]; 12119 struct rte_flow_error error; 12120 uint32_t flow_idx; 12121 12122 memset(&attr, 0, sizeof(attr)); 12123 memset(actions, 0, sizeof(actions)); 12124 memset(items, 0, sizeof(items)); 12125 attr.group = 1; 12126 attr.egress = 1; 12127 items[0].type = RTE_FLOW_ITEM_TYPE_ETH; 12128 items[1].type = RTE_FLOW_ITEM_TYPE_IPV6; 12129 items[2].type = RTE_FLOW_ITEM_TYPE_END; 12130 /* Random value */ 12131 set_dscp.dscp = 9; 12132 actions[0].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP; 12133 actions[0].conf = &set_dscp; 12134 actions[1].type = RTE_FLOW_ACTION_TYPE_END; 12135 12136 flow_idx = mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr, items, 12137 actions, true, &error); 12138 if (!flow_idx) 12139 return -EOPNOTSUPP; 12140 12141 mlx5_flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx); 12142 return 0; 12143 } 12144 12145 void * 12146 rte_pmd_mlx5_create_geneve_tlv_parser(uint16_t port_id, 12147 const struct rte_pmd_mlx5_geneve_tlv tlv_list[], 12148 uint8_t nb_options) 12149 { 12150 #ifdef HAVE_MLX5_HWS_SUPPORT 12151 return mlx5_geneve_tlv_parser_create(port_id, tlv_list, nb_options); 12152 #else 12153 (void)port_id; 12154 (void)tlv_list; 12155 (void)nb_options; 12156 DRV_LOG(ERR, "%s is not supported.", __func__); 12157 rte_errno = ENOTSUP; 12158 return NULL; 12159 #endif 12160 } 12161 12162 int 12163 rte_pmd_mlx5_destroy_geneve_tlv_parser(void *handle) 12164 { 12165 #ifdef HAVE_MLX5_HWS_SUPPORT 12166 return mlx5_geneve_tlv_parser_destroy(handle); 12167 #else 12168 (void)handle; 12169 DRV_LOG(ERR, "%s is not supported.", __func__); 12170 rte_errno = ENOTSUP; 12171 return -rte_errno; 12172 #endif 12173 } 12174
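
/*
 * Usage sketch for the GENEVE TLV parser API above. Illustrative only, not
 * part of the original sources: the option description is left zeroed
 * because its layout is application-specific, and error handling is elided.
 *
 *	const struct rte_pmd_mlx5_geneve_tlv tlv_list[1] = {{ 0 }};
 *	void *handle;
 *
 *	handle = rte_pmd_mlx5_create_geneve_tlv_parser(port_id, tlv_list, 1);
 *	if (handle == NULL)
 *		return -rte_errno;	// e.g. ENOTSUP without HWS support
 *	// ... create template tables matching the GENEVE option ...
 *	rte_pmd_mlx5_destroy_geneve_tlv_parser(handle);
 */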