/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_eal_paging.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_common_os.h"
#include "rte_pmd_mlx5.h"

/*
 * Shared array for quick translation between port_id and vport mask/values
 * used for HWS rules.
 */
struct flow_hw_port_info mlx5_flow_hw_port_infos[RTE_MAX_ETHPORTS];

struct tunnel_default_miss_ctx {
	uint16_t *queue;
	__extension__
	union {
		struct rte_flow_action_rss action_rss;
		struct rte_flow_action_queue miss_queue;
		struct rte_flow_action_jump miss_jump;
		uint8_t raw[0];
	};
};

void
mlx5_indirect_list_handles_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
#ifdef HAVE_MLX5_HWS_SUPPORT
	struct rte_flow_error error;
#endif

	while (!LIST_EMPTY(&priv->indirect_list_head)) {
		struct mlx5_indirect_list *e =
			LIST_FIRST(&priv->indirect_list_head);

		LIST_REMOVE(e, entry);
		switch (e->type) {
#ifdef HAVE_MLX5_HWS_SUPPORT
		case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
			mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e);
			break;
		case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
			mlx5_destroy_legacy_indirect(dev, e);
			break;
		case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
			mlx5_reformat_action_destroy(dev,
				(struct rte_flow_action_list_handle *)e, &error);
			break;
#endif
		default:
			DRV_LOG(ERR, "invalid indirect list type");
			MLX5_ASSERT(false);
			break;
		}
	}
}

static int
flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
			     struct rte_flow *flow,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_action *app_actions,
			     uint32_t flow_idx,
			     const struct mlx5_flow_tunnel *tunnel,
			     struct tunnel_default_miss_ctx *ctx,
			     struct rte_flow_error *error);
static struct mlx5_flow_tunnel *
mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);
static void
mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel);
static uint32_t
tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
				const struct mlx5_flow_tunnel *tunnel,
				uint32_t group, uint32_t *table,
				struct rte_flow_error *error);

/** Device flow drivers. */
extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;

const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;

const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
	[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
#endif
#ifdef HAVE_MLX5_HWS_SUPPORT
	[MLX5_FLOW_TYPE_HW] = &mlx5_flow_hw_drv_ops,
#endif
	[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
	[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
};

/** Helper macro to build input graph for mlx5_flow_expand_rss(). */
#define MLX5_FLOW_EXPAND_RSS_NEXT(...) \
	(const int []){ \
		__VA_ARGS__, 0, \
	}

/** Node object of input graph for mlx5_flow_expand_rss(). */
struct mlx5_flow_expand_node {
	const int *const next;
	/**<
	 * List of next node indexes. Index 0 is interpreted as a terminator.
	 */
	const enum rte_flow_item_type type;
	/**< Pattern item type of current node. */
	uint64_t rss_types;
	/**<
	 * RSS types bit-field associated with this node
	 * (see RTE_ETH_RSS_* definitions).
	 */
	uint64_t node_flags;
	/**<
	 * Bit-fields that define how the node is used in the expansion
	 * (see MLX5_EXPANSION_NODE_* definitions).
	 */
};

/** Keep the same format as mlx5_flow_expand_rss to share the expansion buffer. */
struct mlx5_flow_expand_sqn {
	uint32_t entries; /**< Number of entries. */
	struct {
		struct rte_flow_item *pattern; /**< Expanded pattern array. */
		uint32_t priority; /**< Priority offset for each expansion. */
	} entry[];
};

/* Optional expand field. The expansion algorithm will not go deeper. */
#define MLX5_EXPANSION_NODE_OPTIONAL (UINT64_C(1) << 0)

/* The node is not added implicitly as expansion to the flow pattern.
 * If the node type does not match the flow pattern item type, the
 * expansion algorithm will go deeper to its next items.
 * In the current implementation, the list of next node indexes can
 * have up to one node with this flag set and it has to be the last
 * node index (before the list terminator).
 */
#define MLX5_EXPANSION_NODE_EXPLICIT (UINT64_C(1) << 1)

/** Object returned by mlx5_flow_expand_rss(). */
struct mlx5_flow_expand_rss {
	uint32_t entries;
	/**< Number of entries in @p entry. */
	struct {
		struct rte_flow_item *pattern; /**< Expanded pattern array. */
		uint32_t priority; /**< Priority offset for each expansion. */
	} entry[];
};

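/*
 * Illustrative sketch (not part of the driver): a minimal expansion graph
 * built with the helpers above. The node names are hypothetical; index 0
 * must be the root node because a 0 in a next-list is interpreted as the
 * terminator.
 *
 *	enum { NODE_ROOT = 0, NODE_ETH, NODE_IPV4 };
 *
 *	static const struct mlx5_flow_expand_node graph[] = {
 *		[NODE_ROOT] = {
 *			.next = MLX5_FLOW_EXPAND_RSS_NEXT(NODE_ETH),
 *			.type = RTE_FLOW_ITEM_TYPE_END,
 *		},
 *		[NODE_ETH] = {
 *			.next = MLX5_FLOW_EXPAND_RSS_NEXT(NODE_IPV4),
 *			.type = RTE_FLOW_ITEM_TYPE_ETH,
 *		},
 *		[NODE_IPV4] = {
 *			.type = RTE_FLOW_ITEM_TYPE_IPV4,
 *			.rss_types = RTE_ETH_RSS_IPV4,
 *		},
 *	};
 */
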
static void
mlx5_dbg__print_pattern(const struct rte_flow_item *item);

static const struct mlx5_flow_expand_node *
mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern,
				 unsigned int item_idx,
				 const struct mlx5_flow_expand_node graph[],
				 const struct mlx5_flow_expand_node *node);

static __rte_always_inline int
mlx5_need_cache_flow(const struct mlx5_priv *priv,
		     const struct rte_flow_attr *attr)
{
	return priv->isolated && priv->sh->config.dv_flow_en == 1 &&
		(attr ? !attr->group : true) &&
		priv->mode_info.mode == RTE_PMD_MLX5_FLOW_ENGINE_MODE_STANDBY &&
		(!priv->sh->config.dv_esw_en || !priv->sh->config.fdb_def_rule);
}

static bool
mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
{
	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
	case RTE_FLOW_ITEM_TYPE_VLAN:
	case RTE_FLOW_ITEM_TYPE_IPV4:
	case RTE_FLOW_ITEM_TYPE_IPV6:
	case RTE_FLOW_ITEM_TYPE_UDP:
	case RTE_FLOW_ITEM_TYPE_TCP:
	case RTE_FLOW_ITEM_TYPE_ESP:
	case RTE_FLOW_ITEM_TYPE_ICMP:
	case RTE_FLOW_ITEM_TYPE_ICMP6:
	case RTE_FLOW_ITEM_TYPE_VXLAN:
	case RTE_FLOW_ITEM_TYPE_NVGRE:
	case RTE_FLOW_ITEM_TYPE_GRE:
	case RTE_FLOW_ITEM_TYPE_GENEVE:
	case RTE_FLOW_ITEM_TYPE_MPLS:
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
	case RTE_FLOW_ITEM_TYPE_GRE_KEY:
	case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
	case RTE_FLOW_ITEM_TYPE_GTP:
		return true;
	default:
		break;
	}
	return false;
}

/**
 * Network Service Header (NSH) and its next protocol values
 * are described in RFC-8393.
 */
static enum rte_flow_item_type
mlx5_nsh_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
{
	enum rte_flow_item_type type;

	switch (proto_mask & proto_spec) {
	case 0:
		type = RTE_FLOW_ITEM_TYPE_VOID;
		break;
	case RTE_VXLAN_GPE_TYPE_IPV4:
		type = RTE_FLOW_ITEM_TYPE_IPV4;
		break;
	case RTE_VXLAN_GPE_TYPE_IPV6:
		type = RTE_FLOW_ITEM_TYPE_IPV6;
		break;
	case RTE_VXLAN_GPE_TYPE_ETH:
		type = RTE_FLOW_ITEM_TYPE_ETH;
		break;
	default:
		type = RTE_FLOW_ITEM_TYPE_END;
	}
	return type;
}

static enum rte_flow_item_type
mlx5_inet_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
{
	enum rte_flow_item_type type;

	switch (proto_mask & proto_spec) {
	case 0:
		type = RTE_FLOW_ITEM_TYPE_VOID;
		break;
	case IPPROTO_UDP:
		type = RTE_FLOW_ITEM_TYPE_UDP;
		break;
	case IPPROTO_TCP:
		type = RTE_FLOW_ITEM_TYPE_TCP;
		break;
	case IPPROTO_IPIP:
		type = RTE_FLOW_ITEM_TYPE_IPV4;
		break;
	case IPPROTO_IPV6:
		type = RTE_FLOW_ITEM_TYPE_IPV6;
		break;
	case IPPROTO_ESP:
		type = RTE_FLOW_ITEM_TYPE_ESP;
		break;
	default:
		type = RTE_FLOW_ITEM_TYPE_END;
	}
	return type;
}

static enum rte_flow_item_type
mlx5_ethertype_to_item_type(rte_be16_t type_spec,
			    rte_be16_t type_mask, bool is_tunnel)
{
	enum rte_flow_item_type type;

	switch (rte_be_to_cpu_16(type_spec & type_mask)) {
	case 0:
		type = RTE_FLOW_ITEM_TYPE_VOID;
		break;
	case RTE_ETHER_TYPE_TEB:
		type = is_tunnel ?
		       RTE_FLOW_ITEM_TYPE_ETH : RTE_FLOW_ITEM_TYPE_END;
		break;
	case RTE_ETHER_TYPE_VLAN:
		type = !is_tunnel ?
		       RTE_FLOW_ITEM_TYPE_VLAN : RTE_FLOW_ITEM_TYPE_END;
		break;
	case RTE_ETHER_TYPE_IPV4:
		type = RTE_FLOW_ITEM_TYPE_IPV4;
		break;
	case RTE_ETHER_TYPE_IPV6:
		type = RTE_FLOW_ITEM_TYPE_IPV6;
		break;
	default:
		type = RTE_FLOW_ITEM_TYPE_END;
	}
	return type;
}

static enum rte_flow_item_type
mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
{
#define MLX5_XSET_ITEM_MASK_SPEC(type, fld) \
	do { \
		const void *m = item->mask; \
		const void *s = item->spec; \
		mask = m ? \
			((const struct rte_flow_item_##type *)m)->fld : \
			rte_flow_item_##type##_mask.fld; \
		spec = ((const struct rte_flow_item_##type *)s)->fld; \
	} while (0)

	enum rte_flow_item_type ret;
	uint16_t spec, mask;

	if (item == NULL || item->spec == NULL)
		return RTE_FLOW_ITEM_TYPE_VOID;
	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		MLX5_XSET_ITEM_MASK_SPEC(eth, hdr.ether_type);
		if (!mask)
			return RTE_FLOW_ITEM_TYPE_VOID;
		ret = mlx5_ethertype_to_item_type(spec, mask, false);
		break;
	case RTE_FLOW_ITEM_TYPE_VLAN:
		MLX5_XSET_ITEM_MASK_SPEC(vlan, hdr.eth_proto);
		if (!mask)
			return RTE_FLOW_ITEM_TYPE_VOID;
		ret = mlx5_ethertype_to_item_type(spec, mask, false);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		MLX5_XSET_ITEM_MASK_SPEC(ipv4, hdr.next_proto_id);
		if (!mask)
			return RTE_FLOW_ITEM_TYPE_VOID;
		ret = mlx5_inet_proto_to_item_type(spec, mask);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		MLX5_XSET_ITEM_MASK_SPEC(ipv6, hdr.proto);
		if (!mask)
			return RTE_FLOW_ITEM_TYPE_VOID;
		ret = mlx5_inet_proto_to_item_type(spec, mask);
		break;
	case RTE_FLOW_ITEM_TYPE_GENEVE:
		MLX5_XSET_ITEM_MASK_SPEC(geneve, protocol);
		ret = mlx5_ethertype_to_item_type(spec, mask, true);
		break;
	case RTE_FLOW_ITEM_TYPE_GRE:
		MLX5_XSET_ITEM_MASK_SPEC(gre, protocol);
		ret = mlx5_ethertype_to_item_type(spec, mask, true);
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		MLX5_XSET_ITEM_MASK_SPEC(vxlan_gpe, hdr.proto);
		ret = mlx5_nsh_proto_to_item_type(spec, mask);
		break;
	default:
		ret = RTE_FLOW_ITEM_TYPE_VOID;
		break;
	}
	return ret;
#undef MLX5_XSET_ITEM_MASK_SPEC
}

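/*
 * Illustrative sketch (not part of the driver): how the completion helper
 * above resolves the item type that should follow the last pattern item.
 * The local variables are hypothetical.
 *
 *	const struct rte_flow_item_ipv4 spec = {
 *		.hdr.next_proto_id = IPPROTO_UDP,
 *	};
 *	const struct rte_flow_item_ipv4 mask = {
 *		.hdr.next_proto_id = 0xff,
 *	};
 *	const struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		.spec = &spec,
 *		.mask = &mask,
 *	};
 *
 * mlx5_flow_expand_rss_item_complete(&item) returns RTE_FLOW_ITEM_TYPE_UDP,
 * so the expansion appends a UDP item; with a zero mask it would return
 * RTE_FLOW_ITEM_TYPE_VOID instead.
 */
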
static const int *
mlx5_flow_expand_rss_skip_explicit(const struct mlx5_flow_expand_node graph[],
				   const int *next_node)
{
	const struct mlx5_flow_expand_node *node = NULL;
	const int *next = next_node;

	while (next && *next) {
		/*
		 * Skip the nodes with the MLX5_EXPANSION_NODE_EXPLICIT
		 * flag set, because they were not found in the flow pattern.
		 */
		node = &graph[*next];
		if (!(node->node_flags & MLX5_EXPANSION_NODE_EXPLICIT))
			break;
		next = node->next;
	}
	return next;
}

#define MLX5_RSS_EXP_ELT_N 32

/**
 * Expand RSS flows into several possible flows according to the RSS hash
 * fields requested and the driver capabilities.
 *
 * @param[out] buf
 *   Buffer to store the result expansion.
 * @param[in] size
 *   Buffer size in bytes. If 0, @p buf can be NULL.
 * @param[in] pattern
 *   User flow pattern.
 * @param[in] types
 *   RSS types to expand (see RTE_ETH_RSS_* definitions).
 * @param[in] graph
 *   Input graph to expand @p pattern according to @p types.
 * @param[in] graph_root_index
 *   Index of root node in @p graph, typically 0.
 *
 * @return
 *   A positive value representing the size of @p buf in bytes regardless of
 *   @p size on success, a negative errno value otherwise and rte_errno is
 *   set. The following errors are defined:
 *
 *   -E2BIG: @p graph is too deep.
 *   -EINVAL: @p size has not enough space for the expanded pattern.
 */
static int
mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
		     const struct rte_flow_item *pattern, uint64_t types,
		     const struct mlx5_flow_expand_node graph[],
		     int graph_root_index)
{
	const struct rte_flow_item *item;
	const struct mlx5_flow_expand_node *node = &graph[graph_root_index];
	const int *next_node;
	const int *stack[MLX5_RSS_EXP_ELT_N];
	int stack_pos = 0;
	struct rte_flow_item flow_items[MLX5_RSS_EXP_ELT_N];
	unsigned int i, item_idx, last_expand_item_idx = 0;
	size_t lsize;
	size_t user_pattern_size = 0;
	void *addr = NULL;
	const struct mlx5_flow_expand_node *next = NULL;
	struct rte_flow_item missed_item;
	int missed = 0;
	int elt = 0;
	const struct rte_flow_item *last_expand_item = NULL;

	memset(&missed_item, 0, sizeof(missed_item));
	lsize = offsetof(struct mlx5_flow_expand_rss, entry) +
		MLX5_RSS_EXP_ELT_N * sizeof(buf->entry[0]);
	if (lsize > size)
		return -EINVAL;
	buf->entry[0].priority = 0;
	buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N];
	buf->entries = 0;
	addr = buf->entry[0].pattern;
	for (item = pattern, item_idx = 0;
	     item->type != RTE_FLOW_ITEM_TYPE_END;
	     item++, item_idx++) {
		if (!mlx5_flow_is_rss_expandable_item(item)) {
			user_pattern_size += sizeof(*item);
			continue;
		}
		last_expand_item = item;
		last_expand_item_idx = item_idx;
		i = 0;
		while (node->next && node->next[i]) {
			next = &graph[node->next[i]];
			if (next->type == item->type)
				break;
			if (next->node_flags & MLX5_EXPANSION_NODE_EXPLICIT) {
				node = next;
				i = 0;
			} else {
				++i;
			}
		}
		if (next)
			node = next;
		user_pattern_size += sizeof(*item);
	}
	user_pattern_size += sizeof(*item); /* Handle END item. */
	lsize += user_pattern_size;
	if (lsize > size)
		return -EINVAL;
	/* Copy the user pattern in the first entry of the buffer. */
	rte_memcpy(addr, pattern, user_pattern_size);
	addr = (void *)(((uintptr_t)addr) + user_pattern_size);
	buf->entries = 1;
	/* Start expanding. */
	memset(flow_items, 0, sizeof(flow_items));
	user_pattern_size -= sizeof(*item);
	/*
	 * Check if the last valid item has spec set; if so, the pattern needs
	 * to be completed and can be used for expansion.
	 */
	missed_item.type = mlx5_flow_expand_rss_item_complete(last_expand_item);
	if (missed_item.type == RTE_FLOW_ITEM_TYPE_END) {
		/* Item type END indicates expansion is not required. */
		return lsize;
	}
	if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
		next = NULL;
		missed = 1;
		i = 0;
		while (node->next && node->next[i]) {
			next = &graph[node->next[i]];
			if (next->type == missed_item.type) {
				flow_items[0].type = missed_item.type;
				flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
				break;
			}
			if (next->node_flags & MLX5_EXPANSION_NODE_EXPLICIT) {
				node = next;
				i = 0;
			} else {
				++i;
			}
			next = NULL;
		}
	}
	if (next && missed) {
		elt = 2; /* missed item + item end. */
		node = next;
		lsize += elt * sizeof(*item) + user_pattern_size;
		if (lsize > size)
			return -EINVAL;
		if (node->rss_types & types) {
			buf->entry[buf->entries].priority = 1;
			buf->entry[buf->entries].pattern = addr;
			buf->entries++;
			rte_memcpy(addr, buf->entry[0].pattern,
				   user_pattern_size);
			addr = (void *)(((uintptr_t)addr) + user_pattern_size);
			rte_memcpy(addr, flow_items, elt * sizeof(*item));
			addr = (void *)(((uintptr_t)addr) +
					elt * sizeof(*item));
		}
	} else if (last_expand_item != NULL) {
		node = mlx5_flow_expand_rss_adjust_node(pattern,
				last_expand_item_idx, graph, node);
	}
	memset(flow_items, 0, sizeof(flow_items));
	next_node = mlx5_flow_expand_rss_skip_explicit(graph,
			node->next);
	stack[stack_pos] = next_node;
	node = next_node ? &graph[*next_node] : NULL;
	while (node) {
		flow_items[stack_pos].type = node->type;
		if (node->rss_types & types) {
			size_t n;
			/*
			 * Compute the number of items to copy from the
			 * expansion and copy them.
			 * When stack_pos is 0, there is one element in it,
			 * plus the additional END item.
			 */
			elt = stack_pos + 2;
			flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
			lsize += elt * sizeof(*item) + user_pattern_size;
			if (lsize > size)
				return -EINVAL;
			n = elt * sizeof(*item);
			MLX5_ASSERT((buf->entries) < MLX5_RSS_EXP_ELT_N);
			buf->entry[buf->entries].priority =
				stack_pos + 1 + missed;
			buf->entry[buf->entries].pattern = addr;
			buf->entries++;
			rte_memcpy(addr, buf->entry[0].pattern,
				   user_pattern_size);
			addr = (void *)(((uintptr_t)addr) +
					user_pattern_size);
			rte_memcpy(addr, &missed_item,
				   missed * sizeof(*item));
			addr = (void *)(((uintptr_t)addr) +
					missed * sizeof(*item));
			rte_memcpy(addr, flow_items, n);
			addr = (void *)(((uintptr_t)addr) + n);
		}
		/* Go deeper. */
		if (!(node->node_flags & MLX5_EXPANSION_NODE_OPTIONAL) &&
		    node->next) {
			next_node = mlx5_flow_expand_rss_skip_explicit(graph,
					node->next);
			if (++stack_pos == MLX5_RSS_EXP_ELT_N) {
				rte_errno = E2BIG;
				return -rte_errno;
			}
			stack[stack_pos] = next_node;
		} else if (*(next_node + 1)) {
			/* Follow up with the next possibility. */
			next_node = mlx5_flow_expand_rss_skip_explicit(graph,
					++next_node);
		} else if (!stack_pos) {
			/*
			 * Completing the traverse over the different paths.
			 * The next_node is advanced to the terminator.
			 */
			++next_node;
		} else {
			/* Move to the next path. */
			while (stack_pos) {
				next_node = stack[--stack_pos];
				next_node++;
				if (*next_node)
					break;
			}
			next_node = mlx5_flow_expand_rss_skip_explicit(graph,
					next_node);
			stack[stack_pos] = next_node;
		}
		node = next_node && *next_node ? &graph[*next_node] : NULL;
	}
	return lsize;
}

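/*
 * Illustrative sketch (not part of the driver): expanding a plain
 * "eth / ipv4 / end" pattern with RTE_ETH_RSS_NONFRAG_IPV4_UDP requested,
 * using the mlx5_support_expansion graph defined later in this file. The
 * buffer sizing is hypothetical.
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	alignas(struct mlx5_flow_expand_rss) uint8_t raw[4096];
 *	struct mlx5_flow_expand_rss *buf = (void *)raw;
 *	int ret = mlx5_flow_expand_rss(buf, sizeof(raw), pattern,
 *				       RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 *				       mlx5_support_expansion,
 *				       MLX5_EXPANSION_ROOT);
 *
 * On success buf->entry[0] is the user pattern itself and one more entry
 * holds "eth / ipv4 / udp / end" with a higher priority offset, following
 * the MLX5_EXPANSION_IPV4 -> MLX5_EXPANSION_IPV4_UDP edge of the graph.
 */
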
/**
 * Expand SQN flows into several possible flows according to the Tx queue
 * number.
 *
 * @param[out] buf
 *   Buffer to store the result expansion.
 * @param[in] size
 *   Buffer size in bytes. If 0, @p buf can be NULL.
 * @param[in] pattern
 *   User flow pattern.
 * @param[in] sq_specs
 *   Buffer to store the SQ specs.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
static int
mlx5_flow_expand_sqn(struct mlx5_flow_expand_sqn *buf, size_t size,
		     const struct rte_flow_item *pattern,
		     struct mlx5_rte_flow_item_sq *sq_specs)
{
	const struct rte_flow_item *item;
	bool port_representor = false;
	size_t user_pattern_size = 0;
	struct rte_eth_dev *dev;
	struct mlx5_priv *priv;
	void *addr = NULL;
	uint16_t port_id;
	size_t lsize;
	int elt = 2;
	uint16_t i;

	buf->entries = 0;
	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR) {
			const struct rte_flow_item_ethdev *pid_v = item->spec;

			if (!pid_v)
				return 0;
			port_id = pid_v->port_id;
			port_representor = true;
		}
		user_pattern_size += sizeof(*item);
	}
	if (!port_representor)
		return 0;
	dev = &rte_eth_devices[port_id];
	priv = dev->data->dev_private;
	buf->entry[0].pattern = (void *)&buf->entry[priv->txqs_n];
	lsize = offsetof(struct mlx5_flow_expand_sqn, entry) +
		sizeof(buf->entry[0]) * priv->txqs_n;
	if (lsize + (user_pattern_size + sizeof(struct rte_flow_item) * elt) * priv->txqs_n > size)
		return -EINVAL;
	addr = buf->entry[0].pattern;
	for (i = 0; i != priv->txqs_n; ++i) {
		struct rte_flow_item pattern_add[] = {
			{
				.type = (enum rte_flow_item_type)
					MLX5_RTE_FLOW_ITEM_TYPE_SQ,
				.spec = &sq_specs[i],
			},
			{
				.type = RTE_FLOW_ITEM_TYPE_END,
			},
		};
		struct mlx5_txq_ctrl *txq = mlx5_txq_get(dev, i);

		if (txq == NULL)
			return -EINVAL;
		buf->entry[i].pattern = addr;
		sq_specs[i].queue = mlx5_txq_get_sqn(txq);
		mlx5_txq_release(dev, i);
		rte_memcpy(addr, pattern, user_pattern_size);
		addr = (void *)(((uintptr_t)addr) + user_pattern_size);
		rte_memcpy(addr, pattern_add, sizeof(struct rte_flow_item) * elt);
		addr = (void *)(((uintptr_t)addr) + sizeof(struct rte_flow_item) * elt);
		buf->entries++;
	}
	return 0;
}

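/*
 * Illustrative sketch (not part of the driver), assuming a port with two Tx
 * queues: a "port_representor / eth" pattern is expanded into two entries,
 * each ending with an MLX5_RTE_FLOW_ITEM_TYPE_SQ item whose spec carries the
 * SQ number of one Tx queue:
 *
 *	entry[0]: port_representor / eth / sq(SQN of Tx queue 0) / end
 *	entry[1]: port_representor / eth / sq(SQN of Tx queue 1) / end
 *
 * Patterns without a PORT_REPRESENTOR item (or without its spec) are left
 * unexpanded and the function returns 0 with buf->entries == 0.
 */
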
enum mlx5_expansion {
	MLX5_EXPANSION_ROOT,
	MLX5_EXPANSION_ROOT_OUTER,
	MLX5_EXPANSION_OUTER_ETH,
	MLX5_EXPANSION_OUTER_VLAN,
	MLX5_EXPANSION_OUTER_IPV4,
	MLX5_EXPANSION_OUTER_IPV4_UDP,
	MLX5_EXPANSION_OUTER_IPV4_TCP,
	MLX5_EXPANSION_OUTER_IPV4_ESP,
	MLX5_EXPANSION_OUTER_IPV4_ICMP,
	MLX5_EXPANSION_OUTER_IPV6,
	MLX5_EXPANSION_OUTER_IPV6_UDP,
	MLX5_EXPANSION_OUTER_IPV6_TCP,
	MLX5_EXPANSION_OUTER_IPV6_ESP,
	MLX5_EXPANSION_OUTER_IPV6_ICMP6,
	MLX5_EXPANSION_VXLAN,
	MLX5_EXPANSION_STD_VXLAN,
	MLX5_EXPANSION_L3_VXLAN,
	MLX5_EXPANSION_VXLAN_GPE,
	MLX5_EXPANSION_GRE,
	MLX5_EXPANSION_NVGRE,
	MLX5_EXPANSION_GRE_KEY,
	MLX5_EXPANSION_MPLS,
	MLX5_EXPANSION_ETH,
	MLX5_EXPANSION_VLAN,
	MLX5_EXPANSION_IPV4,
	MLX5_EXPANSION_IPV4_UDP,
	MLX5_EXPANSION_IPV4_TCP,
	MLX5_EXPANSION_IPV4_ESP,
	MLX5_EXPANSION_IPV4_ICMP,
	MLX5_EXPANSION_IPV6,
	MLX5_EXPANSION_IPV6_UDP,
	MLX5_EXPANSION_IPV6_TCP,
	MLX5_EXPANSION_IPV6_ESP,
	MLX5_EXPANSION_IPV6_ICMP6,
	MLX5_EXPANSION_IPV6_FRAG_EXT,
	MLX5_EXPANSION_GTP,
	MLX5_EXPANSION_GENEVE,
};

/** Supported expansion of items. */
static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
	[MLX5_EXPANSION_ROOT] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						  MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
						  MLX5_EXPANSION_OUTER_IPV4,
						  MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_OUTER_ETH] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						  MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.node_flags = MLX5_EXPANSION_NODE_EXPLICIT,
	},
	[MLX5_EXPANSION_OUTER_IPV4] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV4_UDP,
			 MLX5_EXPANSION_OUTER_IPV4_TCP,
			 MLX5_EXPANSION_OUTER_IPV4_ESP,
			 MLX5_EXPANSION_OUTER_IPV4_ICMP,
			 MLX5_EXPANSION_GRE,
			 MLX5_EXPANSION_NVGRE,
			 MLX5_EXPANSION_IPV4,
			 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						  MLX5_EXPANSION_VXLAN_GPE,
						  MLX5_EXPANSION_MPLS,
						  MLX5_EXPANSION_GENEVE,
						  MLX5_EXPANSION_GTP),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_OUTER_IPV4_ESP] = {
		.type = RTE_FLOW_ITEM_TYPE_ESP,
		.rss_types = RTE_ETH_RSS_ESP,
	},
	[MLX5_EXPANSION_OUTER_IPV4_ICMP] = {
		.type = RTE_FLOW_ITEM_TYPE_ICMP,
	},
	[MLX5_EXPANSION_OUTER_IPV6] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV6_UDP,
			 MLX5_EXPANSION_OUTER_IPV6_TCP,
			 MLX5_EXPANSION_OUTER_IPV6_ESP,
			 MLX5_EXPANSION_OUTER_IPV6_ICMP6,
			 MLX5_EXPANSION_IPV4,
			 MLX5_EXPANSION_IPV6,
			 MLX5_EXPANSION_GRE,
			 MLX5_EXPANSION_NVGRE),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
			RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						  MLX5_EXPANSION_VXLAN_GPE,
						  MLX5_EXPANSION_MPLS,
						  MLX5_EXPANSION_GENEVE,
						  MLX5_EXPANSION_GTP),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
	},
	[MLX5_EXPANSION_OUTER_IPV6_ESP] = {
		.type = RTE_FLOW_ITEM_TYPE_ESP,
		.rss_types = RTE_ETH_RSS_ESP,
	},
	[MLX5_EXPANSION_OUTER_IPV6_ICMP6] = {
		.type = RTE_FLOW_ITEM_TYPE_ICMP6,
	},
	[MLX5_EXPANSION_VXLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						  MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
	[MLX5_EXPANSION_STD_VXLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
	[MLX5_EXPANSION_L3_VXLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
	[MLX5_EXPANSION_VXLAN_GPE] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						  MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
	},
	[MLX5_EXPANSION_GRE] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						  MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6,
						  MLX5_EXPANSION_GRE_KEY,
						  MLX5_EXPANSION_MPLS),
		.type = RTE_FLOW_ITEM_TYPE_GRE,
	},
	[MLX5_EXPANSION_GRE_KEY] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6,
						  MLX5_EXPANSION_MPLS),
		.type = RTE_FLOW_ITEM_TYPE_GRE_KEY,
		.node_flags = MLX5_EXPANSION_NODE_OPTIONAL,
	},
	[MLX5_EXPANSION_NVGRE] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
	},
	[MLX5_EXPANSION_MPLS] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6,
						  MLX5_EXPANSION_ETH),
		.type = RTE_FLOW_ITEM_TYPE_MPLS,
		.node_flags = MLX5_EXPANSION_NODE_OPTIONAL,
	},
	[MLX5_EXPANSION_ETH] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.node_flags = MLX5_EXPANSION_NODE_EXPLICIT,
	},
	[MLX5_EXPANSION_IPV4] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
						  MLX5_EXPANSION_IPV4_TCP,
						  MLX5_EXPANSION_IPV4_ESP,
						  MLX5_EXPANSION_IPV4_ICMP),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_IPV4_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_IPV4_ESP] = {
		.type = RTE_FLOW_ITEM_TYPE_ESP,
		.rss_types = RTE_ETH_RSS_ESP,
	},
	[MLX5_EXPANSION_IPV4_ICMP] = {
		.type = RTE_FLOW_ITEM_TYPE_ICMP,
	},
	[MLX5_EXPANSION_IPV6] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
						  MLX5_EXPANSION_IPV6_TCP,
						  MLX5_EXPANSION_IPV6_ESP,
						  MLX5_EXPANSION_IPV6_ICMP6,
						  MLX5_EXPANSION_IPV6_FRAG_EXT),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
			RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_IPV6_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
	},
	[MLX5_EXPANSION_IPV6_ESP] = {
		.type = RTE_FLOW_ITEM_TYPE_ESP,
		.rss_types = RTE_ETH_RSS_ESP,
	},
	[MLX5_EXPANSION_IPV6_FRAG_EXT] = {
		.type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
	},
	[MLX5_EXPANSION_IPV6_ICMP6] = {
		.type = RTE_FLOW_ITEM_TYPE_ICMP6,
	},
	[MLX5_EXPANSION_GTP] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_GTP,
	},
	[MLX5_EXPANSION_GENEVE] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						  MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
	},
};

static struct rte_flow_action_handle *
mlx5_action_handle_create(struct rte_eth_dev *dev,
			  const struct rte_flow_indir_action_conf *conf,
			  const struct rte_flow_action *action,
			  struct rte_flow_error *error);
static int mlx5_action_handle_destroy
			(struct rte_eth_dev *dev,
			 struct rte_flow_action_handle *handle,
			 struct rte_flow_error *error);
static int mlx5_action_handle_update
			(struct rte_eth_dev *dev,
			 struct rte_flow_action_handle *handle,
			 const void *update,
			 struct rte_flow_error *error);
static int mlx5_action_handle_query
			(struct rte_eth_dev *dev,
			 const struct rte_flow_action_handle *handle,
			 void *data,
			 struct rte_flow_error *error);
static int
mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
			   struct rte_flow_tunnel *app_tunnel,
			   struct rte_flow_action **actions,
			   uint32_t *num_of_actions,
			   struct rte_flow_error *error);
static int
mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
		       struct rte_flow_tunnel *app_tunnel,
		       struct rte_flow_item **items,
		       uint32_t *num_of_items,
		       struct rte_flow_error *error);
static int
mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
			      struct rte_flow_item *pmd_items,
			      uint32_t num_items, struct rte_flow_error *err);
static int
mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
				struct rte_flow_action *pmd_actions,
				uint32_t num_actions,
				struct rte_flow_error *err);
static int
mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
				  struct rte_mbuf *m,
				  struct rte_flow_restore_info *info,
				  struct rte_flow_error *err);
static struct rte_flow_item_flex_handle *
mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
			   const struct rte_flow_item_flex_conf *conf,
			   struct rte_flow_error *error);
static int
mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
			    const struct rte_flow_item_flex_handle *handle,
			    struct rte_flow_error *error);
static int
mlx5_flow_info_get(struct rte_eth_dev *dev,
		   struct rte_flow_port_info *port_info,
		   struct rte_flow_queue_info *queue_info,
		   struct rte_flow_error *error);
static int
mlx5_flow_port_configure(struct rte_eth_dev *dev,
			 const struct rte_flow_port_attr *port_attr,
			 uint16_t nb_queue,
			 const struct rte_flow_queue_attr *queue_attr[],
			 struct rte_flow_error *err);

static struct rte_flow_pattern_template *
mlx5_flow_pattern_template_create(struct rte_eth_dev *dev,
				  const struct rte_flow_pattern_template_attr *attr,
				  const struct rte_flow_item items[],
				  struct rte_flow_error *error);

static int
mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev,
				   struct rte_flow_pattern_template *template,
				   struct rte_flow_error *error);
static struct rte_flow_actions_template *
mlx5_flow_actions_template_create(struct rte_eth_dev *dev,
				  const struct rte_flow_actions_template_attr *attr,
				  const struct rte_flow_action actions[],
				  const struct rte_flow_action masks[],
				  struct rte_flow_error *error);
static int
mlx5_flow_actions_template_destroy(struct rte_eth_dev *dev,
				   struct rte_flow_actions_template *template,
				   struct rte_flow_error *error);

static struct rte_flow_template_table *
mlx5_flow_table_create(struct rte_eth_dev *dev,
		       const struct rte_flow_template_table_attr *attr,
		       struct rte_flow_pattern_template *item_templates[],
		       uint8_t nb_item_templates,
		       struct rte_flow_actions_template *action_templates[],
		       uint8_t nb_action_templates,
		       struct rte_flow_error *error);
static int
mlx5_flow_table_destroy(struct rte_eth_dev *dev,
			struct rte_flow_template_table *table,
			struct rte_flow_error *error);
static int
mlx5_flow_group_set_miss_actions(struct rte_eth_dev *dev,
				 uint32_t group_id,
				 const struct rte_flow_group_attr *attr,
				 const struct rte_flow_action actions[],
				 struct rte_flow_error *error);

static int
mlx5_action_handle_query_update(struct rte_eth_dev *dev,
				struct rte_flow_action_handle *handle,
				const void *update, void *query,
				enum rte_flow_query_update_mode qu_mode,
				struct rte_flow_error *error);

static struct rte_flow_action_list_handle *
mlx5_action_list_handle_create(struct rte_eth_dev *dev,
			       const struct rte_flow_indir_action_conf *conf,
			       const struct rte_flow_action *actions,
			       struct rte_flow_error *error);

static int
mlx5_action_list_handle_destroy(struct rte_eth_dev *dev,
				struct rte_flow_action_list_handle *handle,
				struct rte_flow_error *error);

static int
mlx5_flow_action_list_handle_query_update(struct rte_eth_dev *dev,
					   const struct rte_flow_action_list_handle *handle,
					   const void **update, void **query,
					   enum rte_flow_query_update_mode mode,
					   struct rte_flow_error *error);

static int
mlx5_flow_calc_table_hash(struct rte_eth_dev *dev,
			  const struct rte_flow_template_table *table,
			  const struct rte_flow_item pattern[],
			  uint8_t pattern_template_index,
			  uint32_t *hash, struct rte_flow_error *error);
static int
mlx5_flow_calc_encap_hash(struct rte_eth_dev *dev,
			  const struct rte_flow_item pattern[],
			  enum rte_flow_encap_hash_field dest_field,
			  uint8_t *hash,
			  struct rte_flow_error *error);

static int
mlx5_template_table_resize(struct rte_eth_dev *dev,
			   struct rte_flow_template_table *table,
			   uint32_t nb_rules, struct rte_flow_error *error);
static int
mlx5_flow_async_update_resized(struct rte_eth_dev *dev, uint32_t queue,
			       const struct rte_flow_op_attr *attr,
			       struct rte_flow *rule, void *user_data,
			       struct rte_flow_error *error);
static int
mlx5_table_resize_complete(struct rte_eth_dev *dev,
			   struct rte_flow_template_table *table,
			   struct rte_flow_error *error);

static const struct rte_flow_ops mlx5_flow_ops = {
	.validate = mlx5_flow_validate,
	.create = mlx5_flow_create,
	.destroy = mlx5_flow_destroy,
	.flush = mlx5_flow_flush,
	.isolate = mlx5_flow_isolate,
	.query = mlx5_flow_query,
	.dev_dump = mlx5_flow_dev_dump,
	.get_q_aged_flows = mlx5_flow_get_q_aged_flows,
	.get_aged_flows = mlx5_flow_get_aged_flows,
	.action_handle_create = mlx5_action_handle_create,
	.action_handle_destroy = mlx5_action_handle_destroy,
	.action_handle_update = mlx5_action_handle_update,
	.action_handle_query = mlx5_action_handle_query,
	.action_handle_query_update = mlx5_action_handle_query_update,
	.action_list_handle_create = mlx5_action_list_handle_create,
	.action_list_handle_destroy = mlx5_action_list_handle_destroy,
	.tunnel_decap_set = mlx5_flow_tunnel_decap_set,
	.tunnel_match = mlx5_flow_tunnel_match,
	.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
	.tunnel_item_release = mlx5_flow_tunnel_item_release,
	.get_restore_info = mlx5_flow_tunnel_get_restore_info,
	.flex_item_create = mlx5_flow_flex_item_create,
	.flex_item_release = mlx5_flow_flex_item_release,
	.info_get = mlx5_flow_info_get,
	.pick_transfer_proxy = mlx5_flow_pick_transfer_proxy,
	.configure = mlx5_flow_port_configure,
	.pattern_template_create = mlx5_flow_pattern_template_create,
	.pattern_template_destroy = mlx5_flow_pattern_template_destroy,
	.actions_template_create = mlx5_flow_actions_template_create,
	.actions_template_destroy = mlx5_flow_actions_template_destroy,
	.template_table_create = mlx5_flow_table_create,
	.template_table_destroy = mlx5_flow_table_destroy,
	.group_set_miss_actions = mlx5_flow_group_set_miss_actions,
	.action_list_handle_query_update =
		mlx5_flow_action_list_handle_query_update,
	.flow_calc_table_hash = mlx5_flow_calc_table_hash,
	.flow_calc_encap_hash = mlx5_flow_calc_encap_hash,
	.flow_template_table_resize = mlx5_template_table_resize,
	.flow_update_resized = mlx5_flow_async_update_resized,
	.flow_template_table_resize_complete = mlx5_table_resize_complete,
};

/* Tunnel information. */
struct mlx5_flow_tunnel_info {
	uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
};

static struct mlx5_flow_tunnel_info tunnels_info[] = {
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GENEVE,
		.ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GRE,
		.ptype = RTE_PTYPE_TUNNEL_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_NVGRE,
		.ptype = RTE_PTYPE_TUNNEL_NVGRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPIP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GTP,
		.ptype = RTE_PTYPE_TUNNEL_GTPU,
	},
};

/**
 * Translate tag ID to register.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] feature
 *   The feature that requests the register.
 * @param[in] id
 *   The requested register ID.
 * @param[out] error
 *   Error description in case of failure.
 *
 * @return
 *   The requested register on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
		     enum mlx5_feature_name feature,
		     uint32_t id,
		     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_sh_config *config = &priv->sh->config;
	struct mlx5_dev_registers *reg = &priv->sh->registers;
	enum modify_reg start_reg;
	bool skip_mtr_reg = false;

	switch (feature) {
	case MLX5_HAIRPIN_RX:
		return REG_B;
	case MLX5_HAIRPIN_TX:
		return REG_A;
	case MLX5_METADATA_RX:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_B;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		case MLX5_XMETA_MODE_META32_HWS:
			return REG_C_1;
		}
		break;
	case MLX5_METADATA_TX:
		if (config->dv_flow_en == 2 &&
		    config->dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS)
			return REG_C_1;
		else
			return REG_A;
	case MLX5_METADATA_FDB:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NON;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		case MLX5_XMETA_MODE_META32_HWS:
			return REG_C_1;
		}
		break;
	case MLX5_FLOW_MARK:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
		case MLX5_XMETA_MODE_META32_HWS:
			return REG_NON;
		case MLX5_XMETA_MODE_META16:
			return REG_C_1;
		case MLX5_XMETA_MODE_META32:
			return REG_C_0;
		}
		break;
	case MLX5_MTR_ID:
		/*
		 * If meter color and meter ID share one register, flow match
		 * should use the meter color register for match.
		 */
		if (priv->mtr_reg_share)
			return reg->aso_reg;
		else
			return reg->aso_reg != REG_C_2 ? REG_C_2 :
			       REG_C_3;
	case MLX5_MTR_COLOR:
	case MLX5_ASO_FLOW_HIT:
	case MLX5_ASO_CONNTRACK:
	case MLX5_SAMPLE_ID:
		/* All features use the same REG_C. */
		MLX5_ASSERT(reg->aso_reg != REG_NON);
		return reg->aso_reg;
	case MLX5_COPY_MARK:
		/*
		 * The metadata COPY_MARK register is used in the meter suffix
		 * sub-flow when a meter is present. It is safe to share the
		 * same register.
		 */
		return reg->aso_reg != REG_C_2 ? REG_C_2 : REG_C_3;
	case MLX5_APP_TAG:
		/*
		 * If a meter is enabled, it engages a register for color
		 * match and flow match. If the meter color match does not
		 * use REG_C_2, the REG_C_x used by the meter color match
		 * needs to be skipped.
		 * If the meter is disabled, all available registers can be
		 * used.
		 */
		start_reg = reg->aso_reg != REG_C_2 ? REG_C_2 :
			    (priv->mtr_reg_share ? REG_C_3 : REG_C_4);
		skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
		if (id > (uint32_t)(REG_C_7 - start_reg))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "invalid tag id");
		if (priv->sh->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		/*
		 * This case means the meter is using a REG_C_x greater than 2.
		 * Take care not to conflict with the meter color REG_C_x.
		 * If the available index REG_C_y >= REG_C_x, skip the
		 * color register.
		 */
		if (skip_mtr_reg && priv->sh->flow_mreg_c
		    [id + start_reg - REG_C_0] >= reg->aso_reg) {
			if (id >= (uint32_t)(REG_C_7 - start_reg))
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						NULL, "invalid tag id");
			if (priv->sh->flow_mreg_c
			    [id + 1 + start_reg - REG_C_0] != REG_NON)
				return priv->sh->flow_mreg_c
					[id + 1 + start_reg - REG_C_0];
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		}
		return priv->sh->flow_mreg_c[id + start_reg - REG_C_0];
	}
	MLX5_ASSERT(false);
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, "invalid feature name");
}

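/*
 * Illustrative sketch (not part of the driver): querying the register that
 * backs application TAG index 0; the outcome below assumes a configuration
 * where the ASO register is REG_C_2 and meter color and ID share a register.
 *
 *	struct rte_flow_error error;
 *	int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, &error);
 *
 * Under that assumption start_reg is REG_C_3, so TAG 0 maps to REG_C_3
 * provided priv->sh->flow_mreg_c[] reports it as available; otherwise the
 * call fails with ENOTSUP ("unsupported tag id").
 */
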
/**
 * Check extensive flow metadata register support.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 * @return
 *   True if device supports extensive flow metadata register, otherwise false.
 */
bool
mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	/*
	 * Having an available reg_c can be regarded inclusively as supporting
	 * extensive flow metadata registers, which could mean,
	 * - metadata register copy action by modify header.
	 * - 16 modify header actions are supported.
	 * - reg_c's are preserved across different domains (FDB and NIC) on
	 *   packet loopback by flow lookup miss.
	 */
	return priv->sh->flow_mreg_c[2] != REG_NON;
}

/**
 * Get the lowest priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Pointer to device flow rule attributes.
 *
 * @return
 *   The value of the lowest priority of the flow.
 */
uint32_t
mlx5_get_lowest_priority(struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!attr->group && !(attr->transfer && priv->fdb_def_rule))
		return priv->sh->flow_max_priority - 2;
	return MLX5_NON_ROOT_FLOW_MAX_PRIO - 1;
}

/**
 * Calculate matcher priority of the flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Pointer to device flow rule attributes.
 * @param[in] subpriority
 *   The priority based on the items.
 * @param[in] external
 *   Flow is user flow.
 * @return
 *   The matcher priority of the flow.
 */
uint16_t
mlx5_get_matcher_priority(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  uint32_t subpriority, bool external)
{
	uint16_t priority = (uint16_t)attr->priority;
	struct mlx5_priv *priv = dev->data->dev_private;

	/* NIC root rules */
	if (!attr->group && !attr->transfer) {
		if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
			priority = priv->sh->flow_max_priority - 1;
		return mlx5_os_flow_adjust_priority(dev, priority, subpriority);
	/* FDB root rules */
	} else if (attr->transfer && (!external || !priv->fdb_def_rule) &&
		   attr->group == 0 &&
		   attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR) {
		return (priv->sh->flow_max_priority - 1) * 3;
	}
	if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
		priority = MLX5_NON_ROOT_FLOW_MAX_PRIO;
	return priority * 3 + subpriority;
}

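/*
 * Worked example for the non-root branch above: a user flow in group 1 with
 * attr->priority == 2 and a sub-priority of 1 derived from the items gets
 * matcher priority 2 * 3 + 1 = 7. Three matcher priorities are reserved per
 * flow priority so that the L2/L3/L4 sub-flows (MLX5_PRIORITY_MAP_L2/L3/L4)
 * can be ordered within a single flow priority.
 */
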
/**
 * Verify the @p item specifications (spec, last, mask) are compatible with the
 * NIC capabilities.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] item
 *   Item specification.
 * @param[in] mask
 *   @p item->mask or flow default bit-masks.
 * @param[in] nic_mask
 *   Bit-masks covering supported fields by the NIC to compare with user mask.
 * @param[in] size
 *   Bit-masks size in bytes.
 * @param[in] range_accepted
 *   True if range of values is accepted for specific fields, false otherwise.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_item_acceptable(const struct rte_eth_dev *dev,
			  const struct rte_flow_item *item,
			  const uint8_t *mask,
			  const uint8_t *nic_mask,
			  unsigned int size,
			  bool range_accepted,
			  struct rte_flow_error *error)
{
	unsigned int i;

	MLX5_ASSERT(nic_mask);
	for (i = 0; i < size; ++i)
		if ((nic_mask[i] | mask[i]) != nic_mask[i])
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "mask enables non supported"
						  " bits");
	if (mlx5_hws_active(dev))
		return 0;
	if (!item->spec && (item->mask || item->last))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "mask/last without a spec is not"
					  " supported");
	if (item->spec && item->last && !range_accepted) {
		uint8_t spec[size];
		uint8_t last[size];
		unsigned int i;
		int ret;

		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
			last[i] = ((const uint8_t *)item->last)[i] & mask[i];
		}
		ret = memcmp(spec, last, size);
		if (ret != 0)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "range is not valid");
	}
	return 0;
}

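/*
 * Illustrative sketch (not part of the driver): a user mask that enables a
 * bit outside of the NIC mask is rejected. The byte values are hypothetical.
 *
 *	nic_mask[] = { 0xff, 0xff, 0x00 }
 *	mask[]     = { 0xff, 0x00, 0x0f }
 *
 * Byte 2 fails the ((nic_mask[i] | mask[i]) != nic_mask[i]) test, so the
 * function returns -ENOTSUP with the message "mask enables non supported
 * bits".
 */
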
/**
 * Adjust the hash fields according to the @p flow information.
 *
 * @param[in] rss_desc
 *   Pointer to the mlx5 RSS descriptor.
 * @param[in] tunnel
 *   1 when the hash field is for a tunnel item.
 * @param[in] layer_types
 *   RTE_ETH_RSS_* types.
 * @param[in] hash_fields
 *   Item hash fields.
 *
 * @return
 *   The hash fields that should be used.
 */
uint64_t
mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
			    int tunnel __rte_unused, uint64_t layer_types,
			    uint64_t hash_fields)
{
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	int rss_request_inner = rss_desc->level >= 2;

	/* Check RSS hash level for tunnel. */
	if (tunnel && rss_request_inner)
		hash_fields |= IBV_RX_HASH_INNER;
	else if (tunnel || rss_request_inner)
		return 0;
#endif
	/* Check if requested layer matches RSS hash fields. */
	if (!(rss_desc->types & layer_types))
		return 0;
	return hash_fields;
}

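/*
 * Illustrative sketch (not part of the driver): for the inner-UDP sub-flow
 * of a tunnel rule with rss_desc->level == 2 and RTE_ETH_RSS_UDP in
 * rss_desc->types, a call such as
 *
 *	fields = mlx5_flow_hashfields_adjust(rss_desc, 1, RTE_ETH_RSS_UDP,
 *					     MLX5_RSS_HASH_IPV4_UDP);
 *
 * returns the given hash fields with IBV_RX_HASH_INNER added. With
 * rss_desc->level < 2 the same call returns 0 because inner RSS was not
 * requested for the tunneled traffic.
 */
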
/**
 * Look up and set the ptype in the Rx data part. Only a single ptype can be
 * used; if several tunnel rules are used on this queue, the tunnel ptype
 * will be cleared.
 *
 * @param rxq_ctrl
 *   Rx queue to update.
 */
static void
flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	unsigned int i;
	uint32_t tunnel_ptype = 0;

	/* Look up for the ptype to use. */
	for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
		if (!rxq_ctrl->flow_tunnels_n[i])
			continue;
		if (!tunnel_ptype) {
			tunnel_ptype = tunnels_info[i].ptype;
		} else {
			tunnel_ptype = 0;
			break;
		}
	}
	rxq_ctrl->rxq.tunnel = tunnel_ptype;
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
 * flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] dev_handle
 *   Pointer to device flow handle structure.
 */
void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
		       struct mlx5_flow_handle *dev_handle)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
	struct mlx5_ind_table_obj *ind_tbl = NULL;
	unsigned int i;

	if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
		struct mlx5_hrxq *hrxq;

		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
				      dev_handle->rix_hrxq);
		if (hrxq)
			ind_tbl = hrxq->ind_table;
	} else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
		struct mlx5_shared_action_rss *shared_rss;

		shared_rss = mlx5_ipool_get
			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
			 dev_handle->rix_srss);
		if (shared_rss)
			ind_tbl = shared_rss->ind_tbl;
	}
	if (!ind_tbl)
		return;
	for (i = 0; i != ind_tbl->queues_n; ++i) {
		int idx = ind_tbl->queues[i];
		struct mlx5_rxq_ctrl *rxq_ctrl;

		if (mlx5_is_external_rxq(dev, idx))
			continue;
		rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
		MLX5_ASSERT(rxq_ctrl != NULL);
		if (rxq_ctrl == NULL)
			continue;
		/*
		 * To support metadata register copy on Tx loopback, this must
		 * be always enabled (metadata may arrive from another port,
		 * not only from local flows).
		 */
		if (tunnel) {
			unsigned int j;

			/* Increase the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_handle->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]++;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

static void
flow_rxq_mark_flag_set(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	uint16_t port_id;

	if (priv->sh->shared_mark_enabled)
		return;
	if (priv->master || priv->representor) {
		MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
			struct mlx5_priv *opriv =
				rte_eth_devices[port_id].data->dev_private;

			if (!opriv ||
			    opriv->sh != priv->sh ||
			    opriv->domain_id != priv->domain_id ||
			    opriv->mark_enabled)
				continue;
			LIST_FOREACH(rxq_ctrl, &opriv->rxqsctrl, next) {
				rxq_ctrl->rxq.mark = 1;
			}
			opriv->mark_enabled = 1;
		}
	} else {
		LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
			rxq_ctrl->rxq.mark = 1;
		}
		priv->mark_enabled = 1;
	}
	priv->sh->shared_mark_enabled = 1;
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Pointer to flow structure.
 */
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t handle_idx;
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	if (wks->mark)
		flow_rxq_mark_flag_set(dev);
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, dev_handle, next)
		flow_drv_rxq_flags_set(dev, dev_handle);
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * device flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] dev_handle
 *   Pointer to the device flow handle structure.
 */
static void
flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
			struct mlx5_flow_handle *dev_handle)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
	struct mlx5_ind_table_obj *ind_tbl = NULL;
	unsigned int i;

	if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
		struct mlx5_hrxq *hrxq;

		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
				      dev_handle->rix_hrxq);
		if (hrxq)
			ind_tbl = hrxq->ind_table;
	} else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
		struct mlx5_shared_action_rss *shared_rss;

		shared_rss = mlx5_ipool_get
			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
			 dev_handle->rix_srss);
		if (shared_rss)
			ind_tbl = shared_rss->ind_tbl;
	}
	if (!ind_tbl)
		return;
	MLX5_ASSERT(dev->data->dev_started);
	for (i = 0; i != ind_tbl->queues_n; ++i) {
		int idx = ind_tbl->queues[i];
		struct mlx5_rxq_ctrl *rxq_ctrl;

		if (mlx5_is_external_rxq(dev, idx))
			continue;
		rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
		MLX5_ASSERT(rxq_ctrl != NULL);
		if (rxq_ctrl == NULL)
			continue;
		if (tunnel) {
			unsigned int j;

			/* Decrease the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_handle->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]--;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * @p flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the flow.
 */
static void
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t handle_idx;
	struct mlx5_flow_handle *dev_handle;

	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, dev_handle, next)
		flow_drv_rxq_flags_trim(dev, dev_handle);
}

/**
 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
		unsigned int j;

		if (rxq == NULL || rxq->ctrl == NULL)
			continue;
		rxq->ctrl->rxq.mark = 0;
		for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
			rxq->ctrl->flow_tunnels_n[j] = 0;
		rxq->ctrl->rxq.tunnel = 0;
	}
	priv->mark_enabled = 0;
	priv->sh->shared_mark_enabled = 0;
}

static uint64_t mlx5_restore_info_dynflag;

int
mlx5_flow_rx_metadata_negotiate(struct rte_eth_dev *dev, uint64_t *features)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint64_t supported = 0;

	if (!is_tunnel_offload_active(dev)) {
		supported |= RTE_ETH_RX_METADATA_USER_FLAG;
		supported |= RTE_ETH_RX_METADATA_USER_MARK;
		if ((*features & RTE_ETH_RX_METADATA_TUNNEL_ID) != 0) {
			DRV_LOG(DEBUG,
				"tunnel offload was not activated, consider setting dv_xmeta_en=%d",
				MLX5_XMETA_MODE_MISS_INFO);
		}
	} else {
		supported |= RTE_ETH_RX_METADATA_TUNNEL_ID;
		if ((*features & RTE_ETH_RX_METADATA_TUNNEL_ID) != 0 &&
		    mlx5_restore_info_dynflag == 0)
			mlx5_restore_info_dynflag = rte_flow_restore_info_dynflag();
	}

	if (((*features & supported) & RTE_ETH_RX_METADATA_TUNNEL_ID) != 0)
		priv->tunnel_enabled = 1;
	else
		priv->tunnel_enabled = 0;

	*features &= supported;
	return 0;
}

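/*
 * Usage sketch (hypothetical, application side): negotiating tunnel restore
 * metadata delivery through the ethdev API that lands in the handler above.
 *
 *	uint64_t features = RTE_ETH_RX_METADATA_USER_MARK |
 *			    RTE_ETH_RX_METADATA_TUNNEL_ID;
 *
 *	rte_eth_rx_metadata_negotiate(port_id, &features);
 *
 * On return "features" holds only the flags supported in the current tunnel
 * offload mode; requesting TUNNEL_ID also arms the restore-info dynflag used
 * by mlx5_flow_rxq_dynf_set() below.
 */
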
/**
 * Set the Rx queue dynamic metadata (mask and offset) for a flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 */
void
mlx5_flow_rxq_dynf_set(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint64_t mark_flag = RTE_MBUF_F_RX_FDIR_ID;
	unsigned int i;

	if (priv->tunnel_enabled)
		mark_flag |= mlx5_restore_info_dynflag;

	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
		struct mlx5_rxq_data *data;

		if (rxq == NULL || rxq->ctrl == NULL)
			continue;
		data = &rxq->ctrl->rxq;
		if (!data->shared || !rxq->ctrl->started) {
			if (!rte_flow_dynf_metadata_avail()) {
				data->dynf_meta = 0;
				data->flow_meta_mask = 0;
				data->flow_meta_offset = -1;
				data->flow_meta_port_mask = 0;
			} else {
				data->dynf_meta = 1;
				data->flow_meta_mask = rte_flow_dynf_metadata_mask;
				data->flow_meta_offset = rte_flow_dynf_metadata_offs;
				data->flow_meta_port_mask = priv->sh->dv_meta_mask;
			}
			data->mark_flag = mark_flag;
		}
	}
}

/*
 * Return a pointer to the desired action in the list of actions.
 *
 * @param[in] actions
 *   The list of actions to search the action in.
 * @param[in] action
 *   The action to find.
 *
 * @return
 *   Pointer to the action in the list, if found. NULL otherwise.
 */
const struct rte_flow_action *
mlx5_flow_find_action(const struct rte_flow_action *actions,
		      enum rte_flow_action_type action)
{
	if (actions == NULL)
		return NULL;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
		if (actions->type == action)
			return actions;
	return NULL;
}

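/*
 * Usage sketch (hypothetical): picking the RSS configuration out of an
 * action list with the helper above.
 *
 *	const struct rte_flow_action *act =
 *		mlx5_flow_find_action(actions, RTE_FLOW_ACTION_TYPE_RSS);
 *
 *	if (act != NULL) {
 *		const struct rte_flow_action_rss *rss = act->conf;
 *		// ...use rss->types, rss->queue_num...
 *	}
 */
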
1779 */ 1780 static void 1781 flow_rxq_flags_clear(struct rte_eth_dev *dev) 1782 { 1783 struct mlx5_priv *priv = dev->data->dev_private; 1784 unsigned int i; 1785 1786 for (i = 0; i != priv->rxqs_n; ++i) { 1787 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i); 1788 unsigned int j; 1789 1790 if (rxq == NULL || rxq->ctrl == NULL) 1791 continue; 1792 rxq->ctrl->rxq.mark = 0; 1793 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) 1794 rxq->ctrl->flow_tunnels_n[j] = 0; 1795 rxq->ctrl->rxq.tunnel = 0; 1796 } 1797 priv->mark_enabled = 0; 1798 priv->sh->shared_mark_enabled = 0; 1799 } 1800 1801 static uint64_t mlx5_restore_info_dynflag; 1802 1803 int 1804 mlx5_flow_rx_metadata_negotiate(struct rte_eth_dev *dev, uint64_t *features) 1805 { 1806 struct mlx5_priv *priv = dev->data->dev_private; 1807 uint64_t supported = 0; 1808 1809 if (!is_tunnel_offload_active(dev)) { 1810 supported |= RTE_ETH_RX_METADATA_USER_FLAG; 1811 supported |= RTE_ETH_RX_METADATA_USER_MARK; 1812 if ((*features & RTE_ETH_RX_METADATA_TUNNEL_ID) != 0) { 1813 DRV_LOG(DEBUG, 1814 "tunnel offload was not activated, consider setting dv_xmeta_en=%d", 1815 MLX5_XMETA_MODE_MISS_INFO); 1816 } 1817 } else { 1818 supported |= RTE_ETH_RX_METADATA_TUNNEL_ID; 1819 if ((*features & RTE_ETH_RX_METADATA_TUNNEL_ID) != 0 && 1820 mlx5_restore_info_dynflag == 0) 1821 mlx5_restore_info_dynflag = rte_flow_restore_info_dynflag(); 1822 } 1823 1824 if (((*features & supported) & RTE_ETH_RX_METADATA_TUNNEL_ID) != 0) 1825 priv->tunnel_enabled = 1; 1826 else 1827 priv->tunnel_enabled = 0; 1828 1829 *features &= supported; 1830 return 0; 1831 } 1832 1833 /** 1834 * Set the Rx queue dynamic metadata (mask and offset) for a flow 1835 * 1836 * @param[in] dev 1837 * Pointer to the Ethernet device structure. 1838 */ 1839 void 1840 mlx5_flow_rxq_dynf_set(struct rte_eth_dev *dev) 1841 { 1842 struct mlx5_priv *priv = dev->data->dev_private; 1843 uint64_t mark_flag = RTE_MBUF_F_RX_FDIR_ID; 1844 unsigned int i; 1845 1846 if (priv->tunnel_enabled) 1847 mark_flag |= mlx5_restore_info_dynflag; 1848 1849 for (i = 0; i != priv->rxqs_n; ++i) { 1850 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i); 1851 struct mlx5_rxq_data *data; 1852 1853 if (rxq == NULL || rxq->ctrl == NULL) 1854 continue; 1855 data = &rxq->ctrl->rxq; 1856 if (!data->shared || !rxq->ctrl->started) { 1857 if (!rte_flow_dynf_metadata_avail()) { 1858 data->dynf_meta = 0; 1859 data->flow_meta_mask = 0; 1860 data->flow_meta_offset = -1; 1861 data->flow_meta_port_mask = 0; 1862 } else { 1863 data->dynf_meta = 1; 1864 data->flow_meta_mask = rte_flow_dynf_metadata_mask; 1865 data->flow_meta_offset = rte_flow_dynf_metadata_offs; 1866 data->flow_meta_port_mask = priv->sh->dv_meta_mask; 1867 } 1868 data->mark_flag = mark_flag; 1869 } 1870 } 1871 } 1872 1873 /* 1874 * return a pointer to the desired action in the list of actions. 1875 * 1876 * @param[in] actions 1877 * The list of actions to search the action in. 1878 * @param[in] action 1879 * The action to find. 1880 * 1881 * @return 1882 * Pointer to the action in the list, if found. NULL otherwise. 1883 */ 1884 const struct rte_flow_action * 1885 mlx5_flow_find_action(const struct rte_flow_action *actions, 1886 enum rte_flow_action_type action) 1887 { 1888 if (actions == NULL) 1889 return NULL; 1890 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) 1891 if (actions->type == action) 1892 return actions; 1893 return NULL; 1894 } 1895 1896 /* 1897 * Validate the flag action. 
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_flag(uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't mark and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 flag"
					  " actions in same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "flag action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the mark action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] action
 *   Pointer to the mark action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_mark(struct rte_eth_dev *dev,
			       const struct rte_flow_action *action,
			       uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	const struct rte_flow_action_mark *mark = action->conf;

	if (!mark)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "configuration cannot be null");
	if (mark->id >= MLX5_FLOW_MARK_MAX)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &mark->id,
					  "mark id must be in range 0 <= id < "
					  RTE_STR(MLX5_FLOW_MARK_MAX));
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't flag and mark in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 mark actions in same"
					  " flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "mark action not supported for "
					  "egress");
	if (attr->transfer && mlx5_hws_active(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
					  "non-template mark action not supported for transfer");
	return 0;
}

/*
 * Validate the drop action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] is_root
 *   True if flow is validated for root table. False otherwise.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
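 *
 * A minimal sketch of an action list guarded by this check; values are
 * assumptions and the call is reached via rte_flow_validate():
 *
 * @code
 *   struct rte_flow_action actions[] = {
 *       { .type = RTE_FLOW_ACTION_TYPE_DROP },
 *       { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 * @endcode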
2001 */ 2002 int 2003 mlx5_flow_validate_action_drop(struct rte_eth_dev *dev, 2004 bool is_root, 2005 const struct rte_flow_attr *attr, 2006 struct rte_flow_error *error) 2007 { 2008 struct mlx5_priv *priv = dev->data->dev_private; 2009 2010 if (priv->sh->config.dv_flow_en == 0 && attr->egress) 2011 return rte_flow_error_set(error, ENOTSUP, 2012 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 2013 "drop action not supported for " 2014 "egress"); 2015 if (priv->sh->config.dv_flow_en == 1 && is_root && (attr->egress || attr->transfer) && 2016 !priv->sh->dr_root_drop_action_en) { 2017 return rte_flow_error_set(error, ENOTSUP, 2018 RTE_FLOW_ERROR_TYPE_ATTR, NULL, 2019 "drop action not supported for " 2020 "egress and transfer on group 0"); 2021 } 2022 return 0; 2023 } 2024 2025 /* 2026 * Check if a queue specified in the queue action is valid. 2027 * 2028 * @param[in] dev 2029 * Pointer to the Ethernet device structure. 2030 * @param[in] action 2031 * Pointer to the queue action. 2032 * @param[out] error 2033 * Pointer to error structure. 2034 * 2035 * @return 2036 * 0 on success, a negative errno value otherwise and rte_errno is set. 2037 */ 2038 int 2039 mlx5_flow_validate_target_queue(struct rte_eth_dev *dev, 2040 const struct rte_flow_action *action, 2041 struct rte_flow_error *error) 2042 { 2043 const struct rte_flow_action_queue *queue = action->conf; 2044 struct mlx5_priv *priv = dev->data->dev_private; 2045 2046 if (mlx5_is_external_rxq(dev, queue->index)) 2047 return 0; 2048 if (!priv->rxqs_n) 2049 return rte_flow_error_set(error, EINVAL, 2050 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2051 NULL, "No Rx queues configured"); 2052 if (queue->index >= priv->rxqs_n) 2053 return rte_flow_error_set(error, EINVAL, 2054 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2055 &queue->index, 2056 "queue index out of range"); 2057 if (mlx5_rxq_get(dev, queue->index) == NULL) 2058 return rte_flow_error_set(error, EINVAL, 2059 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2060 &queue->index, 2061 "queue is not configured"); 2062 return 0; 2063 } 2064 2065 /* 2066 * Validate the queue action. 2067 * 2068 * @param[in] action 2069 * Pointer to the queue action. 2070 * @param[in] action_flags 2071 * Bit-fields that holds the actions detected until now. 2072 * @param[in] dev 2073 * Pointer to the Ethernet device structure. 2074 * @param[in] attr 2075 * Attributes of flow that includes this action. 2076 * @param[out] error 2077 * Pointer to error structure. 2078 * 2079 * @return 2080 * 0 on success, a negative errno value otherwise and rte_errno is set. 2081 */ 2082 int 2083 mlx5_flow_validate_action_queue(const struct rte_flow_action *action, 2084 uint64_t action_flags, 2085 struct rte_eth_dev *dev, 2086 const struct rte_flow_attr *attr, 2087 struct rte_flow_error *error) 2088 { 2089 const struct rte_flow_action_queue *queue = action->conf; 2090 2091 if (!queue) 2092 return rte_flow_error_set(error, EINVAL, 2093 RTE_FLOW_ERROR_TYPE_ACTION, action, 2094 "no QUEUE action configuration"); 2095 if (action_flags & MLX5_FLOW_FATE_ACTIONS) 2096 return rte_flow_error_set(error, EINVAL, 2097 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2098 "can't have 2 fate actions in" 2099 " same flow"); 2100 if (attr->egress) 2101 return rte_flow_error_set(error, ENOTSUP, 2102 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 2103 "queue action not supported for egress."); 2104 return mlx5_flow_validate_target_queue(dev, action, error); 2105 } 2106 2107 /** 2108 * Validate queue numbers for device RSS. 2109 * 2110 * @param[in] dev 2111 * Configured device. 
 * @param[in] queues
 *   Array of queue numbers.
 * @param[in] queues_n
 *   Size of the @p queues array.
 * @param[out] error
 *   On error, filled with a textual error description.
 * @param[out] queue_idx
 *   On error, filled with an offending queue index in @p queues array.
 *
 * @return
 *   0 on success, a negative errno code on error.
 */
static int
mlx5_validate_rss_queues(struct rte_eth_dev *dev,
			 const uint16_t *queues, uint32_t queues_n,
			 const char **error, uint32_t *queue_idx)
{
	const struct mlx5_priv *priv = dev->data->dev_private;
	bool is_hairpin = false;
	bool is_ext_rss = false;
	uint32_t i;

	for (i = 0; i != queues_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl;

		if (mlx5_is_external_rxq(dev, queues[i])) {
			is_ext_rss = true;
			continue;
		}
		if (is_ext_rss) {
			*error = "combining external and regular RSS queues is not supported";
			*queue_idx = i;
			return -ENOTSUP;
		}
		if (queues[i] >= priv->rxqs_n) {
			*error = "queue index out of range";
			*queue_idx = i;
			return -EINVAL;
		}
		rxq_ctrl = mlx5_rxq_ctrl_get(dev, queues[i]);
		if (rxq_ctrl == NULL) {
			*error = "queue is not configured";
			*queue_idx = i;
			return -EINVAL;
		}
		if (i == 0 && rxq_ctrl->is_hairpin)
			is_hairpin = true;
		if (is_hairpin != rxq_ctrl->is_hairpin) {
			*error = "combining hairpin and regular RSS queues is not supported";
			*queue_idx = i;
			return -ENOTSUP;
		}
	}
	return 0;
}

/*
 * Validate the RSS action configuration.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] action
 *   Pointer to the RSS action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_validate_action_rss(struct rte_eth_dev *dev,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_rss *rss = action->conf;
	int ret;
	const char *message;
	uint32_t queue_idx;

	if (!rss)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
			 action, "no RSS action configuration");
	if (rss->func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
		DRV_LOG(WARNING, "port %u symmetric RSS supported with SORT",
			dev->data->port_id);
	} else if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
		   rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->func,
					  "RSS hash function not supported");
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (rss->level > 2)
#else
	if (rss->level > 1)
#endif
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->level,
					  "tunnel RSS is not supported");
	/* Allow RSS key_len 0 in case of NULL (default) RSS key. */
	if (rss->key_len == 0 && rss->key != NULL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key length 0");
	if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too small");
	if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too large");
	if (rss->queue_num > priv->sh->dev_cap.ind_table_max_size)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->queue_num,
					  "number of queues too large");
	if (rss->types & MLX5_RSS_HF_MASK)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->types,
					  "some RSS protocols are not"
					  " supported");
	if ((rss->types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) &&
	    !(rss->types & RTE_ETH_RSS_IP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "L3 partial RSS requested but L3 RSS"
					  " type not specified");
	if ((rss->types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) &&
	    !(rss->types & (RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "L4 partial RSS requested but L4 RSS"
					  " type not specified");
	if (!priv->rxqs_n && priv->ext_rxqs == NULL)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No Rx queues configured");
	if (!rss->queue_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No queues configured");
	ret = mlx5_validate_rss_queues(dev, rss->queue, rss->queue_num,
				       &message, &queue_idx);
	if (ret != 0) {
		return rte_flow_error_set(error, -ret,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->queue[queue_idx], message);
	}
	return 0;
}

/*
 * Validate the RSS action.
 *
 * @param[in] action
 *   Pointer to the RSS action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[in] item_flags
 *   Items that were detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
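 *
 * An RSS configuration that passes these checks might look as follows
 * (illustrative; the queue ids are assumptions and the default key is
 * used):
 *
 * @code
 *   uint16_t queues[2] = { 0, 1 };
 *   struct rte_flow_action_rss rss = {
 *       .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
 *       .level = 1,
 *       .types = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP,
 *       .key_len = 0,
 *       .queue_num = 2,
 *       .queue = queues,
 *   };
 * @endcode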
2289 */ 2290 int 2291 mlx5_flow_validate_action_rss(const struct rte_flow_action *action, 2292 uint64_t action_flags, 2293 struct rte_eth_dev *dev, 2294 const struct rte_flow_attr *attr, 2295 uint64_t item_flags, 2296 struct rte_flow_error *error) 2297 { 2298 const struct rte_flow_action_rss *rss = action->conf; 2299 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 2300 int ret; 2301 2302 if (action_flags & MLX5_FLOW_FATE_ACTIONS) 2303 return rte_flow_error_set(error, EINVAL, 2304 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2305 "can't have 2 fate actions" 2306 " in same flow"); 2307 ret = mlx5_validate_action_rss(dev, action, error); 2308 if (ret) 2309 return ret; 2310 if (attr->egress) 2311 return rte_flow_error_set(error, ENOTSUP, 2312 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 2313 "rss action not supported for " 2314 "egress"); 2315 if (rss->level > 1 && !tunnel) 2316 return rte_flow_error_set(error, EINVAL, 2317 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 2318 "inner RSS is not supported for " 2319 "non-tunnel flows"); 2320 if ((item_flags & MLX5_FLOW_LAYER_ECPRI) && 2321 !(item_flags & MLX5_FLOW_LAYER_INNER_L4_UDP)) { 2322 return rte_flow_error_set(error, EINVAL, 2323 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 2324 "RSS on eCPRI is not supported now"); 2325 } 2326 if ((item_flags & MLX5_FLOW_LAYER_MPLS) && 2327 !(item_flags & 2328 (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3)) && 2329 rss->level > 1) 2330 return rte_flow_error_set(error, EINVAL, 2331 RTE_FLOW_ERROR_TYPE_ITEM, NULL, 2332 "MPLS inner RSS needs to specify inner L2/L3 items after MPLS in pattern"); 2333 return 0; 2334 } 2335 2336 /* 2337 * Validate the default miss action. 2338 * 2339 * @param[in] action_flags 2340 * Bit-fields that holds the actions detected until now. 2341 * @param[out] error 2342 * Pointer to error structure. 2343 * 2344 * @return 2345 * 0 on success, a negative errno value otherwise and rte_errno is set. 2346 */ 2347 int 2348 mlx5_flow_validate_action_default_miss(uint64_t action_flags, 2349 const struct rte_flow_attr *attr, 2350 struct rte_flow_error *error) 2351 { 2352 if (action_flags & MLX5_FLOW_FATE_ACTIONS) 2353 return rte_flow_error_set(error, EINVAL, 2354 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2355 "can't have 2 fate actions in" 2356 " same flow"); 2357 if (attr->egress) 2358 return rte_flow_error_set(error, ENOTSUP, 2359 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 2360 "default miss action not supported " 2361 "for egress"); 2362 if (attr->group) 2363 return rte_flow_error_set(error, ENOTSUP, 2364 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL, 2365 "only group 0 is supported"); 2366 if (attr->transfer) 2367 return rte_flow_error_set(error, ENOTSUP, 2368 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, 2369 NULL, "transfer is not supported"); 2370 return 0; 2371 } 2372 2373 /* 2374 * Validate the count action. 2375 * 2376 * @param[in] dev 2377 * Pointer to the Ethernet device structure. 2378 * @param[in] attr 2379 * Attributes of flow that includes this action. 2380 * @param[out] error 2381 * Pointer to error structure. 2382 * 2383 * @return 2384 * 0 on success, a negative errno value otherwise and rte_errno is set. 2385 */ 2386 int 2387 mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused, 2388 const struct rte_flow_attr *attr, 2389 struct rte_flow_error *error) 2390 { 2391 if (attr->egress) 2392 return rte_flow_error_set(error, ENOTSUP, 2393 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 2394 "count action not supported for " 2395 "egress"); 2396 return 0; 2397 } 2398 2399 /* 2400 * Validate the ASO CT action. 
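 *
 * For reference, a profile accepted by this check could be (illustrative;
 * the field values are assumptions):
 *
 * @code
 *   struct rte_flow_action_conntrack ct = {
 *       .state = RTE_FLOW_CONNTRACK_STATE_ESTABLISHED,
 *       .last_index = RTE_FLOW_CONNTRACK_FLAG_ACK,
 *   };
 * @endcode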
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] conntrack
 *   Pointer to the CT action profile.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_validate_action_ct(struct rte_eth_dev *dev,
			const struct rte_flow_action_conntrack *conntrack,
			struct rte_flow_error *error)
{
	RTE_SET_USED(dev);

	if (conntrack->state > RTE_FLOW_CONNTRACK_STATE_TIME_WAIT)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Invalid CT state");
	if (conntrack->last_index > RTE_FLOW_CONNTRACK_FLAG_RST)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Invalid last TCP packet flag");
	return 0;
}

/**
 * Validate the level value for modify field action.
 *
 * @param[in] data
 *   Pointer to the rte_flow_field_data structure either src or dst.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
flow_validate_modify_field_level(const struct rte_flow_field_data *data,
				 struct rte_flow_error *error)
{
	if (data->level == 0 || data->field == RTE_FLOW_FIELD_FLEX_ITEM)
		return 0;
	if (data->field != RTE_FLOW_FIELD_TAG &&
	    data->field != (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) {
		if (data->level > 1)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "inner header fields modification is not supported");
		return 0;
	}
	if (data->tag_index != 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "tag array can be provided using 'level' or 'tag_index' fields, not both");
	/*
	 * The tag array for RTE_FLOW_FIELD_TAG type is provided using
	 * 'tag_index' field. In old API, it was provided using 'level' field
	 * and it is still supported for backwards compatibility.
	 */
	DRV_LOG(DEBUG, "tag array provided in 'level' field instead of 'tag_index' field.");
	return 0;
}

/**
 * Validate ICMP6 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_icmp6(const struct rte_eth_dev *dev,
			      const struct rte_flow_item *item,
			      uint64_t item_flags,
			      uint8_t target_protocol,
			      struct rte_flow_error *error)
{
	const struct rte_flow_item_icmp6 *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
				      MLX5_FLOW_LAYER_OUTER_L3_IPV6;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ICMP6 layer");
	if (!mlx5_hws_active(dev)) {
		if (!(item_flags & l3m))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item, "IPv6 is mandatory to filter on ICMP6");
	}
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_icmp6_mask;
	ret = mlx5_flow_item_acceptable
		(dev, item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_icmp6_mask,
		 sizeof(struct rte_flow_item_icmp6),
		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate ICMP6 echo request/reply item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_icmp6_echo(const struct rte_eth_dev *dev,
				   const struct rte_flow_item *item,
				   uint64_t item_flags,
				   uint8_t target_protocol,
				   struct rte_flow_error *error)
{
	const struct rte_flow_item_icmp6_echo *mask = item->mask;
	const struct rte_flow_item_icmp6_echo nic_mask = {
		.hdr.base.type = 0xff,
		.hdr.base.code = 0xff,
		.hdr.identifier = RTE_BE16(0xffff),
		.hdr.sequence = RTE_BE16(0xffff),
	};
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
				      MLX5_FLOW_LAYER_OUTER_L3_IPV6;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ICMP6 layer");
	if (!mlx5_hws_active(dev)) {
		if (!(item_flags & l3m))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item, "IPv6 is mandatory to filter on ICMP6");
	}
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &nic_mask;
	ret = mlx5_flow_item_acceptable
		(dev, item, (const uint8_t *)mask,
		 (const uint8_t *)&nic_mask,
		 sizeof(struct rte_flow_item_icmp6_echo),
		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate ICMP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
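 *
 * An ICMP match accepted here might look as follows (illustrative; type 8
 * is an ICMP echo request, and the outer IPv4 item is mandatory when HW
 * steering is not active):
 *
 * @code
 *   struct rte_flow_item_icmp icmp_spec = { .hdr.icmp_type = 8 };
 *   struct rte_flow_item pattern[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *       { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *       { .type = RTE_FLOW_ITEM_TYPE_ICMP, .spec = &icmp_spec },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 * @endcode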
2601 */ 2602 int 2603 mlx5_flow_validate_item_icmp(const struct rte_eth_dev *dev, 2604 const struct rte_flow_item *item, 2605 uint64_t item_flags, 2606 uint8_t target_protocol, 2607 struct rte_flow_error *error) 2608 { 2609 const struct rte_flow_item_icmp *mask = item->mask; 2610 const struct rte_flow_item_icmp nic_mask = { 2611 .hdr.icmp_type = 0xff, 2612 .hdr.icmp_code = 0xff, 2613 .hdr.icmp_ident = RTE_BE16(0xffff), 2614 .hdr.icmp_seq_nb = RTE_BE16(0xffff), 2615 }; 2616 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 2617 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : 2618 MLX5_FLOW_LAYER_OUTER_L3_IPV4; 2619 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 2620 MLX5_FLOW_LAYER_OUTER_L4; 2621 int ret; 2622 2623 if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP) 2624 return rte_flow_error_set(error, EINVAL, 2625 RTE_FLOW_ERROR_TYPE_ITEM, item, 2626 "protocol filtering not compatible" 2627 " with ICMP layer"); 2628 if (!mlx5_hws_active(dev)) { 2629 if (!(item_flags & l3m)) 2630 return rte_flow_error_set(error, EINVAL, 2631 RTE_FLOW_ERROR_TYPE_ITEM, 2632 item, "IPv4 is mandatory to filter on ICMP"); 2633 } 2634 if (item_flags & l4m) 2635 return rte_flow_error_set(error, EINVAL, 2636 RTE_FLOW_ERROR_TYPE_ITEM, item, 2637 "multiple L4 layers not supported"); 2638 if (!mask) 2639 mask = &nic_mask; 2640 ret = mlx5_flow_item_acceptable 2641 (dev, item, (const uint8_t *)mask, 2642 (const uint8_t *)&nic_mask, 2643 sizeof(struct rte_flow_item_icmp), 2644 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 2645 if (ret < 0) 2646 return ret; 2647 return 0; 2648 } 2649 2650 /** 2651 * Validate Ethernet item. 2652 * 2653 * @param[in] item 2654 * Item specification. 2655 * @param[in] item_flags 2656 * Bit-fields that holds the items detected until now. 2657 * @param[out] error 2658 * Pointer to error structure. 2659 * 2660 * @return 2661 * 0 on success, a negative errno value otherwise and rte_errno is set. 2662 */ 2663 int 2664 mlx5_flow_validate_item_eth(const struct rte_eth_dev *dev, 2665 const struct rte_flow_item *item, 2666 uint64_t item_flags, bool ext_vlan_sup, 2667 struct rte_flow_error *error) 2668 { 2669 const struct rte_flow_item_eth *mask = item->mask; 2670 const struct rte_flow_item_eth nic_mask = { 2671 .hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 2672 .hdr.src_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 2673 .hdr.ether_type = RTE_BE16(0xffff), 2674 .has_vlan = ext_vlan_sup ? 1 : 0, 2675 }; 2676 int ret; 2677 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 2678 const uint64_t ethm = tunnel ? 
MLX5_FLOW_LAYER_INNER_L2 : 2679 MLX5_FLOW_LAYER_OUTER_L2; 2680 2681 if (item_flags & ethm) 2682 return rte_flow_error_set(error, ENOTSUP, 2683 RTE_FLOW_ERROR_TYPE_ITEM, item, 2684 "multiple L2 layers not supported"); 2685 if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) || 2686 (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3))) 2687 return rte_flow_error_set(error, EINVAL, 2688 RTE_FLOW_ERROR_TYPE_ITEM, item, 2689 "L2 layer should not follow " 2690 "L3 layers"); 2691 if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) || 2692 (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN))) 2693 return rte_flow_error_set(error, EINVAL, 2694 RTE_FLOW_ERROR_TYPE_ITEM, item, 2695 "L2 layer should not follow VLAN"); 2696 if (item_flags & MLX5_FLOW_LAYER_GTP) 2697 return rte_flow_error_set(error, EINVAL, 2698 RTE_FLOW_ERROR_TYPE_ITEM, item, 2699 "L2 layer should not follow GTP"); 2700 if (!mask) 2701 mask = &rte_flow_item_eth_mask; 2702 ret = mlx5_flow_item_acceptable(dev, item, (const uint8_t *)mask, 2703 (const uint8_t *)&nic_mask, 2704 sizeof(struct rte_flow_item_eth), 2705 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 2706 return ret; 2707 } 2708 2709 /** 2710 * Validate VLAN item. 2711 * 2712 * @param[in] item 2713 * Item specification. 2714 * @param[in] item_flags 2715 * Bit-fields that holds the items detected until now. 2716 * @param[in] dev 2717 * Ethernet device flow is being created on. 2718 * @param[out] error 2719 * Pointer to error structure. 2720 * 2721 * @return 2722 * 0 on success, a negative errno value otherwise and rte_errno is set. 2723 */ 2724 int 2725 mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, 2726 uint64_t item_flags, 2727 struct rte_eth_dev *dev, 2728 struct rte_flow_error *error) 2729 { 2730 const struct rte_flow_item_vlan *spec = item->spec; 2731 const struct rte_flow_item_vlan *mask = item->mask; 2732 const struct rte_flow_item_vlan nic_mask = { 2733 .hdr.vlan_tci = RTE_BE16(UINT16_MAX), 2734 .hdr.eth_proto = RTE_BE16(UINT16_MAX), 2735 }; 2736 uint16_t vlan_tag = 0; 2737 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 2738 int ret; 2739 const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 | 2740 MLX5_FLOW_LAYER_INNER_L4) : 2741 (MLX5_FLOW_LAYER_OUTER_L3 | 2742 MLX5_FLOW_LAYER_OUTER_L4); 2743 const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN : 2744 MLX5_FLOW_LAYER_OUTER_VLAN; 2745 2746 if (item_flags & vlanm) 2747 return rte_flow_error_set(error, EINVAL, 2748 RTE_FLOW_ERROR_TYPE_ITEM, item, 2749 "multiple VLAN layers not supported"); 2750 else if ((item_flags & l34m) != 0) 2751 return rte_flow_error_set(error, EINVAL, 2752 RTE_FLOW_ERROR_TYPE_ITEM, item, 2753 "VLAN cannot follow L3/L4 layer"); 2754 if (!mask) 2755 mask = &rte_flow_item_vlan_mask; 2756 ret = mlx5_flow_item_acceptable(dev, item, (const uint8_t *)mask, 2757 (const uint8_t *)&nic_mask, 2758 sizeof(struct rte_flow_item_vlan), 2759 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 2760 if (ret) 2761 return ret; 2762 if (!tunnel && mask->hdr.vlan_tci != RTE_BE16(0x0fff)) { 2763 struct mlx5_priv *priv = dev->data->dev_private; 2764 2765 if (priv->vmwa_context) { 2766 /* 2767 * Non-NULL context means we have a virtual machine 2768 * and SR-IOV enabled, we have to create VLAN interface 2769 * to make hypervisor to setup E-Switch vport 2770 * context correctly. We avoid creating the multiple 2771 * VLAN interfaces, so we cannot support VLAN tag mask. 
2772 */ 2773 return rte_flow_error_set(error, EINVAL, 2774 RTE_FLOW_ERROR_TYPE_ITEM, 2775 item, 2776 "VLAN tag mask is not" 2777 " supported in virtual" 2778 " environment"); 2779 } 2780 } 2781 if (spec) { 2782 vlan_tag = spec->hdr.vlan_tci; 2783 vlan_tag &= mask->hdr.vlan_tci; 2784 } 2785 /* 2786 * From verbs perspective an empty VLAN is equivalent 2787 * to a packet without VLAN layer. 2788 */ 2789 if (!vlan_tag) 2790 return rte_flow_error_set(error, EINVAL, 2791 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, 2792 item->spec, 2793 "VLAN cannot be empty"); 2794 return 0; 2795 } 2796 2797 /** 2798 * Validate IPV4 item. 2799 * 2800 * @param[in] item 2801 * Item specification. 2802 * @param[in] item_flags 2803 * Bit-fields that holds the items detected until now. 2804 * @param[in] last_item 2805 * Previous validated item in the pattern items. 2806 * @param[in] ether_type 2807 * Type in the ethernet layer header (including dot1q). 2808 * @param[in] acc_mask 2809 * Acceptable mask, if NULL default internal default mask 2810 * will be used to check whether item fields are supported. 2811 * @param[in] range_accepted 2812 * True if range of values is accepted for specific fields, false otherwise. 2813 * @param[out] error 2814 * Pointer to error structure. 2815 * 2816 * @return 2817 * 0 on success, a negative errno value otherwise and rte_errno is set. 2818 */ 2819 int 2820 mlx5_flow_validate_item_ipv4(const struct rte_eth_dev *dev, 2821 const struct rte_flow_item *item, 2822 uint64_t item_flags, 2823 uint64_t last_item, 2824 uint16_t ether_type, 2825 const struct rte_flow_item_ipv4 *acc_mask, 2826 bool range_accepted, 2827 struct rte_flow_error *error) 2828 { 2829 const struct rte_flow_item_ipv4 *mask = item->mask; 2830 const struct rte_flow_item_ipv4 *spec = item->spec; 2831 const struct rte_flow_item_ipv4 nic_mask = { 2832 .hdr = { 2833 .src_addr = RTE_BE32(0xffffffff), 2834 .dst_addr = RTE_BE32(0xffffffff), 2835 .type_of_service = 0xff, 2836 .next_proto_id = 0xff, 2837 }, 2838 }; 2839 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 2840 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 2841 MLX5_FLOW_LAYER_OUTER_L3; 2842 const uint64_t l4m = tunnel ? 
MLX5_FLOW_LAYER_INNER_L4 : 2843 MLX5_FLOW_LAYER_OUTER_L4; 2844 int ret; 2845 uint8_t next_proto = 0xFF; 2846 const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 | 2847 MLX5_FLOW_LAYER_OUTER_VLAN | 2848 MLX5_FLOW_LAYER_INNER_VLAN); 2849 2850 if ((last_item & l2_vlan) && ether_type && 2851 ether_type != RTE_ETHER_TYPE_IPV4) 2852 return rte_flow_error_set(error, EINVAL, 2853 RTE_FLOW_ERROR_TYPE_ITEM, item, 2854 "IPv4 cannot follow L2/VLAN layer " 2855 "which ether type is not IPv4"); 2856 if (item_flags & MLX5_FLOW_LAYER_IPIP) { 2857 if (mask && spec) 2858 next_proto = mask->hdr.next_proto_id & 2859 spec->hdr.next_proto_id; 2860 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) 2861 return rte_flow_error_set(error, EINVAL, 2862 RTE_FLOW_ERROR_TYPE_ITEM, 2863 item, 2864 "multiple tunnel " 2865 "not supported"); 2866 } 2867 if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) 2868 return rte_flow_error_set(error, EINVAL, 2869 RTE_FLOW_ERROR_TYPE_ITEM, item, 2870 "wrong tunnel type - IPv6 specified " 2871 "but IPv4 item provided"); 2872 if (item_flags & l3m) 2873 return rte_flow_error_set(error, ENOTSUP, 2874 RTE_FLOW_ERROR_TYPE_ITEM, item, 2875 "multiple L3 layers not supported"); 2876 else if (item_flags & l4m) 2877 return rte_flow_error_set(error, EINVAL, 2878 RTE_FLOW_ERROR_TYPE_ITEM, item, 2879 "L3 cannot follow an L4 layer."); 2880 else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && 2881 !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) 2882 return rte_flow_error_set(error, EINVAL, 2883 RTE_FLOW_ERROR_TYPE_ITEM, item, 2884 "L3 cannot follow an NVGRE layer."); 2885 if (!mask) 2886 mask = &rte_flow_item_ipv4_mask; 2887 else if (mask->hdr.next_proto_id != 0 && 2888 mask->hdr.next_proto_id != 0xff) 2889 return rte_flow_error_set(error, EINVAL, 2890 RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask, 2891 "partial mask is not supported" 2892 " for protocol"); 2893 ret = mlx5_flow_item_acceptable(dev, item, (const uint8_t *)mask, 2894 acc_mask ? (const uint8_t *)acc_mask 2895 : (const uint8_t *)&nic_mask, 2896 sizeof(struct rte_flow_item_ipv4), 2897 range_accepted, error); 2898 if (ret < 0) 2899 return ret; 2900 return 0; 2901 } 2902 2903 /** 2904 * Validate IPV6 item. 2905 * 2906 * @param[in] item 2907 * Item specification. 2908 * @param[in] item_flags 2909 * Bit-fields that holds the items detected until now. 2910 * @param[in] last_item 2911 * Previous validated item in the pattern items. 2912 * @param[in] ether_type 2913 * Type in the ethernet layer header (including dot1q). 2914 * @param[in] acc_mask 2915 * Acceptable mask, if NULL default internal default mask 2916 * will be used to check whether item fields are supported. 2917 * @param[out] error 2918 * Pointer to error structure. 2919 * 2920 * @return 2921 * 0 on success, a negative errno value otherwise and rte_errno is set. 2922 */ 2923 int 2924 mlx5_flow_validate_item_ipv6(const struct rte_eth_dev *dev, 2925 const struct rte_flow_item *item, 2926 uint64_t item_flags, 2927 uint64_t last_item, 2928 uint16_t ether_type, 2929 const struct rte_flow_item_ipv6 *acc_mask, 2930 struct rte_flow_error *error) 2931 { 2932 const struct rte_flow_item_ipv6 *mask = item->mask; 2933 const struct rte_flow_item_ipv6 *spec = item->spec; 2934 const struct rte_flow_item_ipv6 nic_mask = { 2935 .hdr = { 2936 .src_addr = RTE_IPV6_MASK_FULL, 2937 .dst_addr = RTE_IPV6_MASK_FULL, 2938 .vtc_flow = RTE_BE32(0xffffffff), 2939 .proto = 0xff, 2940 }, 2941 }; 2942 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 2943 const uint64_t l3m = tunnel ? 
				      MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;
	uint8_t next_proto = 0xFF;
	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
				  MLX5_FLOW_LAYER_OUTER_VLAN |
				  MLX5_FLOW_LAYER_INNER_VLAN);

	if ((last_item & l2_vlan) && ether_type &&
	    ether_type != RTE_ETHER_TYPE_IPV6)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv6 cannot follow L2/VLAN layer "
					  "which ether type is not IPv6");
	if (mask && mask->hdr.proto == UINT8_MAX && spec)
		next_proto = spec->hdr.proto;
	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "multiple tunnel "
						  "not supported");
	}
	if (next_proto == IPPROTO_HOPOPTS ||
	    next_proto == IPPROTO_ROUTING ||
	    next_proto == IPPROTO_FRAGMENT ||
	    next_proto == IPPROTO_AH ||
	    next_proto == IPPROTO_DSTOPTS ||
	    (!mlx5_hws_active(dev) && next_proto == IPPROTO_ESP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv6 proto (next header) should "
					  "not be set as extension header");
	if (item_flags & MLX5_FLOW_LAYER_IPIP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "wrong tunnel type - IPv4 specified "
					  "but IPv6 item provided");
	if (item_flags & l3m)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L3 layers not supported");
	else if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an L4 layer.");
	else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
		  !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an NVGRE layer.");
	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	ret = mlx5_flow_item_acceptable(dev, item, (const uint8_t *)mask,
					acc_mask ? (const uint8_t *)acc_mask
						 : (const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_ipv6),
					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate UDP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_udp(const struct rte_eth_dev *dev,
			    const struct rte_flow_item *item,
			    uint64_t item_flags,
			    uint8_t target_protocol,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_udp *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ?
MLX5_FLOW_LAYER_INNER_L4 : 3038 MLX5_FLOW_LAYER_OUTER_L4; 3039 int ret; 3040 3041 if (!mlx5_hws_active(dev)) { 3042 if (target_protocol != 0xff && target_protocol != IPPROTO_UDP) 3043 return rte_flow_error_set(error, EINVAL, 3044 RTE_FLOW_ERROR_TYPE_ITEM, 3045 item, "protocol filtering not compatible with UDP layer"); 3046 if (!(item_flags & l3m)) 3047 return rte_flow_error_set(error, EINVAL, 3048 RTE_FLOW_ERROR_TYPE_ITEM, 3049 item, 3050 "L3 is mandatory to filter on L4"); 3051 } 3052 if (item_flags & l4m) 3053 return rte_flow_error_set(error, EINVAL, 3054 RTE_FLOW_ERROR_TYPE_ITEM, item, 3055 "multiple L4 layers not supported"); 3056 if (!mask) 3057 mask = &rte_flow_item_udp_mask; 3058 ret = mlx5_flow_item_acceptable 3059 (dev, item, (const uint8_t *)mask, 3060 (const uint8_t *)&rte_flow_item_udp_mask, 3061 sizeof(struct rte_flow_item_udp), MLX5_ITEM_RANGE_NOT_ACCEPTED, 3062 error); 3063 if (ret < 0) 3064 return ret; 3065 return 0; 3066 } 3067 3068 /** 3069 * Validate TCP item. 3070 * 3071 * @param[in] item 3072 * Item specification. 3073 * @param[in] item_flags 3074 * Bit-fields that holds the items detected until now. 3075 * @param[in] target_protocol 3076 * The next protocol in the previous item. 3077 * @param[out] error 3078 * Pointer to error structure. 3079 * 3080 * @return 3081 * 0 on success, a negative errno value otherwise and rte_errno is set. 3082 */ 3083 int 3084 mlx5_flow_validate_item_tcp(const struct rte_eth_dev *dev, 3085 const struct rte_flow_item *item, 3086 uint64_t item_flags, 3087 uint8_t target_protocol, 3088 const struct rte_flow_item_tcp *flow_mask, 3089 struct rte_flow_error *error) 3090 { 3091 const struct rte_flow_item_tcp *mask = item->mask; 3092 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 3093 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 3094 MLX5_FLOW_LAYER_OUTER_L3; 3095 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 3096 MLX5_FLOW_LAYER_OUTER_L4; 3097 int ret; 3098 3099 MLX5_ASSERT(flow_mask); 3100 if (!mlx5_hws_active(dev)) { 3101 if (target_protocol != 0xff && target_protocol != IPPROTO_TCP) 3102 return rte_flow_error_set(error, EINVAL, 3103 RTE_FLOW_ERROR_TYPE_ITEM, 3104 item, "protocol filtering not compatible with TCP layer"); 3105 if (!(item_flags & l3m)) 3106 return rte_flow_error_set(error, EINVAL, 3107 RTE_FLOW_ERROR_TYPE_ITEM, 3108 item, "L3 is mandatory to filter on L4"); 3109 } 3110 if (item_flags & l4m) 3111 return rte_flow_error_set(error, EINVAL, 3112 RTE_FLOW_ERROR_TYPE_ITEM, item, 3113 "multiple L4 layers not supported"); 3114 if (!mask) 3115 mask = &rte_flow_item_tcp_mask; 3116 ret = mlx5_flow_item_acceptable 3117 (dev, item, (const uint8_t *)mask, 3118 (const uint8_t *)flow_mask, 3119 sizeof(struct rte_flow_item_tcp), MLX5_ITEM_RANGE_NOT_ACCEPTED, 3120 error); 3121 if (ret < 0) 3122 return ret; 3123 return 0; 3124 } 3125 3126 /** 3127 * Validate VXLAN item. 3128 * 3129 * @param[in] dev 3130 * Pointer to the Ethernet device structure. 3131 * @param[in] udp_dport 3132 * UDP destination port 3133 * @param[in] item 3134 * Item specification. 3135 * @param[in] item_flags 3136 * Bit-fields that holds the items detected until now. 3137 * @param root 3138 * Whether action is on root table. 3139 * @param[out] error 3140 * Pointer to error structure. 3141 * 3142 * @return 3143 * 0 on success, a negative errno value otherwise and rte_errno is set. 
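 *
 * A fully defined VXLAN pattern as required below (illustrative; the VNI
 * value is an assumption):
 *
 * @code
 *   struct rte_flow_item_vxlan vxlan_spec = {
 *       .hdr.vni = { 0x00, 0x12, 0x34 },
 *   };
 *   struct rte_flow_item pattern[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *       { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *       { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *       { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_spec },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 * @endcode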
3144 */ 3145 int 3146 mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev, 3147 uint16_t udp_dport, 3148 const struct rte_flow_item *item, 3149 uint64_t item_flags, 3150 bool root, 3151 struct rte_flow_error *error) 3152 { 3153 const struct rte_flow_item_vxlan *spec = item->spec; 3154 const struct rte_flow_item_vxlan *mask = item->mask; 3155 int ret; 3156 struct mlx5_priv *priv = dev->data->dev_private; 3157 union vni { 3158 uint32_t vlan_id; 3159 uint8_t vni[4]; 3160 } id = { .vlan_id = 0, }; 3161 const struct rte_flow_item_vxlan nic_mask = { 3162 .hdr.vni = { 0xff, 0xff, 0xff }, 3163 .hdr.rsvd1 = 0xff, 3164 }; 3165 const struct rte_flow_item_vxlan *valid_mask; 3166 3167 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 3168 return rte_flow_error_set(error, ENOTSUP, 3169 RTE_FLOW_ERROR_TYPE_ITEM, item, 3170 "multiple tunnel layers not" 3171 " supported"); 3172 /* HWS can match entire VXLAN, VXLAN-GBP and VXLAN-GPE headers */ 3173 if (mlx5_hws_active(dev)) 3174 return 0; 3175 valid_mask = &rte_flow_item_vxlan_mask; 3176 /* 3177 * Verify only UDPv4 is present as defined in 3178 * https://tools.ietf.org/html/rfc7348 3179 */ 3180 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 3181 return rte_flow_error_set(error, EINVAL, 3182 RTE_FLOW_ERROR_TYPE_ITEM, 3183 item, "no outer UDP layer found"); 3184 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 3185 return rte_flow_error_set(error, ENOTSUP, 3186 RTE_FLOW_ERROR_TYPE_ITEM, item, 3187 "VXLAN tunnel must be fully defined"); 3188 if (!mask) 3189 mask = &rte_flow_item_vxlan_mask; 3190 3191 if (priv->sh->steering_format_version != 3192 MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 || 3193 !udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN) { 3194 /* non-root table */ 3195 if (!root && priv->sh->misc5_cap) 3196 valid_mask = &nic_mask; 3197 /* Group zero in NIC domain */ 3198 if (!root && priv->sh->tunnel_header_0_1) 3199 valid_mask = &nic_mask; 3200 } 3201 ret = mlx5_flow_item_acceptable 3202 (dev, item, (const uint8_t *)mask, 3203 (const uint8_t *)valid_mask, 3204 sizeof(struct rte_flow_item_vxlan), 3205 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 3206 if (ret < 0) 3207 return ret; 3208 if (spec) { 3209 memcpy(&id.vni[1], spec->hdr.vni, 3); 3210 memcpy(&id.vni[1], mask->hdr.vni, 3); 3211 } 3212 return 0; 3213 } 3214 3215 /** 3216 * Validate VXLAN_GPE item. 3217 * 3218 * @param[in] item 3219 * Item specification. 3220 * @param[in] item_flags 3221 * Bit-fields that holds the items detected until now. 3222 * @param[in] priv 3223 * Pointer to the private data structure. 3224 * @param[in] target_protocol 3225 * The next protocol in the previous item. 3226 * @param[out] error 3227 * Pointer to error structure. 3228 * 3229 * @return 3230 * 0 on success, a negative errno value otherwise and rte_errno is set. 
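 *
 * Matching this item requires the l3_vxlan_en device argument, e.g.
 * (illustrative invocation; the PCI address is a placeholder):
 *
 * @code
 *   dpdk-testpmd -a 0000:08:00.0,l3_vxlan_en=1 -- -i
 * @endcode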
3231 */ 3232 int 3233 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, 3234 uint64_t item_flags, 3235 struct rte_eth_dev *dev, 3236 struct rte_flow_error *error) 3237 { 3238 struct mlx5_priv *priv = dev->data->dev_private; 3239 const struct rte_flow_item_vxlan_gpe *spec = item->spec; 3240 const struct rte_flow_item_vxlan_gpe *mask = item->mask; 3241 int ret; 3242 union vni { 3243 uint32_t vlan_id; 3244 uint8_t vni[4]; 3245 } id = { .vlan_id = 0, }; 3246 3247 struct rte_flow_item_vxlan_gpe nic_mask = { 3248 .vni = { 0xff, 0xff, 0xff }, 3249 .protocol = 0xff, 3250 .flags = 0xff, 3251 }; 3252 3253 if (!priv->sh->config.l3_vxlan_en) 3254 return rte_flow_error_set(error, ENOTSUP, 3255 RTE_FLOW_ERROR_TYPE_ITEM, item, 3256 "L3 VXLAN is not enabled by device" 3257 " parameter and/or not configured in" 3258 " firmware"); 3259 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 3260 return rte_flow_error_set(error, ENOTSUP, 3261 RTE_FLOW_ERROR_TYPE_ITEM, item, 3262 "multiple tunnel layers not" 3263 " supported"); 3264 /* 3265 * Verify only UDPv4 is present as defined in 3266 * https://tools.ietf.org/html/rfc7348 3267 */ 3268 if (!mlx5_hws_active(dev)) { 3269 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 3270 return rte_flow_error_set(error, EINVAL, 3271 RTE_FLOW_ERROR_TYPE_ITEM, 3272 item, "no outer UDP layer found"); 3273 } 3274 if (!mask) 3275 mask = &rte_flow_item_vxlan_gpe_mask; 3276 if (mlx5_hws_active(dev) || 3277 (priv->sh->misc5_cap && priv->sh->tunnel_header_0_1)) { 3278 nic_mask.rsvd0[0] = 0xff; 3279 nic_mask.rsvd0[1] = 0xff; 3280 nic_mask.rsvd1 = 0xff; 3281 } 3282 ret = mlx5_flow_item_acceptable 3283 (dev, item, (const uint8_t *)mask, 3284 (const uint8_t *)&nic_mask, 3285 sizeof(struct rte_flow_item_vxlan_gpe), 3286 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 3287 if (ret < 0) 3288 return ret; 3289 if (spec) { 3290 memcpy(&id.vni[1], spec->hdr.vni, 3); 3291 memcpy(&id.vni[1], mask->hdr.vni, 3); 3292 } 3293 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 3294 return rte_flow_error_set(error, ENOTSUP, 3295 RTE_FLOW_ERROR_TYPE_ITEM, item, 3296 "VXLAN-GPE tunnel must be fully" 3297 " defined"); 3298 return 0; 3299 } 3300 /** 3301 * Validate GRE Key item. 3302 * 3303 * @param[in] item 3304 * Item specification. 3305 * @param[in] item_flags 3306 * Bit flags to mark detected items. 3307 * @param[in] gre_item 3308 * Pointer to gre_item 3309 * @param[out] error 3310 * Pointer to error structure. 3311 * 3312 * @return 3313 * 0 on success, a negative errno value otherwise and rte_errno is set. 
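 *
 * A sketch of a valid combination (illustrative; the key value is an
 * assumption and the preceding GRE item should carry the K bit):
 *
 * @code
 *   struct rte_flow_item_gre gre_spec = {
 *       .c_rsvd0_ver = RTE_BE16(0x2000),
 *   };
 *   rte_be32_t gre_key = RTE_BE32(0x12345678);
 *   struct rte_flow_item pattern[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *       { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *       { .type = RTE_FLOW_ITEM_TYPE_GRE, .spec = &gre_spec },
 *       { .type = RTE_FLOW_ITEM_TYPE_GRE_KEY, .spec = &gre_key },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 * @endcode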
 */
int
mlx5_flow_validate_item_gre_key(const struct rte_eth_dev *dev,
				const struct rte_flow_item *item,
				uint64_t item_flags,
				const struct rte_flow_item *gre_item,
				struct rte_flow_error *error)
{
	const rte_be32_t *mask = item->mask;
	int ret = 0;
	rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
	const struct rte_flow_item_gre *gre_spec;
	const struct rte_flow_item_gre *gre_mask;

	if (item_flags & MLX5_FLOW_LAYER_GRE_KEY)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple GRE key items not supported");
	if (!(item_flags & MLX5_FLOW_LAYER_GRE))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "No preceding GRE header");
	if (item_flags & MLX5_FLOW_LAYER_INNER)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "GRE key following a wrong item");
	gre_mask = gre_item->mask;
	if (!gre_mask)
		gre_mask = &rte_flow_item_gre_mask;
	gre_spec = gre_item->spec;
	if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
			 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Key bit must be on");

	if (!mask)
		mask = &gre_key_default_mask;
	ret = mlx5_flow_item_acceptable
		(dev, item, (const uint8_t *)mask,
		 (const uint8_t *)&gre_key_default_mask,
		 sizeof(rte_be32_t), MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	return ret;
}

/**
 * Validate GRE optional item.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit flags to mark detected items.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] gre_item
 *   Pointer to the GRE item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
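 *
 * At least one of checksum/key/sequence must be given, e.g. (illustrative;
 * the key value is an assumption):
 *
 * @code
 *   struct rte_flow_item_gre_opt opt_spec = {
 *       .key = { .key = RTE_BE32(0x12345678) },
 *   };
 * @endcode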
3377 */ 3378 int 3379 mlx5_flow_validate_item_gre_option(struct rte_eth_dev *dev, 3380 const struct rte_flow_item *item, 3381 uint64_t item_flags, 3382 const struct rte_flow_attr *attr, 3383 const struct rte_flow_item *gre_item, 3384 struct rte_flow_error *error) 3385 { 3386 const struct rte_flow_item_gre *gre_spec = gre_item->spec; 3387 const struct rte_flow_item_gre *gre_mask = gre_item->mask; 3388 const struct rte_flow_item_gre_opt *spec = item->spec; 3389 const struct rte_flow_item_gre_opt *mask = item->mask; 3390 struct mlx5_priv *priv = dev->data->dev_private; 3391 int ret = 0; 3392 struct rte_flow_item_gre_opt nic_mask = { 3393 .checksum_rsvd = { 3394 .checksum = RTE_BE16(UINT16_MAX), 3395 .reserved1 = 0x0, 3396 }, 3397 .key = { 3398 .key = RTE_BE32(UINT32_MAX), 3399 }, 3400 .sequence = { 3401 .sequence = RTE_BE32(UINT32_MAX), 3402 }, 3403 }; 3404 3405 if (!(item_flags & MLX5_FLOW_LAYER_GRE)) 3406 return rte_flow_error_set(error, ENOTSUP, 3407 RTE_FLOW_ERROR_TYPE_ITEM, item, 3408 "No preceding GRE header"); 3409 if (item_flags & MLX5_FLOW_LAYER_INNER) 3410 return rte_flow_error_set(error, ENOTSUP, 3411 RTE_FLOW_ERROR_TYPE_ITEM, item, 3412 "GRE option following a wrong item"); 3413 if ((!spec && !mlx5_hws_active(dev)) || !mask) 3414 return rte_flow_error_set(error, EINVAL, 3415 RTE_FLOW_ERROR_TYPE_ITEM, item, 3416 "At least one field gre_option(checksum/key/sequence) must be specified"); 3417 if (!gre_mask) 3418 gre_mask = &rte_flow_item_gre_mask; 3419 if (mask->checksum_rsvd.checksum) 3420 if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x8000)) && 3421 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x8000))) 3422 return rte_flow_error_set(error, EINVAL, 3423 RTE_FLOW_ERROR_TYPE_ITEM, 3424 item, 3425 "Checksum bit must be on"); 3426 if (mask->key.key) 3427 if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) && 3428 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000))) 3429 return rte_flow_error_set(error, EINVAL, 3430 RTE_FLOW_ERROR_TYPE_ITEM, 3431 item, "Key bit must be on"); 3432 if (mask->sequence.sequence) 3433 if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x1000)) && 3434 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x1000))) 3435 return rte_flow_error_set(error, EINVAL, 3436 RTE_FLOW_ERROR_TYPE_ITEM, 3437 item, 3438 "Sequence bit must be on"); 3439 if (!mlx5_hws_active(dev)) { 3440 if (mask->checksum_rsvd.checksum || mask->sequence.sequence) { 3441 if (priv->sh->steering_format_version == 3442 MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 || 3443 ((attr->group || 3444 (attr->transfer && priv->fdb_def_rule)) && 3445 !priv->sh->misc5_cap) || 3446 (!(priv->sh->tunnel_header_0_1 && 3447 priv->sh->tunnel_header_2_3) && 3448 !attr->group && 3449 (!attr->transfer || !priv->fdb_def_rule))) 3450 return rte_flow_error_set 3451 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, 3452 item, "Checksum/Sequence not supported"); 3453 } 3454 } 3455 ret = mlx5_flow_item_acceptable 3456 (dev, item, (const uint8_t *)mask, 3457 (const uint8_t *)&nic_mask, 3458 sizeof(struct rte_flow_item_gre_opt), 3459 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 3460 return ret; 3461 } 3462 3463 /** 3464 * Validate GRE item. 3465 * 3466 * @param[in] item 3467 * Item specification. 3468 * @param[in] item_flags 3469 * Bit flags to mark detected items. 3470 * @param[in] target_protocol 3471 * The next protocol in the previous item. 3472 * @param[out] error 3473 * Pointer to error structure. 3474 * 3475 * @return 3476 * 0 on success, a negative errno value otherwise and rte_errno is set. 
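 *
 * An example accepted by this function (illustrative; TEB is one possible
 * protocol value):
 *
 * @code
 *   struct rte_flow_item_gre gre_spec = {
 *       .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB),
 *   };
 *   struct rte_flow_item pattern[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *       { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *       { .type = RTE_FLOW_ITEM_TYPE_GRE, .spec = &gre_spec },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 * @endcode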
 */
int
mlx5_flow_validate_item_gre(const struct rte_eth_dev *dev,
			    const struct rte_flow_item *item,
			    uint64_t item_flags,
			    uint8_t target_protocol,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_gre *spec __rte_unused = item->spec;
	const struct rte_flow_item_gre *mask = item->mask;
	int ret;
	const struct rte_flow_item_gre nic_mask = {
		.c_rsvd0_ver = RTE_BE16(0xB000),
		.protocol = RTE_BE16(UINT16_MAX),
	};

	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with this GRE layer");
	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple tunnel layers not"
					  " supported");
	if (!mlx5_hws_active(dev)) {
		if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item, "L3 Layer is missing");
	}
	if (!mask)
		mask = &rte_flow_item_gre_mask;
	ret = mlx5_flow_item_acceptable
		(dev, item, (const uint8_t *)mask,
		 (const uint8_t *)&nic_mask,
		 sizeof(struct rte_flow_item_gre), MLX5_ITEM_RANGE_NOT_ACCEPTED,
		 error);
	if (ret < 0)
		return ret;
#ifndef HAVE_MLX5DV_DR
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
	if (spec && (spec->protocol & mask->protocol))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "without MPLS support the"
					  " specification cannot be used for"
					  " filtering");
#endif
#endif
	return 0;
}

/**
 * Validate Geneve item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
			       uint64_t item_flags,
			       struct rte_eth_dev *dev,
			       struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_item_geneve *spec = item->spec;
	const struct rte_flow_item_geneve *mask = item->mask;
	int ret;
	uint16_t gbhdr;
	uint8_t opt_len = priv->sh->cdev->config.hca_attr.geneve_max_opt_len ?
3559 MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0; 3560 const struct rte_flow_item_geneve nic_mask = { 3561 .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80), 3562 .vni = { 0xff, 0xff, 0xff }, 3563 .protocol = RTE_BE16(UINT16_MAX), 3564 }; 3565 3566 if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_geneve_rx) 3567 return rte_flow_error_set(error, ENOTSUP, 3568 RTE_FLOW_ERROR_TYPE_ITEM, item, 3569 "L3 Geneve is not enabled by device" 3570 " parameter and/or not configured in" 3571 " firmware"); 3572 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 3573 return rte_flow_error_set(error, ENOTSUP, 3574 RTE_FLOW_ERROR_TYPE_ITEM, item, 3575 "multiple tunnel layers not" 3576 " supported"); 3577 /* 3578 * Verify only UDPv4 is present as defined in 3579 * https://tools.ietf.org/html/rfc7348 3580 */ 3581 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 3582 return rte_flow_error_set(error, EINVAL, 3583 RTE_FLOW_ERROR_TYPE_ITEM, item, 3584 "no outer UDP layer found"); 3585 if (!mask) 3586 mask = &rte_flow_item_geneve_mask; 3587 ret = mlx5_flow_item_acceptable 3588 (dev, item, (const uint8_t *)mask, 3589 (const uint8_t *)&nic_mask, 3590 sizeof(struct rte_flow_item_geneve), 3591 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 3592 if (ret) 3593 return ret; 3594 if (spec) { 3595 gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0); 3596 if (MLX5_GENEVE_VER_VAL(gbhdr) || 3597 MLX5_GENEVE_CRITO_VAL(gbhdr) || 3598 MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1) 3599 return rte_flow_error_set(error, ENOTSUP, 3600 RTE_FLOW_ERROR_TYPE_ITEM, 3601 item, 3602 "Geneve protocol unsupported" 3603 " fields are being used"); 3604 if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len) 3605 return rte_flow_error_set 3606 (error, ENOTSUP, 3607 RTE_FLOW_ERROR_TYPE_ITEM, 3608 item, 3609 "Unsupported Geneve options length"); 3610 } 3611 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 3612 return rte_flow_error_set 3613 (error, ENOTSUP, 3614 RTE_FLOW_ERROR_TYPE_ITEM, item, 3615 "Geneve tunnel must be fully defined"); 3616 return 0; 3617 } 3618 3619 /** 3620 * Validate Geneve TLV option item. 3621 * 3622 * @param[in] item 3623 * Item specification. 3624 * @param[in] last_item 3625 * Previous validated item in the pattern items. 3626 * @param[in] geneve_item 3627 * Previous GENEVE item specification. 3628 * @param[in] dev 3629 * Pointer to the rte_eth_dev structure. 3630 * @param[out] error 3631 * Pointer to error structure. 3632 * 3633 * @return 3634 * 0 on success, a negative errno value otherwise and rte_errno is set. 
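 *
 * A sketch of an option spec these checks accept (illustrative; the class,
 * type and data values are assumptions, and the mask must be full for
 * class/type/length):
 *
 * @code
 *   rte_be32_t opt_data = RTE_BE32(0x12345678);
 *   struct rte_flow_item_geneve_opt opt_spec = {
 *       .option_class = RTE_BE16(0x0102),
 *       .option_type = 0x40,
 *       .option_len = 1,
 *       .data = &opt_data,
 *   };
 * @endcode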
3635 */ 3636 int 3637 mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item, 3638 uint64_t last_item, 3639 const struct rte_flow_item *geneve_item, 3640 struct rte_eth_dev *dev, 3641 struct rte_flow_error *error) 3642 { 3643 struct mlx5_priv *priv = dev->data->dev_private; 3644 struct mlx5_dev_ctx_shared *sh = priv->sh; 3645 struct mlx5_geneve_tlv_option_resource *geneve_opt_resource; 3646 struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr; 3647 uint8_t data_max_supported = 3648 hca_attr->max_geneve_tlv_option_data_len * 4; 3649 const struct rte_flow_item_geneve *geneve_spec; 3650 const struct rte_flow_item_geneve *geneve_mask; 3651 const struct rte_flow_item_geneve_opt *spec = item->spec; 3652 const struct rte_flow_item_geneve_opt *mask = item->mask; 3653 unsigned int i; 3654 unsigned int data_len; 3655 uint8_t tlv_option_len; 3656 uint16_t optlen_m, optlen_v; 3657 const struct rte_flow_item_geneve_opt full_mask = { 3658 .option_class = RTE_BE16(0xffff), 3659 .option_type = 0xff, 3660 .option_len = 0x1f, 3661 }; 3662 3663 if (!mask) 3664 mask = &rte_flow_item_geneve_opt_mask; 3665 if (!spec) 3666 return rte_flow_error_set 3667 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 3668 "Geneve TLV opt class/type/length must be specified"); 3669 if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK) 3670 return rte_flow_error_set 3671 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 3672 "Geneve TLV opt length exceeds the limit (31)"); 3673 /* Check if class type and length masks are full. */ 3674 if (full_mask.option_class != mask->option_class || 3675 full_mask.option_type != mask->option_type || 3676 full_mask.option_len != (mask->option_len & full_mask.option_len)) 3677 return rte_flow_error_set 3678 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 3679 "Geneve TLV opt class/type/length masks must be full"); 3680 /* Check if length is supported */ 3681 if ((uint32_t)spec->option_len > 3682 hca_attr->max_geneve_tlv_option_data_len) 3683 return rte_flow_error_set 3684 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 3685 "Geneve TLV opt length not supported"); 3686 if (hca_attr->max_geneve_tlv_options > 1) 3687 DRV_LOG(DEBUG, 3688 "max_geneve_tlv_options supports more than 1 option"); 3689 /* Check GENEVE item preceding. */ 3690 if (!geneve_item || !(last_item & MLX5_FLOW_LAYER_GENEVE)) 3691 return rte_flow_error_set 3692 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 3693 "Geneve opt item must be preceded with Geneve item"); 3694 geneve_spec = geneve_item->spec; 3695 geneve_mask = geneve_item->mask ? geneve_item->mask : 3696 &rte_flow_item_geneve_mask; 3697 /* Check if GENEVE TLV option size doesn't exceed option length */ 3698 if (geneve_spec && (geneve_mask->ver_opt_len_o_c_rsvd0 || 3699 geneve_spec->ver_opt_len_o_c_rsvd0)) { 3700 tlv_option_len = spec->option_len & mask->option_len; 3701 optlen_v = rte_be_to_cpu_16(geneve_spec->ver_opt_len_o_c_rsvd0); 3702 optlen_v = MLX5_GENEVE_OPTLEN_VAL(optlen_v); 3703 optlen_m = rte_be_to_cpu_16(geneve_mask->ver_opt_len_o_c_rsvd0); 3704 optlen_m = MLX5_GENEVE_OPTLEN_VAL(optlen_m); 3705 if ((optlen_v & optlen_m) <= tlv_option_len) 3706 return rte_flow_error_set 3707 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 3708 "GENEVE TLV option length exceeds optlen"); 3709 } 3710 /* Check if length is 0 or data is 0. 
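 * (option_len is counted in 4-byte words, so the data length checked below
 * is option_len * 4 bytes.)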
*/ 3711 if (spec->data == NULL || spec->option_len == 0) 3712 return rte_flow_error_set 3713 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 3714 "Geneve TLV opt with zero data/length not supported"); 3715 /* Check not all data & mask are 0. */ 3716 data_len = spec->option_len * 4; 3717 if (mask->data == NULL) { 3718 for (i = 0; i < data_len; i++) 3719 if (spec->data[i]) 3720 break; 3721 if (i == data_len) 3722 return rte_flow_error_set(error, ENOTSUP, 3723 RTE_FLOW_ERROR_TYPE_ITEM, item, 3724 "Can't match on Geneve option data 0"); 3725 } else { 3726 for (i = 0; i < data_len; i++) 3727 if (spec->data[i] & mask->data[i]) 3728 break; 3729 if (i == data_len) 3730 return rte_flow_error_set(error, ENOTSUP, 3731 RTE_FLOW_ERROR_TYPE_ITEM, item, 3732 "Can't match on Geneve option data and mask 0"); 3733 /* Check data mask supported. */ 3734 for (i = data_max_supported; i < data_len ; i++) 3735 if (mask->data[i]) 3736 return rte_flow_error_set(error, ENOTSUP, 3737 RTE_FLOW_ERROR_TYPE_ITEM, item, 3738 "Data mask is of unsupported size"); 3739 } 3740 /* Check GENEVE option is supported in NIC. */ 3741 if (!hca_attr->geneve_tlv_opt) 3742 return rte_flow_error_set 3743 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 3744 "Geneve TLV opt not supported"); 3745 /* Check if we already have geneve option with different type/class. */ 3746 rte_spinlock_lock(&sh->geneve_tlv_opt_sl); 3747 geneve_opt_resource = sh->geneve_tlv_option_resource; 3748 if (geneve_opt_resource != NULL) 3749 if (geneve_opt_resource->option_class != spec->option_class || 3750 geneve_opt_resource->option_type != spec->option_type || 3751 geneve_opt_resource->length != spec->option_len) { 3752 rte_spinlock_unlock(&sh->geneve_tlv_opt_sl); 3753 return rte_flow_error_set(error, ENOTSUP, 3754 RTE_FLOW_ERROR_TYPE_ITEM, item, 3755 "Only one Geneve TLV option supported"); 3756 } 3757 rte_spinlock_unlock(&sh->geneve_tlv_opt_sl); 3758 return 0; 3759 } 3760 3761 /** 3762 * Validate MPLS item. 3763 * 3764 * @param[in] dev 3765 * Pointer to the rte_eth_dev structure. 3766 * @param[in] item 3767 * Item specification. 3768 * @param[in] item_flags 3769 * Bit-fields that holds the items detected until now. 3770 * @param[in] prev_layer 3771 * The protocol layer indicated in previous item. 3772 * @param[out] error 3773 * Pointer to error structure. 3774 * 3775 * @return 3776 * 0 on success, a negative errno value otherwise and rte_errno is set. 3777 */ 3778 int 3779 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, 3780 const struct rte_flow_item *item __rte_unused, 3781 uint64_t item_flags __rte_unused, 3782 uint64_t prev_layer __rte_unused, 3783 struct rte_flow_error *error) 3784 { 3785 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT 3786 const struct rte_flow_item_mpls hws_nic_mask = { 3787 .label_tc_s = {0xff, 0xff, 0xff}, 3788 .ttl = 0xff 3789 }; 3790 const struct rte_flow_item_mpls *nic_mask = !mlx5_hws_active(dev) ? 
3791 &rte_flow_item_mpls_mask : &hws_nic_mask; 3792 const struct rte_flow_item_mpls *mask = item->mask; 3793 struct mlx5_priv *priv = dev->data->dev_private; 3794 int ret; 3795 3796 if (!mlx5_hws_active(dev)) { 3797 /* MPLS has HW support in HWS */ 3798 if (!priv->sh->dev_cap.mpls_en) 3799 return rte_flow_error_set(error, ENOTSUP, 3800 RTE_FLOW_ERROR_TYPE_ITEM, 3801 item, "MPLS not supported or disabled in firmware configuration."); 3802 /* MPLS over UDP, GRE is allowed */ 3803 if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L4_UDP | 3804 MLX5_FLOW_LAYER_GRE | 3805 MLX5_FLOW_LAYER_GRE_KEY))) 3806 return rte_flow_error_set(error, EINVAL, 3807 RTE_FLOW_ERROR_TYPE_ITEM, 3808 item, "protocol filtering not compatible with MPLS layer"); 3809 /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */ 3810 if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) && 3811 !(item_flags & MLX5_FLOW_LAYER_GRE)) 3812 return rte_flow_error_set(error, ENOTSUP, 3813 RTE_FLOW_ERROR_TYPE_ITEM, item, 3814 "multiple tunnel layers not supported"); 3815 } else { 3816 /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */ 3817 if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) && 3818 !(item_flags & MLX5_FLOW_LAYER_MPLS)) 3819 return rte_flow_error_set(error, ENOTSUP, 3820 RTE_FLOW_ERROR_TYPE_ITEM, item, 3821 "multiple tunnel layers not supported"); 3822 } 3823 if (!mask) 3824 mask = nic_mask; 3825 ret = mlx5_flow_item_acceptable 3826 (dev, item, (const uint8_t *)mask, 3827 (const uint8_t *)nic_mask, 3828 sizeof(struct rte_flow_item_mpls), 3829 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 3830 if (ret < 0) 3831 return ret; 3832 return 0; 3833 #else 3834 return rte_flow_error_set(error, ENOTSUP, 3835 RTE_FLOW_ERROR_TYPE_ITEM, item, 3836 "MPLS is not supported by Verbs, please" 3837 " update."); 3838 #endif 3839 } 3840 3841 /** 3842 * Validate NVGRE item. 3843 * 3844 * @param[in] item 3845 * Item specification. 3846 * @param[in] item_flags 3847 * Bit flags to mark detected items. 3848 * @param[in] target_protocol 3849 * The next protocol in the previous item. 3850 * @param[out] error 3851 * Pointer to error structure. 3852 * 3853 * @return 3854 * 0 on success, a negative errno value otherwise and rte_errno is set. 3855 */ 3856 int 3857 mlx5_flow_validate_item_nvgre(const struct rte_eth_dev *dev, 3858 const struct rte_flow_item *item, 3859 uint64_t item_flags, 3860 uint8_t target_protocol, 3861 struct rte_flow_error *error) 3862 { 3863 const struct rte_flow_item_nvgre *mask = item->mask; 3864 int ret; 3865 3866 const struct rte_flow_item_nvgre hws_nic_mask = { 3867 .c_k_s_rsvd0_ver = RTE_BE16(0xB000), 3868 .protocol = RTE_BE16(UINT16_MAX), 3869 .tni = {0xff, 0xff, 0xff}, 3870 .flow_id = 0xff 3871 }; 3872 const struct rte_flow_item_nvgre *nic_mask = !mlx5_hws_active(dev) ? 
3873                 &rte_flow_item_nvgre_mask : &hws_nic_mask;
3874 
3875         if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
3876                 return rte_flow_error_set(error, EINVAL,
3877                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
3878                                           "protocol filtering not compatible"
3879                                           " with this GRE layer");
3880         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
3881                 return rte_flow_error_set(error, ENOTSUP,
3882                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
3883                                           "multiple tunnel layers not"
3884                                           " supported");
3885         if (!mlx5_hws_active(dev)) {
3886                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
3887                         return rte_flow_error_set(error, ENOTSUP,
3888                                                   RTE_FLOW_ERROR_TYPE_ITEM,
3889                                                   item, "L3 Layer is missing");
3890         }
3891         if (!mask)
3892                 mask = nic_mask;
3893         ret = mlx5_flow_item_acceptable
3894                 (dev, item, (const uint8_t *)mask,
3895                  (const uint8_t *)nic_mask,
3896                  sizeof(struct rte_flow_item_nvgre),
3897                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
3898         if (ret < 0)
3899                 return ret;
3900         return 0;
3901 }
3902 
3903 /**
3904  * Validate eCPRI item.
3905  *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
3906  * @param[in] item
3907  *   Item specification.
3908  * @param[in] item_flags
3909  *   Bit-fields that hold the items detected until now.
3910  * @param[in] last_item
3911  *   Previous validated item in the pattern items.
3912  * @param[in] ether_type
3913  *   Type in the ethernet layer header (including dot1q).
3914  * @param[in] acc_mask
3915  *   Acceptable mask; if NULL, the default internal mask
3916  *   will be used to check whether item fields are supported.
3917  * @param[out] error
3918  *   Pointer to error structure.
3919  *
3920  * @return
3921  *   0 on success, a negative errno value otherwise and rte_errno is set.
3922  */
3923 int
3924 mlx5_flow_validate_item_ecpri(const struct rte_eth_dev *dev,
3925                               const struct rte_flow_item *item,
3926                               uint64_t item_flags,
3927                               uint64_t last_item,
3928                               uint16_t ether_type,
3929                               const struct rte_flow_item_ecpri *acc_mask,
3930                               struct rte_flow_error *error)
3931 {
3932         const struct rte_flow_item_ecpri *mask = item->mask;
3933         const struct rte_flow_item_ecpri nic_mask = {
3934                 .hdr = {
3935                         .common = {
3936                                 .u32 =
3937                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
3938                                         .type = 0xFF,
3939                                         }).u32),
3940                         },
3941                         .dummy[0] = 0xFFFFFFFF,
3942                 },
3943         };
3944         const uint64_t outer_l2_vlan = (MLX5_FLOW_LAYER_OUTER_L2 |
3945                                         MLX5_FLOW_LAYER_OUTER_VLAN);
3946         struct rte_flow_item_ecpri mask_lo;
3947 
3948         if (!(last_item & outer_l2_vlan) &&
3949             last_item != MLX5_FLOW_LAYER_OUTER_L4_UDP)
3950                 return rte_flow_error_set(error, EINVAL,
3951                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
3952                                           "eCPRI can only follow L2/VLAN layer or UDP layer");
3953         if ((last_item & outer_l2_vlan) && ether_type &&
3954             ether_type != RTE_ETHER_TYPE_ECPRI)
3955                 return rte_flow_error_set(error, EINVAL,
3956                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
3957                                           "eCPRI cannot follow an L2/VLAN layer whose ether type is not 0xAEFE");
3958         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
3959                 return rte_flow_error_set(error, EINVAL,
3960                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
3961                                           "eCPRI with tunnel is not supported right now");
3962         if (item_flags & MLX5_FLOW_LAYER_OUTER_L3)
3963                 return rte_flow_error_set(error, ENOTSUP,
3964                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
3965                                           "multiple L3 layers not supported");
3966         else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP)
3967                 return rte_flow_error_set(error, EINVAL,
3968                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
3969                                           "eCPRI cannot coexist with a TCP layer");
3970         /* In specification, eCPRI could be over UDP layer.
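          * Matching eCPRI over UDP is rejected below until the PMD
          * implements it.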
          */
3971         else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)
3972                 return rte_flow_error_set(error, EINVAL,
3973                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
3974                                           "eCPRI over UDP layer is not supported yet");
3975         /* Mask for type field in common header could be zero. */
3976         if (!mask)
3977                 mask = &rte_flow_item_ecpri_mask;
3978         mask_lo.hdr.common.u32 = rte_be_to_cpu_32(mask->hdr.common.u32);
3979         /* Input mask is in big-endian format. */
3980         if (mask_lo.hdr.common.type != 0 && mask_lo.hdr.common.type != 0xff)
3981                 return rte_flow_error_set(error, EINVAL,
3982                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
3983                                           "partial mask is not supported for protocol");
3984         else if (mask_lo.hdr.common.type == 0 && mask->hdr.dummy[0] != 0)
3985                 return rte_flow_error_set(error, EINVAL,
3986                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
3987                                           "message header mask must be after a type mask");
3988         return mlx5_flow_item_acceptable(dev, item, (const uint8_t *)mask,
3989                                          acc_mask ? (const uint8_t *)acc_mask
3990                                                   : (const uint8_t *)&nic_mask,
3991                                          sizeof(struct rte_flow_item_ecpri),
3992                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
3993 }
3994 
3995 /**
3996  * Validate the NSH item.
3997  *
3998  * @param[in] dev
3999  *   Pointer to the Ethernet device on which the flow rule is being created.
 * @param[in] item
 *   Item specification.
4000  * @param[out] error
4001  *   Pointer to error structure.
4002  *
4003  * @return
4004  *   0 on success, a negative errno value otherwise and rte_errno is set.
4005  */
4006 int
4007 mlx5_flow_validate_item_nsh(struct rte_eth_dev *dev,
4008                             const struct rte_flow_item *item,
4009                             struct rte_flow_error *error)
4010 {
4011         struct mlx5_priv *priv = dev->data->dev_private;
4012 
4013         if (item->mask) {
4014                 return rte_flow_error_set(error, ENOTSUP,
4015                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
4016                                           "NSH fields matching is not supported");
4017         }
4018 
4019         if (!priv->sh->config.dv_flow_en) {
4020                 return rte_flow_error_set(error, ENOTSUP,
4021                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4022                                           NULL, "NSH support requires DV flow interface");
4023         }
4024 
4025         if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_vxlan_gpe_nsh) {
4026                 return rte_flow_error_set(error, ENOTSUP,
4027                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
4028                                           "Current FW does not support matching on NSH");
4029         }
4030 
4031         return 0;
4032 }
4033 
4034 static int
4035 flow_null_validate(struct rte_eth_dev *dev __rte_unused,
4036                    const struct rte_flow_attr *attr __rte_unused,
4037                    const struct rte_flow_item items[] __rte_unused,
4038                    const struct rte_flow_action actions[] __rte_unused,
4039                    bool external __rte_unused,
4040                    int hairpin __rte_unused,
4041                    struct rte_flow_error *error)
4042 {
4043         return rte_flow_error_set(error, ENOTSUP,
4044                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
4045 }
4046 
4047 static struct mlx5_flow *
4048 flow_null_prepare(struct rte_eth_dev *dev __rte_unused,
4049                   const struct rte_flow_attr *attr __rte_unused,
4050                   const struct rte_flow_item items[] __rte_unused,
4051                   const struct rte_flow_action actions[] __rte_unused,
4052                   struct rte_flow_error *error)
4053 {
4054         rte_flow_error_set(error, ENOTSUP,
4055                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
4056         return NULL;
4057 }
4058 
4059 static int
4060 flow_null_translate(struct rte_eth_dev *dev __rte_unused,
4061                     struct mlx5_flow *dev_flow __rte_unused,
4062                     const struct rte_flow_attr *attr __rte_unused,
4063                     const struct rte_flow_item items[] __rte_unused,
4064                     const struct rte_flow_action actions[] __rte_unused,
4065                     struct rte_flow_error *error)
4066 {
4067         return rte_flow_error_set(error, ENOTSUP,
4068                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
4069 }
4070 
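/*
 * Like the other flow_null_* stubs around it, each callback below either
 * reports ENOTSUP through rte_flow_error_set() or is a harmless no-op, so
 * a call dispatched through mlx5_flow_null_drv_ops fails gracefully
 * instead of dereferencing a missing driver backend.
 */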
4071 static int 4072 flow_null_apply(struct rte_eth_dev *dev __rte_unused, 4073 struct rte_flow *flow __rte_unused, 4074 struct rte_flow_error *error) 4075 { 4076 return rte_flow_error_set(error, ENOTSUP, 4077 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 4078 } 4079 4080 static void 4081 flow_null_remove(struct rte_eth_dev *dev __rte_unused, 4082 struct rte_flow *flow __rte_unused) 4083 { 4084 } 4085 4086 static void 4087 flow_null_destroy(struct rte_eth_dev *dev __rte_unused, 4088 struct rte_flow *flow __rte_unused) 4089 { 4090 } 4091 4092 static int 4093 flow_null_query(struct rte_eth_dev *dev __rte_unused, 4094 struct rte_flow *flow __rte_unused, 4095 const struct rte_flow_action *actions __rte_unused, 4096 void *data __rte_unused, 4097 struct rte_flow_error *error) 4098 { 4099 return rte_flow_error_set(error, ENOTSUP, 4100 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 4101 } 4102 4103 static int 4104 flow_null_sync_domain(struct rte_eth_dev *dev __rte_unused, 4105 uint32_t domains __rte_unused, 4106 uint32_t flags __rte_unused) 4107 { 4108 return 0; 4109 } 4110 4111 int 4112 flow_null_get_aged_flows(struct rte_eth_dev *dev, 4113 void **context __rte_unused, 4114 uint32_t nb_contexts __rte_unused, 4115 struct rte_flow_error *error __rte_unused) 4116 { 4117 DRV_LOG(ERR, "port %u get aged flows is not supported.", 4118 dev->data->port_id); 4119 return -ENOTSUP; 4120 } 4121 4122 uint32_t 4123 flow_null_counter_allocate(struct rte_eth_dev *dev) 4124 { 4125 DRV_LOG(ERR, "port %u counter allocate is not supported.", 4126 dev->data->port_id); 4127 return 0; 4128 } 4129 4130 void 4131 flow_null_counter_free(struct rte_eth_dev *dev, 4132 uint32_t counter __rte_unused) 4133 { 4134 DRV_LOG(ERR, "port %u counter free is not supported.", 4135 dev->data->port_id); 4136 } 4137 4138 int 4139 flow_null_counter_query(struct rte_eth_dev *dev, 4140 uint32_t counter __rte_unused, 4141 bool clear __rte_unused, 4142 uint64_t *pkts __rte_unused, 4143 uint64_t *bytes __rte_unused, 4144 void **action __rte_unused) 4145 { 4146 DRV_LOG(ERR, "port %u counter query is not supported.", 4147 dev->data->port_id); 4148 return -ENOTSUP; 4149 } 4150 4151 /* Void driver to protect from null pointer reference. */ 4152 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = { 4153 .validate = flow_null_validate, 4154 .prepare = flow_null_prepare, 4155 .translate = flow_null_translate, 4156 .apply = flow_null_apply, 4157 .remove = flow_null_remove, 4158 .destroy = flow_null_destroy, 4159 .query = flow_null_query, 4160 .sync_domain = flow_null_sync_domain, 4161 .get_aged_flows = flow_null_get_aged_flows, 4162 .counter_alloc = flow_null_counter_allocate, 4163 .counter_free = flow_null_counter_free, 4164 .counter_query = flow_null_counter_query 4165 }; 4166 4167 /** 4168 * Select flow driver type according to flow attributes and device 4169 * configuration. 4170 * 4171 * @param[in] dev 4172 * Pointer to the dev structure. 4173 * @param[in] attr 4174 * Pointer to the flow attributes. 4175 * 4176 * @return 4177 * flow driver type, MLX5_FLOW_TYPE_MAX otherwise. 4178 */ 4179 static enum mlx5_flow_drv_type 4180 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) 4181 { 4182 struct mlx5_priv *priv = dev->data->dev_private; 4183 /* The OS can determine first a specific flow type (DV, VERBS) */ 4184 enum mlx5_flow_drv_type type = mlx5_flow_os_get_type(); 4185 4186 if (type != MLX5_FLOW_TYPE_MAX) 4187 return type; 4188 /* 4189 * Currently when dv_flow_en == 2, only HW steering engine is 4190 * supported. 
New engines can also be chosen here if ready. 4191 */ 4192 if (priv->sh->config.dv_flow_en == 2) 4193 return MLX5_FLOW_TYPE_HW; 4194 if (!attr) 4195 return MLX5_FLOW_TYPE_MIN; 4196 /* If no OS specific type - continue with DV/VERBS selection */ 4197 if (attr->transfer && priv->sh->config.dv_esw_en) 4198 type = MLX5_FLOW_TYPE_DV; 4199 if (!attr->transfer) 4200 type = priv->sh->config.dv_flow_en ? MLX5_FLOW_TYPE_DV : 4201 MLX5_FLOW_TYPE_VERBS; 4202 return type; 4203 } 4204 4205 #define flow_get_drv_ops(type) flow_drv_ops[type] 4206 4207 /** 4208 * Flow driver validation API. This abstracts calling driver specific functions. 4209 * The type of flow driver is determined according to flow attributes. 4210 * 4211 * @param[in] dev 4212 * Pointer to the dev structure. 4213 * @param[in] attr 4214 * Pointer to the flow attributes. 4215 * @param[in] items 4216 * Pointer to the list of items. 4217 * @param[in] actions 4218 * Pointer to the list of actions. 4219 * @param[in] external 4220 * This flow rule is created by request external to PMD. 4221 * @param[in] hairpin 4222 * Number of hairpin TX actions, 0 means classic flow. 4223 * @param[out] error 4224 * Pointer to the error structure. 4225 * 4226 * @return 4227 * 0 on success, a negative errno value otherwise and rte_errno is set. 4228 */ 4229 static inline int 4230 flow_drv_validate(struct rte_eth_dev *dev, 4231 const struct rte_flow_attr *attr, 4232 const struct rte_flow_item items[], 4233 const struct rte_flow_action actions[], 4234 bool external, int hairpin, struct rte_flow_error *error) 4235 { 4236 const struct mlx5_flow_driver_ops *fops; 4237 enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr); 4238 4239 fops = flow_get_drv_ops(type); 4240 return fops->validate(dev, attr, items, actions, external, 4241 hairpin, error); 4242 } 4243 4244 /** 4245 * Flow driver preparation API. This abstracts calling driver specific 4246 * functions. Parent flow (rte_flow) should have driver type (drv_type). It 4247 * calculates the size of memory required for device flow, allocates the memory, 4248 * initializes the device flow and returns the pointer. 4249 * 4250 * @note 4251 * This function initializes device flow structure such as dv or verbs in 4252 * struct mlx5_flow. However, it is caller's responsibility to initialize the 4253 * rest. For example, adding returning device flow to flow->dev_flow list and 4254 * setting backward reference to the flow should be done out of this function. 4255 * layers field is not filled either. 4256 * 4257 * @param[in] dev 4258 * Pointer to the dev structure. 4259 * @param[in] attr 4260 * Pointer to the flow attributes. 4261 * @param[in] items 4262 * Pointer to the list of items. 4263 * @param[in] actions 4264 * Pointer to the list of actions. 4265 * @param[in] flow_idx 4266 * This memory pool index to the flow. 4267 * @param[out] error 4268 * Pointer to the error structure. 4269 * 4270 * @return 4271 * Pointer to device flow on success, otherwise NULL and rte_errno is set. 
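 *
 * Typical call sequence within this file (sketch only; error handling and
 * linking the returned device flow into the parent flow are elided):
 * @code
 * dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
 *                             flow_idx, error);
 * if (dev_flow)
 *         ret = flow_drv_translate(dev, dev_flow, attr, items, actions,
 *                                  error);
 * @endcode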
4272 */ 4273 static inline struct mlx5_flow * 4274 flow_drv_prepare(struct rte_eth_dev *dev, 4275 const struct rte_flow *flow, 4276 const struct rte_flow_attr *attr, 4277 const struct rte_flow_item items[], 4278 const struct rte_flow_action actions[], 4279 uint32_t flow_idx, 4280 struct rte_flow_error *error) 4281 { 4282 const struct mlx5_flow_driver_ops *fops; 4283 enum mlx5_flow_drv_type type = flow->drv_type; 4284 struct mlx5_flow *mlx5_flow = NULL; 4285 4286 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 4287 fops = flow_get_drv_ops(type); 4288 mlx5_flow = fops->prepare(dev, attr, items, actions, error); 4289 if (mlx5_flow) 4290 mlx5_flow->flow_idx = flow_idx; 4291 return mlx5_flow; 4292 } 4293 4294 /** 4295 * Flow driver translation API. This abstracts calling driver specific 4296 * functions. Parent flow (rte_flow) should have driver type (drv_type). It 4297 * translates a generic flow into a driver flow. flow_drv_prepare() must 4298 * precede. 4299 * 4300 * @note 4301 * dev_flow->layers could be filled as a result of parsing during translation 4302 * if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled 4303 * if necessary. As a flow can have multiple dev_flows by RSS flow expansion, 4304 * flow->actions could be overwritten even though all the expanded dev_flows 4305 * have the same actions. 4306 * 4307 * @param[in] dev 4308 * Pointer to the rte dev structure. 4309 * @param[in, out] dev_flow 4310 * Pointer to the mlx5 flow. 4311 * @param[in] attr 4312 * Pointer to the flow attributes. 4313 * @param[in] items 4314 * Pointer to the list of items. 4315 * @param[in] actions 4316 * Pointer to the list of actions. 4317 * @param[out] error 4318 * Pointer to the error structure. 4319 * 4320 * @return 4321 * 0 on success, a negative errno value otherwise and rte_errno is set. 4322 */ 4323 static inline int 4324 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow, 4325 const struct rte_flow_attr *attr, 4326 const struct rte_flow_item items[], 4327 const struct rte_flow_action actions[], 4328 struct rte_flow_error *error) 4329 { 4330 const struct mlx5_flow_driver_ops *fops; 4331 enum mlx5_flow_drv_type type = dev_flow->flow->drv_type; 4332 4333 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 4334 fops = flow_get_drv_ops(type); 4335 return fops->translate(dev, dev_flow, attr, items, actions, error); 4336 } 4337 4338 /** 4339 * Flow driver apply API. This abstracts calling driver specific functions. 4340 * Parent flow (rte_flow) should have driver type (drv_type). It applies 4341 * translated driver flows on to device. flow_drv_translate() must precede. 4342 * 4343 * @param[in] dev 4344 * Pointer to Ethernet device structure. 4345 * @param[in, out] flow 4346 * Pointer to flow structure. 4347 * @param[out] error 4348 * Pointer to error structure. 4349 * 4350 * @return 4351 * 0 on success, a negative errno value otherwise and rte_errno is set. 4352 */ 4353 static inline int 4354 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, 4355 struct rte_flow_error *error) 4356 { 4357 const struct mlx5_flow_driver_ops *fops; 4358 enum mlx5_flow_drv_type type = flow->drv_type; 4359 4360 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 4361 fops = flow_get_drv_ops(type); 4362 return fops->apply(dev, flow, error); 4363 } 4364 4365 /** 4366 * Flow driver destroy API. This abstracts calling driver specific functions. 4367 * Parent flow (rte_flow) should have driver type (drv_type). 
It removes a flow 4368 * on device and releases resources of the flow. 4369 * 4370 * @param[in] dev 4371 * Pointer to Ethernet device. 4372 * @param[in, out] flow 4373 * Pointer to flow structure. 4374 */ 4375 static inline void 4376 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) 4377 { 4378 const struct mlx5_flow_driver_ops *fops; 4379 enum mlx5_flow_drv_type type = flow->drv_type; 4380 4381 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 4382 fops = flow_get_drv_ops(type); 4383 fops->destroy(dev, flow); 4384 } 4385 4386 /** 4387 * Flow driver find RSS policy tbl API. This abstracts calling driver 4388 * specific functions. Parent flow (rte_flow) should have driver 4389 * type (drv_type). It will find the RSS policy table that has the rss_desc. 4390 * 4391 * @param[in] dev 4392 * Pointer to Ethernet device. 4393 * @param[in, out] flow 4394 * Pointer to flow structure. 4395 * @param[in] policy 4396 * Pointer to meter policy table. 4397 * @param[in] rss_desc 4398 * Pointer to rss_desc 4399 */ 4400 static struct mlx5_flow_meter_sub_policy * 4401 flow_drv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev, 4402 struct rte_flow *flow, 4403 struct mlx5_flow_meter_policy *policy, 4404 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]) 4405 { 4406 const struct mlx5_flow_driver_ops *fops; 4407 enum mlx5_flow_drv_type type = flow->drv_type; 4408 4409 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 4410 fops = flow_get_drv_ops(type); 4411 return fops->meter_sub_policy_rss_prepare(dev, policy, rss_desc); 4412 } 4413 4414 /** 4415 * Flow driver color tag rule API. This abstracts calling driver 4416 * specific functions. Parent flow (rte_flow) should have driver 4417 * type (drv_type). It will create the color tag rules in hierarchy meter. 4418 * 4419 * @param[in] dev 4420 * Pointer to Ethernet device. 4421 * @param[in, out] flow 4422 * Pointer to flow structure. 4423 * @param[in] fm 4424 * Pointer to flow meter structure. 4425 * @param[in] src_port 4426 * The src port this extra rule should use. 4427 * @param[in] item 4428 * The src port id match item. 4429 * @param[out] error 4430 * Pointer to error structure. 4431 */ 4432 static int 4433 flow_drv_mtr_hierarchy_rule_create(struct rte_eth_dev *dev, 4434 struct rte_flow *flow, 4435 struct mlx5_flow_meter_info *fm, 4436 int32_t src_port, 4437 const struct rte_flow_item *item, 4438 struct rte_flow_error *error) 4439 { 4440 const struct mlx5_flow_driver_ops *fops; 4441 enum mlx5_flow_drv_type type = flow->drv_type; 4442 4443 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 4444 fops = flow_get_drv_ops(type); 4445 return fops->meter_hierarchy_rule_create(dev, fm, 4446 src_port, item, error); 4447 } 4448 4449 /** 4450 * Get RSS action from the action list. 4451 * 4452 * @param[in] dev 4453 * Pointer to Ethernet device. 4454 * @param[in] actions 4455 * Pointer to the list of actions. 4456 * @param[in] flow 4457 * Parent flow structure pointer. 4458 * 4459 * @return 4460 * Pointer to the RSS action if exist, else return NULL. 
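 *
 * For example, with the illustrative action list below this function
 * returns a pointer to rss_conf (a caller-defined
 * struct rte_flow_action_rss):
 * @code
 * struct rte_flow_action actions[] = {
 *         { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *         { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss_conf },
 *         { .type = RTE_FLOW_ACTION_TYPE_END, },
 * };
 * @endcode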
4461 */ 4462 static const struct rte_flow_action_rss* 4463 flow_get_rss_action(struct rte_eth_dev *dev, 4464 const struct rte_flow_action actions[]) 4465 { 4466 struct mlx5_priv *priv = dev->data->dev_private; 4467 const struct rte_flow_action_rss *rss = NULL; 4468 struct mlx5_meter_policy_action_container *acg; 4469 struct mlx5_meter_policy_action_container *acy; 4470 4471 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 4472 switch (actions->type) { 4473 case RTE_FLOW_ACTION_TYPE_RSS: 4474 rss = actions->conf; 4475 break; 4476 case RTE_FLOW_ACTION_TYPE_SAMPLE: 4477 { 4478 const struct rte_flow_action_sample *sample = 4479 actions->conf; 4480 const struct rte_flow_action *act = sample->actions; 4481 for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) 4482 if (act->type == RTE_FLOW_ACTION_TYPE_RSS) 4483 rss = act->conf; 4484 break; 4485 } 4486 case RTE_FLOW_ACTION_TYPE_METER: 4487 { 4488 uint32_t mtr_idx; 4489 struct mlx5_flow_meter_info *fm; 4490 struct mlx5_flow_meter_policy *policy; 4491 const struct rte_flow_action_meter *mtr = actions->conf; 4492 4493 fm = mlx5_flow_meter_find(priv, mtr->mtr_id, &mtr_idx); 4494 if (fm && !fm->def_policy) { 4495 policy = mlx5_flow_meter_policy_find(dev, 4496 fm->policy_id, NULL); 4497 MLX5_ASSERT(policy); 4498 if (policy->is_hierarchy) { 4499 policy = 4500 mlx5_flow_meter_hierarchy_get_final_policy(dev, 4501 policy); 4502 if (!policy) 4503 return NULL; 4504 } 4505 if (policy->is_rss) { 4506 acg = 4507 &policy->act_cnt[RTE_COLOR_GREEN]; 4508 acy = 4509 &policy->act_cnt[RTE_COLOR_YELLOW]; 4510 if (acg->fate_action == 4511 MLX5_FLOW_FATE_SHARED_RSS) 4512 rss = acg->rss->conf; 4513 else if (acy->fate_action == 4514 MLX5_FLOW_FATE_SHARED_RSS) 4515 rss = acy->rss->conf; 4516 } 4517 } 4518 break; 4519 } 4520 default: 4521 break; 4522 } 4523 } 4524 return rss; 4525 } 4526 4527 /** 4528 * Get ASO age action by index. 4529 * 4530 * @param[in] dev 4531 * Pointer to the Ethernet device structure. 4532 * @param[in] age_idx 4533 * Index to the ASO age action. 4534 * 4535 * @return 4536 * The specified ASO age action. 4537 */ 4538 struct mlx5_aso_age_action* 4539 flow_aso_age_get_by_idx(struct rte_eth_dev *dev, uint32_t age_idx) 4540 { 4541 uint16_t pool_idx = age_idx & UINT16_MAX; 4542 uint16_t offset = (age_idx >> 16) & UINT16_MAX; 4543 struct mlx5_priv *priv = dev->data->dev_private; 4544 struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; 4545 struct mlx5_aso_age_pool *pool; 4546 4547 rte_rwlock_read_lock(&mng->resize_rwl); 4548 pool = mng->pools[pool_idx]; 4549 rte_rwlock_read_unlock(&mng->resize_rwl); 4550 return &pool->actions[offset - 1]; 4551 } 4552 4553 /* maps indirect action to translated direct in some actions array */ 4554 struct mlx5_translated_action_handle { 4555 struct rte_flow_action_handle *action; /**< Indirect action handle. */ 4556 int index; /**< Index in related array of rte_flow_action. */ 4557 }; 4558 4559 /** 4560 * Translates actions of type RTE_FLOW_ACTION_TYPE_INDIRECT to related 4561 * direct action if translation possible. 4562 * This functionality used to run same execution path for both direct and 4563 * indirect actions on flow create. All necessary preparations for indirect 4564 * action handling should be performed on *handle* actions list returned 4565 * from this call. 4566 * 4567 * @param[in] dev 4568 * Pointer to Ethernet device. 4569 * @param[in] actions 4570 * List of actions to translate. 4571 * @param[out] handle 4572 * List to store translated indirect action object handles. 
4573 * @param[in, out] indir_n 4574 * Size of *handle* array. On return should be updated with number of 4575 * indirect actions retrieved from the *actions* list. 4576 * @param[out] translated_actions 4577 * List of actions where all indirect actions were translated to direct 4578 * if possible. NULL if no translation took place. 4579 * @param[out] error 4580 * Pointer to the error structure. 4581 * 4582 * @return 4583 * 0 on success, a negative errno value otherwise and rte_errno is set. 4584 */ 4585 static int 4586 flow_action_handles_translate(struct rte_eth_dev *dev, 4587 const struct rte_flow_action actions[], 4588 struct mlx5_translated_action_handle *handle, 4589 int *indir_n, 4590 struct rte_flow_action **translated_actions, 4591 struct rte_flow_error *error) 4592 { 4593 struct mlx5_priv *priv = dev->data->dev_private; 4594 struct rte_flow_action *translated = NULL; 4595 size_t actions_size; 4596 int n; 4597 int copied_n = 0; 4598 struct mlx5_translated_action_handle *handle_end = NULL; 4599 4600 for (n = 0; actions[n].type != RTE_FLOW_ACTION_TYPE_END; n++) { 4601 if (actions[n].type != RTE_FLOW_ACTION_TYPE_INDIRECT) 4602 continue; 4603 if (copied_n == *indir_n) { 4604 return rte_flow_error_set 4605 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_NUM, 4606 NULL, "too many shared actions"); 4607 } 4608 rte_memcpy(&handle[copied_n].action, &actions[n].conf, 4609 sizeof(actions[n].conf)); 4610 handle[copied_n].index = n; 4611 copied_n++; 4612 } 4613 n++; 4614 *indir_n = copied_n; 4615 if (!copied_n) 4616 return 0; 4617 actions_size = sizeof(struct rte_flow_action) * n; 4618 translated = mlx5_malloc(MLX5_MEM_ZERO, actions_size, 0, SOCKET_ID_ANY); 4619 if (!translated) { 4620 rte_errno = ENOMEM; 4621 return -ENOMEM; 4622 } 4623 memcpy(translated, actions, actions_size); 4624 for (handle_end = handle + copied_n; handle < handle_end; handle++) { 4625 struct mlx5_shared_action_rss *shared_rss; 4626 uint32_t act_idx = (uint32_t)(uintptr_t)handle->action; 4627 uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET; 4628 uint32_t idx = act_idx & 4629 ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1); 4630 4631 switch (type) { 4632 case MLX5_INDIRECT_ACTION_TYPE_RSS: 4633 shared_rss = mlx5_ipool_get 4634 (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx); 4635 translated[handle->index].type = 4636 RTE_FLOW_ACTION_TYPE_RSS; 4637 translated[handle->index].conf = 4638 &shared_rss->origin; 4639 break; 4640 case MLX5_INDIRECT_ACTION_TYPE_COUNT: 4641 translated[handle->index].type = 4642 (enum rte_flow_action_type) 4643 MLX5_RTE_FLOW_ACTION_TYPE_COUNT; 4644 translated[handle->index].conf = (void *)(uintptr_t)idx; 4645 break; 4646 case MLX5_INDIRECT_ACTION_TYPE_METER_MARK: 4647 translated[handle->index].type = 4648 (enum rte_flow_action_type) 4649 MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK; 4650 translated[handle->index].conf = (void *)(uintptr_t)idx; 4651 break; 4652 case MLX5_INDIRECT_ACTION_TYPE_AGE: 4653 if (priv->sh->flow_hit_aso_en) { 4654 translated[handle->index].type = 4655 (enum rte_flow_action_type) 4656 MLX5_RTE_FLOW_ACTION_TYPE_AGE; 4657 translated[handle->index].conf = 4658 (void *)(uintptr_t)idx; 4659 break; 4660 } 4661 /* Fall-through */ 4662 case MLX5_INDIRECT_ACTION_TYPE_CT: 4663 if (priv->sh->ct_aso_en) { 4664 translated[handle->index].type = 4665 RTE_FLOW_ACTION_TYPE_CONNTRACK; 4666 translated[handle->index].conf = 4667 (void *)(uintptr_t)idx; 4668 break; 4669 } 4670 /* Fall-through */ 4671 default: 4672 mlx5_free(translated); 4673 return rte_flow_error_set 4674 (error, EINVAL, 
RTE_FLOW_ERROR_TYPE_ACTION, 4675 NULL, "invalid indirect action type"); 4676 } 4677 } 4678 *translated_actions = translated; 4679 return 0; 4680 } 4681 4682 /** 4683 * Get Shared RSS action from the action list. 4684 * 4685 * @param[in] dev 4686 * Pointer to Ethernet device. 4687 * @param[in] shared 4688 * Pointer to the list of actions. 4689 * @param[in] shared_n 4690 * Actions list length. 4691 * 4692 * @return 4693 * The MLX5 RSS action ID if exists, otherwise return 0. 4694 */ 4695 static uint32_t 4696 flow_get_shared_rss_action(struct rte_eth_dev *dev, 4697 struct mlx5_translated_action_handle *handle, 4698 int shared_n) 4699 { 4700 struct mlx5_translated_action_handle *handle_end; 4701 struct mlx5_priv *priv = dev->data->dev_private; 4702 struct mlx5_shared_action_rss *shared_rss; 4703 4704 4705 for (handle_end = handle + shared_n; handle < handle_end; handle++) { 4706 uint32_t act_idx = (uint32_t)(uintptr_t)handle->action; 4707 uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET; 4708 uint32_t idx = act_idx & 4709 ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1); 4710 switch (type) { 4711 case MLX5_INDIRECT_ACTION_TYPE_RSS: 4712 shared_rss = mlx5_ipool_get 4713 (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], 4714 idx); 4715 rte_atomic_fetch_add_explicit(&shared_rss->refcnt, 1, 4716 rte_memory_order_relaxed); 4717 return idx; 4718 default: 4719 break; 4720 } 4721 } 4722 return 0; 4723 } 4724 4725 static unsigned int 4726 find_graph_root(uint32_t rss_level) 4727 { 4728 return rss_level < 2 ? MLX5_EXPANSION_ROOT : 4729 MLX5_EXPANSION_ROOT_OUTER; 4730 } 4731 4732 /** 4733 * Get layer flags from the prefix flow. 4734 * 4735 * Some flows may be split to several subflows, the prefix subflow gets the 4736 * match items and the suffix sub flow gets the actions. 4737 * Some actions need the user defined match item flags to get the detail for 4738 * the action. 4739 * This function helps the suffix flow to get the item layer flags from prefix 4740 * subflow. 4741 * 4742 * @param[in] dev_flow 4743 * Pointer the created prefix subflow. 4744 * 4745 * @return 4746 * The layers get from prefix subflow. 4747 */ 4748 static inline uint64_t 4749 flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow) 4750 { 4751 uint64_t layers = 0; 4752 4753 /* 4754 * Layers bits could be localization, but usually the compiler will 4755 * help to do the optimization work for source code. 4756 * If no decap actions, use the layers directly. 4757 */ 4758 if (!(dev_flow->act_flags & MLX5_FLOW_ACTION_DECAP)) 4759 return dev_flow->handle->layers; 4760 /* Convert L3 layers with decap action. */ 4761 if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4) 4762 layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4; 4763 else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6) 4764 layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6; 4765 /* Convert L4 layers with decap action. */ 4766 if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP) 4767 layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP; 4768 else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP) 4769 layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP; 4770 return layers; 4771 } 4772 4773 /** 4774 * Get metadata split action information. 4775 * 4776 * @param[in] actions 4777 * Pointer to the list of actions. 4778 * @param[out] qrss 4779 * Pointer to the return pointer. 4780 * @param[out] qrss_type 4781 * Pointer to the action type to return. RTE_FLOW_ACTION_TYPE_END is returned 4782 * if no QUEUE/RSS is found. 
4783 * @param[out] encap_idx 4784 * Pointer to the index of the encap action if exists, otherwise the last 4785 * action index. 4786 * 4787 * @return 4788 * Total number of actions. 4789 */ 4790 static int 4791 flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[], 4792 const struct rte_flow_action **qrss, 4793 int *encap_idx) 4794 { 4795 const struct rte_flow_action_raw_encap *raw_encap; 4796 int actions_n = 0; 4797 int raw_decap_idx = -1; 4798 4799 *encap_idx = -1; 4800 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 4801 switch (actions->type) { 4802 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 4803 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 4804 *encap_idx = actions_n; 4805 break; 4806 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 4807 raw_decap_idx = actions_n; 4808 break; 4809 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 4810 raw_encap = actions->conf; 4811 if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) 4812 *encap_idx = raw_decap_idx != -1 ? 4813 raw_decap_idx : actions_n; 4814 break; 4815 case RTE_FLOW_ACTION_TYPE_QUEUE: 4816 case RTE_FLOW_ACTION_TYPE_RSS: 4817 *qrss = actions; 4818 break; 4819 default: 4820 break; 4821 } 4822 actions_n++; 4823 } 4824 if (*encap_idx == -1) 4825 *encap_idx = actions_n; 4826 /* Count RTE_FLOW_ACTION_TYPE_END. */ 4827 return actions_n + 1; 4828 } 4829 4830 /** 4831 * Check if the action will change packet. 4832 * 4833 * @param dev 4834 * Pointer to Ethernet device. 4835 * @param[in] type 4836 * action type. 4837 * 4838 * @return 4839 * true if action will change packet, false otherwise. 4840 */ 4841 static bool flow_check_modify_action_type(struct rte_eth_dev *dev, 4842 enum rte_flow_action_type type) 4843 { 4844 struct mlx5_priv *priv = dev->data->dev_private; 4845 4846 switch (type) { 4847 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: 4848 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: 4849 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: 4850 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: 4851 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: 4852 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: 4853 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: 4854 case RTE_FLOW_ACTION_TYPE_SET_TP_DST: 4855 case RTE_FLOW_ACTION_TYPE_DEC_TTL: 4856 case RTE_FLOW_ACTION_TYPE_SET_TTL: 4857 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ: 4858 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ: 4859 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK: 4860 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK: 4861 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP: 4862 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: 4863 case RTE_FLOW_ACTION_TYPE_SET_META: 4864 case RTE_FLOW_ACTION_TYPE_SET_TAG: 4865 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: 4866 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 4867 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 4868 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: 4869 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 4870 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: 4871 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 4872 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: 4873 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 4874 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 4875 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD: 4876 return true; 4877 case RTE_FLOW_ACTION_TYPE_FLAG: 4878 case RTE_FLOW_ACTION_TYPE_MARK: 4879 if (priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && 4880 priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_META32_HWS) 4881 return true; 4882 else 4883 return false; 4884 default: 4885 return false; 4886 } 4887 } 4888 4889 /** 4890 * Check meter action from the action list. 4891 * 4892 * @param dev 4893 * Pointer to Ethernet device. 4894 * @param[in] actions 4895 * Pointer to the list of actions. 
4896 * @param[out] has_mtr 4897 * Pointer to the meter exist flag. 4898 * @param[out] has_modify 4899 * Pointer to the flag showing there's packet change action. 4900 * @param[out] meter_id 4901 * Pointer to the meter id. 4902 * 4903 * @return 4904 * Total number of actions. 4905 */ 4906 static int 4907 flow_check_meter_action(struct rte_eth_dev *dev, 4908 const struct rte_flow_action actions[], 4909 bool *has_mtr, bool *has_modify, uint32_t *meter_id) 4910 { 4911 const struct rte_flow_action_meter *mtr = NULL; 4912 int actions_n = 0; 4913 4914 MLX5_ASSERT(has_mtr); 4915 *has_mtr = false; 4916 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 4917 switch (actions->type) { 4918 case RTE_FLOW_ACTION_TYPE_METER: 4919 mtr = actions->conf; 4920 *meter_id = mtr->mtr_id; 4921 *has_mtr = true; 4922 break; 4923 default: 4924 break; 4925 } 4926 if (!*has_mtr) 4927 *has_modify |= flow_check_modify_action_type(dev, 4928 actions->type); 4929 actions_n++; 4930 } 4931 /* Count RTE_FLOW_ACTION_TYPE_END. */ 4932 return actions_n + 1; 4933 } 4934 4935 /** 4936 * Check if the flow should be split due to hairpin. 4937 * The reason for the split is that in current HW we can't 4938 * support encap and push-vlan on Rx, so if a flow contains 4939 * these actions we move it to Tx. 4940 * 4941 * @param dev 4942 * Pointer to Ethernet device. 4943 * @param[in] attr 4944 * Flow rule attributes. 4945 * @param[in] actions 4946 * Associated actions (list terminated by the END action). 4947 * 4948 * @return 4949 * > 0 the number of actions and the flow should be split, 4950 * 0 when no split required. 4951 */ 4952 static int 4953 flow_check_hairpin_split(struct rte_eth_dev *dev, 4954 const struct rte_flow_attr *attr, 4955 const struct rte_flow_action actions[]) 4956 { 4957 int queue_action = 0; 4958 int action_n = 0; 4959 int split = 0; 4960 int push_vlan = 0; 4961 const struct rte_flow_action_queue *queue; 4962 const struct rte_flow_action_rss *rss; 4963 const struct rte_flow_action_raw_encap *raw_encap; 4964 const struct rte_eth_hairpin_conf *conf; 4965 4966 if (!attr->ingress) 4967 return 0; 4968 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 4969 if (actions->type == RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN) 4970 push_vlan = 1; 4971 switch (actions->type) { 4972 case RTE_FLOW_ACTION_TYPE_QUEUE: 4973 queue = actions->conf; 4974 if (queue == NULL) 4975 return 0; 4976 conf = mlx5_rxq_get_hairpin_conf(dev, queue->index); 4977 if (conf == NULL || conf->tx_explicit != 0) 4978 return 0; 4979 queue_action = 1; 4980 action_n++; 4981 break; 4982 case RTE_FLOW_ACTION_TYPE_RSS: 4983 rss = actions->conf; 4984 if (rss == NULL || rss->queue_num == 0) 4985 return 0; 4986 conf = mlx5_rxq_get_hairpin_conf(dev, rss->queue[0]); 4987 if (conf == NULL || conf->tx_explicit != 0) 4988 return 0; 4989 queue_action = 1; 4990 action_n++; 4991 break; 4992 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 4993 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 4994 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 4995 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: 4996 split++; 4997 action_n++; 4998 break; 4999 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 5000 if (push_vlan) 5001 split++; 5002 action_n++; 5003 break; 5004 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 5005 raw_encap = actions->conf; 5006 if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) 5007 split++; 5008 action_n++; 5009 break; 5010 default: 5011 action_n++; 5012 break; 5013 } 5014 } 5015 if (split && queue_action) 5016 return action_n; 5017 return 0; 5018 } 5019 5020 int 5021 
flow_dv_mreg_match_cb(void *tool_ctx __rte_unused, 5022 struct mlx5_list_entry *entry, void *cb_ctx) 5023 { 5024 struct mlx5_flow_cb_ctx *ctx = cb_ctx; 5025 struct mlx5_flow_mreg_copy_resource *mcp_res = 5026 container_of(entry, typeof(*mcp_res), hlist_ent); 5027 5028 return mcp_res->mark_id != *(uint32_t *)(ctx->data); 5029 } 5030 5031 struct mlx5_list_entry * 5032 flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx) 5033 { 5034 struct rte_eth_dev *dev = tool_ctx; 5035 struct mlx5_priv *priv = dev->data->dev_private; 5036 struct mlx5_flow_cb_ctx *ctx = cb_ctx; 5037 struct mlx5_flow_mreg_copy_resource *mcp_res; 5038 struct rte_flow_error *error = ctx->error; 5039 uint32_t idx = 0; 5040 int ret; 5041 uint32_t mark_id = *(uint32_t *)(ctx->data); 5042 struct rte_flow_attr attr = { 5043 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, 5044 .ingress = 1, 5045 }; 5046 struct mlx5_rte_flow_item_tag tag_spec = { 5047 .data = mark_id, 5048 }; 5049 struct rte_flow_item items[] = { 5050 [1] = { .type = RTE_FLOW_ITEM_TYPE_END, }, 5051 }; 5052 struct rte_flow_action_mark ftag = { 5053 .id = mark_id, 5054 }; 5055 struct mlx5_flow_action_copy_mreg cp_mreg = { 5056 .dst = REG_B, 5057 .src = REG_NON, 5058 }; 5059 struct rte_flow_action_jump jump = { 5060 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, 5061 }; 5062 struct rte_flow_action actions[] = { 5063 [3] = { .type = RTE_FLOW_ACTION_TYPE_END, }, 5064 }; 5065 5066 /* Fill the register fields in the flow. */ 5067 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); 5068 if (ret < 0) 5069 return NULL; 5070 tag_spec.id = ret; 5071 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error); 5072 if (ret < 0) 5073 return NULL; 5074 cp_mreg.src = ret; 5075 /* Provide the full width of FLAG specific value. */ 5076 if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT)) 5077 tag_spec.data = MLX5_FLOW_MARK_DEFAULT; 5078 /* Build a new flow. */ 5079 if (mark_id != MLX5_DEFAULT_COPY_ID) { 5080 items[0] = (struct rte_flow_item){ 5081 .type = (enum rte_flow_item_type) 5082 MLX5_RTE_FLOW_ITEM_TYPE_TAG, 5083 .spec = &tag_spec, 5084 }; 5085 items[1] = (struct rte_flow_item){ 5086 .type = RTE_FLOW_ITEM_TYPE_END, 5087 }; 5088 actions[0] = (struct rte_flow_action){ 5089 .type = (enum rte_flow_action_type) 5090 MLX5_RTE_FLOW_ACTION_TYPE_MARK, 5091 .conf = &ftag, 5092 }; 5093 actions[1] = (struct rte_flow_action){ 5094 .type = (enum rte_flow_action_type) 5095 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 5096 .conf = &cp_mreg, 5097 }; 5098 actions[2] = (struct rte_flow_action){ 5099 .type = RTE_FLOW_ACTION_TYPE_JUMP, 5100 .conf = &jump, 5101 }; 5102 actions[3] = (struct rte_flow_action){ 5103 .type = RTE_FLOW_ACTION_TYPE_END, 5104 }; 5105 } else { 5106 /* Default rule, wildcard match. */ 5107 attr.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR; 5108 items[0] = (struct rte_flow_item){ 5109 .type = RTE_FLOW_ITEM_TYPE_END, 5110 }; 5111 actions[0] = (struct rte_flow_action){ 5112 .type = (enum rte_flow_action_type) 5113 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 5114 .conf = &cp_mreg, 5115 }; 5116 actions[1] = (struct rte_flow_action){ 5117 .type = RTE_FLOW_ACTION_TYPE_JUMP, 5118 .conf = &jump, 5119 }; 5120 actions[2] = (struct rte_flow_action){ 5121 .type = RTE_FLOW_ACTION_TYPE_END, 5122 }; 5123 } 5124 /* Build a new entry. */ 5125 mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx); 5126 if (!mcp_res) { 5127 rte_errno = ENOMEM; 5128 return NULL; 5129 } 5130 mcp_res->idx = idx; 5131 mcp_res->mark_id = mark_id; 5132 /* 5133 * The copy Flows are not included in any list. 
They
5134          * are referenced from other Flows and cannot be
5135          * applied, removed, or deleted in arbitrary order
5136          * by list traversal.
5137          */
5138         mcp_res->rix_flow = mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_MCP,
5139                                                   &attr, items, actions, false, error);
5140         if (!mcp_res->rix_flow) {
5141                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], idx);
5142                 return NULL;
5143         }
5144         return &mcp_res->hlist_ent;
5145 }
5146 
5147 struct mlx5_list_entry *
5148 flow_dv_mreg_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
5149                       void *cb_ctx __rte_unused)
5150 {
5151         struct rte_eth_dev *dev = tool_ctx;
5152         struct mlx5_priv *priv = dev->data->dev_private;
5153         struct mlx5_flow_mreg_copy_resource *mcp_res;
5154         uint32_t idx = 0;
5155 
5156         mcp_res = mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
5157         if (!mcp_res) {
5158                 rte_errno = ENOMEM;
5159                 return NULL;
5160         }
5161         memcpy(mcp_res, oentry, sizeof(*mcp_res));
5162         mcp_res->idx = idx;
5163         return &mcp_res->hlist_ent;
5164 }
5165 
5166 void
5167 flow_dv_mreg_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
5168 {
5169         struct mlx5_flow_mreg_copy_resource *mcp_res =
5170                 container_of(entry, typeof(*mcp_res), hlist_ent);
5171         struct rte_eth_dev *dev = tool_ctx;
5172         struct mlx5_priv *priv = dev->data->dev_private;
5173 
5174         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
5175 }
5176 
5177 /**
5178  * Add a flow of copying flow metadata registers in RX_CP_TBL.
5179  *
5180  * As mark_id is unique, if there's already a registered flow for the mark_id,
5181  * return by increasing the reference counter of the resource. Otherwise, create
5182  * the resource (mcp_res) and flow.
5183  *
5184  * Flow looks like,
5185  *   - If ingress port is ANY and reg_c[1] is mark_id,
5186  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
5187  *
5188  * For default flow (zero mark_id), flow is like,
5189  *   - If ingress port is ANY,
5190  *     reg_b := reg_c[0] and jump to RX_ACT_TBL.
5191  *
5192  * @param dev
5193  *   Pointer to Ethernet device.
5194  * @param mark_id
5195  *   ID of MARK action, zero means default flow for META.
5196  * @param[out] error
5197  *   Perform verbose error reporting if not NULL.
5198  *
5199  * @return
5200  *   Associated resource on success, NULL otherwise and rte_errno is set.
5201  */
5202 static struct mlx5_flow_mreg_copy_resource *
5203 flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
5204                           struct rte_flow_error *error)
5205 {
5206         struct mlx5_priv *priv = dev->data->dev_private;
5207         struct mlx5_list_entry *entry;
5208         struct mlx5_flow_cb_ctx ctx = {
5209                 .dev = dev,
5210                 .error = error,
5211                 .data = &mark_id,
5212         };
5213 
5214         /* Check if already registered. */
5215         MLX5_ASSERT(priv->sh->mreg_cp_tbl);
5216         entry = mlx5_hlist_register(priv->sh->mreg_cp_tbl, mark_id, &ctx);
5217         if (!entry)
5218                 return NULL;
5219         return container_of(entry, struct mlx5_flow_mreg_copy_resource,
5220                             hlist_ent);
5221 }
5222 
5223 void
5224 flow_dv_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
5225 {
5226         struct mlx5_flow_mreg_copy_resource *mcp_res =
5227                 container_of(entry, typeof(*mcp_res), hlist_ent);
5228         struct rte_eth_dev *dev = tool_ctx;
5229         struct mlx5_priv *priv = dev->data->dev_private;
5230 
5231         MLX5_ASSERT(mcp_res->rix_flow);
5232         mlx5_flow_list_destroy(dev, MLX5_FLOW_TYPE_MCP, mcp_res->rix_flow);
5233         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
5234 }
5235 
5236 /**
5237  * Release flow in RX_CP_TBL.
5238  *
5239  * @param dev
5240  *   Pointer to Ethernet device.
5241  * @param flow
5242  *   Parent flow for which copying is provided.
5243  */
5244 static void
5245 flow_mreg_del_copy_action(struct rte_eth_dev *dev,
5246                           struct rte_flow *flow)
5247 {
5248         struct mlx5_flow_mreg_copy_resource *mcp_res;
5249         struct mlx5_priv *priv = dev->data->dev_private;
5250 
5251         if (!flow->rix_mreg_copy)
5252                 return;
5253         mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
5254                                  flow->rix_mreg_copy);
5255         if (!mcp_res || !priv->sh->mreg_cp_tbl)
5256                 return;
5257         MLX5_ASSERT(mcp_res->rix_flow);
5258         mlx5_hlist_unregister(priv->sh->mreg_cp_tbl, &mcp_res->hlist_ent);
5259         flow->rix_mreg_copy = 0;
5260 }
5261 
5262 /**
5263  * Remove the default copy action from RX_CP_TBL.
5264  *
5265  * This function is called in mlx5_dev_start(). Thread safety
5266  * is not guaranteed.
5267  *
5268  * @param dev
5269  *   Pointer to Ethernet device.
5270  */
5271 static void
5272 flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
5273 {
5274         struct mlx5_list_entry *entry;
5275         struct mlx5_priv *priv = dev->data->dev_private;
5276         struct mlx5_flow_cb_ctx ctx;
5277         uint32_t mark_id;
5278 
5279         /* Check if default flow is registered. */
5280         if (!priv->sh->mreg_cp_tbl)
5281                 return;
5282         mark_id = MLX5_DEFAULT_COPY_ID;
5283         ctx.data = &mark_id;
5284         entry = mlx5_hlist_lookup(priv->sh->mreg_cp_tbl, mark_id, &ctx);
5285         if (!entry)
5286                 return;
5287         mlx5_hlist_unregister(priv->sh->mreg_cp_tbl, entry);
5288 }
5289 
5290 /**
5291  * Add the default copy action in RX_CP_TBL.
5292  *
5293  * This function is called in mlx5_dev_start(). Thread safety
5294  * is not guaranteed.
5295  *
5296  * @param dev
5297  *   Pointer to Ethernet device.
5298  * @param[out] error
5299  *   Perform verbose error reporting if not NULL.
5300  *
5301  * @return
5302  *   0 for success, negative value otherwise and rte_errno is set.
5303  */
5304 static int
5305 flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
5306                                   struct rte_flow_error *error)
5307 {
5308         struct mlx5_priv *priv = dev->data->dev_private;
5309         struct mlx5_flow_mreg_copy_resource *mcp_res;
5310         struct mlx5_flow_cb_ctx ctx;
5311         uint32_t mark_id;
5312 
5313         /* Check whether extensive metadata feature is engaged. */
5314         if (!priv->sh->config.dv_flow_en ||
5315             priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
5316             !mlx5_flow_ext_mreg_supported(dev) ||
5317             !priv->sh->dv_regc0_mask)
5318                 return 0;
5319         /*
5320          * Adding the default mreg copy flow may be attempted multiple times,
5321          * while it is released only once in stop. Avoid registering it twice.
5322          */
5323         mark_id = MLX5_DEFAULT_COPY_ID;
5324         ctx.data = &mark_id;
5325         if (mlx5_hlist_lookup(priv->sh->mreg_cp_tbl, mark_id, &ctx))
5326                 return 0;
5327         mcp_res = flow_mreg_add_copy_action(dev, mark_id, error);
5328         if (!mcp_res)
5329                 return -rte_errno;
5330         return 0;
5331 }
5332 
5333 /**
5334  * Add a flow of copying flow metadata registers in RX_CP_TBL.
5335  *
5336  * All the flows having a Q/RSS action should be split by
5337  * flow_mreg_split_qrss_prep() to pass by RX_CP_TBL. A flow in the RX_CP_TBL
5338  * performs the following,
5339  *   - CQE->flow_tag := reg_c[1] (MARK)
5340  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
5341  * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1]
5342  * but there should be a flow per each MARK ID set by MARK action.
 *
 * For the aforementioned reason, if there's a MARK action in the flow's action
 * list, a corresponding flow should be added to the RX_CP_TBL in order to copy
 * the MARK ID to CQE's flow_tag like,
 *   - If reg_c[1] is mark_id,
 *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
 *
 * For the SET_META action which stores value in reg_c[0], as the destination
 * is also a flow metadata register (reg_b), adding a default flow is enough.
 * Zero MARK ID means the default flow. The default flow looks like,
 *   - For all flows, reg_b := reg_c[0] and jump to RX_ACT_TBL.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param flow
 *   Pointer to flow structure.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static int
flow_mreg_update_copy_table(struct rte_eth_dev *dev,
			    struct rte_flow *flow,
			    const struct rte_flow_action *actions,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_sh_config *config = &priv->sh->config;
	struct mlx5_flow_mreg_copy_resource *mcp_res;
	const struct rte_flow_action_mark *mark;

	/* Check whether extensive metadata feature is engaged. */
	if (!config->dv_flow_en ||
	    config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
	    !mlx5_flow_ext_mreg_supported(dev) ||
	    !priv->sh->dv_regc0_mask)
		return 0;
	/* Find MARK action. */
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_FLAG:
			mcp_res = flow_mreg_add_copy_action
				(dev, MLX5_FLOW_MARK_DEFAULT, error);
			if (!mcp_res)
				return -rte_errno;
			flow->rix_mreg_copy = mcp_res->idx;
			return 0;
		case RTE_FLOW_ACTION_TYPE_MARK:
			mark = (const struct rte_flow_action_mark *)
				actions->conf;
			mcp_res =
				flow_mreg_add_copy_action(dev, mark->id, error);
			if (!mcp_res)
				return -rte_errno;
			flow->rix_mreg_copy = mcp_res->idx;
			return 0;
		default:
			break;
		}
	}
	return 0;
}

#define MLX5_MAX_SPLIT_ACTIONS 24
#define MLX5_MAX_SPLIT_ITEMS 24

/**
 * Split the hairpin flow.
 * Since the HW can't support encap and push-VLAN on Rx, we move these
 * actions to Tx.
 * If the count action comes after the encap, we also move the count
 * action. In this case the count will also measure the outer bytes.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] actions_rx
 *   Rx flow actions.
 * @param[out] actions_tx
 *   Tx flow actions.
 * @param[out] pattern_tx
 *   The pattern items for the Tx flow.
 * @param[out] flow_id
 *   The flow ID connected to this flow.
 *
 * @return
 *   0 on success.
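 *
 * A rough sketch of the outcome (illustrative action set only): for a rule
 * with actions [RAW_ENCAP(size above the L3 decision threshold),
 * QUEUE(hairpin)], the Rx flow keeps [QUEUE, SET_TAG(flow_id), END] while
 * the Tx flow matches [TAG == flow_id] and runs [RAW_ENCAP, END].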
5436 */ 5437 static int 5438 flow_hairpin_split(struct rte_eth_dev *dev, 5439 const struct rte_flow_action actions[], 5440 struct rte_flow_action actions_rx[], 5441 struct rte_flow_action actions_tx[], 5442 struct rte_flow_item pattern_tx[], 5443 uint32_t flow_id) 5444 { 5445 const struct rte_flow_action_raw_encap *raw_encap; 5446 const struct rte_flow_action_raw_decap *raw_decap; 5447 struct mlx5_rte_flow_action_set_tag *set_tag; 5448 struct rte_flow_action *tag_action; 5449 struct mlx5_rte_flow_item_tag *tag_item; 5450 struct rte_flow_item *item; 5451 char *addr; 5452 int push_vlan = 0; 5453 int encap = 0; 5454 5455 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 5456 if (actions->type == RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN) 5457 push_vlan = 1; 5458 switch (actions->type) { 5459 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 5460 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 5461 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 5462 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: 5463 rte_memcpy(actions_tx, actions, 5464 sizeof(struct rte_flow_action)); 5465 actions_tx++; 5466 break; 5467 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 5468 if (push_vlan) { 5469 rte_memcpy(actions_tx, actions, 5470 sizeof(struct rte_flow_action)); 5471 actions_tx++; 5472 } else { 5473 rte_memcpy(actions_rx, actions, 5474 sizeof(struct rte_flow_action)); 5475 actions_rx++; 5476 } 5477 break; 5478 case RTE_FLOW_ACTION_TYPE_COUNT: 5479 case RTE_FLOW_ACTION_TYPE_AGE: 5480 if (encap) { 5481 rte_memcpy(actions_tx, actions, 5482 sizeof(struct rte_flow_action)); 5483 actions_tx++; 5484 } else { 5485 rte_memcpy(actions_rx, actions, 5486 sizeof(struct rte_flow_action)); 5487 actions_rx++; 5488 } 5489 break; 5490 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 5491 raw_encap = actions->conf; 5492 if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) { 5493 memcpy(actions_tx, actions, 5494 sizeof(struct rte_flow_action)); 5495 actions_tx++; 5496 encap = 1; 5497 } else { 5498 rte_memcpy(actions_rx, actions, 5499 sizeof(struct rte_flow_action)); 5500 actions_rx++; 5501 } 5502 break; 5503 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 5504 raw_decap = actions->conf; 5505 if (raw_decap->size < MLX5_ENCAPSULATION_DECISION_SIZE) { 5506 memcpy(actions_tx, actions, 5507 sizeof(struct rte_flow_action)); 5508 actions_tx++; 5509 } else { 5510 rte_memcpy(actions_rx, actions, 5511 sizeof(struct rte_flow_action)); 5512 actions_rx++; 5513 } 5514 break; 5515 default: 5516 rte_memcpy(actions_rx, actions, 5517 sizeof(struct rte_flow_action)); 5518 actions_rx++; 5519 break; 5520 } 5521 } 5522 /* Add set meta action and end action for the Rx flow. */ 5523 tag_action = actions_rx; 5524 tag_action->type = (enum rte_flow_action_type) 5525 MLX5_RTE_FLOW_ACTION_TYPE_TAG; 5526 actions_rx++; 5527 rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action)); 5528 actions_rx++; 5529 set_tag = (void *)actions_rx; 5530 *set_tag = (struct mlx5_rte_flow_action_set_tag) { 5531 .id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL), 5532 .data = flow_id, 5533 }; 5534 MLX5_ASSERT(set_tag->id > REG_NON); 5535 tag_action->conf = set_tag; 5536 /* Create Tx item list. 
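	 * The caller-supplied pattern_tx buffer doubles as storage: slots [0]
	 * and [1] hold the TAG and END items, while the tag spec and mask
	 * (struct mlx5_rte_flow_item_tag) are carved from the raw memory
	 * starting at &pattern_tx[2], which is what 'addr' below points to.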
	 */
	rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
	addr = (void *)&pattern_tx[2];
	item = pattern_tx;
	item->type = (enum rte_flow_item_type)
		     MLX5_RTE_FLOW_ITEM_TYPE_TAG;
	tag_item = (void *)addr;
	tag_item->data = flow_id;
	tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
	MLX5_ASSERT(tag_item->id > REG_NON);
	item->spec = tag_item;
	addr += sizeof(struct mlx5_rte_flow_item_tag);
	tag_item = (void *)addr;
	tag_item->data = UINT32_MAX;
	tag_item->id = UINT16_MAX;
	item->mask = tag_item;
	item->last = NULL;
	item++;
	item->type = RTE_FLOW_ITEM_TYPE_END;
	return 0;
}

/**
 * The last stage of splitting chain, just creates the subflow
 * without any modification.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Parent flow structure pointer.
 * @param[in, out] sub_flow
 *   Pointer to return the created subflow, may be NULL.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[in] flow_split_info
 *   Pointer to flow split info structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @return
 *   0 on success, negative value otherwise.
 */
static int
flow_create_split_inner(struct rte_eth_dev *dev,
			struct rte_flow *flow,
			struct mlx5_flow **sub_flow,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item items[],
			const struct rte_flow_action actions[],
			struct mlx5_flow_split_info *flow_split_info,
			struct rte_flow_error *error)
{
	struct mlx5_flow *dev_flow;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
				    flow_split_info->flow_idx, error);
	if (!dev_flow)
		return -rte_errno;
	dev_flow->flow = flow;
	dev_flow->external = flow_split_info->external;
	dev_flow->skip_scale = flow_split_info->skip_scale;
	/* Subflow object was created, we must include it in the list. */
	SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
		      dev_flow->handle, next);
	/*
	 * If dev_flow is one of the suffix flows, some actions in the suffix
	 * flow may need the user-defined item layer flags; pass the metadata
	 * Rx queue mark flag to the suffix flow as well.
	 */
	if (flow_split_info->prefix_layers)
		dev_flow->handle->layers = flow_split_info->prefix_layers;
	if (flow_split_info->prefix_mark) {
		MLX5_ASSERT(wks);
		wks->mark = 1;
	}
	if (sub_flow)
		*sub_flow = dev_flow;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	dev_flow->dv.table_id = flow_split_info->table_id;
#endif
	return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
}

/**
 * Get the sub policy of a meter.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Parent flow structure pointer.
 * @param wks
 *   Pointer to thread flow work space.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
5638 * 5639 * @return 5640 * Pointer to the meter sub policy, NULL otherwise and rte_errno is set. 5641 */ 5642 static struct mlx5_flow_meter_sub_policy * 5643 get_meter_sub_policy(struct rte_eth_dev *dev, 5644 struct rte_flow *flow, 5645 struct mlx5_flow_workspace *wks, 5646 const struct rte_flow_attr *attr, 5647 const struct rte_flow_item items[], 5648 struct rte_flow_error *error) 5649 { 5650 struct mlx5_flow_meter_policy *policy; 5651 struct mlx5_flow_meter_policy *final_policy; 5652 struct mlx5_flow_meter_sub_policy *sub_policy = NULL; 5653 5654 policy = wks->policy; 5655 final_policy = policy->is_hierarchy ? wks->final_policy : policy; 5656 if (final_policy->is_rss || final_policy->is_queue) { 5657 struct mlx5_flow_rss_desc rss_desc_v[MLX5_MTR_RTE_COLORS]; 5658 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS] = {0}; 5659 uint32_t i; 5660 5661 /* 5662 * This is a tmp dev_flow, 5663 * no need to register any matcher for it in translate. 5664 */ 5665 wks->skip_matcher_reg = 1; 5666 for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) { 5667 struct mlx5_flow dev_flow = {0}; 5668 struct mlx5_flow_handle dev_handle = { {0} }; 5669 uint8_t fate = final_policy->act_cnt[i].fate_action; 5670 5671 if (fate == MLX5_FLOW_FATE_SHARED_RSS) { 5672 const struct rte_flow_action_rss *rss_act = 5673 final_policy->act_cnt[i].rss->conf; 5674 struct rte_flow_action rss_actions[2] = { 5675 [0] = { 5676 .type = RTE_FLOW_ACTION_TYPE_RSS, 5677 .conf = rss_act, 5678 }, 5679 [1] = { 5680 .type = RTE_FLOW_ACTION_TYPE_END, 5681 .conf = NULL, 5682 } 5683 }; 5684 5685 dev_flow.handle = &dev_handle; 5686 dev_flow.ingress = attr->ingress; 5687 dev_flow.flow = flow; 5688 dev_flow.external = 0; 5689 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 5690 dev_flow.dv.transfer = attr->transfer; 5691 #endif 5692 /** 5693 * Translate RSS action to get rss hash fields. 5694 */ 5695 if (flow_drv_translate(dev, &dev_flow, attr, 5696 items, rss_actions, error)) 5697 goto exit; 5698 rss_desc_v[i] = wks->rss_desc; 5699 rss_desc_v[i].symmetric_hash_function = 5700 dev_flow.symmetric_hash_function; 5701 rss_desc_v[i].key_len = MLX5_RSS_HASH_KEY_LEN; 5702 rss_desc_v[i].hash_fields = 5703 dev_flow.hash_fields; 5704 rss_desc_v[i].queue_num = 5705 rss_desc_v[i].hash_fields ? 5706 rss_desc_v[i].queue_num : 1; 5707 rss_desc_v[i].tunnel = 5708 !!(dev_flow.handle->layers & 5709 MLX5_FLOW_LAYER_TUNNEL); 5710 /* Use the RSS queues in the containers. */ 5711 rss_desc_v[i].queue = 5712 (uint16_t *)(uintptr_t)rss_act->queue; 5713 rss_desc[i] = &rss_desc_v[i]; 5714 } else if (fate == MLX5_FLOW_FATE_QUEUE) { 5715 /* This is queue action. */ 5716 rss_desc_v[i] = wks->rss_desc; 5717 rss_desc_v[i].key_len = 0; 5718 rss_desc_v[i].hash_fields = 0; 5719 rss_desc_v[i].queue = 5720 &final_policy->act_cnt[i].queue; 5721 rss_desc_v[i].queue_num = 1; 5722 rss_desc[i] = &rss_desc_v[i]; 5723 } else { 5724 rss_desc[i] = NULL; 5725 } 5726 } 5727 sub_policy = flow_drv_meter_sub_policy_rss_prepare(dev, 5728 flow, policy, rss_desc); 5729 } else { 5730 enum mlx5_meter_domain mtr_domain = 5731 attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER : 5732 (attr->egress ? MLX5_MTR_DOMAIN_EGRESS : 5733 MLX5_MTR_DOMAIN_INGRESS); 5734 sub_policy = policy->sub_policys[mtr_domain][0]; 5735 } 5736 if (!sub_policy) 5737 rte_flow_error_set(error, EINVAL, 5738 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 5739 "Failed to get meter sub-policy."); 5740 exit: 5741 return sub_policy; 5742 } 5743 5744 /** 5745 * Split the meter flow. 
 *
 * As the meter flow is split into three subflows, actions other than
 * the meter action only make sense if the meter accepts the packet. If
 * the packet needs to be dropped, no additional actions should be
 * taken.
 *
 * One special kind of action, which decapsulates the L3 tunnel header,
 * is placed in the prefix subflow, so as not to take the L3 tunnel
 * header into account.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Parent flow structure pointer.
 * @param wks
 *   Pointer to thread flow work space.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[out] sfx_items
 *   Suffix flow match items (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] actions_sfx
 *   Suffix flow actions.
 * @param[out] actions_pre
 *   Prefix flow actions.
 * @param[out] mtr_flow_id
 *   Pointer to meter flow id.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_meter_split_prep(struct rte_eth_dev *dev,
		      struct rte_flow *flow,
		      struct mlx5_flow_workspace *wks,
		      const struct rte_flow_attr *attr,
		      const struct rte_flow_item items[],
		      struct rte_flow_item sfx_items[],
		      const struct rte_flow_action actions[],
		      struct rte_flow_action actions_sfx[],
		      struct rte_flow_action actions_pre[],
		      uint32_t *mtr_flow_id,
		      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_meter_info *fm = wks->fm;
	struct rte_flow_action *tag_action = NULL;
	struct rte_flow_item *tag_item;
	struct mlx5_rte_flow_action_set_tag *set_tag;
	const struct rte_flow_action_raw_encap *raw_encap;
	const struct rte_flow_action_raw_decap *raw_decap;
	struct mlx5_rte_flow_item_tag *tag_item_spec;
	struct mlx5_rte_flow_item_tag *tag_item_mask;
	uint32_t tag_id = 0;
	bool vlan_actions;
	struct rte_flow_item *orig_sfx_items = sfx_items;
	const struct rte_flow_item *orig_items = items;
	struct rte_flow_action *hw_mtr_action;
	struct rte_flow_action *action_pre_head = NULL;
	uint16_t flow_src_port = priv->representor_id;
	bool mtr_first;
	uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
	uint8_t mtr_reg_bits = priv->mtr_reg_share ?
			       MLX5_MTR_IDLE_BITS_IN_COLOR_REG : MLX5_REG_BITS;
	uint32_t flow_id = 0;
	uint32_t flow_id_reversed = 0;
	uint8_t flow_id_bits = 0;
	bool after_meter = false;
	int shift;

	/* Prepare the suffix subflow items.
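	 * The first suffix item is the internal TAG item matching the meter
	 * split register. Port and VLAN items copied from the original
	 * pattern follow it, and the spec/mask structures for the TAG item
	 * are carved from the memory right behind the END item below.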
*/ 5822 tag_item = sfx_items++; 5823 tag_item->type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG; 5824 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { 5825 int item_type = items->type; 5826 5827 switch (item_type) { 5828 case RTE_FLOW_ITEM_TYPE_PORT_ID: 5829 case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT: 5830 case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR: 5831 if (mlx5_flow_get_item_vport_id(dev, items, &flow_src_port, NULL, error)) 5832 return -rte_errno; 5833 if (!fm->def_policy && wks->policy->hierarchy_match_port && 5834 flow_src_port != priv->representor_id) { 5835 if (flow_drv_mtr_hierarchy_rule_create(dev, 5836 flow, fm, 5837 flow_src_port, 5838 items, 5839 error)) 5840 return -rte_errno; 5841 } 5842 memcpy(sfx_items, items, sizeof(*sfx_items)); 5843 sfx_items++; 5844 break; 5845 case RTE_FLOW_ITEM_TYPE_VLAN: 5846 /* 5847 * Copy VLAN items in case VLAN actions are performed. 5848 * If there are no VLAN actions, these items will be VOID. 5849 */ 5850 memcpy(sfx_items, items, sizeof(*sfx_items)); 5851 sfx_items->type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN; 5852 sfx_items++; 5853 break; 5854 default: 5855 break; 5856 } 5857 } 5858 sfx_items->type = RTE_FLOW_ITEM_TYPE_END; 5859 sfx_items++; 5860 mtr_first = priv->sh->meter_aso_en && 5861 (attr->egress || (attr->transfer && flow_src_port != UINT16_MAX)); 5862 /* For ASO meter, meter must be before tag in TX direction. */ 5863 if (mtr_first) { 5864 action_pre_head = actions_pre++; 5865 /* Leave space for tag action. */ 5866 tag_action = actions_pre++; 5867 } 5868 /* Prepare the actions for prefix and suffix flow. */ 5869 vlan_actions = false; 5870 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 5871 struct rte_flow_action *action_cur = NULL; 5872 5873 switch (actions->type) { 5874 case RTE_FLOW_ACTION_TYPE_METER: 5875 if (mtr_first) { 5876 action_cur = action_pre_head; 5877 } else { 5878 /* Leave space for tag action. */ 5879 tag_action = actions_pre++; 5880 action_cur = actions_pre++; 5881 } 5882 after_meter = true; 5883 break; 5884 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: 5885 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: 5886 action_cur = actions_pre++; 5887 break; 5888 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 5889 raw_encap = actions->conf; 5890 if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE) 5891 action_cur = actions_pre++; 5892 break; 5893 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 5894 raw_decap = actions->conf; 5895 if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE) 5896 action_cur = actions_pre++; 5897 break; 5898 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 5899 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 5900 vlan_actions = true; 5901 break; 5902 case RTE_FLOW_ACTION_TYPE_COUNT: 5903 if (fm->def_policy) 5904 action_cur = after_meter ? 5905 actions_sfx++ : actions_pre++; 5906 break; 5907 default: 5908 break; 5909 } 5910 if (!action_cur) 5911 action_cur = (fm->def_policy) ? 5912 actions_sfx++ : actions_pre++; 5913 memcpy(action_cur, actions, sizeof(struct rte_flow_action)); 5914 } 5915 /* If there are no VLAN actions, convert VLAN items to VOID in suffix flow items. */ 5916 if (!vlan_actions) { 5917 struct rte_flow_item *it = orig_sfx_items; 5918 5919 for (; it->type != RTE_FLOW_ITEM_TYPE_END; it++) 5920 if (it->type == (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN) 5921 it->type = RTE_FLOW_ITEM_TYPE_VOID; 5922 } 5923 /* Add end action to the actions. 
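	 * A rough example of the split so far (default-policy meter,
	 * illustrative action set): original actions [METER, MARK, QUEUE]
	 * end up as prefix [SET_TAG(meter/flow id), METER, END] and suffix
	 * [MARK, QUEUE, END], the suffix flow being matched by the TAG item
	 * prepared above.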
	 */
	actions_sfx->type = RTE_FLOW_ACTION_TYPE_END;
	if (priv->sh->meter_aso_en) {
		/*
		 * For the ASO meter, an extra jump action needs to be added
		 * explicitly, to jump from the meter table to the policer
		 * table.
		 */
		struct mlx5_flow_meter_sub_policy *sub_policy;
		struct mlx5_flow_tbl_data_entry *tbl_data;

		if (!fm->def_policy) {
			sub_policy = get_meter_sub_policy(dev, flow, wks,
							  attr, orig_items,
							  error);
			if (!sub_policy)
				return -rte_errno;
		} else {
			enum mlx5_meter_domain mtr_domain =
			attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER :
				(attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
						MLX5_MTR_DOMAIN_INGRESS);

			sub_policy =
			&priv->sh->mtrmng->def_policy[mtr_domain]->sub_policy;
		}
		tbl_data = container_of(sub_policy->tbl_rsc,
					struct mlx5_flow_tbl_data_entry, tbl);
		hw_mtr_action = actions_pre++;
		hw_mtr_action->type = (enum rte_flow_action_type)
				      MLX5_RTE_FLOW_ACTION_TYPE_JUMP;
		hw_mtr_action->conf = tbl_data->jump.action;
	}
	actions_pre->type = RTE_FLOW_ACTION_TYPE_END;
	actions_pre++;
	if (!tag_action)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "No tag action space.");
	if (!mtr_flow_id) {
		tag_action->type = RTE_FLOW_ACTION_TYPE_VOID;
		goto exit;
	}
	/* Only the default-policy meter creates the mtr flow id. */
	if (fm->def_policy) {
		mlx5_ipool_malloc(fm->flow_ipool, &tag_id);
		if (!tag_id)
			return rte_flow_error_set(error, ENOMEM,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					"Failed to allocate meter flow id.");
		flow_id = tag_id - 1;
		flow_id_bits = (!flow_id) ? 1 :
				(MLX5_REG_BITS - rte_clz32(flow_id));
		if ((flow_id_bits + priv->sh->mtrmng->max_mtr_bits) >
		    mtr_reg_bits) {
			mlx5_ipool_free(fm->flow_ipool, tag_id);
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					"Meter flow id exceeds max limit.");
		}
		if (flow_id_bits > priv->sh->mtrmng->max_mtr_flow_bits)
			priv->sh->mtrmng->max_mtr_flow_bits = flow_id_bits;
	}
	/* Build tag actions and items for meter_id/meter flow_id. */
	set_tag = (struct mlx5_rte_flow_action_set_tag *)actions_pre;
	tag_item_spec = (struct mlx5_rte_flow_item_tag *)sfx_items;
	tag_item_mask = tag_item_spec + 1;
	/* Both flow_id and meter_id share the same register. */
	*set_tag = (struct mlx5_rte_flow_action_set_tag) {
		.id = (enum modify_reg)mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
							    0, error),
		.offset = mtr_id_offset,
		.length = mtr_reg_bits,
		.data = flow->meter,
	};
	/*
	 * The color register bits used by flow_id grow from MSB to LSB,
	 * so the flow_id value must be bit-reversed before being written
	 * to the color register.
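	 *
	 * Worked example (hypothetical values): with flow_id = 0b110
	 * (flow_id_bits = 3) the loop below yields flow_id_reversed = 0b011,
	 * which then lands in the top bits of the meter register:
	 * set_tag->data |= 0b011 << (mtr_reg_bits - 3).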
6000 */ 6001 for (shift = 0; shift < flow_id_bits; shift++) 6002 flow_id_reversed = (flow_id_reversed << 1) | 6003 ((flow_id >> shift) & 0x1); 6004 set_tag->data |= 6005 flow_id_reversed << (mtr_reg_bits - flow_id_bits); 6006 tag_item_spec->id = set_tag->id; 6007 tag_item_spec->data = set_tag->data << mtr_id_offset; 6008 tag_item_mask->data = UINT32_MAX << mtr_id_offset; 6009 tag_action->type = (enum rte_flow_action_type) 6010 MLX5_RTE_FLOW_ACTION_TYPE_TAG; 6011 tag_action->conf = set_tag; 6012 tag_item->spec = tag_item_spec; 6013 tag_item->last = NULL; 6014 tag_item->mask = tag_item_mask; 6015 exit: 6016 if (mtr_flow_id) 6017 *mtr_flow_id = tag_id; 6018 return 0; 6019 } 6020 6021 /** 6022 * Split action list having QUEUE/RSS for metadata register copy. 6023 * 6024 * Once Q/RSS action is detected in user's action list, the flow action 6025 * should be split in order to copy metadata registers, which will happen in 6026 * RX_CP_TBL like, 6027 * - CQE->flow_tag := reg_c[1] (MARK) 6028 * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META) 6029 * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL. 6030 * This is because the last action of each flow must be a terminal action 6031 * (QUEUE, RSS or DROP). 6032 * 6033 * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is 6034 * stored and kept in the mlx5_flow structure per each sub_flow. 6035 * 6036 * The Q/RSS action is replaced with, 6037 * - SET_TAG, setting the allocated flow ID to reg_c[2]. 6038 * And the following JUMP action is added at the end, 6039 * - JUMP, to RX_CP_TBL. 6040 * 6041 * A flow to perform remained Q/RSS action will be created in RX_ACT_TBL by 6042 * flow_create_split_metadata() routine. The flow will look like, 6043 * - If flow ID matches (reg_c[2]), perform Q/RSS. 6044 * 6045 * @param dev 6046 * Pointer to Ethernet device. 6047 * @param[out] split_actions 6048 * Pointer to store split actions to jump to CP_TBL. 6049 * @param[in] actions 6050 * Pointer to the list of original flow actions. 6051 * @param[in] qrss 6052 * Pointer to the Q/RSS action. 6053 * @param[in] actions_n 6054 * Number of original actions. 6055 * @param[in] mtr_sfx 6056 * Check if it is in meter suffix table. 6057 * @param[out] error 6058 * Perform verbose error reporting if not NULL. 6059 * 6060 * @return 6061 * non-zero unique flow_id on success, otherwise 0 and 6062 * error/rte_error are set. 6063 */ 6064 static uint32_t 6065 flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, 6066 struct rte_flow_action *split_actions, 6067 const struct rte_flow_action *actions, 6068 const struct rte_flow_action *qrss, 6069 int actions_n, int mtr_sfx, 6070 struct rte_flow_error *error) 6071 { 6072 struct mlx5_priv *priv = dev->data->dev_private; 6073 struct mlx5_rte_flow_action_set_tag *set_tag; 6074 struct rte_flow_action_jump *jump; 6075 const int qrss_idx = qrss - actions; 6076 uint32_t flow_id = 0; 6077 int ret = 0; 6078 6079 /* 6080 * Given actions will be split 6081 * - Replace QUEUE/RSS action with SET_TAG to set flow ID. 6082 * - Add jump to mreg CP_TBL. 6083 * As a result, there will be one more action. 6084 */ 6085 memcpy(split_actions, actions, sizeof(*split_actions) * actions_n); 6086 /* Count MLX5_RTE_FLOW_ACTION_TYPE_TAG. */ 6087 ++actions_n; 6088 set_tag = (void *)(split_actions + actions_n); 6089 /* 6090 * If we are not the meter suffix flow, add the tag action. 6091 * Since meter suffix flow already has the tag added. 6092 */ 6093 if (!mtr_sfx) { 6094 /* 6095 * Allocate the new subflow ID. 
This one is unique within 6096 * device and not shared with representors. Otherwise, 6097 * we would have to resolve multi-thread access synch 6098 * issue. Each flow on the shared device is appended 6099 * with source vport identifier, so the resulting 6100 * flows will be unique in the shared (by master and 6101 * representors) domain even if they have coinciding 6102 * IDs. 6103 */ 6104 mlx5_ipool_malloc(priv->sh->ipool 6105 [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &flow_id); 6106 if (!flow_id) 6107 return rte_flow_error_set(error, ENOMEM, 6108 RTE_FLOW_ERROR_TYPE_ACTION, 6109 NULL, "can't allocate id " 6110 "for split Q/RSS subflow"); 6111 /* Internal SET_TAG action to set flow ID. */ 6112 *set_tag = (struct mlx5_rte_flow_action_set_tag){ 6113 .data = flow_id, 6114 }; 6115 ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error); 6116 if (ret < 0) 6117 return ret; 6118 set_tag->id = ret; 6119 /* Construct new actions array. */ 6120 /* Replace QUEUE/RSS action. */ 6121 split_actions[qrss_idx] = (struct rte_flow_action){ 6122 .type = (enum rte_flow_action_type) 6123 MLX5_RTE_FLOW_ACTION_TYPE_TAG, 6124 .conf = set_tag, 6125 }; 6126 } else { 6127 /* 6128 * If we are the suffix flow of meter, tag already exist. 6129 * Set the QUEUE/RSS action to void. 6130 */ 6131 split_actions[qrss_idx].type = RTE_FLOW_ACTION_TYPE_VOID; 6132 } 6133 /* JUMP action to jump to mreg copy table (CP_TBL). */ 6134 jump = (void *)(set_tag + 1); 6135 *jump = (struct rte_flow_action_jump){ 6136 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, 6137 }; 6138 split_actions[actions_n - 2] = (struct rte_flow_action){ 6139 .type = RTE_FLOW_ACTION_TYPE_JUMP, 6140 .conf = jump, 6141 }; 6142 split_actions[actions_n - 1] = (struct rte_flow_action){ 6143 .type = RTE_FLOW_ACTION_TYPE_END, 6144 }; 6145 return flow_id; 6146 } 6147 6148 /** 6149 * Extend the given action list for Tx metadata copy. 6150 * 6151 * Copy the given action list to the ext_actions and add flow metadata register 6152 * copy action in order to copy reg_a set by WQE to reg_c[0]. 6153 * 6154 * @param[out] ext_actions 6155 * Pointer to the extended action list. 6156 * @param[in] actions 6157 * Pointer to the list of actions. 6158 * @param[in] actions_n 6159 * Number of actions in the list. 6160 * @param[out] error 6161 * Perform verbose error reporting if not NULL. 6162 * @param[in] encap_idx 6163 * The encap action index. 
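 *
 * Sketch of the resulting layout (describing the code below): the
 * internal COPY_MREG action is written at encap_idx, shifting the encap
 * action and everything after it one slot down; when encap_idx points
 * at the list tail, COPY_MREG is simply appended before the END action.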
 *
 * @return
 *   0 on success, negative value otherwise.
 */
static int
flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
		       struct rte_flow_action *ext_actions,
		       const struct rte_flow_action *actions,
		       int actions_n, struct rte_flow_error *error,
		       int encap_idx)
{
	struct mlx5_flow_action_copy_mreg *cp_mreg =
		(struct mlx5_flow_action_copy_mreg *)
			(ext_actions + actions_n + 1);
	int ret;

	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
	if (ret < 0)
		return ret;
	cp_mreg->dst = ret;
	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error);
	if (ret < 0)
		return ret;
	cp_mreg->src = ret;
	if (encap_idx != 0)
		memcpy(ext_actions, actions, sizeof(*ext_actions) * encap_idx);
	if (encap_idx == actions_n - 1) {
		ext_actions[actions_n - 1] = (struct rte_flow_action){
			.type = (enum rte_flow_action_type)
				MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
			.conf = cp_mreg,
		};
		ext_actions[actions_n] = (struct rte_flow_action){
			.type = RTE_FLOW_ACTION_TYPE_END,
		};
	} else {
		ext_actions[encap_idx] = (struct rte_flow_action){
			.type = (enum rte_flow_action_type)
				MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
			.conf = cp_mreg,
		};
		memcpy(ext_actions + encap_idx + 1, actions + encap_idx,
		       sizeof(*ext_actions) * (actions_n - encap_idx));
	}
	return 0;
}

/**
 * Check the match action from the action list.
 *
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] action
 *   The action to be checked for existence.
 * @param[out] match_action_pos
 *   Pointer to the position of the matched action if it exists, otherwise -1.
 * @param[out] qrss_action_pos
 *   Pointer to the position of the Queue/RSS action if it exists, otherwise -1.
 * @param[out] modify_after_mirror
 *   Pointer to the flag of modify action after FDB mirroring.
 *
 * @return
 *   > 0 the total number of actions.
 *   0 if no match action is found in the action list.
6230 */ 6231 static int 6232 flow_check_match_action(const struct rte_flow_action actions[], 6233 const struct rte_flow_attr *attr, 6234 enum rte_flow_action_type action, 6235 int *match_action_pos, int *qrss_action_pos, 6236 int *modify_after_mirror) 6237 { 6238 const struct rte_flow_action_sample *sample; 6239 const struct rte_flow_action_raw_decap *decap; 6240 const struct rte_flow_action *action_cur = NULL; 6241 int actions_n = 0; 6242 uint32_t ratio = 0; 6243 int sub_type = 0; 6244 int flag = 0; 6245 int fdb_mirror = 0; 6246 6247 *match_action_pos = -1; 6248 *qrss_action_pos = -1; 6249 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 6250 if (actions->type == action) { 6251 flag = 1; 6252 *match_action_pos = actions_n; 6253 } 6254 switch (actions->type) { 6255 case RTE_FLOW_ACTION_TYPE_QUEUE: 6256 case RTE_FLOW_ACTION_TYPE_RSS: 6257 *qrss_action_pos = actions_n; 6258 break; 6259 case RTE_FLOW_ACTION_TYPE_SAMPLE: 6260 sample = actions->conf; 6261 ratio = sample->ratio; 6262 sub_type = ((const struct rte_flow_action *) 6263 (sample->actions))->type; 6264 if (ratio == 1 && attr->transfer && 6265 sub_type != RTE_FLOW_ACTION_TYPE_END) 6266 fdb_mirror = 1; 6267 break; 6268 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: 6269 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: 6270 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: 6271 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: 6272 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: 6273 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: 6274 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: 6275 case RTE_FLOW_ACTION_TYPE_SET_TP_DST: 6276 case RTE_FLOW_ACTION_TYPE_DEC_TTL: 6277 case RTE_FLOW_ACTION_TYPE_SET_TTL: 6278 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ: 6279 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ: 6280 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK: 6281 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK: 6282 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP: 6283 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: 6284 case RTE_FLOW_ACTION_TYPE_FLAG: 6285 case RTE_FLOW_ACTION_TYPE_MARK: 6286 case RTE_FLOW_ACTION_TYPE_SET_META: 6287 case RTE_FLOW_ACTION_TYPE_SET_TAG: 6288 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: 6289 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 6290 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 6291 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: 6292 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: 6293 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: 6294 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD: 6295 case RTE_FLOW_ACTION_TYPE_METER: 6296 if (fdb_mirror) 6297 *modify_after_mirror = 1; 6298 break; 6299 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 6300 decap = actions->conf; 6301 action_cur = actions; 6302 while ((++action_cur)->type == RTE_FLOW_ACTION_TYPE_VOID) 6303 ; 6304 if (action_cur->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { 6305 const struct rte_flow_action_raw_encap *encap = 6306 action_cur->conf; 6307 if (decap->size <= 6308 MLX5_ENCAPSULATION_DECISION_SIZE && 6309 encap->size > 6310 MLX5_ENCAPSULATION_DECISION_SIZE) 6311 /* L3 encap. */ 6312 break; 6313 } 6314 if (fdb_mirror) 6315 *modify_after_mirror = 1; 6316 break; 6317 default: 6318 break; 6319 } 6320 actions_n++; 6321 } 6322 if (flag && fdb_mirror && !*modify_after_mirror) { 6323 /* FDB mirroring uses the destination array to implement 6324 * instead of FLOW_SAMPLER object. 6325 */ 6326 if (sub_type != RTE_FLOW_ACTION_TYPE_END) 6327 flag = 0; 6328 } 6329 /* Count RTE_FLOW_ACTION_TYPE_END. */ 6330 return flag ? actions_n + 1 : 0; 6331 } 6332 6333 #define SAMPLE_SUFFIX_ITEM 3 6334 6335 /** 6336 * Split the sample flow. 
 *
 * As the sample flow is split into two subflows, the original items remain
 * with the sample action in the prefix subflow, while the other actions move
 * to a new suffix subflow.
 *
 * Also add a unique tag ID with a tag action in the sample flow; the same
 * tag ID will be used as a match in the suffix flow.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] add_tag
 *   Add extra tag action flag.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[out] sfx_items
 *   Suffix flow match items (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] actions_sfx
 *   Suffix flow actions.
 * @param[out] actions_pre
 *   Prefix flow actions.
 * @param[in] actions_n
 *   The total number of actions.
 * @param[in] sample_action_pos
 *   The sample action position.
 * @param[in] qrss_action_pos
 *   The Queue/RSS action position.
 * @param[in] jump_table
 *   Add extra jump action flag.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 or a unique flow_id on success, a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
flow_sample_split_prep(struct rte_eth_dev *dev,
		       int add_tag,
		       const struct rte_flow_item items[],
		       struct rte_flow_item sfx_items[],
		       const struct rte_flow_action actions[],
		       struct rte_flow_action actions_sfx[],
		       struct rte_flow_action actions_pre[],
		       int actions_n,
		       int sample_action_pos,
		       int qrss_action_pos,
		       int jump_table,
		       struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rte_flow_action_set_tag *set_tag;
	struct mlx5_rte_flow_item_tag *tag_spec;
	struct mlx5_rte_flow_item_tag *tag_mask;
	struct rte_flow_action_jump *jump_action;
	uint32_t tag_id = 0;
	int append_index = 0;
	int set_tag_idx = -1;
	int index;
	int ret;

	if (sample_action_pos < 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "invalid position of sample "
					  "action in list");
	/* Prepare the actions for prefix and suffix flow. */
	if (add_tag) {
		/* Place the newly added tag action before the PUSH_VLAN or
		 * ENCAP action, updating its index accordingly.
		 */
		const struct rte_flow_action_raw_encap *raw_encap;
		const struct rte_flow_action *action = actions;
		int encap_idx;
		int action_idx = 0;
		int raw_decap_idx = -1;
		int push_vlan_idx = -1;
		for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
			switch (action->type) {
			case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
				raw_decap_idx = action_idx;
				break;
			case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
				raw_encap = action->conf;
				if (raw_encap->size >
				    MLX5_ENCAPSULATION_DECISION_SIZE) {
					encap_idx = raw_decap_idx != -1 ?
						    raw_decap_idx : action_idx;
					if (encap_idx < sample_action_pos &&
					    push_vlan_idx == -1)
						set_tag_idx = encap_idx;
				}
				break;
			case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
			case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
				encap_idx = action_idx;
				if (encap_idx < sample_action_pos &&
				    push_vlan_idx == -1)
					set_tag_idx = encap_idx;
				break;
			case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
			case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
				if (action_idx < sample_action_pos &&
				    push_vlan_idx == -1) {
					set_tag_idx = action_idx;
					push_vlan_idx = action_idx;
				}
				break;
			default:
				break;
			}
			action_idx++;
		}
	}
	/* Prepare the actions for prefix and suffix flow. */
	if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) {
		index = qrss_action_pos;
		/* Put the actions preceding the Queue/RSS action into the
		 * prefix flow.
		 */
		if (index != 0)
			memcpy(actions_pre, actions,
			       sizeof(struct rte_flow_action) * index);
		/* Put the others preceding the sample action into the
		 * prefix flow.
		 */
		if (sample_action_pos > index + 1)
			memcpy(actions_pre + index, actions + index + 1,
			       sizeof(struct rte_flow_action) *
			       (sample_action_pos - index - 1));
		index = sample_action_pos - 1;
		/* Put the Queue/RSS action into the suffix flow. */
		memcpy(actions_sfx, actions + qrss_action_pos,
		       sizeof(struct rte_flow_action));
		actions_sfx++;
	} else if (add_tag && set_tag_idx >= 0) {
		if (set_tag_idx > 0)
			memcpy(actions_pre, actions,
			       sizeof(struct rte_flow_action) * set_tag_idx);
		memcpy(actions_pre + set_tag_idx + 1, actions + set_tag_idx,
		       sizeof(struct rte_flow_action) *
		       (sample_action_pos - set_tag_idx));
		index = sample_action_pos;
	} else {
		index = sample_action_pos;
		if (index != 0)
			memcpy(actions_pre, actions,
			       sizeof(struct rte_flow_action) * index);
	}
	/* For CX5, add an extra tag action for NIC-RX and E-Switch ingress.
	 * For CX6DX and above, metadata registers Cx preserve their value,
	 * so add an extra tag action for NIC-RX and the E-Switch domain.
	 */
	if (add_tag) {
		/* Prepare the prefix tag action. */
		append_index++;
		set_tag = (void *)(actions_pre + actions_n + append_index);
		/* A trusted VF/SF on CX5 does not support the meter, so the
		 * reserved metadata regC is REG_NON; fall back to using the
		 * application tag index 0.
		 */
		if (unlikely(priv->sh->registers.aso_reg == REG_NON))
			ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error);
		else
			ret = mlx5_flow_get_reg_id(dev, MLX5_SAMPLE_ID, 0, error);
		if (ret < 0)
			return ret;
		mlx5_ipool_malloc(priv->sh->ipool
				  [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &tag_id);
		*set_tag = (struct mlx5_rte_flow_action_set_tag) {
			.id = ret,
			.data = tag_id,
		};
		/* Prepare the suffix subflow items.
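		 * Layout note: the tag spec and mask structures are carved
		 * from the memory right behind the SAMPLE_SUFFIX_ITEM item
		 * slots, which are filled below with an optional port item,
		 * the TAG item and the END item.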
*/ 6505 tag_spec = (void *)(sfx_items + SAMPLE_SUFFIX_ITEM); 6506 tag_spec->data = tag_id; 6507 tag_spec->id = set_tag->id; 6508 tag_mask = tag_spec + 1; 6509 tag_mask->data = UINT32_MAX; 6510 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { 6511 if (items->type == RTE_FLOW_ITEM_TYPE_PORT_ID || 6512 items->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR || 6513 items->type == RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT) { 6514 memcpy(sfx_items, items, sizeof(*sfx_items)); 6515 sfx_items++; 6516 break; 6517 } 6518 } 6519 sfx_items[0] = (struct rte_flow_item){ 6520 .type = (enum rte_flow_item_type) 6521 MLX5_RTE_FLOW_ITEM_TYPE_TAG, 6522 .spec = tag_spec, 6523 .last = NULL, 6524 .mask = tag_mask, 6525 }; 6526 sfx_items[1] = (struct rte_flow_item){ 6527 .type = (enum rte_flow_item_type) 6528 RTE_FLOW_ITEM_TYPE_END, 6529 }; 6530 /* Prepare the tag action in prefix subflow. */ 6531 set_tag_idx = (set_tag_idx == -1) ? index : set_tag_idx; 6532 actions_pre[set_tag_idx] = 6533 (struct rte_flow_action){ 6534 .type = (enum rte_flow_action_type) 6535 MLX5_RTE_FLOW_ACTION_TYPE_TAG, 6536 .conf = set_tag, 6537 }; 6538 /* Update next sample position due to add one tag action */ 6539 index += 1; 6540 } 6541 /* Copy the sample action into prefix flow. */ 6542 memcpy(actions_pre + index, actions + sample_action_pos, 6543 sizeof(struct rte_flow_action)); 6544 index += 1; 6545 /* For the modify action after the sample action in E-Switch mirroring, 6546 * Add the extra jump action in prefix subflow and jump into the next 6547 * table, then do the modify action in the new table. 6548 */ 6549 if (jump_table) { 6550 /* Prepare the prefix jump action. */ 6551 append_index++; 6552 jump_action = (void *)(actions_pre + actions_n + append_index); 6553 jump_action->group = jump_table; 6554 actions_pre[index++] = 6555 (struct rte_flow_action){ 6556 .type = (enum rte_flow_action_type) 6557 RTE_FLOW_ACTION_TYPE_JUMP, 6558 .conf = jump_action, 6559 }; 6560 } 6561 actions_pre[index] = (struct rte_flow_action){ 6562 .type = (enum rte_flow_action_type) 6563 RTE_FLOW_ACTION_TYPE_END, 6564 }; 6565 /* Put the actions after sample into Suffix flow. */ 6566 memcpy(actions_sfx, actions + sample_action_pos + 1, 6567 sizeof(struct rte_flow_action) * 6568 (actions_n - sample_action_pos - 1)); 6569 return tag_id; 6570 } 6571 6572 /** 6573 * The splitting for metadata feature. 6574 * 6575 * - Q/RSS action on NIC Rx should be split in order to pass by 6576 * the mreg copy table (RX_CP_TBL) and then it jumps to the 6577 * action table (RX_ACT_TBL) which has the split Q/RSS action. 6578 * 6579 * - All the actions on NIC Tx should have a mreg copy action to 6580 * copy reg_a from WQE to reg_c[0]. 6581 * 6582 * @param dev 6583 * Pointer to Ethernet device. 6584 * @param[in] flow 6585 * Parent flow structure pointer. 6586 * @param[in] attr 6587 * Flow rule attributes. 6588 * @param[in] items 6589 * Pattern specification (list terminated by the END pattern item). 6590 * @param[in] actions 6591 * Associated actions (list terminated by the END action). 6592 * @param[in] flow_split_info 6593 * Pointer to flow split info structure. 6594 * @param[out] error 6595 * Perform verbose error reporting if not NULL. 
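 *
 * Sketch of the Rx split (illustrative action set): ingress actions
 * [MARK, RSS] become a prefix flow with [MARK, SET_TAG(flow_id),
 * JUMP(RX_CP_TBL)] and a suffix flow in RX_ACT_TBL matching
 * [TAG == flow_id] and performing [RSS], as prepared by
 * flow_mreg_split_qrss_prep() above.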
6596 * @return 6597 * 0 on success, negative value otherwise 6598 */ 6599 static int 6600 flow_create_split_metadata(struct rte_eth_dev *dev, 6601 struct rte_flow *flow, 6602 const struct rte_flow_attr *attr, 6603 const struct rte_flow_item items[], 6604 const struct rte_flow_action actions[], 6605 struct mlx5_flow_split_info *flow_split_info, 6606 struct rte_flow_error *error) 6607 { 6608 struct mlx5_priv *priv = dev->data->dev_private; 6609 struct mlx5_sh_config *config = &priv->sh->config; 6610 const struct rte_flow_action *qrss = NULL; 6611 struct rte_flow_action *ext_actions = NULL; 6612 struct mlx5_flow *dev_flow = NULL; 6613 uint32_t qrss_id = 0; 6614 int mtr_sfx = 0; 6615 size_t act_size; 6616 int actions_n; 6617 int encap_idx; 6618 int ret; 6619 6620 /* Check whether extensive metadata feature is engaged. */ 6621 if (!config->dv_flow_en || 6622 config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 6623 !mlx5_flow_ext_mreg_supported(dev)) 6624 return flow_create_split_inner(dev, flow, NULL, attr, items, 6625 actions, flow_split_info, error); 6626 actions_n = flow_parse_metadata_split_actions_info(actions, &qrss, 6627 &encap_idx); 6628 if (qrss) { 6629 /* Exclude hairpin flows from splitting. */ 6630 if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) { 6631 const struct rte_flow_action_queue *queue; 6632 6633 queue = qrss->conf; 6634 if (mlx5_rxq_is_hairpin(dev, queue->index)) 6635 qrss = NULL; 6636 } else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) { 6637 const struct rte_flow_action_rss *rss; 6638 6639 rss = qrss->conf; 6640 if (mlx5_rxq_is_hairpin(dev, rss->queue[0])) 6641 qrss = NULL; 6642 } 6643 } 6644 if (qrss) { 6645 /* Check if it is in meter suffix table. */ 6646 mtr_sfx = attr->group == 6647 ((attr->transfer && priv->fdb_def_rule) ? 6648 (MLX5_FLOW_TABLE_LEVEL_METER - 1) : 6649 MLX5_FLOW_TABLE_LEVEL_METER); 6650 /* 6651 * Q/RSS action on NIC Rx should be split in order to pass by 6652 * the mreg copy table (RX_CP_TBL) and then it jumps to the 6653 * action table (RX_ACT_TBL) which has the split Q/RSS action. 6654 */ 6655 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) + 6656 sizeof(struct rte_flow_action_set_tag) + 6657 sizeof(struct rte_flow_action_jump); 6658 ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0, 6659 SOCKET_ID_ANY); 6660 if (!ext_actions) 6661 return rte_flow_error_set(error, ENOMEM, 6662 RTE_FLOW_ERROR_TYPE_ACTION, 6663 NULL, "no memory to split " 6664 "metadata flow"); 6665 /* 6666 * Create the new actions list with removed Q/RSS action 6667 * and appended set tag and jump to register copy table 6668 * (RX_CP_TBL). We should preallocate unique tag ID here 6669 * in advance, because it is needed for set tag action. 6670 */ 6671 qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions, 6672 qrss, actions_n, 6673 mtr_sfx, error); 6674 if (!mtr_sfx && !qrss_id) { 6675 ret = -rte_errno; 6676 goto exit; 6677 } 6678 } else if (attr->egress) { 6679 /* 6680 * All the actions on NIC Tx should have a metadata register 6681 * copy action to copy reg_a from WQE to reg_c[meta] 6682 */ 6683 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) + 6684 sizeof(struct mlx5_flow_action_copy_mreg); 6685 ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0, 6686 SOCKET_ID_ANY); 6687 if (!ext_actions) 6688 return rte_flow_error_set(error, ENOMEM, 6689 RTE_FLOW_ERROR_TYPE_ACTION, 6690 NULL, "no memory to split " 6691 "metadata flow"); 6692 /* Create the action list appended with copy register. 
*/ 6693 ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions, 6694 actions_n, error, encap_idx); 6695 if (ret < 0) 6696 goto exit; 6697 } 6698 /* Add the unmodified original or prefix subflow. */ 6699 ret = flow_create_split_inner(dev, flow, &dev_flow, attr, 6700 items, ext_actions ? ext_actions : 6701 actions, flow_split_info, error); 6702 if (ret < 0) 6703 goto exit; 6704 MLX5_ASSERT(dev_flow); 6705 if (qrss) { 6706 const struct rte_flow_attr q_attr = { 6707 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, 6708 .ingress = 1, 6709 }; 6710 /* Internal PMD action to set register. */ 6711 struct mlx5_rte_flow_item_tag q_tag_spec = { 6712 .data = qrss_id, 6713 .id = REG_NON, 6714 }; 6715 struct rte_flow_item q_items[] = { 6716 { 6717 .type = (enum rte_flow_item_type) 6718 MLX5_RTE_FLOW_ITEM_TYPE_TAG, 6719 .spec = &q_tag_spec, 6720 .last = NULL, 6721 .mask = NULL, 6722 }, 6723 { 6724 .type = RTE_FLOW_ITEM_TYPE_END, 6725 }, 6726 }; 6727 struct rte_flow_action q_actions[] = { 6728 { 6729 .type = qrss->type, 6730 .conf = qrss->conf, 6731 }, 6732 { 6733 .type = RTE_FLOW_ACTION_TYPE_END, 6734 }, 6735 }; 6736 uint64_t layers = flow_get_prefix_layer_flags(dev_flow); 6737 6738 /* 6739 * Configure the tag item only if there is no meter subflow. 6740 * Since tag is already marked in the meter suffix subflow 6741 * we can just use the meter suffix items as is. 6742 */ 6743 if (qrss_id) { 6744 /* Not meter subflow. */ 6745 MLX5_ASSERT(!mtr_sfx); 6746 /* 6747 * Put unique id in prefix flow due to it is destroyed 6748 * after suffix flow and id will be freed after there 6749 * is no actual flows with this id and identifier 6750 * reallocation becomes possible (for example, for 6751 * other flows in other threads). 6752 */ 6753 dev_flow->handle->split_flow_id = qrss_id; 6754 ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, 6755 error); 6756 if (ret < 0) 6757 goto exit; 6758 q_tag_spec.id = ret; 6759 } 6760 dev_flow = NULL; 6761 /* Add suffix subflow to execute Q/RSS. */ 6762 flow_split_info->prefix_layers = layers; 6763 flow_split_info->prefix_mark = 0; 6764 flow_split_info->table_id = 0; 6765 ret = flow_create_split_inner(dev, flow, &dev_flow, 6766 &q_attr, mtr_sfx ? items : 6767 q_items, q_actions, 6768 flow_split_info, error); 6769 if (ret < 0) 6770 goto exit; 6771 /* qrss ID should be freed if failed. */ 6772 qrss_id = 0; 6773 MLX5_ASSERT(dev_flow); 6774 } 6775 6776 exit: 6777 /* 6778 * We do not destroy the partially created sub_flows in case of error. 6779 * These ones are included into parent flow list and will be destroyed 6780 * by flow_drv_destroy. 6781 */ 6782 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], 6783 qrss_id); 6784 mlx5_free(ext_actions); 6785 return ret; 6786 } 6787 6788 /** 6789 * Create meter internal drop flow with the original pattern. 6790 * 6791 * @param dev 6792 * Pointer to Ethernet device. 6793 * @param[in] flow 6794 * Parent flow structure pointer. 6795 * @param[in] attr 6796 * Flow rule attributes. 6797 * @param[in] items 6798 * Pattern specification (list terminated by the END pattern item). 6799 * @param[in] flow_split_info 6800 * Pointer to flow split info structure. 6801 * @param[in] fm 6802 * Pointer to flow meter structure. 6803 * @param[out] error 6804 * Perform verbose error reporting if not NULL. 
 * @return
 *   0 on success, negative value otherwise.
 */
static uint32_t
flow_meter_create_drop_flow_with_org_pattern(struct rte_eth_dev *dev,
			struct rte_flow *flow,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item items[],
			struct mlx5_flow_split_info *flow_split_info,
			struct mlx5_flow_meter_info *fm,
			struct rte_flow_error *error)
{
	struct mlx5_flow *dev_flow = NULL;
	struct rte_flow_attr drop_attr = *attr;
	struct rte_flow_action drop_actions[3];
	struct mlx5_flow_split_info drop_split_info = *flow_split_info;

	MLX5_ASSERT(fm->drop_cnt);
	drop_actions[0].type =
		(enum rte_flow_action_type)MLX5_RTE_FLOW_ACTION_TYPE_COUNT;
	drop_actions[0].conf = (void *)(uintptr_t)fm->drop_cnt;
	drop_actions[1].type = RTE_FLOW_ACTION_TYPE_DROP;
	drop_actions[1].conf = NULL;
	drop_actions[2].type = RTE_FLOW_ACTION_TYPE_END;
	drop_actions[2].conf = NULL;
	drop_split_info.external = false;
	drop_split_info.skip_scale |= 1 << MLX5_SCALE_FLOW_GROUP_BIT;
	drop_split_info.table_id = MLX5_MTR_TABLE_ID_DROP;
	drop_attr.group = MLX5_FLOW_TABLE_LEVEL_METER;
	return flow_create_split_inner(dev, flow, &dev_flow,
				       &drop_attr, items, drop_actions,
				       &drop_split_info, error);
}

static int
flow_count_vlan_items(const struct rte_flow_item items[])
{
	int items_n = 0;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		if (items->type == RTE_FLOW_ITEM_TYPE_VLAN ||
		    items->type == (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
			items_n++;
	}
	return items_n;
}

/**
 * The splitting for meter feature.
 *
 * - The meter flow is split into two flows: a prefix flow and a suffix
 *   flow. The packets only make sense if they pass the prefix meter
 *   action.
 *
 * - Reg_C_5 is used for the packet to match between the prefix and
 *   suffix flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Parent flow structure pointer.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[in] flow_split_info
 *   Pointer to flow split info structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
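 *
 * Call-graph sketch: the prefix subflow is created directly via
 * flow_create_split_inner(), the suffix subflow goes through
 * flow_create_split_metadata(), and a non-default-policy meter with a
 * drop counter additionally gets an internal drop flow with the
 * original pattern (see flow_meter_create_drop_flow_with_org_pattern()
 * above).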
6876 * @return 6877 * 0 on success, negative value otherwise 6878 */ 6879 static int 6880 flow_create_split_meter(struct rte_eth_dev *dev, 6881 struct rte_flow *flow, 6882 const struct rte_flow_attr *attr, 6883 const struct rte_flow_item items[], 6884 const struct rte_flow_action actions[], 6885 struct mlx5_flow_split_info *flow_split_info, 6886 struct rte_flow_error *error) 6887 { 6888 struct mlx5_priv *priv = dev->data->dev_private; 6889 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); 6890 struct rte_flow_action *sfx_actions = NULL; 6891 struct rte_flow_action *pre_actions = NULL; 6892 struct rte_flow_item *sfx_items = NULL; 6893 struct mlx5_flow *dev_flow = NULL; 6894 struct rte_flow_attr sfx_attr = *attr; 6895 struct mlx5_flow_meter_info *fm = NULL; 6896 uint8_t skip_scale_restore; 6897 bool has_mtr = false; 6898 bool has_modify = false; 6899 bool set_mtr_reg = true; 6900 bool is_mtr_hierarchy = false; 6901 uint32_t meter_id = 0; 6902 uint32_t mtr_idx = 0; 6903 uint32_t mtr_flow_id = 0; 6904 size_t act_size; 6905 size_t item_size; 6906 int actions_n = 0; 6907 int vlan_items_n = 0; 6908 int ret = 0; 6909 6910 if (priv->mtr_en) 6911 actions_n = flow_check_meter_action(dev, actions, &has_mtr, 6912 &has_modify, &meter_id); 6913 if (has_mtr) { 6914 if (flow->meter) { 6915 fm = flow_dv_meter_find_by_idx(priv, flow->meter); 6916 if (!fm) 6917 return rte_flow_error_set(error, EINVAL, 6918 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 6919 NULL, "Meter not found."); 6920 } else { 6921 fm = mlx5_flow_meter_find(priv, meter_id, &mtr_idx); 6922 if (!fm) 6923 return rte_flow_error_set(error, EINVAL, 6924 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 6925 NULL, "Meter not found."); 6926 ret = mlx5_flow_meter_attach(priv, fm, 6927 &sfx_attr, error); 6928 if (ret) 6929 return -rte_errno; 6930 flow->meter = mtr_idx; 6931 } 6932 MLX5_ASSERT(wks); 6933 wks->fm = fm; 6934 if (!fm->def_policy) { 6935 wks->policy = mlx5_flow_meter_policy_find(dev, 6936 fm->policy_id, 6937 NULL); 6938 MLX5_ASSERT(wks->policy); 6939 if (wks->policy->mark) 6940 wks->mark = 1; 6941 if (wks->policy->is_hierarchy) { 6942 wks->final_policy = 6943 mlx5_flow_meter_hierarchy_get_final_policy(dev, 6944 wks->policy); 6945 if (!wks->final_policy) 6946 return rte_flow_error_set(error, 6947 EINVAL, 6948 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 6949 "Failed to find terminal policy of hierarchy."); 6950 is_mtr_hierarchy = true; 6951 } 6952 } 6953 /* 6954 * If it isn't default-policy Meter, and 6955 * 1. Not meter hierarchy and there's no action in flow to change 6956 * packet (modify/encap/decap etc.), OR 6957 * 2. No drop count needed for this meter. 6958 * Then no need to use regC to save meter id anymore. 6959 */ 6960 if (!fm->def_policy && ((!has_modify && !is_mtr_hierarchy) || !fm->drop_cnt)) 6961 set_mtr_reg = false; 6962 /* Prefix actions: meter, decap, encap, tag, jump, end, cnt. */ 6963 #define METER_PREFIX_ACTION 7 6964 act_size = (sizeof(struct rte_flow_action) * 6965 (actions_n + METER_PREFIX_ACTION)) + 6966 sizeof(struct mlx5_rte_flow_action_set_tag); 6967 /* Flow can have multiple VLAN items. Account for them in suffix items. */ 6968 vlan_items_n = flow_count_vlan_items(items); 6969 /* Suffix items: tag, [vlans], port id, end. 
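		 * On top of the item array itself, two struct
		 * mlx5_rte_flow_item_tag entries are reserved behind it for
		 * the tag spec and mask; that is the sizeof() * 2 term in
		 * item_size below.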
		 */
#define METER_SUFFIX_ITEM 3
		item_size = sizeof(struct rte_flow_item) * (METER_SUFFIX_ITEM + vlan_items_n) +
			    sizeof(struct mlx5_rte_flow_item_tag) * 2;
		sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + item_size),
					  0, SOCKET_ID_ANY);
		if (!sfx_actions)
			return rte_flow_error_set(error, ENOMEM,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "no memory to split "
						  "meter flow");
		sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
						     act_size);
		/* There's no suffix flow for a meter with a non-default policy. */
		if (!fm->def_policy)
			pre_actions = sfx_actions + 1;
		else
			pre_actions = sfx_actions + actions_n;
		ret = flow_meter_split_prep(dev, flow, wks, &sfx_attr,
					    items, sfx_items, actions,
					    sfx_actions, pre_actions,
					    (set_mtr_reg ? &mtr_flow_id : NULL),
					    error);
		if (ret) {
			ret = -rte_errno;
			goto exit;
		}
		/* Add the prefix subflow. */
		skip_scale_restore = flow_split_info->skip_scale;
		flow_split_info->skip_scale |=
			1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT;
		ret = flow_create_split_inner(dev, flow, &dev_flow,
					      attr, items, pre_actions,
					      flow_split_info, error);
		flow_split_info->skip_scale = skip_scale_restore;
		if (ret) {
			if (mtr_flow_id)
				mlx5_ipool_free(fm->flow_ipool, mtr_flow_id);
			ret = -rte_errno;
			goto exit;
		}
		if (mtr_flow_id) {
			dev_flow->handle->split_flow_id = mtr_flow_id;
			dev_flow->handle->is_meter_flow_id = 1;
		}
		if (!fm->def_policy) {
			if (!set_mtr_reg && fm->drop_cnt)
				ret =
			flow_meter_create_drop_flow_with_org_pattern(dev, flow,
						&sfx_attr, items,
						flow_split_info,
						fm, error);
			goto exit;
		}
		/* Set the suffix group attr. */
		sfx_attr.group = sfx_attr.transfer ?
				 (MLX5_FLOW_TABLE_LEVEL_METER - 1) :
				 MLX5_FLOW_TABLE_LEVEL_METER;
		flow_split_info->prefix_layers =
				flow_get_prefix_layer_flags(dev_flow);
		flow_split_info->prefix_mark |= wks->mark;
		flow_split_info->table_id = MLX5_MTR_TABLE_ID_SUFFIX;
	}
	/* Add the suffix subflow. */
	ret = flow_create_split_metadata(dev, flow,
					 &sfx_attr, sfx_items ?
					 sfx_items : items,
					 sfx_actions ? sfx_actions : actions,
					 flow_split_info, error);
exit:
	if (sfx_actions)
		mlx5_free(sfx_actions);
	return ret;
}

/**
 * The splitting for sample feature.
 *
 * Once the Sample action is detected in the action list, the flow actions
 * should be split into a prefix subflow and a suffix subflow.
 *
 * The original items remain in the prefix subflow; all actions preceding the
 * sample action and the sample action itself are copied to the prefix
 * subflow, while the actions following the sample action are copied to the
 * suffix subflow, where the Queue action is always located.
 *
 * In order to make the packet from the prefix subflow match the suffix
 * subflow, an extra tag action is added to the prefix subflow, and the
 * suffix subflow uses a tag item with the unique flow ID.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Parent flow structure pointer.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Parent flow structure pointer.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[in] flow_split_info
 *   Pointer to flow split info structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @return
 *   0 on success, negative value otherwise
 */
static int
flow_create_split_sample(struct rte_eth_dev *dev,
			 struct rte_flow *flow,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_item items[],
			 const struct rte_flow_action actions[],
			 struct mlx5_flow_split_info *flow_split_info,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_action *sfx_actions = NULL;
	struct rte_flow_action *pre_actions = NULL;
	struct rte_flow_item *sfx_items = NULL;
	struct mlx5_flow *dev_flow = NULL;
	struct rte_flow_attr sfx_attr = *attr;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5_flow_dv_sample_resource *sample_res;
	struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
	struct mlx5_flow_tbl_resource *sfx_tbl;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
#endif
	size_t act_size;
	size_t item_size;
	uint32_t fdb_tx = 0;
	int32_t tag_id = 0;
	int actions_n = 0;
	int sample_action_pos;
	int qrss_action_pos;
	int add_tag = 0;
	int modify_after_mirror = 0;
	uint16_t jump_table = 0;
	const uint32_t next_ft_step = 1;
	int ret = 0;
	struct mlx5_priv *item_port_priv = NULL;
	const struct rte_flow_item *item;

	if (priv->sampler_en)
		actions_n = flow_check_match_action(actions, attr,
					RTE_FLOW_ACTION_TYPE_SAMPLE,
					&sample_action_pos, &qrss_action_pos,
					&modify_after_mirror);
	if (actions_n) {
		/* The prefix actions must include sample, tag, end. */
		act_size = sizeof(struct rte_flow_action) * (actions_n * 2 + 1)
			   + sizeof(struct mlx5_rte_flow_action_set_tag);
		item_size = sizeof(struct rte_flow_item) * SAMPLE_SUFFIX_ITEM +
			    sizeof(struct mlx5_rte_flow_item_tag) * 2;
		sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size +
					  item_size), 0, SOCKET_ID_ANY);
		if (!sfx_actions)
			return rte_flow_error_set(error, ENOMEM,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "no memory to split "
						  "sample flow");
		for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
			if (item->type == RTE_FLOW_ITEM_TYPE_PORT_ID) {
				const struct rte_flow_item_port_id *spec;

				spec = (const struct rte_flow_item_port_id *)item->spec;
				if (spec)
					item_port_priv =
						mlx5_port_to_eswitch_info(spec->id, true);
				break;
			} else if (item->type == RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT) {
				const struct rte_flow_item_ethdev *spec;

				spec = (const struct rte_flow_item_ethdev *)item->spec;
				if (spec)
					item_port_priv =
						mlx5_port_to_eswitch_info(spec->port_id, true);
				break;
			} else if (item->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR) {
				const struct rte_flow_item_ethdev *spec;

				spec = (const struct rte_flow_item_ethdev *)item->spec;
				if (spec)
					item_port_priv =
						mlx5_port_to_eswitch_info(spec->port_id, true);
				break;
			}
		}
		/* The representor_id is UINT16_MAX for uplink. */
		fdb_tx = (attr->transfer &&
			  flow_source_vport_representor(priv, item_port_priv));
		/*
		 * When reg_c_preserve is set, metadata registers Cx preserve
		 * their value even through packet duplication.
		 */
		add_tag = (!fdb_tx ||
			   priv->sh->cdev->config.hca_attr.reg_c_preserve);
		if (add_tag)
			sfx_items = (struct rte_flow_item *)((char *)sfx_actions
					+ act_size);
		if (modify_after_mirror)
			jump_table = attr->group * MLX5_FLOW_TABLE_FACTOR +
				     next_ft_step;
		pre_actions = sfx_actions + actions_n;
		tag_id = flow_sample_split_prep(dev, add_tag, items, sfx_items,
						actions, sfx_actions,
						pre_actions, actions_n,
						sample_action_pos,
						qrss_action_pos, jump_table,
						error);
		if (tag_id < 0 || (add_tag && !tag_id)) {
			ret = -rte_errno;
			goto exit;
		}
		if (modify_after_mirror)
			flow_split_info->skip_scale =
					1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT;
		/* Add the prefix subflow. */
		ret = flow_create_split_inner(dev, flow, &dev_flow, attr,
					      items, pre_actions,
					      flow_split_info, error);
		if (ret) {
			ret = -rte_errno;
			goto exit;
		}
		dev_flow->handle->split_flow_id = tag_id;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
		if (!modify_after_mirror) {
			/* Set the sfx group attr. */
			sample_res = (struct mlx5_flow_dv_sample_resource *)
						dev_flow->dv.sample_res;
			sfx_tbl = (struct mlx5_flow_tbl_resource *)
						sample_res->normal_path_tbl;
			sfx_tbl_data = container_of(sfx_tbl,
						struct mlx5_flow_tbl_data_entry,
						tbl);
			sfx_attr.group = sfx_attr.transfer ?
				(sfx_tbl_data->level - 1) : sfx_tbl_data->level;
		} else {
			MLX5_ASSERT(attr->transfer);
			sfx_attr.group = jump_table;
		}
		flow_split_info->prefix_layers =
				flow_get_prefix_layer_flags(dev_flow);
		MLX5_ASSERT(wks);
		flow_split_info->prefix_mark |= wks->mark;
		/* The suffix group level has already been scaled with the
		 * factor; set MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1
		 * to avoid scaling it again in translation.
		 */
		flow_split_info->skip_scale = 1 << MLX5_SCALE_FLOW_GROUP_BIT;
#endif
	}
	/* Add the suffix subflow. */
	ret = flow_create_split_meter(dev, flow, &sfx_attr,
				      sfx_items ? sfx_items : items,
				      sfx_actions ? sfx_actions : actions,
				      flow_split_info, error);
exit:
	if (sfx_actions)
		mlx5_free(sfx_actions);
	return ret;
}

/**
 * Split the flow to subflow set. The splitters might be linked
 * in the chain, like this:
 * flow_create_split_outer() calls:
 *   flow_create_split_meter() calls:
 *     flow_create_split_metadata(meter_subflow_0) calls:
 *       flow_create_split_inner(metadata_subflow_0)
 *       flow_create_split_inner(metadata_subflow_1)
 *       flow_create_split_inner(metadata_subflow_2)
 *     flow_create_split_metadata(meter_subflow_1) calls:
 *       flow_create_split_inner(metadata_subflow_0)
 *       flow_create_split_inner(metadata_subflow_1)
 *       flow_create_split_inner(metadata_subflow_2)
 *
 * This provides a flexible way to add new levels of flow splitting.
 * All successfully created subflows are included in the parent flow
 * dev_flow list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Parent flow structure pointer.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[in] flow_split_info
 *   Pointer to flow split info structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
7265 * @return 7266 * 0 on success, negative value otherwise 7267 */ 7268 static int 7269 flow_create_split_outer(struct rte_eth_dev *dev, 7270 struct rte_flow *flow, 7271 const struct rte_flow_attr *attr, 7272 const struct rte_flow_item items[], 7273 const struct rte_flow_action actions[], 7274 struct mlx5_flow_split_info *flow_split_info, 7275 struct rte_flow_error *error) 7276 { 7277 int ret; 7278 7279 ret = flow_create_split_sample(dev, flow, attr, items, 7280 actions, flow_split_info, error); 7281 MLX5_ASSERT(ret <= 0); 7282 return ret; 7283 } 7284 7285 static inline struct mlx5_flow_tunnel * 7286 flow_tunnel_from_rule(const struct mlx5_flow *flow) 7287 { 7288 struct mlx5_flow_tunnel *tunnel; 7289 7290 #pragma GCC diagnostic push 7291 #pragma GCC diagnostic ignored "-Wcast-qual" 7292 tunnel = (typeof(tunnel))flow->tunnel; 7293 #pragma GCC diagnostic pop 7294 7295 return tunnel; 7296 } 7297 7298 /** 7299 * Create a flow and add it to @p list. 7300 * 7301 * @param dev 7302 * Pointer to Ethernet device. 7303 * @param list 7304 * Pointer to a TAILQ flow list. If this parameter NULL, 7305 * no list insertion occurred, flow is just created, 7306 * this is caller's responsibility to track the 7307 * created flow. 7308 * @param[in] attr 7309 * Flow rule attributes. 7310 * @param[in] items 7311 * Pattern specification (list terminated by the END pattern item). 7312 * @param[in] actions 7313 * Associated actions (list terminated by the END action). 7314 * @param[in] external 7315 * This flow rule is created by request external to PMD. 7316 * @param[out] error 7317 * Perform verbose error reporting if not NULL. 7318 * 7319 * @return 7320 * A flow index on success, 0 otherwise and rte_errno is set. 7321 */ 7322 uintptr_t 7323 flow_legacy_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type, 7324 const struct rte_flow_attr *attr, 7325 const struct rte_flow_item items[], 7326 const struct rte_flow_action original_actions[], 7327 bool external, struct rte_flow_error *error) 7328 { 7329 struct mlx5_priv *priv = dev->data->dev_private; 7330 struct rte_flow *flow = NULL; 7331 struct mlx5_flow *dev_flow; 7332 const struct rte_flow_action_rss *rss = NULL; 7333 struct mlx5_translated_action_handle 7334 indir_actions[MLX5_MAX_INDIRECT_ACTIONS]; 7335 int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS; 7336 union { 7337 struct mlx5_flow_expand_rss buf; 7338 uint8_t buffer[8192]; 7339 } expand_buffer; 7340 union { 7341 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; 7342 uint8_t buffer[2048]; 7343 } actions_rx; 7344 union { 7345 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; 7346 uint8_t buffer[2048]; 7347 } actions_hairpin_tx; 7348 union { 7349 struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS]; 7350 uint8_t buffer[2048]; 7351 } items_tx; 7352 struct mlx5_rte_flow_item_sq sq_specs[RTE_MAX_QUEUES_PER_PORT]; 7353 struct mlx5_flow_expand_rss *buf = &expand_buffer.buf; 7354 struct mlx5_flow_rss_desc *rss_desc; 7355 const struct rte_flow_action *p_actions_rx; 7356 uint32_t i; 7357 uint32_t idx = 0; 7358 int hairpin_flow; 7359 struct rte_flow_attr attr_tx = { .priority = 0 }; 7360 const struct rte_flow_action *actions; 7361 struct rte_flow_action *translated_actions = NULL; 7362 struct mlx5_flow_tunnel *tunnel; 7363 struct tunnel_default_miss_ctx default_miss_ctx = { 0, }; 7364 struct mlx5_flow_workspace *wks = mlx5_flow_push_thread_workspace(); 7365 struct mlx5_flow_split_info flow_split_info = { 7366 .external = !!external, 7367 .skip_scale = 0, 7368 .flow_idx = 0, 7369 .prefix_mark = 0, 
7370 .prefix_layers = 0, 7371 .table_id = 0 7372 }; 7373 int ret; 7374 struct mlx5_shared_action_rss *shared_rss_action; 7375 7376 if (!wks) 7377 return rte_flow_error_set(error, ENOMEM, 7378 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 7379 NULL, 7380 "failed to push flow workspace"); 7381 memset(indir_actions, 0, sizeof(indir_actions)); 7382 rss_desc = &wks->rss_desc; 7383 ret = flow_action_handles_translate(dev, original_actions, 7384 indir_actions, 7385 &indir_actions_n, 7386 &translated_actions, error); 7387 if (ret < 0) { 7388 MLX5_ASSERT(translated_actions == NULL); 7389 return 0; 7390 } 7391 actions = translated_actions ? translated_actions : original_actions; 7392 p_actions_rx = actions; 7393 hairpin_flow = flow_check_hairpin_split(dev, attr, actions); 7394 ret = flow_drv_validate(dev, attr, items, p_actions_rx, 7395 external, hairpin_flow, error); 7396 if (ret < 0) 7397 goto error_before_hairpin_split; 7398 flow = mlx5_ipool_zmalloc(priv->flows[type], &idx); 7399 if (!flow) { 7400 rte_errno = ENOMEM; 7401 goto error_before_hairpin_split; 7402 } 7403 if (hairpin_flow > 0) { 7404 if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) { 7405 rte_errno = EINVAL; 7406 goto error_before_hairpin_split; 7407 } 7408 flow_hairpin_split(dev, actions, actions_rx.actions, 7409 actions_hairpin_tx.actions, items_tx.items, 7410 idx); 7411 p_actions_rx = actions_rx.actions; 7412 } 7413 flow_split_info.flow_idx = idx; 7414 flow->drv_type = flow_get_drv_type(dev, attr); 7415 MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN && 7416 flow->drv_type < MLX5_FLOW_TYPE_MAX); 7417 memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue)); 7418 /* RSS Action only works on NIC RX domain */ 7419 if (attr->ingress) 7420 rss = flow_get_rss_action(dev, p_actions_rx); 7421 if (rss) { 7422 MLX5_ASSERT(rss->queue_num <= RTE_ETH_RSS_RETA_SIZE_512); 7423 rss_desc->symmetric_hash_function = MLX5_RSS_IS_SYMM(rss->func); 7424 /* 7425 * The following information is required by 7426 * mlx5_flow_hashfields_adjust() in advance. 7427 */ 7428 rss_desc->level = rss->level; 7429 /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */ 7430 rss_desc->types = !rss->types ? RTE_ETH_RSS_IP : rss->types; 7431 } 7432 flow->dev_handles = 0; 7433 if (rss && rss->types) { 7434 unsigned int graph_root; 7435 7436 graph_root = find_graph_root(rss->level); 7437 ret = mlx5_flow_expand_rss(buf, sizeof(expand_buffer.buffer), 7438 items, rss->types, 7439 mlx5_support_expansion, graph_root); 7440 MLX5_ASSERT(ret > 0 && 7441 (unsigned int)ret < sizeof(expand_buffer.buffer)); 7442 if (rte_log_can_log(mlx5_logtype, RTE_LOG_DEBUG)) { 7443 for (i = 0; i < buf->entries; ++i) 7444 mlx5_dbg__print_pattern(buf->entry[i].pattern); 7445 } 7446 } else { 7447 ret = mlx5_flow_expand_sqn((struct mlx5_flow_expand_sqn *)buf, 7448 sizeof(expand_buffer.buffer), 7449 items, sq_specs); 7450 if (ret) { 7451 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, 7452 NULL, "not enough memory for rte_flow"); 7453 goto error; 7454 } 7455 if (buf->entries == 0) { 7456 buf->entries = 1; 7457 buf->entry[0].pattern = (void *)(uintptr_t)items; 7458 } 7459 } 7460 rss_desc->shared_rss = flow_get_shared_rss_action(dev, indir_actions, 7461 indir_actions_n); 7462 for (i = 0; i < buf->entries; ++i) { 7463 /* Initialize flow split data. */ 7464 flow_split_info.prefix_layers = 0; 7465 flow_split_info.prefix_mark = 0; 7466 flow_split_info.skip_scale = 0; 7467 /* 7468 * The splitter may create multiple dev_flows, 7469 * depending on configuration. 
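		 * For example (illustrative), when RSS covers both IP and UDP,
		 * the expansion above may have produced one entry for
		 * ETH / IPV4 and one for ETH / IPV4 / UDP; each entry is
		 * split independently here.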
		 * In the simplest case it just creates the unmodified
		 * original flow.
		 */
		ret = flow_create_split_outer(dev, flow, attr,
					      buf->entry[i].pattern,
					      p_actions_rx, &flow_split_info,
					      error);
		if (ret < 0)
			goto error;
		if (is_flow_tunnel_steer_rule(wks->flows[0].tof_type)) {
			ret = flow_tunnel_add_default_miss(dev, flow, attr,
							   p_actions_rx,
							   idx,
							   wks->flows[0].tunnel,
							   &default_miss_ctx,
							   error);
			if (ret < 0) {
				mlx5_free(default_miss_ctx.queue);
				goto error;
			}
		}
	}
	/* Create the tx flow. */
	if (hairpin_flow) {
		attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
		attr_tx.ingress = 0;
		attr_tx.egress = 1;
		dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items,
					    actions_hairpin_tx.actions,
					    idx, error);
		if (!dev_flow)
			goto error;
		dev_flow->flow = flow;
		dev_flow->external = 0;
		SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
			      dev_flow->handle, next);
		ret = flow_drv_translate(dev, dev_flow, &attr_tx,
					 items_tx.items,
					 actions_hairpin_tx.actions, error);
		if (ret < 0)
			goto error;
	}
	/*
	 * Update the metadata register copy table. If extensive
	 * metadata feature is enabled and registers are supported
	 * we might create the extra rte_flow for each unique
	 * MARK/FLAG action ID.
	 *
	 * The table is updated for ingress and transfer flows only, because
	 * egress flows belong to a different device and the copy table
	 * should be updated in the peer NIC Rx domain.
	 */
	if ((attr->ingress || attr->transfer) &&
	    (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) {
		ret = flow_mreg_update_copy_table(dev, flow, actions, error);
		if (ret)
			goto error;
	}
	/*
	 * If the flow is external (from the application) OR the device is
	 * started, OR it is an mreg discover flow, then apply immediately.
	 */
	if (external || dev->data->dev_started ||
	    (attr->group == MLX5_FLOW_MREG_CP_TABLE_GROUP &&
	     attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)) {
		ret = flow_drv_apply(dev, flow, error);
		if (ret < 0)
			goto error;
	}
	flow->type = type;
	flow_rxq_flags_set(dev, flow);
	rte_free(translated_actions);
	tunnel = flow_tunnel_from_rule(wks->flows);
	if (tunnel) {
		flow->tunnel = 1;
		flow->tunnel_id = tunnel->tunnel_id;
		rte_atomic_fetch_add_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed);
		mlx5_free(default_miss_ctx.queue);
	}
	mlx5_flow_pop_thread_workspace();
	return idx;
error:
	MLX5_ASSERT(flow);
	ret = rte_errno; /* Save rte_errno before cleanup. */
	flow_mreg_del_copy_action(dev, flow);
	flow_drv_destroy(dev, flow);

	if (rss_desc->shared_rss) {
		shared_rss_action = (struct mlx5_shared_action_rss *)
			mlx5_ipool_get
			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
			 rss_desc->shared_rss);
		if (shared_rss_action)
			rte_atomic_fetch_sub_explicit(&(shared_rss_action)->refcnt, 1,
						      rte_memory_order_relaxed);
	}
	mlx5_ipool_free(priv->flows[type], idx);
	rte_errno = ret; /* Restore rte_errno. */
error_before_hairpin_split:
	mlx5_flow_pop_thread_workspace();
	rte_free(translated_actions);
	return 0;
}

/**
 * Create a dedicated flow rule on e-switch table 0 (root table), to direct
 * all incoming packets to table 1.
7578 * 7579 * Other flow rules, requested for group n, will be created in 7580 * e-switch table n+1. 7581 * Jump action to e-switch group n will be created to group n+1. 7582 * 7583 * Used when working in switchdev mode, to utilise advantages of table 1 7584 * and above. 7585 * 7586 * @param dev 7587 * Pointer to Ethernet device. 7588 * 7589 * @return 7590 * Pointer to flow on success, NULL otherwise and rte_errno is set. 7591 */ 7592 struct rte_flow * 7593 mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev) 7594 { 7595 const struct rte_flow_attr attr = { 7596 .group = 0, 7597 .priority = 0, 7598 .ingress = 0, 7599 .egress = 0, 7600 .transfer = 1, 7601 }; 7602 const struct rte_flow_item pattern = { 7603 .type = RTE_FLOW_ITEM_TYPE_END, 7604 }; 7605 struct rte_flow_action_jump jump = { 7606 .group = 1, 7607 }; 7608 const struct rte_flow_action actions[] = { 7609 { 7610 .type = RTE_FLOW_ACTION_TYPE_JUMP, 7611 .conf = &jump, 7612 }, 7613 { 7614 .type = RTE_FLOW_ACTION_TYPE_END, 7615 }, 7616 }; 7617 struct rte_flow_error error; 7618 7619 return (void *)(uintptr_t)mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_CTL, 7620 &attr, &pattern, 7621 actions, false, &error); 7622 } 7623 7624 /** 7625 * Create a dedicated flow rule on e-switch table 1, matches ESW manager 7626 * and sq number, directs all packets to peer vport. 7627 * 7628 * @param dev 7629 * Pointer to Ethernet device. 7630 * @param sq_num 7631 * SQ number. 7632 * 7633 * @return 7634 * Flow ID on success, 0 otherwise and rte_errno is set. 7635 */ 7636 uint32_t 7637 mlx5_flow_create_devx_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sq_num) 7638 { 7639 struct rte_flow_attr attr = { 7640 .group = 0, 7641 .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR, 7642 .ingress = 0, 7643 .egress = 0, 7644 .transfer = 1, 7645 }; 7646 struct rte_flow_item_port_id port_spec = { 7647 .id = MLX5_PORT_ESW_MGR, 7648 }; 7649 struct mlx5_rte_flow_item_sq sq_spec = { 7650 .queue = sq_num, 7651 }; 7652 struct rte_flow_item pattern[] = { 7653 { 7654 .type = RTE_FLOW_ITEM_TYPE_PORT_ID, 7655 .spec = &port_spec, 7656 }, 7657 { 7658 .type = (enum rte_flow_item_type) 7659 MLX5_RTE_FLOW_ITEM_TYPE_SQ, 7660 .spec = &sq_spec, 7661 }, 7662 { 7663 .type = RTE_FLOW_ITEM_TYPE_END, 7664 }, 7665 }; 7666 struct rte_flow_action_jump jump = { 7667 .group = 1, 7668 }; 7669 struct rte_flow_action_port_id port = { 7670 .id = dev->data->port_id, 7671 }; 7672 struct rte_flow_action actions[] = { 7673 { 7674 .type = RTE_FLOW_ACTION_TYPE_JUMP, 7675 .conf = &jump, 7676 }, 7677 { 7678 .type = RTE_FLOW_ACTION_TYPE_END, 7679 }, 7680 }; 7681 struct rte_flow_error error; 7682 7683 /* 7684 * Creates group 0, highest priority jump flow. 7685 * Matches txq to bypass kernel packets. 7686 */ 7687 if (mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern, actions, 7688 false, &error) == 0) 7689 return 0; 7690 /* Create group 1, lowest priority redirect flow for txq. */ 7691 attr.group = 1; 7692 actions[0].conf = &port; 7693 actions[0].type = RTE_FLOW_ACTION_TYPE_PORT_ID; 7694 return mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern, 7695 actions, false, &error); 7696 } 7697 7698 /** 7699 * Validate a flow supported by the NIC. 
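 * Indirect action handles are translated to the actions they currently
 * reference first, then validation is dispatched to the flow driver.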
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
int
mlx5_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action original_actions[],
		   struct rte_flow_error *error)
{
	int hairpin_flow;
	struct mlx5_translated_action_handle
		indir_actions[MLX5_MAX_INDIRECT_ACTIONS];
	int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS;
	const struct rte_flow_action *actions;
	struct rte_flow_action *translated_actions = NULL;
	int ret = flow_action_handles_translate(dev, original_actions,
						indir_actions,
						&indir_actions_n,
						&translated_actions, error);

	if (ret)
		return ret;
	actions = translated_actions ? translated_actions : original_actions;
	hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
	ret = flow_drv_validate(dev, attr, items, actions,
				true, hairpin_flow, error);
	rte_free(translated_actions);
	return ret;
}

static int
mlx5_flow_cache_flow_info(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const uint32_t orig_prio,
			  const struct rte_flow_item *items,
			  const struct rte_flow_action *actions,
			  uint32_t flow_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_pmd_mlx5_flow_engine_mode_info *mode_info = &priv->mode_info;
	struct mlx5_dv_flow_info *flow_info, *tmp_info;
	struct rte_flow_error error;
	int len, ret;

	flow_info = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*flow_info), 0, SOCKET_ID_ANY);
	if (!flow_info) {
		DRV_LOG(ERR, "Not enough memory for flow_info caching.");
		return -1;
	}
	flow_info->orig_prio = orig_prio;
	flow_info->attr = *attr;
	/* A standby mode rule is always saved in the low priority entry. */
	flow_info->flow_idx_low_prio = flow_idx;

	/* Store matching items. */
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, NULL, 0, items, &error);
	if (ret <= 0) {
		DRV_LOG(ERR, "Can't get items length.");
		goto end;
	}
	len = RTE_ALIGN(ret, 16);
	flow_info->items = mlx5_malloc(MLX5_MEM_ZERO, len, 0, SOCKET_ID_ANY);
	if (!flow_info->items) {
		DRV_LOG(ERR, "Not enough memory for items caching.");
		goto end;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, flow_info->items, ret, items, &error);
	if (ret <= 0) {
		DRV_LOG(ERR, "Can't duplicate items.");
		goto end;
	}

	/* Store flow actions. */
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, actions, &error);
	if (ret <= 0) {
		DRV_LOG(ERR, "Can't get actions length.");
		goto end;
	}
	len = RTE_ALIGN(ret, 16);
	flow_info->actions = mlx5_malloc(MLX5_MEM_ZERO, len, 0, SOCKET_ID_ANY);
	if (!flow_info->actions) {
		DRV_LOG(ERR, "Not enough memory for actions caching.");
		goto end;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, flow_info->actions, ret, actions, &error);
	if (ret <= 0) {
		DRV_LOG(ERR, "Can't duplicate actions.");
		goto end;
	}

	/* Insert at the list end.
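	 * (LIST from sys/queue.h keeps no tail pointer, so the end is found
	 * by walking LIST_NEXT; the walk is linear, but caching only happens
	 * on the slow standby path, so this is acceptable. Editorial note.)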
*/ 7793 if (LIST_EMPTY(&mode_info->hot_upgrade)) { 7794 LIST_INSERT_HEAD(&mode_info->hot_upgrade, flow_info, next); 7795 } else { 7796 tmp_info = LIST_FIRST(&mode_info->hot_upgrade); 7797 while (LIST_NEXT(tmp_info, next)) 7798 tmp_info = LIST_NEXT(tmp_info, next); 7799 LIST_INSERT_AFTER(tmp_info, flow_info, next); 7800 } 7801 return 0; 7802 end: 7803 if (flow_info->items) 7804 mlx5_free(flow_info->items); 7805 if (flow_info->actions) 7806 mlx5_free(flow_info->actions); 7807 mlx5_free(flow_info); 7808 return -1; 7809 } 7810 7811 static int 7812 mlx5_flow_cache_flow_toggle(struct rte_eth_dev *dev, bool orig_prio) 7813 { 7814 struct mlx5_priv *priv = dev->data->dev_private; 7815 struct rte_pmd_mlx5_flow_engine_mode_info *mode_info = &priv->mode_info; 7816 struct mlx5_dv_flow_info *flow_info; 7817 struct rte_flow_attr attr; 7818 struct rte_flow_error error; 7819 struct rte_flow *high, *low; 7820 7821 flow_info = LIST_FIRST(&mode_info->hot_upgrade); 7822 while (flow_info) { 7823 /* DUP flow may have the same priority. */ 7824 if (flow_info->orig_prio != flow_info->attr.priority) { 7825 attr = flow_info->attr; 7826 if (orig_prio) 7827 attr.priority = flow_info->orig_prio; 7828 flow_info->flow_idx_high_prio = mlx5_flow_list_create(dev, 7829 MLX5_FLOW_TYPE_GEN, &attr, 7830 flow_info->items, flow_info->actions, 7831 true, &error); 7832 if (!flow_info->flow_idx_high_prio) { 7833 DRV_LOG(ERR, "Priority toggle failed internally."); 7834 goto err; 7835 } 7836 } 7837 flow_info = LIST_NEXT(flow_info, next); 7838 } 7839 /* Delete the low priority rules and swap the flow handle. */ 7840 flow_info = LIST_FIRST(&mode_info->hot_upgrade); 7841 while (flow_info) { 7842 MLX5_ASSERT(flow_info->flow_idx_low_prio); 7843 if (flow_info->orig_prio != flow_info->attr.priority) { 7844 high = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN], 7845 flow_info->flow_idx_high_prio); 7846 low = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN], 7847 flow_info->flow_idx_low_prio); 7848 if (high && low) { 7849 RTE_SWAP(*low, *high); 7850 mlx5_flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, 7851 flow_info->flow_idx_low_prio); 7852 flow_info->flow_idx_high_prio = 0; 7853 } 7854 } 7855 flow_info = LIST_NEXT(flow_info, next); 7856 } 7857 return 0; 7858 err: 7859 /* Destroy preceding successful high priority rules. */ 7860 flow_info = LIST_FIRST(&mode_info->hot_upgrade); 7861 while (flow_info) { 7862 if (flow_info->orig_prio != flow_info->attr.priority) { 7863 if (flow_info->flow_idx_high_prio) 7864 mlx5_flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, 7865 flow_info->flow_idx_high_prio); 7866 else 7867 break; 7868 flow_info->flow_idx_high_prio = 0; 7869 } 7870 flow_info = LIST_NEXT(flow_info, next); 7871 } 7872 return -1; 7873 } 7874 7875 /** 7876 * Set the mode of the flow engine of a process to active or standby during live migration. 7877 * 7878 * @param[in] mode 7879 * MLX5 flow engine mode, @see `enum rte_pmd_mlx5_flow_engine_mode`. 7880 * @param[in] flags 7881 * Flow engine mode specific flags. 7882 * 7883 * @return 7884 * Negative value on error, positive on success. 
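 *
 * A usage sketch for a hot-upgrade sequence (application side, error
 * handling omitted; illustrative only):
 * @code
 * // New process: enter standby mode; rules created now are cached.
 * rte_pmd_mlx5_flow_engine_set_mode(RTE_PMD_MLX5_FLOW_ENGINE_MODE_STANDBY,
 *	RTE_PMD_MLX5_FLOW_ENGINE_FLAG_STANDBY_DUP_INGRESS);
 * // ... re-create the flow rules while the old process still serves ...
 * // Take over: cached rules are toggled back to their original priority.
 * rte_pmd_mlx5_flow_engine_set_mode(RTE_PMD_MLX5_FLOW_ENGINE_MODE_ACTIVE, 0);
 * @endcode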
7885 */ 7886 int 7887 rte_pmd_mlx5_flow_engine_set_mode(enum rte_pmd_mlx5_flow_engine_mode mode, uint32_t flags) 7888 { 7889 struct mlx5_priv *priv; 7890 struct rte_pmd_mlx5_flow_engine_mode_info *mode_info; 7891 struct mlx5_dv_flow_info *flow_info, *tmp_info; 7892 uint16_t port, port_id; 7893 uint16_t toggle_num = 0; 7894 struct rte_eth_dev *dev; 7895 enum rte_pmd_mlx5_flow_engine_mode orig_mode; 7896 uint32_t orig_flags; 7897 bool need_toggle = false; 7898 7899 /* Check if flags combinations are supported. */ 7900 if (flags && flags != RTE_PMD_MLX5_FLOW_ENGINE_FLAG_STANDBY_DUP_INGRESS) { 7901 DRV_LOG(ERR, "Doesn't support such flags %u", flags); 7902 return -1; 7903 } 7904 MLX5_ETH_FOREACH_DEV(port, NULL) { 7905 dev = &rte_eth_devices[port]; 7906 priv = dev->data->dev_private; 7907 mode_info = &priv->mode_info; 7908 /* No mode change. Assume all devices hold the same mode. */ 7909 if (mode_info->mode == mode) { 7910 DRV_LOG(INFO, "Process flow engine has been in mode %u", mode); 7911 if (mode_info->mode_flag != flags && !LIST_EMPTY(&mode_info->hot_upgrade)) { 7912 DRV_LOG(ERR, "Port %u has rule cache with different flag %u\n", 7913 port, mode_info->mode_flag); 7914 orig_mode = mode_info->mode; 7915 orig_flags = mode_info->mode_flag; 7916 goto err; 7917 } 7918 mode_info->mode_flag = flags; 7919 toggle_num++; 7920 continue; 7921 } 7922 /* Active -> standby. */ 7923 if (mode == RTE_PMD_MLX5_FLOW_ENGINE_MODE_STANDBY) { 7924 if (!LIST_EMPTY(&mode_info->hot_upgrade)) { 7925 DRV_LOG(ERR, "Cached rule existed"); 7926 orig_mode = mode_info->mode; 7927 orig_flags = mode_info->mode_flag; 7928 goto err; 7929 } 7930 mode_info->mode_flag = flags; 7931 mode_info->mode = mode; 7932 toggle_num++; 7933 /* Standby -> active. */ 7934 } else if (mode == RTE_PMD_MLX5_FLOW_ENGINE_MODE_ACTIVE) { 7935 if (LIST_EMPTY(&mode_info->hot_upgrade)) { 7936 DRV_LOG(INFO, "No cached rule existed"); 7937 } else { 7938 if (mlx5_flow_cache_flow_toggle(dev, true)) { 7939 orig_mode = mode_info->mode; 7940 orig_flags = mode_info->mode_flag; 7941 need_toggle = true; 7942 goto err; 7943 } 7944 } 7945 toggle_num++; 7946 } 7947 } 7948 if (mode == RTE_PMD_MLX5_FLOW_ENGINE_MODE_ACTIVE) { 7949 /* Clear cache flow rules. */ 7950 MLX5_ETH_FOREACH_DEV(port, NULL) { 7951 priv = rte_eth_devices[port].data->dev_private; 7952 mode_info = &priv->mode_info; 7953 flow_info = LIST_FIRST(&mode_info->hot_upgrade); 7954 while (flow_info) { 7955 tmp_info = LIST_NEXT(flow_info, next); 7956 LIST_REMOVE(flow_info, next); 7957 mlx5_free(flow_info->actions); 7958 mlx5_free(flow_info->items); 7959 mlx5_free(flow_info); 7960 flow_info = tmp_info; 7961 } 7962 MLX5_ASSERT(LIST_EMPTY(&mode_info->hot_upgrade)); 7963 } 7964 } 7965 return toggle_num; 7966 err: 7967 /* Rollback all preceding successful ports. */ 7968 MLX5_ETH_FOREACH_DEV(port_id, NULL) { 7969 if (port_id == port) 7970 break; 7971 priv = rte_eth_devices[port_id].data->dev_private; 7972 mode_info = &priv->mode_info; 7973 if (need_toggle && !LIST_EMPTY(&mode_info->hot_upgrade) && 7974 mlx5_flow_cache_flow_toggle(dev, false)) 7975 return -EPERM; 7976 mode_info->mode = orig_mode; 7977 mode_info->mode_flag = orig_flags; 7978 } 7979 return -EINVAL; 7980 } 7981 /** 7982 * Create a flow. 
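 *
 * In standby mode the rule may be inserted with its priority shifted by
 * one, while the original priority is cached so that it can be restored
 * when this process becomes active; see mlx5_flow_cache_flow_info().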
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
struct rte_flow *
mlx5_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_attr *new_attr = (void *)(uintptr_t)attr;
	uint32_t prio = attr->priority;
	uintptr_t flow_idx;

	/*
	 * If the device is not started yet, it is not allowed to create a
	 * flow from the application. PMD default flows and traffic control
	 * flows are not affected.
	 */
	if (unlikely(!dev->data->dev_started)) {
		DRV_LOG(DEBUG, "port %u is not started when "
			"inserting a flow", dev->data->port_id);
		rte_flow_error_set(error, ENODEV,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "port not started");
		return NULL;
	}
	if (unlikely(mlx5_need_cache_flow(priv, attr))) {
		if (attr->transfer ||
		    (attr->ingress && !(priv->mode_info.mode_flag &
		     RTE_PMD_MLX5_FLOW_ENGINE_FLAG_STANDBY_DUP_INGRESS)))
			new_attr->priority += 1;
	}
	flow_idx = mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_GEN, attr, items, actions,
					 true, error);
	if (!flow_idx)
		return NULL;
	if (unlikely(mlx5_need_cache_flow(priv, attr))) {
		if (mlx5_flow_cache_flow_info(dev, attr, prio, items, actions, flow_idx)) {
			mlx5_flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx);
			flow_idx = 0;
		}
	}
	return (void *)(uintptr_t)flow_idx;
}

uintptr_t
mlx5_flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
		      const struct rte_flow_attr *attr,
		      const struct rte_flow_item items[],
		      const struct rte_flow_action actions[],
		      bool external, struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops;
	enum mlx5_flow_drv_type drv_type = flow_get_drv_type(dev, attr);

	fops = flow_get_drv_ops(drv_type);
	return fops->list_create(dev, type, attr, items, actions, external,
				 error);
}

/**
 * Destroy a flow in a list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow_idx
 *   Index of flow to destroy.
 */
void
flow_legacy_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
			 uintptr_t flow_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow *flow = mlx5_ipool_get(priv->flows[type], (uint32_t)flow_idx);

	if (!flow)
		return;
	MLX5_ASSERT((type >= MLX5_FLOW_TYPE_CTL) && (type < MLX5_FLOW_TYPE_MAXI));
	MLX5_ASSERT(flow->type == type);
	/*
	 * Update RX queue flags only if port is started, otherwise it is
	 * already clean.
	 */
	if (dev->data->dev_started)
		flow_rxq_flags_trim(dev, flow);
	flow_drv_destroy(dev, flow);
	if (flow->tunnel) {
		struct mlx5_flow_tunnel *tunnel;

		tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
		RTE_VERIFY(tunnel);
		if (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1,
						    rte_memory_order_relaxed) - 1))
			mlx5_flow_tunnel_free(dev, tunnel);
	}
	flow_mreg_del_copy_action(dev, flow);
	mlx5_ipool_free(priv->flows[type], flow_idx);
}

void
mlx5_flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
		       uintptr_t flow_idx)
{
	const struct mlx5_flow_driver_ops *fops;
	struct rte_flow_attr attr = { .transfer = 0 };
	enum mlx5_flow_drv_type drv_type = flow_get_drv_type(dev, &attr);

	fops = flow_get_drv_ops(drv_type);
	fops->list_destroy(dev, type, flow_idx);
}

/**
 * Destroy all flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param type
 *   Flow type to be flushed.
 * @param active
 *   If flushing is called actively; when set, the number of flushed
 *   flows is logged.
 */
void
mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
		     bool active)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t num_flushed = 0, fidx = 1;
	struct rte_flow *flow;
	struct rte_pmd_mlx5_flow_engine_mode_info *mode_info = &priv->mode_info;
	struct mlx5_dv_flow_info *flow_info;

#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	if (priv->sh->config.dv_flow_en == 2 &&
	    type == MLX5_FLOW_TYPE_GEN) {
		flow_hw_q_flow_flush(dev, NULL);
	}
#endif
	MLX5_IPOOL_FOREACH(priv->flows[type], fidx, flow) {
		if (priv->sh->config.dv_flow_en == 2) {
			mlx5_flow_list_destroy(dev, type, (uintptr_t)flow);
		} else {
			mlx5_flow_list_destroy(dev, type, fidx);
		}
		if (unlikely(mlx5_need_cache_flow(priv, NULL) && type == MLX5_FLOW_TYPE_GEN)) {
			flow_info = LIST_FIRST(&mode_info->hot_upgrade);
			while (flow_info) {
				/* Remove the cache flow info. */
				if (flow_info->flow_idx_low_prio == (uint32_t)(uintptr_t)fidx) {
					MLX5_ASSERT(!flow_info->flow_idx_high_prio);
					LIST_REMOVE(flow_info, next);
					mlx5_free(flow_info->items);
					mlx5_free(flow_info->actions);
					mlx5_free(flow_info);
					break;
				}
				flow_info = LIST_NEXT(flow_info, next);
			}
		}
		num_flushed++;
	}
	if (active) {
		DRV_LOG(INFO, "port %u: %u flows flushed before stopping",
			dev->data->port_id, num_flushed);
	}
}

/**
 * Stop all default actions for flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_flow_stop_default(struct rte_eth_dev *dev)
{
#ifdef HAVE_MLX5_HWS_SUPPORT
	struct mlx5_priv *priv = dev->data->dev_private;

	if (priv->sh->config.dv_flow_en == 2) {
		mlx5_flow_nta_del_default_copy_action(dev);
		if (!rte_atomic_load_explicit(&priv->hws_mark_refcnt,
					      rte_memory_order_relaxed))
			flow_hw_rxq_flag_set(dev, false);
		return;
	}
#endif
	flow_mreg_del_default_copy_action(dev);
	flow_rxq_flags_clear(dev);
}

/**
 * Set rxq flag.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] enable
 *   Flag to enable or not.
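 *   (When true, MARK processing is enabled on all configured Rx queues;
 *   when false, it is cleared.)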
8184 */ 8185 void 8186 flow_hw_rxq_flag_set(struct rte_eth_dev *dev, bool enable) 8187 { 8188 struct mlx5_priv *priv = dev->data->dev_private; 8189 unsigned int i; 8190 8191 if ((!priv->mark_enabled && !enable) || 8192 (priv->mark_enabled && enable)) 8193 return; 8194 for (i = 0; i < priv->rxqs_n; ++i) { 8195 struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i); 8196 8197 /* With RXQ start/stop feature, RXQ might be stopped. */ 8198 if (!rxq_ctrl) 8199 continue; 8200 rxq_ctrl->rxq.mark = enable; 8201 } 8202 priv->mark_enabled = enable; 8203 } 8204 8205 /** 8206 * Start all default actions for flows. 8207 * 8208 * @param dev 8209 * Pointer to Ethernet device. 8210 * @return 8211 * 0 on success, a negative errno value otherwise and rte_errno is set. 8212 */ 8213 int 8214 mlx5_flow_start_default(struct rte_eth_dev *dev) 8215 { 8216 struct rte_flow_error error; 8217 #ifdef HAVE_MLX5_HWS_SUPPORT 8218 struct mlx5_priv *priv = dev->data->dev_private; 8219 8220 if (priv->sh->config.dv_flow_en == 2) 8221 return mlx5_flow_nta_add_default_copy_action(dev, &error); 8222 #endif 8223 /* Make sure default copy action (reg_c[0] -> reg_b) is created. */ 8224 return flow_mreg_add_default_copy_action(dev, &error); 8225 } 8226 8227 /** 8228 * Release key of thread specific flow workspace data. 8229 */ 8230 void 8231 flow_release_workspace(void *data) 8232 { 8233 struct mlx5_flow_workspace *wks = data; 8234 struct mlx5_flow_workspace *next; 8235 8236 while (wks) { 8237 next = wks->next; 8238 free(wks); 8239 wks = next; 8240 } 8241 } 8242 8243 /** 8244 * Get thread specific current flow workspace. 8245 * 8246 * @return pointer to thread specific flow workspace data, NULL on error. 8247 */ 8248 struct mlx5_flow_workspace* 8249 mlx5_flow_get_thread_workspace(void) 8250 { 8251 struct mlx5_flow_workspace *data; 8252 8253 data = mlx5_flow_os_get_specific_workspace(); 8254 MLX5_ASSERT(data && data->inuse); 8255 if (!data || !data->inuse) 8256 DRV_LOG(ERR, "flow workspace not initialized."); 8257 return data; 8258 } 8259 8260 /** 8261 * Allocate and init new flow workspace. 8262 * 8263 * @return pointer to flow workspace data, NULL on error. 8264 */ 8265 static struct mlx5_flow_workspace* 8266 flow_alloc_thread_workspace(void) 8267 { 8268 size_t data_size = RTE_ALIGN(sizeof(struct mlx5_flow_workspace), sizeof(long)); 8269 size_t rss_queue_array_size = sizeof(uint16_t) * RTE_ETH_RSS_RETA_SIZE_512; 8270 struct mlx5_flow_workspace *data = calloc(1, data_size + 8271 rss_queue_array_size); 8272 8273 if (!data) { 8274 DRV_LOG(ERR, "Failed to allocate flow workspace memory."); 8275 return NULL; 8276 } 8277 data->rss_desc.queue = RTE_PTR_ADD(data, data_size); 8278 return data; 8279 } 8280 8281 /** 8282 * Get new thread specific flow workspace. 8283 * 8284 * If current workspace inuse, create new one and set as current. 8285 * 8286 * @return pointer to thread specific flow workspace data, NULL on error. 
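 *
 * Callers pair it with mlx5_flow_pop_thread_workspace(), e.g. (sketch,
 * error handling trimmed):
 * @code
 * struct mlx5_flow_workspace *wks = mlx5_flow_push_thread_workspace();
 * if (!wks)
 *	return rte_flow_error_set(error, ENOMEM, ...);
 * // ... use wks->rss_desc, wks->fm and friends ...
 * mlx5_flow_pop_thread_workspace();
 * @endcode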
 */
struct mlx5_flow_workspace*
mlx5_flow_push_thread_workspace(void)
{
	struct mlx5_flow_workspace *curr;
	struct mlx5_flow_workspace *data;

	curr = mlx5_flow_os_get_specific_workspace();
	if (!curr) {
		data = flow_alloc_thread_workspace();
		if (!data)
			return NULL;
		mlx5_flow_os_workspace_gc_add(data);
	} else if (!curr->inuse) {
		data = curr;
	} else if (curr->next) {
		data = curr->next;
	} else {
		data = flow_alloc_thread_workspace();
		if (!data)
			return NULL;
		curr->next = data;
		data->prev = curr;
	}
	data->inuse = 1;
	data->flow_idx = 0;
	/* Set as current workspace. */
	if (mlx5_flow_os_set_specific_workspace(data))
		DRV_LOG(ERR, "Failed to set flow workspace to thread.");
	return data;
}

/**
 * Close current thread specific flow workspace.
 *
 * If a previous workspace is available, set it as current.
 */
void
mlx5_flow_pop_thread_workspace(void)
{
	struct mlx5_flow_workspace *data = mlx5_flow_get_thread_workspace();

	if (!data)
		return;
	if (!data->inuse) {
		DRV_LOG(ERR, "Failed to close unused flow workspace.");
		return;
	}
	data->inuse = 0;
	if (!data->prev)
		return;
	if (mlx5_flow_os_set_specific_workspace(data->prev))
		DRV_LOG(ERR, "Failed to set flow workspace to thread.");
}

/**
 * Verify the flow list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return the number of flows not released.
 */
int
mlx5_flow_verify(struct rte_eth_dev *dev __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow *flow;
	uint32_t idx = 0;
	int ret = 0, i;

	for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
		MLX5_IPOOL_FOREACH(priv->flows[i], idx, flow) {
			DRV_LOG(DEBUG, "port %u flow %p still referenced",
				dev->data->port_id, (void *)flow);
			ret++;
		}
	}
	return ret;
}

/**
 * Enable default hairpin egress flow.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param sq_num
 *   The SQ hw number.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
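 *
 * The installed rule is conceptually (sketch):
 *   pattern: MLX5_RTE_FLOW_ITEM_TYPE_SQ(queue == sq_num) / END
 *   actions: JUMP(group = MLX5_HAIRPIN_TX_TABLE) / END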
8380 */ 8381 int 8382 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, 8383 uint32_t sq_num) 8384 { 8385 const struct rte_flow_attr attr = { 8386 .egress = 1, 8387 .priority = 0, 8388 }; 8389 struct mlx5_rte_flow_item_sq queue_spec = { 8390 .queue = sq_num, 8391 }; 8392 struct mlx5_rte_flow_item_sq queue_mask = { 8393 .queue = UINT32_MAX, 8394 }; 8395 struct rte_flow_item items[] = { 8396 { 8397 .type = (enum rte_flow_item_type) 8398 MLX5_RTE_FLOW_ITEM_TYPE_SQ, 8399 .spec = &queue_spec, 8400 .last = NULL, 8401 .mask = &queue_mask, 8402 }, 8403 { 8404 .type = RTE_FLOW_ITEM_TYPE_END, 8405 }, 8406 }; 8407 struct rte_flow_action_jump jump = { 8408 .group = MLX5_HAIRPIN_TX_TABLE, 8409 }; 8410 struct rte_flow_action actions[2]; 8411 uint32_t flow_idx; 8412 struct rte_flow_error error; 8413 8414 actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP; 8415 actions[0].conf = &jump; 8416 actions[1].type = RTE_FLOW_ACTION_TYPE_END; 8417 flow_idx = mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_CTL, 8418 &attr, items, actions, false, &error); 8419 if (!flow_idx) { 8420 DRV_LOG(DEBUG, 8421 "Failed to create ctrl flow: rte_errno(%d)," 8422 " type(%d), message(%s)", 8423 rte_errno, error.type, 8424 error.message ? error.message : " (no stated reason)"); 8425 return -rte_errno; 8426 } 8427 return 0; 8428 } 8429 8430 /** 8431 * Enable a control flow configured from the control plane. 8432 * 8433 * @param dev 8434 * Pointer to Ethernet device. 8435 * @param eth_spec 8436 * An Ethernet flow spec to apply. 8437 * @param eth_mask 8438 * An Ethernet flow mask to apply. 8439 * @param vlan_spec 8440 * A VLAN flow spec to apply. 8441 * @param vlan_mask 8442 * A VLAN flow mask to apply. 8443 * 8444 * @return 8445 * 0 on success, a negative errno value otherwise and rte_errno is set. 8446 */ 8447 int 8448 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, 8449 struct rte_flow_item_eth *eth_spec, 8450 struct rte_flow_item_eth *eth_mask, 8451 struct rte_flow_item_vlan *vlan_spec, 8452 struct rte_flow_item_vlan *vlan_mask) 8453 { 8454 struct mlx5_priv *priv = dev->data->dev_private; 8455 const struct rte_flow_attr attr = { 8456 .ingress = 1, 8457 .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR, 8458 }; 8459 struct rte_flow_item items[] = { 8460 { 8461 .type = RTE_FLOW_ITEM_TYPE_ETH, 8462 .spec = eth_spec, 8463 .last = NULL, 8464 .mask = eth_mask, 8465 }, 8466 { 8467 .type = (vlan_spec) ? 
RTE_FLOW_ITEM_TYPE_VLAN : 8468 RTE_FLOW_ITEM_TYPE_END, 8469 .spec = vlan_spec, 8470 .last = NULL, 8471 .mask = vlan_mask, 8472 }, 8473 { 8474 .type = RTE_FLOW_ITEM_TYPE_END, 8475 }, 8476 }; 8477 uint16_t queue[priv->reta_idx_n]; 8478 struct rte_flow_action_rss action_rss = { 8479 .func = RTE_ETH_HASH_FUNCTION_DEFAULT, 8480 .level = 0, 8481 .types = priv->rss_conf.rss_hf, 8482 .key_len = priv->rss_conf.rss_key_len, 8483 .queue_num = priv->reta_idx_n, 8484 .key = priv->rss_conf.rss_key, 8485 .queue = queue, 8486 }; 8487 struct rte_flow_action actions[] = { 8488 { 8489 .type = RTE_FLOW_ACTION_TYPE_RSS, 8490 .conf = &action_rss, 8491 }, 8492 { 8493 .type = RTE_FLOW_ACTION_TYPE_END, 8494 }, 8495 }; 8496 uint32_t flow_idx; 8497 struct rte_flow_error error; 8498 unsigned int i; 8499 8500 if (!priv->reta_idx_n || !priv->rxqs_n) { 8501 return 0; 8502 } 8503 if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) 8504 action_rss.types = 0; 8505 for (i = 0; i != priv->reta_idx_n; ++i) 8506 queue[i] = (*priv->reta_idx)[i]; 8507 flow_idx = mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_CTL, 8508 &attr, items, actions, false, &error); 8509 if (!flow_idx) 8510 return -rte_errno; 8511 return 0; 8512 } 8513 8514 /** 8515 * Enable a flow control configured from the control plane. 8516 * 8517 * @param dev 8518 * Pointer to Ethernet device. 8519 * @param eth_spec 8520 * An Ethernet flow spec to apply. 8521 * @param eth_mask 8522 * An Ethernet flow mask to apply. 8523 * 8524 * @return 8525 * 0 on success, a negative errno value otherwise and rte_errno is set. 8526 */ 8527 int 8528 mlx5_ctrl_flow(struct rte_eth_dev *dev, 8529 struct rte_flow_item_eth *eth_spec, 8530 struct rte_flow_item_eth *eth_mask) 8531 { 8532 return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL); 8533 } 8534 8535 /** 8536 * Create default miss flow rule matching lacp traffic 8537 * 8538 * @param dev 8539 * Pointer to Ethernet device. 8540 * @param eth_spec 8541 * An Ethernet flow spec to apply. 8542 * 8543 * @return 8544 * 0 on success, a negative errno value otherwise and rte_errno is set. 8545 */ 8546 int 8547 mlx5_flow_lacp_miss(struct rte_eth_dev *dev) 8548 { 8549 /* 8550 * The LACP matching is done by only using ether type since using 8551 * a multicast dst mac causes kernel to give low priority to this flow. 8552 */ 8553 static const struct rte_flow_item_eth lacp_spec = { 8554 .hdr.ether_type = RTE_BE16(0x8809), 8555 }; 8556 static const struct rte_flow_item_eth lacp_mask = { 8557 .hdr.ether_type = 0xffff, 8558 }; 8559 const struct rte_flow_attr attr = { 8560 .ingress = 1, 8561 }; 8562 struct rte_flow_item items[] = { 8563 { 8564 .type = RTE_FLOW_ITEM_TYPE_ETH, 8565 .spec = &lacp_spec, 8566 .mask = &lacp_mask, 8567 }, 8568 { 8569 .type = RTE_FLOW_ITEM_TYPE_END, 8570 }, 8571 }; 8572 struct rte_flow_action actions[] = { 8573 { 8574 .type = (enum rte_flow_action_type) 8575 MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS, 8576 }, 8577 { 8578 .type = RTE_FLOW_ACTION_TYPE_END, 8579 }, 8580 }; 8581 struct rte_flow_error error; 8582 uint32_t flow_idx = mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_CTL, 8583 &attr, items, actions, 8584 false, &error); 8585 8586 if (!flow_idx) 8587 return -rte_errno; 8588 return 0; 8589 } 8590 8591 /** 8592 * Destroy a flow. 
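 * In standby mode the cached rule info attached to this flow, if any,
 * is released as well.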
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
int
mlx5_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_pmd_mlx5_flow_engine_mode_info *mode_info = &priv->mode_info;
	struct mlx5_dv_flow_info *flow_info;

	mlx5_flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
			       (uintptr_t)(void *)flow);
	if (unlikely(mlx5_need_cache_flow(priv, NULL))) {
		flow_info = LIST_FIRST(&mode_info->hot_upgrade);
		while (flow_info) {
			/* Remove the cache flow info. */
			if (flow_info->flow_idx_low_prio == (uint32_t)(uintptr_t)flow) {
				MLX5_ASSERT(!flow_info->flow_idx_high_prio);
				LIST_REMOVE(flow_info, next);
				mlx5_free(flow_info->items);
				mlx5_free(flow_info->actions);
				mlx5_free(flow_info);
				break;
			}
			flow_info = LIST_NEXT(flow_info, next);
		}
	}
	return 0;
}

/**
 * Destroy all flows.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
int
mlx5_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error __rte_unused)
{
	mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, false);
	return 0;
}

/**
 * Isolated mode.
 *
 * @see rte_flow_isolate()
 * @see rte_flow_ops
 */
int
mlx5_flow_isolate(struct rte_eth_dev *dev,
		  int enable,
		  struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (dev->data->dev_started) {
		rte_flow_error_set(error, EBUSY,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "port must be stopped first");
		return -rte_errno;
	}
	if (!enable && !priv->sh->config.repr_matching)
		return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "isolated mode cannot be disabled when "
					  "representor matching is disabled");
	priv->isolated = !!enable;
	if (enable)
		dev->dev_ops = &mlx5_dev_ops_isolate;
	else
		dev->dev_ops = &mlx5_dev_ops;

	dev->rx_descriptor_status = mlx5_rx_descriptor_status;
	dev->tx_descriptor_status = mlx5_tx_descriptor_status;

	return 0;
}

/**
 * Query a flow.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_drv_query(struct rte_eth_dev *dev,
	       struct rte_flow *eflow,
	       const struct rte_flow_action *actions,
	       void *data,
	       struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct mlx5_flow_driver_ops *fops;
	struct rte_flow *flow = NULL;
	enum mlx5_flow_drv_type ftype = MLX5_FLOW_TYPE_MIN;

	if (priv->sh->config.dv_flow_en == 2) {
#ifdef HAVE_MLX5_HWS_SUPPORT
		flow = eflow;
		ftype = MLX5_FLOW_TYPE_HW;
#endif
	} else {
		flow = (struct rte_flow *)mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
				(uintptr_t)(void *)eflow);
	}
	if (!flow) {
		return rte_flow_error_set(error, ENOENT,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL,
				"invalid flow handle");
	}
	if (ftype == MLX5_FLOW_TYPE_MIN)
		ftype = flow->drv_type;
	MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
	fops = flow_get_drv_ops(ftype);

	return fops->query(dev, flow, actions, data, error);
}

/**
 * Query a flow.
8719 * 8720 * @see rte_flow_query() 8721 * @see rte_flow_ops 8722 */ 8723 int 8724 mlx5_flow_query(struct rte_eth_dev *dev, 8725 struct rte_flow *flow, 8726 const struct rte_flow_action *actions, 8727 void *data, 8728 struct rte_flow_error *error) 8729 { 8730 int ret; 8731 8732 ret = flow_drv_query(dev, flow, actions, data, 8733 error); 8734 if (ret < 0) 8735 return ret; 8736 return 0; 8737 } 8738 8739 /** 8740 * Get rte_flow callbacks. 8741 * 8742 * @param dev 8743 * Pointer to Ethernet device structure. 8744 * @param ops 8745 * Pointer to operation-specific structure. 8746 * 8747 * @return 0 8748 */ 8749 int 8750 mlx5_flow_ops_get(struct rte_eth_dev *dev __rte_unused, 8751 const struct rte_flow_ops **ops) 8752 { 8753 *ops = &mlx5_flow_ops; 8754 return 0; 8755 } 8756 8757 /** 8758 * Validate meter policy actions. 8759 * Dispatcher for action type specific validation. 8760 * 8761 * @param[in] dev 8762 * Pointer to the Ethernet device structure. 8763 * @param[in] action 8764 * The meter policy action object to validate. 8765 * @param[in] attr 8766 * Attributes of flow to determine steering domain. 8767 * @param[out] is_rss 8768 * Is RSS or not. 8769 * @param[out] domain_bitmap 8770 * Domain bitmap. 8771 * @param[out] is_def_policy 8772 * Is default policy or not. 8773 * @param[out] error 8774 * Perform verbose error reporting if not NULL. Initialized in case of 8775 * error only. 8776 * 8777 * @return 8778 * 0 on success, otherwise negative errno value. 8779 */ 8780 int 8781 mlx5_flow_validate_mtr_acts(struct rte_eth_dev *dev, 8782 const struct rte_flow_action *actions[RTE_COLORS], 8783 struct rte_flow_attr *attr, 8784 bool *is_rss, 8785 uint8_t *domain_bitmap, 8786 uint8_t *policy_mode, 8787 struct rte_mtr_error *error) 8788 { 8789 const struct mlx5_flow_driver_ops *fops; 8790 8791 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 8792 return fops->validate_mtr_acts(dev, actions, attr, is_rss, 8793 domain_bitmap, policy_mode, error); 8794 } 8795 8796 /** 8797 * Destroy the meter table set. 8798 * 8799 * @param[in] dev 8800 * Pointer to Ethernet device. 8801 * @param[in] mtr_policy 8802 * Meter policy struct. 8803 */ 8804 void 8805 mlx5_flow_destroy_mtr_acts(struct rte_eth_dev *dev, 8806 struct mlx5_flow_meter_policy *mtr_policy) 8807 { 8808 const struct mlx5_flow_driver_ops *fops; 8809 8810 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 8811 fops->destroy_mtr_acts(dev, mtr_policy); 8812 } 8813 8814 /** 8815 * Create policy action, lock free, 8816 * (mutex should be acquired by caller). 8817 * Dispatcher for action type specific call. 8818 * 8819 * @param[in] dev 8820 * Pointer to the Ethernet device structure. 8821 * @param[in] mtr_policy 8822 * Meter policy struct. 8823 * @param[in] action 8824 * Action specification used to create meter actions. 8825 * @param[in] attr 8826 * Flow rule attributes. 8827 * @param[out] error 8828 * Perform verbose error reporting if not NULL. Initialized in case of 8829 * error only. 8830 * 8831 * @return 8832 * 0 on success, otherwise negative errno value. 
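 *
 * Like the other meter-policy helpers here, this dispatches through the
 * DV driver ops, i.e. flow_get_drv_ops(MLX5_FLOW_TYPE_DV).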
 */
int
mlx5_flow_create_mtr_acts(struct rte_eth_dev *dev,
			  struct mlx5_flow_meter_policy *mtr_policy,
			  const struct rte_flow_action *actions[RTE_COLORS],
			  struct rte_flow_attr *attr,
			  struct rte_mtr_error *error)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	return fops->create_mtr_acts(dev, mtr_policy, actions, attr, error);
}

/**
 * Create policy rules, lock free,
 * (mutex should be acquired by caller).
 * Dispatcher for action type specific call.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] mtr_policy
 *   Meter policy struct.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
int
mlx5_flow_create_policy_rules(struct rte_eth_dev *dev,
			      struct mlx5_flow_meter_policy *mtr_policy)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	return fops->create_policy_rules(dev, mtr_policy);
}

/**
 * Destroy policy rules, lock free,
 * (mutex should be acquired by caller).
 * Dispatcher for action type specific call.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] mtr_policy
 *   Meter policy struct.
 */
void
mlx5_flow_destroy_policy_rules(struct rte_eth_dev *dev,
			       struct mlx5_flow_meter_policy *mtr_policy)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	fops->destroy_policy_rules(dev, mtr_policy);
}

/**
 * Destroy the default policy table set.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 */
void
mlx5_flow_destroy_def_policy(struct rte_eth_dev *dev)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	fops->destroy_def_policy(dev);
}

/**
 * Create the default policy table set.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
int
mlx5_flow_create_def_policy(struct rte_eth_dev *dev)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	return fops->create_def_policy(dev);
}

/**
 * Create the needed meter and suffix tables.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
int
mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
			  struct mlx5_flow_meter_info *fm,
			  uint32_t mtr_idx,
			  uint8_t domain_bitmap)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	return fops->create_mtr_tbls(dev, fm, mtr_idx, domain_bitmap);
}

/**
 * Destroy the meter table set.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] fm
 *   Pointer to the meter info.
 */
void
mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
			   struct mlx5_flow_meter_info *fm)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	fops->destroy_mtr_tbls(dev, fm);
}

/**
 * Destroy the global meter drop table.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
8967 */
8968 void
8969 mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
8970 {
8971 const struct mlx5_flow_driver_ops *fops;
8972
8973 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8974 fops->destroy_mtr_drop_tbls(dev);
8975 }
8976
8977 /**
8978 * Destroy the sub policy table with RX queue.
8979 *
8980 * @param[in] dev
8981 * Pointer to Ethernet device.
8982 * @param[in] mtr_policy
8983 * Pointer to meter policy table.
8984 */
8985 void
8986 mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
8987 struct mlx5_flow_meter_policy *mtr_policy)
8988 {
8989 const struct mlx5_flow_driver_ops *fops;
8990
8991 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8992 fops->destroy_sub_policy_with_rxq(dev, mtr_policy);
8993 }
8994
8995 /**
8996 * Allocate the needed ASO flow meter id.
8997 *
8998 * @param[in] dev
8999 * Pointer to Ethernet device.
9000 *
9001 * @return
9002 * Index to ASO flow meter on success, 0 otherwise.
9003 */
9004 uint32_t
9005 mlx5_flow_mtr_alloc(struct rte_eth_dev *dev)
9006 {
9007 const struct mlx5_flow_driver_ops *fops;
9008
9009 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
9010 return fops->create_meter(dev);
9011 }
9012
9013 /**
9014 * Free the ASO flow meter id.
9015 *
9016 * @param[in] dev
9017 * Pointer to Ethernet device.
9018 * @param[in] mtr_idx
9019 * Index to ASO flow meter to be freed.
9023 */
9024 void
9025 mlx5_flow_mtr_free(struct rte_eth_dev *dev, uint32_t mtr_idx)
9026 {
9027 const struct mlx5_flow_driver_ops *fops;
9028
9029 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
9030 fops->free_meter(dev, mtr_idx);
9031 }
9032
9033 /**
9034 * Allocate a counter.
9035 *
9036 * @param[in] dev
9037 * Pointer to Ethernet device structure.
9038 *
9039 * @return
9040 * Index to allocated counter on success, 0 otherwise.
9041 */
9042 uint32_t
9043 mlx5_counter_alloc(struct rte_eth_dev *dev)
9044 {
9045 struct rte_flow_attr attr = { .transfer = 0 };
9046
9047 return flow_get_drv_ops(flow_get_drv_type(dev, &attr))->counter_alloc
9048 (dev);
9049 }
9050
9051 /**
9052 * Free a counter.
9053 *
9054 * @param[in] dev
9055 * Pointer to Ethernet device structure.
9056 * @param[in] cnt
9057 * Index to counter to be freed.
9058 */
9059 void
9060 mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
9061 {
9062 struct rte_flow_attr attr = { .transfer = 0 };
9063
9064 flow_get_drv_ops(flow_get_drv_type(dev, &attr))->counter_free(dev, cnt);
9065 }
9066
9067 /**
9068 * Query counter statistics.
9069 *
9070 * @param[in] dev
9071 * Pointer to Ethernet device structure.
9072 * @param[in] cnt
9073 * Index to counter to query.
9074 * @param[in] clear
9075 * Set to clear counter statistics.
9076 * @param[out] pkts
9077 * Pointer where the number of packet hits is saved.
9078 * @param[out] bytes
9079 * Pointer where the number of byte hits is saved.
 * @param[out] action
 * Pointer where the counter action handle is saved, if any.
9080 *
9081 * @return
9082 * 0 on success, a negative errno value otherwise.
9083 */
9084 int
9085 mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
9086 bool clear, uint64_t *pkts, uint64_t *bytes, void **action)
9087 {
9088 struct rte_flow_attr attr = { .transfer = 0 };
9089
9090 return flow_get_drv_ops(flow_get_drv_type(dev, &attr))->counter_query
9091 (dev, cnt, clear, pkts, bytes, action);
9092 }
9093
9094 /**
9095 * Get information about HWS pre-configurable resources.
9096 *
9097 * @param[in] dev
9098 * Pointer to the rte_eth_dev structure.
9099 * @param[out] port_info
9100 * Pointer to port information.
9101 * @param[out] queue_info
9102 * Pointer to queue information.
9103 * @param[out] error
9104 * Pointer to error structure.
9105 *
9106 * @return
9107 * 0 on success, a negative errno value otherwise and rte_errno is set.
9108 */
9109 static int
9110 mlx5_flow_info_get(struct rte_eth_dev *dev,
9111 struct rte_flow_port_info *port_info,
9112 struct rte_flow_queue_info *queue_info,
9113 struct rte_flow_error *error)
9114 {
9115 const struct mlx5_flow_driver_ops *fops;
9116 struct rte_flow_attr attr = {0};
9117
9118 if (flow_get_drv_type(dev, &attr) != MLX5_FLOW_TYPE_HW)
9119 return rte_flow_error_set(error, ENOTSUP,
9120 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9121 NULL,
9122 "info get with incorrect steering mode");
9123 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9124 return fops->info_get(dev, port_info, queue_info, error);
9125 }
9126
9127 /**
9128 * Configure port HWS resources.
9129 *
9130 * @param[in] dev
9131 * Pointer to the rte_eth_dev structure.
9132 * @param[in] port_attr
9133 * Port configuration attributes.
9134 * @param[in] nb_queue
9135 * Number of queues.
9136 * @param[in] queue_attr
9137 * Array that holds attributes for each flow queue.
9138 * @param[out] error
9139 * Pointer to error structure.
9140 *
9141 * @return
9142 * 0 on success, a negative errno value otherwise and rte_errno is set.
9143 */
9144 static int
9145 mlx5_flow_port_configure(struct rte_eth_dev *dev,
9146 const struct rte_flow_port_attr *port_attr,
9147 uint16_t nb_queue,
9148 const struct rte_flow_queue_attr *queue_attr[],
9149 struct rte_flow_error *error)
9150 {
9151 const struct mlx5_flow_driver_ops *fops;
9152 struct rte_flow_attr attr = {0};
9153
9154 if (flow_get_drv_type(dev, &attr) != MLX5_FLOW_TYPE_HW)
9155 return rte_flow_error_set(error, ENOTSUP,
9156 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9157 NULL,
9158 "port configure with incorrect steering mode");
9159 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9160 return fops->configure(dev, port_attr, nb_queue, queue_attr, error);
9161 }
9162
9163 /**
9164 * Validate item template.
9165 *
9166 * @param[in] dev
9167 * Pointer to the rte_eth_dev structure.
9168 * @param[in] attr
9169 * Pointer to the item template attributes.
9170 * @param[in] items
9171 * The template item pattern.
9172 * @param[out] error
9173 * Pointer to error structure.
9174 *
9175 * @return
9176 * 0 on success, a negative errno value otherwise and rte_errno is set.
9177 */
9178 int
9179 mlx5_flow_pattern_validate(struct rte_eth_dev *dev,
9180 const struct rte_flow_pattern_template_attr *attr,
9181 const struct rte_flow_item items[],
9182 struct rte_flow_error *error)
9183 {
9184 const struct mlx5_flow_driver_ops *fops;
9185 struct rte_flow_attr fattr = {0};
9186 uint64_t item_flags = 0;
9187
9188 if (flow_get_drv_type(dev, &fattr) != MLX5_FLOW_TYPE_HW) {
9189 rte_flow_error_set(error, ENOTSUP,
9190 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9191 "pattern validate with incorrect steering mode");
9192 return -ENOTSUP;
9193 }
9194 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9195 return fops->pattern_validate(dev, attr, items, &item_flags, error);
9196 }
9197
9198 /**
9199 * Create flow item template.
9200 *
9201 * @param[in] dev
9202 * Pointer to the rte_eth_dev structure.
9203 * @param[in] attr
9204 * Pointer to the item template attributes.
9205 * @param[in] items
9206 * The template item pattern.
9207 * @param[out] error
9208 * Pointer to error structure.
9209 *
9210 * @return
9211 * Template on success, NULL otherwise and rte_errno is set.
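 *
 * A caller-side sketch using the public API that lands here (hypothetical
 * port_id; matching any Ethernet frame with an exact IPv4 destination,
 * where the mask selects the fields matched per rule):
 *
 * @code
 * struct rte_flow_pattern_template_attr pt_attr = { .ingress = 1 };
 * struct rte_flow_item pat[] = {
 *         { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *         { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *           .mask = &(struct rte_flow_item_ipv4){
 *                   .hdr.dst_addr = RTE_BE32(0xffffffff) } },
 *         { .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * struct rte_flow_error err;
 * struct rte_flow_pattern_template *pt =
 *         rte_flow_pattern_template_create(port_id, &pt_attr, pat, &err);
 * @endcode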
9212 */
9213 static struct rte_flow_pattern_template *
9214 mlx5_flow_pattern_template_create(struct rte_eth_dev *dev,
9215 const struct rte_flow_pattern_template_attr *attr,
9216 const struct rte_flow_item items[],
9217 struct rte_flow_error *error)
9218 {
9219 const struct mlx5_flow_driver_ops *fops;
9220 struct rte_flow_attr fattr = {0};
9221
9222 if (flow_get_drv_type(dev, &fattr) != MLX5_FLOW_TYPE_HW) {
9223 rte_flow_error_set(error, ENOTSUP,
9224 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9225 NULL,
9226 "pattern create with incorrect steering mode");
9227 return NULL;
9228 }
9229 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9230 return fops->pattern_template_create(dev, attr, items, error);
9231 }
9232
9233 /**
9234 * Destroy flow item template.
9235 *
9236 * @param[in] dev
9237 * Pointer to the rte_eth_dev structure.
9238 * @param[in] template
9239 * Pointer to the item template to be destroyed.
9240 * @param[out] error
9241 * Pointer to error structure.
9242 *
9243 * @return
9244 * 0 on success, a negative errno value otherwise and rte_errno is set.
9245 */
9246 static int
9247 mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev,
9248 struct rte_flow_pattern_template *template,
9249 struct rte_flow_error *error)
9250 {
9251 const struct mlx5_flow_driver_ops *fops;
9252 struct rte_flow_attr attr = {0};
9253
9254 if (flow_get_drv_type(dev, &attr) != MLX5_FLOW_TYPE_HW)
9255 return rte_flow_error_set(error, ENOTSUP,
9256 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9257 NULL,
9258 "pattern destroy with incorrect steering mode");
9259 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9260 return fops->pattern_template_destroy(dev, template, error);
9261 }
9262
9263 /**
9264 * Validate flow actions template.
9265 *
9266 * @param[in] dev
9267 * Pointer to the rte_eth_dev structure.
9268 * @param[in] attr
9269 * Pointer to the action template attributes.
9270 * @param[in] actions
9271 * Associated actions (list terminated by the END action).
9272 * @param[in] masks
9273 * List of actions that marks which of the action's members are constant.
9274 * @param[out] error
9275 * Pointer to error structure.
9276 *
9277 * @return
9278 * 0 on success, a negative errno value otherwise and rte_errno is set.
9279 */
9280 int
9281 mlx5_flow_actions_validate(struct rte_eth_dev *dev,
9282 const struct rte_flow_actions_template_attr *attr,
9283 const struct rte_flow_action actions[],
9284 const struct rte_flow_action masks[],
9285 struct rte_flow_error *error)
9286 {
9287 const struct mlx5_flow_driver_ops *fops;
9288 struct rte_flow_attr fattr = {0};
9289
9290 if (flow_get_drv_type(dev, &fattr) != MLX5_FLOW_TYPE_HW) {
9291 rte_flow_error_set(error, ENOTSUP,
9292 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9293 "actions validate with incorrect steering mode");
9294 return -ENOTSUP;
9295 }
9296 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9297 return fops->actions_validate(dev, attr, actions, masks, error);
9298 }
9299
9300 /**
9301 * Create flow actions template.
9302 *
9303 * @param[in] dev
9304 * Pointer to the rte_eth_dev structure.
9305 * @param[in] attr
9306 * Pointer to the action template attributes.
9307 * @param[in] actions
9308 * Associated actions (list terminated by the END action).
9309 * @param[in] masks
9310 * List of actions that marks which of the action's members are constant.
9311 * @param[out] error
9312 * Pointer to error structure.
9313 *
9314 * @return
9315 * Template on success, NULL otherwise and rte_errno is set.
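 *
 * A caller-side sketch (hypothetical port_id): a QUEUE action whose index
 * stays variable; giving the mask a zeroed conf marks the index as
 * provided per rule rather than fixed at template creation:
 *
 * @code
 * struct rte_flow_actions_template_attr at_attr = { .ingress = 1 };
 * struct rte_flow_action acts[] = {
 *         { .type = RTE_FLOW_ACTION_TYPE_QUEUE,
 *           .conf = &(struct rte_flow_action_queue){ .index = 0 } },
 *         { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_flow_action masks[] = {
 *         { .type = RTE_FLOW_ACTION_TYPE_QUEUE,
 *           .conf = &(struct rte_flow_action_queue){ .index = 0 } },
 *         { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_flow_error err;
 * struct rte_flow_actions_template *at =
 *         rte_flow_actions_template_create(port_id, &at_attr, acts,
 *                                          masks, &err);
 * @endcode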
9316 */
9317 static struct rte_flow_actions_template *
9318 mlx5_flow_actions_template_create(struct rte_eth_dev *dev,
9319 const struct rte_flow_actions_template_attr *attr,
9320 const struct rte_flow_action actions[],
9321 const struct rte_flow_action masks[],
9322 struct rte_flow_error *error)
9323 {
9324 const struct mlx5_flow_driver_ops *fops;
9325 struct rte_flow_attr fattr = {0};
9326
9327 if (flow_get_drv_type(dev, &fattr) != MLX5_FLOW_TYPE_HW) {
9328 rte_flow_error_set(error, ENOTSUP,
9329 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9330 NULL,
9331 "action create with incorrect steering mode");
9332 return NULL;
9333 }
9334 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9335 return fops->actions_template_create(dev, attr, actions, masks, error);
9336 }
9337
9338 /**
9339 * Destroy flow action template.
9340 *
9341 * @param[in] dev
9342 * Pointer to the rte_eth_dev structure.
9343 * @param[in] template
9344 * Pointer to the action template to be destroyed.
9345 * @param[out] error
9346 * Pointer to error structure.
9347 *
9348 * @return
9349 * 0 on success, a negative errno value otherwise and rte_errno is set.
9350 */
9351 static int
9352 mlx5_flow_actions_template_destroy(struct rte_eth_dev *dev,
9353 struct rte_flow_actions_template *template,
9354 struct rte_flow_error *error)
9355 {
9356 const struct mlx5_flow_driver_ops *fops;
9357 struct rte_flow_attr attr = {0};
9358
9359 if (flow_get_drv_type(dev, &attr) != MLX5_FLOW_TYPE_HW)
9360 return rte_flow_error_set(error, ENOTSUP,
9361 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9362 NULL,
9363 "action destroy with incorrect steering mode");
9364 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9365 return fops->actions_template_destroy(dev, template, error);
9366 }
9367
9368 /**
9369 * Create flow table.
9370 *
9371 * @param[in] dev
9372 * Pointer to the rte_eth_dev structure.
9373 * @param[in] attr
9374 * Pointer to the table attributes.
9375 * @param[in] item_templates
9376 * Item template array to be bound to the table.
9377 * @param[in] nb_item_templates
9378 * Number of item templates.
9379 * @param[in] action_templates
9380 * Action template array to be bound to the table.
9381 * @param[in] nb_action_templates
9382 * Number of action templates.
9383 * @param[out] error
9384 * Pointer to error structure.
9385 *
9386 * @return
9387 * Table on success, NULL otherwise and rte_errno is set.
9388 */
9389 static struct rte_flow_template_table *
9390 mlx5_flow_table_create(struct rte_eth_dev *dev,
9391 const struct rte_flow_template_table_attr *attr,
9392 struct rte_flow_pattern_template *item_templates[],
9393 uint8_t nb_item_templates,
9394 struct rte_flow_actions_template *action_templates[],
9395 uint8_t nb_action_templates,
9396 struct rte_flow_error *error)
9397 {
9398 const struct mlx5_flow_driver_ops *fops;
9399 struct rte_flow_attr fattr = {0};
9400
9401 if (flow_get_drv_type(dev, &fattr) != MLX5_FLOW_TYPE_HW) {
9402 rte_flow_error_set(error, ENOTSUP,
9403 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9404 NULL,
9405 "table create with incorrect steering mode");
9406 return NULL;
9407 }
9408 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9409 return fops->template_table_create(dev,
9410 attr,
9411 item_templates,
9412 nb_item_templates,
9413 action_templates,
9414 nb_action_templates,
9415 error);
9416 }
9417
9418 /**
9419 * PMD destroy flow table.
9420 *
9421 * @param[in] dev
9422 * Pointer to the rte_eth_dev structure.
9423 * @param[in] table
9424 * Pointer to the table to be destroyed.
9425 * @param[out] error
9426 * Pointer to error structure.
9427 *
9428 * @return
9429 * 0 on success, a negative errno value otherwise and rte_errno is set.
9430 */
9431 static int
9432 mlx5_flow_table_destroy(struct rte_eth_dev *dev,
9433 struct rte_flow_template_table *table,
9434 struct rte_flow_error *error)
9435 {
9436 const struct mlx5_flow_driver_ops *fops;
9437 struct rte_flow_attr attr = {0};
9438
9439 if (flow_get_drv_type(dev, &attr) != MLX5_FLOW_TYPE_HW)
9440 return rte_flow_error_set(error, ENOTSUP,
9441 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9442 NULL,
9443 "table destroy with incorrect steering mode");
9444 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9445 return fops->template_table_destroy(dev, table, error);
9446 }
9447
9448 /**
9449 * PMD group set miss actions.
9450 *
9451 * @param[in] dev
9452 * Pointer to the rte_eth_dev structure.
 * @param[in] group_id
 * Identifier of the group to set the miss actions for.
9453 * @param[in] attr
9454 * Pointer to group attributes.
9455 * @param[in] actions
9456 * Array of actions.
9457 * @param[out] error
9458 * Pointer to error structure.
9459 *
9460 * @return
9461 * 0 on success, a negative errno value otherwise and rte_errno is set.
9462 */
9463 static int
9464 mlx5_flow_group_set_miss_actions(struct rte_eth_dev *dev,
9465 uint32_t group_id,
9466 const struct rte_flow_group_attr *attr,
9467 const struct rte_flow_action actions[],
9468 struct rte_flow_error *error)
9469 {
9470 const struct mlx5_flow_driver_ops *fops;
9471 struct rte_flow_attr fattr = {0};
9472
9473 if (flow_get_drv_type(dev, &fattr) != MLX5_FLOW_TYPE_HW)
9474 return rte_flow_error_set(error, ENOTSUP,
9475 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9476 NULL,
9477 "group set miss actions with incorrect steering mode");
9478 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9479 return fops->group_set_miss_actions(dev, group_id, attr, actions, error);
9480 }
9481
9482 /**
9483 * Allocate memory for the counter values, wrapped by all the needed
9484 * management structures.
9485 *
9486 * @param[in] sh
9487 * Pointer to mlx5_dev_ctx_shared object.
9488 *
9489 * @return
9490 * 0 on success, a negative errno value otherwise.
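 *
 * Layout of the single allocation, as a sketch of the size computation
 * below (raws_n = MLX5_CNT_MR_ALLOC_BULK + MLX5_MAX_PENDING_QUERIES; the
 * last MLX5_MAX_PENDING_QUERIES raws seed the free_stat_raws list):
 *
 * @code
 * mem           -> raws_n blocks of MLX5_COUNTERS_PER_POOL
 *                  struct flow_counter_stats entries (DMA target)
 * mem_mng->raws -> struct mlx5_counter_stats_raw[raws_n]
 * mem_mng       -> struct mlx5_counter_stats_mem_mng (at the buffer end)
 * @endcode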
9491 */ 9492 static int 9493 mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) 9494 { 9495 struct mlx5_counter_stats_mem_mng *mem_mng; 9496 volatile struct flow_counter_stats *raw_data; 9497 int raws_n = MLX5_CNT_MR_ALLOC_BULK + MLX5_MAX_PENDING_QUERIES; 9498 int size = (sizeof(struct flow_counter_stats) * 9499 MLX5_COUNTERS_PER_POOL + 9500 sizeof(struct mlx5_counter_stats_raw)) * raws_n + 9501 sizeof(struct mlx5_counter_stats_mem_mng); 9502 size_t pgsize = rte_mem_page_size(); 9503 uint8_t *mem; 9504 int ret; 9505 int i; 9506 9507 if (pgsize == (size_t)-1) { 9508 DRV_LOG(ERR, "Failed to get mem page size"); 9509 rte_errno = ENOMEM; 9510 return -ENOMEM; 9511 } 9512 mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize, SOCKET_ID_ANY); 9513 if (!mem) { 9514 rte_errno = ENOMEM; 9515 return -ENOMEM; 9516 } 9517 mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1; 9518 size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n; 9519 ret = mlx5_os_wrapped_mkey_create(sh->cdev->ctx, sh->cdev->pd, 9520 sh->cdev->pdn, mem, size, 9521 &mem_mng->wm); 9522 if (ret) { 9523 rte_errno = errno; 9524 mlx5_free(mem); 9525 return -rte_errno; 9526 } 9527 mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size); 9528 raw_data = (volatile struct flow_counter_stats *)mem; 9529 for (i = 0; i < raws_n; ++i) { 9530 mem_mng->raws[i].mem_mng = mem_mng; 9531 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL; 9532 } 9533 for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i) 9534 LIST_INSERT_HEAD(&sh->sws_cmng.free_stat_raws, 9535 mem_mng->raws + MLX5_CNT_MR_ALLOC_BULK + i, 9536 next); 9537 LIST_INSERT_HEAD(&sh->sws_cmng.mem_mngs, mem_mng, next); 9538 sh->sws_cmng.mem_mng = mem_mng; 9539 return 0; 9540 } 9541 9542 /** 9543 * Set the statistic memory to the new counter pool. 9544 * 9545 * @param[in] sh 9546 * Pointer to mlx5_dev_ctx_shared object. 9547 * @param[in] pool 9548 * Pointer to the pool to set the statistic memory. 9549 * 9550 * @return 9551 * 0 on success, a negative errno value otherwise. 9552 */ 9553 static int 9554 mlx5_flow_set_counter_stat_mem(struct mlx5_dev_ctx_shared *sh, 9555 struct mlx5_flow_counter_pool *pool) 9556 { 9557 struct mlx5_flow_counter_mng *cmng = &sh->sws_cmng; 9558 /* Resize statistic memory once used out. */ 9559 if (!(pool->index % MLX5_CNT_MR_ALLOC_BULK) && 9560 mlx5_flow_create_counter_stat_mem_mng(sh)) { 9561 DRV_LOG(ERR, "Cannot resize counter stat mem."); 9562 return -1; 9563 } 9564 rte_spinlock_lock(&pool->sl); 9565 pool->raw = cmng->mem_mng->raws + pool->index % MLX5_CNT_MR_ALLOC_BULK; 9566 rte_spinlock_unlock(&pool->sl); 9567 pool->raw_hw = NULL; 9568 return 0; 9569 } 9570 9571 #define MLX5_POOL_QUERY_FREQ_US 1000000 9572 9573 /** 9574 * Set the periodic procedure for triggering asynchronous batch queries for all 9575 * the counter pools. 9576 * 9577 * @param[in] sh 9578 * Pointer to mlx5_dev_ctx_shared object. 
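 *
 * The fixed query budget is spread evenly over the valid pools: with
 * MLX5_POOL_QUERY_FREQ_US at 1000000 us and, say, 4 valid pools, the
 * alarm is rearmed every 250000 us, so each pool is queried roughly
 * once per second (illustrative numbers).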
9579 */ 9580 void 9581 mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh) 9582 { 9583 uint32_t pools_n, us; 9584 9585 pools_n = rte_atomic_load_explicit(&sh->sws_cmng.n_valid, rte_memory_order_relaxed); 9586 us = MLX5_POOL_QUERY_FREQ_US / pools_n; 9587 DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us); 9588 if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) { 9589 sh->sws_cmng.query_thread_on = 0; 9590 DRV_LOG(ERR, "Cannot reinitialize query alarm"); 9591 } else { 9592 sh->sws_cmng.query_thread_on = 1; 9593 } 9594 } 9595 9596 /** 9597 * The periodic procedure for triggering asynchronous batch queries for all the 9598 * counter pools. This function is probably called by the host thread. 9599 * 9600 * @param[in] arg 9601 * The parameter for the alarm process. 9602 */ 9603 void 9604 mlx5_flow_query_alarm(void *arg) 9605 { 9606 struct mlx5_dev_ctx_shared *sh = arg; 9607 struct mlx5_flow_counter_mng *cmng = &sh->sws_cmng; 9608 uint16_t pool_index = cmng->pool_index; 9609 struct mlx5_flow_counter_pool *pool; 9610 uint16_t n_valid; 9611 int ret; 9612 9613 if (cmng->pending_queries >= MLX5_MAX_PENDING_QUERIES) 9614 goto set_alarm; 9615 rte_spinlock_lock(&cmng->pool_update_sl); 9616 pool = cmng->pools[pool_index]; 9617 n_valid = cmng->n_valid; 9618 rte_spinlock_unlock(&cmng->pool_update_sl); 9619 /* Set the statistic memory to the new created pool. */ 9620 if ((!pool->raw && mlx5_flow_set_counter_stat_mem(sh, pool))) 9621 goto set_alarm; 9622 if (pool->raw_hw) 9623 /* There is a pool query in progress. */ 9624 goto set_alarm; 9625 pool->raw_hw = LIST_FIRST(&cmng->free_stat_raws); 9626 if (!pool->raw_hw) 9627 /* No free counter statistics raw memory. */ 9628 goto set_alarm; 9629 /* 9630 * Identify the counters released between query trigger and query 9631 * handle more efficiently. The counter released in this gap period 9632 * should wait for a new round of query as the new arrived packets 9633 * will not be taken into account. 9634 */ 9635 pool->query_gen++; 9636 ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0, 9637 MLX5_COUNTERS_PER_POOL, 9638 NULL, NULL, 9639 pool->raw_hw->mem_mng->wm.lkey, 9640 (void *)(uintptr_t) 9641 pool->raw_hw->data, 9642 sh->devx_comp, 9643 (uint64_t)(uintptr_t)pool); 9644 if (ret) { 9645 DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID" 9646 " %d", pool->min_dcs->id); 9647 pool->raw_hw = NULL; 9648 goto set_alarm; 9649 } 9650 LIST_REMOVE(pool->raw_hw, next); 9651 cmng->pending_queries++; 9652 pool_index++; 9653 if (pool_index >= n_valid) 9654 pool_index = 0; 9655 set_alarm: 9656 cmng->pool_index = pool_index; 9657 mlx5_set_query_alarm(sh); 9658 } 9659 9660 /** 9661 * Check and callback event for new aged flow in the counter pool 9662 * 9663 * @param[in] sh 9664 * Pointer to mlx5_dev_ctx_shared object. 9665 * @param[in] pool 9666 * Pointer to Current counter pool. 
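 *
 * Per-counter aging decision, as a sketch of the loop below:
 *
 * @code
 * if (hits changed since the previous raw)
 *         sec_since_last_hit = 0;
 * else if ((sec_since_last_hit += time_delta) > timeout)
 *         move the counter from AGE_CANDIDATE to the aged list (AGE_TMOUT)
 * @endcode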
9667 */ 9668 static void 9669 mlx5_flow_aging_check(struct mlx5_dev_ctx_shared *sh, 9670 struct mlx5_flow_counter_pool *pool) 9671 { 9672 struct mlx5_priv *priv; 9673 struct mlx5_flow_counter *cnt; 9674 struct mlx5_age_info *age_info; 9675 struct mlx5_age_param *age_param; 9676 struct mlx5_counter_stats_raw *cur = pool->raw_hw; 9677 struct mlx5_counter_stats_raw *prev = pool->raw; 9678 const uint64_t curr_time = MLX5_CURR_TIME_SEC; 9679 const uint32_t time_delta = curr_time - pool->time_of_last_age_check; 9680 uint16_t expected = AGE_CANDIDATE; 9681 uint32_t i; 9682 9683 pool->time_of_last_age_check = curr_time; 9684 for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) { 9685 cnt = MLX5_POOL_GET_CNT(pool, i); 9686 age_param = MLX5_CNT_TO_AGE(cnt); 9687 if (rte_atomic_load_explicit(&age_param->state, 9688 rte_memory_order_relaxed) != AGE_CANDIDATE) 9689 continue; 9690 if (cur->data[i].hits != prev->data[i].hits) { 9691 rte_atomic_store_explicit(&age_param->sec_since_last_hit, 0, 9692 rte_memory_order_relaxed); 9693 continue; 9694 } 9695 if (rte_atomic_fetch_add_explicit(&age_param->sec_since_last_hit, 9696 time_delta, 9697 rte_memory_order_relaxed) + time_delta <= age_param->timeout) 9698 continue; 9699 /** 9700 * Hold the lock first, or if between the 9701 * state AGE_TMOUT and tailq operation the 9702 * release happened, the release procedure 9703 * may delete a non-existent tailq node. 9704 */ 9705 priv = rte_eth_devices[age_param->port_id].data->dev_private; 9706 age_info = GET_PORT_AGE_INFO(priv); 9707 rte_spinlock_lock(&age_info->aged_sl); 9708 if (rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected, 9709 AGE_TMOUT, 9710 rte_memory_order_relaxed, 9711 rte_memory_order_relaxed)) { 9712 TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next); 9713 MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW); 9714 } 9715 rte_spinlock_unlock(&age_info->aged_sl); 9716 } 9717 mlx5_age_event_prepare(sh); 9718 } 9719 9720 /** 9721 * Handler for the HW respond about ready values from an asynchronous batch 9722 * query. This function is probably called by the host thread. 9723 * 9724 * @param[in] sh 9725 * The pointer to the shared device context. 9726 * @param[in] async_id 9727 * The Devx async ID. 9728 * @param[in] status 9729 * The status of the completion. 9730 */ 9731 void 9732 mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh, 9733 uint64_t async_id, int status) 9734 { 9735 struct mlx5_flow_counter_pool *pool = 9736 (struct mlx5_flow_counter_pool *)(uintptr_t)async_id; 9737 struct mlx5_counter_stats_raw *raw_to_free; 9738 uint8_t query_gen = pool->query_gen ^ 1; 9739 struct mlx5_flow_counter_mng *cmng = &sh->sws_cmng; 9740 enum mlx5_counter_type cnt_type = 9741 pool->is_aged ? MLX5_COUNTER_TYPE_AGE : 9742 MLX5_COUNTER_TYPE_ORIGIN; 9743 9744 if (unlikely(status)) { 9745 raw_to_free = pool->raw_hw; 9746 } else { 9747 raw_to_free = pool->raw; 9748 if (pool->is_aged) 9749 mlx5_flow_aging_check(sh, pool); 9750 rte_spinlock_lock(&pool->sl); 9751 pool->raw = pool->raw_hw; 9752 rte_spinlock_unlock(&pool->sl); 9753 /* Be sure the new raw counters data is updated in memory. 
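 * The barrier below publishes the freshly DMA-filled statistics before
 * the counters of this query round go back to the free lists, so a
 * counter reused immediately afterwards reads up-to-date values.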
*/
9754 rte_io_wmb();
9755 if (!TAILQ_EMPTY(&pool->counters[query_gen])) {
9756 rte_spinlock_lock(&cmng->csl[cnt_type]);
9757 TAILQ_CONCAT(&cmng->counters[cnt_type],
9758 &pool->counters[query_gen], next);
9759 rte_spinlock_unlock(&cmng->csl[cnt_type]);
9760 }
9761 }
9762 LIST_INSERT_HEAD(&sh->sws_cmng.free_stat_raws, raw_to_free, next);
9763 pool->raw_hw = NULL;
9764 sh->sws_cmng.pending_queries--;
9765 }
9766
9767 static int
9768 flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
9769 const struct flow_grp_info *grp_info,
9770 struct rte_flow_error *error)
9771 {
9772 if (grp_info->transfer && grp_info->external &&
9773 grp_info->fdb_def_rule) {
9774 if (group == UINT32_MAX)
9775 return rte_flow_error_set
9776 (error, EINVAL,
9777 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
9778 NULL,
9779 "group index not supported");
9780 *table = group + 1;
9781 } else {
9782 *table = group;
9783 }
9784 DRV_LOG(DEBUG, "port %u group=%#x table=%#x", port_id, group, *table);
9785 return 0;
9786 }
9787
9788 /**
9789 * Translate the rte_flow group index to HW table value.
9790 *
9791 * If tunnel offload is disabled, all group ids are converted to flow table
9792 * ids using the standard method.
9793 * If tunnel offload is enabled, a group id can be converted using either
9794 * the standard or the tunnel conversion method. The conversion method
9795 * selection depends on flags in the `grp_info` parameter:
9796 * - Internal (grp_info.external == 0) groups conversion uses the
9797 * standard method.
9798 * - Group ids in the JUMP action are converted with the tunnel conversion.
9799 * - Group id conversion in the rule attribute depends on the rule type and
9800 * group id value:
9801 * ** non-zero group attributes are converted with the tunnel method
9802 * ** a zero group attribute in a non-tunnel rule is converted using the
9803 * standard method - there's only one root table
9804 * ** a zero group attribute in a steer tunnel rule is converted with the
9805 * standard method - single root table
9806 * ** a zero group attribute in a match tunnel rule is a special OvS
9807 * case: that value is used for portability reasons. That group
9808 * id is converted with the tunnel conversion method.
9809 *
9810 * @param[in] dev
9811 * Port device.
9812 * @param[in] tunnel
9813 * PMD tunnel offload object.
9814 * @param[in] group
9815 * rte_flow group index value.
9816 * @param[out] table
9817 * HW table value.
9818 * @param[in] grp_info
9819 * Flags used for conversion.
9820 * @param[out] error
9821 * Pointer to error structure.
9822 *
9823 * @return
9824 * 0 on success, a negative errno value otherwise and rte_errno is set.
9825 */
9826 int
9827 mlx5_flow_group_to_table(struct rte_eth_dev *dev,
9828 const struct mlx5_flow_tunnel *tunnel,
9829 uint32_t group, uint32_t *table,
9830 const struct flow_grp_info *grp_info,
9831 struct rte_flow_error *error)
9832 {
9833 int ret;
9834 bool standard_translation;
9835
9836 if (!grp_info->skip_scale && grp_info->external &&
9837 group < MLX5_MAX_TABLES_EXTERNAL)
9838 group *= MLX5_FLOW_TABLE_FACTOR;
9839 if (is_tunnel_offload_active(dev)) {
9840 standard_translation = !grp_info->external ||
9841 grp_info->std_tbl_fix;
9842 } else {
9843 standard_translation = true;
9844 }
9845 DRV_LOG(DEBUG,
9846 "port %u group=%u transfer=%d external=%d fdb_def_rule=%d translate=%s",
9847 dev->data->port_id, group, grp_info->transfer,
9848 grp_info->external, grp_info->fdb_def_rule,
9849 standard_translation ?
"STANDARD" : "TUNNEL"); 9850 if (standard_translation) 9851 ret = flow_group_to_table(dev->data->port_id, group, table, 9852 grp_info, error); 9853 else 9854 ret = tunnel_flow_group_to_flow_table(dev, tunnel, group, 9855 table, error); 9856 9857 return ret; 9858 } 9859 9860 /** 9861 * Discover availability of metadata reg_c's. 9862 * 9863 * Iteratively use test flows to check availability. 9864 * 9865 * @param[in] dev 9866 * Pointer to the Ethernet device structure. 9867 * 9868 * @return 9869 * 0 on success, a negative errno value otherwise and rte_errno is set. 9870 */ 9871 int 9872 mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev) 9873 { 9874 struct mlx5_priv *priv = dev->data->dev_private; 9875 enum modify_reg idx; 9876 int n = 0; 9877 9878 /* reg_c[0] and reg_c[1] are reserved. */ 9879 priv->sh->flow_mreg_c[n++] = REG_C_0; 9880 priv->sh->flow_mreg_c[n++] = REG_C_1; 9881 /* Discover availability of other reg_c's. */ 9882 for (idx = REG_C_2; idx <= REG_C_7; ++idx) { 9883 struct rte_flow_attr attr = { 9884 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, 9885 .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR, 9886 .ingress = 1, 9887 }; 9888 struct rte_flow_item items[] = { 9889 [0] = { 9890 .type = RTE_FLOW_ITEM_TYPE_END, 9891 }, 9892 }; 9893 struct rte_flow_action actions[] = { 9894 [0] = { 9895 .type = (enum rte_flow_action_type) 9896 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 9897 .conf = &(struct mlx5_flow_action_copy_mreg){ 9898 .src = REG_C_1, 9899 .dst = idx, 9900 }, 9901 }, 9902 [1] = { 9903 .type = RTE_FLOW_ACTION_TYPE_JUMP, 9904 .conf = &(struct rte_flow_action_jump){ 9905 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, 9906 }, 9907 }, 9908 [2] = { 9909 .type = RTE_FLOW_ACTION_TYPE_END, 9910 }, 9911 }; 9912 uint32_t flow_idx; 9913 struct rte_flow *flow; 9914 struct rte_flow_error error; 9915 9916 if (!priv->sh->config.dv_flow_en) 9917 break; 9918 /* Create internal flow, validation skips copy action. */ 9919 flow_idx = mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr, 9920 items, actions, false, &error); 9921 flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN], 9922 flow_idx); 9923 if (!flow) 9924 continue; 9925 priv->sh->flow_mreg_c[n++] = idx; 9926 mlx5_flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx); 9927 } 9928 for (; n < MLX5_MREG_C_NUM; ++n) 9929 priv->sh->flow_mreg_c[n] = REG_NON; 9930 priv->sh->metadata_regc_check_flag = 1; 9931 return 0; 9932 } 9933 9934 int 9935 save_dump_file(const uint8_t *data, uint32_t size, 9936 uint32_t type, uint64_t id, void *arg, FILE *file) 9937 { 9938 char line[BUF_SIZE]; 9939 uint32_t out = 0; 9940 uint32_t k; 9941 uint32_t actions_num; 9942 struct rte_flow_query_count *count; 9943 9944 memset(line, 0, BUF_SIZE); 9945 switch (type) { 9946 case DR_DUMP_REC_TYPE_PMD_MODIFY_HDR: 9947 actions_num = *(uint32_t *)(arg); 9948 out += snprintf(line + out, BUF_SIZE - out, "%d,0x%" PRIx64 ",%d,", 9949 type, id, actions_num); 9950 break; 9951 case DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT: 9952 out += snprintf(line + out, BUF_SIZE - out, "%d,0x%" PRIx64 ",", 9953 type, id); 9954 break; 9955 case DR_DUMP_REC_TYPE_PMD_COUNTER: 9956 count = (struct rte_flow_query_count *)arg; 9957 fprintf(file, 9958 "%d,0x%" PRIx64 ",%" PRIu64 ",%" PRIu64 "\n", 9959 type, id, count->hits, count->bytes); 9960 return 0; 9961 default: 9962 return -1; 9963 } 9964 9965 for (k = 0; k < size; k++) { 9966 /* Make sure we do not overrun the line buffer length. 
*/ 9967 if (out >= BUF_SIZE - 4) { 9968 line[out] = '\0'; 9969 break; 9970 } 9971 out += snprintf(line + out, BUF_SIZE - out, "%02x", 9972 (data[k]) & 0xff); 9973 } 9974 fprintf(file, "%s\n", line); 9975 return 0; 9976 } 9977 9978 int 9979 mlx5_flow_query_counter(struct rte_eth_dev *dev, struct rte_flow *flow, 9980 struct rte_flow_query_count *count, struct rte_flow_error *error) 9981 { 9982 struct rte_flow_action action[2]; 9983 enum mlx5_flow_drv_type ftype; 9984 const struct mlx5_flow_driver_ops *fops; 9985 9986 if (!flow) { 9987 return rte_flow_error_set(error, ENOENT, 9988 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 9989 NULL, 9990 "invalid flow handle"); 9991 } 9992 action[0].type = RTE_FLOW_ACTION_TYPE_COUNT; 9993 action[1].type = RTE_FLOW_ACTION_TYPE_END; 9994 if (flow->counter) { 9995 memset(count, 0, sizeof(struct rte_flow_query_count)); 9996 ftype = (enum mlx5_flow_drv_type)(flow->drv_type); 9997 MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && 9998 ftype < MLX5_FLOW_TYPE_MAX); 9999 fops = flow_get_drv_ops(ftype); 10000 return fops->query(dev, flow, action, count, error); 10001 } 10002 return -1; 10003 } 10004 10005 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 10006 /** 10007 * Dump flow ipool data to file 10008 * 10009 * @param[in] dev 10010 * The pointer to Ethernet device. 10011 * @param[in] file 10012 * A pointer to a file for output. 10013 * @param[out] error 10014 * Perform verbose error reporting if not NULL. PMDs initialize this 10015 * structure in case of error only. 10016 * @return 10017 * 0 on success, a negative value otherwise. 10018 */ 10019 int 10020 mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev, 10021 struct rte_flow *flow, FILE *file, 10022 struct rte_flow_error *error) 10023 { 10024 struct mlx5_priv *priv = dev->data->dev_private; 10025 struct mlx5_flow_dv_modify_hdr_resource *modify_hdr; 10026 struct mlx5_flow_dv_encap_decap_resource *encap_decap; 10027 uint32_t handle_idx; 10028 struct mlx5_flow_handle *dh; 10029 struct rte_flow_query_count count; 10030 uint32_t actions_num; 10031 const uint8_t *data; 10032 size_t size; 10033 uint64_t id; 10034 uint32_t type; 10035 void *action = NULL; 10036 10037 if (!flow) { 10038 return rte_flow_error_set(error, ENOENT, 10039 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 10040 NULL, 10041 "invalid flow handle"); 10042 } 10043 handle_idx = flow->dev_handles; 10044 /* query counter */ 10045 if (flow->counter && 10046 (!mlx5_counter_query(dev, flow->counter, false, 10047 &count.hits, &count.bytes, &action)) && action) { 10048 id = (uint64_t)(uintptr_t)action; 10049 type = DR_DUMP_REC_TYPE_PMD_COUNTER; 10050 save_dump_file(NULL, 0, type, 10051 id, (void *)&count, file); 10052 } 10053 10054 while (handle_idx) { 10055 dh = mlx5_ipool_get(priv->sh->ipool 10056 [MLX5_IPOOL_MLX5_FLOW], handle_idx); 10057 if (!dh) 10058 continue; 10059 handle_idx = dh->next.next; 10060 10061 /* Get modify_hdr and encap_decap buf from ipools. 
*/ 10062 encap_decap = NULL; 10063 modify_hdr = dh->dvh.modify_hdr; 10064 10065 if (dh->dvh.rix_encap_decap) { 10066 encap_decap = mlx5_ipool_get(priv->sh->ipool 10067 [MLX5_IPOOL_DECAP_ENCAP], 10068 dh->dvh.rix_encap_decap); 10069 } 10070 if (modify_hdr) { 10071 data = (const uint8_t *)modify_hdr->actions; 10072 size = (size_t)(modify_hdr->actions_num) * 8; 10073 id = (uint64_t)(uintptr_t)modify_hdr->action; 10074 actions_num = modify_hdr->actions_num; 10075 type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR; 10076 save_dump_file(data, size, type, id, 10077 (void *)(&actions_num), file); 10078 } 10079 if (encap_decap) { 10080 data = encap_decap->buf; 10081 size = encap_decap->size; 10082 id = (uint64_t)(uintptr_t)encap_decap->action; 10083 type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT; 10084 save_dump_file(data, size, type, 10085 id, NULL, file); 10086 } 10087 } 10088 return 0; 10089 } 10090 10091 /** 10092 * Dump all flow's encap_decap/modify_hdr/counter data to file 10093 * 10094 * @param[in] dev 10095 * The pointer to Ethernet device. 10096 * @param[in] file 10097 * A pointer to a file for output. 10098 * @param[out] error 10099 * Perform verbose error reporting if not NULL. PMDs initialize this 10100 * structure in case of error only. 10101 * @return 10102 * 0 on success, a negative value otherwise. 10103 */ 10104 static int 10105 mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, 10106 FILE *file, struct rte_flow_error *error __rte_unused) 10107 { 10108 struct mlx5_priv *priv = dev->data->dev_private; 10109 struct mlx5_dev_ctx_shared *sh = priv->sh; 10110 struct mlx5_hlist *h; 10111 struct mlx5_flow_dv_modify_hdr_resource *modify_hdr; 10112 struct mlx5_flow_dv_encap_decap_resource *encap_decap; 10113 struct rte_flow_query_count count; 10114 uint32_t actions_num; 10115 const uint8_t *data; 10116 size_t size; 10117 uint64_t id; 10118 uint32_t type; 10119 uint32_t i; 10120 uint32_t j; 10121 struct mlx5_list_inconst *l_inconst; 10122 struct mlx5_list_entry *e; 10123 int lcore_index; 10124 struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng; 10125 uint32_t max; 10126 void *action; 10127 10128 /* encap_decap hlist is lcore_share, get global core cache. 
*/ 10129 i = MLX5_LIST_GLOBAL; 10130 h = sh->encaps_decaps; 10131 if (h) { 10132 for (j = 0; j <= h->mask; j++) { 10133 l_inconst = &h->buckets[j].l; 10134 if (!l_inconst || !l_inconst->cache[i]) 10135 continue; 10136 10137 e = LIST_FIRST(&l_inconst->cache[i]->h); 10138 while (e) { 10139 encap_decap = 10140 (struct mlx5_flow_dv_encap_decap_resource *)e; 10141 data = encap_decap->buf; 10142 size = encap_decap->size; 10143 id = (uint64_t)(uintptr_t)encap_decap->action; 10144 type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT; 10145 save_dump_file(data, size, type, 10146 id, NULL, file); 10147 e = LIST_NEXT(e, next); 10148 } 10149 } 10150 } 10151 10152 /* get modify_hdr */ 10153 h = sh->modify_cmds; 10154 if (h) { 10155 lcore_index = rte_lcore_index(rte_lcore_id()); 10156 if (unlikely(lcore_index == -1)) { 10157 lcore_index = MLX5_LIST_NLCORE; 10158 rte_spinlock_lock(&h->l_const.lcore_lock); 10159 } 10160 i = lcore_index; 10161 10162 if (lcore_index == MLX5_LIST_NLCORE) { 10163 for (i = 0; i <= (uint32_t)lcore_index; i++) { 10164 for (j = 0; j <= h->mask; j++) { 10165 l_inconst = &h->buckets[j].l; 10166 if (!l_inconst || !l_inconst->cache[i]) 10167 continue; 10168 10169 e = LIST_FIRST(&l_inconst->cache[i]->h); 10170 while (e) { 10171 modify_hdr = 10172 (struct mlx5_flow_dv_modify_hdr_resource *)e; 10173 data = (const uint8_t *)modify_hdr->actions; 10174 size = (size_t)(modify_hdr->actions_num) * 8; 10175 actions_num = modify_hdr->actions_num; 10176 id = (uint64_t)(uintptr_t)modify_hdr->action; 10177 type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR; 10178 save_dump_file(data, size, type, id, 10179 (void *)(&actions_num), file); 10180 e = LIST_NEXT(e, next); 10181 } 10182 } 10183 } 10184 } else { 10185 for (j = 0; j <= h->mask; j++) { 10186 l_inconst = &h->buckets[j].l; 10187 if (!l_inconst || !l_inconst->cache[i]) 10188 continue; 10189 10190 e = LIST_FIRST(&l_inconst->cache[i]->h); 10191 while (e) { 10192 modify_hdr = 10193 (struct mlx5_flow_dv_modify_hdr_resource *)e; 10194 data = (const uint8_t *)modify_hdr->actions; 10195 size = (size_t)(modify_hdr->actions_num) * 8; 10196 actions_num = modify_hdr->actions_num; 10197 id = (uint64_t)(uintptr_t)modify_hdr->action; 10198 type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR; 10199 save_dump_file(data, size, type, id, 10200 (void *)(&actions_num), file); 10201 e = LIST_NEXT(e, next); 10202 } 10203 } 10204 } 10205 10206 if (unlikely(lcore_index == MLX5_LIST_NLCORE)) 10207 rte_spinlock_unlock(&h->l_const.lcore_lock); 10208 } 10209 10210 /* get counter */ 10211 MLX5_ASSERT(cmng->n_valid <= MLX5_COUNTER_POOLS_MAX_NUM); 10212 max = MLX5_COUNTERS_PER_POOL * cmng->n_valid; 10213 for (j = 1; j <= max; j++) { 10214 action = NULL; 10215 if ((!mlx5_counter_query(dev, j, false, &count.hits, 10216 &count.bytes, &action)) && action) { 10217 id = (uint64_t)(uintptr_t)action; 10218 type = DR_DUMP_REC_TYPE_PMD_COUNTER; 10219 save_dump_file(NULL, 0, type, 10220 id, (void *)&count, file); 10221 } 10222 } 10223 return 0; 10224 } 10225 #endif 10226 10227 /** 10228 * Dump flow raw hw data to file 10229 * 10230 * @param[in] dev 10231 * The pointer to Ethernet device. 10232 * @param[in] file 10233 * A pointer to a file for output. 10234 * @param[out] error 10235 * Perform verbose error reporting if not NULL. PMDs initialize this 10236 * structure in case of error only. 10237 * @return 10238 * 0 on success, a negative value otherwise. 
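 *
 * A minimal usage sketch via the public API that reaches this handler
 * (hypothetical port id 0; passing a NULL flow dumps all flows):
 *
 * @code
 * struct rte_flow_error err;
 *
 * if (rte_flow_dev_dump(0, NULL, stderr, &err) != 0)
 *         printf("dump failed: %s\n", err.message ? err.message : "n/a");
 * @endcode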
10239 */
10240 int
10241 mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
10242 FILE *file,
10243 struct rte_flow_error *error __rte_unused)
10244 {
10245 struct mlx5_priv *priv = dev->data->dev_private;
10246 struct mlx5_dev_ctx_shared *sh = priv->sh;
10247 uint32_t handle_idx;
10248 int ret;
10249 struct mlx5_flow_handle *dh;
10250 struct rte_flow *flow;
10251
10252 if (!sh->config.dv_flow_en) {
10253 if (fputs("device dv flow disabled\n", file) <= 0)
10254 return -errno;
10255 return -ENOTSUP;
10256 }
10257
10258 /* dump all */
10259 if (!flow_idx) {
10260 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
10261 if (mlx5_flow_dev_dump_sh_all(dev, file, error))
10262 return -EINVAL;
10263
10264 if (sh->config.dv_flow_en == 2)
10265 return mlx5dr_debug_dump(priv->dr_ctx, file);
10266 #endif
10267 return mlx5_devx_cmd_flow_dump(sh->fdb_domain,
10268 sh->rx_domain,
10269 sh->tx_domain, file);
10270 }
10271 /* dump one */
10272 flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
10273 (uintptr_t)(void *)flow_idx);
10274 if (!flow)
10275 return -EINVAL;
10276
10277 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
10278 mlx5_flow_dev_dump_ipool(dev, flow, file, error);
10279 #endif
10280 handle_idx = flow->dev_handles;
10281 while (handle_idx) {
10282 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
10283 handle_idx);
10284 if (!dh)
10285 return -ENOENT;
10286 if (dh->drv_flow) {
10287 if (sh->config.dv_flow_en == 2)
10288 return -ENOTSUP;
10289
10290 ret = mlx5_devx_cmd_flow_single_dump(dh->drv_flow,
10291 file);
10292 if (ret)
10293 return -ENOENT;
10294 }
10295 handle_idx = dh->next.next;
10296 }
10297 return 0;
10298 }
10299
10300 /**
10301 * Get aged-out flows.
10302 *
10303 * @param[in] dev
10304 * Pointer to the Ethernet device structure.
10305 * @param[in] contexts
10306 * The address of an array of pointers to the aged-out flows contexts.
10307 * @param[in] nb_contexts
10308 * The length of the context array.
10309 * @param[out] error
10310 * Perform verbose error reporting if not NULL. Initialized in case of
10311 * error only.
10312 *
10313 * @return
10314 * The number of aged-out flow contexts returned on success, otherwise a negative errno value.
10315 * If nb_contexts is 0, return the total amount of aged contexts.
10316 * If nb_contexts is not 0, return the amount of aged flows reported
10317 * in the context array.
10318 */
10319 int
10320 mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
10321 uint32_t nb_contexts, struct rte_flow_error *error)
10322 {
10323 struct rte_flow_attr attr = { .transfer = 0 };
10324
10325 return flow_get_drv_ops(flow_get_drv_type(dev, &attr))->get_aged_flows
10326 (dev, contexts, nb_contexts, error);
10327 }
10328
10329 /**
10330 * Get aged-out flows per HWS queue.
10331 *
10332 * @param[in] dev
10333 * Pointer to the Ethernet device structure.
10334 * @param[in] queue_id
10335 * Flow queue to query.
10336 * @param[in] contexts
10337 * The address of an array of pointers to the aged-out flows contexts.
10338 * @param[in] nb_contexts
10339 * The length of the context array.
10340 * @param[out] error
10341 * Perform verbose error reporting if not NULL. Initialized in case of
10342 * error only.
10343 *
10344 * @return
10345 * The number of aged-out flow contexts returned on success, otherwise a negative errno value.
10346 * If nb_contexts is 0, return the total amount of aged contexts.
10347 * If nb_contexts is not 0, return the amount of aged flows reported
10348 * in the context array.
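 *
 * A two-step caller sketch using the public API (hypothetical port and
 * queue ids), first sizing the array as described above, then fetching:
 *
 * @code
 * struct rte_flow_error err;
 * int n = rte_flow_get_q_aged_flows(port_id, queue_id, NULL, 0, &err);
 *
 * if (n > 0) {
 *         void **ctx = calloc(n, sizeof(*ctx));
 *
 *         if (ctx != NULL)
 *                 n = rte_flow_get_q_aged_flows(port_id, queue_id, ctx,
 *                                               n, &err);
 * }
 * @endcode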
10349 */
10350 int
10351 mlx5_flow_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id,
10352 void **contexts, uint32_t nb_contexts,
10353 struct rte_flow_error *error)
10354 {
10355 const struct mlx5_flow_driver_ops *fops;
10356 struct rte_flow_attr attr = { 0 };
10357
10358 if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_HW) {
10359 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
10360 return fops->get_q_aged_flows(dev, queue_id, contexts,
10361 nb_contexts, error);
10362 }
10363 DRV_LOG(ERR, "port %u queue %u get aged flows is not supported.",
10364 dev->data->port_id, queue_id);
10365 return rte_flow_error_set(error, ENOTSUP,
10366 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10367 "get Q aged flows with incorrect steering mode");
10368 }
10369
10370 /* Wrapper for driver action_validate op callback */
10371 static int
10372 flow_drv_action_validate(struct rte_eth_dev *dev,
10373 const struct rte_flow_indir_action_conf *conf,
10374 const struct rte_flow_action *action,
10375 const struct mlx5_flow_driver_ops *fops,
10376 struct rte_flow_error *error)
10377 {
10378 static const char err_msg[] = "indirect action validation unsupported";
10379
10380 if (!fops->action_validate) {
10381 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
10382 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
10383 NULL, err_msg);
10384 return -rte_errno;
10385 }
10386 return fops->action_validate(dev, conf, action, error);
10387 }
10388
10389 /**
10390 * Destroys the shared action by handle.
10391 *
10392 * @param dev
10393 * Pointer to Ethernet device structure.
10394 * @param[in] handle
10395 * Handle for the indirect action object to be destroyed.
10396 * @param[out] error
10397 * Perform verbose error reporting if not NULL. PMDs initialize this
10398 * structure in case of error only.
10399 *
10400 * @return
10401 * 0 on success, a negative errno value otherwise and rte_errno is set.
10402 *
10403 * @note: wrapper for driver action_destroy op callback.
10404 */
10405 static int
10406 mlx5_action_handle_destroy(struct rte_eth_dev *dev,
10407 struct rte_flow_action_handle *handle,
10408 struct rte_flow_error *error)
10409 {
10410 static const char err_msg[] = "indirect action destruction unsupported";
10411 struct rte_flow_attr attr = { .transfer = 0 };
10412 const struct mlx5_flow_driver_ops *fops =
10413 flow_get_drv_ops(flow_get_drv_type(dev, &attr));
10414
10415 if (!fops->action_destroy) {
10416 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
10417 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
10418 NULL, err_msg);
10419 return -rte_errno;
10420 }
10421 return fops->action_destroy(dev, handle, error);
10422 }
10423
10424 /* Wrapper for driver action_update op callback */
10425 static int
10426 flow_drv_action_update(struct rte_eth_dev *dev,
10427 struct rte_flow_action_handle *handle,
10428 const void *update,
10429 const struct mlx5_flow_driver_ops *fops,
10430 struct rte_flow_error *error)
10431 {
10432 static const char err_msg[] = "indirect action update unsupported";
10433
10434 if (!fops->action_update) {
10435 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
10436 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
10437 NULL, err_msg);
10438 return -rte_errno;
10439 }
10440 return fops->action_update(dev, handle, update, error);
10441 }
10442
10443 /* Wrapper for driver action_query op callback */
10444 static int
10445 flow_drv_action_query(struct rte_eth_dev *dev,
10446 const struct rte_flow_action_handle *handle,
10447 void *data,
10448 const struct mlx5_flow_driver_ops *fops,
10449 struct rte_flow_error *error)
10450 {
10451 static const char err_msg[] = "indirect action query unsupported";
10452
10453 if (!fops->action_query) {
10454 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
10455 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
10456 NULL, err_msg);
10457 return -rte_errno;
10458 }
10459 return fops->action_query(dev, handle, data, error);
10460 }
10461
10462 /**
10463 * Create indirect action for reuse in multiple flow rules.
10464 *
10465 * @param dev
10466 * Pointer to Ethernet device structure.
10467 * @param conf
10468 * Pointer to indirect action object configuration.
10469 * @param[in] action
10470 * Action configuration for indirect action object creation.
10471 * @param[out] error
10472 * Perform verbose error reporting if not NULL. PMDs initialize this
10473 * structure in case of error only.
10474 * @return
10475 * A valid handle in case of success, NULL otherwise and rte_errno is set.
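 *
 * A caller-side sketch creating a shared COUNT action through the public
 * API (hypothetical port_id; the handle can then be referenced from many
 * rules via RTE_FLOW_ACTION_TYPE_INDIRECT):
 *
 * @code
 * struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 * struct rte_flow_action act = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 * struct rte_flow_error err;
 * struct rte_flow_action_handle *h =
 *         rte_flow_action_handle_create(port_id, &conf, &act, &err);
 * @endcode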
10476 */
10477 static struct rte_flow_action_handle *
10478 mlx5_action_handle_create(struct rte_eth_dev *dev,
10479 const struct rte_flow_indir_action_conf *conf,
10480 const struct rte_flow_action *action,
10481 struct rte_flow_error *error)
10482 {
10483 static const char err_msg[] = "indirect action creation unsupported";
10484 struct rte_flow_attr attr = { .transfer = 0 };
10485 const struct mlx5_flow_driver_ops *fops =
10486 flow_get_drv_ops(flow_get_drv_type(dev, &attr));
10487
10488 if (flow_drv_action_validate(dev, conf, action, fops, error))
10489 return NULL;
10490 if (!fops->action_create) {
10491 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
10492 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
10493 NULL, err_msg);
10494 return NULL;
10495 }
10496 return fops->action_create(dev, conf, action, error);
10497 }
10498
10499 /**
10500 * Updates in place the indirect action configuration pointed to by *handle*
10501 * with the configuration provided as *update* argument.
10502 * The update of the indirect action configuration affects all flow rules
10503 * reusing the action via handle.
10504 *
10505 * @param dev
10506 * Pointer to Ethernet device structure.
10507 * @param[in] handle
10508 * Handle for the indirect action to be updated.
10509 * @param[in] update
10510 * Action specification used to modify the action pointed to by handle.
10511 * *update* may be of the same type as the action pointed to by the
10512 * *handle* argument, or some other structure like a wrapper, depending on
10513 * the indirect action type.
10514 * @param[out] error
10515 * Perform verbose error reporting if not NULL. PMDs initialize this
10516 * structure in case of error only.
10517 *
10518 * @return
10519 * 0 on success, a negative errno value otherwise and rte_errno is set.
10520 */
10521 static int
10522 mlx5_action_handle_update(struct rte_eth_dev *dev,
10523 struct rte_flow_action_handle *handle,
10524 const void *update,
10525 struct rte_flow_error *error)
10526 {
10527 struct rte_flow_attr attr = { .transfer = 0 };
10528 const struct mlx5_flow_driver_ops *fops =
10529 flow_get_drv_ops(flow_get_drv_type(dev, &attr));
10530 int ret;
10531 uint32_t act_idx = (uint32_t)(uintptr_t)handle;
10532 uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
10533
10534 switch (type) {
10535 case MLX5_INDIRECT_ACTION_TYPE_CT:
10536 case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
10537 ret = 0;
10538 break;
10539 default:
10540 ret = flow_drv_action_validate(dev, NULL,
10541 (const struct rte_flow_action *)update,
10542 fops, error);
10543 }
10544 if (ret)
10545 return ret;
10546 return flow_drv_action_update(dev, handle, update, fops,
10547 error);
10548 }
10549
10550 /**
10551 * Query the indirect action by handle.
10552 *
10553 * This function allows retrieving action-specific data such as counters.
10554 * Data is gathered by a special action which may be present/referenced in
10555 * more than one flow rule definition.
10556 *
10557 * @see RTE_FLOW_ACTION_TYPE_COUNT
10558 *
10559 * @param dev
10560 * Pointer to Ethernet device structure.
10561 * @param[in] handle
10562 * Handle for the indirect action to query.
10563 * @param[in, out] data
10564 * Pointer to storage for the associated query data type.
10565 * @param[out] error
10566 * Perform verbose error reporting if not NULL. PMDs initialize this
10567 * structure in case of error only.
10568 *
10569 * @return
10570 * 0 on success, a negative errno value otherwise and rte_errno is set.
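 *
 * A caller-side sketch querying a COUNT handle such as the one created
 * above, through the public API (the query data type follows the action
 * type; for COUNT it is struct rte_flow_query_count):
 *
 * @code
 * struct rte_flow_query_count cnt = { .reset = 1 };
 * struct rte_flow_error err;
 *
 * if (rte_flow_action_handle_query(port_id, h, &cnt, &err) == 0)
 *         printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *                cnt.hits, cnt.bytes);
 * @endcode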
10571 */ 10572 static int 10573 mlx5_action_handle_query(struct rte_eth_dev *dev, 10574 const struct rte_flow_action_handle *handle, 10575 void *data, 10576 struct rte_flow_error *error) 10577 { 10578 struct rte_flow_attr attr = { .transfer = 0 }; 10579 const struct mlx5_flow_driver_ops *fops = 10580 flow_get_drv_ops(flow_get_drv_type(dev, &attr)); 10581 10582 return flow_drv_action_query(dev, handle, data, fops, error); 10583 } 10584 10585 static int 10586 mlx5_action_handle_query_update(struct rte_eth_dev *dev, 10587 struct rte_flow_action_handle *handle, 10588 const void *update, void *query, 10589 enum rte_flow_query_update_mode qu_mode, 10590 struct rte_flow_error *error) 10591 { 10592 struct rte_flow_attr attr = { .transfer = 0 }; 10593 enum mlx5_flow_drv_type drv_type = flow_get_drv_type(dev, &attr); 10594 const struct mlx5_flow_driver_ops *fops; 10595 10596 if (drv_type == MLX5_FLOW_TYPE_MIN || drv_type == MLX5_FLOW_TYPE_MAX) 10597 return rte_flow_error_set(error, ENOTSUP, 10598 RTE_FLOW_ERROR_TYPE_ACTION, 10599 NULL, "invalid driver type"); 10600 fops = flow_get_drv_ops(drv_type); 10601 if (!fops || !fops->action_query_update) 10602 return rte_flow_error_set(error, ENOTSUP, 10603 RTE_FLOW_ERROR_TYPE_ACTION, 10604 NULL, "no query_update handler"); 10605 return fops->action_query_update(dev, handle, update, 10606 query, qu_mode, error); 10607 } 10608 10609 10610 #define MLX5_DRV_FOPS_OR_ERR(dev, fops, drv_cb, ret) \ 10611 { \ 10612 struct rte_flow_attr attr = { .transfer = 0 }; \ 10613 enum mlx5_flow_drv_type drv_type = flow_get_drv_type((dev), &attr); \ 10614 if (drv_type == MLX5_FLOW_TYPE_MIN || \ 10615 drv_type == MLX5_FLOW_TYPE_MAX) { \ 10616 rte_flow_error_set(error, ENOTSUP, \ 10617 RTE_FLOW_ERROR_TYPE_ACTION, \ 10618 NULL, "invalid driver type"); \ 10619 return ret; \ 10620 } \ 10621 (fops) = flow_get_drv_ops(drv_type); \ 10622 if (!(fops) || !(fops)->drv_cb) { \ 10623 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, \ 10624 NULL, "no action_list handler"); \ 10625 return ret; \ 10626 } \ 10627 } 10628 10629 static struct rte_flow_action_list_handle * 10630 mlx5_action_list_handle_create(struct rte_eth_dev *dev, 10631 const struct rte_flow_indir_action_conf *conf, 10632 const struct rte_flow_action *actions, 10633 struct rte_flow_error *error) 10634 { 10635 const struct mlx5_flow_driver_ops *fops; 10636 10637 MLX5_DRV_FOPS_OR_ERR(dev, fops, action_list_handle_create, NULL); 10638 return fops->action_list_handle_create(dev, conf, actions, error); 10639 } 10640 10641 static int 10642 mlx5_action_list_handle_destroy(struct rte_eth_dev *dev, 10643 struct rte_flow_action_list_handle *handle, 10644 struct rte_flow_error *error) 10645 { 10646 const struct mlx5_flow_driver_ops *fops; 10647 10648 MLX5_DRV_FOPS_OR_ERR(dev, fops, action_list_handle_destroy, ENOTSUP); 10649 return fops->action_list_handle_destroy(dev, handle, error); 10650 } 10651 10652 static int 10653 mlx5_flow_action_list_handle_query_update(struct rte_eth_dev *dev, 10654 const 10655 struct rte_flow_action_list_handle *handle, 10656 const void **update, void **query, 10657 enum rte_flow_query_update_mode mode, 10658 struct rte_flow_error *error) 10659 { 10660 const struct mlx5_flow_driver_ops *fops; 10661 10662 MLX5_DRV_FOPS_OR_ERR(dev, fops, 10663 action_list_handle_query_update, ENOTSUP); 10664 return fops->action_list_handle_query_update(dev, handle, update, query, 10665 mode, error); 10666 } 10667 static int 10668 mlx5_flow_calc_table_hash(struct rte_eth_dev *dev, 10669 const struct 
rte_flow_template_table *table,
10670 const struct rte_flow_item pattern[],
10671 uint8_t pattern_template_index,
10672 uint32_t *hash, struct rte_flow_error *error)
10673 {
10674 struct rte_flow_attr attr = { .transfer = 0 };
10675 enum mlx5_flow_drv_type drv_type = flow_get_drv_type(dev, &attr);
10676 const struct mlx5_flow_driver_ops *fops;
10677
10678 if (drv_type == MLX5_FLOW_TYPE_MIN || drv_type == MLX5_FLOW_TYPE_MAX)
10679 return rte_flow_error_set(error, ENOTSUP,
10680 RTE_FLOW_ERROR_TYPE_ACTION,
10681 NULL, "invalid driver type");
10682 fops = flow_get_drv_ops(drv_type);
10683 if (!fops || !fops->flow_calc_table_hash)
10684 return rte_flow_error_set(error, ENOTSUP,
10685 RTE_FLOW_ERROR_TYPE_ACTION,
10686 NULL, "no calc table hash handler");
10687 return fops->flow_calc_table_hash(dev, table, pattern, pattern_template_index,
10688 hash, error);
10689 }
10690
10691 static int
10692 mlx5_flow_calc_encap_hash(struct rte_eth_dev *dev,
10693 const struct rte_flow_item pattern[],
10694 enum rte_flow_encap_hash_field dest_field,
10695 uint8_t *hash,
10696 struct rte_flow_error *error)
10697 {
10698 enum mlx5_flow_drv_type drv_type = flow_get_drv_type(dev, NULL);
10699 const struct mlx5_flow_driver_ops *fops;
10700
10701 if (drv_type == MLX5_FLOW_TYPE_MIN || drv_type == MLX5_FLOW_TYPE_MAX)
10702 return rte_flow_error_set(error, ENOTSUP,
10703 RTE_FLOW_ERROR_TYPE_ACTION,
10704 NULL, "invalid driver type");
10705 fops = flow_get_drv_ops(drv_type);
10706 if (!fops || !fops->flow_calc_encap_hash)
10707 return rte_flow_error_set(error, ENOTSUP,
10708 RTE_FLOW_ERROR_TYPE_ACTION,
10709 NULL, "no calc encap hash handler");
10710 return fops->flow_calc_encap_hash(dev, pattern, dest_field, hash, error);
10711 }
10712
10713 static int
10714 mlx5_template_table_resize(struct rte_eth_dev *dev,
10715 struct rte_flow_template_table *table,
10716 uint32_t nb_rules, struct rte_flow_error *error)
10717 {
10718 const struct mlx5_flow_driver_ops *fops;
10719
10720 MLX5_DRV_FOPS_OR_ERR(dev, fops, table_resize, ENOTSUP);
10721 return fops->table_resize(dev, table, nb_rules, error);
10722 }
10723
10724 static int
10725 mlx5_table_resize_complete(struct rte_eth_dev *dev,
10726 struct rte_flow_template_table *table,
10727 struct rte_flow_error *error)
10728 {
10729 const struct mlx5_flow_driver_ops *fops;
10730
10731 MLX5_DRV_FOPS_OR_ERR(dev, fops, table_resize_complete, ENOTSUP);
10732 return fops->table_resize_complete(dev, table, error);
10733 }
10734
10735 static int
10736 mlx5_flow_async_update_resized(struct rte_eth_dev *dev, uint32_t queue,
10737 const struct rte_flow_op_attr *op_attr,
10738 struct rte_flow *rule, void *user_data,
10739 struct rte_flow_error *error)
10740 {
10741 const struct mlx5_flow_driver_ops *fops;
10742
10743 MLX5_DRV_FOPS_OR_ERR(dev, fops, flow_update_resized, ENOTSUP);
10744 return fops->flow_update_resized(dev, queue, op_attr, rule, user_data, error);
10745 }
10746
10747 /**
10748 * Destroy all indirect actions (shared RSS).
10749 *
10750 * @param dev
10751 * Pointer to Ethernet device.
10752 *
10753 * @return
10754 * 0 on success, a negative errno value otherwise and rte_errno is set.
/**
 * Destroy all indirect actions (shared RSS).
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_action_handle_flush(struct rte_eth_dev *dev)
{
	struct rte_flow_error error;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_shared_action_rss *shared_rss;
	int ret = 0;
	uint32_t idx;

	ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
		      priv->rss_shared_actions, idx, shared_rss, next) {
		ret |= mlx5_action_handle_destroy(dev,
		       (struct rte_flow_action_handle *)(uintptr_t)idx, &error);
	}
	return ret;
}

/**
 * Validate existing indirect actions against current device configuration
 * and attach them to device resources.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_action_handle_attach(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret = 0;
	struct mlx5_ind_table_obj *ind_tbl, *ind_tbl_last;

	LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
		const char *message;
		uint32_t queue_idx;

		ret = mlx5_validate_rss_queues(dev, ind_tbl->queues,
					       ind_tbl->queues_n,
					       &message, &queue_idx);
		if (ret != 0) {
			DRV_LOG(ERR, "Port %u cannot use queue %u in RSS: %s",
				dev->data->port_id, ind_tbl->queues[queue_idx],
				message);
			break;
		}
	}
	if (ret != 0)
		return ret;
	LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
		ret = mlx5_ind_table_obj_attach(dev, ind_tbl);
		if (ret != 0) {
			DRV_LOG(ERR, "Port %u could not attach "
				"indirection table obj %p",
				dev->data->port_id, (void *)ind_tbl);
			goto error;
		}
	}

	return 0;
error:
	ind_tbl_last = ind_tbl;
	LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
		if (ind_tbl == ind_tbl_last)
			break;
		if (mlx5_ind_table_obj_detach(dev, ind_tbl) != 0)
			DRV_LOG(CRIT, "Port %u could not detach "
				"indirection table obj %p on rollback",
				dev->data->port_id, (void *)ind_tbl);
	}
	return ret;
}
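
/*
 * Illustrative sketch (editor's addition): the shared RSS objects released
 * by mlx5_action_handle_flush() are created by the application through the
 * generic indirect action API, e.g. (the queue list is an assumption):
 *
 *	uint16_t queues[] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss rss_conf = {
 *		.queue_num = RTE_DIM(queues),
 *		.queue = queues,
 *	};
 *	const struct rte_flow_action rss_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_RSS,
 *		.conf = &rss_conf,
 *	};
 *	struct rte_flow_indir_action_conf ia_conf = { .ingress = 1 };
 *	struct rte_flow_action_handle *handle =
 *		rte_flow_action_handle_create(port_id, &ia_conf,
 *					      &rss_action, &err);
 */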
/**
 * Detach indirect actions of the device from its resources.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_action_handle_detach(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret = 0;
	struct mlx5_ind_table_obj *ind_tbl, *ind_tbl_last;

	LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
		ret = mlx5_ind_table_obj_detach(dev, ind_tbl);
		if (ret != 0) {
			DRV_LOG(ERR, "Port %u could not detach "
				"indirection table obj %p",
				dev->data->port_id, (void *)ind_tbl);
			goto error;
		}
	}
	return 0;
error:
	ind_tbl_last = ind_tbl;
	LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
		if (ind_tbl == ind_tbl_last)
			break;
		if (mlx5_ind_table_obj_attach(dev, ind_tbl) != 0)
			DRV_LOG(CRIT, "Port %u could not attach "
				"indirection table obj %p on rollback",
				dev->data->port_id, (void *)ind_tbl);
	}
	return ret;
}

#ifndef HAVE_MLX5DV_DR
#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
#else
#define MLX5_DOMAIN_SYNC_FLOW \
	(MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
#endif

int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct mlx5_flow_driver_ops *fops;
	int ret;
	struct rte_flow_attr attr = { .transfer = 0 };

	fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
	ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
	if (ret > 0)
		ret = -ret;
	return ret;
}

const struct mlx5_flow_tunnel *
mlx5_get_tof(const struct rte_flow_item *item,
	     const struct rte_flow_action *action,
	     enum mlx5_tof_rule_type *rule_type)
{
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type == (typeof(item->type))
				  MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL) {
			*rule_type = MLX5_TUNNEL_OFFLOAD_MATCH_RULE;
			return flow_items_to_tunnel(item);
		}
	}
	/* Compare the action type, not the (void *)conf pointer. */
	for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
		if (action->type == (typeof(action->type))
				    MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
			*rule_type = MLX5_TUNNEL_OFFLOAD_SET_RULE;
			return flow_actions_to_tunnel(action);
		}
	}
	return NULL;
}

/**
 * Tunnel offload functionality is defined for the DV environment only.
 */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
__extension__
union tunnel_offload_mark {
	uint32_t val;
	struct {
		uint32_t app_reserve:8;
		uint32_t table_id:15;
		uint32_t transfer:1;
		uint32_t _unused_:8;
	};
};

static bool
mlx5_access_tunnel_offload_db
	(struct rte_eth_dev *dev,
	 bool (*match)(struct rte_eth_dev *,
		       struct mlx5_flow_tunnel *, const void *),
	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
	 void (*miss)(struct rte_eth_dev *, void *),
	 void *ctx, bool lock_op);
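
/*
 * Layout illustration (editor's addition): on a little-endian host the
 * 32-bit MARK value carried by a tunnel-offload miss packet packs the jump
 * table id and the transfer attribute around an 8-bit slice reserved for
 * the application:
 *
 *	bits 31..24   bit 23     bits 22..8   bits 7..0
 *	 _unused_    transfer     table_id    app_reserve
 *
 * For example, composing a mark for table 5 on a non-transfer rule:
 *
 *	union tunnel_offload_mark m = { .val = 0 };
 *
 *	m.table_id = 5;
 *	m.transfer = 0;
 *	// m.val is then programmed via struct rte_flow_action_mark
 */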
static int
flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
			     struct rte_flow *flow,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_action *app_actions,
			     uint32_t flow_idx,
			     const struct mlx5_flow_tunnel *tunnel,
			     struct tunnel_default_miss_ctx *ctx,
			     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow *dev_flow;
	struct rte_flow_attr miss_attr = *attr;
	const struct rte_flow_item miss_items[2] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_ETH,
			.spec = NULL,
			.last = NULL,
			.mask = NULL
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
			.spec = NULL,
			.last = NULL,
			.mask = NULL
		}
	};
	union tunnel_offload_mark mark_id;
	struct rte_flow_action_mark miss_mark;
	struct rte_flow_action miss_actions[3] = {
		[0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
		[2] = { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL }
	};
	const struct rte_flow_action_jump *jump_data;
	uint32_t i, flow_table = 0; /* prevent compilation warning */
	struct flow_grp_info grp_info = {
		.external = 1,
		.transfer = attr->transfer,
		.fdb_def_rule = !!priv->fdb_def_rule,
		.std_tbl_fix = 0,
	};
	int ret;

	if (!attr->transfer) {
		uint32_t q_size;

		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
		q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
		ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
					 0, SOCKET_ID_ANY);
		if (!ctx->queue)
			return rte_flow_error_set
				(error, ENOMEM,
				 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 NULL, "invalid default miss RSS");
		ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT;
		ctx->action_rss.level = 0;
		ctx->action_rss.types = priv->rss_conf.rss_hf;
		ctx->action_rss.key_len = priv->rss_conf.rss_key_len;
		ctx->action_rss.queue_num = priv->reta_idx_n;
		ctx->action_rss.key = priv->rss_conf.rss_key;
		ctx->action_rss.queue = ctx->queue;
		if (!priv->reta_idx_n || !priv->rxqs_n)
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 NULL, "invalid port configuration");
		if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
			ctx->action_rss.types = 0;
		for (i = 0; i != priv->reta_idx_n; ++i)
			ctx->queue[i] = (*priv->reta_idx)[i];
	} else {
		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
		ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
	}
	miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
	for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
	jump_data = app_actions->conf;
	miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
	miss_attr.group = jump_data->group;
	ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
				       &flow_table, &grp_info, error);
	if (ret)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "invalid tunnel id");
	mark_id.app_reserve = 0;
	mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
	mark_id.transfer = !!attr->transfer;
	mark_id._unused_ = 0;
	miss_mark.id = mark_id.val;
	dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
				    miss_items, miss_actions, flow_idx, error);
	if (!dev_flow)
		return -rte_errno;
	dev_flow->flow = flow;
	dev_flow->external = true;
	dev_flow->tunnel = tunnel;
	dev_flow->tof_type = MLX5_TUNNEL_OFFLOAD_MISS_RULE;
	/* Subflow object was created, we must include it in the list. */
	SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
		      dev_flow->handle, next);
	DRV_LOG(DEBUG,
		"port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
		dev->data->port_id, tunnel->app_tunnel.type,
		tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
	ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
				 miss_actions, error);
	if (!ret)
		ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
						  error);

	return ret;
}
static const struct mlx5_flow_tbl_data_entry *
tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_list_entry *he;
	union tunnel_offload_mark mbits = { .val = mark };
	union mlx5_flow_tbl_key table_key = {
		{
			.level = tunnel_id_to_flow_tbl(mbits.table_id),
			.id = 0,
			.reserved = 0,
			.dummy = 0,
			.is_fdb = !!mbits.transfer,
			.is_egress = 0,
		}
	};
	struct mlx5_flow_cb_ctx ctx = {
		.data = &table_key.v64,
	};

	he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, &ctx);
	return he ?
	       container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
}

static void
mlx5_flow_tunnel_grp2tbl_remove_cb(void *tool_ctx,
				   struct mlx5_list_entry *entry)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);

	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
			tunnel_flow_tbl_to_id(tte->flow_table));
	mlx5_free(tte);
}

static int
mlx5_flow_tunnel_grp2tbl_match_cb(void *tool_ctx __rte_unused,
				  struct mlx5_list_entry *entry, void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	union tunnel_tbl_key tbl = {
		.val = *(uint64_t *)(ctx->data),
	};
	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);

	return tbl.tunnel_id != tte->tunnel_id || tbl.group != tte->group;
}

static struct mlx5_list_entry *
mlx5_flow_tunnel_grp2tbl_create_cb(void *tool_ctx, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct tunnel_tbl_entry *tte;
	union tunnel_tbl_key tbl = {
		.val = *(uint64_t *)(ctx->data),
	};

	tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
			  sizeof(*tte), 0,
			  SOCKET_ID_ANY);
	if (!tte)
		goto err;
	mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
			  &tte->flow_table);
	if (tte->flow_table >= MLX5_MAX_TABLES) {
		DRV_LOG(ERR, "Tunnel TBL ID %d exceeds the maximum limit.",
			tte->flow_table);
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
				tte->flow_table);
		goto err;
	} else if (!tte->flow_table) {
		goto err;
	}
	tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
	tte->tunnel_id = tbl.tunnel_id;
	tte->group = tbl.group;
	return &tte->hash;
err:
	if (tte)
		mlx5_free(tte);
	return NULL;
}
static struct mlx5_list_entry *
mlx5_flow_tunnel_grp2tbl_clone_cb(void *tool_ctx __rte_unused,
				  struct mlx5_list_entry *oentry,
				  void *cb_ctx __rte_unused)
{
	struct tunnel_tbl_entry *tte = mlx5_malloc(MLX5_MEM_SYS, sizeof(*tte),
						   0, SOCKET_ID_ANY);

	if (!tte)
		return NULL;
	memcpy(tte, oentry, sizeof(*tte));
	return &tte->hash;
}

static void
mlx5_flow_tunnel_grp2tbl_clone_free_cb(void *tool_ctx __rte_unused,
				       struct mlx5_list_entry *entry)
{
	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);

	mlx5_free(tte);
}

static uint32_t
tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
				const struct mlx5_flow_tunnel *tunnel,
				uint32_t group, uint32_t *table,
				struct rte_flow_error *error)
{
	struct mlx5_list_entry *he;
	struct tunnel_tbl_entry *tte;
	union tunnel_tbl_key key = {
		.tunnel_id = tunnel ? tunnel->tunnel_id : 0,
		.group = group
	};
	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
	struct mlx5_hlist *group_hash;
	struct mlx5_flow_cb_ctx ctx = {
		.data = &key.val,
	};

	group_hash = tunnel ? tunnel->groups : thub->groups;
	he = mlx5_hlist_register(group_hash, key.val, &ctx);
	if (!he)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL,
					  "tunnel group index not supported");
	tte = container_of(he, typeof(*tte), hash);
	*table = tte->flow_table;
	DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
		dev->data->port_id, key.tunnel_id, group, *table);
	return 0;
}

static void
mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
		      struct mlx5_flow_tunnel *tunnel)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_indexed_pool *ipool;

	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
		dev->data->port_id, tunnel->tunnel_id);
	LIST_REMOVE(tunnel, chain);
	mlx5_hlist_destroy(tunnel->groups);
	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
	mlx5_ipool_free(ipool, tunnel->tunnel_id);
}

static bool
mlx5_access_tunnel_offload_db
	(struct rte_eth_dev *dev,
	 bool (*match)(struct rte_eth_dev *,
		       struct mlx5_flow_tunnel *, const void *),
	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
	 void (*miss)(struct rte_eth_dev *, void *),
	 void *ctx, bool lock_op)
{
	bool verdict = false;
	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
	struct mlx5_flow_tunnel *tunnel;

	rte_spinlock_lock(&thub->sl);
	LIST_FOREACH(tunnel, &thub->tunnels, chain) {
		verdict = match(dev, tunnel, (const void *)ctx);
		if (verdict)
			break;
	}
	if (!lock_op)
		rte_spinlock_unlock(&thub->sl);
	if (verdict && hit)
		hit(dev, tunnel, ctx);
	if (!verdict && miss)
		miss(dev, ctx);
	if (lock_op)
		rte_spinlock_unlock(&thub->sl);

	return verdict;
}

struct tunnel_db_find_tunnel_id_ctx {
	uint32_t tunnel_id;
	struct mlx5_flow_tunnel *tunnel;
};

static bool
find_tunnel_id_match(struct rte_eth_dev *dev,
		     struct mlx5_flow_tunnel *tunnel, const void *x)
{
	const struct tunnel_db_find_tunnel_id_ctx *ctx = x;

	RTE_SET_USED(dev);
	return tunnel->tunnel_id == ctx->tunnel_id;
}

static void
find_tunnel_id_hit(struct rte_eth_dev *dev,
		   struct mlx5_flow_tunnel *tunnel, void *x)
{
	struct tunnel_db_find_tunnel_id_ctx *ctx = x;

	RTE_SET_USED(dev);
	ctx->tunnel = tunnel;
}
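
/*
 * Pattern note with a hypothetical example (editor's addition): every
 * lookup over the tunnel DB is expressed as a match/hit/miss callback
 * triple passed to mlx5_access_tunnel_offload_db(). A hypothetical
 * accessor that only checks whether any tunnel exists could look like:
 *
 *	static bool any_match(struct rte_eth_dev *dev,
 *			      struct mlx5_flow_tunnel *tunnel, const void *x)
 *	{
 *		RTE_SET_USED(dev);
 *		RTE_SET_USED(tunnel);
 *		RTE_SET_USED(x);
 *		return true;
 *	}
 *
 *	bool has_tunnels = mlx5_access_tunnel_offload_db(dev, any_match,
 *							 NULL, NULL, NULL,
 *							 false);
 */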
static struct mlx5_flow_tunnel *
mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
{
	struct tunnel_db_find_tunnel_id_ctx ctx = {
		.tunnel_id = id,
	};

	mlx5_access_tunnel_offload_db(dev, find_tunnel_id_match,
				      find_tunnel_id_hit, NULL, &ctx, true);

	return ctx.tunnel;
}

static struct mlx5_flow_tunnel *
mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
			  const struct rte_flow_tunnel *app_tunnel)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_indexed_pool *ipool;
	struct mlx5_flow_tunnel *tunnel;
	uint32_t id;

	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
	tunnel = mlx5_ipool_zmalloc(ipool, &id);
	if (!tunnel)
		return NULL;
	if (id >= MLX5_MAX_TUNNELS) {
		mlx5_ipool_free(ipool, id);
		DRV_LOG(ERR, "Tunnel ID %d exceeds the maximum limit.", id);
		return NULL;
	}
	tunnel->groups = mlx5_hlist_create("tunnel groups", 64, false, true,
					   priv->sh,
					   mlx5_flow_tunnel_grp2tbl_create_cb,
					   mlx5_flow_tunnel_grp2tbl_match_cb,
					   mlx5_flow_tunnel_grp2tbl_remove_cb,
					   mlx5_flow_tunnel_grp2tbl_clone_cb,
					   mlx5_flow_tunnel_grp2tbl_clone_free_cb);
	if (!tunnel->groups) {
		mlx5_ipool_free(ipool, id);
		return NULL;
	}
	/* initiate new PMD tunnel */
	memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel));
	tunnel->tunnel_id = id;
	tunnel->action.type = (typeof(tunnel->action.type))
			      MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET;
	tunnel->action.conf = tunnel;
	tunnel->item.type = (typeof(tunnel->item.type))
			    MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL;
	tunnel->item.spec = tunnel;
	tunnel->item.last = NULL;
	tunnel->item.mask = NULL;

	DRV_LOG(DEBUG, "port %u new pmd tunnel id=0x%x",
		dev->data->port_id, tunnel->tunnel_id);

	return tunnel;
}

struct tunnel_db_get_tunnel_ctx {
	const struct rte_flow_tunnel *app_tunnel;
	struct mlx5_flow_tunnel *tunnel;
};

static bool get_tunnel_match(struct rte_eth_dev *dev,
			     struct mlx5_flow_tunnel *tunnel, const void *x)
{
	const struct tunnel_db_get_tunnel_ctx *ctx = x;

	RTE_SET_USED(dev);
	return !memcmp(ctx->app_tunnel, &tunnel->app_tunnel,
		       sizeof(*ctx->app_tunnel));
}

static void get_tunnel_hit(struct rte_eth_dev *dev,
			   struct mlx5_flow_tunnel *tunnel, void *x)
{
	/* called under tunnel spinlock protection */
	struct tunnel_db_get_tunnel_ctx *ctx = x;

	RTE_SET_USED(dev);
	tunnel->refctn++;
	ctx->tunnel = tunnel;
}

static void get_tunnel_miss(struct rte_eth_dev *dev, void *x)
{
	/* called under tunnel spinlock protection */
	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
	struct tunnel_db_get_tunnel_ctx *ctx = x;

	rte_spinlock_unlock(&thub->sl);
	ctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel);
	rte_spinlock_lock(&thub->sl);
	if (ctx->tunnel) {
		ctx->tunnel->refctn = 1;
		LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);
	}
}
static int
mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
		     const struct rte_flow_tunnel *app_tunnel,
		     struct mlx5_flow_tunnel **tunnel)
{
	struct tunnel_db_get_tunnel_ctx ctx = {
		.app_tunnel = app_tunnel,
	};

	mlx5_access_tunnel_offload_db(dev, get_tunnel_match, get_tunnel_hit,
				      get_tunnel_miss, &ctx, true);
	*tunnel = ctx.tunnel;
	return ctx.tunnel ? 0 : -ENOMEM;
}

void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
{
	struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;

	if (!thub)
		return;
	if (!LIST_EMPTY(&thub->tunnels))
		DRV_LOG(WARNING, "port %u tunnels present", port_id);
	mlx5_hlist_destroy(thub->groups);
	mlx5_free(thub);
}

int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
{
	int err;
	struct mlx5_flow_tunnel_hub *thub;

	thub = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*thub),
			   0, SOCKET_ID_ANY);
	if (!thub)
		return -ENOMEM;
	LIST_INIT(&thub->tunnels);
	rte_spinlock_init(&thub->sl);
	thub->groups = mlx5_hlist_create("flow groups", 64,
					 false, true, sh,
					 mlx5_flow_tunnel_grp2tbl_create_cb,
					 mlx5_flow_tunnel_grp2tbl_match_cb,
					 mlx5_flow_tunnel_grp2tbl_remove_cb,
					 mlx5_flow_tunnel_grp2tbl_clone_cb,
					 mlx5_flow_tunnel_grp2tbl_clone_free_cb);
	if (!thub->groups) {
		err = -rte_errno;
		goto err;
	}
	sh->tunnel_hub = thub;

	return 0;

err:
	/* Reached only when the groups hlist creation failed. */
	mlx5_free(thub);
	return err;
}

static inline int
mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
			  struct rte_flow_tunnel *tunnel,
			  struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!priv->sh->config.dv_flow_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "flow DV interface is off");
	if (!is_tunnel_offload_active(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "tunnel offload was not activated, "
					  "consider setting dv_xmeta_en=3");
	if (!tunnel)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "no application tunnel");
	switch (tunnel->type) {
	default:
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "unsupported tunnel type");
	case RTE_FLOW_ITEM_TYPE_VXLAN:
	case RTE_FLOW_ITEM_TYPE_GRE:
	case RTE_FLOW_ITEM_TYPE_NVGRE:
	case RTE_FLOW_ITEM_TYPE_GENEVE:
		break;
	}
	return 0;
}

static int
mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
			   struct rte_flow_tunnel *app_tunnel,
			   struct rte_flow_action **actions,
			   uint32_t *num_of_actions,
			   struct rte_flow_error *error)
{
	struct mlx5_flow_tunnel *tunnel;
	int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error);

	if (ret)
		return ret;
	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
	if (ret < 0) {
		return rte_flow_error_set(error, ret,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "failed to initialize pmd tunnel");
	}
	*actions = &tunnel->action;
	*num_of_actions = 1;
	return 0;
}
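
/*
 * Illustrative application-side sketch (editor's addition): the PMD action
 * returned by mlx5_flow_tunnel_decap_set() is obtained through the generic
 * rte_flow_tunnel_decap_set() call and prepended to the application actions
 * of the tunnel-set rule; the names below are assumptions.
 *
 *	struct rte_flow_tunnel tunnel = {
 *		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *	};
 *	struct rte_flow_action *pmd_actions;
 *	uint32_t num_pmd_actions;
 *
 *	rte_flow_tunnel_decap_set(port_id, &tunnel, &pmd_actions,
 *				  &num_pmd_actions, &err);
 *	// build the final action array: pmd_actions first, then the
 *	// application's JUMP action, then END, and create the flow;
 *	// release with rte_flow_tunnel_action_decap_release() when done.
 */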
static int
mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
		       struct rte_flow_tunnel *app_tunnel,
		       struct rte_flow_item **items,
		       uint32_t *num_of_items,
		       struct rte_flow_error *error)
{
	struct mlx5_flow_tunnel *tunnel;
	int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error);

	if (ret)
		return ret;
	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
	if (ret < 0) {
		return rte_flow_error_set(error, ret,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  "failed to initialize pmd tunnel");
	}
	*items = &tunnel->item;
	*num_of_items = 1;
	return 0;
}

struct tunnel_db_element_release_ctx {
	struct rte_flow_item *items;
	struct rte_flow_action *actions;
	uint32_t num_elements;
	struct rte_flow_error *error;
	int ret;
};

static bool
tunnel_element_release_match(struct rte_eth_dev *dev,
			     struct mlx5_flow_tunnel *tunnel, const void *x)
{
	const struct tunnel_db_element_release_ctx *ctx = x;

	RTE_SET_USED(dev);
	if (ctx->num_elements != 1)
		return false;
	else if (ctx->items)
		return ctx->items == &tunnel->item;
	else if (ctx->actions)
		return ctx->actions == &tunnel->action;

	return false;
}

static void
tunnel_element_release_hit(struct rte_eth_dev *dev,
			   struct mlx5_flow_tunnel *tunnel, void *x)
{
	struct tunnel_db_element_release_ctx *ctx = x;

	ctx->ret = 0;
	/* Free the tunnel when the last reference is dropped. */
	if (rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1,
					  rte_memory_order_relaxed) == 1)
		mlx5_flow_tunnel_free(dev, tunnel);
}

static void
tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)
{
	struct tunnel_db_element_release_ctx *ctx = x;

	RTE_SET_USED(dev);
	ctx->ret = rte_flow_error_set(ctx->error, EINVAL,
				      RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				      "invalid argument");
}

static int
mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
			      struct rte_flow_item *pmd_items,
			      uint32_t num_items, struct rte_flow_error *err)
{
	struct tunnel_db_element_release_ctx ctx = {
		.items = pmd_items,
		.actions = NULL,
		.num_elements = num_items,
		.error = err,
	};

	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
				      tunnel_element_release_hit,
				      tunnel_element_release_miss, &ctx, false);

	return ctx.ret;
}

static int
mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
				struct rte_flow_action *pmd_actions,
				uint32_t num_actions, struct rte_flow_error *err)
{
	struct tunnel_db_element_release_ctx ctx = {
		.items = NULL,
		.actions = pmd_actions,
		.num_elements = num_actions,
		.error = err,
	};

	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
				      tunnel_element_release_hit,
				      tunnel_element_release_miss, &ctx, false);

	return ctx.ret;
}
static int
mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
				  struct rte_mbuf *m,
				  struct rte_flow_restore_info *info,
				  struct rte_flow_error *err)
{
	uint64_t ol_flags = m->ol_flags;
	const struct mlx5_flow_tbl_data_entry *tble;
	const uint64_t mask = RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
	struct mlx5_priv *priv = dev->data->dev_private;

	if (priv->tunnel_enabled == 0)
		goto err;
	if ((ol_flags & mask) != mask)
		goto err;
	tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
	if (!tble) {
		DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
			dev->data->port_id, m->hash.fdir.hi);
		goto err;
	}
	MLX5_ASSERT(tble->tunnel);
	memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
	info->group_id = tble->group_id;
	info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
		      RTE_FLOW_RESTORE_INFO_GROUP_ID |
		      RTE_FLOW_RESTORE_INFO_ENCAPSULATED;

	return 0;

err:
	return rte_flow_error_set(err, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  "failed to get restore info");
}

#else /* HAVE_IBV_FLOW_DV_SUPPORT */
static int
mlx5_flow_tunnel_decap_set(__rte_unused struct rte_eth_dev *dev,
			   __rte_unused struct rte_flow_tunnel *app_tunnel,
			   __rte_unused struct rte_flow_action **actions,
			   __rte_unused uint32_t *num_of_actions,
			   __rte_unused struct rte_flow_error *error)
{
	return -ENOTSUP;
}

static int
mlx5_flow_tunnel_match(__rte_unused struct rte_eth_dev *dev,
		       __rte_unused struct rte_flow_tunnel *app_tunnel,
		       __rte_unused struct rte_flow_item **items,
		       __rte_unused uint32_t *num_of_items,
		       __rte_unused struct rte_flow_error *error)
{
	return -ENOTSUP;
}

static int
mlx5_flow_tunnel_item_release(__rte_unused struct rte_eth_dev *dev,
			      __rte_unused struct rte_flow_item *pmd_items,
			      __rte_unused uint32_t num_items,
			      __rte_unused struct rte_flow_error *err)
{
	return -ENOTSUP;
}

static int
mlx5_flow_tunnel_action_release(__rte_unused struct rte_eth_dev *dev,
				__rte_unused struct rte_flow_action *pmd_action,
				__rte_unused uint32_t num_actions,
				__rte_unused struct rte_flow_error *err)
{
	return -ENOTSUP;
}

static int
mlx5_flow_tunnel_get_restore_info(__rte_unused struct rte_eth_dev *dev,
				  __rte_unused struct rte_mbuf *m,
				  __rte_unused struct rte_flow_restore_info *i,
				  __rte_unused struct rte_flow_error *err)
{
	return -ENOTSUP;
}

static int
flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev,
			     __rte_unused struct rte_flow *flow,
			     __rte_unused const struct rte_flow_attr *attr,
			     __rte_unused const struct rte_flow_action *actions,
			     __rte_unused uint32_t flow_idx,
			     __rte_unused const struct mlx5_flow_tunnel *tunnel,
			     __rte_unused struct tunnel_default_miss_ctx *ctx,
			     __rte_unused struct rte_flow_error *error)
{
	return -ENOTSUP;
}

static struct mlx5_flow_tunnel *
mlx5_find_tunnel_id(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused uint32_t id)
{
	return NULL;
}

static void
mlx5_flow_tunnel_free(__rte_unused struct rte_eth_dev *dev,
		      __rte_unused struct mlx5_flow_tunnel *tunnel)
{
}

static uint32_t
tunnel_flow_group_to_flow_table(__rte_unused struct rte_eth_dev *dev,
				__rte_unused const struct mlx5_flow_tunnel *t,
				__rte_unused uint32_t group,
				__rte_unused uint32_t *table,
				struct rte_flow_error *error)
{
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  "tunnel offload requires DV support");
}

void
mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh,
			__rte_unused uint16_t port_id)
{
}
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
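
/*
 * Datapath sketch (editor's addition): after a tunnel-offload miss, the
 * application recovers the tunnel context from a received mbuf through the
 * generic API, which lands in mlx5_flow_tunnel_get_restore_info() above:
 *
 *	struct rte_flow_restore_info info;
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_get_restore_info(port_id, mbuf, &info, &err) == 0 &&
 *	    (info.flags & RTE_FLOW_RESTORE_INFO_TUNNEL)) {
 *		// packet belongs to tunnel `info.tunnel`; when
 *		// RTE_FLOW_RESTORE_INFO_ENCAPSULATED is also set, the
 *		// outer headers are still present on the packet.
 *	}
 */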
/* Flex flow item API */
static struct rte_flow_item_flex_handle *
mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
			   const struct rte_flow_item_flex_conf *conf,
			   struct rte_flow_error *error)
{
	static const char err_msg[] = "flex item creation unsupported";
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_attr attr = { .transfer = 0 };
	const struct mlx5_flow_driver_ops *fops =
			flow_get_drv_ops(flow_get_drv_type(dev, &attr));

	if (!priv->pci_dev) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "create flex item on PF only");
		return NULL;
	}
	switch (priv->pci_dev->id.device_id) {
	case PCI_DEVICE_ID_MELLANOX_BLUEFIELD2:
	case PCI_DEVICE_ID_MELLANOX_BLUEFIELD3:
		break;
	default:
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "flex item available on BlueField ports only");
		return NULL;
	}
	if (!fops->item_create) {
		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL, err_msg);
		return NULL;
	}
	return fops->item_create(dev, conf, error);
}

static int
mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
			    const struct rte_flow_item_flex_handle *handle,
			    struct rte_flow_error *error)
{
	static const char err_msg[] = "flex item release unsupported";
	struct rte_flow_attr attr = { .transfer = 0 };
	const struct mlx5_flow_driver_ops *fops =
			flow_get_drv_ops(flow_get_drv_type(dev, &attr));

	if (!fops->item_release) {
		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL, err_msg);
		return -rte_errno;
	}
	return fops->item_release(dev, handle, error);
}
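
/*
 * Illustrative sketch (editor's addition): flex item creation goes through
 * the generic rte_flow entry points. The configuration contents are omitted
 * here because they are protocol specific (see rte_flow.h); only the call
 * flow is shown, and `port_id` is an assumption.
 *
 *	struct rte_flow_item_flex_conf conf = { 0 };
 *	struct rte_flow_item_flex_handle *fh;
 *
 *	// fill `conf` with the flex protocol description ...
 *	fh = rte_flow_flex_item_create(port_id, &conf, &err);
 *	if (fh)
 *		rte_flow_flex_item_release(port_id, fh, &err);
 */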
static void
mlx5_dbg__print_pattern(const struct rte_flow_item *item)
{
	int ret;
	struct rte_flow_error error;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		char *item_name;

		ret = rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, &item_name,
				    sizeof(item_name),
				    (void *)(uintptr_t)item->type, &error);
		if (ret > 0)
			printf("%s ", item_name);
		else
			printf("%d\n", (int)item->type);
	}
	printf("END\n");
}

static int
mlx5_flow_is_std_vxlan_port(const struct rte_flow_item *udp_item)
{
	const struct rte_flow_item_udp *spec = udp_item->spec;
	const struct rte_flow_item_udp *mask = udp_item->mask;
	uint16_t udp_dport = 0;

	if (spec != NULL) {
		if (!mask)
			mask = &rte_flow_item_udp_mask;
		udp_dport = rte_be_to_cpu_16(spec->hdr.dst_port &
					     mask->hdr.dst_port);
	}
	return (!udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN);
}

static const struct mlx5_flow_expand_node *
mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern,
				 unsigned int item_idx,
				 const struct mlx5_flow_expand_node graph[],
				 const struct mlx5_flow_expand_node *node)
{
	const struct rte_flow_item *item = pattern + item_idx, *prev_item;

	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN &&
	    node != NULL &&
	    node->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
		/*
		 * The expansion node is VXLAN and it is also the last
		 * expandable item in the pattern, so need to continue
		 * expansion of the inner tunnel.
		 */
		MLX5_ASSERT(item_idx > 0);
		prev_item = pattern + item_idx - 1;
		MLX5_ASSERT(prev_item->type == RTE_FLOW_ITEM_TYPE_UDP);
		if (mlx5_flow_is_std_vxlan_port(prev_item))
			return &graph[MLX5_EXPANSION_STD_VXLAN];
		return &graph[MLX5_EXPANSION_L3_VXLAN];
	}
	return node;
}

/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};

/* Map of Verbs to Flow priority with 16 Verbs priorities. */
static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
	{ 9, 10, 11 }, { 12, 13, 14 },
};

/**
 * Discover the number of available flow priorities.
 *
 * @param dev
 *   Ethernet device.
 *
 * @return
 *   On success, number of available flow priorities.
 *   On failure, a negative errno-style code and rte_errno is set.
 */
int
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
	static const uint16_t vprio[] = {8, 16};
	const struct mlx5_priv *priv = dev->data->dev_private;
	const struct mlx5_flow_driver_ops *fops;
	enum mlx5_flow_drv_type type;
	int ret;

	type = mlx5_flow_os_get_type();
	if (type == MLX5_FLOW_TYPE_MAX) {
		type = MLX5_FLOW_TYPE_VERBS;
		if (priv->sh->cdev->config.devx && priv->sh->config.dv_flow_en)
			type = MLX5_FLOW_TYPE_DV;
	}
	fops = flow_get_drv_ops(type);
	if (fops->discover_priorities == NULL) {
		DRV_LOG(ERR, "Priority discovery not supported");
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	ret = fops->discover_priorities(dev, vprio, RTE_DIM(vprio));
	if (ret < 0)
		return ret;
	switch (ret) {
	case 8:
		ret = RTE_DIM(priority_map_3);
		break;
	case 16:
		ret = RTE_DIM(priority_map_5);
		break;
	default:
		rte_errno = ENOTSUP;
		DRV_LOG(ERR,
			"port %u maximum priority: %d expected 8/16",
			dev->data->port_id, ret);
		return -rte_errno;
	}
	DRV_LOG(INFO, "port %u supported flow priorities:"
		" 0-%d for ingress or egress root table,"
		" 0-%d for non-root table or transfer root table.",
		dev->data->port_id, ret - 2,
		MLX5_NON_ROOT_FLOW_MAX_PRIO - 1);
	return ret;
}

/**
 * Adjust flow priority based on the highest layer and the request priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] priority
 *   The rule base priority.
 * @param[in] subpriority
 *   The priority based on the items.
 *
 * @return
 *   The new priority.
 */
uint32_t
mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
			  uint32_t subpriority)
{
	uint32_t res = 0;
	struct mlx5_priv *priv = dev->data->dev_private;

	switch (priv->sh->flow_max_priority) {
	case RTE_DIM(priority_map_3):
		res = priority_map_3[priority][subpriority];
		break;
	case RTE_DIM(priority_map_5):
		res = priority_map_5[priority][subpriority];
		break;
	}
	return res;
}
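
/*
 * Worked example (editor's addition): with 16 Verbs priorities the device
 * reports RTE_DIM(priority_map_5) == 5 rte_flow priorities, and a rule with
 * base priority 1 whose matcher ends at sub-priority 2 resolves to Verbs
 * priority priority_map_5[1][2] == 5:
 *
 *	uint32_t prio = mlx5_flow_adjust_priority(dev, 1, 2); // == 5
 */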
/**
 * Get the priority for sending traffic to kernel table.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   On success: the value of priority for sending traffic to kernel table
 *   On failure: -1
 */
uint32_t
mlx5_get_send_to_kernel_priority(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t res;

	switch (priv->sh->flow_max_priority) {
	case RTE_DIM(priority_map_5):
		res = 15;
		break;
	case RTE_DIM(priority_map_3):
		res = 7;
		break;
	default:
		DRV_LOG(ERR,
			"port %u maximum priority: %d expected 8/16",
			dev->data->port_id, priv->sh->flow_max_priority);
		res = (uint32_t)-1;
	}
	return res;
}

/**
 * Get the E-Switch Manager vport id.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   The vport id.
 */
int16_t mlx5_flow_get_esw_manager_vport_id(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_common_device *cdev = priv->sh->cdev;

	/* New FW exposes E-Switch Manager vport ID, can use it directly. */
	if (cdev->config.hca_attr.esw_mgr_vport_id_valid)
		return (int16_t)cdev->config.hca_attr.esw_mgr_vport_id;

	if (priv->pci_dev == NULL)
		return 0;
	switch (priv->pci_dev->id.device_id) {
	case PCI_DEVICE_ID_MELLANOX_BLUEFIELD:
	case PCI_DEVICE_ID_MELLANOX_BLUEFIELD2:
	case PCI_DEVICE_ID_MELLANOX_BLUEFIELD3:
		/*
		 * In old FW which doesn't expose the E-Switch Manager vport ID
		 * in the capability, only the BF embedded CPUs control the
		 * E-Switch Manager port. Hence, the ECPF vport ID is selected
		 * and not the host port (0) in any BF case.
		 */
		return (int16_t)MLX5_ECPF_VPORT_ID;
	default:
		return MLX5_PF_VPORT_ID;
	}
}
/**
 * Parse item to get the vport id.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] item
 *   The src port id match item.
 * @param[out] vport_id
 *   Pointer to put the vport id.
 * @param[out] all_ports
 *   Indicate if the item matches all ports.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int mlx5_flow_get_item_vport_id(struct rte_eth_dev *dev,
				const struct rte_flow_item *item,
				uint16_t *vport_id,
				bool *all_ports,
				struct rte_flow_error *error)
{
	struct mlx5_priv *port_priv;
	const struct rte_flow_item_port_id *pid_v = NULL;
	const struct rte_flow_item_ethdev *dev_v = NULL;
	uint32_t esw_mgr_port;
	uint32_t src_port;

	if (all_ports)
		*all_ports = false;
	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_PORT_ID:
		pid_v = item->spec;
		if (!pid_v)
			return 0;
		src_port = pid_v->id;
		esw_mgr_port = MLX5_PORT_ESW_MGR;
		break;
	case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
		dev_v = item->spec;
		if (!dev_v) {
			if (all_ports)
				*all_ports = true;
			return 0;
		}
		src_port = dev_v->port_id;
		esw_mgr_port = MLX5_REPRESENTED_PORT_ESW_MGR;
		break;
	case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR:
		src_port = MLX5_REPRESENTED_PORT_ESW_MGR;
		esw_mgr_port = MLX5_REPRESENTED_PORT_ESW_MGR;
		break;
	default:
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  NULL, "Incorrect item type.");
	}
	if (src_port == esw_mgr_port) {
		*vport_id = mlx5_flow_get_esw_manager_vport_id(dev);
	} else {
		port_priv = mlx5_port_to_eswitch_info(src_port, false);
		if (!port_priv)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
						  NULL, "Failed to get port info.");
		*vport_id = port_priv->representor_id;
	}

	return 0;
}

int
mlx5_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
			      uint16_t *proxy_port_id,
			      struct rte_flow_error *error)
{
	const struct mlx5_priv *priv = dev->data->dev_private;
	uint16_t port_id;

	if (!priv->sh->config.dv_esw_en)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "unable to provide a proxy port"
					  " without E-Switch configured");
	if (!priv->master && !priv->representor)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "unable to provide a proxy port"
					  " for port which is not a master"
					  " or a representor port");
	if (priv->master) {
		*proxy_port_id = dev->data->port_id;
		return 0;
	}
	MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
		const struct rte_eth_dev *port_dev = &rte_eth_devices[port_id];
		const struct mlx5_priv *port_priv = port_dev->data->dev_private;

		if (port_priv->master &&
		    port_priv->domain_id == priv->domain_id) {
			*proxy_port_id = port_id;
			return 0;
		}
	}
	return rte_flow_error_set(error, ENODEV,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, "unable to find a proxy port");
}
/**
 * Discover IPv6 traffic class ID support in rdma-core and firmware.
 *
 * @param dev
 *   Ethernet device.
 *
 * @return
 *   0 if rdma-core works with the firmware.
 *   -EOPNOTSUPP if rdma-core cannot work with the new IPv6 TC ID.
 */
int
mlx5_flow_discover_ipv6_tc_support(struct rte_eth_dev *dev)
{
	struct rte_flow_action_set_dscp set_dscp;
	struct rte_flow_attr attr;
	struct rte_flow_action actions[2];
	struct rte_flow_item items[3];
	struct rte_flow_error error;
	uint32_t flow_idx;

	memset(&attr, 0, sizeof(attr));
	memset(actions, 0, sizeof(actions));
	memset(items, 0, sizeof(items));
	attr.group = 1;
	attr.egress = 1;
	items[0].type = RTE_FLOW_ITEM_TYPE_ETH;
	items[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
	items[2].type = RTE_FLOW_ITEM_TYPE_END;
	/* Arbitrary DSCP value. */
	set_dscp.dscp = 9;
	actions[0].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP;
	actions[0].conf = &set_dscp;
	actions[1].type = RTE_FLOW_ACTION_TYPE_END;

	flow_idx = mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr, items,
					 actions, true, &error);
	if (!flow_idx)
		return -EOPNOTSUPP;

	mlx5_flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx);
	return 0;
}

void *
rte_pmd_mlx5_create_geneve_tlv_parser(uint16_t port_id,
				      const struct rte_pmd_mlx5_geneve_tlv tlv_list[],
				      uint8_t nb_options)
{
#ifdef HAVE_MLX5_HWS_SUPPORT
	return mlx5_geneve_tlv_parser_create(port_id, tlv_list, nb_options);
#else
	(void)port_id;
	(void)tlv_list;
	(void)nb_options;
	DRV_LOG(ERR, "%s is not supported.", __func__);
	rte_errno = ENOTSUP;
	return NULL;
#endif
}

int
rte_pmd_mlx5_destroy_geneve_tlv_parser(void *handle)
{
#ifdef HAVE_MLX5_HWS_SUPPORT
	return mlx5_geneve_tlv_parser_destroy(handle);
#else
	(void)handle;
	DRV_LOG(ERR, "%s is not supported.", __func__);
	rte_errno = ENOTSUP;
	return -rte_errno;
#endif
}
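
/*
 * Illustrative sketch (editor's addition): GENEVE TLV parser lifetime as
 * seen by the application. The option list contents are an assumption and
 * must describe the GENEVE TLV options the application intends to match on
 * (see rte_pmd_mlx5.h for the field definitions).
 *
 *	struct rte_pmd_mlx5_geneve_tlv opts[1] = { 0 };
 *	void *parser;
 *
 *	// fill opts[0] with the option class/type/length to sample ...
 *	parser = rte_pmd_mlx5_create_geneve_tlv_parser(port_id, opts, 1);
 *	if (parser)
 *		rte_pmd_mlx5_destroy_geneve_tlv_parser(parser);
 */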