/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_eal_paging.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_common_os.h"
#include "rte_pmd_mlx5.h"

struct tunnel_default_miss_ctx {
	uint16_t *queue;
	__extension__
	union {
		struct rte_flow_action_rss action_rss;
		struct rte_flow_action_queue miss_queue;
		struct rte_flow_action_jump miss_jump;
		uint8_t raw[0];
	};
};

static int
flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
			     struct rte_flow *flow,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_action *app_actions,
			     uint32_t flow_idx,
			     const struct mlx5_flow_tunnel *tunnel,
			     struct tunnel_default_miss_ctx *ctx,
			     struct rte_flow_error *error);
static struct mlx5_flow_tunnel *
mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);
static void
mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel);
static uint32_t
tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
				const struct mlx5_flow_tunnel *tunnel,
				uint32_t group, uint32_t *table,
				struct rte_flow_error *error);

static struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
static void mlx5_flow_pop_thread_workspace(void);


/** Device flow drivers. */
extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;

const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;

const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
	[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
	[MLX5_FLOW_TYPE_HW] = &mlx5_flow_hw_drv_ops,
#endif
	[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
	[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
};

/** Helper macro to build input graph for mlx5_flow_expand_rss(). */
#define MLX5_FLOW_EXPAND_RSS_NEXT(...) \
	(const int []){ \
		__VA_ARGS__, 0, \
	}

/** Node object of input graph for mlx5_flow_expand_rss(). */
struct mlx5_flow_expand_node {
	const int *const next;
	/**<
	 * List of next node indexes. Index 0 is interpreted as a terminator.
	 */
	const enum rte_flow_item_type type;
	/**< Pattern item type of current node. */
	uint64_t rss_types;
	/**<
	 * RSS types bit-field associated with this node
	 * (see RTE_ETH_RSS_* definitions).
	 */
	uint64_t node_flags;
	/**<
	 * Bit-fields that define how the node is used in the expansion.
	 * (see MLX5_EXPANSION_NODE_* definitions).
	 */
};

/* Optional expand field. The expansion algorithm will not go deeper. */
#define MLX5_EXPANSION_NODE_OPTIONAL (UINT64_C(1) << 0)

/* The node is not added implicitly as expansion to the flow pattern.
 * If the node type does not match the flow pattern item type, the
 * expansion algorithm will go deeper to its next items.
 * In the current implementation, the list of next nodes indexes can
 * have up to one node with this flag set and it has to be the last
 * node index (before the list terminator).
 */
#define MLX5_EXPANSION_NODE_EXPLICIT (UINT64_C(1) << 1)

/** Object returned by mlx5_flow_expand_rss(). */
struct mlx5_flow_expand_rss {
	uint32_t entries;
	/**< Number of entries @p patterns and @p priorities. */
	struct {
		struct rte_flow_item *pattern; /**< Expanded pattern array. */
		uint32_t priority; /**< Priority offset for each expansion. */
	} entry[];
};

static void
mlx5_dbg__print_pattern(const struct rte_flow_item *item);

static const struct mlx5_flow_expand_node *
mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern,
		unsigned int item_idx,
		const struct mlx5_flow_expand_node graph[],
		const struct mlx5_flow_expand_node *node);

static bool
mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
{
	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
	case RTE_FLOW_ITEM_TYPE_VLAN:
	case RTE_FLOW_ITEM_TYPE_IPV4:
	case RTE_FLOW_ITEM_TYPE_IPV6:
	case RTE_FLOW_ITEM_TYPE_UDP:
	case RTE_FLOW_ITEM_TYPE_TCP:
	case RTE_FLOW_ITEM_TYPE_VXLAN:
	case RTE_FLOW_ITEM_TYPE_NVGRE:
	case RTE_FLOW_ITEM_TYPE_GRE:
	case RTE_FLOW_ITEM_TYPE_GENEVE:
	case RTE_FLOW_ITEM_TYPE_MPLS:
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
	case RTE_FLOW_ITEM_TYPE_GRE_KEY:
	case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
	case RTE_FLOW_ITEM_TYPE_GTP:
		return true;
	default:
		break;
	}
	return false;
}

/**
 * Network Service Header (NSH) and its next protocol values
 * are described in RFC-8393.
 */
static enum rte_flow_item_type
mlx5_nsh_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
{
	enum rte_flow_item_type type;

	switch (proto_mask & proto_spec) {
	case 0:
		type = RTE_FLOW_ITEM_TYPE_VOID;
		break;
	case RTE_VXLAN_GPE_TYPE_IPV4:
		type = RTE_FLOW_ITEM_TYPE_IPV4;
		break;
	case RTE_VXLAN_GPE_TYPE_IPV6:
		type = RTE_FLOW_ITEM_TYPE_IPV6;
		break;
	case RTE_VXLAN_GPE_TYPE_ETH:
		type = RTE_FLOW_ITEM_TYPE_ETH;
		break;
	default:
		type = RTE_FLOW_ITEM_TYPE_END;
	}
	return type;
}

static enum rte_flow_item_type
mlx5_inet_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
{
	enum rte_flow_item_type type;

	switch (proto_mask & proto_spec) {
	case 0:
		type = RTE_FLOW_ITEM_TYPE_VOID;
		break;
	case IPPROTO_UDP:
		type = RTE_FLOW_ITEM_TYPE_UDP;
		break;
	case IPPROTO_TCP:
		type = RTE_FLOW_ITEM_TYPE_TCP;
		break;
	case IPPROTO_IPIP:
		type = RTE_FLOW_ITEM_TYPE_IPV4;
		break;
	case IPPROTO_IPV6:
		type = RTE_FLOW_ITEM_TYPE_IPV6;
		break;
	default:
		type = RTE_FLOW_ITEM_TYPE_END;
	}
	return type;
}

static enum rte_flow_item_type
mlx5_ethertype_to_item_type(rte_be16_t type_spec,
			    rte_be16_t type_mask, bool is_tunnel)
{
	enum rte_flow_item_type type;

	switch (rte_be_to_cpu_16(type_spec & type_mask)) {
	case 0:
		type = RTE_FLOW_ITEM_TYPE_VOID;
		break;
	case RTE_ETHER_TYPE_TEB:
		type = is_tunnel ?
		       RTE_FLOW_ITEM_TYPE_ETH : RTE_FLOW_ITEM_TYPE_END;
		break;
	case RTE_ETHER_TYPE_VLAN:
		type = !is_tunnel ?
		       RTE_FLOW_ITEM_TYPE_VLAN : RTE_FLOW_ITEM_TYPE_END;
		break;
	case RTE_ETHER_TYPE_IPV4:
		type = RTE_FLOW_ITEM_TYPE_IPV4;
		break;
	case RTE_ETHER_TYPE_IPV6:
		type = RTE_FLOW_ITEM_TYPE_IPV6;
		break;
	default:
		type = RTE_FLOW_ITEM_TYPE_END;
	}
	return type;
}

static enum rte_flow_item_type
mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
{
#define MLX5_XSET_ITEM_MASK_SPEC(type, fld) \
	do { \
		const void *m = item->mask; \
		const void *s = item->spec; \
		mask = m ? \
			((const struct rte_flow_item_##type *)m)->fld : \
			rte_flow_item_##type##_mask.fld; \
		spec = ((const struct rte_flow_item_##type *)s)->fld; \
	} while (0)

	enum rte_flow_item_type ret;
	uint16_t spec, mask;

	if (item == NULL || item->spec == NULL)
		return RTE_FLOW_ITEM_TYPE_VOID;
	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		MLX5_XSET_ITEM_MASK_SPEC(eth, type);
		if (!mask)
			return RTE_FLOW_ITEM_TYPE_VOID;
		ret = mlx5_ethertype_to_item_type(spec, mask, false);
		break;
	case RTE_FLOW_ITEM_TYPE_VLAN:
		MLX5_XSET_ITEM_MASK_SPEC(vlan, inner_type);
		if (!mask)
			return RTE_FLOW_ITEM_TYPE_VOID;
		ret = mlx5_ethertype_to_item_type(spec, mask, false);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		MLX5_XSET_ITEM_MASK_SPEC(ipv4, hdr.next_proto_id);
		if (!mask)
			return RTE_FLOW_ITEM_TYPE_VOID;
		ret = mlx5_inet_proto_to_item_type(spec, mask);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		MLX5_XSET_ITEM_MASK_SPEC(ipv6, hdr.proto);
		if (!mask)
			return RTE_FLOW_ITEM_TYPE_VOID;
		ret = mlx5_inet_proto_to_item_type(spec, mask);
		break;
	case RTE_FLOW_ITEM_TYPE_GENEVE:
		MLX5_XSET_ITEM_MASK_SPEC(geneve, protocol);
		ret = mlx5_ethertype_to_item_type(spec, mask, true);
		break;
	case RTE_FLOW_ITEM_TYPE_GRE:
		MLX5_XSET_ITEM_MASK_SPEC(gre, protocol);
		ret = mlx5_ethertype_to_item_type(spec, mask, true);
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		MLX5_XSET_ITEM_MASK_SPEC(vxlan_gpe, protocol);
		ret = mlx5_nsh_proto_to_item_type(spec, mask);
		break;
	default:
		ret = RTE_FLOW_ITEM_TYPE_VOID;
		break;
	}
	return ret;
#undef MLX5_XSET_ITEM_MASK_SPEC
}
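
/*
 * For illustration (not part of the driver logic): given a pattern item
 * ETH whose spec/mask select EtherType 0x0800, the helper above reports
 * RTE_FLOW_ITEM_TYPE_IPV4 as the implied next item, so RSS expansion can
 * continue the pattern below the last user-provided item:
 *
 *   ETH (type spec = 0x0800, mask = 0xffff)  ->  an IPV4 item is appended
 *   as the "missed" item before the expansion proceeds.
 */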

static const int *
mlx5_flow_expand_rss_skip_explicit(const struct mlx5_flow_expand_node graph[],
		const int *next_node)
{
	const struct mlx5_flow_expand_node *node = NULL;
	const int *next = next_node;

	while (next && *next) {
		/*
		 * Skip the nodes with the MLX5_EXPANSION_NODE_EXPLICIT
		 * flag set, because they were not found in the flow pattern.
		 */
		node = &graph[*next];
		if (!(node->node_flags & MLX5_EXPANSION_NODE_EXPLICIT))
			break;
		next = node->next;
	}
	return next;
}

#define MLX5_RSS_EXP_ELT_N 16

/**
 * Expand RSS flows into several possible flows according to the RSS hash
 * fields requested and the driver capabilities.
 *
 * @param[out] buf
 *   Buffer to store the result expansion.
 * @param[in] size
 *   Buffer size in bytes. If 0, @p buf can be NULL.
 * @param[in] pattern
 *   User flow pattern.
 * @param[in] types
 *   RSS types to expand (see RTE_ETH_RSS_* definitions).
 * @param[in] graph
 *   Input graph to expand @p pattern according to @p types.
 * @param[in] graph_root_index
 *   Index of root node in @p graph, typically 0.
 *
 * @return
 *   A positive value representing the size of @p buf in bytes regardless of
 *   @p size on success, a negative errno value otherwise and rte_errno is
 *   set, the following errors are defined:
 *
 *   -E2BIG: graph-depth @p graph is too deep.
 *   -EINVAL: @p size has not enough space for expanded pattern.
 */
static int
mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
		     const struct rte_flow_item *pattern, uint64_t types,
		     const struct mlx5_flow_expand_node graph[],
		     int graph_root_index)
{
	const struct rte_flow_item *item;
	const struct mlx5_flow_expand_node *node = &graph[graph_root_index];
	const int *next_node;
	const int *stack[MLX5_RSS_EXP_ELT_N];
	int stack_pos = 0;
	struct rte_flow_item flow_items[MLX5_RSS_EXP_ELT_N];
	unsigned int i, item_idx, last_expand_item_idx = 0;
	size_t lsize;
	size_t user_pattern_size = 0;
	void *addr = NULL;
	const struct mlx5_flow_expand_node *next = NULL;
	struct rte_flow_item missed_item;
	int missed = 0;
	int elt = 0;
	const struct rte_flow_item *last_expand_item = NULL;

	memset(&missed_item, 0, sizeof(missed_item));
	lsize = offsetof(struct mlx5_flow_expand_rss, entry) +
		MLX5_RSS_EXP_ELT_N * sizeof(buf->entry[0]);
	if (lsize > size)
		return -EINVAL;
	buf->entry[0].priority = 0;
	buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N];
	buf->entries = 0;
	addr = buf->entry[0].pattern;
	for (item = pattern, item_idx = 0;
			item->type != RTE_FLOW_ITEM_TYPE_END;
			item++, item_idx++) {
		if (!mlx5_flow_is_rss_expandable_item(item)) {
			user_pattern_size += sizeof(*item);
			continue;
		}
		last_expand_item = item;
		last_expand_item_idx = item_idx;
		i = 0;
		while (node->next && node->next[i]) {
			next = &graph[node->next[i]];
			if (next->type == item->type)
				break;
			if (next->node_flags & MLX5_EXPANSION_NODE_EXPLICIT) {
				node = next;
				i = 0;
			} else {
				++i;
			}
		}
		if (next)
			node = next;
		user_pattern_size += sizeof(*item);
	}
	user_pattern_size += sizeof(*item); /* Handle END item. */
	lsize += user_pattern_size;
	if (lsize > size)
		return -EINVAL;
	/* Copy the user pattern in the first entry of the buffer. */
	rte_memcpy(addr, pattern, user_pattern_size);
	addr = (void *)(((uintptr_t)addr) + user_pattern_size);
	buf->entries = 1;
	/* Start expanding. */
	memset(flow_items, 0, sizeof(flow_items));
	user_pattern_size -= sizeof(*item);
	/*
	 * Check if the last valid item has spec set, need complete pattern,
	 * and the pattern can be used for expansion.
	 */
	missed_item.type = mlx5_flow_expand_rss_item_complete(last_expand_item);
	if (missed_item.type == RTE_FLOW_ITEM_TYPE_END) {
		/* Item type END indicates expansion is not required. */
		return lsize;
	}
	if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
		next = NULL;
		missed = 1;
		i = 0;
		while (node->next && node->next[i]) {
			next = &graph[node->next[i]];
			if (next->type == missed_item.type) {
				flow_items[0].type = missed_item.type;
				flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
				break;
			}
			if (next->node_flags & MLX5_EXPANSION_NODE_EXPLICIT) {
				node = next;
				i = 0;
			} else {
				++i;
			}
			next = NULL;
		}
	}
	if (next && missed) {
		elt = 2; /* missed item + item end. */
		node = next;
		lsize += elt * sizeof(*item) + user_pattern_size;
		if (lsize > size)
			return -EINVAL;
		if (node->rss_types & types) {
			buf->entry[buf->entries].priority = 1;
			buf->entry[buf->entries].pattern = addr;
			buf->entries++;
			rte_memcpy(addr, buf->entry[0].pattern,
				   user_pattern_size);
			addr = (void *)(((uintptr_t)addr) + user_pattern_size);
			rte_memcpy(addr, flow_items, elt * sizeof(*item));
			addr = (void *)(((uintptr_t)addr) +
					elt * sizeof(*item));
		}
	} else if (last_expand_item != NULL) {
		node = mlx5_flow_expand_rss_adjust_node(pattern,
				last_expand_item_idx, graph, node);
	}
	memset(flow_items, 0, sizeof(flow_items));
	next_node = mlx5_flow_expand_rss_skip_explicit(graph,
			node->next);
	stack[stack_pos] = next_node;
	node = next_node ? &graph[*next_node] : NULL;
	while (node) {
		flow_items[stack_pos].type = node->type;
		if (node->rss_types & types) {
			size_t n;
			/*
			 * Compute the number of items to copy from the
			 * expansion and copy them.
			 * When stack_pos is 0, there is 1 element in it,
			 * plus the additional END item.
			 */
			elt = stack_pos + 2;
			flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
			lsize += elt * sizeof(*item) + user_pattern_size;
			if (lsize > size)
				return -EINVAL;
			n = elt * sizeof(*item);
			buf->entry[buf->entries].priority =
				stack_pos + 1 + missed;
			buf->entry[buf->entries].pattern = addr;
			buf->entries++;
			rte_memcpy(addr, buf->entry[0].pattern,
				   user_pattern_size);
			addr = (void *)(((uintptr_t)addr) +
					user_pattern_size);
			rte_memcpy(addr, &missed_item,
				   missed * sizeof(*item));
			addr = (void *)(((uintptr_t)addr) +
				missed * sizeof(*item));
			rte_memcpy(addr, flow_items, n);
			addr = (void *)(((uintptr_t)addr) + n);
		}
		/* Go deeper. */
		if (!(node->node_flags & MLX5_EXPANSION_NODE_OPTIONAL) &&
				node->next) {
			next_node = mlx5_flow_expand_rss_skip_explicit(graph,
					node->next);
			if (stack_pos++ == MLX5_RSS_EXP_ELT_N) {
				rte_errno = E2BIG;
				return -rte_errno;
			}
			stack[stack_pos] = next_node;
		} else if (*(next_node + 1)) {
			/* Follow up with the next possibility. */
			next_node = mlx5_flow_expand_rss_skip_explicit(graph,
					++next_node);
		} else if (!stack_pos) {
			/*
			 * Completing the traverse over the different paths.
			 * The next_node is advanced to the terminator.
			 */
			++next_node;
		} else {
			/* Move to the next path. */
			while (stack_pos) {
				next_node = stack[--stack_pos];
				next_node++;
				if (*next_node)
					break;
			}
			next_node = mlx5_flow_expand_rss_skip_explicit(graph,
					next_node);
			stack[stack_pos] = next_node;
		}
		node = next_node && *next_node ? &graph[*next_node] : NULL;
	}
	return lsize;
}
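
/*
 * Worked example (illustrative, assuming the mlx5_support_expansion graph
 * defined below): a user pattern ETH / IPV4 / END expanded with
 * types = RTE_ETH_RSS_NONFRAG_IPV4_UDP keeps the user pattern as entry[0]
 * with priority 0 and adds, among others, an entry ETH / IPV4 / UDP / END
 * with priority 1; deeper tunnel paths of the graph (e.g. over GRE) can
 * contribute further entries when their rss_types intersect @p types.
 */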

enum mlx5_expansion {
	MLX5_EXPANSION_ROOT,
	MLX5_EXPANSION_ROOT_OUTER,
	MLX5_EXPANSION_OUTER_ETH,
	MLX5_EXPANSION_OUTER_VLAN,
	MLX5_EXPANSION_OUTER_IPV4,
	MLX5_EXPANSION_OUTER_IPV4_UDP,
	MLX5_EXPANSION_OUTER_IPV4_TCP,
	MLX5_EXPANSION_OUTER_IPV6,
	MLX5_EXPANSION_OUTER_IPV6_UDP,
	MLX5_EXPANSION_OUTER_IPV6_TCP,
	MLX5_EXPANSION_VXLAN,
	MLX5_EXPANSION_STD_VXLAN,
	MLX5_EXPANSION_L3_VXLAN,
	MLX5_EXPANSION_VXLAN_GPE,
	MLX5_EXPANSION_GRE,
	MLX5_EXPANSION_NVGRE,
	MLX5_EXPANSION_GRE_KEY,
	MLX5_EXPANSION_MPLS,
	MLX5_EXPANSION_ETH,
	MLX5_EXPANSION_VLAN,
	MLX5_EXPANSION_IPV4,
	MLX5_EXPANSION_IPV4_UDP,
	MLX5_EXPANSION_IPV4_TCP,
	MLX5_EXPANSION_IPV6,
	MLX5_EXPANSION_IPV6_UDP,
	MLX5_EXPANSION_IPV6_TCP,
	MLX5_EXPANSION_IPV6_FRAG_EXT,
	MLX5_EXPANSION_GTP,
	MLX5_EXPANSION_GENEVE,
};

/** Supported expansion of items. */
static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
	[MLX5_EXPANSION_ROOT] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						  MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
						  MLX5_EXPANSION_OUTER_IPV4,
						  MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_OUTER_ETH] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						  MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.node_flags = MLX5_EXPANSION_NODE_EXPLICIT,
	},
	[MLX5_EXPANSION_OUTER_IPV4] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV4_UDP,
			 MLX5_EXPANSION_OUTER_IPV4_TCP,
			 MLX5_EXPANSION_GRE,
			 MLX5_EXPANSION_NVGRE,
			 MLX5_EXPANSION_IPV4,
			 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						  MLX5_EXPANSION_VXLAN_GPE,
						  MLX5_EXPANSION_MPLS,
						  MLX5_EXPANSION_GENEVE,
						  MLX5_EXPANSION_GTP),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_OUTER_IPV6] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV6_UDP,
			 MLX5_EXPANSION_OUTER_IPV6_TCP,
			 MLX5_EXPANSION_IPV4,
			 MLX5_EXPANSION_IPV6,
			 MLX5_EXPANSION_GRE,
			 MLX5_EXPANSION_NVGRE),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
			RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						  MLX5_EXPANSION_VXLAN_GPE,
						  MLX5_EXPANSION_MPLS,
						  MLX5_EXPANSION_GENEVE,
						  MLX5_EXPANSION_GTP),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
	},
	[MLX5_EXPANSION_VXLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						  MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
	[MLX5_EXPANSION_STD_VXLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
	[MLX5_EXPANSION_L3_VXLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
	[MLX5_EXPANSION_VXLAN_GPE] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						  MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
	},
	[MLX5_EXPANSION_GRE] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						  MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6,
						  MLX5_EXPANSION_GRE_KEY,
						  MLX5_EXPANSION_MPLS),
		.type = RTE_FLOW_ITEM_TYPE_GRE,
	},
	[MLX5_EXPANSION_GRE_KEY] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6,
						  MLX5_EXPANSION_MPLS),
		.type = RTE_FLOW_ITEM_TYPE_GRE_KEY,
		.node_flags = MLX5_EXPANSION_NODE_OPTIONAL,
	},
	[MLX5_EXPANSION_NVGRE] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
	},
	[MLX5_EXPANSION_MPLS] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6,
						  MLX5_EXPANSION_ETH),
		.type = RTE_FLOW_ITEM_TYPE_MPLS,
		.node_flags = MLX5_EXPANSION_NODE_OPTIONAL,
	},
	[MLX5_EXPANSION_ETH] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.node_flags = MLX5_EXPANSION_NODE_EXPLICIT,
	},
	[MLX5_EXPANSION_IPV4] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
						  MLX5_EXPANSION_IPV4_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_IPV4_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_IPV6] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
						  MLX5_EXPANSION_IPV6_TCP,
						  MLX5_EXPANSION_IPV6_FRAG_EXT),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
			RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_IPV6_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
	},
	[MLX5_EXPANSION_IPV6_FRAG_EXT] = {
		.type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
	},
	[MLX5_EXPANSION_GTP] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_GTP,
	},
	[MLX5_EXPANSION_GENEVE] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						  MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
	},
};
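
/*
 * Reading the table above as a graph (illustrative): starting from
 * MLX5_EXPANSION_ROOT_OUTER, one possible path is
 *
 *   OUTER_ETH -> (OUTER_VLAN, explicit) -> OUTER_IPV4 -> OUTER_IPV4_UDP
 *             -> VXLAN -> ETH -> ...
 *
 * EXPLICIT nodes (the VLAN entries) are traversed only when the user
 * pattern itself contains the matching item; OPTIONAL nodes (GRE_KEY,
 * MPLS) can be appended where they match, but the expansion does not
 * descend below them.
 */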

static struct rte_flow_action_handle *
mlx5_action_handle_create(struct rte_eth_dev *dev,
			  const struct rte_flow_indir_action_conf *conf,
			  const struct rte_flow_action *action,
			  struct rte_flow_error *error);
static int mlx5_action_handle_destroy
				(struct rte_eth_dev *dev,
				 struct rte_flow_action_handle *handle,
				 struct rte_flow_error *error);
static int mlx5_action_handle_update
				(struct rte_eth_dev *dev,
				 struct rte_flow_action_handle *handle,
				 const void *update,
				 struct rte_flow_error *error);
static int mlx5_action_handle_query
				(struct rte_eth_dev *dev,
				 const struct rte_flow_action_handle *handle,
				 void *data,
				 struct rte_flow_error *error);
static int
mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
			   struct rte_flow_tunnel *app_tunnel,
			   struct rte_flow_action **actions,
			   uint32_t *num_of_actions,
			   struct rte_flow_error *error);
static int
mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
		       struct rte_flow_tunnel *app_tunnel,
		       struct rte_flow_item **items,
		       uint32_t *num_of_items,
		       struct rte_flow_error *error);
static int
mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
			      struct rte_flow_item *pmd_items,
			      uint32_t num_items, struct rte_flow_error *err);
static int
mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
				struct rte_flow_action *pmd_actions,
				uint32_t num_actions,
				struct rte_flow_error *err);
static int
mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
				  struct rte_mbuf *m,
				  struct rte_flow_restore_info *info,
				  struct rte_flow_error *err);
static struct rte_flow_item_flex_handle *
mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
			   const struct rte_flow_item_flex_conf *conf,
			   struct rte_flow_error *error);
static int
mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
			    const struct rte_flow_item_flex_handle *handle,
			    struct rte_flow_error *error);
static int
mlx5_flow_info_get(struct rte_eth_dev *dev,
		   struct rte_flow_port_info *port_info,
		   struct rte_flow_queue_info *queue_info,
		   struct rte_flow_error *error);
static int
mlx5_flow_port_configure(struct rte_eth_dev *dev,
			 const struct rte_flow_port_attr *port_attr,
			 uint16_t nb_queue,
			 const struct rte_flow_queue_attr *queue_attr[],
			 struct rte_flow_error *err);

static struct rte_flow_pattern_template *
mlx5_flow_pattern_template_create(struct rte_eth_dev *dev,
		const struct rte_flow_pattern_template_attr *attr,
		const struct rte_flow_item items[],
		struct rte_flow_error *error);

static int
mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev,
				   struct rte_flow_pattern_template *template,
				   struct rte_flow_error *error);
static struct rte_flow_actions_template *
mlx5_flow_actions_template_create(struct rte_eth_dev *dev,
			const struct rte_flow_actions_template_attr *attr,
			const struct rte_flow_action actions[],
			const struct rte_flow_action masks[],
			struct rte_flow_error *error);
static int
mlx5_flow_actions_template_destroy(struct rte_eth_dev *dev,
				   struct rte_flow_actions_template *template,
				   struct rte_flow_error *error);

static struct rte_flow_template_table *
mlx5_flow_table_create(struct rte_eth_dev *dev,
		       const struct rte_flow_template_table_attr *attr,
		       struct rte_flow_pattern_template *item_templates[],
		       uint8_t nb_item_templates,
		       struct rte_flow_actions_template *action_templates[],
		       uint8_t nb_action_templates,
		       struct rte_flow_error *error);
static int
mlx5_flow_table_destroy(struct rte_eth_dev *dev,
			struct rte_flow_template_table *table,
			struct rte_flow_error *error);
static struct rte_flow *
mlx5_flow_async_flow_create(struct rte_eth_dev *dev,
			    uint32_t queue,
			    const struct rte_flow_op_attr *attr,
			    struct rte_flow_template_table *table,
			    const struct rte_flow_item items[],
			    uint8_t pattern_template_index,
			    const struct rte_flow_action actions[],
			    uint8_t action_template_index,
			    void *user_data,
			    struct rte_flow_error *error);
static int
mlx5_flow_async_flow_destroy(struct rte_eth_dev *dev,
			     uint32_t queue,
			     const struct rte_flow_op_attr *attr,
			     struct rte_flow *flow,
			     void *user_data,
			     struct rte_flow_error *error);
static int
mlx5_flow_pull(struct rte_eth_dev *dev,
	       uint32_t queue,
	       struct rte_flow_op_result res[],
	       uint16_t n_res,
	       struct rte_flow_error *error);
static int
mlx5_flow_push(struct rte_eth_dev *dev,
	       uint32_t queue,
	       struct rte_flow_error *error);

static struct rte_flow_action_handle *
mlx5_flow_async_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
				 const struct rte_flow_op_attr *attr,
				 const struct rte_flow_indir_action_conf *conf,
				 const struct rte_flow_action *action,
				 void *user_data,
				 struct rte_flow_error *error);

static int
mlx5_flow_async_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
				 const struct rte_flow_op_attr *attr,
				 struct rte_flow_action_handle *handle,
				 const void *update,
				 void *user_data,
				 struct rte_flow_error *error);

static int
mlx5_flow_async_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
				  const struct rte_flow_op_attr *attr,
				  struct rte_flow_action_handle *handle,
				  void *user_data,
				  struct rte_flow_error *error);

static const struct rte_flow_ops mlx5_flow_ops = {
	.validate = mlx5_flow_validate,
	.create = mlx5_flow_create,
	.destroy = mlx5_flow_destroy,
	.flush = mlx5_flow_flush,
	.isolate = mlx5_flow_isolate,
	.query = mlx5_flow_query,
	.dev_dump = mlx5_flow_dev_dump,
	.get_aged_flows = mlx5_flow_get_aged_flows,
	.action_handle_create = mlx5_action_handle_create,
	.action_handle_destroy = mlx5_action_handle_destroy,
	.action_handle_update = mlx5_action_handle_update,
	.action_handle_query = mlx5_action_handle_query,
	.tunnel_decap_set = mlx5_flow_tunnel_decap_set,
	.tunnel_match = mlx5_flow_tunnel_match,
	.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
	.tunnel_item_release = mlx5_flow_tunnel_item_release,
	.get_restore_info = mlx5_flow_tunnel_get_restore_info,
	.flex_item_create = mlx5_flow_flex_item_create,
	.flex_item_release = mlx5_flow_flex_item_release,
	.info_get = mlx5_flow_info_get,
	.configure = mlx5_flow_port_configure,
	.pattern_template_create = mlx5_flow_pattern_template_create,
	.pattern_template_destroy = mlx5_flow_pattern_template_destroy,
	.actions_template_create = mlx5_flow_actions_template_create,
	.actions_template_destroy = mlx5_flow_actions_template_destroy,
	.template_table_create = mlx5_flow_table_create,
	.template_table_destroy = mlx5_flow_table_destroy,
	.async_create = mlx5_flow_async_flow_create,
	.async_destroy = mlx5_flow_async_flow_destroy,
	.pull = mlx5_flow_pull,
	.push = mlx5_flow_push,
	.async_action_handle_create = mlx5_flow_async_action_handle_create,
	.async_action_handle_update = mlx5_flow_async_action_handle_update,
	.async_action_handle_destroy = mlx5_flow_async_action_handle_destroy,
};

/* Tunnel information. */
struct mlx5_flow_tunnel_info {
	uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
};

static struct mlx5_flow_tunnel_info tunnels_info[] = {
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GENEVE,
		.ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GRE,
		.ptype = RTE_PTYPE_TUNNEL_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_NVGRE,
		.ptype = RTE_PTYPE_TUNNEL_NVGRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPIP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GTP,
		.ptype = RTE_PTYPE_TUNNEL_GTPU,
	},
};
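
/*
 * The lookup loops in flow_drv_rxq_flags_set()/flow_drv_rxq_flags_trim()
 * below stop at the first entry whose tunnel bits are all present in the
 * flow layers, so the more specific MPLS-over-UDP entry must precede the
 * plain MPLS entry in this table.
 */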

/**
 * Translate tag ID to register.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] feature
 *   The feature that requests the register.
 * @param[in] id
 *   The requested register ID.
 * @param[out] error
 *   Error description in case of any.
 *
 * @return
 *   The requested register on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
		     enum mlx5_feature_name feature,
		     uint32_t id,
		     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_sh_config *config = &priv->sh->config;
	enum modify_reg start_reg;
	bool skip_mtr_reg = false;

	switch (feature) {
	case MLX5_HAIRPIN_RX:
		return REG_B;
	case MLX5_HAIRPIN_TX:
		return REG_A;
	case MLX5_METADATA_RX:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_B;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_METADATA_TX:
		return REG_A;
	case MLX5_METADATA_FDB:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NON;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_FLOW_MARK:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NON;
		case MLX5_XMETA_MODE_META16:
			return REG_C_1;
		case MLX5_XMETA_MODE_META32:
			return REG_C_0;
		}
		break;
	case MLX5_MTR_ID:
		/*
		 * If meter color and meter id share one register, flow match
		 * should use the meter color register for match.
		 */
		if (priv->mtr_reg_share)
			return priv->mtr_color_reg;
		else
			return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
			       REG_C_3;
	case MLX5_MTR_COLOR:
	case MLX5_ASO_FLOW_HIT:
	case MLX5_ASO_CONNTRACK:
	case MLX5_SAMPLE_ID:
		/* All features use the same REG_C. */
		MLX5_ASSERT(priv->mtr_color_reg != REG_NON);
		return priv->mtr_color_reg;
	case MLX5_COPY_MARK:
		/*
		 * The metadata COPY_MARK register is used in the meter suffix
		 * subflow when a meter is present. It is safe to share the
		 * same register.
		 */
		return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
	case MLX5_APP_TAG:
		/*
		 * If a meter is enabled, it engages a register for both color
		 * match and flow match. If the meter color match does not use
		 * REG_C_2, the REG_C_x used by the meter color match has to
		 * be skipped.
		 * If no meter is enabled, all available registers can be used.
		 */
		start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
			    (priv->mtr_reg_share ? REG_C_3 : REG_C_4);
		skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
		if (id > (uint32_t)(REG_C_7 - start_reg))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "invalid tag id");
		if (priv->sh->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		/*
		 * This case means the meter is using a REG_C_x greater than
		 * REG_C_2. Take care not to conflict with the meter color
		 * REG_C_x. If the available index REG_C_y >= REG_C_x, skip
		 * the color register.
		 */
		if (skip_mtr_reg && priv->sh->flow_mreg_c
		    [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
			if (id >= (uint32_t)(REG_C_7 - start_reg))
				return rte_flow_error_set(error, EINVAL,
						       RTE_FLOW_ERROR_TYPE_ITEM,
							NULL, "invalid tag id");
			if (priv->sh->flow_mreg_c
			    [id + 1 + start_reg - REG_C_0] != REG_NON)
				return priv->sh->flow_mreg_c
					       [id + 1 + start_reg - REG_C_0];
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		}
		return priv->sh->flow_mreg_c[id + start_reg - REG_C_0];
	}
	MLX5_ASSERT(false);
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, "invalid feature name");
}
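
/*
 * Usage sketch (illustrative): a caller validating a TAG item would ask
 * for the register backing application tag index 0 and bail out on error:
 *
 *   int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error);
 *
 *   if (reg < 0)
 *           return reg; (rte_errno is already set by the helper)
 *
 * With a meter color match occupying REG_C_2, the lookup transparently
 * starts from REG_C_3 (with register sharing) or REG_C_4 (without).
 */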

/**
 * Check extensive flow metadata register support.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 * @return
 *   True if device supports extensive flow metadata register, otherwise false.
 */
bool
mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	/*
	 * Having available reg_c can be regarded inclusively as supporting
	 * extensive flow metadata registers, which could mean:
	 * - metadata register copy action by modify header.
	 * - 16 modify header actions are supported.
	 * - reg_c's are preserved across different domains (FDB and NIC) on
	 *   packet loopback by flow lookup miss.
	 */
	return priv->sh->flow_mreg_c[2] != REG_NON;
}

/**
 * Get the lowest priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Pointer to device flow rule attributes.
 *
 * @return
 *   The value of lowest priority of flow.
 */
uint32_t
mlx5_get_lowest_priority(struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!attr->group && !attr->transfer)
		return priv->sh->flow_max_priority - 2;
	return MLX5_NON_ROOT_FLOW_MAX_PRIO - 1;
}

/**
 * Calculate matcher priority of the flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Pointer to device flow rule attributes.
 * @param[in] subpriority
 *   The priority based on the items.
 * @param[in] external
 *   Flow is user flow.
 * @return
 *   The matcher priority of the flow.
 */
uint16_t
mlx5_get_matcher_priority(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  uint32_t subpriority, bool external)
{
	uint16_t priority = (uint16_t)attr->priority;
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!attr->group && !attr->transfer) {
		if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
			priority = priv->sh->flow_max_priority - 1;
		return mlx5_os_flow_adjust_priority(dev, priority, subpriority);
	} else if (!external && attr->transfer && attr->group == 0 &&
		   attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR) {
		return (priv->sh->flow_max_priority - 1) * 3;
	}
	if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
		priority = MLX5_NON_ROOT_FLOW_MAX_PRIO;
	return priority * 3 + subpriority;
}

/**
 * Verify the @p item specifications (spec, last, mask) are compatible with the
 * NIC capabilities.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] mask
 *   @p item->mask or flow default bit-masks.
 * @param[in] nic_mask
 *   Bit-masks covering supported fields by the NIC to compare with user mask.
 * @param[in] size
 *   Bit-masks size in bytes.
 * @param[in] range_accepted
 *   True if range of values is accepted for specific fields, false otherwise.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
			  const uint8_t *mask,
			  const uint8_t *nic_mask,
			  unsigned int size,
			  bool range_accepted,
			  struct rte_flow_error *error)
{
	unsigned int i;

	MLX5_ASSERT(nic_mask);
	for (i = 0; i < size; ++i)
		if ((nic_mask[i] | mask[i]) != nic_mask[i])
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "mask enables non supported"
						  " bits");
	if (!item->spec && (item->mask || item->last))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "mask/last without a spec is not"
					  " supported");
	if (item->spec && item->last && !range_accepted) {
		uint8_t spec[size];
		uint8_t last[size];
		unsigned int i;
		int ret;

		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
			last[i] = ((const uint8_t *)item->last)[i] & mask[i];
		}
		ret = memcmp(spec, last, size);
		if (ret != 0)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "range is not valid");
	}
	return 0;
}
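
/*
 * Example of the mask check above (illustrative): with a NIC mask byte of
 * 0x0f and a user mask byte of 0x1f, (0x0f | 0x1f) != 0x0f, so bit 4
 * enables a field the NIC cannot match on and the item is rejected with
 * ENOTSUP.
 */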

/**
 * Adjust the hash fields according to the @p flow information.
 *
 * @param[in] rss_desc
 *   Pointer to the mlx5 RSS flow descriptor.
 * @param[in] tunnel
 *   1 when the hash field is for a tunnel item.
 * @param[in] layer_types
 *   RTE_ETH_RSS_* types.
 * @param[in] hash_fields
 *   Item hash fields.
 *
 * @return
 *   The hash fields that should be used.
 */
uint64_t
mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
			    int tunnel __rte_unused, uint64_t layer_types,
			    uint64_t hash_fields)
{
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	int rss_request_inner = rss_desc->level >= 2;

	/* Check RSS hash level for tunnel. */
	if (tunnel && rss_request_inner)
		hash_fields |= IBV_RX_HASH_INNER;
	else if (tunnel || rss_request_inner)
		return 0;
#endif
	/* Check if requested layer matches RSS hash fields. */
	if (!(rss_desc->types & layer_types))
		return 0;
	return hash_fields;
}
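
/*
 * Behavior sketch (assuming HAVE_IBV_DEVICE_TUNNEL_SUPPORT): for a tunnel
 * flow with rss_desc->level == 2, an inner-layer request keeps its hash
 * fields and gains IBV_RX_HASH_INNER; the same request with level <= 1
 * returns 0, so the hash falls back to the outer-header fields instead.
 */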

/**
 * Look up and set the ptype in the Rx queue data. Only a single ptype can be
 * used; if several tunnel rules are attached to this queue, the tunnel ptype
 * is cleared.
 *
 * @param rxq_ctrl
 *   Rx queue to update.
 */
static void
flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	unsigned int i;
	uint32_t tunnel_ptype = 0;

	/* Look up for the ptype to use. */
	for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
		if (!rxq_ctrl->flow_tunnels_n[i])
			continue;
		if (!tunnel_ptype) {
			tunnel_ptype = tunnels_info[i].ptype;
		} else {
			tunnel_ptype = 0;
			break;
		}
	}
	rxq_ctrl->rxq.tunnel = tunnel_ptype;
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
 * flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] dev_handle
 *   Pointer to device flow handle structure.
 */
void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
		       struct mlx5_flow_handle *dev_handle)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
	struct mlx5_ind_table_obj *ind_tbl = NULL;
	unsigned int i;

	if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
		struct mlx5_hrxq *hrxq;

		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
				      dev_handle->rix_hrxq);
		if (hrxq)
			ind_tbl = hrxq->ind_table;
	} else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
		struct mlx5_shared_action_rss *shared_rss;

		shared_rss = mlx5_ipool_get
			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
			 dev_handle->rix_srss);
		if (shared_rss)
			ind_tbl = shared_rss->ind_tbl;
	}
	if (!ind_tbl)
		return;
	for (i = 0; i != ind_tbl->queues_n; ++i) {
		int idx = ind_tbl->queues[i];
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);

		MLX5_ASSERT(rxq_ctrl != NULL);
		if (rxq_ctrl == NULL)
			continue;
		/*
		 * To support metadata register copy on Tx loopback, this must
		 * be always enabled (metadata may arrive from another port,
		 * not only from local flows).
		 */
		if (tunnel) {
			unsigned int j;

			/* Increase the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_handle->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]++;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

static void
flow_rxq_mark_flag_set(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_ctrl *rxq_ctrl;

	if (priv->mark_enabled)
		return;
	LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
		rxq_ctrl->rxq.mark = 1;
	}
	priv->mark_enabled = 1;
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Pointer to flow structure.
 */
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t handle_idx;
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	if (wks->mark)
		flow_rxq_mark_flag_set(dev);
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, dev_handle, next)
		flow_drv_rxq_flags_set(dev, dev_handle);
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * device flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] dev_handle
 *   Pointer to the device flow handle structure.
 */
static void
flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
			struct mlx5_flow_handle *dev_handle)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
	struct mlx5_ind_table_obj *ind_tbl = NULL;
	unsigned int i;

	if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
		struct mlx5_hrxq *hrxq;

		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
				      dev_handle->rix_hrxq);
		if (hrxq)
			ind_tbl = hrxq->ind_table;
	} else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
		struct mlx5_shared_action_rss *shared_rss;

		shared_rss = mlx5_ipool_get
			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
			 dev_handle->rix_srss);
		if (shared_rss)
			ind_tbl = shared_rss->ind_tbl;
	}
	if (!ind_tbl)
		return;
	MLX5_ASSERT(dev->data->dev_started);
	for (i = 0; i != ind_tbl->queues_n; ++i) {
		int idx = ind_tbl->queues[i];
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);

		MLX5_ASSERT(rxq_ctrl != NULL);
		if (rxq_ctrl == NULL)
			continue;
		if (tunnel) {
			unsigned int j;

			/* Decrease the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_handle->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]--;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * @p flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the flow.
 */
static void
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t handle_idx;
	struct mlx5_flow_handle *dev_handle;

	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, dev_handle, next)
		flow_drv_rxq_flags_trim(dev, dev_handle);
}

/**
 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
		unsigned int j;

		if (rxq == NULL || rxq->ctrl == NULL)
			continue;
		rxq->ctrl->rxq.mark = 0;
		for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
			rxq->ctrl->flow_tunnels_n[j] = 0;
		rxq->ctrl->rxq.tunnel = 0;
	}
	priv->mark_enabled = 0;
}

/**
 * Set the Rx queue dynamic metadata (mask and offset) for a flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 */
void
mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
		struct mlx5_rxq_data *data;

		if (rxq == NULL || rxq->ctrl == NULL)
			continue;
		data = &rxq->ctrl->rxq;
		if (!rte_flow_dynf_metadata_avail()) {
			data->dynf_meta = 0;
			data->flow_meta_mask = 0;
			data->flow_meta_offset = -1;
			data->flow_meta_port_mask = 0;
		} else {
			data->dynf_meta = 1;
			data->flow_meta_mask = rte_flow_dynf_metadata_mask;
			data->flow_meta_offset = rte_flow_dynf_metadata_offs;
			data->flow_meta_port_mask = priv->sh->dv_meta_mask;
		}
	}
}
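
/*
 * Illustrative datapath consequence (sketch, not driver code): once
 * dynf_meta is set, an Rx burst can publish the metadata, e.g.
 *
 *   if (rxq->dynf_meta && meta)
 *           *RTE_FLOW_DYNF_METADATA(pkt) = meta & rxq->flow_meta_port_mask;
 *
 * where RTE_FLOW_DYNF_METADATA() resolves the dynamic mbuf field registered
 * through rte_flow_dynf_metadata_register().
 */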

/*
 * Return a pointer to the desired action in the list of actions.
 *
 * @param[in] actions
 *   The list of actions to search the action in.
 * @param[in] action
 *   The action to find.
 *
 * @return
 *   Pointer to the action in the list, if found. NULL otherwise.
 */
const struct rte_flow_action *
mlx5_flow_find_action(const struct rte_flow_action *actions,
		      enum rte_flow_action_type action)
{
	if (actions == NULL)
		return NULL;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
		if (actions->type == action)
			return actions;
	return NULL;
}

/*
 * Validate the flag action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_flag(uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't mark and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 flag"
					  " actions in same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "flag action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the mark action.
 *
 * @param[in] action
 *   Pointer to the mark action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
			       uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	const struct rte_flow_action_mark *mark = action->conf;

	if (!mark)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "configuration cannot be null");
	if (mark->id >= MLX5_FLOW_MARK_MAX)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &mark->id,
					  "mark id must be in 0 <= id < "
					  RTE_STR(MLX5_FLOW_MARK_MAX));
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't flag and mark in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 mark actions in same"
					  " flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "mark action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the drop action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_drop(uint64_t action_flags __rte_unused,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "drop action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the queue action.
 *
 * @param[in] action
 *   Pointer to the queue action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
				uint64_t action_flags,
				struct rte_eth_dev *dev,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_queue *queue = action->conf;

	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "queue action not supported for egress.");
	if (mlx5_is_external_rxq(dev, queue->index))
		return 0;
	if (!priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No Rx queues configured");
	if (queue->index >= priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue index out of range");
	if (mlx5_rxq_get(dev, queue->index) == NULL)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue is not configured");
	return 0;
}

/**
 * Validate queue numbers for device RSS.
 *
 * @param[in] dev
 *   Configured device.
 * @param[in] queues
 *   Array of queue numbers.
 * @param[in] queues_n
 *   Size of the @p queues array.
 * @param[out] error
 *   On error, filled with a textual error description.
 * @param[out] queue_idx
 *   On error, filled with an offending queue index in @p queues array.
 *
 * @return
 *   0 on success, a negative errno code on error.
 */
static int
mlx5_validate_rss_queues(struct rte_eth_dev *dev,
			 const uint16_t *queues, uint32_t queues_n,
			 const char **error, uint32_t *queue_idx)
{
	const struct mlx5_priv *priv = dev->data->dev_private;
	bool is_hairpin = false;
	bool is_ext_rss = false;
	uint32_t i;

	for (i = 0; i != queues_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl;

		if (mlx5_is_external_rxq(dev, queues[i])) {
			is_ext_rss = true;
			continue;
		}
		if (is_ext_rss) {
			*error = "Combining external and regular RSS queues is not supported";
			*queue_idx = i;
			return -ENOTSUP;
		}
		if (queues[i] >= priv->rxqs_n) {
			*error = "queue index out of range";
			*queue_idx = i;
			return -EINVAL;
		}
		rxq_ctrl = mlx5_rxq_ctrl_get(dev, queues[i]);
		if (rxq_ctrl == NULL) {
			*error = "queue is not configured";
			*queue_idx = i;
			return -EINVAL;
		}
		if (i == 0 && rxq_ctrl->is_hairpin)
			is_hairpin = true;
		if (is_hairpin != rxq_ctrl->is_hairpin) {
			*error = "combining hairpin and regular RSS queues is not supported";
			*queue_idx = i;
			return -ENOTSUP;
		}
	}
	return 0;
}
 */
int
mlx5_validate_action_rss(struct rte_eth_dev *dev,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_rss *rss = action->conf;
	int ret;
	const char *message;
	uint32_t queue_idx;

	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
	    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->func,
					  "RSS hash function not supported");
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (rss->level > 2)
#else
	if (rss->level > 1)
#endif
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->level,
					  "tunnel RSS is not supported");
	/* allow RSS key_len 0 in case of NULL (default) RSS key. */
	if (rss->key_len == 0 && rss->key != NULL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key length 0");
	if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too small");
	if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too large");
	if (rss->queue_num > priv->sh->dev_cap.ind_table_max_size)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->queue_num,
					  "number of queues too large");
	if (rss->types & MLX5_RSS_HF_MASK)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->types,
					  "some RSS protocols are not"
					  " supported");
	if ((rss->types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) &&
	    !(rss->types & RTE_ETH_RSS_IP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "L3 partial RSS requested but L3 RSS"
					  " type not specified");
	if ((rss->types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) &&
	    !(rss->types & (RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "L4 partial RSS requested but L4 RSS"
					  " type not specified");
	if (!priv->rxqs_n && priv->ext_rxqs == NULL)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No Rx queues configured");
	if (!rss->queue_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No queues configured");
	ret = mlx5_validate_rss_queues(dev, rss->queue, rss->queue_num,
				       &message, &queue_idx);
	if (ret != 0) {
		return rte_flow_error_set(error, -ret,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->queue[queue_idx], message);
	}
	return 0;
}

/**
 * Validate the rss action.
 *
 * @param[in] action
 *   Pointer to the RSS action.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[in] item_flags
 *   Items that were detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
			      uint64_t action_flags,
			      struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attr,
			      uint64_t item_flags,
			      struct rte_flow_error *error)
{
	const struct rte_flow_action_rss *rss = action->conf;
	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	int ret;

	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions"
					  " in same flow");
	ret = mlx5_validate_action_rss(dev, action, error);
	if (ret)
		return ret;
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "rss action not supported for "
					  "egress");
	if (rss->level > 1 && !tunnel)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "inner RSS is not supported for "
					  "non-tunnel flows");
	if ((item_flags & MLX5_FLOW_LAYER_ECPRI) &&
	    !(item_flags & MLX5_FLOW_LAYER_INNER_L4_UDP)) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "RSS on eCPRI is not supported now");
	}
	if ((item_flags & MLX5_FLOW_LAYER_MPLS) &&
	    !(item_flags &
	      (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3)) &&
	    rss->level > 1)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "MPLS inner RSS needs to specify inner L2/L3 items after MPLS in pattern");
	return 0;
}

/**
 * Validate the default miss action.
 *
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_default_miss(uint64_t action_flags,
				       const struct rte_flow_attr *attr,
				       struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "default miss action not supported "
					  "for egress");
	if (attr->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
					  "only group 0 is supported");
	if (attr->transfer)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  NULL, "transfer is not supported");
	return 0;
}

/**
 * Validate the count action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
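 *
 * Counters attached with this action are read back later through the
 * generic query path, e.g. (sketch only, error handling omitted):
 *
 *   struct rte_flow_query_count count = { .reset = 1 };
 *   struct rte_flow_action action = {
 *           .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *   };
 *   rte_flow_query(port_id, flow, &action, &count, &error);
 *
 * count.hits and count.bytes are only meaningful when the corresponding
 * hits_set and bytes_set bits are set by the driver.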
 */
int
mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "count action not supported for "
					  "egress");
	return 0;
}

/**
 * Validate the ASO CT action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] conntrack
 *   Pointer to the CT action profile.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_validate_action_ct(struct rte_eth_dev *dev,
			const struct rte_flow_action_conntrack *conntrack,
			struct rte_flow_error *error)
{
	RTE_SET_USED(dev);

	if (conntrack->state > RTE_FLOW_CONNTRACK_STATE_TIME_WAIT)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Invalid CT state");
	if (conntrack->last_index > RTE_FLOW_CONNTRACK_FLAG_RST)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Invalid last TCP packet flag");
	return 0;
}

/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attributes,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t priority_max = priv->sh->flow_max_priority - 1;

	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL, "groups are not supported");
	if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
	    attributes->priority >= priority_max)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL, "priority out of range");
	if (attributes->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "egress is not supported");
	if (attributes->transfer && !priv->sh->config.dv_esw_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  NULL, "transfer is not supported");
	if (!attributes->ingress)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "ingress attribute is mandatory");
	return 0;
}

/**
 * Validate ICMP6 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
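 *
 * The checks below require an IPv6 header before this item, e.g. the
 * following pattern sketch is accepted (masks left NULL for defaults):
 *
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV6 },
 *           { .type = RTE_FLOW_ITEM_TYPE_ICMP6 },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };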
 */
int
mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
			      uint64_t item_flags,
			      uint8_t target_protocol,
			      struct rte_flow_error *error)
{
	const struct rte_flow_item_icmp6 *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
				      MLX5_FLOW_LAYER_OUTER_L3_IPV6;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ICMP6 layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv6 is mandatory to filter on"
					  " ICMP6");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_icmp6_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_icmp6_mask,
		 sizeof(struct rte_flow_item_icmp6),
		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate ICMP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     uint8_t target_protocol,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_icmp *mask = item->mask;
	const struct rte_flow_item_icmp nic_mask = {
		.hdr.icmp_type = 0xff,
		.hdr.icmp_code = 0xff,
		.hdr.icmp_ident = RTE_BE16(0xffff),
		.hdr.icmp_seq_nb = RTE_BE16(0xffff),
	};
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
				      MLX5_FLOW_LAYER_OUTER_L3_IPV4;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ICMP layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv4 is mandatory to filter"
					  " on ICMP");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &nic_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&nic_mask,
		 sizeof(struct rte_flow_item_icmp),
		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate Ethernet item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] ext_vlan_sup
 *   Whether extended VLAN features are supported or not.
 * @param[out] error
 *   Pointer to error structure.
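 *
 * An Ethernet item may not follow L3, VLAN or GTP layers. A sketch of a
 * spec/mask pair this validator accepts (values are illustrative):
 *
 *   struct rte_flow_item_eth eth_spec = {
 *           .type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *   };
 *   struct rte_flow_item_eth eth_mask = {
 *           .type = RTE_BE16(0xffff),
 *   };
 *   struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_ETH,
 *           .spec = &eth_spec,
 *           .mask = &eth_mask,
 *   };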
2264 * 2265 * @return 2266 * 0 on success, a negative errno value otherwise and rte_errno is set. 2267 */ 2268 int 2269 mlx5_flow_validate_item_eth(const struct rte_flow_item *item, 2270 uint64_t item_flags, bool ext_vlan_sup, 2271 struct rte_flow_error *error) 2272 { 2273 const struct rte_flow_item_eth *mask = item->mask; 2274 const struct rte_flow_item_eth nic_mask = { 2275 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", 2276 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", 2277 .type = RTE_BE16(0xffff), 2278 .has_vlan = ext_vlan_sup ? 1 : 0, 2279 }; 2280 int ret; 2281 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 2282 const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : 2283 MLX5_FLOW_LAYER_OUTER_L2; 2284 2285 if (item_flags & ethm) 2286 return rte_flow_error_set(error, ENOTSUP, 2287 RTE_FLOW_ERROR_TYPE_ITEM, item, 2288 "multiple L2 layers not supported"); 2289 if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) || 2290 (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3))) 2291 return rte_flow_error_set(error, EINVAL, 2292 RTE_FLOW_ERROR_TYPE_ITEM, item, 2293 "L2 layer should not follow " 2294 "L3 layers"); 2295 if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) || 2296 (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN))) 2297 return rte_flow_error_set(error, EINVAL, 2298 RTE_FLOW_ERROR_TYPE_ITEM, item, 2299 "L2 layer should not follow VLAN"); 2300 if (item_flags & MLX5_FLOW_LAYER_GTP) 2301 return rte_flow_error_set(error, EINVAL, 2302 RTE_FLOW_ERROR_TYPE_ITEM, item, 2303 "L2 layer should not follow GTP"); 2304 if (!mask) 2305 mask = &rte_flow_item_eth_mask; 2306 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 2307 (const uint8_t *)&nic_mask, 2308 sizeof(struct rte_flow_item_eth), 2309 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 2310 return ret; 2311 } 2312 2313 /** 2314 * Validate VLAN item. 2315 * 2316 * @param[in] item 2317 * Item specification. 2318 * @param[in] item_flags 2319 * Bit-fields that holds the items detected until now. 2320 * @param[in] dev 2321 * Ethernet device flow is being created on. 2322 * @param[out] error 2323 * Pointer to error structure. 2324 * 2325 * @return 2326 * 0 on success, a negative errno value otherwise and rte_errno is set. 2327 */ 2328 int 2329 mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, 2330 uint64_t item_flags, 2331 struct rte_eth_dev *dev, 2332 struct rte_flow_error *error) 2333 { 2334 const struct rte_flow_item_vlan *spec = item->spec; 2335 const struct rte_flow_item_vlan *mask = item->mask; 2336 const struct rte_flow_item_vlan nic_mask = { 2337 .tci = RTE_BE16(UINT16_MAX), 2338 .inner_type = RTE_BE16(UINT16_MAX), 2339 }; 2340 uint16_t vlan_tag = 0; 2341 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 2342 int ret; 2343 const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 | 2344 MLX5_FLOW_LAYER_INNER_L4) : 2345 (MLX5_FLOW_LAYER_OUTER_L3 | 2346 MLX5_FLOW_LAYER_OUTER_L4); 2347 const uint64_t vlanm = tunnel ? 
MLX5_FLOW_LAYER_INNER_VLAN : 2348 MLX5_FLOW_LAYER_OUTER_VLAN; 2349 2350 if (item_flags & vlanm) 2351 return rte_flow_error_set(error, EINVAL, 2352 RTE_FLOW_ERROR_TYPE_ITEM, item, 2353 "multiple VLAN layers not supported"); 2354 else if ((item_flags & l34m) != 0) 2355 return rte_flow_error_set(error, EINVAL, 2356 RTE_FLOW_ERROR_TYPE_ITEM, item, 2357 "VLAN cannot follow L3/L4 layer"); 2358 if (!mask) 2359 mask = &rte_flow_item_vlan_mask; 2360 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 2361 (const uint8_t *)&nic_mask, 2362 sizeof(struct rte_flow_item_vlan), 2363 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 2364 if (ret) 2365 return ret; 2366 if (!tunnel && mask->tci != RTE_BE16(0x0fff)) { 2367 struct mlx5_priv *priv = dev->data->dev_private; 2368 2369 if (priv->vmwa_context) { 2370 /* 2371 * Non-NULL context means we have a virtual machine 2372 * and SR-IOV enabled, we have to create VLAN interface 2373 * to make hypervisor to setup E-Switch vport 2374 * context correctly. We avoid creating the multiple 2375 * VLAN interfaces, so we cannot support VLAN tag mask. 2376 */ 2377 return rte_flow_error_set(error, EINVAL, 2378 RTE_FLOW_ERROR_TYPE_ITEM, 2379 item, 2380 "VLAN tag mask is not" 2381 " supported in virtual" 2382 " environment"); 2383 } 2384 } 2385 if (spec) { 2386 vlan_tag = spec->tci; 2387 vlan_tag &= mask->tci; 2388 } 2389 /* 2390 * From verbs perspective an empty VLAN is equivalent 2391 * to a packet without VLAN layer. 2392 */ 2393 if (!vlan_tag) 2394 return rte_flow_error_set(error, EINVAL, 2395 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, 2396 item->spec, 2397 "VLAN cannot be empty"); 2398 return 0; 2399 } 2400 2401 /** 2402 * Validate IPV4 item. 2403 * 2404 * @param[in] item 2405 * Item specification. 2406 * @param[in] item_flags 2407 * Bit-fields that holds the items detected until now. 2408 * @param[in] last_item 2409 * Previous validated item in the pattern items. 2410 * @param[in] ether_type 2411 * Type in the ethernet layer header (including dot1q). 2412 * @param[in] acc_mask 2413 * Acceptable mask, if NULL default internal default mask 2414 * will be used to check whether item fields are supported. 2415 * @param[in] range_accepted 2416 * True if range of values is accepted for specific fields, false otherwise. 2417 * @param[out] error 2418 * Pointer to error structure. 2419 * 2420 * @return 2421 * 0 on success, a negative errno value otherwise and rte_errno is set. 2422 */ 2423 int 2424 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, 2425 uint64_t item_flags, 2426 uint64_t last_item, 2427 uint16_t ether_type, 2428 const struct rte_flow_item_ipv4 *acc_mask, 2429 bool range_accepted, 2430 struct rte_flow_error *error) 2431 { 2432 const struct rte_flow_item_ipv4 *mask = item->mask; 2433 const struct rte_flow_item_ipv4 *spec = item->spec; 2434 const struct rte_flow_item_ipv4 nic_mask = { 2435 .hdr = { 2436 .src_addr = RTE_BE32(0xffffffff), 2437 .dst_addr = RTE_BE32(0xffffffff), 2438 .type_of_service = 0xff, 2439 .next_proto_id = 0xff, 2440 }, 2441 }; 2442 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 2443 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 2444 MLX5_FLOW_LAYER_OUTER_L3; 2445 const uint64_t l4m = tunnel ? 
MLX5_FLOW_LAYER_INNER_L4 : 2446 MLX5_FLOW_LAYER_OUTER_L4; 2447 int ret; 2448 uint8_t next_proto = 0xFF; 2449 const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 | 2450 MLX5_FLOW_LAYER_OUTER_VLAN | 2451 MLX5_FLOW_LAYER_INNER_VLAN); 2452 2453 if ((last_item & l2_vlan) && ether_type && 2454 ether_type != RTE_ETHER_TYPE_IPV4) 2455 return rte_flow_error_set(error, EINVAL, 2456 RTE_FLOW_ERROR_TYPE_ITEM, item, 2457 "IPv4 cannot follow L2/VLAN layer " 2458 "which ether type is not IPv4"); 2459 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) { 2460 if (mask && spec) 2461 next_proto = mask->hdr.next_proto_id & 2462 spec->hdr.next_proto_id; 2463 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) 2464 return rte_flow_error_set(error, EINVAL, 2465 RTE_FLOW_ERROR_TYPE_ITEM, 2466 item, 2467 "multiple tunnel " 2468 "not supported"); 2469 } 2470 if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) 2471 return rte_flow_error_set(error, EINVAL, 2472 RTE_FLOW_ERROR_TYPE_ITEM, item, 2473 "wrong tunnel type - IPv6 specified " 2474 "but IPv4 item provided"); 2475 if (item_flags & l3m) 2476 return rte_flow_error_set(error, ENOTSUP, 2477 RTE_FLOW_ERROR_TYPE_ITEM, item, 2478 "multiple L3 layers not supported"); 2479 else if (item_flags & l4m) 2480 return rte_flow_error_set(error, EINVAL, 2481 RTE_FLOW_ERROR_TYPE_ITEM, item, 2482 "L3 cannot follow an L4 layer."); 2483 else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && 2484 !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) 2485 return rte_flow_error_set(error, EINVAL, 2486 RTE_FLOW_ERROR_TYPE_ITEM, item, 2487 "L3 cannot follow an NVGRE layer."); 2488 if (!mask) 2489 mask = &rte_flow_item_ipv4_mask; 2490 else if (mask->hdr.next_proto_id != 0 && 2491 mask->hdr.next_proto_id != 0xff) 2492 return rte_flow_error_set(error, EINVAL, 2493 RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask, 2494 "partial mask is not supported" 2495 " for protocol"); 2496 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 2497 acc_mask ? (const uint8_t *)acc_mask 2498 : (const uint8_t *)&nic_mask, 2499 sizeof(struct rte_flow_item_ipv4), 2500 range_accepted, error); 2501 if (ret < 0) 2502 return ret; 2503 return 0; 2504 } 2505 2506 /** 2507 * Validate IPV6 item. 2508 * 2509 * @param[in] item 2510 * Item specification. 2511 * @param[in] item_flags 2512 * Bit-fields that holds the items detected until now. 2513 * @param[in] last_item 2514 * Previous validated item in the pattern items. 2515 * @param[in] ether_type 2516 * Type in the ethernet layer header (including dot1q). 2517 * @param[in] acc_mask 2518 * Acceptable mask, if NULL default internal default mask 2519 * will be used to check whether item fields are supported. 2520 * @param[out] error 2521 * Pointer to error structure. 2522 * 2523 * @return 2524 * 0 on success, a negative errno value otherwise and rte_errno is set. 
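 *
 * The next header may not be an IPv6 extension header (hop-by-hop,
 * routing, fragment, ESP, AH or destination options); a sketch of an
 * accepted spec/mask pair (the protocol value is illustrative):
 *
 *   struct rte_flow_item_ipv6 ipv6_spec = { .hdr.proto = IPPROTO_UDP };
 *   struct rte_flow_item_ipv6 ipv6_mask = { .hdr.proto = 0xff };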
 */
int
mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     uint64_t last_item,
			     uint16_t ether_type,
			     const struct rte_flow_item_ipv6 *acc_mask,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv6 *mask = item->mask;
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
		},
	};
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;
	uint8_t next_proto = 0xFF;
	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
				  MLX5_FLOW_LAYER_OUTER_VLAN |
				  MLX5_FLOW_LAYER_INNER_VLAN);

	if ((last_item & l2_vlan) && ether_type &&
	    ether_type != RTE_ETHER_TYPE_IPV6)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv6 cannot follow L2/VLAN layer "
					  "which ether type is not IPv6");
	if (mask && mask->hdr.proto == UINT8_MAX && spec)
		next_proto = spec->hdr.proto;
	if (item_flags & MLX5_FLOW_LAYER_TUNNEL) {
		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "multiple tunnel "
						  "not supported");
	}
	if (next_proto == IPPROTO_HOPOPTS ||
	    next_proto == IPPROTO_ROUTING ||
	    next_proto == IPPROTO_FRAGMENT ||
	    next_proto == IPPROTO_ESP ||
	    next_proto == IPPROTO_AH ||
	    next_proto == IPPROTO_DSTOPTS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv6 proto (next header) should "
					  "not be set as extension header");
	if (item_flags & MLX5_FLOW_LAYER_IPIP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "wrong tunnel type - IPv4 specified "
					  "but IPv6 item provided");
	if (item_flags & l3m)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L3 layers not supported");
	else if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an L4 layer.");
	else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
		 !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an NVGRE layer.");
	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					acc_mask ? (const uint8_t *)acc_mask
						 : (const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_ipv6),
					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate UDP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    uint8_t target_protocol,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_udp *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with UDP layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 is mandatory to filter on L4");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_udp_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_udp_mask,
		 sizeof(struct rte_flow_item_udp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
		 error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate TCP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[in] flow_mask
 *   mlx5 flow-specific (DV, verbs, etc.) supported header fields mask.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    uint8_t target_protocol,
			    const struct rte_flow_item_tcp *flow_mask,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_tcp *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	MLX5_ASSERT(flow_mask);
	if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with TCP layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 is mandatory to filter on L4");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_tcp_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)flow_mask,
		 sizeof(struct rte_flow_item_tcp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
		 error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate VXLAN item.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] udp_dport
 *   UDP destination port.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
			      uint16_t udp_dport,
			      const struct rte_flow_item *item,
			      uint64_t item_flags,
			      const struct rte_flow_attr *attr,
			      struct rte_flow_error *error)
{
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	int ret;
	struct mlx5_priv *priv = dev->data->dev_private;
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };
	const struct rte_flow_item_vxlan nic_mask = {
		.vni = "\xff\xff\xff",
		.rsvd1 = 0xff,
	};
	const struct rte_flow_item_vxlan *valid_mask;

	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple tunnel layers not"
					  " supported");
	valid_mask = &rte_flow_item_vxlan_mask;
	/*
	 * Verify only UDPv4 is present as defined in
	 * https://tools.ietf.org/html/rfc7348
	 */
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "no outer UDP layer found");
	if (!mask)
		mask = &rte_flow_item_vxlan_mask;

	if (priv->sh->steering_format_version !=
	    MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 ||
	    !udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN) {
		/* FDB domain & NIC domain non-zero group */
		if ((attr->transfer || attr->group) && priv->sh->misc5_cap)
			valid_mask = &nic_mask;
		/* Group zero in NIC domain */
		if (!attr->group && !attr->transfer &&
		    priv->sh->tunnel_header_0_1)
			valid_mask = &nic_mask;
	}
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)valid_mask,
		 sizeof(struct rte_flow_item_vxlan),
		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret < 0)
		return ret;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		memcpy(&id.vni[1], mask->vni, 3);
	}
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VXLAN tunnel must be fully defined");
	return 0;
}

/**
 * Validate VXLAN_GPE item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
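 *
 * Acceptance additionally requires the l3_vxlan_en=1 device argument and
 * a fully specified outer stack; a matching pattern sketch (the VNI value
 * is an assumption):
 *
 *   struct rte_flow_item_vxlan_gpe gpe_spec = {
 *           .vni = { 0x12, 0x34, 0x56 },
 *   };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE, .spec = &gpe_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };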
 */
int
mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
				  uint64_t item_flags,
				  struct rte_eth_dev *dev,
				  struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
	int ret;
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!priv->sh->config.l3_vxlan_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 VXLAN is not enabled by device"
					  " parameter and/or not configured in"
					  " firmware");
	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple tunnel layers not"
					  " supported");
	/*
	 * Verify only UDPv4 is present as required by the VXLAN-GPE
	 * specification (draft-ietf-nvo3-vxlan-gpe).
	 */
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "no outer UDP layer found");
	if (!mask)
		mask = &rte_flow_item_vxlan_gpe_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
		 sizeof(struct rte_flow_item_vxlan_gpe),
		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret < 0)
		return ret;
	if (spec) {
		if (spec->protocol)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "VxLAN-GPE protocol"
						  " not supported");
		memcpy(&id.vni[1], spec->vni, 3);
		memcpy(&id.vni[1], mask->vni, 3);
	}
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VXLAN-GPE tunnel must be fully"
					  " defined");
	return 0;
}

/**
 * Validate GRE Key item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit flags to mark detected items.
 * @param[in] gre_item
 *   Pointer to gre_item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
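 *
 * When the preceding GRE spec is given and its mask covers the K bit
 * (0x2000 in c_rsvd0_ver), the spec must have that bit set; a sketch of
 * the item pair (the key value is an assumption):
 *
 *   struct rte_flow_item_gre gre_spec = {
 *           .c_rsvd0_ver = RTE_BE16(0x2000),
 *   };
 *   rte_be32_t key_spec = RTE_BE32(0x12345678);
 *   struct rte_flow_item items[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_GRE, .spec = &gre_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_GRE_KEY, .spec = &key_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };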
 */
int
mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
				uint64_t item_flags,
				const struct rte_flow_item *gre_item,
				struct rte_flow_error *error)
{
	const rte_be32_t *mask = item->mask;
	int ret = 0;
	rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
	const struct rte_flow_item_gre *gre_spec;
	const struct rte_flow_item_gre *gre_mask;

	if (item_flags & MLX5_FLOW_LAYER_GRE_KEY)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Multiple GRE keys not supported");
	if (!(item_flags & MLX5_FLOW_LAYER_GRE))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "No preceding GRE header");
	if (item_flags & MLX5_FLOW_LAYER_INNER)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "GRE key following a wrong item");
	gre_mask = gre_item->mask;
	if (!gre_mask)
		gre_mask = &rte_flow_item_gre_mask;
	gre_spec = gre_item->spec;
	if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
	    !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Key bit must be on");

	if (!mask)
		mask = &gre_key_default_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&gre_key_default_mask,
		 sizeof(rte_be32_t), MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	return ret;
}

/**
 * Validate GRE optional item.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit flags to mark detected items.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] gre_item
 *   Pointer to gre_item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
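 *
 * A sketch matching only the GRE key option (the key value is an
 * assumption); checksum and sequence matching additionally depend on the
 * device capabilities checked below:
 *
 *   struct rte_flow_item_gre_opt opt_spec = {
 *           .key.key = RTE_BE32(0x12345678),
 *   };
 *   struct rte_flow_item_gre_opt opt_mask = {
 *           .key.key = RTE_BE32(0xffffffff),
 *   };
 *   struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_GRE_OPTION,
 *           .spec = &opt_spec,
 *           .mask = &opt_mask,
 *   };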
2967 */ 2968 int 2969 mlx5_flow_validate_item_gre_option(struct rte_eth_dev *dev, 2970 const struct rte_flow_item *item, 2971 uint64_t item_flags, 2972 const struct rte_flow_attr *attr, 2973 const struct rte_flow_item *gre_item, 2974 struct rte_flow_error *error) 2975 { 2976 const struct rte_flow_item_gre *gre_spec = gre_item->spec; 2977 const struct rte_flow_item_gre *gre_mask = gre_item->mask; 2978 const struct rte_flow_item_gre_opt *spec = item->spec; 2979 const struct rte_flow_item_gre_opt *mask = item->mask; 2980 struct mlx5_priv *priv = dev->data->dev_private; 2981 int ret = 0; 2982 struct rte_flow_item_gre_opt nic_mask = { 2983 .checksum_rsvd = { 2984 .checksum = RTE_BE16(UINT16_MAX), 2985 .reserved1 = 0x0, 2986 }, 2987 .key = { 2988 .key = RTE_BE32(UINT32_MAX), 2989 }, 2990 .sequence = { 2991 .sequence = RTE_BE32(UINT32_MAX), 2992 }, 2993 }; 2994 2995 if (!(item_flags & MLX5_FLOW_LAYER_GRE)) 2996 return rte_flow_error_set(error, ENOTSUP, 2997 RTE_FLOW_ERROR_TYPE_ITEM, item, 2998 "No preceding GRE header"); 2999 if (item_flags & MLX5_FLOW_LAYER_INNER) 3000 return rte_flow_error_set(error, ENOTSUP, 3001 RTE_FLOW_ERROR_TYPE_ITEM, item, 3002 "GRE option following a wrong item"); 3003 if (!spec || !mask) 3004 return rte_flow_error_set(error, EINVAL, 3005 RTE_FLOW_ERROR_TYPE_ITEM, item, 3006 "At least one field gre_option(checksum/key/sequence) must be specified"); 3007 if (!gre_mask) 3008 gre_mask = &rte_flow_item_gre_mask; 3009 if (mask->checksum_rsvd.checksum) 3010 if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x8000)) && 3011 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x8000))) 3012 return rte_flow_error_set(error, EINVAL, 3013 RTE_FLOW_ERROR_TYPE_ITEM, 3014 item, 3015 "Checksum bit must be on"); 3016 if (mask->key.key) 3017 if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) && 3018 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000))) 3019 return rte_flow_error_set(error, EINVAL, 3020 RTE_FLOW_ERROR_TYPE_ITEM, 3021 item, "Key bit must be on"); 3022 if (mask->sequence.sequence) 3023 if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x1000)) && 3024 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x1000))) 3025 return rte_flow_error_set(error, EINVAL, 3026 RTE_FLOW_ERROR_TYPE_ITEM, 3027 item, 3028 "Sequence bit must be on"); 3029 if (mask->checksum_rsvd.checksum || mask->sequence.sequence) { 3030 if (priv->sh->steering_format_version == 3031 MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 || 3032 ((attr->group || attr->transfer) && 3033 !priv->sh->misc5_cap) || 3034 (!(priv->sh->tunnel_header_0_1 && 3035 priv->sh->tunnel_header_2_3) && 3036 !attr->group && !attr->transfer)) 3037 return rte_flow_error_set(error, EINVAL, 3038 RTE_FLOW_ERROR_TYPE_ITEM, 3039 item, 3040 "Checksum/Sequence not supported"); 3041 } 3042 ret = mlx5_flow_item_acceptable 3043 (item, (const uint8_t *)mask, 3044 (const uint8_t *)&nic_mask, 3045 sizeof(struct rte_flow_item_gre_opt), 3046 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 3047 return ret; 3048 } 3049 3050 /** 3051 * Validate GRE item. 3052 * 3053 * @param[in] item 3054 * Item specification. 3055 * @param[in] item_flags 3056 * Bit flags to mark detected items. 3057 * @param[in] target_protocol 3058 * The next protocol in the previous item. 3059 * @param[out] error 3060 * Pointer to error structure. 3061 * 3062 * @return 3063 * 0 on success, a negative errno value otherwise and rte_errno is set. 
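 *
 * GRE must be preceded by an outer L3 layer and cannot be nested in
 * another tunnel; a minimal accepted pattern sketch:
 *
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_GRE },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };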
 */
int
mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    uint8_t target_protocol,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_gre *spec __rte_unused = item->spec;
	const struct rte_flow_item_gre *mask = item->mask;
	int ret;
	const struct rte_flow_item_gre nic_mask = {
		.c_rsvd0_ver = RTE_BE16(0xB000),
		.protocol = RTE_BE16(UINT16_MAX),
	};

	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with this GRE layer");
	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple tunnel layers not"
					  " supported");
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 Layer is missing");
	if (!mask)
		mask = &rte_flow_item_gre_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&nic_mask,
		 sizeof(struct rte_flow_item_gre), MLX5_ITEM_RANGE_NOT_ACCEPTED,
		 error);
	if (ret < 0)
		return ret;
#ifndef HAVE_MLX5DV_DR
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
	if (spec && (spec->protocol & mask->protocol))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "without MPLS support the"
					  " specification cannot be used for"
					  " filtering");
#endif
#endif
	return 0;
}

/**
 * Validate Geneve item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
			       uint64_t item_flags,
			       struct rte_eth_dev *dev,
			       struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_item_geneve *spec = item->spec;
	const struct rte_flow_item_geneve *mask = item->mask;
	int ret;
	uint16_t gbhdr;
	uint8_t opt_len = priv->sh->cdev->config.hca_attr.geneve_max_opt_len ?
			  MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
	const struct rte_flow_item_geneve nic_mask = {
		.ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
		.vni = "\xff\xff\xff",
		.protocol = RTE_BE16(UINT16_MAX),
	};

	if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_geneve_rx)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 Geneve is not enabled by device"
					  " parameter and/or not configured in"
					  " firmware");
	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple tunnel layers not"
					  " supported");
	/*
	 * Verify only UDPv4 is present as required by the Geneve
	 * specification (RFC 8926).
	 */
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "no outer UDP layer found");
	if (!mask)
		mask = &rte_flow_item_geneve_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&nic_mask,
		 sizeof(struct rte_flow_item_geneve),
		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret)
		return ret;
	if (spec) {
		gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0);
		if (MLX5_GENEVE_VER_VAL(gbhdr) ||
		    MLX5_GENEVE_CRITO_VAL(gbhdr) ||
		    MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "Geneve protocol unsupported"
						  " fields are being used");
		if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len)
			return rte_flow_error_set
					(error, ENOTSUP,
					 RTE_FLOW_ERROR_TYPE_ITEM,
					 item,
					 "Unsupported Geneve options length");
	}
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
		return rte_flow_error_set
				    (error, ENOTSUP,
				     RTE_FLOW_ERROR_TYPE_ITEM, item,
				     "Geneve tunnel must be fully defined");
	return 0;
}

/**
 * Validate Geneve TLV option item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] geneve_item
 *   Previous GENEVE item specification.
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
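 *
 * Class, type and length must be matched with full masks, mirroring the
 * full_mask check below; a sketch (class, type and data values are
 * assumptions, option_len counts 4-byte words):
 *
 *   uint32_t opt_data[1] = { RTE_BE32(0x11223344) };
 *   struct rte_flow_item_geneve_opt opt_spec = {
 *           .option_class = RTE_BE16(0x0102),
 *           .option_type = 0x42,
 *           .option_len = 1,
 *           .data = opt_data,
 *   };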
3219 */ 3220 int 3221 mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item, 3222 uint64_t last_item, 3223 const struct rte_flow_item *geneve_item, 3224 struct rte_eth_dev *dev, 3225 struct rte_flow_error *error) 3226 { 3227 struct mlx5_priv *priv = dev->data->dev_private; 3228 struct mlx5_dev_ctx_shared *sh = priv->sh; 3229 struct mlx5_geneve_tlv_option_resource *geneve_opt_resource; 3230 struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr; 3231 uint8_t data_max_supported = 3232 hca_attr->max_geneve_tlv_option_data_len * 4; 3233 const struct rte_flow_item_geneve *geneve_spec; 3234 const struct rte_flow_item_geneve *geneve_mask; 3235 const struct rte_flow_item_geneve_opt *spec = item->spec; 3236 const struct rte_flow_item_geneve_opt *mask = item->mask; 3237 unsigned int i; 3238 unsigned int data_len; 3239 uint8_t tlv_option_len; 3240 uint16_t optlen_m, optlen_v; 3241 const struct rte_flow_item_geneve_opt full_mask = { 3242 .option_class = RTE_BE16(0xffff), 3243 .option_type = 0xff, 3244 .option_len = 0x1f, 3245 }; 3246 3247 if (!mask) 3248 mask = &rte_flow_item_geneve_opt_mask; 3249 if (!spec) 3250 return rte_flow_error_set 3251 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 3252 "Geneve TLV opt class/type/length must be specified"); 3253 if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK) 3254 return rte_flow_error_set 3255 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 3256 "Geneve TLV opt length exceeds the limit (31)"); 3257 /* Check if class type and length masks are full. */ 3258 if (full_mask.option_class != mask->option_class || 3259 full_mask.option_type != mask->option_type || 3260 full_mask.option_len != (mask->option_len & full_mask.option_len)) 3261 return rte_flow_error_set 3262 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 3263 "Geneve TLV opt class/type/length masks must be full"); 3264 /* Check if length is supported */ 3265 if ((uint32_t)spec->option_len > 3266 hca_attr->max_geneve_tlv_option_data_len) 3267 return rte_flow_error_set 3268 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 3269 "Geneve TLV opt length not supported"); 3270 if (hca_attr->max_geneve_tlv_options > 1) 3271 DRV_LOG(DEBUG, 3272 "max_geneve_tlv_options supports more than 1 option"); 3273 /* Check GENEVE item preceding. */ 3274 if (!geneve_item || !(last_item & MLX5_FLOW_LAYER_GENEVE)) 3275 return rte_flow_error_set 3276 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 3277 "Geneve opt item must be preceded with Geneve item"); 3278 geneve_spec = geneve_item->spec; 3279 geneve_mask = geneve_item->mask ? geneve_item->mask : 3280 &rte_flow_item_geneve_mask; 3281 /* Check if GENEVE TLV option size doesn't exceed option length */ 3282 if (geneve_spec && (geneve_mask->ver_opt_len_o_c_rsvd0 || 3283 geneve_spec->ver_opt_len_o_c_rsvd0)) { 3284 tlv_option_len = spec->option_len & mask->option_len; 3285 optlen_v = rte_be_to_cpu_16(geneve_spec->ver_opt_len_o_c_rsvd0); 3286 optlen_v = MLX5_GENEVE_OPTLEN_VAL(optlen_v); 3287 optlen_m = rte_be_to_cpu_16(geneve_mask->ver_opt_len_o_c_rsvd0); 3288 optlen_m = MLX5_GENEVE_OPTLEN_VAL(optlen_m); 3289 if ((optlen_v & optlen_m) <= tlv_option_len) 3290 return rte_flow_error_set 3291 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 3292 "GENEVE TLV option length exceeds optlen"); 3293 } 3294 /* Check if length is 0 or data is 0. 
*/ 3295 if (spec->data == NULL || spec->option_len == 0) 3296 return rte_flow_error_set 3297 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 3298 "Geneve TLV opt with zero data/length not supported"); 3299 /* Check not all data & mask are 0. */ 3300 data_len = spec->option_len * 4; 3301 if (mask->data == NULL) { 3302 for (i = 0; i < data_len; i++) 3303 if (spec->data[i]) 3304 break; 3305 if (i == data_len) 3306 return rte_flow_error_set(error, ENOTSUP, 3307 RTE_FLOW_ERROR_TYPE_ITEM, item, 3308 "Can't match on Geneve option data 0"); 3309 } else { 3310 for (i = 0; i < data_len; i++) 3311 if (spec->data[i] & mask->data[i]) 3312 break; 3313 if (i == data_len) 3314 return rte_flow_error_set(error, ENOTSUP, 3315 RTE_FLOW_ERROR_TYPE_ITEM, item, 3316 "Can't match on Geneve option data and mask 0"); 3317 /* Check data mask supported. */ 3318 for (i = data_max_supported; i < data_len ; i++) 3319 if (mask->data[i]) 3320 return rte_flow_error_set(error, ENOTSUP, 3321 RTE_FLOW_ERROR_TYPE_ITEM, item, 3322 "Data mask is of unsupported size"); 3323 } 3324 /* Check GENEVE option is supported in NIC. */ 3325 if (!hca_attr->geneve_tlv_opt) 3326 return rte_flow_error_set 3327 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, 3328 "Geneve TLV opt not supported"); 3329 /* Check if we already have geneve option with different type/class. */ 3330 rte_spinlock_lock(&sh->geneve_tlv_opt_sl); 3331 geneve_opt_resource = sh->geneve_tlv_option_resource; 3332 if (geneve_opt_resource != NULL) 3333 if (geneve_opt_resource->option_class != spec->option_class || 3334 geneve_opt_resource->option_type != spec->option_type || 3335 geneve_opt_resource->length != spec->option_len) { 3336 rte_spinlock_unlock(&sh->geneve_tlv_opt_sl); 3337 return rte_flow_error_set(error, ENOTSUP, 3338 RTE_FLOW_ERROR_TYPE_ITEM, item, 3339 "Only one Geneve TLV option supported"); 3340 } 3341 rte_spinlock_unlock(&sh->geneve_tlv_opt_sl); 3342 return 0; 3343 } 3344 3345 /** 3346 * Validate MPLS item. 3347 * 3348 * @param[in] dev 3349 * Pointer to the rte_eth_dev structure. 3350 * @param[in] item 3351 * Item specification. 3352 * @param[in] item_flags 3353 * Bit-fields that holds the items detected until now. 3354 * @param[in] prev_layer 3355 * The protocol layer indicated in previous item. 3356 * @param[out] error 3357 * Pointer to error structure. 3358 * 3359 * @return 3360 * 0 on success, a negative errno value otherwise and rte_errno is set. 3361 */ 3362 int 3363 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, 3364 const struct rte_flow_item *item __rte_unused, 3365 uint64_t item_flags __rte_unused, 3366 uint64_t prev_layer __rte_unused, 3367 struct rte_flow_error *error) 3368 { 3369 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT 3370 const struct rte_flow_item_mpls *mask = item->mask; 3371 struct mlx5_priv *priv = dev->data->dev_private; 3372 int ret; 3373 3374 if (!priv->sh->dev_cap.mpls_en) 3375 return rte_flow_error_set(error, ENOTSUP, 3376 RTE_FLOW_ERROR_TYPE_ITEM, item, 3377 "MPLS not supported or" 3378 " disabled in firmware" 3379 " configuration."); 3380 /* MPLS over UDP, GRE is allowed */ 3381 if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L4_UDP | 3382 MLX5_FLOW_LAYER_GRE | 3383 MLX5_FLOW_LAYER_GRE_KEY))) 3384 return rte_flow_error_set(error, EINVAL, 3385 RTE_FLOW_ERROR_TYPE_ITEM, item, 3386 "protocol filtering not compatible" 3387 " with MPLS layer"); 3388 /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. 
*/ 3389 if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) && 3390 !(item_flags & MLX5_FLOW_LAYER_GRE)) 3391 return rte_flow_error_set(error, ENOTSUP, 3392 RTE_FLOW_ERROR_TYPE_ITEM, item, 3393 "multiple tunnel layers not" 3394 " supported"); 3395 if (!mask) 3396 mask = &rte_flow_item_mpls_mask; 3397 ret = mlx5_flow_item_acceptable 3398 (item, (const uint8_t *)mask, 3399 (const uint8_t *)&rte_flow_item_mpls_mask, 3400 sizeof(struct rte_flow_item_mpls), 3401 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 3402 if (ret < 0) 3403 return ret; 3404 return 0; 3405 #else 3406 return rte_flow_error_set(error, ENOTSUP, 3407 RTE_FLOW_ERROR_TYPE_ITEM, item, 3408 "MPLS is not supported by Verbs, please" 3409 " update."); 3410 #endif 3411 } 3412 3413 /** 3414 * Validate NVGRE item. 3415 * 3416 * @param[in] item 3417 * Item specification. 3418 * @param[in] item_flags 3419 * Bit flags to mark detected items. 3420 * @param[in] target_protocol 3421 * The next protocol in the previous item. 3422 * @param[out] error 3423 * Pointer to error structure. 3424 * 3425 * @return 3426 * 0 on success, a negative errno value otherwise and rte_errno is set. 3427 */ 3428 int 3429 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item, 3430 uint64_t item_flags, 3431 uint8_t target_protocol, 3432 struct rte_flow_error *error) 3433 { 3434 const struct rte_flow_item_nvgre *mask = item->mask; 3435 int ret; 3436 3437 if (target_protocol != 0xff && target_protocol != IPPROTO_GRE) 3438 return rte_flow_error_set(error, EINVAL, 3439 RTE_FLOW_ERROR_TYPE_ITEM, item, 3440 "protocol filtering not compatible" 3441 " with this GRE layer"); 3442 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 3443 return rte_flow_error_set(error, ENOTSUP, 3444 RTE_FLOW_ERROR_TYPE_ITEM, item, 3445 "multiple tunnel layers not" 3446 " supported"); 3447 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3)) 3448 return rte_flow_error_set(error, ENOTSUP, 3449 RTE_FLOW_ERROR_TYPE_ITEM, item, 3450 "L3 Layer is missing"); 3451 if (!mask) 3452 mask = &rte_flow_item_nvgre_mask; 3453 ret = mlx5_flow_item_acceptable 3454 (item, (const uint8_t *)mask, 3455 (const uint8_t *)&rte_flow_item_nvgre_mask, 3456 sizeof(struct rte_flow_item_nvgre), 3457 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 3458 if (ret < 0) 3459 return ret; 3460 return 0; 3461 } 3462 3463 /** 3464 * Validate eCPRI item. 3465 * 3466 * @param[in] item 3467 * Item specification. 3468 * @param[in] item_flags 3469 * Bit-fields that holds the items detected until now. 3470 * @param[in] last_item 3471 * Previous validated item in the pattern items. 3472 * @param[in] ether_type 3473 * Type in the ethernet layer header (including dot1q). 3474 * @param[in] acc_mask 3475 * Acceptable mask, if NULL default internal default mask 3476 * will be used to check whether item fields are supported. 3477 * @param[out] error 3478 * Pointer to error structure. 3479 * 3480 * @return 3481 * 0 on success, a negative errno value otherwise and rte_errno is set. 
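 *
 * The type field of the common header must be masked fully or not at all,
 * and a message-body match requires the type mask; a sketch matching IQ
 * data messages, mirroring the nic_mask construction below:
 *
 *   struct rte_flow_item_ecpri ecpri_spec = {
 *           .hdr.common.u32 = RTE_BE32(((const struct rte_ecpri_common_hdr) {
 *                   .type = RTE_ECPRI_MSG_TYPE_IQ_DATA,
 *           }).u32),
 *   };
 *   struct rte_flow_item_ecpri ecpri_mask = {
 *           .hdr.common.u32 = RTE_BE32(((const struct rte_ecpri_common_hdr) {
 *                   .type = 0xff,
 *           }).u32),
 *   };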
3482 */ 3483 int 3484 mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item, 3485 uint64_t item_flags, 3486 uint64_t last_item, 3487 uint16_t ether_type, 3488 const struct rte_flow_item_ecpri *acc_mask, 3489 struct rte_flow_error *error) 3490 { 3491 const struct rte_flow_item_ecpri *mask = item->mask; 3492 const struct rte_flow_item_ecpri nic_mask = { 3493 .hdr = { 3494 .common = { 3495 .u32 = 3496 RTE_BE32(((const struct rte_ecpri_common_hdr) { 3497 .type = 0xFF, 3498 }).u32), 3499 }, 3500 .dummy[0] = 0xFFFFFFFF, 3501 }, 3502 }; 3503 const uint64_t outer_l2_vlan = (MLX5_FLOW_LAYER_OUTER_L2 | 3504 MLX5_FLOW_LAYER_OUTER_VLAN); 3505 struct rte_flow_item_ecpri mask_lo; 3506 3507 if (!(last_item & outer_l2_vlan) && 3508 last_item != MLX5_FLOW_LAYER_OUTER_L4_UDP) 3509 return rte_flow_error_set(error, EINVAL, 3510 RTE_FLOW_ERROR_TYPE_ITEM, item, 3511 "eCPRI can only follow L2/VLAN layer or UDP layer"); 3512 if ((last_item & outer_l2_vlan) && ether_type && 3513 ether_type != RTE_ETHER_TYPE_ECPRI) 3514 return rte_flow_error_set(error, EINVAL, 3515 RTE_FLOW_ERROR_TYPE_ITEM, item, 3516 "eCPRI cannot follow L2/VLAN layer which ether type is not 0xAEFE"); 3517 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 3518 return rte_flow_error_set(error, EINVAL, 3519 RTE_FLOW_ERROR_TYPE_ITEM, item, 3520 "eCPRI with tunnel is not supported right now"); 3521 if (item_flags & MLX5_FLOW_LAYER_OUTER_L3) 3522 return rte_flow_error_set(error, ENOTSUP, 3523 RTE_FLOW_ERROR_TYPE_ITEM, item, 3524 "multiple L3 layers not supported"); 3525 else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP) 3526 return rte_flow_error_set(error, EINVAL, 3527 RTE_FLOW_ERROR_TYPE_ITEM, item, 3528 "eCPRI cannot coexist with a TCP layer"); 3529 /* In specification, eCPRI could be over UDP layer. */ 3530 else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP) 3531 return rte_flow_error_set(error, EINVAL, 3532 RTE_FLOW_ERROR_TYPE_ITEM, item, 3533 "eCPRI over UDP layer is not yet supported right now"); 3534 /* Mask for type field in common header could be zero. */ 3535 if (!mask) 3536 mask = &rte_flow_item_ecpri_mask; 3537 mask_lo.hdr.common.u32 = rte_be_to_cpu_32(mask->hdr.common.u32); 3538 /* Input mask is in big-endian format. */ 3539 if (mask_lo.hdr.common.type != 0 && mask_lo.hdr.common.type != 0xff) 3540 return rte_flow_error_set(error, EINVAL, 3541 RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask, 3542 "partial mask is not supported for protocol"); 3543 else if (mask_lo.hdr.common.type == 0 && mask->hdr.dummy[0] != 0) 3544 return rte_flow_error_set(error, EINVAL, 3545 RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask, 3546 "message header mask must be after a type mask"); 3547 return mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 3548 acc_mask ? 
(const uint8_t *)acc_mask 3549 : (const uint8_t *)&nic_mask, 3550 sizeof(struct rte_flow_item_ecpri), 3551 MLX5_ITEM_RANGE_NOT_ACCEPTED, error); 3552 } 3553 3554 static int 3555 flow_null_validate(struct rte_eth_dev *dev __rte_unused, 3556 const struct rte_flow_attr *attr __rte_unused, 3557 const struct rte_flow_item items[] __rte_unused, 3558 const struct rte_flow_action actions[] __rte_unused, 3559 bool external __rte_unused, 3560 int hairpin __rte_unused, 3561 struct rte_flow_error *error) 3562 { 3563 return rte_flow_error_set(error, ENOTSUP, 3564 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 3565 } 3566 3567 static struct mlx5_flow * 3568 flow_null_prepare(struct rte_eth_dev *dev __rte_unused, 3569 const struct rte_flow_attr *attr __rte_unused, 3570 const struct rte_flow_item items[] __rte_unused, 3571 const struct rte_flow_action actions[] __rte_unused, 3572 struct rte_flow_error *error) 3573 { 3574 rte_flow_error_set(error, ENOTSUP, 3575 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 3576 return NULL; 3577 } 3578 3579 static int 3580 flow_null_translate(struct rte_eth_dev *dev __rte_unused, 3581 struct mlx5_flow *dev_flow __rte_unused, 3582 const struct rte_flow_attr *attr __rte_unused, 3583 const struct rte_flow_item items[] __rte_unused, 3584 const struct rte_flow_action actions[] __rte_unused, 3585 struct rte_flow_error *error) 3586 { 3587 return rte_flow_error_set(error, ENOTSUP, 3588 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 3589 } 3590 3591 static int 3592 flow_null_apply(struct rte_eth_dev *dev __rte_unused, 3593 struct rte_flow *flow __rte_unused, 3594 struct rte_flow_error *error) 3595 { 3596 return rte_flow_error_set(error, ENOTSUP, 3597 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 3598 } 3599 3600 static void 3601 flow_null_remove(struct rte_eth_dev *dev __rte_unused, 3602 struct rte_flow *flow __rte_unused) 3603 { 3604 } 3605 3606 static void 3607 flow_null_destroy(struct rte_eth_dev *dev __rte_unused, 3608 struct rte_flow *flow __rte_unused) 3609 { 3610 } 3611 3612 static int 3613 flow_null_query(struct rte_eth_dev *dev __rte_unused, 3614 struct rte_flow *flow __rte_unused, 3615 const struct rte_flow_action *actions __rte_unused, 3616 void *data __rte_unused, 3617 struct rte_flow_error *error) 3618 { 3619 return rte_flow_error_set(error, ENOTSUP, 3620 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 3621 } 3622 3623 static int 3624 flow_null_sync_domain(struct rte_eth_dev *dev __rte_unused, 3625 uint32_t domains __rte_unused, 3626 uint32_t flags __rte_unused) 3627 { 3628 return 0; 3629 } 3630 3631 /* Void driver to protect from null pointer reference. */ 3632 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = { 3633 .validate = flow_null_validate, 3634 .prepare = flow_null_prepare, 3635 .translate = flow_null_translate, 3636 .apply = flow_null_apply, 3637 .remove = flow_null_remove, 3638 .destroy = flow_null_destroy, 3639 .query = flow_null_query, 3640 .sync_domain = flow_null_sync_domain, 3641 }; 3642 3643 /** 3644 * Select flow driver type according to flow attributes and device 3645 * configuration. 3646 * 3647 * @param[in] dev 3648 * Pointer to the dev structure. 3649 * @param[in] attr 3650 * Pointer to the flow attributes. 3651 * 3652 * @return 3653 * flow driver type, MLX5_FLOW_TYPE_MAX otherwise. 
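 *
 * Behavioral sketch (assuming no OS-forced type and dv_flow_en == 1):
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   // non-transfer rule: flow_get_drv_type() selects MLX5_FLOW_TYPE_DV
 *   attr = (struct rte_flow_attr){ .transfer = 1 };
 *   // transfer rule with dv_esw_en set: MLX5_FLOW_TYPE_DV as well;
 *   // with dv_flow_en == 2 any rule selects MLX5_FLOW_TYPE_HW instead.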
3654 */ 3655 static enum mlx5_flow_drv_type 3656 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) 3657 { 3658 struct mlx5_priv *priv = dev->data->dev_private; 3659 /* The OS may determine a specific flow type (DV, VERBS) first. */ 3660 enum mlx5_flow_drv_type type = mlx5_flow_os_get_type(); 3661 3662 if (type != MLX5_FLOW_TYPE_MAX) 3663 return type; 3664 /* 3665 * Currently when dv_flow_en == 2, only HW steering engine is 3666 * supported. New engines can also be chosen here if ready. 3667 */ 3668 if (priv->sh->config.dv_flow_en == 2) 3669 return MLX5_FLOW_TYPE_HW; 3670 /* If there is no OS-specific type, continue with DV/VERBS selection. */ 3671 if (attr->transfer && priv->sh->config.dv_esw_en) 3672 type = MLX5_FLOW_TYPE_DV; 3673 if (!attr->transfer) 3674 type = priv->sh->config.dv_flow_en ? MLX5_FLOW_TYPE_DV : 3675 MLX5_FLOW_TYPE_VERBS; 3676 return type; 3677 } 3678 3679 #define flow_get_drv_ops(type) flow_drv_ops[type] 3680 3681 /** 3682 * Flow driver validation API. This abstracts calling driver specific functions. 3683 * The type of flow driver is determined according to flow attributes. 3684 * 3685 * @param[in] dev 3686 * Pointer to the dev structure. 3687 * @param[in] attr 3688 * Pointer to the flow attributes. 3689 * @param[in] items 3690 * Pointer to the list of items. 3691 * @param[in] actions 3692 * Pointer to the list of actions. 3693 * @param[in] external 3694 * Set when this flow rule is created by a request external to the PMD. 3695 * @param[in] hairpin 3696 * Number of hairpin TX actions, 0 means classic flow. 3697 * @param[out] error 3698 * Pointer to the error structure. 3699 * 3700 * @return 3701 * 0 on success, a negative errno value otherwise and rte_errno is set. 3702 */ 3703 static inline int 3704 flow_drv_validate(struct rte_eth_dev *dev, 3705 const struct rte_flow_attr *attr, 3706 const struct rte_flow_item items[], 3707 const struct rte_flow_action actions[], 3708 bool external, int hairpin, struct rte_flow_error *error) 3709 { 3710 const struct mlx5_flow_driver_ops *fops; 3711 enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr); 3712 3713 fops = flow_get_drv_ops(type); 3714 return fops->validate(dev, attr, items, actions, external, 3715 hairpin, error); 3716 } 3717 3718 /** 3719 * Flow driver preparation API. This abstracts calling driver specific 3720 * functions. Parent flow (rte_flow) should have driver type (drv_type). It 3721 * calculates the size of memory required for device flow, allocates the memory, 3722 * initializes the device flow and returns the pointer. 3723 * 3724 * @note 3725 * This function initializes device flow structure such as dv or verbs in 3726 * struct mlx5_flow. However, it is the caller's responsibility to initialize the 3727 * rest. For example, adding the returned device flow to the flow->dev_flow list and 3728 * setting the backward reference to the flow should be done outside of this function. 3729 * The layers field is not filled either. 3730 * 3731 * @param[in] dev 3732 * Pointer to the dev structure. 3733 * @param[in] attr 3734 * Pointer to the flow attributes. 3735 * @param[in] items 3736 * Pointer to the list of items. 3737 * @param[in] actions 3738 * Pointer to the list of actions. 3739 * @param[in] flow_idx 3740 * The memory pool index of the flow. 3741 * @param[out] error 3742 * Pointer to the error structure. 3743 * 3744 * @return 3745 * Pointer to device flow on success, otherwise NULL and rte_errno is set.
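 *
 * Typical dispatch sequence for one device flow (sketch only; error
 * handling elided, helper names as defined in this file):
 *
 *   struct mlx5_flow *dev_flow;
 *
 *   if (flow_drv_validate(dev, attr, items, actions, true, 0, error))
 *       return -rte_errno;
 *   dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
 *                               flow_idx, error);
 *   if (!dev_flow)
 *       return -rte_errno;
 *   dev_flow->flow = flow;
 *   if (flow_drv_translate(dev, dev_flow, attr, items, actions, error))
 *       return -rte_errno;
 *   if (flow_drv_apply(dev, flow, error))
 *       flow_drv_destroy(dev, flow);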
3746 */ 3747 static inline struct mlx5_flow * 3748 flow_drv_prepare(struct rte_eth_dev *dev, 3749 const struct rte_flow *flow, 3750 const struct rte_flow_attr *attr, 3751 const struct rte_flow_item items[], 3752 const struct rte_flow_action actions[], 3753 uint32_t flow_idx, 3754 struct rte_flow_error *error) 3755 { 3756 const struct mlx5_flow_driver_ops *fops; 3757 enum mlx5_flow_drv_type type = flow->drv_type; 3758 struct mlx5_flow *mlx5_flow = NULL; 3759 3760 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 3761 fops = flow_get_drv_ops(type); 3762 mlx5_flow = fops->prepare(dev, attr, items, actions, error); 3763 if (mlx5_flow) 3764 mlx5_flow->flow_idx = flow_idx; 3765 return mlx5_flow; 3766 } 3767 3768 /** 3769 * Flow driver translation API. This abstracts calling driver specific 3770 * functions. Parent flow (rte_flow) should have driver type (drv_type). It 3771 * translates a generic flow into a driver flow. flow_drv_prepare() must 3772 * precede. 3773 * 3774 * @note 3775 * dev_flow->layers could be filled as a result of parsing during translation 3776 * if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled 3777 * if necessary. As a flow can have multiple dev_flows by RSS flow expansion, 3778 * flow->actions could be overwritten even though all the expanded dev_flows 3779 * have the same actions. 3780 * 3781 * @param[in] dev 3782 * Pointer to the rte dev structure. 3783 * @param[in, out] dev_flow 3784 * Pointer to the mlx5 flow. 3785 * @param[in] attr 3786 * Pointer to the flow attributes. 3787 * @param[in] items 3788 * Pointer to the list of items. 3789 * @param[in] actions 3790 * Pointer to the list of actions. 3791 * @param[out] error 3792 * Pointer to the error structure. 3793 * 3794 * @return 3795 * 0 on success, a negative errno value otherwise and rte_errno is set. 3796 */ 3797 static inline int 3798 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow, 3799 const struct rte_flow_attr *attr, 3800 const struct rte_flow_item items[], 3801 const struct rte_flow_action actions[], 3802 struct rte_flow_error *error) 3803 { 3804 const struct mlx5_flow_driver_ops *fops; 3805 enum mlx5_flow_drv_type type = dev_flow->flow->drv_type; 3806 3807 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 3808 fops = flow_get_drv_ops(type); 3809 return fops->translate(dev, dev_flow, attr, items, actions, error); 3810 } 3811 3812 /** 3813 * Flow driver apply API. This abstracts calling driver specific functions. 3814 * Parent flow (rte_flow) should have driver type (drv_type). It applies 3815 * translated driver flows on to device. flow_drv_translate() must precede. 3816 * 3817 * @param[in] dev 3818 * Pointer to Ethernet device structure. 3819 * @param[in, out] flow 3820 * Pointer to flow structure. 3821 * @param[out] error 3822 * Pointer to error structure. 3823 * 3824 * @return 3825 * 0 on success, a negative errno value otherwise and rte_errno is set. 3826 */ 3827 static inline int 3828 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, 3829 struct rte_flow_error *error) 3830 { 3831 const struct mlx5_flow_driver_ops *fops; 3832 enum mlx5_flow_drv_type type = flow->drv_type; 3833 3834 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 3835 fops = flow_get_drv_ops(type); 3836 return fops->apply(dev, flow, error); 3837 } 3838 3839 /** 3840 * Flow driver destroy API. This abstracts calling driver specific functions. 3841 * Parent flow (rte_flow) should have driver type (drv_type). 
It removes a flow 3842 * from the device and releases its resources. 3843 * 3844 * @param[in] dev 3845 * Pointer to Ethernet device. 3846 * @param[in, out] flow 3847 * Pointer to flow structure. 3848 */ 3849 static inline void 3850 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) 3851 { 3852 const struct mlx5_flow_driver_ops *fops; 3853 enum mlx5_flow_drv_type type = flow->drv_type; 3854 3855 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 3856 fops = flow_get_drv_ops(type); 3857 fops->destroy(dev, flow); 3858 } 3859 3860 /** 3861 * Flow driver find RSS policy tbl API. This abstracts calling driver 3862 * specific functions. Parent flow (rte_flow) should have driver 3863 * type (drv_type). It will find the RSS policy table that has the rss_desc. 3864 * 3865 * @param[in] dev 3866 * Pointer to Ethernet device. 3867 * @param[in, out] flow 3868 * Pointer to flow structure. 3869 * @param[in] policy 3870 * Pointer to meter policy table. 3871 * @param[in] rss_desc 3872 * Pointer to the rss_desc array, one entry per meter color. 3873 */ 3874 static struct mlx5_flow_meter_sub_policy * 3875 flow_drv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev, 3876 struct rte_flow *flow, 3877 struct mlx5_flow_meter_policy *policy, 3878 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]) 3879 { 3880 const struct mlx5_flow_driver_ops *fops; 3881 enum mlx5_flow_drv_type type = flow->drv_type; 3882 3883 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 3884 fops = flow_get_drv_ops(type); 3885 return fops->meter_sub_policy_rss_prepare(dev, policy, rss_desc); 3886 } 3887 3888 /** 3889 * Flow driver color tag rule API. This abstracts calling driver 3890 * specific functions. Parent flow (rte_flow) should have driver 3891 * type (drv_type). It will create the color tag rules in hierarchy meter. 3892 * 3893 * @param[in] dev 3894 * Pointer to Ethernet device. 3895 * @param[in, out] flow 3896 * Pointer to flow structure. 3897 * @param[in] fm 3898 * Pointer to flow meter structure. 3899 * @param[in] src_port 3900 * The src port this extra rule should use. 3901 * @param[in] item 3902 * The src port id match item. 3903 * @param[out] error 3904 * Pointer to error structure. 3905 */ 3906 static int 3907 flow_drv_mtr_hierarchy_rule_create(struct rte_eth_dev *dev, 3908 struct rte_flow *flow, 3909 struct mlx5_flow_meter_info *fm, 3910 int32_t src_port, 3911 const struct rte_flow_item *item, 3912 struct rte_flow_error *error) 3913 { 3914 const struct mlx5_flow_driver_ops *fops; 3915 enum mlx5_flow_drv_type type = flow->drv_type; 3916 3917 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 3918 fops = flow_get_drv_ops(type); 3919 return fops->meter_hierarchy_rule_create(dev, fm, 3920 src_port, item, error); 3921 } 3922 3923 /** 3924 * Get RSS action from the action list. 3925 * 3926 * @param[in] dev 3927 * Pointer to Ethernet device. 3928 * @param[in] actions 3929 * Pointer to the list of actions. 3932 * 3933 * @return 3934 * Pointer to the RSS action if it exists, NULL otherwise.
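 *
 * For example (sketch; queue values are hypothetical, and an RSS action
 * nested inside a SAMPLE action is found the same way):
 *
 *   uint16_t queues[2] = { 0, 1 };
 *   struct rte_flow_action_rss rss_conf = {
 *       .queue_num = 2,
 *       .queue = queues,
 *   };
 *   struct rte_flow_action actions[] = {
 *       { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss_conf },
 *       { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   // flow_get_rss_action(dev, actions) returns &rss_conf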
3935 */ 3936 static const struct rte_flow_action_rss* 3937 flow_get_rss_action(struct rte_eth_dev *dev, 3938 const struct rte_flow_action actions[]) 3939 { 3940 struct mlx5_priv *priv = dev->data->dev_private; 3941 const struct rte_flow_action_rss *rss = NULL; 3942 struct mlx5_meter_policy_action_container *acg; 3943 struct mlx5_meter_policy_action_container *acy; 3944 3945 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 3946 switch (actions->type) { 3947 case RTE_FLOW_ACTION_TYPE_RSS: 3948 rss = actions->conf; 3949 break; 3950 case RTE_FLOW_ACTION_TYPE_SAMPLE: 3951 { 3952 const struct rte_flow_action_sample *sample = 3953 actions->conf; 3954 const struct rte_flow_action *act = sample->actions; 3955 for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) 3956 if (act->type == RTE_FLOW_ACTION_TYPE_RSS) 3957 rss = act->conf; 3958 break; 3959 } 3960 case RTE_FLOW_ACTION_TYPE_METER: 3961 { 3962 uint32_t mtr_idx; 3963 struct mlx5_flow_meter_info *fm; 3964 struct mlx5_flow_meter_policy *policy; 3965 const struct rte_flow_action_meter *mtr = actions->conf; 3966 3967 fm = mlx5_flow_meter_find(priv, mtr->mtr_id, &mtr_idx); 3968 if (fm && !fm->def_policy) { 3969 policy = mlx5_flow_meter_policy_find(dev, 3970 fm->policy_id, NULL); 3971 MLX5_ASSERT(policy); 3972 if (policy->is_hierarchy) { 3973 policy = 3974 mlx5_flow_meter_hierarchy_get_final_policy(dev, 3975 policy); 3976 if (!policy) 3977 return NULL; 3978 } 3979 if (policy->is_rss) { 3980 acg = 3981 &policy->act_cnt[RTE_COLOR_GREEN]; 3982 acy = 3983 &policy->act_cnt[RTE_COLOR_YELLOW]; 3984 if (acg->fate_action == 3985 MLX5_FLOW_FATE_SHARED_RSS) 3986 rss = acg->rss->conf; 3987 else if (acy->fate_action == 3988 MLX5_FLOW_FATE_SHARED_RSS) 3989 rss = acy->rss->conf; 3990 } 3991 } 3992 break; 3993 } 3994 default: 3995 break; 3996 } 3997 } 3998 return rss; 3999 } 4000 4001 /** 4002 * Get ASO age action by index. 4003 * 4004 * @param[in] dev 4005 * Pointer to the Ethernet device structure. 4006 * @param[in] age_idx 4007 * Index to the ASO age action. 4008 * 4009 * @return 4010 * The specified ASO age action. 4011 */ 4012 struct mlx5_aso_age_action* 4013 flow_aso_age_get_by_idx(struct rte_eth_dev *dev, uint32_t age_idx) 4014 { 4015 uint16_t pool_idx = age_idx & UINT16_MAX; 4016 uint16_t offset = (age_idx >> 16) & UINT16_MAX; 4017 struct mlx5_priv *priv = dev->data->dev_private; 4018 struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; 4019 struct mlx5_aso_age_pool *pool; 4020 4021 rte_rwlock_read_lock(&mng->resize_rwl); 4022 pool = mng->pools[pool_idx]; 4023 rte_rwlock_read_unlock(&mng->resize_rwl); 4024 return &pool->actions[offset - 1]; 4025 } 4026 4027 /* Maps an indirect action to its translated direct action in the actions array. */ 4028 struct mlx5_translated_action_handle { 4029 struct rte_flow_action_handle *action; /**< Indirect action handle. */ 4030 int index; /**< Index in related array of rte_flow_action. */ 4031 }; 4032 4033 /** 4034 * Translates actions of type RTE_FLOW_ACTION_TYPE_INDIRECT to the related 4035 * direct actions if translation is possible. 4036 * This functionality is used to run the same execution path for both direct and 4037 * indirect actions on flow create. All necessary preparations for indirect 4038 * action handling should be performed on the *handle* actions list returned 4039 * from this call. 4040 * 4041 * @param[in] dev 4042 * Pointer to Ethernet device. 4043 * @param[in] actions 4044 * List of actions to translate. 4045 * @param[out] handle 4046 * List to store translated indirect action object handles.
4047 * @param[in, out] indir_n 4048 * Size of *handle* array. On return should be updated with number of 4049 * indirect actions retrieved from the *actions* list. 4050 * @param[out] translated_actions 4051 * List of actions where all indirect actions were translated to direct 4052 * if possible. NULL if no translation took place. 4053 * @param[out] error 4054 * Pointer to the error structure. 4055 * 4056 * @return 4057 * 0 on success, a negative errno value otherwise and rte_errno is set. 4058 */ 4059 static int 4060 flow_action_handles_translate(struct rte_eth_dev *dev, 4061 const struct rte_flow_action actions[], 4062 struct mlx5_translated_action_handle *handle, 4063 int *indir_n, 4064 struct rte_flow_action **translated_actions, 4065 struct rte_flow_error *error) 4066 { 4067 struct mlx5_priv *priv = dev->data->dev_private; 4068 struct rte_flow_action *translated = NULL; 4069 size_t actions_size; 4070 int n; 4071 int copied_n = 0; 4072 struct mlx5_translated_action_handle *handle_end = NULL; 4073 4074 for (n = 0; actions[n].type != RTE_FLOW_ACTION_TYPE_END; n++) { 4075 if (actions[n].type != RTE_FLOW_ACTION_TYPE_INDIRECT) 4076 continue; 4077 if (copied_n == *indir_n) { 4078 return rte_flow_error_set 4079 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_NUM, 4080 NULL, "too many shared actions"); 4081 } 4082 rte_memcpy(&handle[copied_n].action, &actions[n].conf, 4083 sizeof(actions[n].conf)); 4084 handle[copied_n].index = n; 4085 copied_n++; 4086 } 4087 n++; 4088 *indir_n = copied_n; 4089 if (!copied_n) 4090 return 0; 4091 actions_size = sizeof(struct rte_flow_action) * n; 4092 translated = mlx5_malloc(MLX5_MEM_ZERO, actions_size, 0, SOCKET_ID_ANY); 4093 if (!translated) { 4094 rte_errno = ENOMEM; 4095 return -ENOMEM; 4096 } 4097 memcpy(translated, actions, actions_size); 4098 for (handle_end = handle + copied_n; handle < handle_end; handle++) { 4099 struct mlx5_shared_action_rss *shared_rss; 4100 uint32_t act_idx = (uint32_t)(uintptr_t)handle->action; 4101 uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET; 4102 uint32_t idx = act_idx & 4103 ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1); 4104 4105 switch (type) { 4106 case MLX5_INDIRECT_ACTION_TYPE_RSS: 4107 shared_rss = mlx5_ipool_get 4108 (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx); 4109 translated[handle->index].type = 4110 RTE_FLOW_ACTION_TYPE_RSS; 4111 translated[handle->index].conf = 4112 &shared_rss->origin; 4113 break; 4114 case MLX5_INDIRECT_ACTION_TYPE_COUNT: 4115 translated[handle->index].type = 4116 (enum rte_flow_action_type) 4117 MLX5_RTE_FLOW_ACTION_TYPE_COUNT; 4118 translated[handle->index].conf = (void *)(uintptr_t)idx; 4119 break; 4120 case MLX5_INDIRECT_ACTION_TYPE_AGE: 4121 if (priv->sh->flow_hit_aso_en) { 4122 translated[handle->index].type = 4123 (enum rte_flow_action_type) 4124 MLX5_RTE_FLOW_ACTION_TYPE_AGE; 4125 translated[handle->index].conf = 4126 (void *)(uintptr_t)idx; 4127 break; 4128 } 4129 /* Fall-through */ 4130 case MLX5_INDIRECT_ACTION_TYPE_CT: 4131 if (priv->sh->ct_aso_en) { 4132 translated[handle->index].type = 4133 RTE_FLOW_ACTION_TYPE_CONNTRACK; 4134 translated[handle->index].conf = 4135 (void *)(uintptr_t)idx; 4136 break; 4137 } 4138 /* Fall-through */ 4139 default: 4140 mlx5_free(translated); 4141 return rte_flow_error_set 4142 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, 4143 NULL, "invalid indirect action type"); 4144 } 4145 } 4146 *translated_actions = translated; 4147 return 0; 4148 } 4149 4150 /** 4151 * Get Shared RSS action from the action list. 
4152 * 4153 * @param[in] dev 4154 * Pointer to Ethernet device. 4155 * @param[in] handle 4156 * Pointer to the list of translated action handles. 4157 * @param[in] shared_n 4158 * Actions list length. 4159 * 4160 * @return 4161 * The MLX5 shared RSS action ID if it exists, 0 otherwise. 4162 */ 4163 static uint32_t 4164 flow_get_shared_rss_action(struct rte_eth_dev *dev, 4165 struct mlx5_translated_action_handle *handle, 4166 int shared_n) 4167 { 4168 struct mlx5_translated_action_handle *handle_end; 4169 struct mlx5_priv *priv = dev->data->dev_private; 4170 struct mlx5_shared_action_rss *shared_rss; 4171 4172 4173 for (handle_end = handle + shared_n; handle < handle_end; handle++) { 4174 uint32_t act_idx = (uint32_t)(uintptr_t)handle->action; 4175 uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET; 4176 uint32_t idx = act_idx & 4177 ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1); 4178 switch (type) { 4179 case MLX5_INDIRECT_ACTION_TYPE_RSS: 4180 shared_rss = mlx5_ipool_get 4181 (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], 4182 idx); 4183 __atomic_add_fetch(&shared_rss->refcnt, 1, 4184 __ATOMIC_RELAXED); 4185 return idx; 4186 default: 4187 break; 4188 } 4189 } 4190 return 0; 4191 } 4192 4193 static unsigned int 4194 find_graph_root(uint32_t rss_level) 4195 { 4196 return rss_level < 2 ? MLX5_EXPANSION_ROOT : 4197 MLX5_EXPANSION_ROOT_OUTER; 4198 } 4199 4200 /** 4201 * Get layer flags from the prefix flow. 4202 * 4203 * Some flows may be split into several subflows; the prefix subflow gets the 4204 * match items and the suffix subflow gets the actions. 4205 * Some actions need the user-defined match item layer flags to get the 4206 * details for the action. 4207 * This function helps the suffix flow to get the item layer flags from the 4208 * prefix subflow. 4209 * 4210 * @param[in] dev_flow 4211 * Pointer to the created prefix subflow. 4212 * 4213 * @return 4214 * The layers obtained from the prefix subflow. 4215 */ 4216 static inline uint64_t 4217 flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow) 4218 { 4219 uint64_t layers = 0; 4220 4221 /* 4222 * The layer bits could be cached in a local variable, but usually the 4223 * compiler does this optimization for us. 4224 * If there is no decap action, use the layers directly. 4225 */ 4226 if (!(dev_flow->act_flags & MLX5_FLOW_ACTION_DECAP)) 4227 return dev_flow->handle->layers; 4228 /* Convert L3 layers with decap action. */ 4229 if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4) 4230 layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4; 4231 else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6) 4232 layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6; 4233 /* Convert L4 layers with decap action. */ 4234 if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP) 4235 layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP; 4236 else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP) 4237 layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP; 4238 return layers; 4239 } 4240 4241 /** 4242 * Get metadata split action information. 4243 * 4244 * @param[in] actions 4245 * Pointer to the list of actions. 4246 * @param[out] qrss 4247 * Pointer to the return pointer; set to the QUEUE/RSS action if one is 4248 * found in the list, left untouched otherwise. 4251 * @param[out] encap_idx 4252 * Pointer to the index of the encap action if it exists, otherwise the last 4253 * action index. 4254 * 4255 * @return 4256 * Total number of actions.
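 *
 * For instance (illustrative sketch; the decap/encap/queue configs are
 * hypothetical):
 *
 *   struct rte_flow_action acts[] = {
 *       { .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &decap },
 *       { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap },
 *       { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *       { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   // With encap.size > MLX5_ENCAPSULATION_DECISION_SIZE:
 *   //   *qrss -> &acts[2], *encap_idx == 0 (the RAW_DECAP index),
 *   //   and the return value is 4 (END counted).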
4257 */ 4258 static int 4259 flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[], 4260 const struct rte_flow_action **qrss, 4261 int *encap_idx) 4262 { 4263 const struct rte_flow_action_raw_encap *raw_encap; 4264 int actions_n = 0; 4265 int raw_decap_idx = -1; 4266 4267 *encap_idx = -1; 4268 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 4269 switch (actions->type) { 4270 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 4271 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 4272 *encap_idx = actions_n; 4273 break; 4274 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 4275 raw_decap_idx = actions_n; 4276 break; 4277 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 4278 raw_encap = actions->conf; 4279 if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) 4280 *encap_idx = raw_decap_idx != -1 ? 4281 raw_decap_idx : actions_n; 4282 break; 4283 case RTE_FLOW_ACTION_TYPE_QUEUE: 4284 case RTE_FLOW_ACTION_TYPE_RSS: 4285 *qrss = actions; 4286 break; 4287 default: 4288 break; 4289 } 4290 actions_n++; 4291 } 4292 if (*encap_idx == -1) 4293 *encap_idx = actions_n; 4294 /* Count RTE_FLOW_ACTION_TYPE_END. */ 4295 return actions_n + 1; 4296 } 4297 4298 /** 4299 * Check if the action will change packet. 4300 * 4301 * @param dev 4302 * Pointer to Ethernet device. 4303 * @param[in] type 4304 * action type. 4305 * 4306 * @return 4307 * true if action will change packet, false otherwise. 4308 */ 4309 static bool flow_check_modify_action_type(struct rte_eth_dev *dev, 4310 enum rte_flow_action_type type) 4311 { 4312 struct mlx5_priv *priv = dev->data->dev_private; 4313 4314 switch (type) { 4315 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: 4316 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: 4317 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: 4318 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: 4319 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: 4320 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: 4321 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: 4322 case RTE_FLOW_ACTION_TYPE_SET_TP_DST: 4323 case RTE_FLOW_ACTION_TYPE_DEC_TTL: 4324 case RTE_FLOW_ACTION_TYPE_SET_TTL: 4325 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ: 4326 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ: 4327 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK: 4328 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK: 4329 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP: 4330 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: 4331 case RTE_FLOW_ACTION_TYPE_SET_META: 4332 case RTE_FLOW_ACTION_TYPE_SET_TAG: 4333 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: 4334 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 4335 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 4336 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: 4337 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 4338 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: 4339 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 4340 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: 4341 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 4342 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 4343 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD: 4344 return true; 4345 case RTE_FLOW_ACTION_TYPE_FLAG: 4346 case RTE_FLOW_ACTION_TYPE_MARK: 4347 if (priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) 4348 return true; 4349 else 4350 return false; 4351 default: 4352 return false; 4353 } 4354 } 4355 4356 /** 4357 * Check meter action from the action list. 4358 * 4359 * @param dev 4360 * Pointer to Ethernet device. 4361 * @param[in] actions 4362 * Pointer to the list of actions. 4363 * @param[out] has_mtr 4364 * Pointer to the meter exist flag. 4365 * @param[out] has_modify 4366 * Pointer to the flag showing there's packet change action. 4367 * @param[out] meter_id 4368 * Pointer to the meter id. 
4369 * 4370 * @return 4371 * Total number of actions. 4372 */ 4373 static int 4374 flow_check_meter_action(struct rte_eth_dev *dev, 4375 const struct rte_flow_action actions[], 4376 bool *has_mtr, bool *has_modify, uint32_t *meter_id) 4377 { 4378 const struct rte_flow_action_meter *mtr = NULL; 4379 int actions_n = 0; 4380 4381 MLX5_ASSERT(has_mtr); 4382 *has_mtr = false; 4383 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 4384 switch (actions->type) { 4385 case RTE_FLOW_ACTION_TYPE_METER: 4386 mtr = actions->conf; 4387 *meter_id = mtr->mtr_id; 4388 *has_mtr = true; 4389 break; 4390 default: 4391 break; 4392 } 4393 if (!*has_mtr) 4394 *has_modify |= flow_check_modify_action_type(dev, 4395 actions->type); 4396 actions_n++; 4397 } 4398 /* Count RTE_FLOW_ACTION_TYPE_END. */ 4399 return actions_n + 1; 4400 } 4401 4402 /** 4403 * Check if the flow should be split due to hairpin. 4404 * The reason for the split is that in current HW we can't 4405 * support encap and push-vlan on Rx, so if a flow contains 4406 * these actions we move it to Tx. 4407 * 4408 * @param dev 4409 * Pointer to Ethernet device. 4410 * @param[in] attr 4411 * Flow rule attributes. 4412 * @param[in] actions 4413 * Associated actions (list terminated by the END action). 4414 * 4415 * @return 4416 * > 0 the number of actions and the flow should be split, 4417 * 0 when no split required. 4418 */ 4419 static int 4420 flow_check_hairpin_split(struct rte_eth_dev *dev, 4421 const struct rte_flow_attr *attr, 4422 const struct rte_flow_action actions[]) 4423 { 4424 int queue_action = 0; 4425 int action_n = 0; 4426 int split = 0; 4427 const struct rte_flow_action_queue *queue; 4428 const struct rte_flow_action_rss *rss; 4429 const struct rte_flow_action_raw_encap *raw_encap; 4430 const struct rte_eth_hairpin_conf *conf; 4431 4432 if (!attr->ingress) 4433 return 0; 4434 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 4435 switch (actions->type) { 4436 case RTE_FLOW_ACTION_TYPE_QUEUE: 4437 queue = actions->conf; 4438 if (queue == NULL) 4439 return 0; 4440 conf = mlx5_rxq_get_hairpin_conf(dev, queue->index); 4441 if (conf == NULL || conf->tx_explicit != 0) 4442 return 0; 4443 queue_action = 1; 4444 action_n++; 4445 break; 4446 case RTE_FLOW_ACTION_TYPE_RSS: 4447 rss = actions->conf; 4448 if (rss == NULL || rss->queue_num == 0) 4449 return 0; 4450 conf = mlx5_rxq_get_hairpin_conf(dev, rss->queue[0]); 4451 if (conf == NULL || conf->tx_explicit != 0) 4452 return 0; 4453 queue_action = 1; 4454 action_n++; 4455 break; 4456 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 4457 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 4458 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 4459 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 4460 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: 4461 split++; 4462 action_n++; 4463 break; 4464 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 4465 raw_encap = actions->conf; 4466 if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) 4467 split++; 4468 action_n++; 4469 break; 4470 default: 4471 action_n++; 4472 break; 4473 } 4474 } 4475 if (split && queue_action) 4476 return action_n; 4477 return 0; 4478 } 4479 4480 /* Declare flow create/destroy prototype in advance. 
*/ 4481 static uint32_t 4482 flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type, 4483 const struct rte_flow_attr *attr, 4484 const struct rte_flow_item items[], 4485 const struct rte_flow_action actions[], 4486 bool external, struct rte_flow_error *error); 4487 4488 static void 4489 flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type, 4490 uint32_t flow_idx); 4491 4492 int 4493 flow_dv_mreg_match_cb(void *tool_ctx __rte_unused, 4494 struct mlx5_list_entry *entry, void *cb_ctx) 4495 { 4496 struct mlx5_flow_cb_ctx *ctx = cb_ctx; 4497 struct mlx5_flow_mreg_copy_resource *mcp_res = 4498 container_of(entry, typeof(*mcp_res), hlist_ent); 4499 4500 return mcp_res->mark_id != *(uint32_t *)(ctx->data); 4501 } 4502 4503 struct mlx5_list_entry * 4504 flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx) 4505 { 4506 struct rte_eth_dev *dev = tool_ctx; 4507 struct mlx5_priv *priv = dev->data->dev_private; 4508 struct mlx5_flow_cb_ctx *ctx = cb_ctx; 4509 struct mlx5_flow_mreg_copy_resource *mcp_res; 4510 struct rte_flow_error *error = ctx->error; 4511 uint32_t idx = 0; 4512 int ret; 4513 uint32_t mark_id = *(uint32_t *)(ctx->data); 4514 struct rte_flow_attr attr = { 4515 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, 4516 .ingress = 1, 4517 }; 4518 struct mlx5_rte_flow_item_tag tag_spec = { 4519 .data = mark_id, 4520 }; 4521 struct rte_flow_item items[] = { 4522 [1] = { .type = RTE_FLOW_ITEM_TYPE_END, }, 4523 }; 4524 struct rte_flow_action_mark ftag = { 4525 .id = mark_id, 4526 }; 4527 struct mlx5_flow_action_copy_mreg cp_mreg = { 4528 .dst = REG_B, 4529 .src = REG_NON, 4530 }; 4531 struct rte_flow_action_jump jump = { 4532 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, 4533 }; 4534 struct rte_flow_action actions[] = { 4535 [3] = { .type = RTE_FLOW_ACTION_TYPE_END, }, 4536 }; 4537 4538 /* Fill the register fields in the flow. */ 4539 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); 4540 if (ret < 0) 4541 return NULL; 4542 tag_spec.id = ret; 4543 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error); 4544 if (ret < 0) 4545 return NULL; 4546 cp_mreg.src = ret; 4547 /* Provide the full width of FLAG specific value. */ 4548 if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT)) 4549 tag_spec.data = MLX5_FLOW_MARK_DEFAULT; 4550 /* Build a new flow. */ 4551 if (mark_id != MLX5_DEFAULT_COPY_ID) { 4552 items[0] = (struct rte_flow_item){ 4553 .type = (enum rte_flow_item_type) 4554 MLX5_RTE_FLOW_ITEM_TYPE_TAG, 4555 .spec = &tag_spec, 4556 }; 4557 items[1] = (struct rte_flow_item){ 4558 .type = RTE_FLOW_ITEM_TYPE_END, 4559 }; 4560 actions[0] = (struct rte_flow_action){ 4561 .type = (enum rte_flow_action_type) 4562 MLX5_RTE_FLOW_ACTION_TYPE_MARK, 4563 .conf = &ftag, 4564 }; 4565 actions[1] = (struct rte_flow_action){ 4566 .type = (enum rte_flow_action_type) 4567 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 4568 .conf = &cp_mreg, 4569 }; 4570 actions[2] = (struct rte_flow_action){ 4571 .type = RTE_FLOW_ACTION_TYPE_JUMP, 4572 .conf = &jump, 4573 }; 4574 actions[3] = (struct rte_flow_action){ 4575 .type = RTE_FLOW_ACTION_TYPE_END, 4576 }; 4577 } else { 4578 /* Default rule, wildcard match. 
*/ 4579 attr.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR; 4580 items[0] = (struct rte_flow_item){ 4581 .type = RTE_FLOW_ITEM_TYPE_END, 4582 }; 4583 actions[0] = (struct rte_flow_action){ 4584 .type = (enum rte_flow_action_type) 4585 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 4586 .conf = &cp_mreg, 4587 }; 4588 actions[1] = (struct rte_flow_action){ 4589 .type = RTE_FLOW_ACTION_TYPE_JUMP, 4590 .conf = &jump, 4591 }; 4592 actions[2] = (struct rte_flow_action){ 4593 .type = RTE_FLOW_ACTION_TYPE_END, 4594 }; 4595 } 4596 /* Build a new entry. */ 4597 mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx); 4598 if (!mcp_res) { 4599 rte_errno = ENOMEM; 4600 return NULL; 4601 } 4602 mcp_res->idx = idx; 4603 mcp_res->mark_id = mark_id; 4604 /* 4605 * The copy flows are not included in any list. They 4606 * are referenced from other flows and cannot 4607 * be applied, removed, or deleted in arbitrary order 4608 * by list traversing. 4609 */ 4610 mcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP, 4611 &attr, items, actions, false, error); 4612 if (!mcp_res->rix_flow) { 4613 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], idx); 4614 return NULL; 4615 } 4616 return &mcp_res->hlist_ent; 4617 } 4618 4619 struct mlx5_list_entry * 4620 flow_dv_mreg_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry, 4621 void *cb_ctx __rte_unused) 4622 { 4623 struct rte_eth_dev *dev = tool_ctx; 4624 struct mlx5_priv *priv = dev->data->dev_private; 4625 struct mlx5_flow_mreg_copy_resource *mcp_res; 4626 uint32_t idx = 0; 4627 4628 mcp_res = mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx); 4629 if (!mcp_res) { 4630 rte_errno = ENOMEM; 4631 return NULL; 4632 } 4633 memcpy(mcp_res, oentry, sizeof(*mcp_res)); 4634 mcp_res->idx = idx; 4635 return &mcp_res->hlist_ent; 4636 } 4637 4638 void 4639 flow_dv_mreg_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry) 4640 { 4641 struct mlx5_flow_mreg_copy_resource *mcp_res = 4642 container_of(entry, typeof(*mcp_res), hlist_ent); 4643 struct rte_eth_dev *dev = tool_ctx; 4644 struct mlx5_priv *priv = dev->data->dev_private; 4645 4646 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx); 4647 } 4648 4649 /** 4650 * Add a flow of copying flow metadata registers in RX_CP_TBL. 4651 * 4652 * As mark_id is unique, if there's already a registered flow for the mark_id, 4653 * return by increasing the reference counter of the resource. Otherwise, create 4654 * the resource (mcp_res) and flow. 4655 * 4656 * The flow looks like: 4657 * - If ingress port is ANY and reg_c[1] is mark_id, 4658 * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL. 4659 * 4660 * For the default flow (zero mark_id), the flow looks like: 4661 * - If ingress port is ANY, 4662 * reg_b := reg_c[0] and jump to RX_ACT_TBL. 4663 * 4664 * @param dev 4665 * Pointer to Ethernet device. 4666 * @param mark_id 4667 * ID of MARK action, zero means default flow for META. 4668 * @param[out] error 4669 * Perform verbose error reporting if not NULL. 4670 * 4671 * @return 4672 * Associated resource on success, NULL otherwise and rte_errno is set. 4673 */ 4674 static struct mlx5_flow_mreg_copy_resource * 4675 flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, 4676 struct rte_flow_error *error) 4677 { 4678 struct mlx5_priv *priv = dev->data->dev_private; 4679 struct mlx5_list_entry *entry; 4680 struct mlx5_flow_cb_ctx ctx = { 4681 .dev = dev, 4682 .error = error, 4683 .data = &mark_id, 4684 }; 4685 4686 /* Check if already registered.
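 * mlx5_hlist_register() either returns the existing entry with its
 * reference count increased, or invokes flow_dv_mreg_create_cb() to
 * build the copy flow for this mark_id.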
*/ 4687 MLX5_ASSERT(priv->mreg_cp_tbl); 4688 entry = mlx5_hlist_register(priv->mreg_cp_tbl, mark_id, &ctx); 4689 if (!entry) 4690 return NULL; 4691 return container_of(entry, struct mlx5_flow_mreg_copy_resource, 4692 hlist_ent); 4693 } 4694 4695 void 4696 flow_dv_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry) 4697 { 4698 struct mlx5_flow_mreg_copy_resource *mcp_res = 4699 container_of(entry, typeof(*mcp_res), hlist_ent); 4700 struct rte_eth_dev *dev = tool_ctx; 4701 struct mlx5_priv *priv = dev->data->dev_private; 4702 4703 MLX5_ASSERT(mcp_res->rix_flow); 4704 flow_list_destroy(dev, MLX5_FLOW_TYPE_MCP, mcp_res->rix_flow); 4705 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx); 4706 } 4707 4708 /** 4709 * Release flow in RX_CP_TBL. 4710 * 4711 * @param dev 4712 * Pointer to Ethernet device. 4713 * @param flow 4714 * Parent flow for which copying is provided. 4715 */ 4716 static void 4717 flow_mreg_del_copy_action(struct rte_eth_dev *dev, 4718 struct rte_flow *flow) 4719 { 4720 struct mlx5_flow_mreg_copy_resource *mcp_res; 4721 struct mlx5_priv *priv = dev->data->dev_private; 4722 4723 if (!flow->rix_mreg_copy) 4724 return; 4725 mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP], 4726 flow->rix_mreg_copy); 4727 if (!mcp_res || !priv->mreg_cp_tbl) 4728 return; 4729 MLX5_ASSERT(mcp_res->rix_flow); 4730 mlx5_hlist_unregister(priv->mreg_cp_tbl, &mcp_res->hlist_ent); 4731 flow->rix_mreg_copy = 0; 4732 } 4733 4734 /** 4735 * Remove the default copy action from RX_CP_TBL. 4736 * 4737 * This function is called from mlx5_dev_start(). It is not 4738 * thread safe. 4739 * 4740 * @param dev 4741 * Pointer to Ethernet device. 4742 */ 4743 static void 4744 flow_mreg_del_default_copy_action(struct rte_eth_dev *dev) 4745 { 4746 struct mlx5_list_entry *entry; 4747 struct mlx5_priv *priv = dev->data->dev_private; 4748 struct mlx5_flow_cb_ctx ctx; 4749 uint32_t mark_id; 4750 4751 /* Check if default flow is registered. */ 4752 if (!priv->mreg_cp_tbl) 4753 return; 4754 mark_id = MLX5_DEFAULT_COPY_ID; 4755 ctx.data = &mark_id; 4756 entry = mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx); 4757 if (!entry) 4758 return; 4759 mlx5_hlist_unregister(priv->mreg_cp_tbl, entry); 4760 } 4761 4762 /** 4763 * Add the default copy action in RX_CP_TBL. 4764 * 4765 * This function is called from mlx5_dev_start(). It is not 4766 * thread safe. 4767 * 4768 * @param dev 4769 * Pointer to Ethernet device. 4770 * @param[out] error 4771 * Perform verbose error reporting if not NULL. 4772 * 4773 * @return 4774 * 0 for success, negative value otherwise and rte_errno is set. 4775 */ 4776 static int 4777 flow_mreg_add_default_copy_action(struct rte_eth_dev *dev, 4778 struct rte_flow_error *error) 4779 { 4780 struct mlx5_priv *priv = dev->data->dev_private; 4781 struct mlx5_flow_mreg_copy_resource *mcp_res; 4782 struct mlx5_flow_cb_ctx ctx; 4783 uint32_t mark_id; 4784 4785 /* Check whether extensive metadata feature is engaged. */ 4786 if (!priv->sh->config.dv_flow_en || 4787 priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 4788 !mlx5_flow_ext_mreg_supported(dev) || 4789 !priv->sh->dv_regc0_mask) 4790 return 0; 4791 /* 4792 * Adding the default mreg copy flow may be called multiple times, but 4793 * it is removed only once at stop. Avoid registering it twice.
*/ 4795 mark_id = MLX5_DEFAULT_COPY_ID; 4796 ctx.data = &mark_id; 4797 if (mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx)) 4798 return 0; 4799 mcp_res = flow_mreg_add_copy_action(dev, mark_id, error); 4800 if (!mcp_res) 4801 return -rte_errno; 4802 return 0; 4803 } 4804 4805 /** 4806 * Add a flow of copying flow metadata registers in RX_CP_TBL. 4807 * 4808 * All the flows having a Q/RSS action should be split by 4809 * flow_mreg_split_qrss_prep() to pass through RX_CP_TBL. A flow in the RX_CP_TBL 4810 * performs the following, 4811 * - CQE->flow_tag := reg_c[1] (MARK) 4812 * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META) 4813 * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1], 4814 * but there should be a flow per MARK ID set by the MARK action. 4815 * 4816 * For the aforementioned reason, if there's a MARK action in the flow's action 4817 * list, a corresponding flow should be added to the RX_CP_TBL in order to copy 4818 * the MARK ID to CQE's flow_tag like, 4819 * - If reg_c[1] is mark_id, 4820 * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL. 4821 * 4822 * For the SET_META action which stores value in reg_c[0], as the destination is 4823 * also a flow metadata register (reg_b), adding a default flow is enough. Zero 4824 * MARK ID means the default flow. The default flow looks like, 4825 * - For all flows, reg_b := reg_c[0] and jump to RX_ACT_TBL. 4826 * 4827 * @param dev 4828 * Pointer to Ethernet device. 4829 * @param flow 4830 * Pointer to flow structure. 4831 * @param[in] actions 4832 * Pointer to the list of actions. 4833 * @param[out] error 4834 * Perform verbose error reporting if not NULL. 4835 * 4836 * @return 4837 * 0 on success, negative value otherwise and rte_errno is set. 4838 */ 4839 static int 4840 flow_mreg_update_copy_table(struct rte_eth_dev *dev, 4841 struct rte_flow *flow, 4842 const struct rte_flow_action *actions, 4843 struct rte_flow_error *error) 4844 { 4845 struct mlx5_priv *priv = dev->data->dev_private; 4846 struct mlx5_sh_config *config = &priv->sh->config; 4847 struct mlx5_flow_mreg_copy_resource *mcp_res; 4848 const struct rte_flow_action_mark *mark; 4849 4850 /* Check whether extensive metadata feature is engaged. */ 4851 if (!config->dv_flow_en || 4852 config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 4853 !mlx5_flow_ext_mreg_supported(dev) || 4854 !priv->sh->dv_regc0_mask) 4855 return 0; 4856 /* Find MARK action. */ 4857 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 4858 switch (actions->type) { 4859 case RTE_FLOW_ACTION_TYPE_FLAG: 4860 mcp_res = flow_mreg_add_copy_action 4861 (dev, MLX5_FLOW_MARK_DEFAULT, error); 4862 if (!mcp_res) 4863 return -rte_errno; 4864 flow->rix_mreg_copy = mcp_res->idx; 4865 return 0; 4866 case RTE_FLOW_ACTION_TYPE_MARK: 4867 mark = (const struct rte_flow_action_mark *) 4868 actions->conf; 4869 mcp_res = 4870 flow_mreg_add_copy_action(dev, mark->id, error); 4871 if (!mcp_res) 4872 return -rte_errno; 4873 flow->rix_mreg_copy = mcp_res->idx; 4874 return 0; 4875 default: 4876 break; 4877 } 4878 } 4879 return 0; 4880 } 4881 4882 #define MLX5_MAX_SPLIT_ACTIONS 24 4883 #define MLX5_MAX_SPLIT_ITEMS 24 4884 4885 /** 4886 * Split the hairpin flow. 4887 * Since HW can't support encap and push-vlan on Rx, we move these 4888 * actions to Tx. 4889 * If the count action is after the encap then we also 4890 * move the count action; in this case the count will also measure 4891 * the outer bytes. 4892 * 4893 * @param dev 4894 * Pointer to Ethernet device.
4895 * @param[in] actions 4896 * Associated actions (list terminated by the END action). 4897 * @param[out] actions_rx 4898 * Rx flow actions. 4899 * @param[out] actions_tx 4900 * Tx flow actions.. 4901 * @param[out] pattern_tx 4902 * The pattern items for the Tx flow. 4903 * @param[out] flow_id 4904 * The flow ID connected to this flow. 4905 * 4906 * @return 4907 * 0 on success. 4908 */ 4909 static int 4910 flow_hairpin_split(struct rte_eth_dev *dev, 4911 const struct rte_flow_action actions[], 4912 struct rte_flow_action actions_rx[], 4913 struct rte_flow_action actions_tx[], 4914 struct rte_flow_item pattern_tx[], 4915 uint32_t flow_id) 4916 { 4917 const struct rte_flow_action_raw_encap *raw_encap; 4918 const struct rte_flow_action_raw_decap *raw_decap; 4919 struct mlx5_rte_flow_action_set_tag *set_tag; 4920 struct rte_flow_action *tag_action; 4921 struct mlx5_rte_flow_item_tag *tag_item; 4922 struct rte_flow_item *item; 4923 char *addr; 4924 int encap = 0; 4925 4926 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 4927 switch (actions->type) { 4928 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 4929 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 4930 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 4931 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 4932 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: 4933 rte_memcpy(actions_tx, actions, 4934 sizeof(struct rte_flow_action)); 4935 actions_tx++; 4936 break; 4937 case RTE_FLOW_ACTION_TYPE_COUNT: 4938 if (encap) { 4939 rte_memcpy(actions_tx, actions, 4940 sizeof(struct rte_flow_action)); 4941 actions_tx++; 4942 } else { 4943 rte_memcpy(actions_rx, actions, 4944 sizeof(struct rte_flow_action)); 4945 actions_rx++; 4946 } 4947 break; 4948 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 4949 raw_encap = actions->conf; 4950 if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) { 4951 memcpy(actions_tx, actions, 4952 sizeof(struct rte_flow_action)); 4953 actions_tx++; 4954 encap = 1; 4955 } else { 4956 rte_memcpy(actions_rx, actions, 4957 sizeof(struct rte_flow_action)); 4958 actions_rx++; 4959 } 4960 break; 4961 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 4962 raw_decap = actions->conf; 4963 if (raw_decap->size < MLX5_ENCAPSULATION_DECISION_SIZE) { 4964 memcpy(actions_tx, actions, 4965 sizeof(struct rte_flow_action)); 4966 actions_tx++; 4967 } else { 4968 rte_memcpy(actions_rx, actions, 4969 sizeof(struct rte_flow_action)); 4970 actions_rx++; 4971 } 4972 break; 4973 default: 4974 rte_memcpy(actions_rx, actions, 4975 sizeof(struct rte_flow_action)); 4976 actions_rx++; 4977 break; 4978 } 4979 } 4980 /* Add set meta action and end action for the Rx flow. */ 4981 tag_action = actions_rx; 4982 tag_action->type = (enum rte_flow_action_type) 4983 MLX5_RTE_FLOW_ACTION_TYPE_TAG; 4984 actions_rx++; 4985 rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action)); 4986 actions_rx++; 4987 set_tag = (void *)actions_rx; 4988 *set_tag = (struct mlx5_rte_flow_action_set_tag) { 4989 .id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL), 4990 .data = flow_id, 4991 }; 4992 MLX5_ASSERT(set_tag->id > REG_NON); 4993 tag_action->conf = set_tag; 4994 /* Create Tx item list. 
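 * The Tx pattern below matches the tag register value written by the
 * Rx subflow's SET_TAG action above, so the Tx subflow catches exactly
 * the packets of this hairpin flow.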
*/ 4995 rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action)); 4996 addr = (void *)&pattern_tx[2]; 4997 item = pattern_tx; 4998 item->type = (enum rte_flow_item_type) 4999 MLX5_RTE_FLOW_ITEM_TYPE_TAG; 5000 tag_item = (void *)addr; 5001 tag_item->data = flow_id; 5002 tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL); 5003 MLX5_ASSERT(tag_item->id > REG_NON); 5004 item->spec = tag_item; 5005 addr += sizeof(struct mlx5_rte_flow_item_tag); 5006 tag_item = (void *)addr; 5007 tag_item->data = UINT32_MAX; 5008 tag_item->id = UINT16_MAX; 5009 item->mask = tag_item; 5010 item->last = NULL; 5011 item++; 5012 item->type = RTE_FLOW_ITEM_TYPE_END; 5013 return 0; 5014 } 5015 5016 /** 5017 * The last stage of the splitting chain; it just creates the subflow 5018 * without any modification. 5019 * 5020 * @param[in] dev 5021 * Pointer to Ethernet device. 5022 * @param[in] flow 5023 * Parent flow structure pointer. 5024 * @param[in, out] sub_flow 5025 * Pointer to return the created subflow, may be NULL. 5026 * @param[in] attr 5027 * Flow rule attributes. 5028 * @param[in] items 5029 * Pattern specification (list terminated by the END pattern item). 5030 * @param[in] actions 5031 * Associated actions (list terminated by the END action). 5032 * @param[in] flow_split_info 5033 * Pointer to flow split info structure. 5034 * @param[out] error 5035 * Perform verbose error reporting if not NULL. 5036 * @return 5037 * 0 on success, negative value otherwise. 5038 */ 5039 static int 5040 flow_create_split_inner(struct rte_eth_dev *dev, 5041 struct rte_flow *flow, 5042 struct mlx5_flow **sub_flow, 5043 const struct rte_flow_attr *attr, 5044 const struct rte_flow_item items[], 5045 const struct rte_flow_action actions[], 5046 struct mlx5_flow_split_info *flow_split_info, 5047 struct rte_flow_error *error) 5048 { 5049 struct mlx5_flow *dev_flow; 5050 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); 5051 5052 dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, 5053 flow_split_info->flow_idx, error); 5054 if (!dev_flow) 5055 return -rte_errno; 5056 dev_flow->flow = flow; 5057 dev_flow->external = flow_split_info->external; 5058 dev_flow->skip_scale = flow_split_info->skip_scale; 5059 /* Subflow object was created, we must include it in the list. */ 5060 SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx, 5061 dev_flow->handle, next); 5062 /* 5063 * If dev_flow is one of the suffix flows, some actions in the suffix 5064 * flow may need user-defined item layer flags; pass the metadata rxq 5065 * mark flag to the suffix flow as well. 5066 */ 5067 if (flow_split_info->prefix_layers) 5068 dev_flow->handle->layers = flow_split_info->prefix_layers; 5069 if (flow_split_info->prefix_mark) { 5070 MLX5_ASSERT(wks); 5071 wks->mark = 1; 5072 } 5073 if (sub_flow) 5074 *sub_flow = dev_flow; 5075 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 5076 dev_flow->dv.table_id = flow_split_info->table_id; 5077 #endif 5078 return flow_drv_translate(dev, dev_flow, attr, items, actions, error); 5079 } 5080 5081 /** 5082 * Get the sub policy of a meter. 5083 * 5084 * @param[in] dev 5085 * Pointer to Ethernet device. 5086 * @param[in] flow 5087 * Parent flow structure pointer. 5088 * @param wks 5089 * Pointer to thread flow work space. 5090 * @param[in] attr 5091 * Flow rule attributes. 5092 * @param[in] items 5093 * Pattern specification (list terminated by the END pattern item). 5094 * @param[out] error 5095 * Perform verbose error reporting if not NULL.
5096 * 5097 * @return 5098 * Pointer to the meter sub-policy on success, NULL otherwise and rte_errno is set. 5099 */ 5100 static struct mlx5_flow_meter_sub_policy * 5101 get_meter_sub_policy(struct rte_eth_dev *dev, 5102 struct rte_flow *flow, 5103 struct mlx5_flow_workspace *wks, 5104 const struct rte_flow_attr *attr, 5105 const struct rte_flow_item items[], 5106 struct rte_flow_error *error) 5107 { 5108 struct mlx5_flow_meter_policy *policy; 5109 struct mlx5_flow_meter_policy *final_policy; 5110 struct mlx5_flow_meter_sub_policy *sub_policy = NULL; 5111 5112 policy = wks->policy; 5113 final_policy = policy->is_hierarchy ? wks->final_policy : policy; 5114 if (final_policy->is_rss || final_policy->is_queue) { 5115 struct mlx5_flow_rss_desc rss_desc_v[MLX5_MTR_RTE_COLORS]; 5116 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS] = {0}; 5117 uint32_t i; 5118 5119 /* 5120 * This is a temporary dev_flow; 5121 * no need to register any matcher for it in translate. 5122 */ 5123 wks->skip_matcher_reg = 1; 5124 for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) { 5125 struct mlx5_flow dev_flow = {0}; 5126 struct mlx5_flow_handle dev_handle = { {0} }; 5127 uint8_t fate = final_policy->act_cnt[i].fate_action; 5128 5129 if (fate == MLX5_FLOW_FATE_SHARED_RSS) { 5130 const struct rte_flow_action_rss *rss_act = 5131 final_policy->act_cnt[i].rss->conf; 5132 struct rte_flow_action rss_actions[2] = { 5133 [0] = { 5134 .type = RTE_FLOW_ACTION_TYPE_RSS, 5135 .conf = rss_act, 5136 }, 5137 [1] = { 5138 .type = RTE_FLOW_ACTION_TYPE_END, 5139 .conf = NULL, 5140 } 5141 }; 5142 5143 dev_flow.handle = &dev_handle; 5144 dev_flow.ingress = attr->ingress; 5145 dev_flow.flow = flow; 5146 dev_flow.external = 0; 5147 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 5148 dev_flow.dv.transfer = attr->transfer; 5149 #endif 5150 /* 5151 * Translate the RSS action to get the RSS hash fields. 5152 */ 5153 if (flow_drv_translate(dev, &dev_flow, attr, 5154 items, rss_actions, error)) 5155 goto exit; 5156 rss_desc_v[i] = wks->rss_desc; 5157 rss_desc_v[i].key_len = MLX5_RSS_HASH_KEY_LEN; 5158 rss_desc_v[i].hash_fields = 5159 dev_flow.hash_fields; 5160 rss_desc_v[i].queue_num = 5161 rss_desc_v[i].hash_fields ? 5162 rss_desc_v[i].queue_num : 1; 5163 rss_desc_v[i].tunnel = 5164 !!(dev_flow.handle->layers & 5165 MLX5_FLOW_LAYER_TUNNEL); 5166 /* Use the RSS queues in the containers. */ 5167 rss_desc_v[i].queue = 5168 (uint16_t *)(uintptr_t)rss_act->queue; 5169 rss_desc[i] = &rss_desc_v[i]; 5170 } else if (fate == MLX5_FLOW_FATE_QUEUE) { 5171 /* This is queue action. */ 5172 rss_desc_v[i] = wks->rss_desc; 5173 rss_desc_v[i].key_len = 0; 5174 rss_desc_v[i].hash_fields = 0; 5175 rss_desc_v[i].queue = 5176 &final_policy->act_cnt[i].queue; 5177 rss_desc_v[i].queue_num = 1; 5178 rss_desc[i] = &rss_desc_v[i]; 5179 } else { 5180 rss_desc[i] = NULL; 5181 } 5182 } 5183 sub_policy = flow_drv_meter_sub_policy_rss_prepare(dev, 5184 flow, policy, rss_desc); 5185 } else { 5186 enum mlx5_meter_domain mtr_domain = 5187 attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER : 5188 (attr->egress ? MLX5_MTR_DOMAIN_EGRESS : 5189 MLX5_MTR_DOMAIN_INGRESS); 5190 sub_policy = policy->sub_policys[mtr_domain][0]; 5191 } 5192 if (!sub_policy) 5193 rte_flow_error_set(error, EINVAL, 5194 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 5195 "Failed to get meter sub-policy."); 5196 exit: 5197 return sub_policy; 5198 } 5199 5200 /** 5201 * Split the meter flow. 5202 * 5203 * As the meter flow will be split into three subflows, the actions 5204 * other than the meter action only make sense if the meter accepts 5205 * the packet.
If the packet is to be dropped, no 5206 * additional actions should be taken. 5207 * 5208 * One special kind of action, which decapsulates the L3 tunnel 5209 * header, is placed in the prefix subflow so that the L3 tunnel 5210 * header is not taken into account. 5211 * 5212 * @param[in] dev 5213 * Pointer to Ethernet device. 5214 * @param[in] flow 5215 * Parent flow structure pointer. 5216 * @param wks 5217 * Pointer to thread flow work space. 5218 * @param[in] attr 5219 * Flow rule attributes. 5220 * @param[in] items 5221 * Pattern specification (list terminated by the END pattern item). 5222 * @param[out] sfx_items 5223 * Suffix flow match items (list terminated by the END pattern item). 5224 * @param[in] actions 5225 * Associated actions (list terminated by the END action). 5226 * @param[out] actions_sfx 5227 * Suffix flow actions. 5228 * @param[out] actions_pre 5229 * Prefix flow actions. 5230 * @param[out] mtr_flow_id 5231 * Pointer to meter flow id. 5232 * @param[out] error 5233 * Perform verbose error reporting if not NULL. 5234 * 5235 * @return 5236 * 0 on success, a negative errno value otherwise and rte_errno is set. 5237 */ 5238 static int 5239 flow_meter_split_prep(struct rte_eth_dev *dev, 5240 struct rte_flow *flow, 5241 struct mlx5_flow_workspace *wks, 5242 const struct rte_flow_attr *attr, 5243 const struct rte_flow_item items[], 5244 struct rte_flow_item sfx_items[], 5245 const struct rte_flow_action actions[], 5246 struct rte_flow_action actions_sfx[], 5247 struct rte_flow_action actions_pre[], 5248 uint32_t *mtr_flow_id, 5249 struct rte_flow_error *error) 5250 { 5251 struct mlx5_priv *priv = dev->data->dev_private; 5252 struct mlx5_flow_meter_info *fm = wks->fm; 5253 struct rte_flow_action *tag_action = NULL; 5254 struct rte_flow_item *tag_item; 5255 struct mlx5_rte_flow_action_set_tag *set_tag; 5256 const struct rte_flow_action_raw_encap *raw_encap; 5257 const struct rte_flow_action_raw_decap *raw_decap; 5258 struct mlx5_rte_flow_item_tag *tag_item_spec; 5259 struct mlx5_rte_flow_item_tag *tag_item_mask; 5260 uint32_t tag_id = 0; 5261 struct rte_flow_item *vlan_item_dst = NULL; 5262 const struct rte_flow_item *vlan_item_src = NULL; 5263 const struct rte_flow_item *orig_items = items; 5264 struct rte_flow_action *hw_mtr_action; 5265 struct rte_flow_action *action_pre_head = NULL; 5266 int32_t flow_src_port = priv->representor_id; 5267 bool mtr_first; 5268 uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0; 5269 uint8_t mtr_reg_bits = priv->mtr_reg_share ? 5270 MLX5_MTR_IDLE_BITS_IN_COLOR_REG : MLX5_REG_BITS; 5271 uint32_t flow_id = 0; 5272 uint32_t flow_id_reversed = 0; 5273 uint8_t flow_id_bits = 0; 5274 int shift; 5275 5276 /* Prepare the suffix subflow items.
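 * The first suffix item slot is reserved for the tag item that matches
 * the meter id/flow id register; PORT_ID items are copied verbatim and
 * VLAN items may later be turned into internal VLAN match items when a
 * push VLAN or set VLAN vid action is present.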
*/ 5277 tag_item = sfx_items++; 5278 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { 5279 struct mlx5_priv *port_priv; 5280 const struct rte_flow_item_port_id *pid_v; 5281 int item_type = items->type; 5282 5283 switch (item_type) { 5284 case RTE_FLOW_ITEM_TYPE_PORT_ID: 5285 pid_v = items->spec; 5286 MLX5_ASSERT(pid_v); 5287 port_priv = mlx5_port_to_eswitch_info(pid_v->id, false); 5288 if (!port_priv) 5289 return rte_flow_error_set(error, 5290 rte_errno, 5291 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, 5292 pid_v, 5293 "Failed to get port info."); 5294 flow_src_port = port_priv->representor_id; 5295 if (!fm->def_policy && wks->policy->is_hierarchy && 5296 flow_src_port != priv->representor_id) { 5297 if (flow_drv_mtr_hierarchy_rule_create(dev, 5298 flow, fm, 5299 flow_src_port, 5300 items, 5301 error)) 5302 return -rte_errno; 5303 } 5304 memcpy(sfx_items, items, sizeof(*sfx_items)); 5305 sfx_items++; 5306 break; 5307 case RTE_FLOW_ITEM_TYPE_VLAN: 5308 /* Determine if copy vlan item below. */ 5309 vlan_item_src = items; 5310 vlan_item_dst = sfx_items++; 5311 vlan_item_dst->type = RTE_FLOW_ITEM_TYPE_VOID; 5312 break; 5313 default: 5314 break; 5315 } 5316 } 5317 sfx_items->type = RTE_FLOW_ITEM_TYPE_END; 5318 sfx_items++; 5319 mtr_first = priv->sh->meter_aso_en && 5320 (attr->egress || (attr->transfer && flow_src_port != UINT16_MAX)); 5321 /* For ASO meter, meter must be before tag in TX direction. */ 5322 if (mtr_first) { 5323 action_pre_head = actions_pre++; 5324 /* Leave space for tag action. */ 5325 tag_action = actions_pre++; 5326 } 5327 /* Prepare the actions for prefix and suffix flow. */ 5328 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 5329 struct rte_flow_action *action_cur = NULL; 5330 5331 switch (actions->type) { 5332 case RTE_FLOW_ACTION_TYPE_METER: 5333 if (mtr_first) { 5334 action_cur = action_pre_head; 5335 } else { 5336 /* Leave space for tag action. */ 5337 tag_action = actions_pre++; 5338 action_cur = actions_pre++; 5339 } 5340 break; 5341 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: 5342 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: 5343 action_cur = actions_pre++; 5344 break; 5345 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 5346 raw_encap = actions->conf; 5347 if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE) 5348 action_cur = actions_pre++; 5349 break; 5350 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 5351 raw_decap = actions->conf; 5352 if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE) 5353 action_cur = actions_pre++; 5354 break; 5355 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 5356 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 5357 if (vlan_item_dst && vlan_item_src) { 5358 memcpy(vlan_item_dst, vlan_item_src, 5359 sizeof(*vlan_item_dst)); 5360 /* 5361 * Convert to internal match item, it is used 5362 * for vlan push and set vid. 5363 */ 5364 vlan_item_dst->type = (enum rte_flow_item_type) 5365 MLX5_RTE_FLOW_ITEM_TYPE_VLAN; 5366 } 5367 break; 5368 default: 5369 break; 5370 } 5371 if (!action_cur) 5372 action_cur = (fm->def_policy) ? 5373 actions_sfx++ : actions_pre++; 5374 memcpy(action_cur, actions, sizeof(struct rte_flow_action)); 5375 } 5376 /* Add end action to the actions. */ 5377 actions_sfx->type = RTE_FLOW_ACTION_TYPE_END; 5378 if (priv->sh->meter_aso_en) { 5379 /** 5380 * For ASO meter, need to add an extra jump action explicitly, 5381 * to jump from meter to policer table. 
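 * The jump target is resolved below from the table resource of the
 * meter sub-policy (the RSS/queue-prepared one, or the default-policy
 * sub-policy), so colored packets continue with the policy actions.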
5382 */ 5383 struct mlx5_flow_meter_sub_policy *sub_policy; 5384 struct mlx5_flow_tbl_data_entry *tbl_data; 5385 5386 if (!fm->def_policy) { 5387 sub_policy = get_meter_sub_policy(dev, flow, wks, 5388 attr, orig_items, 5389 error); 5390 if (!sub_policy) 5391 return -rte_errno; 5392 } else { 5393 enum mlx5_meter_domain mtr_domain = 5394 attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER : 5395 (attr->egress ? MLX5_MTR_DOMAIN_EGRESS : 5396 MLX5_MTR_DOMAIN_INGRESS); 5397 5398 sub_policy = 5399 &priv->sh->mtrmng->def_policy[mtr_domain]->sub_policy; 5400 } 5401 tbl_data = container_of(sub_policy->tbl_rsc, 5402 struct mlx5_flow_tbl_data_entry, tbl); 5403 hw_mtr_action = actions_pre++; 5404 hw_mtr_action->type = (enum rte_flow_action_type) 5405 MLX5_RTE_FLOW_ACTION_TYPE_JUMP; 5406 hw_mtr_action->conf = tbl_data->jump.action; 5407 } 5408 actions_pre->type = RTE_FLOW_ACTION_TYPE_END; 5409 actions_pre++; 5410 if (!tag_action) 5411 return rte_flow_error_set(error, ENOMEM, 5412 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 5413 NULL, "No tag action space."); 5414 if (!mtr_flow_id) { 5415 tag_action->type = RTE_FLOW_ACTION_TYPE_VOID; 5416 goto exit; 5417 } 5418 /* Only default-policy Meter creates mtr flow id. */ 5419 if (fm->def_policy) { 5420 mlx5_ipool_malloc(fm->flow_ipool, &tag_id); 5421 if (!tag_id) 5422 return rte_flow_error_set(error, ENOMEM, 5423 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 5424 "Failed to allocate meter flow id."); 5425 flow_id = tag_id - 1; 5426 flow_id_bits = (!flow_id) ? 1 : 5427 (MLX5_REG_BITS - __builtin_clz(flow_id)); 5428 if ((flow_id_bits + priv->sh->mtrmng->max_mtr_bits) > 5429 mtr_reg_bits) { 5430 mlx5_ipool_free(fm->flow_ipool, tag_id); 5431 return rte_flow_error_set(error, EINVAL, 5432 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 5433 "Meter flow id exceeds max limit."); 5434 } 5435 if (flow_id_bits > priv->sh->mtrmng->max_mtr_flow_bits) 5436 priv->sh->mtrmng->max_mtr_flow_bits = flow_id_bits; 5437 } 5438 /* Build tag actions and items for meter_id/meter flow_id. */ 5439 set_tag = (struct mlx5_rte_flow_action_set_tag *)actions_pre; 5440 tag_item_spec = (struct mlx5_rte_flow_item_tag *)sfx_items; 5441 tag_item_mask = tag_item_spec + 1; 5442 /* Both flow_id and meter_id share the same register. */ 5443 *set_tag = (struct mlx5_rte_flow_action_set_tag) { 5444 .id = (enum modify_reg)mlx5_flow_get_reg_id(dev, MLX5_MTR_ID, 5445 0, error), 5446 .offset = mtr_id_offset, 5447 .length = mtr_reg_bits, 5448 .data = flow->meter, 5449 }; 5450 /* 5451 * The color Reg bits used by flow_id are growing from 5452 * msb to lsb, so must do bit reverse for flow_id val in RegC. 5453 */ 5454 for (shift = 0; shift < flow_id_bits; shift++) 5455 flow_id_reversed = (flow_id_reversed << 1) | 5456 ((flow_id >> shift) & 0x1); 5457 set_tag->data |= 5458 flow_id_reversed << (mtr_reg_bits - flow_id_bits); 5459 tag_item_spec->id = set_tag->id; 5460 tag_item_spec->data = set_tag->data << mtr_id_offset; 5461 tag_item_mask->data = UINT32_MAX << mtr_id_offset; 5462 tag_action->type = (enum rte_flow_action_type) 5463 MLX5_RTE_FLOW_ACTION_TYPE_TAG; 5464 tag_action->conf = set_tag; 5465 tag_item->type = (enum rte_flow_item_type) 5466 MLX5_RTE_FLOW_ITEM_TYPE_TAG; 5467 tag_item->spec = tag_item_spec; 5468 tag_item->last = NULL; 5469 tag_item->mask = tag_item_mask; 5470 exit: 5471 if (mtr_flow_id) 5472 *mtr_flow_id = tag_id; 5473 return 0; 5474 } 5475 5476 /** 5477 * Split action list having QUEUE/RSS for metadata register copy. 
5478 * 5479 * Once Q/RSS action is detected in user's action list, the flow action 5480 * should be split in order to copy metadata registers, which will happen in 5481 * RX_CP_TBL like, 5482 * - CQE->flow_tag := reg_c[1] (MARK) 5483 * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META) 5484 * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL. 5485 * This is because the last action of each flow must be a terminal action 5486 * (QUEUE, RSS or DROP). 5487 * 5488 * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is 5489 * stored and kept in the mlx5_flow structure per each sub_flow. 5490 * 5491 * The Q/RSS action is replaced with, 5492 * - SET_TAG, setting the allocated flow ID to reg_c[2]. 5493 * And the following JUMP action is added at the end, 5494 * - JUMP, to RX_CP_TBL. 5495 * 5496 * A flow to perform remained Q/RSS action will be created in RX_ACT_TBL by 5497 * flow_create_split_metadata() routine. The flow will look like, 5498 * - If flow ID matches (reg_c[2]), perform Q/RSS. 5499 * 5500 * @param dev 5501 * Pointer to Ethernet device. 5502 * @param[out] split_actions 5503 * Pointer to store split actions to jump to CP_TBL. 5504 * @param[in] actions 5505 * Pointer to the list of original flow actions. 5506 * @param[in] qrss 5507 * Pointer to the Q/RSS action. 5508 * @param[in] actions_n 5509 * Number of original actions. 5510 * @param[in] mtr_sfx 5511 * Check if it is in meter suffix table. 5512 * @param[out] error 5513 * Perform verbose error reporting if not NULL. 5514 * 5515 * @return 5516 * non-zero unique flow_id on success, otherwise 0 and 5517 * error/rte_error are set. 5518 */ 5519 static uint32_t 5520 flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, 5521 struct rte_flow_action *split_actions, 5522 const struct rte_flow_action *actions, 5523 const struct rte_flow_action *qrss, 5524 int actions_n, int mtr_sfx, 5525 struct rte_flow_error *error) 5526 { 5527 struct mlx5_priv *priv = dev->data->dev_private; 5528 struct mlx5_rte_flow_action_set_tag *set_tag; 5529 struct rte_flow_action_jump *jump; 5530 const int qrss_idx = qrss - actions; 5531 uint32_t flow_id = 0; 5532 int ret = 0; 5533 5534 /* 5535 * Given actions will be split 5536 * - Replace QUEUE/RSS action with SET_TAG to set flow ID. 5537 * - Add jump to mreg CP_TBL. 5538 * As a result, there will be one more action. 5539 */ 5540 memcpy(split_actions, actions, sizeof(*split_actions) * actions_n); 5541 /* Count MLX5_RTE_FLOW_ACTION_TYPE_TAG. */ 5542 ++actions_n; 5543 set_tag = (void *)(split_actions + actions_n); 5544 /* 5545 * If we are not the meter suffix flow, add the tag action. 5546 * Since meter suffix flow already has the tag added. 5547 */ 5548 if (!mtr_sfx) { 5549 /* 5550 * Allocate the new subflow ID. This one is unique within 5551 * device and not shared with representors. Otherwise, 5552 * we would have to resolve multi-thread access synch 5553 * issue. Each flow on the shared device is appended 5554 * with source vport identifier, so the resulting 5555 * flows will be unique in the shared (by master and 5556 * representors) domain even if they have coinciding 5557 * IDs. 5558 */ 5559 mlx5_ipool_malloc(priv->sh->ipool 5560 [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &flow_id); 5561 if (!flow_id) 5562 return rte_flow_error_set(error, ENOMEM, 5563 RTE_FLOW_ERROR_TYPE_ACTION, 5564 NULL, "can't allocate id " 5565 "for split Q/RSS subflow"); 5566 /* Internal SET_TAG action to set flow ID. 
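 * The allocated ID is written to the COPY_MARK register (reg_c[2] as
 * described above) and is matched back by the suffix flow created in
 * RX_ACT_TBL by flow_create_split_metadata().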
*/ 5567 *set_tag = (struct mlx5_rte_flow_action_set_tag){ 5568 .data = flow_id, 5569 }; 5570 ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error); 5571 if (ret < 0) 5572 return ret; 5573 set_tag->id = ret; 5574 /* Construct new actions array. */ 5575 /* Replace QUEUE/RSS action. */ 5576 split_actions[qrss_idx] = (struct rte_flow_action){ 5577 .type = (enum rte_flow_action_type) 5578 MLX5_RTE_FLOW_ACTION_TYPE_TAG, 5579 .conf = set_tag, 5580 }; 5581 } else { 5582 /* 5583 * If we are the suffix flow of meter, tag already exist. 5584 * Set the QUEUE/RSS action to void. 5585 */ 5586 split_actions[qrss_idx].type = RTE_FLOW_ACTION_TYPE_VOID; 5587 } 5588 /* JUMP action to jump to mreg copy table (CP_TBL). */ 5589 jump = (void *)(set_tag + 1); 5590 *jump = (struct rte_flow_action_jump){ 5591 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, 5592 }; 5593 split_actions[actions_n - 2] = (struct rte_flow_action){ 5594 .type = RTE_FLOW_ACTION_TYPE_JUMP, 5595 .conf = jump, 5596 }; 5597 split_actions[actions_n - 1] = (struct rte_flow_action){ 5598 .type = RTE_FLOW_ACTION_TYPE_END, 5599 }; 5600 return flow_id; 5601 } 5602 5603 /** 5604 * Extend the given action list for Tx metadata copy. 5605 * 5606 * Copy the given action list to the ext_actions and add flow metadata register 5607 * copy action in order to copy reg_a set by WQE to reg_c[0]. 5608 * 5609 * @param[out] ext_actions 5610 * Pointer to the extended action list. 5611 * @param[in] actions 5612 * Pointer to the list of actions. 5613 * @param[in] actions_n 5614 * Number of actions in the list. 5615 * @param[out] error 5616 * Perform verbose error reporting if not NULL. 5617 * @param[in] encap_idx 5618 * The encap action index. 5619 * 5620 * @return 5621 * 0 on success, negative value otherwise 5622 */ 5623 static int 5624 flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, 5625 struct rte_flow_action *ext_actions, 5626 const struct rte_flow_action *actions, 5627 int actions_n, struct rte_flow_error *error, 5628 int encap_idx) 5629 { 5630 struct mlx5_flow_action_copy_mreg *cp_mreg = 5631 (struct mlx5_flow_action_copy_mreg *) 5632 (ext_actions + actions_n + 1); 5633 int ret; 5634 5635 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error); 5636 if (ret < 0) 5637 return ret; 5638 cp_mreg->dst = ret; 5639 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error); 5640 if (ret < 0) 5641 return ret; 5642 cp_mreg->src = ret; 5643 if (encap_idx != 0) 5644 memcpy(ext_actions, actions, sizeof(*ext_actions) * encap_idx); 5645 if (encap_idx == actions_n - 1) { 5646 ext_actions[actions_n - 1] = (struct rte_flow_action){ 5647 .type = (enum rte_flow_action_type) 5648 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 5649 .conf = cp_mreg, 5650 }; 5651 ext_actions[actions_n] = (struct rte_flow_action){ 5652 .type = RTE_FLOW_ACTION_TYPE_END, 5653 }; 5654 } else { 5655 ext_actions[encap_idx] = (struct rte_flow_action){ 5656 .type = (enum rte_flow_action_type) 5657 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 5658 .conf = cp_mreg, 5659 }; 5660 memcpy(ext_actions + encap_idx + 1, actions + encap_idx, 5661 sizeof(*ext_actions) * (actions_n - encap_idx)); 5662 } 5663 return 0; 5664 } 5665 5666 /** 5667 * Check the match action from the action list. 5668 * 5669 * @param[in] actions 5670 * Pointer to the list of actions. 5671 * @param[in] attr 5672 * Flow rule attributes. 5673 * @param[in] action 5674 * The action to be check if exist. 5675 * @param[out] match_action_pos 5676 * Pointer to the position of the matched action if exists, otherwise is -1. 
5677 * @param[out] qrss_action_pos 5678 * Pointer to the position of the Queue/RSS action if exists, otherwise is -1. 5679 * @param[out] modify_after_mirror 5680 * Pointer to the flag of modify action after FDB mirroring. 5681 * 5682 * @return 5683 * > 0 the total number of actions. 5684 * 0 if not found match action in action list. 5685 */ 5686 static int 5687 flow_check_match_action(const struct rte_flow_action actions[], 5688 const struct rte_flow_attr *attr, 5689 enum rte_flow_action_type action, 5690 int *match_action_pos, int *qrss_action_pos, 5691 int *modify_after_mirror) 5692 { 5693 const struct rte_flow_action_sample *sample; 5694 const struct rte_flow_action_raw_decap *decap; 5695 int actions_n = 0; 5696 uint32_t ratio = 0; 5697 int sub_type = 0; 5698 int flag = 0; 5699 int fdb_mirror = 0; 5700 5701 *match_action_pos = -1; 5702 *qrss_action_pos = -1; 5703 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 5704 if (actions->type == action) { 5705 flag = 1; 5706 *match_action_pos = actions_n; 5707 } 5708 switch (actions->type) { 5709 case RTE_FLOW_ACTION_TYPE_QUEUE: 5710 case RTE_FLOW_ACTION_TYPE_RSS: 5711 *qrss_action_pos = actions_n; 5712 break; 5713 case RTE_FLOW_ACTION_TYPE_SAMPLE: 5714 sample = actions->conf; 5715 ratio = sample->ratio; 5716 sub_type = ((const struct rte_flow_action *) 5717 (sample->actions))->type; 5718 if (ratio == 1 && attr->transfer) 5719 fdb_mirror = 1; 5720 break; 5721 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: 5722 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: 5723 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: 5724 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: 5725 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: 5726 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: 5727 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: 5728 case RTE_FLOW_ACTION_TYPE_SET_TP_DST: 5729 case RTE_FLOW_ACTION_TYPE_DEC_TTL: 5730 case RTE_FLOW_ACTION_TYPE_SET_TTL: 5731 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ: 5732 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ: 5733 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK: 5734 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK: 5735 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP: 5736 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: 5737 case RTE_FLOW_ACTION_TYPE_FLAG: 5738 case RTE_FLOW_ACTION_TYPE_MARK: 5739 case RTE_FLOW_ACTION_TYPE_SET_META: 5740 case RTE_FLOW_ACTION_TYPE_SET_TAG: 5741 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: 5742 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 5743 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 5744 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: 5745 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: 5746 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: 5747 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD: 5748 case RTE_FLOW_ACTION_TYPE_METER: 5749 if (fdb_mirror) 5750 *modify_after_mirror = 1; 5751 break; 5752 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 5753 decap = actions->conf; 5754 while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID) 5755 ; 5756 actions_n++; 5757 if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { 5758 const struct rte_flow_action_raw_encap *encap = 5759 actions->conf; 5760 if (decap->size <= 5761 MLX5_ENCAPSULATION_DECISION_SIZE && 5762 encap->size > 5763 MLX5_ENCAPSULATION_DECISION_SIZE) 5764 /* L3 encap. */ 5765 break; 5766 } 5767 if (fdb_mirror) 5768 *modify_after_mirror = 1; 5769 break; 5770 default: 5771 break; 5772 } 5773 actions_n++; 5774 } 5775 if (flag && fdb_mirror && !*modify_after_mirror) { 5776 /* FDB mirroring uses the destination array to implement 5777 * instead of FLOW_SAMPLER object. 
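 * The destination array object cannot carry the sampled sub-actions,
 * hence the split is only taken when the sample action list is empty
 * (checked below through sub_type).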
5778 */
5779 if (sub_type != RTE_FLOW_ACTION_TYPE_END)
5780 flag = 0;
5781 }
5782 /* Count RTE_FLOW_ACTION_TYPE_END. */
5783 return flag ? actions_n + 1 : 0;
5784 }
5785
5786 #define SAMPLE_SUFFIX_ITEM 2
5787
5788 /**
5789 * Split the sample flow.
5790 *
5791 * The sample flow is split into two sub flows: the prefix sub flow
5792 * keeps the sample action, the other actions move to a new suffix flow.
5793 *
5794 * A unique tag ID is also added with a tag action in the sample flow;
5795 * the same tag ID is used as the match in the suffix flow.
5796 *
5797 * @param dev
5798 * Pointer to Ethernet device.
5799 * @param[in] add_tag
5800 * Add extra tag action flag.
5801 * @param[out] sfx_items
5802 * Suffix flow match items (list terminated by the END pattern item).
5803 * @param[in] actions
5804 * Associated actions (list terminated by the END action).
5805 * @param[out] actions_sfx
5806 * Suffix flow actions.
5807 * @param[out] actions_pre
5808 * Prefix flow actions.
5809 * @param[in] actions_n
5810 * The total number of actions.
5811 * @param[in] sample_action_pos
5812 * The sample action position.
5813 * @param[in] qrss_action_pos
5814 * The Queue/RSS action position.
5815 * @param[in] jump_table
5816 * Add extra jump action flag.
5817 * @param[out] error
5818 * Perform verbose error reporting if not NULL.
5819 *
5820 * @return
5821 * 0 or a unique flow_id on success, a negative errno value
5822 * otherwise and rte_errno is set.
5823 */
5824 static int
5825 flow_sample_split_prep(struct rte_eth_dev *dev,
5826 int add_tag,
5827 struct rte_flow_item sfx_items[],
5828 const struct rte_flow_action actions[],
5829 struct rte_flow_action actions_sfx[],
5830 struct rte_flow_action actions_pre[],
5831 int actions_n,
5832 int sample_action_pos,
5833 int qrss_action_pos,
5834 int jump_table,
5835 struct rte_flow_error *error)
5836 {
5837 struct mlx5_priv *priv = dev->data->dev_private;
5838 struct mlx5_rte_flow_action_set_tag *set_tag;
5839 struct mlx5_rte_flow_item_tag *tag_spec;
5840 struct mlx5_rte_flow_item_tag *tag_mask;
5841 struct rte_flow_action_jump *jump_action;
5842 uint32_t tag_id = 0;
5843 int append_index = 0;
5844 int set_tag_idx = -1;
5845 int index;
5846 int ret;
5847
5848 if (sample_action_pos < 0)
5849 return rte_flow_error_set(error, EINVAL,
5850 RTE_FLOW_ERROR_TYPE_ACTION,
5851 NULL, "invalid position of sample "
5852 "action in list");
5853 /* Prepare the actions for prefix and suffix flow. */
5854 if (add_tag) {
5855 /* Compute the index of the newly added tag action so that it
5856 * precedes the PUSH_VLAN or ENCAP action.
5857 */
5858 const struct rte_flow_action_raw_encap *raw_encap;
5859 const struct rte_flow_action *action = actions;
5860 int encap_idx;
5861 int action_idx = 0;
5862 int raw_decap_idx = -1;
5863 int push_vlan_idx = -1;
5864 for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
5865 switch (action->type) {
5866 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5867 raw_decap_idx = action_idx;
5868 break;
5869 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5870 raw_encap = action->conf;
5871 if (raw_encap->size >
5872 MLX5_ENCAPSULATION_DECISION_SIZE) {
5873 encap_idx = raw_decap_idx != -1 ?
5874 raw_decap_idx : action_idx; 5875 if (encap_idx < sample_action_pos && 5876 push_vlan_idx == -1) 5877 set_tag_idx = encap_idx; 5878 } 5879 break; 5880 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 5881 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 5882 encap_idx = action_idx; 5883 if (encap_idx < sample_action_pos && 5884 push_vlan_idx == -1) 5885 set_tag_idx = encap_idx; 5886 break; 5887 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 5888 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 5889 push_vlan_idx = action_idx; 5890 if (push_vlan_idx < sample_action_pos) 5891 set_tag_idx = action_idx; 5892 break; 5893 default: 5894 break; 5895 } 5896 action_idx++; 5897 } 5898 } 5899 /* Prepare the actions for prefix and suffix flow. */ 5900 if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) { 5901 index = qrss_action_pos; 5902 /* Put the preceding the Queue/RSS action into prefix flow. */ 5903 if (index != 0) 5904 memcpy(actions_pre, actions, 5905 sizeof(struct rte_flow_action) * index); 5906 /* Put others preceding the sample action into prefix flow. */ 5907 if (sample_action_pos > index + 1) 5908 memcpy(actions_pre + index, actions + index + 1, 5909 sizeof(struct rte_flow_action) * 5910 (sample_action_pos - index - 1)); 5911 index = sample_action_pos - 1; 5912 /* Put Queue/RSS action into Suffix flow. */ 5913 memcpy(actions_sfx, actions + qrss_action_pos, 5914 sizeof(struct rte_flow_action)); 5915 actions_sfx++; 5916 } else if (add_tag && set_tag_idx >= 0) { 5917 if (set_tag_idx > 0) 5918 memcpy(actions_pre, actions, 5919 sizeof(struct rte_flow_action) * set_tag_idx); 5920 memcpy(actions_pre + set_tag_idx + 1, actions + set_tag_idx, 5921 sizeof(struct rte_flow_action) * 5922 (sample_action_pos - set_tag_idx)); 5923 index = sample_action_pos; 5924 } else { 5925 index = sample_action_pos; 5926 if (index != 0) 5927 memcpy(actions_pre, actions, 5928 sizeof(struct rte_flow_action) * index); 5929 } 5930 /* For CX5, add an extra tag action for NIC-RX and E-Switch ingress. 5931 * For CX6DX and above, metadata registers Cx preserve their value, 5932 * add an extra tag action for NIC-RX and E-Switch Domain. 5933 */ 5934 if (add_tag) { 5935 /* Prepare the prefix tag action. */ 5936 append_index++; 5937 set_tag = (void *)(actions_pre + actions_n + append_index); 5938 ret = mlx5_flow_get_reg_id(dev, MLX5_SAMPLE_ID, 0, error); 5939 /* Trust VF/SF on CX5 not supported meter so that the reserved 5940 * metadata regC is REG_NON, back to use application tag 5941 * index 0. 5942 */ 5943 if (unlikely(ret == REG_NON)) 5944 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error); 5945 if (ret < 0) 5946 return ret; 5947 mlx5_ipool_malloc(priv->sh->ipool 5948 [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &tag_id); 5949 *set_tag = (struct mlx5_rte_flow_action_set_tag) { 5950 .id = ret, 5951 .data = tag_id, 5952 }; 5953 /* Prepare the suffix subflow items. */ 5954 tag_spec = (void *)(sfx_items + SAMPLE_SUFFIX_ITEM); 5955 tag_spec->data = tag_id; 5956 tag_spec->id = set_tag->id; 5957 tag_mask = tag_spec + 1; 5958 tag_mask->data = UINT32_MAX; 5959 sfx_items[0] = (struct rte_flow_item){ 5960 .type = (enum rte_flow_item_type) 5961 MLX5_RTE_FLOW_ITEM_TYPE_TAG, 5962 .spec = tag_spec, 5963 .last = NULL, 5964 .mask = tag_mask, 5965 }; 5966 sfx_items[1] = (struct rte_flow_item){ 5967 .type = (enum rte_flow_item_type) 5968 RTE_FLOW_ITEM_TYPE_END, 5969 }; 5970 /* Prepare the tag action in prefix subflow. */ 5971 set_tag_idx = (set_tag_idx == -1) ? 
index : set_tag_idx; 5972 actions_pre[set_tag_idx] = 5973 (struct rte_flow_action){ 5974 .type = (enum rte_flow_action_type) 5975 MLX5_RTE_FLOW_ACTION_TYPE_TAG, 5976 .conf = set_tag, 5977 }; 5978 /* Update next sample position due to add one tag action */ 5979 index += 1; 5980 } 5981 /* Copy the sample action into prefix flow. */ 5982 memcpy(actions_pre + index, actions + sample_action_pos, 5983 sizeof(struct rte_flow_action)); 5984 index += 1; 5985 /* For the modify action after the sample action in E-Switch mirroring, 5986 * Add the extra jump action in prefix subflow and jump into the next 5987 * table, then do the modify action in the new table. 5988 */ 5989 if (jump_table) { 5990 /* Prepare the prefix jump action. */ 5991 append_index++; 5992 jump_action = (void *)(actions_pre + actions_n + append_index); 5993 jump_action->group = jump_table; 5994 actions_pre[index++] = 5995 (struct rte_flow_action){ 5996 .type = (enum rte_flow_action_type) 5997 RTE_FLOW_ACTION_TYPE_JUMP, 5998 .conf = jump_action, 5999 }; 6000 } 6001 actions_pre[index] = (struct rte_flow_action){ 6002 .type = (enum rte_flow_action_type) 6003 RTE_FLOW_ACTION_TYPE_END, 6004 }; 6005 /* Put the actions after sample into Suffix flow. */ 6006 memcpy(actions_sfx, actions + sample_action_pos + 1, 6007 sizeof(struct rte_flow_action) * 6008 (actions_n - sample_action_pos - 1)); 6009 return tag_id; 6010 } 6011 6012 /** 6013 * The splitting for metadata feature. 6014 * 6015 * - Q/RSS action on NIC Rx should be split in order to pass by 6016 * the mreg copy table (RX_CP_TBL) and then it jumps to the 6017 * action table (RX_ACT_TBL) which has the split Q/RSS action. 6018 * 6019 * - All the actions on NIC Tx should have a mreg copy action to 6020 * copy reg_a from WQE to reg_c[0]. 6021 * 6022 * @param dev 6023 * Pointer to Ethernet device. 6024 * @param[in] flow 6025 * Parent flow structure pointer. 6026 * @param[in] attr 6027 * Flow rule attributes. 6028 * @param[in] items 6029 * Pattern specification (list terminated by the END pattern item). 6030 * @param[in] actions 6031 * Associated actions (list terminated by the END action). 6032 * @param[in] flow_split_info 6033 * Pointer to flow split info structure. 6034 * @param[out] error 6035 * Perform verbose error reporting if not NULL. 6036 * @return 6037 * 0 on success, negative value otherwise 6038 */ 6039 static int 6040 flow_create_split_metadata(struct rte_eth_dev *dev, 6041 struct rte_flow *flow, 6042 const struct rte_flow_attr *attr, 6043 const struct rte_flow_item items[], 6044 const struct rte_flow_action actions[], 6045 struct mlx5_flow_split_info *flow_split_info, 6046 struct rte_flow_error *error) 6047 { 6048 struct mlx5_priv *priv = dev->data->dev_private; 6049 struct mlx5_sh_config *config = &priv->sh->config; 6050 const struct rte_flow_action *qrss = NULL; 6051 struct rte_flow_action *ext_actions = NULL; 6052 struct mlx5_flow *dev_flow = NULL; 6053 uint32_t qrss_id = 0; 6054 int mtr_sfx = 0; 6055 size_t act_size; 6056 int actions_n; 6057 int encap_idx; 6058 int ret; 6059 6060 /* Check whether extensive metadata feature is engaged. */ 6061 if (!config->dv_flow_en || 6062 config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 6063 !mlx5_flow_ext_mreg_supported(dev)) 6064 return flow_create_split_inner(dev, flow, NULL, attr, items, 6065 actions, flow_split_info, error); 6066 actions_n = flow_parse_metadata_split_actions_info(actions, &qrss, 6067 &encap_idx); 6068 if (qrss) { 6069 /* Exclude hairpin flows from splitting. 
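 * Hairpin packets are forwarded to the peer Tx queue in hardware and
 * never reach software, so the MARK/META copy performed in RX_CP_TBL
 * would have no effect for them.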
*/ 6070 if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) { 6071 const struct rte_flow_action_queue *queue; 6072 6073 queue = qrss->conf; 6074 if (mlx5_rxq_is_hairpin(dev, queue->index)) 6075 qrss = NULL; 6076 } else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) { 6077 const struct rte_flow_action_rss *rss; 6078 6079 rss = qrss->conf; 6080 if (mlx5_rxq_is_hairpin(dev, rss->queue[0])) 6081 qrss = NULL; 6082 } 6083 } 6084 if (qrss) { 6085 /* Check if it is in meter suffix table. */ 6086 mtr_sfx = attr->group == (attr->transfer ? 6087 (MLX5_FLOW_TABLE_LEVEL_METER - 1) : 6088 MLX5_FLOW_TABLE_LEVEL_METER); 6089 /* 6090 * Q/RSS action on NIC Rx should be split in order to pass by 6091 * the mreg copy table (RX_CP_TBL) and then it jumps to the 6092 * action table (RX_ACT_TBL) which has the split Q/RSS action. 6093 */ 6094 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) + 6095 sizeof(struct rte_flow_action_set_tag) + 6096 sizeof(struct rte_flow_action_jump); 6097 ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0, 6098 SOCKET_ID_ANY); 6099 if (!ext_actions) 6100 return rte_flow_error_set(error, ENOMEM, 6101 RTE_FLOW_ERROR_TYPE_ACTION, 6102 NULL, "no memory to split " 6103 "metadata flow"); 6104 /* 6105 * Create the new actions list with removed Q/RSS action 6106 * and appended set tag and jump to register copy table 6107 * (RX_CP_TBL). We should preallocate unique tag ID here 6108 * in advance, because it is needed for set tag action. 6109 */ 6110 qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions, 6111 qrss, actions_n, 6112 mtr_sfx, error); 6113 if (!mtr_sfx && !qrss_id) { 6114 ret = -rte_errno; 6115 goto exit; 6116 } 6117 } else if (attr->egress && !attr->transfer) { 6118 /* 6119 * All the actions on NIC Tx should have a metadata register 6120 * copy action to copy reg_a from WQE to reg_c[meta] 6121 */ 6122 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) + 6123 sizeof(struct mlx5_flow_action_copy_mreg); 6124 ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0, 6125 SOCKET_ID_ANY); 6126 if (!ext_actions) 6127 return rte_flow_error_set(error, ENOMEM, 6128 RTE_FLOW_ERROR_TYPE_ACTION, 6129 NULL, "no memory to split " 6130 "metadata flow"); 6131 /* Create the action list appended with copy register. */ 6132 ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions, 6133 actions_n, error, encap_idx); 6134 if (ret < 0) 6135 goto exit; 6136 } 6137 /* Add the unmodified original or prefix subflow. */ 6138 ret = flow_create_split_inner(dev, flow, &dev_flow, attr, 6139 items, ext_actions ? ext_actions : 6140 actions, flow_split_info, error); 6141 if (ret < 0) 6142 goto exit; 6143 MLX5_ASSERT(dev_flow); 6144 if (qrss) { 6145 const struct rte_flow_attr q_attr = { 6146 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, 6147 .ingress = 1, 6148 }; 6149 /* Internal PMD action to set register. */ 6150 struct mlx5_rte_flow_item_tag q_tag_spec = { 6151 .data = qrss_id, 6152 .id = REG_NON, 6153 }; 6154 struct rte_flow_item q_items[] = { 6155 { 6156 .type = (enum rte_flow_item_type) 6157 MLX5_RTE_FLOW_ITEM_TYPE_TAG, 6158 .spec = &q_tag_spec, 6159 .last = NULL, 6160 .mask = NULL, 6161 }, 6162 { 6163 .type = RTE_FLOW_ITEM_TYPE_END, 6164 }, 6165 }; 6166 struct rte_flow_action q_actions[] = { 6167 { 6168 .type = qrss->type, 6169 .conf = qrss->conf, 6170 }, 6171 { 6172 .type = RTE_FLOW_ACTION_TYPE_END, 6173 }, 6174 }; 6175 uint64_t layers = flow_get_prefix_layer_flags(dev_flow); 6176 6177 /* 6178 * Configure the tag item only if there is no meter subflow. 
6179 * Since tag is already marked in the meter suffix subflow 6180 * we can just use the meter suffix items as is. 6181 */ 6182 if (qrss_id) { 6183 /* Not meter subflow. */ 6184 MLX5_ASSERT(!mtr_sfx); 6185 /* 6186 * Put unique id in prefix flow due to it is destroyed 6187 * after suffix flow and id will be freed after there 6188 * is no actual flows with this id and identifier 6189 * reallocation becomes possible (for example, for 6190 * other flows in other threads). 6191 */ 6192 dev_flow->handle->split_flow_id = qrss_id; 6193 ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, 6194 error); 6195 if (ret < 0) 6196 goto exit; 6197 q_tag_spec.id = ret; 6198 } 6199 dev_flow = NULL; 6200 /* Add suffix subflow to execute Q/RSS. */ 6201 flow_split_info->prefix_layers = layers; 6202 flow_split_info->prefix_mark = 0; 6203 flow_split_info->table_id = 0; 6204 ret = flow_create_split_inner(dev, flow, &dev_flow, 6205 &q_attr, mtr_sfx ? items : 6206 q_items, q_actions, 6207 flow_split_info, error); 6208 if (ret < 0) 6209 goto exit; 6210 /* qrss ID should be freed if failed. */ 6211 qrss_id = 0; 6212 MLX5_ASSERT(dev_flow); 6213 } 6214 6215 exit: 6216 /* 6217 * We do not destroy the partially created sub_flows in case of error. 6218 * These ones are included into parent flow list and will be destroyed 6219 * by flow_drv_destroy. 6220 */ 6221 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], 6222 qrss_id); 6223 mlx5_free(ext_actions); 6224 return ret; 6225 } 6226 6227 /** 6228 * Create meter internal drop flow with the original pattern. 6229 * 6230 * @param dev 6231 * Pointer to Ethernet device. 6232 * @param[in] flow 6233 * Parent flow structure pointer. 6234 * @param[in] attr 6235 * Flow rule attributes. 6236 * @param[in] items 6237 * Pattern specification (list terminated by the END pattern item). 6238 * @param[in] flow_split_info 6239 * Pointer to flow split info structure. 6240 * @param[in] fm 6241 * Pointer to flow meter structure. 6242 * @param[out] error 6243 * Perform verbose error reporting if not NULL. 6244 * @return 6245 * 0 on success, negative value otherwise 6246 */ 6247 static uint32_t 6248 flow_meter_create_drop_flow_with_org_pattern(struct rte_eth_dev *dev, 6249 struct rte_flow *flow, 6250 const struct rte_flow_attr *attr, 6251 const struct rte_flow_item items[], 6252 struct mlx5_flow_split_info *flow_split_info, 6253 struct mlx5_flow_meter_info *fm, 6254 struct rte_flow_error *error) 6255 { 6256 struct mlx5_flow *dev_flow = NULL; 6257 struct rte_flow_attr drop_attr = *attr; 6258 struct rte_flow_action drop_actions[3]; 6259 struct mlx5_flow_split_info drop_split_info = *flow_split_info; 6260 6261 MLX5_ASSERT(fm->drop_cnt); 6262 drop_actions[0].type = 6263 (enum rte_flow_action_type)MLX5_RTE_FLOW_ACTION_TYPE_COUNT; 6264 drop_actions[0].conf = (void *)(uintptr_t)fm->drop_cnt; 6265 drop_actions[1].type = RTE_FLOW_ACTION_TYPE_DROP; 6266 drop_actions[1].conf = NULL; 6267 drop_actions[2].type = RTE_FLOW_ACTION_TYPE_END; 6268 drop_actions[2].conf = NULL; 6269 drop_split_info.external = false; 6270 drop_split_info.skip_scale |= 1 << MLX5_SCALE_FLOW_GROUP_BIT; 6271 drop_split_info.table_id = MLX5_MTR_TABLE_ID_DROP; 6272 drop_attr.group = MLX5_FLOW_TABLE_LEVEL_METER; 6273 return flow_create_split_inner(dev, flow, &dev_flow, 6274 &drop_attr, items, drop_actions, 6275 &drop_split_info, error); 6276 } 6277 6278 /** 6279 * The splitting for meter feature. 6280 * 6281 * - The meter flow will be split to two flows as prefix and 6282 * suffix flow. 
The packets are processed further only if they pass the prefix
6283 * meter action.
6284 *
6285 * - Reg_C_5 is used to match the packet between the prefix and
6286 * suffix flow.
6287 *
6288 * @param dev
6289 * Pointer to Ethernet device.
6290 * @param[in] flow
6291 * Parent flow structure pointer.
6292 * @param[in] attr
6293 * Flow rule attributes.
6294 * @param[in] items
6295 * Pattern specification (list terminated by the END pattern item).
6296 * @param[in] actions
6297 * Associated actions (list terminated by the END action).
6298 * @param[in] flow_split_info
6299 * Pointer to flow split info structure.
6300 * @param[out] error
6301 * Perform verbose error reporting if not NULL.
6302 * @return
6303 * 0 on success, negative value otherwise
6304 */
6305 static int
6306 flow_create_split_meter(struct rte_eth_dev *dev,
6307 struct rte_flow *flow,
6308 const struct rte_flow_attr *attr,
6309 const struct rte_flow_item items[],
6310 const struct rte_flow_action actions[],
6311 struct mlx5_flow_split_info *flow_split_info,
6312 struct rte_flow_error *error)
6313 {
6314 struct mlx5_priv *priv = dev->data->dev_private;
6315 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
6316 struct rte_flow_action *sfx_actions = NULL;
6317 struct rte_flow_action *pre_actions = NULL;
6318 struct rte_flow_item *sfx_items = NULL;
6319 struct mlx5_flow *dev_flow = NULL;
6320 struct rte_flow_attr sfx_attr = *attr;
6321 struct mlx5_flow_meter_info *fm = NULL;
6322 uint8_t skip_scale_restore;
6323 bool has_mtr = false;
6324 bool has_modify = false;
6325 bool set_mtr_reg = true;
6326 bool is_mtr_hierarchy = false;
6327 uint32_t meter_id = 0;
6328 uint32_t mtr_idx = 0;
6329 uint32_t mtr_flow_id = 0;
6330 size_t act_size;
6331 size_t item_size;
6332 int actions_n = 0;
6333 int ret = 0;
6334
6335 if (priv->mtr_en)
6336 actions_n = flow_check_meter_action(dev, actions, &has_mtr,
6337 &has_modify, &meter_id);
6338 if (has_mtr) {
6339 if (flow->meter) {
6340 fm = flow_dv_meter_find_by_idx(priv, flow->meter);
6341 if (!fm)
6342 return rte_flow_error_set(error, EINVAL,
6343 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6344 NULL, "Meter not found.");
6345 } else {
6346 fm = mlx5_flow_meter_find(priv, meter_id, &mtr_idx);
6347 if (!fm)
6348 return rte_flow_error_set(error, EINVAL,
6349 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6350 NULL, "Meter not found.");
6351 ret = mlx5_flow_meter_attach(priv, fm,
6352 &sfx_attr, error);
6353 if (ret)
6354 return -rte_errno;
6355 flow->meter = mtr_idx;
6356 }
6357 MLX5_ASSERT(wks);
6358 wks->fm = fm;
6359 if (!fm->def_policy) {
6360 wks->policy = mlx5_flow_meter_policy_find(dev,
6361 fm->policy_id,
6362 NULL);
6363 MLX5_ASSERT(wks->policy);
6364 if (wks->policy->mark)
6365 wks->mark = 1;
6366 if (wks->policy->is_hierarchy) {
6367 wks->final_policy =
6368 mlx5_flow_meter_hierarchy_get_final_policy(dev,
6369 wks->policy);
6370 if (!wks->final_policy)
6371 return rte_flow_error_set(error,
6372 EINVAL,
6373 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6374 "Failed to find terminal policy of hierarchy.");
6375 is_mtr_hierarchy = true;
6376 }
6377 }
6378 /*
6379 * If it isn't a default-policy meter, it is not a meter
6380 * hierarchy, and either
6381 * 1. there is no action in the flow that modifies the
6382 * packet (modify/encap/decap etc.), or
6383 * 2. no drop count is needed for this meter,
6384 * then there is no need to use regC to save the meter ID anymore.
6385 */
6386 if (!fm->def_policy && !is_mtr_hierarchy &&
6387 (!has_modify || !fm->drop_cnt))
6388 set_mtr_reg = false;
6389 /* Prefix actions: meter, decap, encap, tag, jump, end.
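 * That is up to six entries in addition to the original action list,
 * which the (actions_n + 6) term in the allocation below accounts for,
 * plus one internal SET_TAG configuration placed after the arrays.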
*/
6390 act_size = sizeof(struct rte_flow_action) * (actions_n + 6) +
6391 sizeof(struct mlx5_rte_flow_action_set_tag);
6392 /* Suffix items: tag, vlan, port id, end. */
6393 #define METER_SUFFIX_ITEM 4
6394 item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
6395 sizeof(struct mlx5_rte_flow_item_tag) * 2;
6396 sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + item_size),
6397 0, SOCKET_ID_ANY);
6398 if (!sfx_actions)
6399 return rte_flow_error_set(error, ENOMEM,
6400 RTE_FLOW_ERROR_TYPE_ACTION,
6401 NULL, "no memory to split "
6402 "meter flow");
6403 sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
6404 act_size);
6405 /* There is no suffix flow for a meter with a non-default policy. */
6406 if (!fm->def_policy)
6407 pre_actions = sfx_actions + 1;
6408 else
6409 pre_actions = sfx_actions + actions_n;
6410 ret = flow_meter_split_prep(dev, flow, wks, &sfx_attr,
6411 items, sfx_items, actions,
6412 sfx_actions, pre_actions,
6413 (set_mtr_reg ? &mtr_flow_id : NULL),
6414 error);
6415 if (ret) {
6416 ret = -rte_errno;
6417 goto exit;
6418 }
6419 /* Add the prefix subflow. */
6420 skip_scale_restore = flow_split_info->skip_scale;
6421 flow_split_info->skip_scale |=
6422 1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT;
6423 ret = flow_create_split_inner(dev, flow, &dev_flow,
6424 attr, items, pre_actions,
6425 flow_split_info, error);
6426 flow_split_info->skip_scale = skip_scale_restore;
6427 if (ret) {
6428 if (mtr_flow_id)
6429 mlx5_ipool_free(fm->flow_ipool, mtr_flow_id);
6430 ret = -rte_errno;
6431 goto exit;
6432 }
6433 if (mtr_flow_id) {
6434 dev_flow->handle->split_flow_id = mtr_flow_id;
6435 dev_flow->handle->is_meter_flow_id = 1;
6436 }
6437 if (!fm->def_policy) {
6438 if (!set_mtr_reg && fm->drop_cnt)
6439 ret =
6440 flow_meter_create_drop_flow_with_org_pattern(dev, flow,
6441 &sfx_attr, items,
6442 flow_split_info,
6443 fm, error);
6444 goto exit;
6445 }
6446 /* Set the suffix group attr. */
6447 sfx_attr.group = sfx_attr.transfer ?
6448 (MLX5_FLOW_TABLE_LEVEL_METER - 1) :
6449 MLX5_FLOW_TABLE_LEVEL_METER;
6450 flow_split_info->prefix_layers =
6451 flow_get_prefix_layer_flags(dev_flow);
6452 flow_split_info->prefix_mark |= wks->mark;
6453 flow_split_info->table_id = MLX5_MTR_TABLE_ID_SUFFIX;
6454 }
6455 /* Add the suffix subflow, or the original flow when no meter is used. */
6456 ret = flow_create_split_metadata(dev, flow,
6457 &sfx_attr, sfx_items ?
6458 sfx_items : items,
6459 sfx_actions ? sfx_actions : actions,
6460 flow_split_info, error);
6461 exit:
6462 if (sfx_actions)
6463 mlx5_free(sfx_actions);
6464 return ret;
6465 }
6466
6467 /**
6468 * The splitting for sample feature.
6469 *
6470 * Once Sample action is detected in the action list, the flow actions should
6471 * be split into prefix sub flow and suffix sub flow.
6472 *
6473 * The original items remain in the prefix sub flow, all actions preceding the
6474 * sample action and the sample action itself will be copied to the prefix
6475 * sub flow, and the actions following the sample action will be copied to the
6476 * suffix sub flow, where the Queue action is always located.
6477 *
6478 * In order to make the packet from the prefix sub flow match in the suffix
6479 * sub flow, an extra tag action is added into the prefix sub flow, and the
6480 * suffix sub flow uses a tag item with the unique flow ID.
6481 *
6482 * @param dev
6483 * Pointer to Ethernet device.
6484 * @param[in] flow
6485 * Parent flow structure pointer.
6486 * @param[in] attr
6487 * Flow rule attributes.
6488 * @param[in] items 6489 * Pattern specification (list terminated by the END pattern item). 6490 * @param[in] actions 6491 * Associated actions (list terminated by the END action). 6492 * @param[in] flow_split_info 6493 * Pointer to flow split info structure. 6494 * @param[out] error 6495 * Perform verbose error reporting if not NULL. 6496 * @return 6497 * 0 on success, negative value otherwise 6498 */ 6499 static int 6500 flow_create_split_sample(struct rte_eth_dev *dev, 6501 struct rte_flow *flow, 6502 const struct rte_flow_attr *attr, 6503 const struct rte_flow_item items[], 6504 const struct rte_flow_action actions[], 6505 struct mlx5_flow_split_info *flow_split_info, 6506 struct rte_flow_error *error) 6507 { 6508 struct mlx5_priv *priv = dev->data->dev_private; 6509 struct rte_flow_action *sfx_actions = NULL; 6510 struct rte_flow_action *pre_actions = NULL; 6511 struct rte_flow_item *sfx_items = NULL; 6512 struct mlx5_flow *dev_flow = NULL; 6513 struct rte_flow_attr sfx_attr = *attr; 6514 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 6515 struct mlx5_flow_dv_sample_resource *sample_res; 6516 struct mlx5_flow_tbl_data_entry *sfx_tbl_data; 6517 struct mlx5_flow_tbl_resource *sfx_tbl; 6518 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); 6519 #endif 6520 size_t act_size; 6521 size_t item_size; 6522 uint32_t fdb_tx = 0; 6523 int32_t tag_id = 0; 6524 int actions_n = 0; 6525 int sample_action_pos; 6526 int qrss_action_pos; 6527 int add_tag = 0; 6528 int modify_after_mirror = 0; 6529 uint16_t jump_table = 0; 6530 const uint32_t next_ft_step = 1; 6531 int ret = 0; 6532 6533 if (priv->sampler_en) 6534 actions_n = flow_check_match_action(actions, attr, 6535 RTE_FLOW_ACTION_TYPE_SAMPLE, 6536 &sample_action_pos, &qrss_action_pos, 6537 &modify_after_mirror); 6538 if (actions_n) { 6539 /* The prefix actions must includes sample, tag, end. */ 6540 act_size = sizeof(struct rte_flow_action) * (actions_n * 2 + 1) 6541 + sizeof(struct mlx5_rte_flow_action_set_tag); 6542 item_size = sizeof(struct rte_flow_item) * SAMPLE_SUFFIX_ITEM + 6543 sizeof(struct mlx5_rte_flow_item_tag) * 2; 6544 sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + 6545 item_size), 0, SOCKET_ID_ANY); 6546 if (!sfx_actions) 6547 return rte_flow_error_set(error, ENOMEM, 6548 RTE_FLOW_ERROR_TYPE_ACTION, 6549 NULL, "no memory to split " 6550 "sample flow"); 6551 /* The representor_id is UINT16_MAX for uplink. */ 6552 fdb_tx = (attr->transfer && priv->representor_id != UINT16_MAX); 6553 /* 6554 * When reg_c_preserve is set, metadata registers Cx preserve 6555 * their value even through packet duplication. 6556 */ 6557 add_tag = (!fdb_tx || 6558 priv->sh->cdev->config.hca_attr.reg_c_preserve); 6559 if (add_tag) 6560 sfx_items = (struct rte_flow_item *)((char *)sfx_actions 6561 + act_size); 6562 if (modify_after_mirror) 6563 jump_table = attr->group * MLX5_FLOW_TABLE_FACTOR + 6564 next_ft_step; 6565 pre_actions = sfx_actions + actions_n; 6566 tag_id = flow_sample_split_prep(dev, add_tag, sfx_items, 6567 actions, sfx_actions, 6568 pre_actions, actions_n, 6569 sample_action_pos, 6570 qrss_action_pos, jump_table, 6571 error); 6572 if (tag_id < 0 || (add_tag && !tag_id)) { 6573 ret = -rte_errno; 6574 goto exit; 6575 } 6576 if (modify_after_mirror) 6577 flow_split_info->skip_scale = 6578 1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT; 6579 /* Add the prefix subflow. 
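 * The prefix carries the original items together with the actions
 * prepared by flow_sample_split_prep(): everything up to and including
 * the sample action, plus the optional internal tag and jump.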
*/
6580 ret = flow_create_split_inner(dev, flow, &dev_flow, attr,
6581 items, pre_actions,
6582 flow_split_info, error);
6583 if (ret) {
6584 ret = -rte_errno;
6585 goto exit;
6586 }
6587 dev_flow->handle->split_flow_id = tag_id;
6588 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
6589 if (!modify_after_mirror) {
6590 /* Set the sfx group attr. */
6591 sample_res = (struct mlx5_flow_dv_sample_resource *)
6592 dev_flow->dv.sample_res;
6593 sfx_tbl = (struct mlx5_flow_tbl_resource *)
6594 sample_res->normal_path_tbl;
6595 sfx_tbl_data = container_of(sfx_tbl,
6596 struct mlx5_flow_tbl_data_entry,
6597 tbl);
6598 sfx_attr.group = sfx_attr.transfer ?
6599 (sfx_tbl_data->level - 1) : sfx_tbl_data->level;
6600 } else {
6601 MLX5_ASSERT(attr->transfer);
6602 sfx_attr.group = jump_table;
6603 }
6604 flow_split_info->prefix_layers =
6605 flow_get_prefix_layer_flags(dev_flow);
6606 MLX5_ASSERT(wks);
6607 flow_split_info->prefix_mark |= wks->mark;
6608 /* The suffix group level has already been scaled with the factor; set
6609 * MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1 to avoid scaling
6610 * again in translation.
6611 */
6612 flow_split_info->skip_scale = 1 << MLX5_SCALE_FLOW_GROUP_BIT;
6613 #endif
6614 }
6615 /* Add the suffix subflow. */
6616 ret = flow_create_split_meter(dev, flow, &sfx_attr,
6617 sfx_items ? sfx_items : items,
6618 sfx_actions ? sfx_actions : actions,
6619 flow_split_info, error);
6620 exit:
6621 if (sfx_actions)
6622 mlx5_free(sfx_actions);
6623 return ret;
6624 }
6625
6626 /**
6627 * Split the flow into a set of subflows. The splitters might be linked
6628 * in a chain, like this:
6629 * flow_create_split_outer() calls:
6630 * flow_create_split_meter() calls:
6631 * flow_create_split_metadata(meter_subflow_0) calls:
6632 * flow_create_split_inner(metadata_subflow_0)
6633 * flow_create_split_inner(metadata_subflow_1)
6634 * flow_create_split_inner(metadata_subflow_2)
6635 * flow_create_split_metadata(meter_subflow_1) calls:
6636 * flow_create_split_inner(metadata_subflow_0)
6637 * flow_create_split_inner(metadata_subflow_1)
6638 * flow_create_split_inner(metadata_subflow_2)
6639 *
6640 * This provides a flexible way to add new levels of flow splitting.
6641 * All successfully created subflows are included in the
6642 * parent flow dev_flow list.
6643 *
6644 * @param dev
6645 * Pointer to Ethernet device.
6646 * @param[in] flow
6647 * Parent flow structure pointer.
6648 * @param[in] attr
6649 * Flow rule attributes.
6650 * @param[in] items
6651 * Pattern specification (list terminated by the END pattern item).
6652 * @param[in] actions
6653 * Associated actions (list terminated by the END action).
6654 * @param[in] flow_split_info
6655 * Pointer to flow split info structure.
6656 * @param[out] error
6657 * Perform verbose error reporting if not NULL.
6658 * @return
6659 * 0 on success, negative value otherwise
6660 */
6661 static int
6662 flow_create_split_outer(struct rte_eth_dev *dev,
6663 struct rte_flow *flow,
6664 const struct rte_flow_attr *attr,
6665 const struct rte_flow_item items[],
6666 const struct rte_flow_action actions[],
6667 struct mlx5_flow_split_info *flow_split_info,
6668 struct rte_flow_error *error)
6669 {
6670 int ret;
6671
6672 ret = flow_create_split_sample(dev, flow, attr, items,
6673 actions, flow_split_info, error);
6674 MLX5_ASSERT(ret <= 0);
6675 return ret;
6676 }
6677
6678 static inline struct mlx5_flow_tunnel *
6679 flow_tunnel_from_rule(const struct mlx5_flow *flow)
6680 {
6681 struct mlx5_flow_tunnel *tunnel;
6682
6683 #pragma GCC diagnostic push
6684 #pragma GCC diagnostic ignored "-Wcast-qual"
6685 tunnel = (typeof(tunnel))flow->tunnel;
6686 #pragma GCC diagnostic pop
6687
6688 return tunnel;
6689 }
6690
6691 /**
6692 * Adjust flow RSS workspace if needed.
6693 *
6694 * @param wks
6695 * Pointer to thread flow work space.
6696 * @param rss_desc
6697 * Pointer to RSS descriptor.
6698 * @param[in] nrssq_num
6699 * New RSS queue number.
6700 *
6701 * @return
6702 * 0 on success, -1 otherwise and rte_errno is set.
6703 */
6704 static int
6705 flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks,
6706 struct mlx5_flow_rss_desc *rss_desc,
6707 uint32_t nrssq_num)
6708 {
6709 uint16_t *queue;
6710
6711 if (likely(nrssq_num <= wks->rssq_num))
6712 return 0;
6713 queue = realloc(rss_desc->queue,
6714 sizeof(*rss_desc->queue) * RTE_ALIGN(nrssq_num, 2));
6715 if (!queue) {
6716 rte_errno = ENOMEM;
6717 return -1;
6718 }
6719 rss_desc->queue = queue;
6720 wks->rssq_num = RTE_ALIGN(nrssq_num, 2);
6721 return 0;
6722 }
6723
6724 /**
6725 * Create a flow and store it in the indexed pool of the given type.
6726 *
6727 * @param dev
6728 * Pointer to Ethernet device.
6729 * @param type
6730 * Flow type (MLX5_FLOW_TYPE_*); it is the caller's responsibility
6731 * to track the created flow, e.g. through the returned flow index.
6732 * @param[in] attr
6733 * Flow rule attributes.
6734 * @param[in] items
6735 * Pattern specification (list terminated by the END pattern item).
6736 * @param[in] actions
6737 * Associated actions (list terminated by the END action).
6738 * @param[in] external
6739 * This flow rule is created by a request external to the PMD.
6740 * @param[out] error
6741 * Perform verbose error reporting if not NULL.
6742 * @return
6743 * A flow index on success, 0 otherwise and rte_errno is set.
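 *
 * A minimal internal usage sketch (hypothetical attr/items/actions
 * prepared by the caller):
 *
 * @code
 * struct rte_flow_error err;
 * uint32_t idx;
 *
 * idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr, items,
 *                        actions, true, &err);
 * if (!idx)
 *     DRV_LOG(ERR, "flow create failed: %s",
 *             err.message ? err.message : "(no message)");
 * @endcode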
6744 */ 6745 static uint32_t 6746 flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type, 6747 const struct rte_flow_attr *attr, 6748 const struct rte_flow_item items[], 6749 const struct rte_flow_action original_actions[], 6750 bool external, struct rte_flow_error *error) 6751 { 6752 struct mlx5_priv *priv = dev->data->dev_private; 6753 struct rte_flow *flow = NULL; 6754 struct mlx5_flow *dev_flow; 6755 const struct rte_flow_action_rss *rss = NULL; 6756 struct mlx5_translated_action_handle 6757 indir_actions[MLX5_MAX_INDIRECT_ACTIONS]; 6758 int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS; 6759 union { 6760 struct mlx5_flow_expand_rss buf; 6761 uint8_t buffer[4096]; 6762 } expand_buffer; 6763 union { 6764 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; 6765 uint8_t buffer[2048]; 6766 } actions_rx; 6767 union { 6768 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; 6769 uint8_t buffer[2048]; 6770 } actions_hairpin_tx; 6771 union { 6772 struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS]; 6773 uint8_t buffer[2048]; 6774 } items_tx; 6775 struct mlx5_flow_expand_rss *buf = &expand_buffer.buf; 6776 struct mlx5_flow_rss_desc *rss_desc; 6777 const struct rte_flow_action *p_actions_rx; 6778 uint32_t i; 6779 uint32_t idx = 0; 6780 int hairpin_flow; 6781 struct rte_flow_attr attr_tx = { .priority = 0 }; 6782 const struct rte_flow_action *actions; 6783 struct rte_flow_action *translated_actions = NULL; 6784 struct mlx5_flow_tunnel *tunnel; 6785 struct tunnel_default_miss_ctx default_miss_ctx = { 0, }; 6786 struct mlx5_flow_workspace *wks = mlx5_flow_push_thread_workspace(); 6787 struct mlx5_flow_split_info flow_split_info = { 6788 .external = !!external, 6789 .skip_scale = 0, 6790 .flow_idx = 0, 6791 .prefix_mark = 0, 6792 .prefix_layers = 0, 6793 .table_id = 0 6794 }; 6795 int ret; 6796 6797 MLX5_ASSERT(wks); 6798 rss_desc = &wks->rss_desc; 6799 ret = flow_action_handles_translate(dev, original_actions, 6800 indir_actions, 6801 &indir_actions_n, 6802 &translated_actions, error); 6803 if (ret < 0) { 6804 MLX5_ASSERT(translated_actions == NULL); 6805 return 0; 6806 } 6807 actions = translated_actions ? translated_actions : original_actions; 6808 p_actions_rx = actions; 6809 hairpin_flow = flow_check_hairpin_split(dev, attr, actions); 6810 ret = flow_drv_validate(dev, attr, items, p_actions_rx, 6811 external, hairpin_flow, error); 6812 if (ret < 0) 6813 goto error_before_hairpin_split; 6814 flow = mlx5_ipool_zmalloc(priv->flows[type], &idx); 6815 if (!flow) { 6816 rte_errno = ENOMEM; 6817 goto error_before_hairpin_split; 6818 } 6819 if (hairpin_flow > 0) { 6820 if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) { 6821 rte_errno = EINVAL; 6822 goto error_before_hairpin_split; 6823 } 6824 flow_hairpin_split(dev, actions, actions_rx.actions, 6825 actions_hairpin_tx.actions, items_tx.items, 6826 idx); 6827 p_actions_rx = actions_rx.actions; 6828 } 6829 flow_split_info.flow_idx = idx; 6830 flow->drv_type = flow_get_drv_type(dev, attr); 6831 MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN && 6832 flow->drv_type < MLX5_FLOW_TYPE_MAX); 6833 memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue)); 6834 /* RSS Action only works on NIC RX domain */ 6835 if (attr->ingress && !attr->transfer) 6836 rss = flow_get_rss_action(dev, p_actions_rx); 6837 if (rss) { 6838 if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num)) 6839 return 0; 6840 /* 6841 * The following information is required by 6842 * mlx5_flow_hashfields_adjust() in advance. 
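 * (the RSS level selects inner/outer hashing and the types drive the
 * hash field selection for every expanded pattern).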
6843 */ 6844 rss_desc->level = rss->level; 6845 /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */ 6846 rss_desc->types = !rss->types ? RTE_ETH_RSS_IP : rss->types; 6847 } 6848 flow->dev_handles = 0; 6849 if (rss && rss->types) { 6850 unsigned int graph_root; 6851 6852 graph_root = find_graph_root(rss->level); 6853 ret = mlx5_flow_expand_rss(buf, sizeof(expand_buffer.buffer), 6854 items, rss->types, 6855 mlx5_support_expansion, graph_root); 6856 MLX5_ASSERT(ret > 0 && 6857 (unsigned int)ret < sizeof(expand_buffer.buffer)); 6858 if (rte_log_can_log(mlx5_logtype, RTE_LOG_DEBUG)) { 6859 for (i = 0; i < buf->entries; ++i) 6860 mlx5_dbg__print_pattern(buf->entry[i].pattern); 6861 } 6862 } else { 6863 buf->entries = 1; 6864 buf->entry[0].pattern = (void *)(uintptr_t)items; 6865 } 6866 rss_desc->shared_rss = flow_get_shared_rss_action(dev, indir_actions, 6867 indir_actions_n); 6868 for (i = 0; i < buf->entries; ++i) { 6869 /* Initialize flow split data. */ 6870 flow_split_info.prefix_layers = 0; 6871 flow_split_info.prefix_mark = 0; 6872 flow_split_info.skip_scale = 0; 6873 /* 6874 * The splitter may create multiple dev_flows, 6875 * depending on configuration. In the simplest 6876 * case it just creates unmodified original flow. 6877 */ 6878 ret = flow_create_split_outer(dev, flow, attr, 6879 buf->entry[i].pattern, 6880 p_actions_rx, &flow_split_info, 6881 error); 6882 if (ret < 0) 6883 goto error; 6884 if (is_flow_tunnel_steer_rule(wks->flows[0].tof_type)) { 6885 ret = flow_tunnel_add_default_miss(dev, flow, attr, 6886 p_actions_rx, 6887 idx, 6888 wks->flows[0].tunnel, 6889 &default_miss_ctx, 6890 error); 6891 if (ret < 0) { 6892 mlx5_free(default_miss_ctx.queue); 6893 goto error; 6894 } 6895 } 6896 } 6897 /* Create the tx flow. */ 6898 if (hairpin_flow) { 6899 attr_tx.group = MLX5_HAIRPIN_TX_TABLE; 6900 attr_tx.ingress = 0; 6901 attr_tx.egress = 1; 6902 dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items, 6903 actions_hairpin_tx.actions, 6904 idx, error); 6905 if (!dev_flow) 6906 goto error; 6907 dev_flow->flow = flow; 6908 dev_flow->external = 0; 6909 SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx, 6910 dev_flow->handle, next); 6911 ret = flow_drv_translate(dev, dev_flow, &attr_tx, 6912 items_tx.items, 6913 actions_hairpin_tx.actions, error); 6914 if (ret < 0) 6915 goto error; 6916 } 6917 /* 6918 * Update the metadata register copy table. If extensive 6919 * metadata feature is enabled and registers are supported 6920 * we might create the extra rte_flow for each unique 6921 * MARK/FLAG action ID. 6922 * 6923 * The table is updated for ingress Flows only, because 6924 * the egress Flows belong to the different device and 6925 * copy table should be updated in peer NIC Rx domain. 6926 */ 6927 if (attr->ingress && 6928 (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) { 6929 ret = flow_mreg_update_copy_table(dev, flow, actions, error); 6930 if (ret) 6931 goto error; 6932 } 6933 /* 6934 * If the flow is external (from application) OR device is started, 6935 * OR mreg discover, then apply immediately. 
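 * "mreg discover" refers to the internal copy-table rules (group
 * MLX5_FLOW_MREG_CP_TABLE_GROUP with the lowest priority indicator)
 * that may be inserted before the port is started.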
*/
6937 if (external || dev->data->dev_started ||
6938 (attr->group == MLX5_FLOW_MREG_CP_TABLE_GROUP &&
6939 attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)) {
6940 ret = flow_drv_apply(dev, flow, error);
6941 if (ret < 0)
6942 goto error;
6943 }
6944 flow->type = type;
6945 flow_rxq_flags_set(dev, flow);
6946 rte_free(translated_actions);
6947 tunnel = flow_tunnel_from_rule(wks->flows);
6948 if (tunnel) {
6949 flow->tunnel = 1;
6950 flow->tunnel_id = tunnel->tunnel_id;
6951 __atomic_add_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED);
6952 mlx5_free(default_miss_ctx.queue);
6953 }
6954 mlx5_flow_pop_thread_workspace();
6955 return idx;
6956 error:
6957 MLX5_ASSERT(flow);
6958 ret = rte_errno; /* Save rte_errno before cleanup. */
6959 flow_mreg_del_copy_action(dev, flow);
6960 flow_drv_destroy(dev, flow);
6961 if (rss_desc->shared_rss)
6962 __atomic_sub_fetch(&((struct mlx5_shared_action_rss *)
6963 mlx5_ipool_get
6964 (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
6965 rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);
6966 mlx5_ipool_free(priv->flows[type], idx);
6967 rte_errno = ret; /* Restore rte_errno. */
6970 mlx5_flow_pop_thread_workspace();
6971 error_before_hairpin_split:
6972 rte_free(translated_actions);
6973 return 0;
6974 }
6975
6976 /**
6977 * Create a dedicated flow rule on e-switch table 0 (root table), to direct all
6978 * incoming packets to table 1.
6979 *
6980 * Other flow rules, requested for group n, will be created in
6981 * e-switch table n+1.
6982 * A jump action to e-switch group n will be translated to jump to table n+1.
6983 *
6984 * Used when working in switchdev mode, to utilise advantages of table 1
6985 * and above.
6986 *
6987 * @param dev
6988 * Pointer to Ethernet device.
6989 *
6990 * @return
6991 * Pointer to flow on success, NULL otherwise and rte_errno is set.
6992 */
6993 struct rte_flow *
6994 mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
6995 {
6996 const struct rte_flow_attr attr = {
6997 .group = 0,
6998 .priority = 0,
6999 .ingress = 1,
7000 .egress = 0,
7001 .transfer = 1,
7002 };
7003 const struct rte_flow_item pattern = {
7004 .type = RTE_FLOW_ITEM_TYPE_END,
7005 };
7006 struct rte_flow_action_jump jump = {
7007 .group = 1,
7008 };
7009 const struct rte_flow_action actions[] = {
7010 {
7011 .type = RTE_FLOW_ACTION_TYPE_JUMP,
7012 .conf = &jump,
7013 },
7014 {
7015 .type = RTE_FLOW_ACTION_TYPE_END,
7016 },
7017 };
7018 struct rte_flow_error error;
7019
7020 return (void *)(uintptr_t)flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
7021 &attr, &pattern,
7022 actions, false, &error);
7023 }
7024
7025 /**
7026 * Create a dedicated flow rule on e-switch table 1 that matches the ESW
7027 * manager and SQ number and directs all packets to the peer vport.
7028 *
7029 * @param dev
7030 * Pointer to Ethernet device.
7031 * @param txq
7032 * Txq index.
7033 *
7034 * @return
7035 * Flow ID on success, 0 otherwise and rte_errno is set.
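 *
 * @note Two rules are created below: a group 0 rule matching the ESW
 * manager port and the Tx queue, jumping to group 1, and a group 1
 * rule redirecting the packets back to this port (the peer vport).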
 */
uint32_t
mlx5_flow_create_devx_sq_miss_flow(struct rte_eth_dev *dev, uint32_t txq)
{
	struct rte_flow_attr attr = {
		.group = 0,
		.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
		.ingress = 1,
		.egress = 0,
		.transfer = 1,
	};
	struct rte_flow_item_port_id port_spec = {
		.id = MLX5_PORT_ESW_MGR,
	};
	struct mlx5_rte_flow_item_tx_queue txq_spec = {
		.queue = txq,
	};
	struct rte_flow_item pattern[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_PORT_ID,
			.spec = &port_spec,
		},
		{
			.type = (enum rte_flow_item_type)
				MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
			.spec = &txq_spec,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
	};
	struct rte_flow_action_jump jump = {
		.group = 1,
	};
	struct rte_flow_action_port_id port = {
		.id = dev->data->port_id,
	};
	struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_JUMP,
			.conf = &jump,
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		},
	};
	struct rte_flow_error error;

	/*
	 * Creates group 0, highest priority jump flow.
	 * Matches txq to bypass kernel packets.
	 */
	if (flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern, actions,
			     false, &error) == 0)
		return 0;
	/* Create group 1, lowest priority redirect flow for txq. */
	attr.group = 1;
	actions[0].conf = &port;
	actions[0].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
	return flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern,
				actions, false, &error);
}

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
int
mlx5_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action original_actions[],
		   struct rte_flow_error *error)
{
	int hairpin_flow;
	struct mlx5_translated_action_handle
		indir_actions[MLX5_MAX_INDIRECT_ACTIONS];
	int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS;
	const struct rte_flow_action *actions;
	struct rte_flow_action *translated_actions = NULL;
	int ret = flow_action_handles_translate(dev, original_actions,
						indir_actions,
						&indir_actions_n,
						&translated_actions, error);

	if (ret)
		return ret;
	actions = translated_actions ? translated_actions : original_actions;
	hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
	ret = flow_drv_validate(dev, attr, items, actions,
				true, hairpin_flow, error);
	rte_free(translated_actions);
	return ret;
}

/**
 * Create a flow.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
struct rte_flow *
mlx5_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (priv->sh->config.dv_flow_en == 2) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "Flow non-Q creation not supported");
		return NULL;
	}
	/*
	 * If the device is not started yet, it is not allowed to create a
	 * flow from the application. PMD default flows and traffic control
	 * flows are not affected.
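	 *
	 * (Those internal flows are created through flow_list_create() with
	 * type MLX5_FLOW_TYPE_CTL, so they do not pass through this check.)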
 */
	if (unlikely(!dev->data->dev_started)) {
		DRV_LOG(DEBUG, "port %u is not started when "
			"inserting a flow", dev->data->port_id);
		rte_flow_error_set(error, ENODEV,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "port not started");
		return NULL;
	}

	return (void *)(uintptr_t)flow_list_create(dev, MLX5_FLOW_TYPE_GEN,
						   attr, items, actions,
						   true, error);
}

/**
 * Destroy a flow in a list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param type
 *   Flow type to be destroyed.
 * @param[in] flow_idx
 *   Index of flow to destroy.
 */
static void
flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
		  uint32_t flow_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow *flow = mlx5_ipool_get(priv->flows[type], flow_idx);

	if (!flow)
		return;
	MLX5_ASSERT(flow->type == type);
	/*
	 * Update RX queue flags only if port is started, otherwise it is
	 * already clean.
	 */
	if (dev->data->dev_started)
		flow_rxq_flags_trim(dev, flow);
	flow_drv_destroy(dev, flow);
	if (flow->tunnel) {
		struct mlx5_flow_tunnel *tunnel;

		tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
		RTE_VERIFY(tunnel);
		if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
			mlx5_flow_tunnel_free(dev, tunnel);
	}
	flow_mreg_del_copy_action(dev, flow);
	mlx5_ipool_free(priv->flows[type], flow_idx);
}

/**
 * Destroy all flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param type
 *   Flow type to be flushed.
 * @param active
 *   True if the flush is invoked actively (on port stop); only an active
 *   flush is logged.
 */
void
mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
		     bool active)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t num_flushed = 0, fidx = 1;
	struct rte_flow *flow;

#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	if (priv->sh->config.dv_flow_en == 2 &&
	    type == MLX5_FLOW_TYPE_GEN) {
		flow_hw_q_flow_flush(dev, NULL);
		return;
	}
#endif

	MLX5_IPOOL_FOREACH(priv->flows[type], fidx, flow) {
		flow_list_destroy(dev, type, fidx);
		num_flushed++;
	}
	if (active) {
		DRV_LOG(INFO, "port %u: %u flows flushed before stopping",
			dev->data->port_id, num_flushed);
	}
}

/**
 * Stop all default actions for flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_flow_stop_default(struct rte_eth_dev *dev)
{
	flow_mreg_del_default_copy_action(dev);
	flow_rxq_flags_clear(dev);
}

/**
 * Start all default actions for flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_start_default(struct rte_eth_dev *dev)
{
	struct rte_flow_error error;

	/* Make sure default copy action (reg_c[0] -> reg_b) is created. */
	return flow_mreg_add_default_copy_action(dev, &error);
}

/**
 * Release thread-specific flow workspace data (registered as the TLS key
 * destructor).
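 *
 * @param data
 *   Pointer to the head of the per-thread workspace list.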
 */
void
flow_release_workspace(void *data)
{
	struct mlx5_flow_workspace *wks = data;
	struct mlx5_flow_workspace *next;

	while (wks) {
		next = wks->next;
		free(wks->rss_desc.queue);
		free(wks);
		wks = next;
	}
}

/**
 * Get thread specific current flow workspace.
 *
 * @return pointer to thread specific flow workspace data, NULL on error.
 */
struct mlx5_flow_workspace*
mlx5_flow_get_thread_workspace(void)
{
	struct mlx5_flow_workspace *data;

	data = mlx5_flow_os_get_specific_workspace();
	MLX5_ASSERT(data && data->inuse);
	if (!data || !data->inuse)
		DRV_LOG(ERR, "flow workspace not initialized.");
	return data;
}

/**
 * Allocate and init new flow workspace.
 *
 * @return pointer to flow workspace data, NULL on error.
 */
static struct mlx5_flow_workspace*
flow_alloc_thread_workspace(void)
{
	struct mlx5_flow_workspace *data = calloc(1, sizeof(*data));

	if (!data) {
		DRV_LOG(ERR, "Failed to allocate flow workspace "
			"memory.");
		return NULL;
	}
	data->rss_desc.queue = calloc(1,
			sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
	if (!data->rss_desc.queue)
		goto err;
	data->rssq_num = MLX5_RSSQ_DEFAULT_NUM;
	return data;
err:
	free(data->rss_desc.queue);
	free(data);
	return NULL;
}

/**
 * Get new thread specific flow workspace.
 *
 * If the current workspace is in use, create a new one and set it as current.
 *
 * @return pointer to thread specific flow workspace data, NULL on error.
 */
static struct mlx5_flow_workspace*
mlx5_flow_push_thread_workspace(void)
{
	struct mlx5_flow_workspace *curr;
	struct mlx5_flow_workspace *data;

	curr = mlx5_flow_os_get_specific_workspace();
	if (!curr) {
		data = flow_alloc_thread_workspace();
		if (!data)
			return NULL;
	} else if (!curr->inuse) {
		data = curr;
	} else if (curr->next) {
		data = curr->next;
	} else {
		data = flow_alloc_thread_workspace();
		if (!data)
			return NULL;
		curr->next = data;
		data->prev = curr;
	}
	data->inuse = 1;
	data->flow_idx = 0;
	/* Set as current workspace */
	if (mlx5_flow_os_set_specific_workspace(data))
		DRV_LOG(ERR, "Failed to set flow workspace to thread.");
	return data;
}

/**
 * Close current thread specific flow workspace.
 *
 * If a previous workspace is available, set it as current.
 */
static void
mlx5_flow_pop_thread_workspace(void)
{
	struct mlx5_flow_workspace *data = mlx5_flow_get_thread_workspace();

	if (!data)
		return;
	if (!data->inuse) {
		DRV_LOG(ERR, "Failed to close unused flow workspace.");
		return;
	}
	data->inuse = 0;
	if (!data->prev)
		return;
	if (mlx5_flow_os_set_specific_workspace(data->prev))
		DRV_LOG(ERR, "Failed to set flow workspace to thread.");
}

/**
 * Verify the flow list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return the number of flows not released.
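 *
 * A typical use is leak detection on device close (illustrative only):
 * @code
 *   int remaining = mlx5_flow_verify(dev);
 *   if (remaining)
 *       DRV_LOG(WARNING, "port %u: %d flows still referenced",
 *               dev->data->port_id, remaining);
 * @endcode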
7408 */ 7409 int 7410 mlx5_flow_verify(struct rte_eth_dev *dev __rte_unused) 7411 { 7412 struct mlx5_priv *priv = dev->data->dev_private; 7413 struct rte_flow *flow; 7414 uint32_t idx = 0; 7415 int ret = 0, i; 7416 7417 for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) { 7418 MLX5_IPOOL_FOREACH(priv->flows[i], idx, flow) { 7419 DRV_LOG(DEBUG, "port %u flow %p still referenced", 7420 dev->data->port_id, (void *)flow); 7421 ret++; 7422 } 7423 } 7424 return ret; 7425 } 7426 7427 /** 7428 * Enable default hairpin egress flow. 7429 * 7430 * @param dev 7431 * Pointer to Ethernet device. 7432 * @param queue 7433 * The queue index. 7434 * 7435 * @return 7436 * 0 on success, a negative errno value otherwise and rte_errno is set. 7437 */ 7438 int 7439 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, 7440 uint32_t queue) 7441 { 7442 const struct rte_flow_attr attr = { 7443 .egress = 1, 7444 .priority = 0, 7445 }; 7446 struct mlx5_rte_flow_item_tx_queue queue_spec = { 7447 .queue = queue, 7448 }; 7449 struct mlx5_rte_flow_item_tx_queue queue_mask = { 7450 .queue = UINT32_MAX, 7451 }; 7452 struct rte_flow_item items[] = { 7453 { 7454 .type = (enum rte_flow_item_type) 7455 MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE, 7456 .spec = &queue_spec, 7457 .last = NULL, 7458 .mask = &queue_mask, 7459 }, 7460 { 7461 .type = RTE_FLOW_ITEM_TYPE_END, 7462 }, 7463 }; 7464 struct rte_flow_action_jump jump = { 7465 .group = MLX5_HAIRPIN_TX_TABLE, 7466 }; 7467 struct rte_flow_action actions[2]; 7468 uint32_t flow_idx; 7469 struct rte_flow_error error; 7470 7471 actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP; 7472 actions[0].conf = &jump; 7473 actions[1].type = RTE_FLOW_ACTION_TYPE_END; 7474 flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL, 7475 &attr, items, actions, false, &error); 7476 if (!flow_idx) { 7477 DRV_LOG(DEBUG, 7478 "Failed to create ctrl flow: rte_errno(%d)," 7479 " type(%d), message(%s)", 7480 rte_errno, error.type, 7481 error.message ? error.message : " (no stated reason)"); 7482 return -rte_errno; 7483 } 7484 return 0; 7485 } 7486 7487 /** 7488 * Enable a control flow configured from the control plane. 7489 * 7490 * @param dev 7491 * Pointer to Ethernet device. 7492 * @param eth_spec 7493 * An Ethernet flow spec to apply. 7494 * @param eth_mask 7495 * An Ethernet flow mask to apply. 7496 * @param vlan_spec 7497 * A VLAN flow spec to apply. 7498 * @param vlan_mask 7499 * A VLAN flow mask to apply. 7500 * 7501 * @return 7502 * 0 on success, a negative errno value otherwise and rte_errno is set. 7503 */ 7504 int 7505 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, 7506 struct rte_flow_item_eth *eth_spec, 7507 struct rte_flow_item_eth *eth_mask, 7508 struct rte_flow_item_vlan *vlan_spec, 7509 struct rte_flow_item_vlan *vlan_mask) 7510 { 7511 struct mlx5_priv *priv = dev->data->dev_private; 7512 const struct rte_flow_attr attr = { 7513 .ingress = 1, 7514 .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR, 7515 }; 7516 struct rte_flow_item items[] = { 7517 { 7518 .type = RTE_FLOW_ITEM_TYPE_ETH, 7519 .spec = eth_spec, 7520 .last = NULL, 7521 .mask = eth_mask, 7522 }, 7523 { 7524 .type = (vlan_spec) ? 
				RTE_FLOW_ITEM_TYPE_VLAN :
				RTE_FLOW_ITEM_TYPE_END,
			.spec = vlan_spec,
			.last = NULL,
			.mask = vlan_mask,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
	};
	uint16_t queue[priv->reta_idx_n];
	struct rte_flow_action_rss action_rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0,
		.types = priv->rss_conf.rss_hf,
		.key_len = priv->rss_conf.rss_key_len,
		.queue_num = priv->reta_idx_n,
		.key = priv->rss_conf.rss_key,
		.queue = queue,
	};
	struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_RSS,
			.conf = &action_rss,
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		},
	};
	uint32_t flow_idx;
	struct rte_flow_error error;
	unsigned int i;

	if (!priv->reta_idx_n || !priv->rxqs_n) {
		return 0;
	}
	if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
		action_rss.types = 0;
	for (i = 0; i != priv->reta_idx_n; ++i)
		queue[i] = (*priv->reta_idx)[i];
	flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
				    &attr, items, actions, false, &error);
	if (!flow_idx)
		return -rte_errno;
	return 0;
}

/**
 * Enable a control flow configured from the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param eth_spec
 *   An Ethernet flow spec to apply.
 * @param eth_mask
 *   An Ethernet flow mask to apply.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_ctrl_flow(struct rte_eth_dev *dev,
	       struct rte_flow_item_eth *eth_spec,
	       struct rte_flow_item_eth *eth_mask)
{
	return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
}

/**
 * Create a default miss flow rule matching LACP traffic.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_lacp_miss(struct rte_eth_dev *dev)
{
	/*
	 * The LACP matching is done only by ether type, since using a
	 * multicast dst mac causes the kernel to give low priority to
	 * this flow.
	 */
	static const struct rte_flow_item_eth lacp_spec = {
		.type = RTE_BE16(0x8809),
	};
	static const struct rte_flow_item_eth lacp_mask = {
		.type = 0xffff,
	};
	const struct rte_flow_attr attr = {
		.ingress = 1,
	};
	struct rte_flow_item items[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_ETH,
			.spec = &lacp_spec,
			.mask = &lacp_mask,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
	};
	struct rte_flow_action actions[] = {
		{
			.type = (enum rte_flow_action_type)
				MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		},
	};
	struct rte_flow_error error;
	uint32_t flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
					     &attr, items, actions,
					     false, &error);

	if (!flow_idx)
		return -rte_errno;
	return 0;
}

/**
 * Destroy a flow.
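 *
 * Thin wrapper: forwards to flow_list_destroy() with MLX5_FLOW_TYPE_GEN.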
7650 * 7651 * @see rte_flow_destroy() 7652 * @see rte_flow_ops 7653 */ 7654 int 7655 mlx5_flow_destroy(struct rte_eth_dev *dev, 7656 struct rte_flow *flow, 7657 struct rte_flow_error *error __rte_unused) 7658 { 7659 struct mlx5_priv *priv = dev->data->dev_private; 7660 7661 if (priv->sh->config.dv_flow_en == 2) 7662 return rte_flow_error_set(error, ENOTSUP, 7663 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 7664 NULL, 7665 "Flow non-Q destruction not supported"); 7666 flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, 7667 (uintptr_t)(void *)flow); 7668 return 0; 7669 } 7670 7671 /** 7672 * Destroy all flows. 7673 * 7674 * @see rte_flow_flush() 7675 * @see rte_flow_ops 7676 */ 7677 int 7678 mlx5_flow_flush(struct rte_eth_dev *dev, 7679 struct rte_flow_error *error __rte_unused) 7680 { 7681 mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, false); 7682 return 0; 7683 } 7684 7685 /** 7686 * Isolated mode. 7687 * 7688 * @see rte_flow_isolate() 7689 * @see rte_flow_ops 7690 */ 7691 int 7692 mlx5_flow_isolate(struct rte_eth_dev *dev, 7693 int enable, 7694 struct rte_flow_error *error) 7695 { 7696 struct mlx5_priv *priv = dev->data->dev_private; 7697 7698 if (dev->data->dev_started) { 7699 rte_flow_error_set(error, EBUSY, 7700 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 7701 NULL, 7702 "port must be stopped first"); 7703 return -rte_errno; 7704 } 7705 priv->isolated = !!enable; 7706 if (enable) 7707 dev->dev_ops = &mlx5_dev_ops_isolate; 7708 else 7709 dev->dev_ops = &mlx5_dev_ops; 7710 7711 dev->rx_descriptor_status = mlx5_rx_descriptor_status; 7712 dev->tx_descriptor_status = mlx5_tx_descriptor_status; 7713 7714 return 0; 7715 } 7716 7717 /** 7718 * Query a flow. 7719 * 7720 * @see rte_flow_query() 7721 * @see rte_flow_ops 7722 */ 7723 static int 7724 flow_drv_query(struct rte_eth_dev *dev, 7725 uint32_t flow_idx, 7726 const struct rte_flow_action *actions, 7727 void *data, 7728 struct rte_flow_error *error) 7729 { 7730 struct mlx5_priv *priv = dev->data->dev_private; 7731 const struct mlx5_flow_driver_ops *fops; 7732 struct rte_flow *flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN], 7733 flow_idx); 7734 enum mlx5_flow_drv_type ftype; 7735 7736 if (!flow) { 7737 return rte_flow_error_set(error, ENOENT, 7738 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 7739 NULL, 7740 "invalid flow handle"); 7741 } 7742 ftype = flow->drv_type; 7743 MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX); 7744 fops = flow_get_drv_ops(ftype); 7745 7746 return fops->query(dev, flow, actions, data, error); 7747 } 7748 7749 /** 7750 * Query a flow. 7751 * 7752 * @see rte_flow_query() 7753 * @see rte_flow_ops 7754 */ 7755 int 7756 mlx5_flow_query(struct rte_eth_dev *dev, 7757 struct rte_flow *flow, 7758 const struct rte_flow_action *actions, 7759 void *data, 7760 struct rte_flow_error *error) 7761 { 7762 int ret; 7763 struct mlx5_priv *priv = dev->data->dev_private; 7764 7765 if (priv->sh->config.dv_flow_en == 2) 7766 return rte_flow_error_set(error, ENOTSUP, 7767 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 7768 NULL, 7769 "Flow non-Q query not supported"); 7770 ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data, 7771 error); 7772 if (ret < 0) 7773 return ret; 7774 return 0; 7775 } 7776 7777 /** 7778 * Get rte_flow callbacks. 7779 * 7780 * @param dev 7781 * Pointer to Ethernet device structure. 7782 * @param ops 7783 * Pointer to operation-specific structure. 
 *
 * @return 0
 */
int
mlx5_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
		  const struct rte_flow_ops **ops)
{
	*ops = &mlx5_flow_ops;
	return 0;
}

/**
 * Validate meter policy actions.
 * Dispatcher for action type specific validation.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] actions
 *   The meter policy actions (per color) to validate.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] is_rss
 *   Is RSS or not.
 * @param[out] domain_bitmap
 *   Domain bitmap.
 * @param[out] policy_mode
 *   Policy mode.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
int
mlx5_flow_validate_mtr_acts(struct rte_eth_dev *dev,
			    const struct rte_flow_action *actions[RTE_COLORS],
			    struct rte_flow_attr *attr,
			    bool *is_rss,
			    uint8_t *domain_bitmap,
			    uint8_t *policy_mode,
			    struct rte_mtr_error *error)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	return fops->validate_mtr_acts(dev, actions, attr, is_rss,
				       domain_bitmap, policy_mode, error);
}

/**
 * Destroy the meter policy actions.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] mtr_policy
 *   Meter policy struct.
 */
void
mlx5_flow_destroy_mtr_acts(struct rte_eth_dev *dev,
			   struct mlx5_flow_meter_policy *mtr_policy)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	fops->destroy_mtr_acts(dev, mtr_policy);
}

/**
 * Create policy action, lock free,
 * (mutex should be acquired by caller).
 * Dispatcher for action type specific call.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] mtr_policy
 *   Meter policy struct.
 * @param[in] actions
 *   Action specification used to create meter actions.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
int
mlx5_flow_create_mtr_acts(struct rte_eth_dev *dev,
			  struct mlx5_flow_meter_policy *mtr_policy,
			  const struct rte_flow_action *actions[RTE_COLORS],
			  struct rte_mtr_error *error)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	return fops->create_mtr_acts(dev, mtr_policy, actions, error);
}

/**
 * Create policy rules, lock free,
 * (mutex should be acquired by caller).
 * Dispatcher for action type specific call.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] mtr_policy
 *   Meter policy struct.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
int
mlx5_flow_create_policy_rules(struct rte_eth_dev *dev,
			      struct mlx5_flow_meter_policy *mtr_policy)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	return fops->create_policy_rules(dev, mtr_policy);
}

/**
 * Destroy policy rules, lock free,
 * (mutex should be acquired by caller).
 * Dispatcher for action type specific call.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] mtr_policy
 *   Meter policy struct.
 */
void
mlx5_flow_destroy_policy_rules(struct rte_eth_dev *dev,
			       struct mlx5_flow_meter_policy *mtr_policy)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	fops->destroy_policy_rules(dev, mtr_policy);
}

/**
 * Destroy the default policy table set.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 */
void
mlx5_flow_destroy_def_policy(struct rte_eth_dev *dev)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	fops->destroy_def_policy(dev);
}

/**
 * Create the default policy table set.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
int
mlx5_flow_create_def_policy(struct rte_eth_dev *dev)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	return fops->create_def_policy(dev);
}

/**
 * Create the needed meter and suffix tables.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] fm
 *   Pointer to the flow meter.
 * @param[in] mtr_idx
 *   Meter index.
 * @param[in] domain_bitmap
 *   Domain bitmap.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
int
mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
			  struct mlx5_flow_meter_info *fm,
			  uint32_t mtr_idx,
			  uint8_t domain_bitmap)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	return fops->create_mtr_tbls(dev, fm, mtr_idx, domain_bitmap);
}

/**
 * Destroy the meter table set.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] fm
 *   Pointer to the flow meter.
 */
void
mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
			   struct mlx5_flow_meter_info *fm)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	fops->destroy_mtr_tbls(dev, fm);
}

/**
 * Destroy the global meter drop table.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 */
void
mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	fops->destroy_mtr_drop_tbls(dev);
}

/**
 * Destroy the sub policy table with RX queue.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] mtr_policy
 *   Pointer to meter policy table.
 */
void
mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
				      struct mlx5_flow_meter_policy *mtr_policy)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	fops->destroy_sub_policy_with_rxq(dev, mtr_policy);
}

/**
 * Allocate the needed ASO flow meter id.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   Index to the ASO flow meter on success, 0 otherwise.
 */
uint32_t
mlx5_flow_mtr_alloc(struct rte_eth_dev *dev)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	return fops->create_meter(dev);
}

/**
 * Free the ASO flow meter id.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] mtr_idx
 *   Index to the ASO flow meter to be freed.
 */
void
mlx5_flow_mtr_free(struct rte_eth_dev *dev, uint32_t mtr_idx)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	fops->free_meter(dev, mtr_idx);
}

/**
 * Allocate a counter.
 *
 * @param[in] dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Index to allocated counter on success, 0 otherwise.
 */
uint32_t
mlx5_counter_alloc(struct rte_eth_dev *dev)
{
	const struct mlx5_flow_driver_ops *fops;
	struct rte_flow_attr attr = { .transfer = 0 };

	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
		return fops->counter_alloc(dev);
	}
	DRV_LOG(ERR,
		"port %u counter allocate is not supported.",
		dev->data->port_id);
	return 0;
}

/**
 * Free a counter.
 *
 * @param[in] dev
 *   Pointer to Ethernet device structure.
 * @param[in] cnt
 *   Index to counter to be freed.
 */
void
mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
{
	const struct mlx5_flow_driver_ops *fops;
	struct rte_flow_attr attr = { .transfer = 0 };

	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
		fops->counter_free(dev, cnt);
		return;
	}
	DRV_LOG(ERR,
		"port %u counter free is not supported.",
		dev->data->port_id);
}

/**
 * Query counter statistics.
 *
 * @param[in] dev
 *   Pointer to Ethernet device structure.
 * @param[in] cnt
 *   Index to counter to query.
 * @param[in] clear
 *   Set to clear counter statistics.
 * @param[out] pkts
 *   The counter hits packets number to save.
 * @param[out] bytes
 *   The counter hits bytes number to save.
 * @param[out] action
 *   Pointer to save the counter action object, if any.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
int
mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
		   bool clear, uint64_t *pkts, uint64_t *bytes, void **action)
{
	const struct mlx5_flow_driver_ops *fops;
	struct rte_flow_attr attr = { .transfer = 0 };

	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
		return fops->counter_query(dev, cnt, clear, pkts,
					   bytes, action);
	}
	DRV_LOG(ERR,
		"port %u counter query is not supported.",
		dev->data->port_id);
	return -ENOTSUP;
}

/**
 * Get information about HWS pre-configurable resources.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[out] port_info
 *   Pointer to port information.
 * @param[out] queue_info
 *   Pointer to queue information.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
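 *
 * Reached through the generic rte_flow layer (illustrative only):
 * @code
 *   struct rte_flow_port_info pinfo;
 *   struct rte_flow_queue_info qinfo;
 *   struct rte_flow_error err;
 *
 *   if (rte_flow_info_get(port_id, &pinfo, &qinfo, &err) < 0)
 *       printf("info_get failed: %s\n", err.message);
 * @endcode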
 */
static int
mlx5_flow_info_get(struct rte_eth_dev *dev,
		   struct rte_flow_port_info *port_info,
		   struct rte_flow_queue_info *queue_info,
		   struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops;

	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL,
				"info get with incorrect steering mode");
	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
	return fops->info_get(dev, port_info, queue_info, error);
}

/**
 * Configure port HWS resources.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] port_attr
 *   Port configuration attributes.
 * @param[in] nb_queue
 *   Number of queues.
 * @param[in] queue_attr
 *   Array that holds attributes for each flow queue.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_port_configure(struct rte_eth_dev *dev,
			 const struct rte_flow_port_attr *port_attr,
			 uint16_t nb_queue,
			 const struct rte_flow_queue_attr *queue_attr[],
			 struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops;

	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL,
				"port configure with incorrect steering mode");
	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
	return fops->configure(dev, port_attr, nb_queue, queue_attr, error);
}

/**
 * Create flow item template.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the item template attributes.
 * @param[in] items
 *   The template item pattern.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Template on success, NULL otherwise and rte_errno is set.
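 *
 * Application-level sketch via the rte_flow API (illustrative only; the
 * attribute values are assumptions):
 * @code
 *   const struct rte_flow_pattern_template_attr pt_attr = {
 *       .relaxed_matching = 1,
 *       .ingress = 1,
 *   };
 *   struct rte_flow_pattern_template *pt =
 *       rte_flow_pattern_template_create(port_id, &pt_attr, items, &err);
 * @endcode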
 */
static int
mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev,
				   struct rte_flow_pattern_template *template,
				   struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops;

	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL,
				"pattern destroy with incorrect steering mode");
	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
	return fops->pattern_template_destroy(dev, template, error);
}

/**
 * Create flow actions template.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the action template attributes.
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[in] masks
 *   List of actions that marks which of the action's members are constant.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Template on success, NULL otherwise and rte_errno is set.
 */
static struct rte_flow_actions_template *
mlx5_flow_actions_template_create(struct rte_eth_dev *dev,
			const struct rte_flow_actions_template_attr *attr,
			const struct rte_flow_action actions[],
			const struct rte_flow_action masks[],
			struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops;

	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) {
		rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL,
				"action create with incorrect steering mode");
		return NULL;
	}
	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
	return fops->actions_template_create(dev, attr, actions, masks, error);
}

/**
 * Destroy flow action template.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] template
 *   Pointer to the action template to be destroyed.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_actions_template_destroy(struct rte_eth_dev *dev,
				   struct rte_flow_actions_template *template,
				   struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops;

	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL,
				"action destroy with incorrect steering mode");
	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
	return fops->actions_template_destroy(dev, template, error);
}

/**
 * Create flow table.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the table attributes.
 * @param[in] item_templates
 *   Item template array to be bound to the table.
 * @param[in] nb_item_templates
 *   Number of item templates.
 * @param[in] action_templates
 *   Action template array to be bound to the table.
 * @param[in] nb_action_templates
 *   Number of action templates.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Table on success, NULL otherwise and rte_errno is set.
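 *
 * Application-level sketch via the rte_flow API (illustrative only;
 * nb_flows and the group are assumed values, pt/at are previously
 * created templates):
 * @code
 *   const struct rte_flow_template_table_attr tbl_attr = {
 *       .flow_attr = { .group = 1, .ingress = 1 },
 *       .nb_flows = 1024,
 *   };
 *   struct rte_flow_template_table *tbl =
 *       rte_flow_template_table_create(port_id, &tbl_attr,
 *                                      &pt, 1, &at, 1, &err);
 * @endcode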
 */
static struct rte_flow_template_table *
mlx5_flow_table_create(struct rte_eth_dev *dev,
		       const struct rte_flow_template_table_attr *attr,
		       struct rte_flow_pattern_template *item_templates[],
		       uint8_t nb_item_templates,
		       struct rte_flow_actions_template *action_templates[],
		       uint8_t nb_action_templates,
		       struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops;

	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) {
		rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL,
				"table create with incorrect steering mode");
		return NULL;
	}
	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
	return fops->template_table_create(dev,
					   attr,
					   item_templates,
					   nb_item_templates,
					   action_templates,
					   nb_action_templates,
					   error);
}

/**
 * PMD destroy flow table.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] table
 *   Pointer to the table to be destroyed.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_table_destroy(struct rte_eth_dev *dev,
			struct rte_flow_template_table *table,
			struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops;

	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL,
				"table destroy with incorrect steering mode");
	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
	return fops->template_table_destroy(dev, table, error);
}

/**
 * Enqueue flow creation.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] queue_id
 *   The queue to create the flow.
 * @param[in] attr
 *   Pointer to the flow operation attributes.
 * @param[in] table
 *   Pointer to the template table the flow is created from.
 * @param[in] items
 *   Items with flow spec value.
 * @param[in] pattern_template_index
 *   Index of the item pattern template to use from the table.
 * @param[in] actions
 *   Action with flow spec value.
 * @param[in] action_template_index
 *   Index of the action template to use from the table.
 * @param[in] user_data
 *   Pointer to the user_data.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Flow pointer on success, NULL otherwise and rte_errno is set.
 */
static struct rte_flow *
mlx5_flow_async_flow_create(struct rte_eth_dev *dev,
			    uint32_t queue_id,
			    const struct rte_flow_op_attr *attr,
			    struct rte_flow_template_table *table,
			    const struct rte_flow_item items[],
			    uint8_t pattern_template_index,
			    const struct rte_flow_action actions[],
			    uint8_t action_template_index,
			    void *user_data,
			    struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops;

	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) {
		rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL,
				"flow_q create with incorrect steering mode");
		return NULL;
	}
	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
	return fops->async_flow_create(dev, queue_id, attr, table,
				       items, pattern_template_index,
				       actions, action_template_index,
				       user_data, error);
}

/**
 * Enqueue flow destruction.
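 *
 * (The completion is reported asynchronously and must be retrieved with
 * the pull operation on the same queue.)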
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] queue
 *   The queue to destroy the flow.
 * @param[in] attr
 *   Pointer to the flow operation attributes.
 * @param[in] flow
 *   Pointer to the flow to be destroyed.
 * @param[in] user_data
 *   Pointer to the user_data.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static int
mlx5_flow_async_flow_destroy(struct rte_eth_dev *dev,
			     uint32_t queue,
			     const struct rte_flow_op_attr *attr,
			     struct rte_flow *flow,
			     void *user_data,
			     struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops;

	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL,
				"flow_q destroy with incorrect steering mode");
	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
	return fops->async_flow_destroy(dev, queue, attr, flow,
					user_data, error);
}

/**
 * Pull the enqueued flows.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] queue
 *   The queue to pull the result.
 * @param[in, out] res
 *   Array to save the results.
 * @param[in] n_res
 *   Number of result slots available in the array.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Result number on success, negative value otherwise and rte_errno is set.
 */
static int
mlx5_flow_pull(struct rte_eth_dev *dev,
	       uint32_t queue,
	       struct rte_flow_op_result res[],
	       uint16_t n_res,
	       struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops;

	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL,
				"flow_q pull with incorrect steering mode");
	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
	return fops->pull(dev, queue, res, n_res, error);
}

/**
 * Push the enqueued flows.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] queue
 *   The queue to push the flows.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static int
mlx5_flow_push(struct rte_eth_dev *dev,
	       uint32_t queue,
	       struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops;

	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL,
				"flow_q push with incorrect steering mode");
	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
	return fops->push(dev, queue, error);
}

/**
 * Create shared action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] queue
 *   Which queue to be used.
 * @param[in] attr
 *   Operation attribute.
 * @param[in] conf
 *   Indirect action configuration.
 * @param[in] action
 *   rte_flow action detail.
 * @param[in] user_data
 *   Pointer to the user_data.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Action handle on success, NULL otherwise and rte_errno is set.
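 *
 * Application-level sketch via the rte_flow API (illustrative only; conf
 * and action are assumed to be prepared by the caller):
 * @code
 *   struct rte_flow_op_attr op = { .postpone = 0 };
 *   struct rte_flow_action_handle *h =
 *       rte_flow_async_action_handle_create(port_id, queue, &op, &conf,
 *                                           &action, NULL, &err);
 * @endcode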
 */
static struct rte_flow_action_handle *
mlx5_flow_async_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
				     const struct rte_flow_op_attr *attr,
				     const struct rte_flow_indir_action_conf *conf,
				     const struct rte_flow_action *action,
				     void *user_data,
				     struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops =
			flow_get_drv_ops(MLX5_FLOW_TYPE_HW);

	return fops->async_action_create(dev, queue, attr, conf, action,
					 user_data, error);
}

/**
 * Update shared action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] queue
 *   Which queue to be used.
 * @param[in] attr
 *   Operation attribute.
 * @param[in] handle
 *   Action handle to be updated.
 * @param[in] update
 *   Update value.
 * @param[in] user_data
 *   Pointer to the user_data.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static int
mlx5_flow_async_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
				     const struct rte_flow_op_attr *attr,
				     struct rte_flow_action_handle *handle,
				     const void *update,
				     void *user_data,
				     struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops =
			flow_get_drv_ops(MLX5_FLOW_TYPE_HW);

	return fops->async_action_update(dev, queue, attr, handle,
					 update, user_data, error);
}

/**
 * Destroy shared action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] queue
 *   Which queue to be used.
 * @param[in] attr
 *   Operation attribute.
 * @param[in] handle
 *   Action handle to be destroyed.
 * @param[in] user_data
 *   Pointer to the user_data.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static int
mlx5_flow_async_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
				      const struct rte_flow_op_attr *attr,
				      struct rte_flow_action_handle *handle,
				      void *user_data,
				      struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops =
			flow_get_drv_ops(MLX5_FLOW_TYPE_HW);

	return fops->async_action_destroy(dev, queue, attr, handle,
					  user_data, error);
}

/**
 * Allocate new memory for the counter values, wrapped by all the needed
 * management structures.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
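 *
 * Layout of the single allocation made below (for reference):
 *
 *   [counter raw data: raws_n * MLX5_COUNTERS_PER_POOL entries]
 *   [raws_n x struct mlx5_counter_stats_raw descriptors]
 *   [struct mlx5_counter_stats_mem_mng header at the very end]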
8696 */ 8697 static int 8698 mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) 8699 { 8700 struct mlx5_counter_stats_mem_mng *mem_mng; 8701 volatile struct flow_counter_stats *raw_data; 8702 int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES; 8703 int size = (sizeof(struct flow_counter_stats) * 8704 MLX5_COUNTERS_PER_POOL + 8705 sizeof(struct mlx5_counter_stats_raw)) * raws_n + 8706 sizeof(struct mlx5_counter_stats_mem_mng); 8707 size_t pgsize = rte_mem_page_size(); 8708 uint8_t *mem; 8709 int ret; 8710 int i; 8711 8712 if (pgsize == (size_t)-1) { 8713 DRV_LOG(ERR, "Failed to get mem page size"); 8714 rte_errno = ENOMEM; 8715 return -ENOMEM; 8716 } 8717 mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize, SOCKET_ID_ANY); 8718 if (!mem) { 8719 rte_errno = ENOMEM; 8720 return -ENOMEM; 8721 } 8722 mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1; 8723 size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n; 8724 ret = mlx5_os_wrapped_mkey_create(sh->cdev->ctx, sh->cdev->pd, 8725 sh->cdev->pdn, mem, size, 8726 &mem_mng->wm); 8727 if (ret) { 8728 rte_errno = errno; 8729 mlx5_free(mem); 8730 return -rte_errno; 8731 } 8732 mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size); 8733 raw_data = (volatile struct flow_counter_stats *)mem; 8734 for (i = 0; i < raws_n; ++i) { 8735 mem_mng->raws[i].mem_mng = mem_mng; 8736 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL; 8737 } 8738 for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i) 8739 LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, 8740 mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE + i, 8741 next); 8742 LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next); 8743 sh->cmng.mem_mng = mem_mng; 8744 return 0; 8745 } 8746 8747 /** 8748 * Set the statistic memory to the new counter pool. 8749 * 8750 * @param[in] sh 8751 * Pointer to mlx5_dev_ctx_shared object. 8752 * @param[in] pool 8753 * Pointer to the pool to set the statistic memory. 8754 * 8755 * @return 8756 * 0 on success, a negative errno value otherwise. 8757 */ 8758 static int 8759 mlx5_flow_set_counter_stat_mem(struct mlx5_dev_ctx_shared *sh, 8760 struct mlx5_flow_counter_pool *pool) 8761 { 8762 struct mlx5_flow_counter_mng *cmng = &sh->cmng; 8763 /* Resize statistic memory once used out. */ 8764 if (!(pool->index % MLX5_CNT_CONTAINER_RESIZE) && 8765 mlx5_flow_create_counter_stat_mem_mng(sh)) { 8766 DRV_LOG(ERR, "Cannot resize counter stat mem."); 8767 return -1; 8768 } 8769 rte_spinlock_lock(&pool->sl); 8770 pool->raw = cmng->mem_mng->raws + pool->index % 8771 MLX5_CNT_CONTAINER_RESIZE; 8772 rte_spinlock_unlock(&pool->sl); 8773 pool->raw_hw = NULL; 8774 return 0; 8775 } 8776 8777 #define MLX5_POOL_QUERY_FREQ_US 1000000 8778 8779 /** 8780 * Set the periodic procedure for triggering asynchronous batch queries for all 8781 * the counter pools. 8782 * 8783 * @param[in] sh 8784 * Pointer to mlx5_dev_ctx_shared object. 8785 */ 8786 void 8787 mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh) 8788 { 8789 uint32_t pools_n, us; 8790 8791 pools_n = __atomic_load_n(&sh->cmng.n_valid, __ATOMIC_RELAXED); 8792 us = MLX5_POOL_QUERY_FREQ_US / pools_n; 8793 DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us); 8794 if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) { 8795 sh->cmng.query_thread_on = 0; 8796 DRV_LOG(ERR, "Cannot reinitialize query alarm"); 8797 } else { 8798 sh->cmng.query_thread_on = 1; 8799 } 8800 } 8801 8802 /** 8803 * The periodic procedure for triggering asynchronous batch queries for all the 8804 * counter pools. 
This function is expected to be called from the host thread.
 *
 * @param[in] arg
 *   The parameter for the alarm process.
 */
void
mlx5_flow_query_alarm(void *arg)
{
	struct mlx5_dev_ctx_shared *sh = arg;
	int ret;
	uint16_t pool_index = sh->cmng.pool_index;
	struct mlx5_flow_counter_mng *cmng = &sh->cmng;
	struct mlx5_flow_counter_pool *pool;
	uint16_t n_valid;

	if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
		goto set_alarm;
	rte_spinlock_lock(&cmng->pool_update_sl);
	pool = cmng->pools[pool_index];
	n_valid = cmng->n_valid;
	rte_spinlock_unlock(&cmng->pool_update_sl);
	/* Set the statistic memory to the newly created pool. */
	if ((!pool->raw && mlx5_flow_set_counter_stat_mem(sh, pool)))
		goto set_alarm;
	if (pool->raw_hw)
		/* There is a pool query in progress. */
		goto set_alarm;
	pool->raw_hw =
		LIST_FIRST(&sh->cmng.free_stat_raws);
	if (!pool->raw_hw)
		/* No free counter statistics raw memory. */
		goto set_alarm;
	/*
	 * Identify the counters released between query trigger and query
	 * handle more efficiently. The counter released in this gap period
	 * should wait for a new round of query as the new arrived packets
	 * will not be taken into account.
	 */
	pool->query_gen++;
	ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0,
					       MLX5_COUNTERS_PER_POOL,
					       NULL, NULL,
					       pool->raw_hw->mem_mng->wm.lkey,
					       (void *)(uintptr_t)
					       pool->raw_hw->data,
					       sh->devx_comp,
					       (uint64_t)(uintptr_t)pool);
	if (ret) {
		DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
			" %d", pool->min_dcs->id);
		pool->raw_hw = NULL;
		goto set_alarm;
	}
	LIST_REMOVE(pool->raw_hw, next);
	sh->cmng.pending_queries++;
	pool_index++;
	if (pool_index >= n_valid)
		pool_index = 0;
set_alarm:
	sh->cmng.pool_index = pool_index;
	mlx5_set_query_alarm(sh);
}

/**
 * Check for aged flows and raise the new-aged event for the counter pool.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 * @param[in] pool
 *   Pointer to the current counter pool.
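 *
 * Per-counter aging decision implemented below (summary):
 *   - a change in the hit count resets sec_since_last_hit to 0;
 *   - otherwise sec_since_last_hit grows by the elapsed time, and once it
 *     exceeds the configured timeout the counter moves from AGE_CANDIDATE
 *     to AGE_TMOUT and is queued on the per-port aged list.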
 */
static void
mlx5_flow_aging_check(struct mlx5_dev_ctx_shared *sh,
		      struct mlx5_flow_counter_pool *pool)
{
	struct mlx5_priv *priv;
	struct mlx5_flow_counter *cnt;
	struct mlx5_age_info *age_info;
	struct mlx5_age_param *age_param;
	struct mlx5_counter_stats_raw *cur = pool->raw_hw;
	struct mlx5_counter_stats_raw *prev = pool->raw;
	const uint64_t curr_time = MLX5_CURR_TIME_SEC;
	const uint32_t time_delta = curr_time - pool->time_of_last_age_check;
	uint16_t expected = AGE_CANDIDATE;
	uint32_t i;

	pool->time_of_last_age_check = curr_time;
	for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
		cnt = MLX5_POOL_GET_CNT(pool, i);
		age_param = MLX5_CNT_TO_AGE(cnt);
		if (__atomic_load_n(&age_param->state,
				    __ATOMIC_RELAXED) != AGE_CANDIDATE)
			continue;
		if (cur->data[i].hits != prev->data[i].hits) {
			__atomic_store_n(&age_param->sec_since_last_hit, 0,
					 __ATOMIC_RELAXED);
			continue;
		}
		if (__atomic_add_fetch(&age_param->sec_since_last_hit,
				       time_delta,
				       __ATOMIC_RELAXED) <= age_param->timeout)
			continue;
		/*
		 * Hold the lock first; otherwise, if the release happens
		 * between setting the state to AGE_TMOUT and the tailq
		 * operation, the release procedure may delete a
		 * non-existent tailq node.
		 */
		priv = rte_eth_devices[age_param->port_id].data->dev_private;
		age_info = GET_PORT_AGE_INFO(priv);
		rte_spinlock_lock(&age_info->aged_sl);
		if (__atomic_compare_exchange_n(&age_param->state, &expected,
						AGE_TMOUT, false,
						__ATOMIC_RELAXED,
						__ATOMIC_RELAXED)) {
			TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
			MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
		}
		rte_spinlock_unlock(&age_info->aged_sl);
	}
	mlx5_age_event_prepare(sh);
}

/**
 * Handler for the HW response with ready values from an asynchronous batch
 * query. This function is expected to be called from the host thread.
 *
 * @param[in] sh
 *   The pointer to the shared device context.
 * @param[in] async_id
 *   The Devx async ID.
 * @param[in] status
 *   The status of the completion.
 */
void
mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
				  uint64_t async_id, int status)
{
	struct mlx5_flow_counter_pool *pool =
		(struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
	struct mlx5_counter_stats_raw *raw_to_free;
	uint8_t query_gen = pool->query_gen ^ 1;
	struct mlx5_flow_counter_mng *cmng = &sh->cmng;
	enum mlx5_counter_type cnt_type =
		pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
				MLX5_COUNTER_TYPE_ORIGIN;

	if (unlikely(status)) {
		raw_to_free = pool->raw_hw;
	} else {
		raw_to_free = pool->raw;
		if (pool->is_aged)
			mlx5_flow_aging_check(sh, pool);
		rte_spinlock_lock(&pool->sl);
		pool->raw = pool->raw_hw;
		rte_spinlock_unlock(&pool->sl);
		/*
		 * Be sure the new raw counter data is updated in memory.
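		 * (The rte_io_wmb() below makes the pool->raw update visible
		 * before the counters queried in this round are returned to
		 * the free lists.)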
		 */
		rte_io_wmb();
		if (!TAILQ_EMPTY(&pool->counters[query_gen])) {
			rte_spinlock_lock(&cmng->csl[cnt_type]);
			TAILQ_CONCAT(&cmng->counters[cnt_type],
				     &pool->counters[query_gen], next);
			rte_spinlock_unlock(&cmng->csl[cnt_type]);
		}
	}
	LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
	pool->raw_hw = NULL;
	sh->cmng.pending_queries--;
}

static int
flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
		    const struct flow_grp_info *grp_info,
		    struct rte_flow_error *error)
{
	if (grp_info->transfer && grp_info->external &&
	    grp_info->fdb_def_rule) {
		if (group == UINT32_MAX)
			return rte_flow_error_set
						(error, EINVAL,
						 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
						 NULL,
						 "group index not supported");
		*table = group + 1;
	} else {
		*table = group;
	}
	DRV_LOG(DEBUG, "port %u group=%#x table=%#x", port_id, group, *table);
	return 0;
}

/**
 * Translate the rte_flow group index to HW table value.
 *
 * If tunnel offload is disabled, all group ids are converted to flow table
 * ids using the standard method.
 * If tunnel offload is enabled, a group id can be converted using either
 * the standard or the tunnel conversion method. The conversion method
 * selection depends on flags in the `grp_info` parameter:
 * - Internal (grp_info.external == 0) groups are converted using the
 *   standard method.
 * - Group ids in the JUMP action are converted with the tunnel conversion.
 * - Group id conversion in the rule attribute depends on the rule type and
 *   the group id value:
 *   ** non-zero group attributes are converted with the tunnel method;
 *   ** a zero group attribute in a non-tunnel rule is converted using the
 *      standard method - there's only one root table;
 *   ** a zero group attribute in a steer tunnel rule is converted with the
 *      standard method - single root table;
 *   ** a zero group attribute in a match tunnel rule is a special OvS
 *      case: that value is used for portability reasons. That group
 *      id is converted with the tunnel conversion method.
 *
 * @param[in] dev
 *   Port device
 * @param[in] tunnel
 *   PMD tunnel offload object
 * @param[in] group
 *   rte_flow group index value.
 * @param[out] table
 *   HW table value.
 * @param[in] grp_info
 *   Flags used for conversion.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_group_to_table(struct rte_eth_dev *dev,
			 const struct mlx5_flow_tunnel *tunnel,
			 uint32_t group, uint32_t *table,
			 const struct flow_grp_info *grp_info,
			 struct rte_flow_error *error)
{
	int ret;
	bool standard_translation;

	if (!grp_info->skip_scale && grp_info->external &&
	    group < MLX5_MAX_TABLES_EXTERNAL)
		group *= MLX5_FLOW_TABLE_FACTOR;
	if (is_tunnel_offload_active(dev)) {
		standard_translation = !grp_info->external ||
					grp_info->std_tbl_fix;
	} else {
		standard_translation = true;
	}
	DRV_LOG(DEBUG,
		"port %u group=%u transfer=%d external=%d fdb_def_rule=%d translate=%s",
		dev->data->port_id, group, grp_info->transfer,
		grp_info->external, grp_info->fdb_def_rule,
		standard_translation ?
"STANDARD" : "TUNNEL"); 9057 if (standard_translation) 9058 ret = flow_group_to_table(dev->data->port_id, group, table, 9059 grp_info, error); 9060 else 9061 ret = tunnel_flow_group_to_flow_table(dev, tunnel, group, 9062 table, error); 9063 9064 return ret; 9065 } 9066 9067 /** 9068 * Discover availability of metadata reg_c's. 9069 * 9070 * Iteratively use test flows to check availability. 9071 * 9072 * @param[in] dev 9073 * Pointer to the Ethernet device structure. 9074 * 9075 * @return 9076 * 0 on success, a negative errno value otherwise and rte_errno is set. 9077 */ 9078 int 9079 mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev) 9080 { 9081 struct mlx5_priv *priv = dev->data->dev_private; 9082 enum modify_reg idx; 9083 int n = 0; 9084 9085 /* reg_c[0] and reg_c[1] are reserved. */ 9086 priv->sh->flow_mreg_c[n++] = REG_C_0; 9087 priv->sh->flow_mreg_c[n++] = REG_C_1; 9088 /* Discover availability of other reg_c's. */ 9089 for (idx = REG_C_2; idx <= REG_C_7; ++idx) { 9090 struct rte_flow_attr attr = { 9091 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, 9092 .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR, 9093 .ingress = 1, 9094 }; 9095 struct rte_flow_item items[] = { 9096 [0] = { 9097 .type = RTE_FLOW_ITEM_TYPE_END, 9098 }, 9099 }; 9100 struct rte_flow_action actions[] = { 9101 [0] = { 9102 .type = (enum rte_flow_action_type) 9103 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 9104 .conf = &(struct mlx5_flow_action_copy_mreg){ 9105 .src = REG_C_1, 9106 .dst = idx, 9107 }, 9108 }, 9109 [1] = { 9110 .type = RTE_FLOW_ACTION_TYPE_JUMP, 9111 .conf = &(struct rte_flow_action_jump){ 9112 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, 9113 }, 9114 }, 9115 [2] = { 9116 .type = RTE_FLOW_ACTION_TYPE_END, 9117 }, 9118 }; 9119 uint32_t flow_idx; 9120 struct rte_flow *flow; 9121 struct rte_flow_error error; 9122 9123 if (!priv->sh->config.dv_flow_en) 9124 break; 9125 /* Create internal flow, validation skips copy action. */ 9126 flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr, 9127 items, actions, false, &error); 9128 flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN], 9129 flow_idx); 9130 if (!flow) 9131 continue; 9132 priv->sh->flow_mreg_c[n++] = idx; 9133 flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx); 9134 } 9135 for (; n < MLX5_MREG_C_NUM; ++n) 9136 priv->sh->flow_mreg_c[n] = REG_NON; 9137 priv->sh->metadata_regc_check_flag = 1; 9138 return 0; 9139 } 9140 9141 int 9142 save_dump_file(const uint8_t *data, uint32_t size, 9143 uint32_t type, uint64_t id, void *arg, FILE *file) 9144 { 9145 char line[BUF_SIZE]; 9146 uint32_t out = 0; 9147 uint32_t k; 9148 uint32_t actions_num; 9149 struct rte_flow_query_count *count; 9150 9151 memset(line, 0, BUF_SIZE); 9152 switch (type) { 9153 case DR_DUMP_REC_TYPE_PMD_MODIFY_HDR: 9154 actions_num = *(uint32_t *)(arg); 9155 out += snprintf(line + out, BUF_SIZE - out, "%d,0x%" PRIx64 ",%d,", 9156 type, id, actions_num); 9157 break; 9158 case DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT: 9159 out += snprintf(line + out, BUF_SIZE - out, "%d,0x%" PRIx64 ",", 9160 type, id); 9161 break; 9162 case DR_DUMP_REC_TYPE_PMD_COUNTER: 9163 count = (struct rte_flow_query_count *)arg; 9164 fprintf(file, 9165 "%d,0x%" PRIx64 ",%" PRIu64 ",%" PRIu64 "\n", 9166 type, id, count->hits, count->bytes); 9167 return 0; 9168 default: 9169 return -1; 9170 } 9171 9172 for (k = 0; k < size; k++) { 9173 /* Make sure we do not overrun the line buffer length. 
*/
9174 if (out >= BUF_SIZE - 4) {
9175 line[out] = '\0';
9176 break;
9177 }
9178 out += snprintf(line + out, BUF_SIZE - out, "%02x",
9179 (data[k]) & 0xff);
9180 }
9181 fprintf(file, "%s\n", line);
9182 return 0;
9183 }
9184 
9185 int
9186 mlx5_flow_query_counter(struct rte_eth_dev *dev, struct rte_flow *flow,
9187 struct rte_flow_query_count *count, struct rte_flow_error *error)
9188 {
9189 struct rte_flow_action action[2];
9190 enum mlx5_flow_drv_type ftype;
9191 const struct mlx5_flow_driver_ops *fops;
9192 
9193 if (!flow) {
9194 return rte_flow_error_set(error, ENOENT,
9195 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9196 NULL,
9197 "invalid flow handle");
9198 }
9199 action[0].type = RTE_FLOW_ACTION_TYPE_COUNT;
9200 action[1].type = RTE_FLOW_ACTION_TYPE_END;
9201 if (flow->counter) {
9202 memset(count, 0, sizeof(struct rte_flow_query_count));
9203 ftype = (enum mlx5_flow_drv_type)(flow->drv_type);
9204 MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN &&
9205 ftype < MLX5_FLOW_TYPE_MAX);
9206 fops = flow_get_drv_ops(ftype);
9207 return fops->query(dev, flow, action, count, error);
9208 }
9209 return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"flow does not have counter");
9210 }
9211 
9212 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
9213 /**
9214 * Dump flow ipool data to file.
9215 *
9216 * @param[in] dev
9217 * The pointer to Ethernet device.
9218 * @param[in] file
9219 * A pointer to a file for output.
9220 * @param[out] error
9221 * Perform verbose error reporting if not NULL. PMDs initialize this
9222 * structure in case of error only.
9223 * @return
9224 * 0 on success, a negative value otherwise.
9225 */
9226 int
9227 mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
9228 struct rte_flow *flow, FILE *file,
9229 struct rte_flow_error *error)
9230 {
9231 struct mlx5_priv *priv = dev->data->dev_private;
9232 struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
9233 struct mlx5_flow_dv_encap_decap_resource *encap_decap;
9234 uint32_t handle_idx;
9235 struct mlx5_flow_handle *dh;
9236 struct rte_flow_query_count count;
9237 uint32_t actions_num;
9238 const uint8_t *data;
9239 size_t size;
9240 uint64_t id;
9241 uint32_t type;
9242 void *action = NULL;
9243 
9244 if (!flow) {
9245 return rte_flow_error_set(error, ENOENT,
9246 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9247 NULL,
9248 "invalid flow handle");
9249 }
9250 handle_idx = flow->dev_handles;
9251 /* query counter */
9252 if (flow->counter &&
9253 (!mlx5_counter_query(dev, flow->counter, false,
9254 &count.hits, &count.bytes, &action)) && action) {
9255 id = (uint64_t)(uintptr_t)action;
9256 type = DR_DUMP_REC_TYPE_PMD_COUNTER;
9257 save_dump_file(NULL, 0, type,
9258 id, (void *)&count, file);
9259 }
9260 
9261 while (handle_idx) {
9262 dh = mlx5_ipool_get(priv->sh->ipool
9263 [MLX5_IPOOL_MLX5_FLOW], handle_idx);
9264 if (!dh)
9265 break;
9266 handle_idx = dh->next.next;
9267 
9268 /* Get modify_hdr and encap_decap buf from ipools.
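 * The device handle references the encap/decap resource by index, so it
 * has to be resolved through the indexed pool before the raw buffer can
 * be serialized.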
*/ 9269 encap_decap = NULL; 9270 modify_hdr = dh->dvh.modify_hdr; 9271 9272 if (dh->dvh.rix_encap_decap) { 9273 encap_decap = mlx5_ipool_get(priv->sh->ipool 9274 [MLX5_IPOOL_DECAP_ENCAP], 9275 dh->dvh.rix_encap_decap); 9276 } 9277 if (modify_hdr) { 9278 data = (const uint8_t *)modify_hdr->actions; 9279 size = (size_t)(modify_hdr->actions_num) * 8; 9280 id = (uint64_t)(uintptr_t)modify_hdr->action; 9281 actions_num = modify_hdr->actions_num; 9282 type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR; 9283 save_dump_file(data, size, type, id, 9284 (void *)(&actions_num), file); 9285 } 9286 if (encap_decap) { 9287 data = encap_decap->buf; 9288 size = encap_decap->size; 9289 id = (uint64_t)(uintptr_t)encap_decap->action; 9290 type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT; 9291 save_dump_file(data, size, type, 9292 id, NULL, file); 9293 } 9294 } 9295 return 0; 9296 } 9297 9298 /** 9299 * Dump all flow's encap_decap/modify_hdr/counter data to file 9300 * 9301 * @param[in] dev 9302 * The pointer to Ethernet device. 9303 * @param[in] file 9304 * A pointer to a file for output. 9305 * @param[out] error 9306 * Perform verbose error reporting if not NULL. PMDs initialize this 9307 * structure in case of error only. 9308 * @return 9309 * 0 on success, a negative value otherwise. 9310 */ 9311 static int 9312 mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, 9313 FILE *file, struct rte_flow_error *error __rte_unused) 9314 { 9315 struct mlx5_priv *priv = dev->data->dev_private; 9316 struct mlx5_dev_ctx_shared *sh = priv->sh; 9317 struct mlx5_hlist *h; 9318 struct mlx5_flow_dv_modify_hdr_resource *modify_hdr; 9319 struct mlx5_flow_dv_encap_decap_resource *encap_decap; 9320 struct rte_flow_query_count count; 9321 uint32_t actions_num; 9322 const uint8_t *data; 9323 size_t size; 9324 uint64_t id; 9325 uint32_t type; 9326 uint32_t i; 9327 uint32_t j; 9328 struct mlx5_list_inconst *l_inconst; 9329 struct mlx5_list_entry *e; 9330 int lcore_index; 9331 struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; 9332 uint32_t max; 9333 void *action; 9334 9335 /* encap_decap hlist is lcore_share, get global core cache. 
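 * Entries of an lcore-shared list are kept in the MLX5_LIST_GLOBAL
 * cache rather than in the per-lcore caches, so one pass over that
 * cache visits every entry.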
*/ 9336 i = MLX5_LIST_GLOBAL; 9337 h = sh->encaps_decaps; 9338 if (h) { 9339 for (j = 0; j <= h->mask; j++) { 9340 l_inconst = &h->buckets[j].l; 9341 if (!l_inconst || !l_inconst->cache[i]) 9342 continue; 9343 9344 e = LIST_FIRST(&l_inconst->cache[i]->h); 9345 while (e) { 9346 encap_decap = 9347 (struct mlx5_flow_dv_encap_decap_resource *)e; 9348 data = encap_decap->buf; 9349 size = encap_decap->size; 9350 id = (uint64_t)(uintptr_t)encap_decap->action; 9351 type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT; 9352 save_dump_file(data, size, type, 9353 id, NULL, file); 9354 e = LIST_NEXT(e, next); 9355 } 9356 } 9357 } 9358 9359 /* get modify_hdr */ 9360 h = sh->modify_cmds; 9361 if (h) { 9362 lcore_index = rte_lcore_index(rte_lcore_id()); 9363 if (unlikely(lcore_index == -1)) { 9364 lcore_index = MLX5_LIST_NLCORE; 9365 rte_spinlock_lock(&h->l_const.lcore_lock); 9366 } 9367 i = lcore_index; 9368 9369 for (j = 0; j <= h->mask; j++) { 9370 l_inconst = &h->buckets[j].l; 9371 if (!l_inconst || !l_inconst->cache[i]) 9372 continue; 9373 9374 e = LIST_FIRST(&l_inconst->cache[i]->h); 9375 while (e) { 9376 modify_hdr = 9377 (struct mlx5_flow_dv_modify_hdr_resource *)e; 9378 data = (const uint8_t *)modify_hdr->actions; 9379 size = (size_t)(modify_hdr->actions_num) * 8; 9380 actions_num = modify_hdr->actions_num; 9381 id = (uint64_t)(uintptr_t)modify_hdr->action; 9382 type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR; 9383 save_dump_file(data, size, type, id, 9384 (void *)(&actions_num), file); 9385 e = LIST_NEXT(e, next); 9386 } 9387 } 9388 9389 if (unlikely(lcore_index == MLX5_LIST_NLCORE)) 9390 rte_spinlock_unlock(&h->l_const.lcore_lock); 9391 } 9392 9393 /* get counter */ 9394 MLX5_ASSERT(cmng->n_valid <= cmng->n); 9395 max = MLX5_COUNTERS_PER_POOL * cmng->n_valid; 9396 for (j = 1; j <= max; j++) { 9397 action = NULL; 9398 if ((!mlx5_counter_query(dev, j, false, &count.hits, 9399 &count.bytes, &action)) && action) { 9400 id = (uint64_t)(uintptr_t)action; 9401 type = DR_DUMP_REC_TYPE_PMD_COUNTER; 9402 save_dump_file(NULL, 0, type, 9403 id, (void *)&count, file); 9404 } 9405 } 9406 return 0; 9407 } 9408 #endif 9409 9410 /** 9411 * Dump flow raw hw data to file 9412 * 9413 * @param[in] dev 9414 * The pointer to Ethernet device. 9415 * @param[in] file 9416 * A pointer to a file for output. 9417 * @param[out] error 9418 * Perform verbose error reporting if not NULL. PMDs initialize this 9419 * structure in case of error only. 9420 * @return 9421 * 0 on success, a negative value otherwise. 
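 *
 * A minimal usage sketch through the public API (illustrative only;
 * assumes port 0 is a probed mlx5 port):
 *
 * @code
 * struct rte_flow_error err;
 * FILE *f = fopen("/tmp/mlx5_flow_dump.txt", "w");
 *
 * if (f != NULL) {
 *	rte_flow_dev_dump(0, NULL, f, &err); // a NULL flow dumps all rules
 *	fclose(f);
 * }
 * @endcode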
9422 */
9423 int
9424 mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
9425 FILE *file,
9426 struct rte_flow_error *error __rte_unused)
9427 {
9428 struct mlx5_priv *priv = dev->data->dev_private;
9429 struct mlx5_dev_ctx_shared *sh = priv->sh;
9430 uint32_t handle_idx;
9431 int ret;
9432 struct mlx5_flow_handle *dh;
9433 struct rte_flow *flow;
9434 
9435 if (!sh->config.dv_flow_en) {
9436 if (fputs("device dv flow disabled\n", file) <= 0)
9437 return -errno;
9438 return -ENOTSUP;
9439 }
9440 
9441 /* dump all */
9442 if (!flow_idx) {
9443 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
9444 if (mlx5_flow_dev_dump_sh_all(dev, file, error))
9445 return -EINVAL;
9446 #endif
9447 return mlx5_devx_cmd_flow_dump(sh->fdb_domain,
9448 sh->rx_domain,
9449 sh->tx_domain, file);
9450 }
9451 /* dump one */
9452 flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
9453 (uintptr_t)(void *)flow_idx);
9454 if (!flow)
9455 return -EINVAL;
9456 
9457 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
9458 mlx5_flow_dev_dump_ipool(dev, flow, file, error);
9459 #endif
9460 handle_idx = flow->dev_handles;
9461 while (handle_idx) {
9462 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
9463 handle_idx);
9464 if (!dh)
9465 return -ENOENT;
9466 if (dh->drv_flow) {
9467 ret = mlx5_devx_cmd_flow_single_dump(dh->drv_flow,
9468 file);
9469 if (ret)
9470 return -ENOENT;
9471 }
9472 handle_idx = dh->next.next;
9473 }
9474 return 0;
9475 }
9476 
9477 /**
9478 * Get aged-out flows.
9479 *
9480 * @param[in] dev
9481 * Pointer to the Ethernet device structure.
9482 * @param[in] contexts
9483 * The address of an array of pointers to the aged-out flow contexts.
9484 * @param[in] nb_contexts
9485 * The length of the context array.
9486 * @param[out] error
9487 * Perform verbose error reporting if not NULL. Initialized in case of
9488 * error only.
9489 *
9490 * @return
9491 * The number of aged-out flow contexts on success, a negative errno
9492 * value otherwise. If nb_contexts is 0, returns the total number of
9493 * aged-out contexts. If nb_contexts is not 0, returns the number of
9494 * aged-out flows reported in the context array.
9495 */
9496 int
9497 mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
9498 uint32_t nb_contexts, struct rte_flow_error *error)
9499 {
9500 const struct mlx5_flow_driver_ops *fops;
9501 struct rte_flow_attr attr = { .transfer = 0 };
9502 
9503 if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
9504 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
9505 return fops->get_aged_flows(dev, contexts, nb_contexts,
9506 error);
9507 }
9508 DRV_LOG(ERR,
9509 "port %u get aged flows is not supported.",
9510 dev->data->port_id);
9511 return -ENOTSUP;
9512 }
9513 
9514 /* Wrapper for driver action_validate op callback */
9515 static int
9516 flow_drv_action_validate(struct rte_eth_dev *dev,
9517 const struct rte_flow_indir_action_conf *conf,
9518 const struct rte_flow_action *action,
9519 const struct mlx5_flow_driver_ops *fops,
9520 struct rte_flow_error *error)
9521 {
9522 static const char err_msg[] = "indirect action validation unsupported";
9523 
9524 if (!fops->action_validate) {
9525 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
9526 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
9527 NULL, err_msg);
9528 return -rte_errno;
9529 }
9530 return fops->action_validate(dev, conf, action, error);
9531 }
9532 
9533 /**
9534 * Destroy the indirect action by handle.
9535 *
9536 * @param dev
9537 * Pointer to Ethernet device structure.
9538 * @param[in] handle
9539 * Handle for the indirect action object to be destroyed.
9540 * @param[out] error
9541 * Perform verbose error reporting if not NULL. PMDs initialize this
9542 * structure in case of error only.
9543 *
9544 * @return
9545 * 0 on success, a negative errno value otherwise and rte_errno is set.
9546 *
9547 * @note: wrapper for driver action_destroy op callback.
9548 */
9549 static int
9550 mlx5_action_handle_destroy(struct rte_eth_dev *dev,
9551 struct rte_flow_action_handle *handle,
9552 struct rte_flow_error *error)
9553 {
9554 static const char err_msg[] = "indirect action destruction unsupported";
9555 struct rte_flow_attr attr = { .transfer = 0 };
9556 const struct mlx5_flow_driver_ops *fops =
9557 flow_get_drv_ops(flow_get_drv_type(dev, &attr));
9558 
9559 if (!fops->action_destroy) {
9560 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
9561 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
9562 NULL, err_msg);
9563 return -rte_errno;
9564 }
9565 return fops->action_destroy(dev, handle, error);
9566 }
9567 
9568 /* Wrapper for driver action_update op callback */
9569 static int
9570 flow_drv_action_update(struct rte_eth_dev *dev,
9571 struct rte_flow_action_handle *handle,
9572 const void *update,
9573 const struct mlx5_flow_driver_ops *fops,
9574 struct rte_flow_error *error)
9575 {
9576 static const char err_msg[] = "indirect action update unsupported";
9577 
9578 if (!fops->action_update) {
9579 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
9580 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
9581 NULL, err_msg);
9582 return -rte_errno;
9583 }
9584 return fops->action_update(dev, handle, update, error);
9585 }
9586 
9587 /* Wrapper for driver action_query op callback */
9588 static int
9589 flow_drv_action_query(struct rte_eth_dev *dev,
9590 const struct rte_flow_action_handle *handle,
9591 void *data,
9592 const struct mlx5_flow_driver_ops *fops,
9593 struct rte_flow_error *error)
9594 {
9595 static const char err_msg[] = "indirect action query unsupported";
9596 
9597 if (!fops->action_query) {
9598 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
9599 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
9600 NULL, err_msg);
9601 return -rte_errno;
9602 }
9603 return fops->action_query(dev, handle, data, error);
9604 }
9605 
9606 /**
9607 * Create an indirect action for reuse in multiple flow rules.
9608 *
9609 * @param dev
9610 * Pointer to Ethernet device structure.
9611 * @param conf
9612 * Pointer to indirect action object configuration.
9613 * @param[in] action
9614 * Action configuration for indirect action object creation.
9615 * @param[out] error
9616 * Perform verbose error reporting if not NULL. PMDs initialize this
9617 * structure in case of error only.
9618 * @return
9619 * A valid handle in case of success, NULL otherwise and rte_errno is set.
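 *
 * A minimal creation sketch through the public API (illustrative only;
 * @p port_id is assumed to identify a configured port and the COUNT
 * action type is just an example):
 *
 * @code
 * const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 * const struct rte_flow_action act = {
 *	.type = RTE_FLOW_ACTION_TYPE_COUNT,
 * };
 * struct rte_flow_error err;
 * struct rte_flow_action_handle *h =
 *	rte_flow_action_handle_create(port_id, &conf, &act, &err);
 * @endcode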
9620 */
9621 static struct rte_flow_action_handle *
9622 mlx5_action_handle_create(struct rte_eth_dev *dev,
9623 const struct rte_flow_indir_action_conf *conf,
9624 const struct rte_flow_action *action,
9625 struct rte_flow_error *error)
9626 {
9627 static const char err_msg[] = "indirect action creation unsupported";
9628 struct rte_flow_attr attr = { .transfer = 0 };
9629 const struct mlx5_flow_driver_ops *fops =
9630 flow_get_drv_ops(flow_get_drv_type(dev, &attr));
9631 
9632 if (flow_drv_action_validate(dev, conf, action, fops, error))
9633 return NULL;
9634 if (!fops->action_create) {
9635 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
9636 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
9637 NULL, err_msg);
9638 return NULL;
9639 }
9640 return fops->action_create(dev, conf, action, error);
9641 }
9642 
9643 /**
9644 * Updates in place the indirect action configuration pointed to by *handle*
9645 * with the configuration provided as the *update* argument.
9646 * The update of the indirect action configuration affects all flow rules
9647 * reusing the action via the handle.
9648 *
9649 * @param dev
9650 * Pointer to Ethernet device structure.
9651 * @param[in] handle
9652 * Handle for the indirect action to be updated.
9653 * @param[in] update
9654 * Action specification used to modify the action pointed to by *handle*.
9655 * *update* could be of the same type as the action pointed to by the
9656 * *handle* argument, or some other structure like a wrapper, depending on
9657 * the indirect action type.
9658 * @param[out] error
9659 * Perform verbose error reporting if not NULL. PMDs initialize this
9660 * structure in case of error only.
9661 *
9662 * @return
9663 * 0 on success, a negative errno value otherwise and rte_errno is set.
9664 */
9665 static int
9666 mlx5_action_handle_update(struct rte_eth_dev *dev,
9667 struct rte_flow_action_handle *handle,
9668 const void *update,
9669 struct rte_flow_error *error)
9670 {
9671 struct rte_flow_attr attr = { .transfer = 0 };
9672 const struct mlx5_flow_driver_ops *fops =
9673 flow_get_drv_ops(flow_get_drv_type(dev, &attr));
9674 int ret;
9675 
9676 ret = flow_drv_action_validate(dev, NULL,
9677 (const struct rte_flow_action *)update, fops, error);
9678 if (ret)
9679 return ret;
9680 return flow_drv_action_update(dev, handle, update, fops,
9681 error);
9682 }
9683 
9684 /**
9685 * Query the indirect action by handle.
9686 *
9687 * This function allows retrieving action-specific data such as counters.
9688 * Data is gathered by a special action which may be present/referenced in
9689 * more than one flow rule definition.
9690 *
9691 * @see RTE_FLOW_ACTION_TYPE_COUNT
9692 *
9693 * @param dev
9694 * Pointer to Ethernet device structure.
9695 * @param[in] handle
9696 * Handle for the indirect action to query.
9697 * @param[in, out] data
9698 * Pointer to storage for the associated query data type.
9699 * @param[out] error
9700 * Perform verbose error reporting if not NULL. PMDs initialize this
9701 * structure in case of error only.
9702 *
9703 * @return
9704 * 0 on success, a negative errno value otherwise and rte_errno is set.
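 *
 * For example, the statistics of an indirect COUNT action can be read
 * through the public API as follows (sketch; assumes @p handle was
 * created for a COUNT action on @p port_id):
 *
 * @code
 * struct rte_flow_query_count qc = { .reset = 0 };
 * struct rte_flow_error err;
 *
 * if (rte_flow_action_handle_query(port_id, handle, &qc, &err) == 0)
 *	printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n", qc.hits, qc.bytes);
 * @endcode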
9705 */ 9706 static int 9707 mlx5_action_handle_query(struct rte_eth_dev *dev, 9708 const struct rte_flow_action_handle *handle, 9709 void *data, 9710 struct rte_flow_error *error) 9711 { 9712 struct rte_flow_attr attr = { .transfer = 0 }; 9713 const struct mlx5_flow_driver_ops *fops = 9714 flow_get_drv_ops(flow_get_drv_type(dev, &attr)); 9715 9716 return flow_drv_action_query(dev, handle, data, fops, error); 9717 } 9718 9719 /** 9720 * Destroy all indirect actions (shared RSS). 9721 * 9722 * @param dev 9723 * Pointer to Ethernet device. 9724 * 9725 * @return 9726 * 0 on success, a negative errno value otherwise and rte_errno is set. 9727 */ 9728 int 9729 mlx5_action_handle_flush(struct rte_eth_dev *dev) 9730 { 9731 struct rte_flow_error error; 9732 struct mlx5_priv *priv = dev->data->dev_private; 9733 struct mlx5_shared_action_rss *shared_rss; 9734 int ret = 0; 9735 uint32_t idx; 9736 9737 ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], 9738 priv->rss_shared_actions, idx, shared_rss, next) { 9739 ret |= mlx5_action_handle_destroy(dev, 9740 (struct rte_flow_action_handle *)(uintptr_t)idx, &error); 9741 } 9742 return ret; 9743 } 9744 9745 /** 9746 * Validate existing indirect actions against current device configuration 9747 * and attach them to device resources. 9748 * 9749 * @param dev 9750 * Pointer to Ethernet device. 9751 * 9752 * @return 9753 * 0 on success, a negative errno value otherwise and rte_errno is set. 9754 */ 9755 int 9756 mlx5_action_handle_attach(struct rte_eth_dev *dev) 9757 { 9758 struct mlx5_priv *priv = dev->data->dev_private; 9759 int ret = 0; 9760 struct mlx5_ind_table_obj *ind_tbl, *ind_tbl_last; 9761 9762 LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) { 9763 const char *message; 9764 uint32_t queue_idx; 9765 9766 ret = mlx5_validate_rss_queues(dev, ind_tbl->queues, 9767 ind_tbl->queues_n, 9768 &message, &queue_idx); 9769 if (ret != 0) { 9770 DRV_LOG(ERR, "Port %u cannot use queue %u in RSS: %s", 9771 dev->data->port_id, ind_tbl->queues[queue_idx], 9772 message); 9773 break; 9774 } 9775 } 9776 if (ret != 0) 9777 return ret; 9778 LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) { 9779 ret = mlx5_ind_table_obj_attach(dev, ind_tbl); 9780 if (ret != 0) { 9781 DRV_LOG(ERR, "Port %u could not attach " 9782 "indirection table obj %p", 9783 dev->data->port_id, (void *)ind_tbl); 9784 goto error; 9785 } 9786 } 9787 9788 return 0; 9789 error: 9790 ind_tbl_last = ind_tbl; 9791 LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) { 9792 if (ind_tbl == ind_tbl_last) 9793 break; 9794 if (mlx5_ind_table_obj_detach(dev, ind_tbl) != 0) 9795 DRV_LOG(CRIT, "Port %u could not detach " 9796 "indirection table obj %p on rollback", 9797 dev->data->port_id, (void *)ind_tbl); 9798 } 9799 return ret; 9800 } 9801 9802 /** 9803 * Detach indirect actions of the device from its resources. 9804 * 9805 * @param dev 9806 * Pointer to Ethernet device. 9807 * 9808 * @return 9809 * 0 on success, a negative errno value otherwise and rte_errno is set. 
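 *
 * @note Expected to be called when the port is stopped;
 * mlx5_action_handle_attach() re-attaches the indirection tables when
 * the port is started again.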
9810 */
9811 int
9812 mlx5_action_handle_detach(struct rte_eth_dev *dev)
9813 {
9814 struct mlx5_priv *priv = dev->data->dev_private;
9815 int ret = 0;
9816 struct mlx5_ind_table_obj *ind_tbl, *ind_tbl_last;
9817 
9818 LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
9819 ret = mlx5_ind_table_obj_detach(dev, ind_tbl);
9820 if (ret != 0) {
9821 DRV_LOG(ERR, "Port %u could not detach "
9822 "indirection table obj %p",
9823 dev->data->port_id, (void *)ind_tbl);
9824 goto error;
9825 }
9826 }
9827 return 0;
9828 error:
9829 ind_tbl_last = ind_tbl;
9830 LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
9831 if (ind_tbl == ind_tbl_last)
9832 break;
9833 if (mlx5_ind_table_obj_attach(dev, ind_tbl) != 0)
9834 DRV_LOG(CRIT, "Port %u could not attach "
9835 "indirection table obj %p on rollback",
9836 dev->data->port_id, (void *)ind_tbl);
9837 }
9838 return ret;
9839 }
9840 
9841 #ifndef HAVE_MLX5DV_DR
9842 #define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
9843 #else
9844 #define MLX5_DOMAIN_SYNC_FLOW \
9845 (MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
9846 #endif
9847 
9848 int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
9849 {
9850 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
9851 const struct mlx5_flow_driver_ops *fops;
9852 int ret;
9853 struct rte_flow_attr attr = { .transfer = 0 };
9854 
9855 fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
9856 ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
9857 if (ret > 0)
9858 ret = -ret;
9859 return ret;
9860 }
9861 
9862 const struct mlx5_flow_tunnel *
9863 mlx5_get_tof(const struct rte_flow_item *item,
9864 const struct rte_flow_action *action,
9865 enum mlx5_tof_rule_type *rule_type)
9866 {
9867 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
9868 if (item->type == (typeof(item->type))
9869 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL) {
9870 *rule_type = MLX5_TUNNEL_OFFLOAD_MATCH_RULE;
9871 return flow_items_to_tunnel(item);
9872 }
9873 }
9874 for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
9875 if (action->type == (typeof(action->type))
9876 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
9877 *rule_type = MLX5_TUNNEL_OFFLOAD_SET_RULE;
9878 return flow_actions_to_tunnel(action);
9879 }
9880 }
9881 return NULL;
9882 }
9883 
9884 /**
9885 * Tunnel offload functionality is defined for the DV environment only.
9886 */
9887 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
9888 __extension__
9889 union tunnel_offload_mark {
9890 uint32_t val;
9891 struct {
9892 uint32_t app_reserve:8;
9893 uint32_t table_id:15;
9894 uint32_t transfer:1;
9895 uint32_t _unused_:8;
9896 };
9897 };
9898 
9899 static bool
9900 mlx5_access_tunnel_offload_db
9901 (struct rte_eth_dev *dev,
9902 bool (*match)(struct rte_eth_dev *,
9903 struct mlx5_flow_tunnel *, const void *),
9904 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
9905 void (*miss)(struct rte_eth_dev *, void *),
9906 void *ctx, bool lock_op);
9907 
9908 static int
9909 flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
9910 struct rte_flow *flow,
9911 const struct rte_flow_attr *attr,
9912 const struct rte_flow_action *app_actions,
9913 uint32_t flow_idx,
9914 const struct mlx5_flow_tunnel *tunnel,
9915 struct tunnel_default_miss_ctx *ctx,
9916 struct rte_flow_error *error)
9917 {
9918 struct mlx5_priv *priv = dev->data->dev_private;
9919 struct mlx5_flow *dev_flow;
9920 struct rte_flow_attr miss_attr = *attr;
9921 const struct rte_flow_item miss_items[2] = {
9922 {
9923 .type = RTE_FLOW_ITEM_TYPE_ETH,
9924 .spec = NULL,
9925 .last = NULL,
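/* Wildcard L2 match: the default-miss rule must catch any packet that reached the tunnel group. */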
.mask = NULL
9927 },
9928 {
9929 .type = RTE_FLOW_ITEM_TYPE_END,
9930 .spec = NULL,
9931 .last = NULL,
9932 .mask = NULL
9933 }
9934 };
9935 union tunnel_offload_mark mark_id;
9936 struct rte_flow_action_mark miss_mark;
9937 struct rte_flow_action miss_actions[3] = {
9938 [0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
9939 [2] = { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL }
9940 };
9941 const struct rte_flow_action_jump *jump_data;
9942 uint32_t i, flow_table = 0; /* prevent compilation warning */
9943 struct flow_grp_info grp_info = {
9944 .external = 1,
9945 .transfer = attr->transfer,
9946 .fdb_def_rule = !!priv->fdb_def_rule,
9947 .std_tbl_fix = 0,
9948 };
9949 int ret;
9950 
9951 if (!attr->transfer) {
9952 uint32_t q_size;
9953 
9954 miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
9955 q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
9956 ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
9957 0, SOCKET_ID_ANY);
9958 if (!ctx->queue)
9959 return rte_flow_error_set
9960 (error, ENOMEM,
9961 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
9962 NULL, "invalid default miss RSS");
9963 ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT;
9964 ctx->action_rss.level = 0;
9965 ctx->action_rss.types = priv->rss_conf.rss_hf;
9966 ctx->action_rss.key_len = priv->rss_conf.rss_key_len;
9967 ctx->action_rss.queue_num = priv->reta_idx_n;
9968 ctx->action_rss.key = priv->rss_conf.rss_key;
9969 ctx->action_rss.queue = ctx->queue;
9970 if (!priv->reta_idx_n || !priv->rxqs_n)
9971 return rte_flow_error_set
9972 (error, EINVAL,
9973 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
9974 NULL, "invalid port configuration");
9975 if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
9976 ctx->action_rss.types = 0;
9977 for (i = 0; i != priv->reta_idx_n; ++i)
9978 ctx->queue[i] = (*priv->reta_idx)[i];
9979 } else {
9980 miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
9981 ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
9982 }
9983 miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
9984 for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
9985 jump_data = app_actions->conf;
9986 miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
9987 miss_attr.group = jump_data->group;
9988 ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
9989 &flow_table, &grp_info, error);
9990 if (ret)
9991 return rte_flow_error_set(error, EINVAL,
9992 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
9993 NULL, "invalid tunnel id");
9994 mark_id.app_reserve = 0;
9995 mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
9996 mark_id.transfer = !!attr->transfer;
9997 mark_id._unused_ = 0;
9998 miss_mark.id = mark_id.val;
9999 dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
10000 miss_items, miss_actions, flow_idx, error);
10001 if (!dev_flow)
10002 return -rte_errno;
10003 dev_flow->flow = flow;
10004 dev_flow->external = true;
10005 dev_flow->tunnel = tunnel;
10006 dev_flow->tof_type = MLX5_TUNNEL_OFFLOAD_MISS_RULE;
10007 /* Subflow object was created, we must include it in the list.
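 * so that the regular flow destroy path releases the default-miss
 * subflow together with the application-created subflows.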
*/
10008 SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
10009 dev_flow->handle, next);
10010 DRV_LOG(DEBUG,
10011 "port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
10012 dev->data->port_id, tunnel->app_tunnel.type,
10013 tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
10014 ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
10015 miss_actions, error);
10016 if (!ret)
10017 ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
10018 error);
10019 
10020 return ret;
10021 }
10022 
10023 static const struct mlx5_flow_tbl_data_entry *
10024 tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
10025 {
10026 struct mlx5_priv *priv = dev->data->dev_private;
10027 struct mlx5_dev_ctx_shared *sh = priv->sh;
10028 struct mlx5_list_entry *he;
10029 union tunnel_offload_mark mbits = { .val = mark };
10030 union mlx5_flow_tbl_key table_key = {
10031 {
10032 .level = tunnel_id_to_flow_tbl(mbits.table_id),
10033 .id = 0,
10034 .reserved = 0,
10035 .dummy = 0,
10036 .is_fdb = !!mbits.transfer,
10037 .is_egress = 0,
10038 }
10039 };
10040 struct mlx5_flow_cb_ctx ctx = {
10041 .data = &table_key.v64,
10042 };
10043 
10044 he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, &ctx);
10045 return he ?
10046 container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
10047 }
10048 
10049 static void
10050 mlx5_flow_tunnel_grp2tbl_remove_cb(void *tool_ctx,
10051 struct mlx5_list_entry *entry)
10052 {
10053 struct mlx5_dev_ctx_shared *sh = tool_ctx;
10054 struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
10055 
10056 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
10057 tunnel_flow_tbl_to_id(tte->flow_table));
10058 mlx5_free(tte);
10059 }
10060 
10061 static int
10062 mlx5_flow_tunnel_grp2tbl_match_cb(void *tool_ctx __rte_unused,
10063 struct mlx5_list_entry *entry, void *cb_ctx)
10064 {
10065 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10066 union tunnel_tbl_key tbl = {
10067 .val = *(uint64_t *)(ctx->data),
10068 };
10069 struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
10070 
10071 return tbl.tunnel_id != tte->tunnel_id || tbl.group != tte->group;
10072 }
10073 
10074 static struct mlx5_list_entry *
10075 mlx5_flow_tunnel_grp2tbl_create_cb(void *tool_ctx, void *cb_ctx)
10076 {
10077 struct mlx5_dev_ctx_shared *sh = tool_ctx;
10078 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10079 struct tunnel_tbl_entry *tte;
10080 union tunnel_tbl_key tbl = {
10081 .val = *(uint64_t *)(ctx->data),
10082 };
10083 
10084 tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
10085 sizeof(*tte), 0,
10086 SOCKET_ID_ANY);
10087 if (!tte)
10088 goto err;
10089 mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
10090 &tte->flow_table);
10091 if (tte->flow_table >= MLX5_MAX_TABLES) {
10092 DRV_LOG(ERR, "Tunnel TBL ID %u exceeds max limit.",
10093 tte->flow_table);
10094 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
10095 tte->flow_table);
10096 goto err;
10097 } else if (!tte->flow_table) {
10098 goto err;
10099 }
10100 tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
10101 tte->tunnel_id = tbl.tunnel_id;
10102 tte->group = tbl.group;
10103 return &tte->hash;
10104 err:
10105 if (tte)
10106 mlx5_free(tte);
10107 return NULL;
10108 }
10109 
10110 static struct mlx5_list_entry *
10111 mlx5_flow_tunnel_grp2tbl_clone_cb(void *tool_ctx __rte_unused,
10112 struct mlx5_list_entry *oentry,
10113 void *cb_ctx __rte_unused)
10114 {
10115 struct tunnel_tbl_entry *tte = mlx5_malloc(MLX5_MEM_SYS, sizeof(*tte),
10116 0, SOCKET_ID_ANY);
10117 
10118 if
(!tte) 10119 return NULL; 10120 memcpy(tte, oentry, sizeof(*tte)); 10121 return &tte->hash; 10122 } 10123 10124 static void 10125 mlx5_flow_tunnel_grp2tbl_clone_free_cb(void *tool_ctx __rte_unused, 10126 struct mlx5_list_entry *entry) 10127 { 10128 struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash); 10129 10130 mlx5_free(tte); 10131 } 10132 10133 static uint32_t 10134 tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev, 10135 const struct mlx5_flow_tunnel *tunnel, 10136 uint32_t group, uint32_t *table, 10137 struct rte_flow_error *error) 10138 { 10139 struct mlx5_list_entry *he; 10140 struct tunnel_tbl_entry *tte; 10141 union tunnel_tbl_key key = { 10142 .tunnel_id = tunnel ? tunnel->tunnel_id : 0, 10143 .group = group 10144 }; 10145 struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); 10146 struct mlx5_hlist *group_hash; 10147 struct mlx5_flow_cb_ctx ctx = { 10148 .data = &key.val, 10149 }; 10150 10151 group_hash = tunnel ? tunnel->groups : thub->groups; 10152 he = mlx5_hlist_register(group_hash, key.val, &ctx); 10153 if (!he) 10154 return rte_flow_error_set(error, EINVAL, 10155 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, 10156 NULL, 10157 "tunnel group index not supported"); 10158 tte = container_of(he, typeof(*tte), hash); 10159 *table = tte->flow_table; 10160 DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x", 10161 dev->data->port_id, key.tunnel_id, group, *table); 10162 return 0; 10163 } 10164 10165 static void 10166 mlx5_flow_tunnel_free(struct rte_eth_dev *dev, 10167 struct mlx5_flow_tunnel *tunnel) 10168 { 10169 struct mlx5_priv *priv = dev->data->dev_private; 10170 struct mlx5_indexed_pool *ipool; 10171 10172 DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x", 10173 dev->data->port_id, tunnel->tunnel_id); 10174 LIST_REMOVE(tunnel, chain); 10175 mlx5_hlist_destroy(tunnel->groups); 10176 ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID]; 10177 mlx5_ipool_free(ipool, tunnel->tunnel_id); 10178 } 10179 10180 static bool 10181 mlx5_access_tunnel_offload_db 10182 (struct rte_eth_dev *dev, 10183 bool (*match)(struct rte_eth_dev *, 10184 struct mlx5_flow_tunnel *, const void *), 10185 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *), 10186 void (*miss)(struct rte_eth_dev *, void *), 10187 void *ctx, bool lock_op) 10188 { 10189 bool verdict = false; 10190 struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); 10191 struct mlx5_flow_tunnel *tunnel; 10192 10193 rte_spinlock_lock(&thub->sl); 10194 LIST_FOREACH(tunnel, &thub->tunnels, chain) { 10195 verdict = match(dev, tunnel, (const void *)ctx); 10196 if (verdict) 10197 break; 10198 } 10199 if (!lock_op) 10200 rte_spinlock_unlock(&thub->sl); 10201 if (verdict && hit) 10202 hit(dev, tunnel, ctx); 10203 if (!verdict && miss) 10204 miss(dev, ctx); 10205 if (lock_op) 10206 rte_spinlock_unlock(&thub->sl); 10207 10208 return verdict; 10209 } 10210 10211 struct tunnel_db_find_tunnel_id_ctx { 10212 uint32_t tunnel_id; 10213 struct mlx5_flow_tunnel *tunnel; 10214 }; 10215 10216 static bool 10217 find_tunnel_id_match(struct rte_eth_dev *dev, 10218 struct mlx5_flow_tunnel *tunnel, const void *x) 10219 { 10220 const struct tunnel_db_find_tunnel_id_ctx *ctx = x; 10221 10222 RTE_SET_USED(dev); 10223 return tunnel->tunnel_id == ctx->tunnel_id; 10224 } 10225 10226 static void 10227 find_tunnel_id_hit(struct rte_eth_dev *dev, 10228 struct mlx5_flow_tunnel *tunnel, void *x) 10229 { 10230 struct tunnel_db_find_tunnel_id_ctx *ctx = x; 10231 RTE_SET_USED(dev); 10232 ctx->tunnel = tunnel; 10233 } 10234 10235 static struct 
mlx5_flow_tunnel *
10236 mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
10237 {
10238 struct tunnel_db_find_tunnel_id_ctx ctx = {
10239 .tunnel_id = id,
10240 };
10241 
10242 mlx5_access_tunnel_offload_db(dev, find_tunnel_id_match,
10243 find_tunnel_id_hit, NULL, &ctx, true);
10244 
10245 return ctx.tunnel;
10246 }
10247 
10248 static struct mlx5_flow_tunnel *
10249 mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
10250 const struct rte_flow_tunnel *app_tunnel)
10251 {
10252 struct mlx5_priv *priv = dev->data->dev_private;
10253 struct mlx5_indexed_pool *ipool;
10254 struct mlx5_flow_tunnel *tunnel;
10255 uint32_t id;
10256 
10257 ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
10258 tunnel = mlx5_ipool_zmalloc(ipool, &id);
10259 if (!tunnel)
10260 return NULL;
10261 if (id >= MLX5_MAX_TUNNELS) {
10262 mlx5_ipool_free(ipool, id);
10263 DRV_LOG(ERR, "Tunnel ID %u exceeds max limit.", id);
10264 return NULL;
10265 }
10266 tunnel->groups = mlx5_hlist_create("tunnel groups", 64, false, true,
10267 priv->sh,
10268 mlx5_flow_tunnel_grp2tbl_create_cb,
10269 mlx5_flow_tunnel_grp2tbl_match_cb,
10270 mlx5_flow_tunnel_grp2tbl_remove_cb,
10271 mlx5_flow_tunnel_grp2tbl_clone_cb,
10272 mlx5_flow_tunnel_grp2tbl_clone_free_cb);
10273 if (!tunnel->groups) {
10274 mlx5_ipool_free(ipool, id);
10275 return NULL;
10276 }
10277 /* Initialize the new PMD tunnel. */
10278 memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel));
10279 tunnel->tunnel_id = id;
10280 tunnel->action.type = (typeof(tunnel->action.type))
10281 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET;
10282 tunnel->action.conf = tunnel;
10283 tunnel->item.type = (typeof(tunnel->item.type))
10284 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL;
10285 tunnel->item.spec = tunnel;
10286 tunnel->item.last = NULL;
10287 tunnel->item.mask = NULL;
10288 
10289 DRV_LOG(DEBUG, "port %u new pmd tunnel id=0x%x",
10290 dev->data->port_id, tunnel->tunnel_id);
10291 
10292 return tunnel;
10293 }
10294 
10295 struct tunnel_db_get_tunnel_ctx {
10296 const struct rte_flow_tunnel *app_tunnel;
10297 struct mlx5_flow_tunnel *tunnel;
10298 };
10299 
10300 static bool get_tunnel_match(struct rte_eth_dev *dev,
10301 struct mlx5_flow_tunnel *tunnel, const void *x)
10302 {
10303 const struct tunnel_db_get_tunnel_ctx *ctx = x;
10304 
10305 RTE_SET_USED(dev);
10306 return !memcmp(ctx->app_tunnel, &tunnel->app_tunnel,
10307 sizeof(*ctx->app_tunnel));
10308 }
10309 
10310 static void get_tunnel_hit(struct rte_eth_dev *dev,
10311 struct mlx5_flow_tunnel *tunnel, void *x)
10312 {
10313 /* called under tunnel spinlock protection */
10314 struct tunnel_db_get_tunnel_ctx *ctx = x;
10315 
10316 RTE_SET_USED(dev);
10317 tunnel->refctn++;
10318 ctx->tunnel = tunnel;
10319 }
10320 
10321 static void get_tunnel_miss(struct rte_eth_dev *dev, void *x)
10322 {
10323 /* called under tunnel spinlock protection */
10324 struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
10325 struct tunnel_db_get_tunnel_ctx *ctx = x;
10326 
10327 rte_spinlock_unlock(&thub->sl);
10328 ctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel);
10329 rte_spinlock_lock(&thub->sl);
10330 if (ctx->tunnel) {
10331 ctx->tunnel->refctn = 1;
10332 LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);
10333 }
10334 }
10335 
10336 
10337 static int
10338 mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
10339 const struct rte_flow_tunnel *app_tunnel,
10340 struct mlx5_flow_tunnel **tunnel)
10341 {
10342 struct tunnel_db_get_tunnel_ctx ctx = {
10343 .app_tunnel = app_tunnel,
10344 };
10345 
10346 mlx5_access_tunnel_offload_db(dev, get_tunnel_match,
get_tunnel_hit, 10347 get_tunnel_miss, &ctx, true); 10348 *tunnel = ctx.tunnel; 10349 return ctx.tunnel ? 0 : -ENOMEM; 10350 } 10351 10352 void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id) 10353 { 10354 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub; 10355 10356 if (!thub) 10357 return; 10358 if (!LIST_EMPTY(&thub->tunnels)) 10359 DRV_LOG(WARNING, "port %u tunnels present", port_id); 10360 mlx5_hlist_destroy(thub->groups); 10361 mlx5_free(thub); 10362 } 10363 10364 int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh) 10365 { 10366 int err; 10367 struct mlx5_flow_tunnel_hub *thub; 10368 10369 thub = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*thub), 10370 0, SOCKET_ID_ANY); 10371 if (!thub) 10372 return -ENOMEM; 10373 LIST_INIT(&thub->tunnels); 10374 rte_spinlock_init(&thub->sl); 10375 thub->groups = mlx5_hlist_create("flow groups", 64, 10376 false, true, sh, 10377 mlx5_flow_tunnel_grp2tbl_create_cb, 10378 mlx5_flow_tunnel_grp2tbl_match_cb, 10379 mlx5_flow_tunnel_grp2tbl_remove_cb, 10380 mlx5_flow_tunnel_grp2tbl_clone_cb, 10381 mlx5_flow_tunnel_grp2tbl_clone_free_cb); 10382 if (!thub->groups) { 10383 err = -rte_errno; 10384 goto err; 10385 } 10386 sh->tunnel_hub = thub; 10387 10388 return 0; 10389 10390 err: 10391 if (thub->groups) 10392 mlx5_hlist_destroy(thub->groups); 10393 if (thub) 10394 mlx5_free(thub); 10395 return err; 10396 } 10397 10398 static inline int 10399 mlx5_flow_tunnel_validate(struct rte_eth_dev *dev, 10400 struct rte_flow_tunnel *tunnel, 10401 struct rte_flow_error *error) 10402 { 10403 struct mlx5_priv *priv = dev->data->dev_private; 10404 10405 if (!priv->sh->config.dv_flow_en) 10406 return rte_flow_error_set(error, ENOTSUP, 10407 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 10408 "flow DV interface is off"); 10409 if (!is_tunnel_offload_active(dev)) 10410 return rte_flow_error_set(error, ENOTSUP, 10411 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 10412 "tunnel offload was not activated"); 10413 if (!tunnel) 10414 return rte_flow_error_set(error, EINVAL, 10415 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 10416 "no application tunnel"); 10417 switch (tunnel->type) { 10418 default: 10419 return rte_flow_error_set(error, EINVAL, 10420 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 10421 "unsupported tunnel type"); 10422 case RTE_FLOW_ITEM_TYPE_VXLAN: 10423 case RTE_FLOW_ITEM_TYPE_GRE: 10424 case RTE_FLOW_ITEM_TYPE_NVGRE: 10425 case RTE_FLOW_ITEM_TYPE_GENEVE: 10426 break; 10427 } 10428 return 0; 10429 } 10430 10431 static int 10432 mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev, 10433 struct rte_flow_tunnel *app_tunnel, 10434 struct rte_flow_action **actions, 10435 uint32_t *num_of_actions, 10436 struct rte_flow_error *error) 10437 { 10438 struct mlx5_flow_tunnel *tunnel; 10439 int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error); 10440 10441 if (ret) 10442 return ret; 10443 ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel); 10444 if (ret < 0) { 10445 return rte_flow_error_set(error, ret, 10446 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 10447 "failed to initialize pmd tunnel"); 10448 } 10449 *actions = &tunnel->action; 10450 *num_of_actions = 1; 10451 return 0; 10452 } 10453 10454 static int 10455 mlx5_flow_tunnel_match(struct rte_eth_dev *dev, 10456 struct rte_flow_tunnel *app_tunnel, 10457 struct rte_flow_item **items, 10458 uint32_t *num_of_items, 10459 struct rte_flow_error *error) 10460 { 10461 struct mlx5_flow_tunnel *tunnel; 10462 int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error); 10463 10464 if (ret) 10465 return ret; 10466 ret 
= mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel); 10467 if (ret < 0) { 10468 return rte_flow_error_set(error, ret, 10469 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 10470 "failed to initialize pmd tunnel"); 10471 } 10472 *items = &tunnel->item; 10473 *num_of_items = 1; 10474 return 0; 10475 } 10476 10477 struct tunnel_db_element_release_ctx { 10478 struct rte_flow_item *items; 10479 struct rte_flow_action *actions; 10480 uint32_t num_elements; 10481 struct rte_flow_error *error; 10482 int ret; 10483 }; 10484 10485 static bool 10486 tunnel_element_release_match(struct rte_eth_dev *dev, 10487 struct mlx5_flow_tunnel *tunnel, const void *x) 10488 { 10489 const struct tunnel_db_element_release_ctx *ctx = x; 10490 10491 RTE_SET_USED(dev); 10492 if (ctx->num_elements != 1) 10493 return false; 10494 else if (ctx->items) 10495 return ctx->items == &tunnel->item; 10496 else if (ctx->actions) 10497 return ctx->actions == &tunnel->action; 10498 10499 return false; 10500 } 10501 10502 static void 10503 tunnel_element_release_hit(struct rte_eth_dev *dev, 10504 struct mlx5_flow_tunnel *tunnel, void *x) 10505 { 10506 struct tunnel_db_element_release_ctx *ctx = x; 10507 ctx->ret = 0; 10508 if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED)) 10509 mlx5_flow_tunnel_free(dev, tunnel); 10510 } 10511 10512 static void 10513 tunnel_element_release_miss(struct rte_eth_dev *dev, void *x) 10514 { 10515 struct tunnel_db_element_release_ctx *ctx = x; 10516 RTE_SET_USED(dev); 10517 ctx->ret = rte_flow_error_set(ctx->error, EINVAL, 10518 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, 10519 "invalid argument"); 10520 } 10521 10522 static int 10523 mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev, 10524 struct rte_flow_item *pmd_items, 10525 uint32_t num_items, struct rte_flow_error *err) 10526 { 10527 struct tunnel_db_element_release_ctx ctx = { 10528 .items = pmd_items, 10529 .actions = NULL, 10530 .num_elements = num_items, 10531 .error = err, 10532 }; 10533 10534 mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match, 10535 tunnel_element_release_hit, 10536 tunnel_element_release_miss, &ctx, false); 10537 10538 return ctx.ret; 10539 } 10540 10541 static int 10542 mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev, 10543 struct rte_flow_action *pmd_actions, 10544 uint32_t num_actions, struct rte_flow_error *err) 10545 { 10546 struct tunnel_db_element_release_ctx ctx = { 10547 .items = NULL, 10548 .actions = pmd_actions, 10549 .num_elements = num_actions, 10550 .error = err, 10551 }; 10552 10553 mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match, 10554 tunnel_element_release_hit, 10555 tunnel_element_release_miss, &ctx, false); 10556 10557 return ctx.ret; 10558 } 10559 10560 static int 10561 mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev, 10562 struct rte_mbuf *m, 10563 struct rte_flow_restore_info *info, 10564 struct rte_flow_error *err) 10565 { 10566 uint64_t ol_flags = m->ol_flags; 10567 const struct mlx5_flow_tbl_data_entry *tble; 10568 const uint64_t mask = RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID; 10569 10570 if (!is_tunnel_offload_active(dev)) { 10571 info->flags = 0; 10572 return 0; 10573 } 10574 10575 if ((ol_flags & mask) != mask) 10576 goto err; 10577 tble = tunnel_mark_decode(dev, m->hash.fdir.hi); 10578 if (!tble) { 10579 DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x", 10580 dev->data->port_id, m->hash.fdir.hi); 10581 goto err; 10582 } 10583 MLX5_ASSERT(tble->tunnel); 10584 memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel)); 10585 info->group_id 
= tble->group_id; 10586 info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL | 10587 RTE_FLOW_RESTORE_INFO_GROUP_ID | 10588 RTE_FLOW_RESTORE_INFO_ENCAPSULATED; 10589 10590 return 0; 10591 10592 err: 10593 return rte_flow_error_set(err, EINVAL, 10594 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 10595 "failed to get restore info"); 10596 } 10597 10598 #else /* HAVE_IBV_FLOW_DV_SUPPORT */ 10599 static int 10600 mlx5_flow_tunnel_decap_set(__rte_unused struct rte_eth_dev *dev, 10601 __rte_unused struct rte_flow_tunnel *app_tunnel, 10602 __rte_unused struct rte_flow_action **actions, 10603 __rte_unused uint32_t *num_of_actions, 10604 __rte_unused struct rte_flow_error *error) 10605 { 10606 return -ENOTSUP; 10607 } 10608 10609 static int 10610 mlx5_flow_tunnel_match(__rte_unused struct rte_eth_dev *dev, 10611 __rte_unused struct rte_flow_tunnel *app_tunnel, 10612 __rte_unused struct rte_flow_item **items, 10613 __rte_unused uint32_t *num_of_items, 10614 __rte_unused struct rte_flow_error *error) 10615 { 10616 return -ENOTSUP; 10617 } 10618 10619 static int 10620 mlx5_flow_tunnel_item_release(__rte_unused struct rte_eth_dev *dev, 10621 __rte_unused struct rte_flow_item *pmd_items, 10622 __rte_unused uint32_t num_items, 10623 __rte_unused struct rte_flow_error *err) 10624 { 10625 return -ENOTSUP; 10626 } 10627 10628 static int 10629 mlx5_flow_tunnel_action_release(__rte_unused struct rte_eth_dev *dev, 10630 __rte_unused struct rte_flow_action *pmd_action, 10631 __rte_unused uint32_t num_actions, 10632 __rte_unused struct rte_flow_error *err) 10633 { 10634 return -ENOTSUP; 10635 } 10636 10637 static int 10638 mlx5_flow_tunnel_get_restore_info(__rte_unused struct rte_eth_dev *dev, 10639 __rte_unused struct rte_mbuf *m, 10640 __rte_unused struct rte_flow_restore_info *i, 10641 __rte_unused struct rte_flow_error *err) 10642 { 10643 return -ENOTSUP; 10644 } 10645 10646 static int 10647 flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev, 10648 __rte_unused struct rte_flow *flow, 10649 __rte_unused const struct rte_flow_attr *attr, 10650 __rte_unused const struct rte_flow_action *actions, 10651 __rte_unused uint32_t flow_idx, 10652 __rte_unused const struct mlx5_flow_tunnel *tunnel, 10653 __rte_unused struct tunnel_default_miss_ctx *ctx, 10654 __rte_unused struct rte_flow_error *error) 10655 { 10656 return -ENOTSUP; 10657 } 10658 10659 static struct mlx5_flow_tunnel * 10660 mlx5_find_tunnel_id(__rte_unused struct rte_eth_dev *dev, 10661 __rte_unused uint32_t id) 10662 { 10663 return NULL; 10664 } 10665 10666 static void 10667 mlx5_flow_tunnel_free(__rte_unused struct rte_eth_dev *dev, 10668 __rte_unused struct mlx5_flow_tunnel *tunnel) 10669 { 10670 } 10671 10672 static uint32_t 10673 tunnel_flow_group_to_flow_table(__rte_unused struct rte_eth_dev *dev, 10674 __rte_unused const struct mlx5_flow_tunnel *t, 10675 __rte_unused uint32_t group, 10676 __rte_unused uint32_t *table, 10677 struct rte_flow_error *error) 10678 { 10679 return rte_flow_error_set(error, ENOTSUP, 10680 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 10681 "tunnel offload requires DV support"); 10682 } 10683 10684 void 10685 mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh, 10686 __rte_unused uint16_t port_id) 10687 { 10688 } 10689 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */ 10690 10691 /* Flex flow item API */ 10692 static struct rte_flow_item_flex_handle * 10693 mlx5_flow_flex_item_create(struct rte_eth_dev *dev, 10694 const struct rte_flow_item_flex_conf *conf, 10695 struct rte_flow_error *error) 10696 { 10697 static const char 
err_msg[] = "flex item creation unsupported";
10698 struct mlx5_priv *priv = dev->data->dev_private;
10699 struct rte_flow_attr attr = { .transfer = 0 };
10700 const struct mlx5_flow_driver_ops *fops =
10701 flow_get_drv_ops(flow_get_drv_type(dev, &attr));
10702 
10703 if (!priv->pci_dev) {
10704 rte_flow_error_set(error, ENOTSUP,
10705 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10706 "create flex item on PF only");
10707 return NULL;
10708 }
10709 switch (priv->pci_dev->id.device_id) {
10710 case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF:
10711 case PCI_DEVICE_ID_MELLANOX_CONNECTX7BF:
10712 break;
10713 default:
10714 rte_flow_error_set(error, ENOTSUP,
10715 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10716 "flex item available on BlueField ports only");
10717 return NULL;
10718 }
10719 if (!fops->item_create) {
10720 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
10721 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
10722 NULL, err_msg);
10723 return NULL;
10724 }
10725 return fops->item_create(dev, conf, error);
10726 }
10727 
10728 static int
10729 mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
10730 const struct rte_flow_item_flex_handle *handle,
10731 struct rte_flow_error *error)
10732 {
10733 static const char err_msg[] = "flex item release unsupported";
10734 struct rte_flow_attr attr = { .transfer = 0 };
10735 const struct mlx5_flow_driver_ops *fops =
10736 flow_get_drv_ops(flow_get_drv_type(dev, &attr));
10737 
10738 if (!fops->item_release) {
10739 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
10740 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
10741 NULL, err_msg);
10742 return -rte_errno;
10743 }
10744 return fops->item_release(dev, handle, error);
10745 }
10746 
10747 static void
10748 mlx5_dbg__print_pattern(const struct rte_flow_item *item)
10749 {
10750 int ret;
10751 struct rte_flow_error error;
10752 
10753 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
10754 char *item_name;
10755 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, &item_name,
10756 sizeof(item_name),
10757 (void *)(uintptr_t)item->type, &error);
10758 if (ret > 0)
10759 printf("%s ", item_name);
10760 else
10761 printf("%d\n", (int)item->type);
10762 }
10763 printf("END\n");
10764 }
10765 
10766 static int
10767 mlx5_flow_is_std_vxlan_port(const struct rte_flow_item *udp_item)
10768 {
10769 const struct rte_flow_item_udp *spec = udp_item->spec;
10770 const struct rte_flow_item_udp *mask = udp_item->mask;
10771 uint16_t udp_dport = 0;
10772 
10773 if (spec != NULL) {
10774 if (!mask)
10775 mask = &rte_flow_item_udp_mask;
10776 udp_dport = rte_be_to_cpu_16(spec->hdr.dst_port &
10777 mask->hdr.dst_port);
10778 }
10779 return (!udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN);
10780 }
10781 
10782 static const struct mlx5_flow_expand_node *
10783 mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern,
10784 unsigned int item_idx,
10785 const struct mlx5_flow_expand_node graph[],
10786 const struct mlx5_flow_expand_node *node)
10787 {
10788 const struct rte_flow_item *item = pattern + item_idx, *prev_item;
10789 
10790 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN &&
10791 node != NULL &&
10792 node->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
10793 /*
10794 * The expansion node is VXLAN and it is also the last
10795 * expandable item in the pattern, so we need to continue
10796 * expansion of the inner tunnel.
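 * The UDP destination port of the preceding item selects the flavor:
 * the IANA-assigned port 4789 (or an unspecified port) keeps the
 * standard VXLAN expansion, any other port is assumed to carry an
 * L3 VXLAN payload.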
10797 */ 10798 MLX5_ASSERT(item_idx > 0); 10799 prev_item = pattern + item_idx - 1; 10800 MLX5_ASSERT(prev_item->type == RTE_FLOW_ITEM_TYPE_UDP); 10801 if (mlx5_flow_is_std_vxlan_port(prev_item)) 10802 return &graph[MLX5_EXPANSION_STD_VXLAN]; 10803 return &graph[MLX5_EXPANSION_L3_VXLAN]; 10804 } 10805 return node; 10806 } 10807 10808 /* Map of Verbs to Flow priority with 8 Verbs priorities. */ 10809 static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = { 10810 { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 }, 10811 }; 10812 10813 /* Map of Verbs to Flow priority with 16 Verbs priorities. */ 10814 static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = { 10815 { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 }, 10816 { 9, 10, 11 }, { 12, 13, 14 }, 10817 }; 10818 10819 /** 10820 * Discover the number of available flow priorities. 10821 * 10822 * @param dev 10823 * Ethernet device. 10824 * 10825 * @return 10826 * On success, number of available flow priorities. 10827 * On failure, a negative errno-style code and rte_errno is set. 10828 */ 10829 int 10830 mlx5_flow_discover_priorities(struct rte_eth_dev *dev) 10831 { 10832 static const uint16_t vprio[] = {8, 16}; 10833 const struct mlx5_priv *priv = dev->data->dev_private; 10834 const struct mlx5_flow_driver_ops *fops; 10835 enum mlx5_flow_drv_type type; 10836 int ret; 10837 10838 type = mlx5_flow_os_get_type(); 10839 if (type == MLX5_FLOW_TYPE_MAX) { 10840 type = MLX5_FLOW_TYPE_VERBS; 10841 if (priv->sh->cdev->config.devx && priv->sh->config.dv_flow_en) 10842 type = MLX5_FLOW_TYPE_DV; 10843 } 10844 fops = flow_get_drv_ops(type); 10845 if (fops->discover_priorities == NULL) { 10846 DRV_LOG(ERR, "Priority discovery not supported"); 10847 rte_errno = ENOTSUP; 10848 return -rte_errno; 10849 } 10850 ret = fops->discover_priorities(dev, vprio, RTE_DIM(vprio)); 10851 if (ret < 0) 10852 return ret; 10853 switch (ret) { 10854 case 8: 10855 ret = RTE_DIM(priority_map_3); 10856 break; 10857 case 16: 10858 ret = RTE_DIM(priority_map_5); 10859 break; 10860 default: 10861 rte_errno = ENOTSUP; 10862 DRV_LOG(ERR, 10863 "port %u maximum priority: %d expected 8/16", 10864 dev->data->port_id, ret); 10865 return -rte_errno; 10866 } 10867 DRV_LOG(INFO, "port %u supported flow priorities:" 10868 " 0-%d for ingress or egress root table," 10869 " 0-%d for non-root table or transfer root table.", 10870 dev->data->port_id, ret - 2, 10871 MLX5_NON_ROOT_FLOW_MAX_PRIO - 1); 10872 return ret; 10873 } 10874 10875 /** 10876 * Adjust flow priority based on the highest layer and the request priority. 10877 * 10878 * @param[in] dev 10879 * Pointer to the Ethernet device structure. 10880 * @param[in] priority 10881 * The rule base priority. 10882 * @param[in] subpriority 10883 * The priority based on the items. 10884 * 10885 * @return 10886 * The new priority. 10887 */ 10888 uint32_t 10889 mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, 10890 uint32_t subpriority) 10891 { 10892 uint32_t res = 0; 10893 struct mlx5_priv *priv = dev->data->dev_private; 10894 10895 switch (priv->sh->flow_max_priority) { 10896 case RTE_DIM(priority_map_3): 10897 res = priority_map_3[priority][subpriority]; 10898 break; 10899 case RTE_DIM(priority_map_5): 10900 res = priority_map_5[priority][subpriority]; 10901 break; 10902 } 10903 return res; 10904 } 10905
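/*
 * For illustration: with 16 Verbs priorities discovered, the PMD selects
 * priority_map_5 above, so a rule with base priority 1 and item
 * subpriority 2 lands at Verbs priority priority_map_5[1][2] == 5, i.e.
 * mlx5_flow_adjust_priority(dev, 1, 2) returns 5 when
 * priv->sh->flow_max_priority == RTE_DIM(priority_map_5).
 */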