/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_flow.h"
#include "mlx5_glue.h"
#include "mlx5_prm.h"
#include "mlx5_rxtx.h"

/* Dev ops structure defined in mlx5.c */
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;

/** Device flow drivers. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;
#endif
extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;

const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;

const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
	[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
#endif
	[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
	[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
};

enum mlx5_expansion {
	MLX5_EXPANSION_ROOT,
	MLX5_EXPANSION_ROOT_OUTER,
	MLX5_EXPANSION_ROOT_ETH_VLAN,
	MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_ETH,
	MLX5_EXPANSION_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_VLAN,
	MLX5_EXPANSION_OUTER_IPV4,
	MLX5_EXPANSION_OUTER_IPV4_UDP,
	MLX5_EXPANSION_OUTER_IPV4_TCP,
	MLX5_EXPANSION_OUTER_IPV6,
	MLX5_EXPANSION_OUTER_IPV6_UDP,
	MLX5_EXPANSION_OUTER_IPV6_TCP,
	MLX5_EXPANSION_VXLAN,
	MLX5_EXPANSION_VXLAN_GPE,
	MLX5_EXPANSION_GRE,
	MLX5_EXPANSION_MPLS,
	MLX5_EXPANSION_ETH,
	MLX5_EXPANSION_ETH_VLAN,
	MLX5_EXPANSION_VLAN,
	MLX5_EXPANSION_IPV4,
	MLX5_EXPANSION_IPV4_UDP,
	MLX5_EXPANSION_IPV4_TCP,
	MLX5_EXPANSION_IPV6,
	MLX5_EXPANSION_IPV6_UDP,
	MLX5_EXPANSION_IPV6_TCP,
};

/** Supported expansion of items. */
static const struct rte_flow_expand_node mlx5_support_expansion[] = {
	[MLX5_EXPANSION_ROOT] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						 MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
						 MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_OUTER_ETH] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6,
						 MLX5_EXPANSION_MPLS),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_OUTER_IPV4] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV4_UDP,
			 MLX5_EXPANSION_OUTER_IPV4_TCP,
			 MLX5_EXPANSION_GRE,
			 MLX5_EXPANSION_IPV4,
			 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						 MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_OUTER_IPV6] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV6_UDP,
			 MLX5_EXPANSION_OUTER_IPV6_TCP,
			 MLX5_EXPANSION_IPV4,
			 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						 MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
	[MLX5_EXPANSION_VXLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
	[MLX5_EXPANSION_VXLAN_GPE] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						 MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
	},
	[MLX5_EXPANSION_GRE] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
		.type = RTE_FLOW_ITEM_TYPE_GRE,
	},
	[MLX5_EXPANSION_MPLS] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_MPLS,
	},
	[MLX5_EXPANSION_ETH] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_IPV4] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
						 MLX5_EXPANSION_IPV4_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_IPV4_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_IPV6] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
						 MLX5_EXPANSION_IPV6_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_IPV6_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
};

static const struct rte_flow_ops mlx5_flow_ops = {
	.validate = mlx5_flow_validate,
	.create = mlx5_flow_create,
	.destroy = mlx5_flow_destroy,
	.flush = mlx5_flow_flush,
	.isolate = mlx5_flow_isolate,
	.query = mlx5_flow_query,
};

/* Convert FDIR request to Generic flow. */
struct mlx5_fdir {
	struct rte_flow_attr attr;
	struct rte_flow_item items[4];
	struct rte_flow_item_eth l2;
	struct rte_flow_item_eth l2_mask;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3_mask;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4_mask;
	struct rte_flow_action actions[2];
	struct rte_flow_action_queue queue;
};

/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};

/* Map of Verbs to Flow priority with 16 Verbs priorities. */
static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
	{ 9, 10, 11 }, { 12, 13, 14 },
};
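/*
 * Illustrative note (editor's sketch, not part of the original driver):
 * mlx5_flow_adjust_priority() below indexes these tables as
 * priority_map_X[priority][subpriority]. For example, with 16 Verbs
 * priorities (flow_prio == RTE_DIM(priority_map_5)), a rule with base
 * priority 1 and sub-priority 2 would map to Verbs priority
 * priority_map_5[1][2] == 5.
 */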
/* Tunnel information. */
struct mlx5_flow_tunnel_info {
	uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
};

static struct mlx5_flow_tunnel_info tunnels_info[] = {
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GENEVE,
		.ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GRE,
		.ptype = RTE_PTYPE_TUNNEL_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_NVGRE,
		.ptype = RTE_PTYPE_TUNNEL_NVGRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPIP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
};

/**
 * Translate tag ID to register.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] feature
 *   The feature that requests the register.
 * @param[in] id
 *   The requested register ID.
 * @param[out] error
 *   Error description in case of any.
 *
 * @return
 *   The requested register on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
enum modify_reg
mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
		     enum mlx5_feature_name feature,
		     uint32_t id,
		     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	enum modify_reg start_reg;

	switch (feature) {
	case MLX5_HAIRPIN_RX:
		return REG_B;
	case MLX5_HAIRPIN_TX:
		return REG_A;
	case MLX5_METADATA_RX:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_B;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_METADATA_TX:
		return REG_A;
	case MLX5_METADATA_FDB:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NONE;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_FLOW_MARK:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NONE;
		case MLX5_XMETA_MODE_META16:
			return REG_C_1;
		case MLX5_XMETA_MODE_META32:
			return REG_C_0;
		}
		break;
	case MLX5_COPY_MARK:
	case MLX5_MTR_SFX:
		/*
		 * The metadata COPY_MARK register is used in the meter
		 * suffix sub-flow when a meter is present. It's safe to
		 * share the same register.
		 */
		return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
	case MLX5_MTR_COLOR:
		RTE_ASSERT(priv->mtr_color_reg != REG_NONE);
		return priv->mtr_color_reg;
	case MLX5_APP_TAG:
		/*
		 * If a meter is enabled, it engages two registers: one for
		 * color match and one for flow match. If the meter color
		 * match does not use REG_C_2, skip the REG_C_x used by the
		 * meter color match.
		 * If no meter is enabled, all available registers can be
		 * used.
		 */
		if (priv->mtr_color_reg != REG_NONE)
			start_reg = priv->mtr_color_reg != REG_C_2 ?
				    REG_C_3 : REG_C_4;
		else
			start_reg = REG_C_2;
		if (id > (REG_C_7 - start_reg))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "invalid tag id");
		if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NONE)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		/*
		 * This case means the meter is using a REG_C_x greater than
		 * REG_C_2. Take care not to conflict with the meter color
		 * REG_C_x. If the available index REG_C_y >= REG_C_x, skip
		 * the color register.
		 */
		if (start_reg == REG_C_3 && config->flow_mreg_c
		    [id + REG_C_3 - REG_C_0] >= priv->mtr_color_reg) {
			if (config->flow_mreg_c[id + 1 + REG_C_3 - REG_C_0] !=
			    REG_NONE)
				return config->flow_mreg_c
					[id + 1 + REG_C_3 - REG_C_0];
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		}
		return config->flow_mreg_c[id + start_reg - REG_C_0];
	}
	assert(false);
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, "invalid feature name");
}

/**
 * Check extensive flow metadata register support.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 * @return
 *   True if device supports extensive flow metadata register, otherwise false.
 */
bool
mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;

	/*
	 * Having available reg_c can be regarded inclusively as supporting
	 * extensive flow metadata register, which could mean,
	 * - metadata register copy action by modify header.
	 * - 16 modify header actions are supported.
	 * - reg_c's are preserved across different domain (FDB and NIC) on
	 *   packet loopback by flow lookup miss.
	 */
	return config->flow_mreg_c[2] != REG_NONE;
}
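/*
 * Illustrative usage sketch (editor's note, not part of the original
 * driver): callers are expected to gate extended metadata features on
 * mlx5_flow_ext_mreg_supported() before requesting a register, e.g.:
 *
 *	if (mlx5_flow_ext_mreg_supported(dev)) {
 *		enum modify_reg reg =
 *			mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, &error);
 *		// reg is a REG_C_x, or an error with rte_errno set.
 *	}
 *
 * The exact call site and error handling are assumptions for illustration
 * only.
 */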
/**
 * Discover the maximum number of priority available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
			.port = (uint8_t)priv->ibv_port,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
	uint16_t vprio[] = { 8, 16 };
	int i;
	int priority = 0;

	if (!drop) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	for (i = 0; i != RTE_DIM(vprio); i++) {
		flow_attr.attr.priority = vprio[i] - 1;
		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
		priority = vprio[i];
	}
	mlx5_hrxq_drop_release(dev);
	switch (priority) {
	case 8:
		priority = RTE_DIM(priority_map_3);
		break;
	case 16:
		priority = RTE_DIM(priority_map_5);
		break;
	default:
		rte_errno = ENOTSUP;
		DRV_LOG(ERR,
			"port %u verbs maximum priority: %d expected 8/16",
			dev->data->port_id, priority);
		return -rte_errno;
	}
	DRV_LOG(INFO, "port %u flow maximum priority: %d",
		dev->data->port_id, priority);
	return priority;
}

/**
 * Adjust flow priority based on the highest layer and the request priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] priority
 *   The rule base priority.
 * @param[in] subpriority
 *   The priority based on the items.
 *
 * @return
 *   The new priority.
 */
uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
				   uint32_t subpriority)
{
	uint32_t res = 0;
	struct mlx5_priv *priv = dev->data->dev_private;

	switch (priv->config.flow_prio) {
	case RTE_DIM(priority_map_3):
		res = priority_map_3[priority][subpriority];
		break;
	case RTE_DIM(priority_map_5):
		res = priority_map_5[priority][subpriority];
		break;
	}
	return res;
}

/**
 * Verify the @p item specifications (spec, last, mask) are compatible with the
 * NIC capabilities.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] mask
 *   @p item->mask or flow default bit-masks.
 * @param[in] nic_mask
 *   Bit-masks covering supported fields by the NIC to compare with user mask.
 * @param[in] size
 *   Bit-masks size in bytes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
			  const uint8_t *mask,
			  const uint8_t *nic_mask,
			  unsigned int size,
			  struct rte_flow_error *error)
{
	unsigned int i;

	assert(nic_mask);
	for (i = 0; i < size; ++i)
		if ((nic_mask[i] | mask[i]) != nic_mask[i])
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "mask enables non supported"
						  " bits");
	if (!item->spec && (item->mask || item->last))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "mask/last without a spec is not"
					  " supported");
	if (item->spec && item->last) {
		uint8_t spec[size];
		uint8_t last[size];
		unsigned int i;
		int ret;

		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
			last[i] = ((const uint8_t *)item->last)[i] & mask[i];
		}
		ret = memcmp(spec, last, size);
		if (ret != 0)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "range is not valid");
	}
	return 0;
}

/**
 * Adjust the hash fields according to the @p flow information.
 *
 * @param[in] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] tunnel
 *   1 when the hash field is for a tunnel item.
 * @param[in] layer_types
 *   ETH_RSS_* types.
 * @param[in] hash_fields
 *   Item hash fields.
 *
 * @return
 *   The hash fields that should be used.
 */
uint64_t
mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
			    int tunnel __rte_unused, uint64_t layer_types,
			    uint64_t hash_fields)
{
	struct rte_flow *flow = dev_flow->flow;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	int rss_request_inner = flow->rss.level >= 2;

	/* Check RSS hash level for tunnel. */
	if (tunnel && rss_request_inner)
		hash_fields |= IBV_RX_HASH_INNER;
	else if (tunnel || rss_request_inner)
		return 0;
#endif
	/* Check if requested layer matches RSS hash fields. */
	if (!(flow->rss.types & layer_types))
		return 0;
	return hash_fields;
}

/**
 * Lookup and set the ptype in the data Rx part. A single Ptype can be used;
 * if several tunnel rules are used on this queue, the tunnel ptype will be
 * cleared.
 *
 * @param rxq_ctrl
 *   Rx queue to update.
 */
static void
flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	unsigned int i;
	uint32_t tunnel_ptype = 0;

	/* Look up for the ptype to use. */
	for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
		if (!rxq_ctrl->flow_tunnels_n[i])
			continue;
		if (!tunnel_ptype) {
			tunnel_ptype = tunnels_info[i].ptype;
		} else {
			tunnel_ptype = 0;
			break;
		}
	}
	rxq_ctrl->rxq.tunnel = tunnel_ptype;
}
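/*
 * Illustrative note (editor's sketch, not part of the original driver):
 * if a queue only carries VXLAN tunnel flows, the ptype resolved above is
 * RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP; as soon as a second tunnel
 * kind (e.g. GRE) is added on the same queue, tunnel_ptype falls back to 0
 * because a single queue cannot report two different tunnel ptypes.
 */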
/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
 * flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] dev_flow
 *   Pointer to device flow structure.
 */
static void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow *flow = dev_flow->flow;
	const int mark = !!(dev_flow->actions &
			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	for (i = 0; i != flow->rss.queue_num; ++i) {
		int idx = (*flow->rss.queue)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		/*
		 * To support metadata register copy on Tx loopback,
		 * this must be always enabled (metadata may arrive
		 * from another port - not from local flows only).
		 */
		if (priv->config.dv_flow_en &&
		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    mlx5_flow_ext_mreg_supported(dev)) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n = 1;
		} else if (mark) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n++;
		}
		if (tunnel) {
			unsigned int j;

			/* Increase the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_flow->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]++;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Pointer to flow structure.
 */
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow *dev_flow;

	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
		flow_drv_rxq_flags_set(dev, dev_flow);
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * device flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] dev_flow
 *   Pointer to the device flow.
 */
static void
flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow *flow = dev_flow->flow;
	const int mark = !!(dev_flow->actions &
			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	assert(dev->data->dev_started);
	for (i = 0; i != flow->rss.queue_num; ++i) {
		int idx = (*flow->rss.queue)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		if (priv->config.dv_flow_en &&
		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    mlx5_flow_ext_mreg_supported(dev)) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n = 1;
		} else if (mark) {
			rxq_ctrl->flow_mark_n--;
			rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
		}
		if (tunnel) {
			unsigned int j;

			/* Decrease the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_flow->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]--;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * @p flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the flow.
 */
static void
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow *dev_flow;

	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
		flow_drv_rxq_flags_trim(dev, dev_flow);
}

/**
 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl;
		unsigned int j;

		if (!(*priv->rxqs)[i])
			continue;
		rxq_ctrl = container_of((*priv->rxqs)[i],
					struct mlx5_rxq_ctrl, rxq);
		rxq_ctrl->flow_mark_n = 0;
		rxq_ctrl->rxq.mark = 0;
		for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
			rxq_ctrl->flow_tunnels_n[j] = 0;
		rxq_ctrl->rxq.tunnel = 0;
	}
}

/*
 * Return a pointer to the desired action in the list of actions.
 *
 * @param[in] actions
 *   The list of actions to search the action in.
 * @param[in] action
 *   The action to find.
 *
 * @return
 *   Pointer to the action in the list, if found. NULL otherwise.
 */
const struct rte_flow_action *
mlx5_flow_find_action(const struct rte_flow_action *actions,
		      enum rte_flow_action_type action)
{
	if (actions == NULL)
		return NULL;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
		if (actions->type == action)
			return actions;
	return NULL;
}

/*
 * Validate the flag action.
 *
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_flag(uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{

	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't mark and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 flag"
					  " actions in same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "flag action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the mark action.
 *
 * @param[in] action
 *   Pointer to the mark action.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
			       uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	const struct rte_flow_action_mark *mark = action->conf;

	if (!mark)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "configuration cannot be null");
	if (mark->id >= MLX5_FLOW_MARK_MAX)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &mark->id,
					  "mark id must be in 0 <= id < "
					  RTE_STR(MLX5_FLOW_MARK_MAX));
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and mark in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't flag and mark in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 mark actions in same"
					  " flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "mark action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the drop action.
 *
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_drop(uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and mark in same flow");
	if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
			    MLX5_FLOW_FATE_ESWITCH_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "drop action not supported for "
					  "egress");
	return 0;
}
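/*
 * Illustrative usage sketch (editor's note, not part of the original
 * driver): the per-action validators above are meant to be called while
 * walking the action list, with action_flags accumulating what has been
 * seen so far, e.g.:
 *
 *	uint64_t action_flags = 0;
 *
 *	ret = mlx5_flow_validate_action_flag(action_flags, attr, error);
 *	if (ret < 0)
 *		return ret;
 *	action_flags |= MLX5_FLOW_ACTION_FLAG;
 *
 * The surrounding loop and variable names are assumptions for
 * illustration only.
 */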
/*
 * Validate the queue action.
 *
 * @param[in] action
 *   Pointer to the queue action.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
				uint64_t action_flags,
				struct rte_eth_dev *dev,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_queue *queue = action->conf;

	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (!priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No Rx queues configured");
	if (queue->index >= priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue index out of range");
	if (!(*priv->rxqs)[queue->index])
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue is not configured");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "queue action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the rss action.
 *
 * @param[in] action
 *   Pointer to the rss action.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[in] item_flags
 *   Items that were detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
			      uint64_t action_flags,
			      struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attr,
			      uint64_t item_flags,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_rss *rss = action->conf;
	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions"
					  " in same flow");
	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
	    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->func,
					  "RSS hash function not supported");
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (rss->level > 2)
#else
	if (rss->level > 1)
#endif
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->level,
					  "tunnel RSS is not supported");
	/* allow RSS key_len 0 in case of NULL (default) RSS key. */
	if (rss->key_len == 0 && rss->key != NULL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key length 0");
	if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too small");
	if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too large");
	if (rss->queue_num > priv->config.ind_table_max_size)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->queue_num,
					  "number of queues too large");
	if (rss->types & MLX5_RSS_HF_MASK)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->types,
					  "some RSS protocols are not"
					  " supported");
	if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
	    !(rss->types & ETH_RSS_IP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "L3 partial RSS requested but L3 RSS"
					  " type not specified");
	if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
	    !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "L4 partial RSS requested but L4 RSS"
					  " type not specified");
	if (!priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No Rx queues configured");
	if (!rss->queue_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No queues configured");
	for (i = 0; i != rss->queue_num; ++i) {
		if (rss->queue[i] >= priv->rxqs_n)
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 &rss->queue[i], "queue index out of range");
		if (!(*priv->rxqs)[rss->queue[i]])
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 &rss->queue[i], "queue is not configured");
	}
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "rss action not supported for "
					  "egress");
	if (rss->level > 1 && !tunnel)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "inner RSS is not supported for "
					  "non-tunnel flows");
	return 0;
}

/*
 * Validate the count action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "count action not supported for "
					  "egress");
	return 0;
}
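/*
 * Illustrative example (editor's sketch, not part of the original driver):
 * an RSS action configuration expected to pass the checks in
 * mlx5_flow_validate_action_rss() above could look like:
 *
 *	uint16_t queues[] = { 0, 1 };
 *	struct rte_flow_action_rss rss = {
 *		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *		.level = 1,                // outer RSS only
 *		.types = ETH_RSS_IP,
 *		.key = NULL, .key_len = 0, // use the default RSS key
 *		.queue = queues, .queue_num = 2,
 *	};
 *
 * assuming both queues are configured on the port; the values are for
 * illustration only.
 */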
/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attributes
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attributes,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t priority_max = priv->config.flow_prio - 1;

	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL, "groups is not supported");
	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
	    attributes->priority >= priority_max)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL, "priority out of range");
	if (attributes->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "egress is not supported");
	if (attributes->transfer && !priv->config.dv_esw_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  NULL, "transfer is not supported");
	if (!attributes->ingress)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "ingress attribute is mandatory");
	return 0;
}

/**
 * Validate ICMP6 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
			      uint64_t item_flags,
			      uint8_t target_protocol,
			      struct rte_flow_error *error)
{
	const struct rte_flow_item_icmp6 *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
				      MLX5_FLOW_LAYER_OUTER_L3_IPV6;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ICMP6 layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv6 is mandatory to filter on"
					  " ICMP6");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_icmp6_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_icmp6_mask,
		 sizeof(struct rte_flow_item_icmp6), error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate ICMP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     uint8_t target_protocol,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_icmp *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
				      MLX5_FLOW_LAYER_OUTER_L3_IPV4;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ICMP layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv4 is mandatory to filter"
					  " on ICMP");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_icmp_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_icmp_mask,
		 sizeof(struct rte_flow_item_icmp), error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate Ethernet item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_eth *mask = item->mask;
	const struct rte_flow_item_eth nic_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.type = RTE_BE16(0xffff),
	};
	int ret;
	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
				       MLX5_FLOW_LAYER_OUTER_L2;

	if (item_flags & ethm)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L2 layers not supported");
	if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) ||
	    (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L2 layer should not follow "
					  "L3 layers");
	if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) ||
	    (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L2 layer should not follow VLAN");
	if (!mask)
		mask = &rte_flow_item_eth_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_eth),
					error);
	return ret;
}
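/*
 * Illustrative usage sketch (editor's note, not part of the original
 * driver): like the action validators, the item validators are expected to
 * be called while walking the pattern, with item_flags recording the layers
 * already seen, e.g.:
 *
 *	uint64_t item_flags = 0;
 *
 *	ret = mlx5_flow_validate_item_eth(item, item_flags, error);
 *	if (ret < 0)
 *		return ret;
 *	item_flags |= MLX5_FLOW_LAYER_OUTER_L2;
 *
 * The loop structure and the flag chosen (outer vs. inner L2) are
 * assumptions for illustration only.
 */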
/**
 * Validate VLAN item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] dev
 *   Ethernet device flow is being created on.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     struct rte_eth_dev *dev,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	const struct rte_flow_item_vlan nic_mask = {
		.tci = RTE_BE16(UINT16_MAX),
		.inner_type = RTE_BE16(UINT16_MAX),
	};
	uint16_t vlan_tag = 0;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	int ret;
	const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
					MLX5_FLOW_LAYER_INNER_L4) :
				       (MLX5_FLOW_LAYER_OUTER_L3 |
					MLX5_FLOW_LAYER_OUTER_L4);
	const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
					MLX5_FLOW_LAYER_OUTER_VLAN;

	if (item_flags & vlanm)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple VLAN layers not supported");
	else if ((item_flags & l34m) != 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VLAN cannot follow L3/L4 layer");
	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_vlan),
					error);
	if (ret)
		return ret;
	if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
		struct mlx5_priv *priv = dev->data->dev_private;

		if (priv->vmwa_context) {
			/*
			 * A non-NULL context means we have a virtual machine
			 * and SR-IOV enabled; we have to create a VLAN
			 * interface to make the hypervisor set up the
			 * E-Switch vport context correctly. We avoid creating
			 * multiple VLAN interfaces, so we cannot support a
			 * VLAN tag mask.
			 */
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "VLAN tag mask is not"
						  " supported in virtual"
						  " environment");
		}
	}
	if (spec) {
		vlan_tag = spec->tci;
		vlan_tag &= mask->tci;
	}
	/*
	 * From verbs perspective an empty VLAN is equivalent
	 * to a packet without VLAN layer.
	 */
	if (!vlan_tag)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "VLAN cannot be empty");
	return 0;
}

/**
 * Validate IPV4 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] acc_mask
 *   Acceptable mask, if NULL the default internal mask
 *   will be used to check whether item fields are supported.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     uint64_t last_item,
			     uint16_t ether_type,
			     const struct rte_flow_item_ipv4 *acc_mask,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *mask = item->mask;
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 nic_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.next_proto_id = 0xff,
		},
	};
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;
	uint8_t next_proto = 0xFF;
	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
				  MLX5_FLOW_LAYER_OUTER_VLAN |
				  MLX5_FLOW_LAYER_INNER_VLAN);

	if ((last_item & l2_vlan) && ether_type &&
	    ether_type != RTE_ETHER_TYPE_IPV4)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv4 cannot follow L2/VLAN layer "
					  "which ether type is not IPv4");
	if (item_flags & MLX5_FLOW_LAYER_IPIP) {
		if (mask && spec)
			next_proto = mask->hdr.next_proto_id &
				     spec->hdr.next_proto_id;
		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "multiple tunnel "
						  "not supported");
	}
	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "wrong tunnel type - IPv6 specified "
					  "but IPv4 item provided");
	if (item_flags & l3m)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L3 layers not supported");
	else if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an L4 layer.");
	else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
		  !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an NVGRE layer.");
	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	else if (mask->hdr.next_proto_id != 0 &&
		 mask->hdr.next_proto_id != 0xff)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
					  "partial mask is not supported"
					  " for protocol");
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					acc_mask ? (const uint8_t *)acc_mask
						 : (const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_ipv4),
					error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate IPV6 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] acc_mask
 *   Acceptable mask, if NULL the default internal mask
 *   will be used to check whether item fields are supported.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     uint64_t last_item,
			     uint16_t ether_type,
			     const struct rte_flow_item_ipv6 *acc_mask,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv6 *mask = item->mask;
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
			.hop_limits = 0xff,
		},
	};
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;
	uint8_t next_proto = 0xFF;
	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
				  MLX5_FLOW_LAYER_OUTER_VLAN |
				  MLX5_FLOW_LAYER_INNER_VLAN);

	if ((last_item & l2_vlan) && ether_type &&
	    ether_type != RTE_ETHER_TYPE_IPV6)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv6 cannot follow L2/VLAN layer "
					  "which ether type is not IPv6");
	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
		if (mask && spec)
			next_proto = mask->hdr.proto & spec->hdr.proto;
		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "multiple tunnel "
						  "not supported");
	}
	if (item_flags & MLX5_FLOW_LAYER_IPIP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "wrong tunnel type - IPv4 specified "
					  "but IPv6 item provided");
	if (item_flags & l3m)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L3 layers not supported");
	else if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an L4 layer.");
	else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
		  !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an NVGRE layer.");
	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					acc_mask ? (const uint8_t *)acc_mask
						 : (const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_ipv6),
					error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate UDP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    uint8_t target_protocol,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_udp *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with UDP layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 is mandatory to filter on L4");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_udp_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_udp_mask,
		 sizeof(struct rte_flow_item_udp), error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate TCP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[in] flow_mask
 *   mlx5 flow-specific (DV, verbs, etc.) supported header fields mask.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    uint8_t target_protocol,
			    const struct rte_flow_item_tcp *flow_mask,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_tcp *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	assert(flow_mask);
	if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with TCP layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 is mandatory to filter on L4");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_tcp_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)flow_mask,
		 sizeof(struct rte_flow_item_tcp), error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate VXLAN item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
1833 */ 1834 int 1835 mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, 1836 uint64_t item_flags, 1837 struct rte_flow_error *error) 1838 { 1839 const struct rte_flow_item_vxlan *spec = item->spec; 1840 const struct rte_flow_item_vxlan *mask = item->mask; 1841 int ret; 1842 union vni { 1843 uint32_t vlan_id; 1844 uint8_t vni[4]; 1845 } id = { .vlan_id = 0, }; 1846 uint32_t vlan_id = 0; 1847 1848 1849 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 1850 return rte_flow_error_set(error, ENOTSUP, 1851 RTE_FLOW_ERROR_TYPE_ITEM, item, 1852 "multiple tunnel layers not" 1853 " supported"); 1854 /* 1855 * Verify only UDPv4 is present as defined in 1856 * https://tools.ietf.org/html/rfc7348 1857 */ 1858 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 1859 return rte_flow_error_set(error, EINVAL, 1860 RTE_FLOW_ERROR_TYPE_ITEM, item, 1861 "no outer UDP layer found"); 1862 if (!mask) 1863 mask = &rte_flow_item_vxlan_mask; 1864 ret = mlx5_flow_item_acceptable 1865 (item, (const uint8_t *)mask, 1866 (const uint8_t *)&rte_flow_item_vxlan_mask, 1867 sizeof(struct rte_flow_item_vxlan), 1868 error); 1869 if (ret < 0) 1870 return ret; 1871 if (spec) { 1872 memcpy(&id.vni[1], spec->vni, 3); 1873 vlan_id = id.vlan_id; 1874 memcpy(&id.vni[1], mask->vni, 3); 1875 vlan_id &= id.vlan_id; 1876 } 1877 /* 1878 * Tunnel id 0 is equivalent as not adding a VXLAN layer, if 1879 * only this layer is defined in the Verbs specification it is 1880 * interpreted as wildcard and all packets will match this 1881 * rule, if it follows a full stack layer (ex: eth / ipv4 / 1882 * udp), all packets matching the layers before will also 1883 * match this rule. To avoid such situation, VNI 0 is 1884 * currently refused. 1885 */ 1886 if (!vlan_id) 1887 return rte_flow_error_set(error, ENOTSUP, 1888 RTE_FLOW_ERROR_TYPE_ITEM, item, 1889 "VXLAN vni cannot be 0"); 1890 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 1891 return rte_flow_error_set(error, ENOTSUP, 1892 RTE_FLOW_ERROR_TYPE_ITEM, item, 1893 "VXLAN tunnel must be fully defined"); 1894 return 0; 1895 } 1896 1897 /** 1898 * Validate VXLAN_GPE item. 1899 * 1900 * @param[in] item 1901 * Item specification. 1902 * @param[in] item_flags 1903 * Bit-fields that holds the items detected until now. 1904 * @param[in] priv 1905 * Pointer to the private data structure. 1906 * @param[in] target_protocol 1907 * The next protocol in the previous item. 1908 * @param[out] error 1909 * Pointer to error structure. 1910 * 1911 * @return 1912 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1913 */ 1914 int 1915 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, 1916 uint64_t item_flags, 1917 struct rte_eth_dev *dev, 1918 struct rte_flow_error *error) 1919 { 1920 struct mlx5_priv *priv = dev->data->dev_private; 1921 const struct rte_flow_item_vxlan_gpe *spec = item->spec; 1922 const struct rte_flow_item_vxlan_gpe *mask = item->mask; 1923 int ret; 1924 union vni { 1925 uint32_t vlan_id; 1926 uint8_t vni[4]; 1927 } id = { .vlan_id = 0, }; 1928 uint32_t vlan_id = 0; 1929 1930 if (!priv->config.l3_vxlan_en) 1931 return rte_flow_error_set(error, ENOTSUP, 1932 RTE_FLOW_ERROR_TYPE_ITEM, item, 1933 "L3 VXLAN is not enabled by device" 1934 " parameter and/or not configured in" 1935 " firmware"); 1936 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 1937 return rte_flow_error_set(error, ENOTSUP, 1938 RTE_FLOW_ERROR_TYPE_ITEM, item, 1939 "multiple tunnel layers not" 1940 " supported"); 1941 /* 1942 * Verify only UDPv4 is present as defined in 1943 * https://tools.ietf.org/html/rfc7348 1944 */ 1945 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 1946 return rte_flow_error_set(error, EINVAL, 1947 RTE_FLOW_ERROR_TYPE_ITEM, item, 1948 "no outer UDP layer found"); 1949 if (!mask) 1950 mask = &rte_flow_item_vxlan_gpe_mask; 1951 ret = mlx5_flow_item_acceptable 1952 (item, (const uint8_t *)mask, 1953 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask, 1954 sizeof(struct rte_flow_item_vxlan_gpe), 1955 error); 1956 if (ret < 0) 1957 return ret; 1958 if (spec) { 1959 if (spec->protocol) 1960 return rte_flow_error_set(error, ENOTSUP, 1961 RTE_FLOW_ERROR_TYPE_ITEM, 1962 item, 1963 "VxLAN-GPE protocol" 1964 " not supported"); 1965 memcpy(&id.vni[1], spec->vni, 3); 1966 vlan_id = id.vlan_id; 1967 memcpy(&id.vni[1], mask->vni, 3); 1968 vlan_id &= id.vlan_id; 1969 } 1970 /* 1971 * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this 1972 * layer is defined in the Verbs specification it is interpreted as 1973 * wildcard and all packets will match this rule, if it follows a full 1974 * stack layer (ex: eth / ipv4 / udp), all packets matching the layers 1975 * before will also match this rule. To avoid such situation, VNI 0 1976 * is currently refused. 1977 */ 1978 if (!vlan_id) 1979 return rte_flow_error_set(error, ENOTSUP, 1980 RTE_FLOW_ERROR_TYPE_ITEM, item, 1981 "VXLAN-GPE vni cannot be 0"); 1982 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 1983 return rte_flow_error_set(error, ENOTSUP, 1984 RTE_FLOW_ERROR_TYPE_ITEM, item, 1985 "VXLAN-GPE tunnel must be fully" 1986 " defined"); 1987 return 0; 1988 } 1989 /** 1990 * Validate GRE Key item. 1991 * 1992 * @param[in] item 1993 * Item specification. 1994 * @param[in] item_flags 1995 * Bit flags to mark detected items. 1996 * @param[in] gre_item 1997 * Pointer to gre_item 1998 * @param[out] error 1999 * Pointer to error structure. 2000 * 2001 * @return 2002 * 0 on success, a negative errno value otherwise and rte_errno is set. 
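 *
 * A hypothetical item construction sketch (variable names are illustrative
 * only): the preceding GRE item must have the key-present bit
 * (RTE_BE16(0x2000) in c_rsvd0_ver) set in its spec:
 * @code
 * rte_be32_t key_spec = RTE_BE32(0x1234);
 * rte_be32_t key_mask = RTE_BE32(UINT32_MAX);
 * struct rte_flow_item gre_key_item = {
 *         .type = RTE_FLOW_ITEM_TYPE_GRE_KEY,
 *         .spec = &key_spec,
 *         .mask = &key_mask,
 * };
 * @endcode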
2003 */ 2004 int 2005 mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item, 2006 uint64_t item_flags, 2007 const struct rte_flow_item *gre_item, 2008 struct rte_flow_error *error) 2009 { 2010 const rte_be32_t *mask = item->mask; 2011 int ret = 0; 2012 rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX); 2013 const struct rte_flow_item_gre *gre_spec; 2014 const struct rte_flow_item_gre *gre_mask; 2015 2016 if (item_flags & MLX5_FLOW_LAYER_GRE_KEY) 2017 return rte_flow_error_set(error, ENOTSUP, 2018 RTE_FLOW_ERROR_TYPE_ITEM, item, 2019 "Multiple GRE key not support"); 2020 if (!(item_flags & MLX5_FLOW_LAYER_GRE)) 2021 return rte_flow_error_set(error, ENOTSUP, 2022 RTE_FLOW_ERROR_TYPE_ITEM, item, 2023 "No preceding GRE header"); 2024 if (item_flags & MLX5_FLOW_LAYER_INNER) 2025 return rte_flow_error_set(error, ENOTSUP, 2026 RTE_FLOW_ERROR_TYPE_ITEM, item, 2027 "GRE key following a wrong item"); 2028 gre_mask = gre_item->mask; 2029 if (!gre_mask) 2030 gre_mask = &rte_flow_item_gre_mask; 2031 gre_spec = gre_item->spec; 2032 if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) && 2033 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000))) 2034 return rte_flow_error_set(error, EINVAL, 2035 RTE_FLOW_ERROR_TYPE_ITEM, item, 2036 "Key bit must be on"); 2037 2038 if (!mask) 2039 mask = &gre_key_default_mask; 2040 ret = mlx5_flow_item_acceptable 2041 (item, (const uint8_t *)mask, 2042 (const uint8_t *)&gre_key_default_mask, 2043 sizeof(rte_be32_t), error); 2044 return ret; 2045 } 2046 2047 /** 2048 * Validate GRE item. 2049 * 2050 * @param[in] item 2051 * Item specification. 2052 * @param[in] item_flags 2053 * Bit flags to mark detected items. 2054 * @param[in] target_protocol 2055 * The next protocol in the previous item. 2056 * @param[out] error 2057 * Pointer to error structure. 2058 * 2059 * @return 2060 * 0 on success, a negative errno value otherwise and rte_errno is set. 
2061 */ 2062 int 2063 mlx5_flow_validate_item_gre(const struct rte_flow_item *item, 2064 uint64_t item_flags, 2065 uint8_t target_protocol, 2066 struct rte_flow_error *error) 2067 { 2068 const struct rte_flow_item_gre *spec __rte_unused = item->spec; 2069 const struct rte_flow_item_gre *mask = item->mask; 2070 int ret; 2071 const struct rte_flow_item_gre nic_mask = { 2072 .c_rsvd0_ver = RTE_BE16(0xB000), 2073 .protocol = RTE_BE16(UINT16_MAX), 2074 }; 2075 2076 if (target_protocol != 0xff && target_protocol != IPPROTO_GRE) 2077 return rte_flow_error_set(error, EINVAL, 2078 RTE_FLOW_ERROR_TYPE_ITEM, item, 2079 "protocol filtering not compatible" 2080 " with this GRE layer"); 2081 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 2082 return rte_flow_error_set(error, ENOTSUP, 2083 RTE_FLOW_ERROR_TYPE_ITEM, item, 2084 "multiple tunnel layers not" 2085 " supported"); 2086 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3)) 2087 return rte_flow_error_set(error, ENOTSUP, 2088 RTE_FLOW_ERROR_TYPE_ITEM, item, 2089 "L3 Layer is missing"); 2090 if (!mask) 2091 mask = &rte_flow_item_gre_mask; 2092 ret = mlx5_flow_item_acceptable 2093 (item, (const uint8_t *)mask, 2094 (const uint8_t *)&nic_mask, 2095 sizeof(struct rte_flow_item_gre), error); 2096 if (ret < 0) 2097 return ret; 2098 #ifndef HAVE_MLX5DV_DR 2099 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT 2100 if (spec && (spec->protocol & mask->protocol)) 2101 return rte_flow_error_set(error, ENOTSUP, 2102 RTE_FLOW_ERROR_TYPE_ITEM, item, 2103 "without MPLS support the" 2104 " specification cannot be used for" 2105 " filtering"); 2106 #endif 2107 #endif 2108 return 0; 2109 } 2110 2111 /** 2112 * Validate Geneve item. 2113 * 2114 * @param[in] item 2115 * Item specification. 2116 * @param[in] item_flags 2117 * Bit-fields that hold the items detected until now. 2118 * @param[in] dev 2119 * Pointer to the rte_eth_dev structure. 2120 * @param[out] error 2121 * Pointer to error structure. 2122 * 2123 * @return 2124 * 0 on success, a negative errno value otherwise and rte_errno is set. 2125 */ 2126 2127 int 2128 mlx5_flow_validate_item_geneve(const struct rte_flow_item *item, 2129 uint64_t item_flags, 2130 struct rte_eth_dev *dev, 2131 struct rte_flow_error *error) 2132 { 2133 struct mlx5_priv *priv = dev->data->dev_private; 2134 const struct rte_flow_item_geneve *spec = item->spec; 2135 const struct rte_flow_item_geneve *mask = item->mask; 2136 int ret; 2137 uint16_t gbhdr; 2138 uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
2139 MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0; 2140 const struct rte_flow_item_geneve nic_mask = { 2141 .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80), 2142 .vni = "\xff\xff\xff", 2143 .protocol = RTE_BE16(UINT16_MAX), 2144 }; 2145 2146 if (!(priv->config.hca_attr.flex_parser_protocols & 2147 MLX5_HCA_FLEX_GENEVE_ENABLED) || 2148 !priv->config.hca_attr.tunnel_stateless_geneve_rx) 2149 return rte_flow_error_set(error, ENOTSUP, 2150 RTE_FLOW_ERROR_TYPE_ITEM, item, 2151 "L3 Geneve is not enabled by device" 2152 " parameter and/or not configured in" 2153 " firmware"); 2154 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 2155 return rte_flow_error_set(error, ENOTSUP, 2156 RTE_FLOW_ERROR_TYPE_ITEM, item, 2157 "multiple tunnel layers not" 2158 " supported"); 2159 /* 2160 * Verify only UDPv4 is present as defined in 2161 * https://tools.ietf.org/html/rfc7348 2162 */ 2163 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 2164 return rte_flow_error_set(error, EINVAL, 2165 RTE_FLOW_ERROR_TYPE_ITEM, item, 2166 "no outer UDP layer found"); 2167 if (!mask) 2168 mask = &rte_flow_item_geneve_mask; 2169 ret = mlx5_flow_item_acceptable 2170 (item, (const uint8_t *)mask, 2171 (const uint8_t *)&nic_mask, 2172 sizeof(struct rte_flow_item_geneve), error); 2173 if (ret) 2174 return ret; 2175 if (spec) { 2176 gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0); 2177 if (MLX5_GENEVE_VER_VAL(gbhdr) || 2178 MLX5_GENEVE_CRITO_VAL(gbhdr) || 2179 MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1) 2180 return rte_flow_error_set(error, ENOTSUP, 2181 RTE_FLOW_ERROR_TYPE_ITEM, 2182 item, 2183 "Geneve protocol unsupported" 2184 " fields are being used"); 2185 if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len) 2186 return rte_flow_error_set 2187 (error, ENOTSUP, 2188 RTE_FLOW_ERROR_TYPE_ITEM, 2189 item, 2190 "Unsupported Geneve options length"); 2191 } 2192 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 2193 return rte_flow_error_set 2194 (error, ENOTSUP, 2195 RTE_FLOW_ERROR_TYPE_ITEM, item, 2196 "Geneve tunnel must be fully defined"); 2197 return 0; 2198 } 2199 2200 /** 2201 * Validate MPLS item. 2202 * 2203 * @param[in] dev 2204 * Pointer to the rte_eth_dev structure. 2205 * @param[in] item 2206 * Item specification. 2207 * @param[in] item_flags 2208 * Bit-fields that holds the items detected until now. 2209 * @param[in] prev_layer 2210 * The protocol layer indicated in previous item. 2211 * @param[out] error 2212 * Pointer to error structure. 2213 * 2214 * @return 2215 * 0 on success, a negative errno value otherwise and rte_errno is set. 2216 */ 2217 int 2218 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, 2219 const struct rte_flow_item *item __rte_unused, 2220 uint64_t item_flags __rte_unused, 2221 uint64_t prev_layer __rte_unused, 2222 struct rte_flow_error *error) 2223 { 2224 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT 2225 const struct rte_flow_item_mpls *mask = item->mask; 2226 struct mlx5_priv *priv = dev->data->dev_private; 2227 int ret; 2228 2229 if (!priv->config.mpls_en) 2230 return rte_flow_error_set(error, ENOTSUP, 2231 RTE_FLOW_ERROR_TYPE_ITEM, item, 2232 "MPLS not supported or" 2233 " disabled in firmware" 2234 " configuration."); 2235 /* MPLS over IP, UDP, GRE is allowed */ 2236 if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 | 2237 MLX5_FLOW_LAYER_OUTER_L4_UDP | 2238 MLX5_FLOW_LAYER_GRE))) 2239 return rte_flow_error_set(error, EINVAL, 2240 RTE_FLOW_ERROR_TYPE_ITEM, item, 2241 "protocol filtering not compatible" 2242 " with MPLS layer"); 2243 /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. 
*/ 2244 if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) && 2245 !(item_flags & MLX5_FLOW_LAYER_GRE)) 2246 return rte_flow_error_set(error, ENOTSUP, 2247 RTE_FLOW_ERROR_TYPE_ITEM, item, 2248 "multiple tunnel layers not" 2249 " supported"); 2250 if (!mask) 2251 mask = &rte_flow_item_mpls_mask; 2252 ret = mlx5_flow_item_acceptable 2253 (item, (const uint8_t *)mask, 2254 (const uint8_t *)&rte_flow_item_mpls_mask, 2255 sizeof(struct rte_flow_item_mpls), error); 2256 if (ret < 0) 2257 return ret; 2258 return 0; 2259 #endif 2260 return rte_flow_error_set(error, ENOTSUP, 2261 RTE_FLOW_ERROR_TYPE_ITEM, item, 2262 "MPLS is not supported by Verbs, please" 2263 " update."); 2264 } 2265 2266 /** 2267 * Validate NVGRE item. 2268 * 2269 * @param[in] item 2270 * Item specification. 2271 * @param[in] item_flags 2272 * Bit flags to mark detected items. 2273 * @param[in] target_protocol 2274 * The next protocol in the previous item. 2275 * @param[out] error 2276 * Pointer to error structure. 2277 * 2278 * @return 2279 * 0 on success, a negative errno value otherwise and rte_errno is set. 2280 */ 2281 int 2282 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item, 2283 uint64_t item_flags, 2284 uint8_t target_protocol, 2285 struct rte_flow_error *error) 2286 { 2287 const struct rte_flow_item_nvgre *mask = item->mask; 2288 int ret; 2289 2290 if (target_protocol != 0xff && target_protocol != IPPROTO_GRE) 2291 return rte_flow_error_set(error, EINVAL, 2292 RTE_FLOW_ERROR_TYPE_ITEM, item, 2293 "protocol filtering not compatible" 2294 " with this GRE layer"); 2295 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 2296 return rte_flow_error_set(error, ENOTSUP, 2297 RTE_FLOW_ERROR_TYPE_ITEM, item, 2298 "multiple tunnel layers not" 2299 " supported"); 2300 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3)) 2301 return rte_flow_error_set(error, ENOTSUP, 2302 RTE_FLOW_ERROR_TYPE_ITEM, item, 2303 "L3 Layer is missing"); 2304 if (!mask) 2305 mask = &rte_flow_item_nvgre_mask; 2306 ret = mlx5_flow_item_acceptable 2307 (item, (const uint8_t *)mask, 2308 (const uint8_t *)&rte_flow_item_nvgre_mask, 2309 sizeof(struct rte_flow_item_nvgre), error); 2310 if (ret < 0) 2311 return ret; 2312 return 0; 2313 } 2314 2315 /* Allocate unique ID for the split Q/RSS subflows. */ 2316 static uint32_t 2317 flow_qrss_get_id(struct rte_eth_dev *dev) 2318 { 2319 struct mlx5_priv *priv = dev->data->dev_private; 2320 uint32_t qrss_id, ret; 2321 2322 ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id); 2323 if (ret) 2324 return 0; 2325 assert(qrss_id); 2326 return qrss_id; 2327 } 2328 2329 /* Free unique ID for the split Q/RSS subflows. */ 2330 static void 2331 flow_qrss_free_id(struct rte_eth_dev *dev, uint32_t qrss_id) 2332 { 2333 struct mlx5_priv *priv = dev->data->dev_private; 2334 2335 if (qrss_id) 2336 mlx5_flow_id_release(priv->qrss_id_pool, qrss_id); 2337 } 2338 2339 /** 2340 * Release resource related QUEUE/RSS action split. 2341 * 2342 * @param dev 2343 * Pointer to Ethernet device. 2344 * @param flow 2345 * Flow to release id's from. 
2346 */ 2347 static void 2348 flow_mreg_split_qrss_release(struct rte_eth_dev *dev, 2349 struct rte_flow *flow) 2350 { 2351 struct mlx5_flow *dev_flow; 2352 2353 LIST_FOREACH(dev_flow, &flow->dev_flows, next) 2354 if (dev_flow->qrss_id) 2355 flow_qrss_free_id(dev, dev_flow->qrss_id); 2356 } 2357 2358 static int 2359 flow_null_validate(struct rte_eth_dev *dev __rte_unused, 2360 const struct rte_flow_attr *attr __rte_unused, 2361 const struct rte_flow_item items[] __rte_unused, 2362 const struct rte_flow_action actions[] __rte_unused, 2363 bool external __rte_unused, 2364 struct rte_flow_error *error) 2365 { 2366 return rte_flow_error_set(error, ENOTSUP, 2367 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2368 } 2369 2370 static struct mlx5_flow * 2371 flow_null_prepare(const struct rte_flow_attr *attr __rte_unused, 2372 const struct rte_flow_item items[] __rte_unused, 2373 const struct rte_flow_action actions[] __rte_unused, 2374 struct rte_flow_error *error) 2375 { 2376 rte_flow_error_set(error, ENOTSUP, 2377 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2378 return NULL; 2379 } 2380 2381 static int 2382 flow_null_translate(struct rte_eth_dev *dev __rte_unused, 2383 struct mlx5_flow *dev_flow __rte_unused, 2384 const struct rte_flow_attr *attr __rte_unused, 2385 const struct rte_flow_item items[] __rte_unused, 2386 const struct rte_flow_action actions[] __rte_unused, 2387 struct rte_flow_error *error) 2388 { 2389 return rte_flow_error_set(error, ENOTSUP, 2390 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2391 } 2392 2393 static int 2394 flow_null_apply(struct rte_eth_dev *dev __rte_unused, 2395 struct rte_flow *flow __rte_unused, 2396 struct rte_flow_error *error) 2397 { 2398 return rte_flow_error_set(error, ENOTSUP, 2399 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2400 } 2401 2402 static void 2403 flow_null_remove(struct rte_eth_dev *dev __rte_unused, 2404 struct rte_flow *flow __rte_unused) 2405 { 2406 } 2407 2408 static void 2409 flow_null_destroy(struct rte_eth_dev *dev __rte_unused, 2410 struct rte_flow *flow __rte_unused) 2411 { 2412 } 2413 2414 static int 2415 flow_null_query(struct rte_eth_dev *dev __rte_unused, 2416 struct rte_flow *flow __rte_unused, 2417 const struct rte_flow_action *actions __rte_unused, 2418 void *data __rte_unused, 2419 struct rte_flow_error *error) 2420 { 2421 return rte_flow_error_set(error, ENOTSUP, 2422 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2423 } 2424 2425 /* Void driver to protect from null pointer reference. */ 2426 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = { 2427 .validate = flow_null_validate, 2428 .prepare = flow_null_prepare, 2429 .translate = flow_null_translate, 2430 .apply = flow_null_apply, 2431 .remove = flow_null_remove, 2432 .destroy = flow_null_destroy, 2433 .query = flow_null_query, 2434 }; 2435 2436 /** 2437 * Select flow driver type according to flow attributes and device 2438 * configuration. 2439 * 2440 * @param[in] dev 2441 * Pointer to the dev structure. 2442 * @param[in] attr 2443 * Pointer to the flow attributes. 2444 * 2445 * @return 2446 * flow driver type, MLX5_FLOW_TYPE_MAX otherwise. 2447 */ 2448 static enum mlx5_flow_drv_type 2449 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) 2450 { 2451 struct mlx5_priv *priv = dev->data->dev_private; 2452 enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX; 2453 2454 if (attr->transfer && priv->config.dv_esw_en) 2455 type = MLX5_FLOW_TYPE_DV; 2456 if (!attr->transfer) 2457 type = priv->config.dv_flow_en ? 
MLX5_FLOW_TYPE_DV : 2458 MLX5_FLOW_TYPE_VERBS; 2459 return type; 2460 } 2461 2462 #define flow_get_drv_ops(type) flow_drv_ops[type] 2463 2464 /** 2465 * Flow driver validation API. This abstracts calling driver specific functions. 2466 * The type of flow driver is determined according to flow attributes. 2467 * 2468 * @param[in] dev 2469 * Pointer to the dev structure. 2470 * @param[in] attr 2471 * Pointer to the flow attributes. 2472 * @param[in] items 2473 * Pointer to the list of items. 2474 * @param[in] actions 2475 * Pointer to the list of actions. 2476 * @param[in] external 2477 * This flow rule is created by request external to PMD. 2478 * @param[out] error 2479 * Pointer to the error structure. 2480 * 2481 * @return 2482 * 0 on success, a negative errno value otherwise and rte_errno is set. 2483 */ 2484 static inline int 2485 flow_drv_validate(struct rte_eth_dev *dev, 2486 const struct rte_flow_attr *attr, 2487 const struct rte_flow_item items[], 2488 const struct rte_flow_action actions[], 2489 bool external, struct rte_flow_error *error) 2490 { 2491 const struct mlx5_flow_driver_ops *fops; 2492 enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr); 2493 2494 fops = flow_get_drv_ops(type); 2495 return fops->validate(dev, attr, items, actions, external, error); 2496 } 2497 2498 /** 2499 * Flow driver preparation API. This abstracts calling driver specific 2500 * functions. Parent flow (rte_flow) should have driver type (drv_type). It 2501 * calculates the size of memory required for device flow, allocates the memory, 2502 * initializes the device flow and returns the pointer. 2503 * 2504 * @note 2505 * This function initializes device flow structure such as dv or verbs in 2506 * struct mlx5_flow. However, it is caller's responsibility to initialize the 2507 * rest. For example, adding returning device flow to flow->dev_flow list and 2508 * setting backward reference to the flow should be done out of this function. 2509 * layers field is not filled either. 2510 * 2511 * @param[in] attr 2512 * Pointer to the flow attributes. 2513 * @param[in] items 2514 * Pointer to the list of items. 2515 * @param[in] actions 2516 * Pointer to the list of actions. 2517 * @param[out] error 2518 * Pointer to the error structure. 2519 * 2520 * @return 2521 * Pointer to device flow on success, otherwise NULL and rte_errno is set. 2522 */ 2523 static inline struct mlx5_flow * 2524 flow_drv_prepare(const struct rte_flow *flow, 2525 const struct rte_flow_attr *attr, 2526 const struct rte_flow_item items[], 2527 const struct rte_flow_action actions[], 2528 struct rte_flow_error *error) 2529 { 2530 const struct mlx5_flow_driver_ops *fops; 2531 enum mlx5_flow_drv_type type = flow->drv_type; 2532 2533 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2534 fops = flow_get_drv_ops(type); 2535 return fops->prepare(attr, items, actions, error); 2536 } 2537 2538 /** 2539 * Flow driver translation API. This abstracts calling driver specific 2540 * functions. Parent flow (rte_flow) should have driver type (drv_type). It 2541 * translates a generic flow into a driver flow. flow_drv_prepare() must 2542 * precede. 2543 * 2544 * @note 2545 * dev_flow->layers could be filled as a result of parsing during translation 2546 * if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled 2547 * if necessary. As a flow can have multiple dev_flows by RSS flow expansion, 2548 * flow->actions could be overwritten even though all the expanded dev_flows 2549 * have the same actions. 
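 *
 * A simplified call-order sketch, mirroring flow_create_split_inner()
 * below (error handling omitted): prepare, then translate, then apply:
 * @code
 * struct mlx5_flow *dev_flow;
 *
 * dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
 * dev_flow->flow = flow;
 * LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
 * flow_drv_translate(dev, dev_flow, attr, items, actions, error);
 * flow_drv_apply(dev, flow, error);
 * @endcode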
2550 * 2551 * @param[in] dev 2552 * Pointer to the rte dev structure. 2553 * @param[in, out] dev_flow 2554 * Pointer to the mlx5 flow. 2555 * @param[in] attr 2556 * Pointer to the flow attributes. 2557 * @param[in] items 2558 * Pointer to the list of items. 2559 * @param[in] actions 2560 * Pointer to the list of actions. 2561 * @param[out] error 2562 * Pointer to the error structure. 2563 * 2564 * @return 2565 * 0 on success, a negative errno value otherwise and rte_errno is set. 2566 */ 2567 static inline int 2568 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow, 2569 const struct rte_flow_attr *attr, 2570 const struct rte_flow_item items[], 2571 const struct rte_flow_action actions[], 2572 struct rte_flow_error *error) 2573 { 2574 const struct mlx5_flow_driver_ops *fops; 2575 enum mlx5_flow_drv_type type = dev_flow->flow->drv_type; 2576 2577 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2578 fops = flow_get_drv_ops(type); 2579 return fops->translate(dev, dev_flow, attr, items, actions, error); 2580 } 2581 2582 /** 2583 * Flow driver apply API. This abstracts calling driver specific functions. 2584 * Parent flow (rte_flow) should have driver type (drv_type). It applies 2585 * translated driver flows on to device. flow_drv_translate() must precede. 2586 * 2587 * @param[in] dev 2588 * Pointer to Ethernet device structure. 2589 * @param[in, out] flow 2590 * Pointer to flow structure. 2591 * @param[out] error 2592 * Pointer to error structure. 2593 * 2594 * @return 2595 * 0 on success, a negative errno value otherwise and rte_errno is set. 2596 */ 2597 static inline int 2598 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, 2599 struct rte_flow_error *error) 2600 { 2601 const struct mlx5_flow_driver_ops *fops; 2602 enum mlx5_flow_drv_type type = flow->drv_type; 2603 2604 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2605 fops = flow_get_drv_ops(type); 2606 return fops->apply(dev, flow, error); 2607 } 2608 2609 /** 2610 * Flow driver remove API. This abstracts calling driver specific functions. 2611 * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow 2612 * on device. All the resources of the flow should be freed by calling 2613 * flow_drv_destroy(). 2614 * 2615 * @param[in] dev 2616 * Pointer to Ethernet device. 2617 * @param[in, out] flow 2618 * Pointer to flow structure. 2619 */ 2620 static inline void 2621 flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) 2622 { 2623 const struct mlx5_flow_driver_ops *fops; 2624 enum mlx5_flow_drv_type type = flow->drv_type; 2625 2626 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2627 fops = flow_get_drv_ops(type); 2628 fops->remove(dev, flow); 2629 } 2630 2631 /** 2632 * Flow driver destroy API. This abstracts calling driver specific functions. 2633 * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow 2634 * on device and releases resources of the flow. 2635 * 2636 * @param[in] dev 2637 * Pointer to Ethernet device. 2638 * @param[in, out] flow 2639 * Pointer to flow structure. 
2640 */ 2641 static inline void 2642 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) 2643 { 2644 const struct mlx5_flow_driver_ops *fops; 2645 enum mlx5_flow_drv_type type = flow->drv_type; 2646 2647 flow_mreg_split_qrss_release(dev, flow); 2648 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2649 fops = flow_get_drv_ops(type); 2650 fops->destroy(dev, flow); 2651 } 2652 2653 /** 2654 * Validate a flow supported by the NIC. 2655 * 2656 * @see rte_flow_validate() 2657 * @see rte_flow_ops 2658 */ 2659 int 2660 mlx5_flow_validate(struct rte_eth_dev *dev, 2661 const struct rte_flow_attr *attr, 2662 const struct rte_flow_item items[], 2663 const struct rte_flow_action actions[], 2664 struct rte_flow_error *error) 2665 { 2666 int ret; 2667 2668 ret = flow_drv_validate(dev, attr, items, actions, true, error); 2669 if (ret < 0) 2670 return ret; 2671 return 0; 2672 } 2673 2674 /** 2675 * Get port id item from the item list. 2676 * 2677 * @param[in] item 2678 * Pointer to the list of items. 2679 * 2680 * @return 2681 * Pointer to the port id item if exist, else return NULL. 2682 */ 2683 static const struct rte_flow_item * 2684 find_port_id_item(const struct rte_flow_item *item) 2685 { 2686 assert(item); 2687 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { 2688 if (item->type == RTE_FLOW_ITEM_TYPE_PORT_ID) 2689 return item; 2690 } 2691 return NULL; 2692 } 2693 2694 /** 2695 * Get RSS action from the action list. 2696 * 2697 * @param[in] actions 2698 * Pointer to the list of actions. 2699 * 2700 * @return 2701 * Pointer to the RSS action if exist, else return NULL. 2702 */ 2703 static const struct rte_flow_action_rss* 2704 flow_get_rss_action(const struct rte_flow_action actions[]) 2705 { 2706 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2707 switch (actions->type) { 2708 case RTE_FLOW_ACTION_TYPE_RSS: 2709 return (const struct rte_flow_action_rss *) 2710 actions->conf; 2711 default: 2712 break; 2713 } 2714 } 2715 return NULL; 2716 } 2717 2718 static unsigned int 2719 find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) 2720 { 2721 const struct rte_flow_item *item; 2722 unsigned int has_vlan = 0; 2723 2724 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { 2725 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { 2726 has_vlan = 1; 2727 break; 2728 } 2729 } 2730 if (has_vlan) 2731 return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN : 2732 MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN; 2733 return rss_level < 2 ? MLX5_EXPANSION_ROOT : 2734 MLX5_EXPANSION_ROOT_OUTER; 2735 } 2736 2737 /** 2738 * Get QUEUE/RSS action from the action list. 2739 * 2740 * @param[in] actions 2741 * Pointer to the list of actions. 2742 * @param[out] qrss 2743 * Pointer to the return pointer. 2744 * @param[out] qrss_type 2745 * Pointer to the action type to return. RTE_FLOW_ACTION_TYPE_END is returned 2746 * if no QUEUE/RSS is found. 2747 * 2748 * @return 2749 * Total number of actions. 2750 */ 2751 static int 2752 flow_parse_qrss_action(const struct rte_flow_action actions[], 2753 const struct rte_flow_action **qrss) 2754 { 2755 int actions_n = 0; 2756 2757 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2758 switch (actions->type) { 2759 case RTE_FLOW_ACTION_TYPE_QUEUE: 2760 case RTE_FLOW_ACTION_TYPE_RSS: 2761 *qrss = actions; 2762 break; 2763 default: 2764 break; 2765 } 2766 actions_n++; 2767 } 2768 /* Count RTE_FLOW_ACTION_TYPE_END. */ 2769 return actions_n + 1; 2770 } 2771 2772 /** 2773 * Check meter action from the action list. 
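 *
 * A hypothetical caller sketch (variable names are illustrative):
 * @code
 * uint32_t has_meter = 0;
 * int actions_n = flow_check_meter_action(actions, &has_meter);
 * @endcode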
2774 * 2775 * @param[in] actions 2776 * Pointer to the list of actions. 2777 * @param[out] mtr 2778 * Pointer to the meter exist flag. 2779 * 2780 * @return 2781 * Total number of actions. 2782 */ 2783 static int 2784 flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr) 2785 { 2786 int actions_n = 0; 2787 2788 assert(mtr); 2789 *mtr = 0; 2790 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2791 switch (actions->type) { 2792 case RTE_FLOW_ACTION_TYPE_METER: 2793 *mtr = 1; 2794 break; 2795 default: 2796 break; 2797 } 2798 actions_n++; 2799 } 2800 /* Count RTE_FLOW_ACTION_TYPE_END. */ 2801 return actions_n + 1; 2802 } 2803 2804 /** 2805 * Check if the flow should be splited due to hairpin. 2806 * The reason for the split is that in current HW we can't 2807 * support encap on Rx, so if a flow have encap we move it 2808 * to Tx. 2809 * 2810 * @param dev 2811 * Pointer to Ethernet device. 2812 * @param[in] attr 2813 * Flow rule attributes. 2814 * @param[in] actions 2815 * Associated actions (list terminated by the END action). 2816 * 2817 * @return 2818 * > 0 the number of actions and the flow should be split, 2819 * 0 when no split required. 2820 */ 2821 static int 2822 flow_check_hairpin_split(struct rte_eth_dev *dev, 2823 const struct rte_flow_attr *attr, 2824 const struct rte_flow_action actions[]) 2825 { 2826 int queue_action = 0; 2827 int action_n = 0; 2828 int encap = 0; 2829 const struct rte_flow_action_queue *queue; 2830 const struct rte_flow_action_rss *rss; 2831 const struct rte_flow_action_raw_encap *raw_encap; 2832 2833 if (!attr->ingress) 2834 return 0; 2835 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2836 switch (actions->type) { 2837 case RTE_FLOW_ACTION_TYPE_QUEUE: 2838 queue = actions->conf; 2839 if (queue == NULL) 2840 return 0; 2841 if (mlx5_rxq_get_type(dev, queue->index) != 2842 MLX5_RXQ_TYPE_HAIRPIN) 2843 return 0; 2844 queue_action = 1; 2845 action_n++; 2846 break; 2847 case RTE_FLOW_ACTION_TYPE_RSS: 2848 rss = actions->conf; 2849 if (rss == NULL || rss->queue_num == 0) 2850 return 0; 2851 if (mlx5_rxq_get_type(dev, rss->queue[0]) != 2852 MLX5_RXQ_TYPE_HAIRPIN) 2853 return 0; 2854 queue_action = 1; 2855 action_n++; 2856 break; 2857 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 2858 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 2859 encap = 1; 2860 action_n++; 2861 break; 2862 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 2863 raw_encap = actions->conf; 2864 if (raw_encap->size > 2865 (sizeof(struct rte_flow_item_eth) + 2866 sizeof(struct rte_flow_item_ipv4))) 2867 encap = 1; 2868 action_n++; 2869 break; 2870 default: 2871 action_n++; 2872 break; 2873 } 2874 } 2875 if (encap == 1 && queue_action) 2876 return action_n; 2877 return 0; 2878 } 2879 2880 /* Declare flow create/destroy prototype in advance. */ 2881 static struct rte_flow * 2882 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, 2883 const struct rte_flow_attr *attr, 2884 const struct rte_flow_item items[], 2885 const struct rte_flow_action actions[], 2886 bool external, struct rte_flow_error *error); 2887 2888 static void 2889 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, 2890 struct rte_flow *flow); 2891 2892 /** 2893 * Add a flow of copying flow metadata registers in RX_CP_TBL. 2894 * 2895 * As mark_id is unique, if there's already a registered flow for the mark_id, 2896 * return by increasing the reference counter of the resource. Otherwise, create 2897 * the resource (mcp_res) and flow. 
2898 * 2899 * Flow looks like, 2900 * - If ingress port is ANY and reg_c[1] is mark_id, 2901 * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL. 2902 * 2903 * For default flow (zero mark_id), flow is like, 2904 * - If ingress port is ANY, 2905 * reg_b := reg_c[0] and jump to RX_ACT_TBL. 2906 * 2907 * @param dev 2908 * Pointer to Ethernet device. 2909 * @param mark_id 2910 * ID of MARK action, zero means default flow for META. 2911 * @param[out] error 2912 * Perform verbose error reporting if not NULL. 2913 * 2914 * @return 2915 * Associated resource on success, NULL otherwise and rte_errno is set. 2916 */ 2917 static struct mlx5_flow_mreg_copy_resource * 2918 flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, 2919 struct rte_flow_error *error) 2920 { 2921 struct mlx5_priv *priv = dev->data->dev_private; 2922 struct rte_flow_attr attr = { 2923 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, 2924 .ingress = 1, 2925 }; 2926 struct mlx5_rte_flow_item_tag tag_spec = { 2927 .data = mark_id, 2928 }; 2929 struct rte_flow_item items[] = { 2930 [1] = { .type = RTE_FLOW_ITEM_TYPE_END, }, 2931 }; 2932 struct rte_flow_action_mark ftag = { 2933 .id = mark_id, 2934 }; 2935 struct mlx5_flow_action_copy_mreg cp_mreg = { 2936 .dst = REG_B, 2937 .src = 0, 2938 }; 2939 struct rte_flow_action_jump jump = { 2940 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, 2941 }; 2942 struct rte_flow_action actions[] = { 2943 [3] = { .type = RTE_FLOW_ACTION_TYPE_END, }, 2944 }; 2945 struct mlx5_flow_mreg_copy_resource *mcp_res; 2946 int ret; 2947 2948 /* Fill the register fileds in the flow. */ 2949 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); 2950 if (ret < 0) 2951 return NULL; 2952 tag_spec.id = ret; 2953 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error); 2954 if (ret < 0) 2955 return NULL; 2956 cp_mreg.src = ret; 2957 /* Check if already registered. */ 2958 assert(priv->mreg_cp_tbl); 2959 mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id); 2960 if (mcp_res) { 2961 /* For non-default rule. */ 2962 if (mark_id != MLX5_DEFAULT_COPY_ID) 2963 mcp_res->refcnt++; 2964 assert(mark_id != MLX5_DEFAULT_COPY_ID || mcp_res->refcnt == 1); 2965 return mcp_res; 2966 } 2967 /* Provide the full width of FLAG specific value. */ 2968 if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT)) 2969 tag_spec.data = MLX5_FLOW_MARK_DEFAULT; 2970 /* Build a new flow. */ 2971 if (mark_id != MLX5_DEFAULT_COPY_ID) { 2972 items[0] = (struct rte_flow_item){ 2973 .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG, 2974 .spec = &tag_spec, 2975 }; 2976 items[1] = (struct rte_flow_item){ 2977 .type = RTE_FLOW_ITEM_TYPE_END, 2978 }; 2979 actions[0] = (struct rte_flow_action){ 2980 .type = MLX5_RTE_FLOW_ACTION_TYPE_MARK, 2981 .conf = &ftag, 2982 }; 2983 actions[1] = (struct rte_flow_action){ 2984 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 2985 .conf = &cp_mreg, 2986 }; 2987 actions[2] = (struct rte_flow_action){ 2988 .type = RTE_FLOW_ACTION_TYPE_JUMP, 2989 .conf = &jump, 2990 }; 2991 actions[3] = (struct rte_flow_action){ 2992 .type = RTE_FLOW_ACTION_TYPE_END, 2993 }; 2994 } else { 2995 /* Default rule, wildcard match. 
*/ 2996 attr.priority = MLX5_FLOW_PRIO_RSVD; 2997 items[0] = (struct rte_flow_item){ 2998 .type = RTE_FLOW_ITEM_TYPE_END, 2999 }; 3000 actions[0] = (struct rte_flow_action){ 3001 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 3002 .conf = &cp_mreg, 3003 }; 3004 actions[1] = (struct rte_flow_action){ 3005 .type = RTE_FLOW_ACTION_TYPE_JUMP, 3006 .conf = &jump, 3007 }; 3008 actions[2] = (struct rte_flow_action){ 3009 .type = RTE_FLOW_ACTION_TYPE_END, 3010 }; 3011 } 3012 /* Build a new entry. */ 3013 mcp_res = rte_zmalloc(__func__, sizeof(*mcp_res), 0); 3014 if (!mcp_res) { 3015 rte_errno = ENOMEM; 3016 return NULL; 3017 } 3018 /* 3019 * The copy Flows are not included in any list. These 3020 * ones are referenced from other Flows and cannot 3021 * be applied, removed or deleted in arbitrary order 3022 * by list traversing. 3023 */ 3024 mcp_res->flow = flow_list_create(dev, NULL, &attr, items, 3025 actions, false, error); 3026 if (!mcp_res->flow) 3027 goto error; 3028 mcp_res->refcnt++; 3029 mcp_res->hlist_ent.key = mark_id; 3030 ret = mlx5_hlist_insert(priv->mreg_cp_tbl, 3031 &mcp_res->hlist_ent); 3032 assert(!ret); 3033 if (ret) 3034 goto error; 3035 return mcp_res; 3036 error: 3037 if (mcp_res->flow) 3038 flow_list_destroy(dev, NULL, mcp_res->flow); 3039 rte_free(mcp_res); 3040 return NULL; 3041 } 3042 3043 /** 3044 * Release flow in RX_CP_TBL. 3045 * 3046 * @param dev 3047 * Pointer to Ethernet device. 3048 * @param flow 3049 * Parent flow for which copying is provided. 3050 */ 3051 static void 3052 flow_mreg_del_copy_action(struct rte_eth_dev *dev, 3053 struct rte_flow *flow) 3054 { 3055 struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; 3056 struct mlx5_priv *priv = dev->data->dev_private; 3057 3058 if (!mcp_res || !priv->mreg_cp_tbl) 3059 return; 3060 if (flow->copy_applied) { 3061 assert(mcp_res->appcnt); 3062 flow->copy_applied = 0; 3063 --mcp_res->appcnt; 3064 if (!mcp_res->appcnt) 3065 flow_drv_remove(dev, mcp_res->flow); 3066 } 3067 /* 3068 * We do not check availability of metadata registers here, 3069 * because copy resources are not allocated in this case. 3070 */ 3071 if (--mcp_res->refcnt) 3072 return; 3073 assert(mcp_res->flow); 3074 flow_list_destroy(dev, NULL, mcp_res->flow); 3075 mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent); 3076 rte_free(mcp_res); 3077 flow->mreg_copy = NULL; 3078 } 3079 3080 /** 3081 * Start flow in RX_CP_TBL. 3082 * 3083 * @param dev 3084 * Pointer to Ethernet device. 3085 * @param flow 3086 * Parent flow for which copying is provided. 3087 * 3088 * @return 3089 * 0 on success, a negative errno value otherwise and rte_errno is set. 3090 */ 3091 static int 3092 flow_mreg_start_copy_action(struct rte_eth_dev *dev, 3093 struct rte_flow *flow) 3094 { 3095 struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; 3096 int ret; 3097 3098 if (!mcp_res || flow->copy_applied) 3099 return 0; 3100 if (!mcp_res->appcnt) { 3101 ret = flow_drv_apply(dev, mcp_res->flow, NULL); 3102 if (ret) 3103 return ret; 3104 } 3105 ++mcp_res->appcnt; 3106 flow->copy_applied = 1; 3107 return 0; 3108 } 3109 3110 /** 3111 * Stop flow in RX_CP_TBL. 3112 * 3113 * @param dev 3114 * Pointer to Ethernet device. 3115 * @param flow 3116 * Parent flow for which copying is provided.
3117 */ 3118 static void 3119 flow_mreg_stop_copy_action(struct rte_eth_dev *dev, 3120 struct rte_flow *flow) 3121 { 3122 struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; 3123 3124 if (!mcp_res || !flow->copy_applied) 3125 return; 3126 assert(mcp_res->appcnt); 3127 --mcp_res->appcnt; 3128 flow->copy_applied = 0; 3129 if (!mcp_res->appcnt) 3130 flow_drv_remove(dev, mcp_res->flow); 3131 } 3132 3133 /** 3134 * Remove the default copy action from RX_CP_TBL. 3135 * 3136 * @param dev 3137 * Pointer to Ethernet device. 3138 */ 3139 static void 3140 flow_mreg_del_default_copy_action(struct rte_eth_dev *dev) 3141 { 3142 struct mlx5_flow_mreg_copy_resource *mcp_res; 3143 struct mlx5_priv *priv = dev->data->dev_private; 3144 3145 /* Check if default flow is registered. */ 3146 if (!priv->mreg_cp_tbl) 3147 return; 3148 mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, 3149 MLX5_DEFAULT_COPY_ID); 3150 if (!mcp_res) 3151 return; 3152 assert(mcp_res->flow); 3153 flow_list_destroy(dev, NULL, mcp_res->flow); 3154 mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent); 3155 rte_free(mcp_res); 3156 } 3157 3158 /** 3159 * Add the default copy action in in RX_CP_TBL. 3160 * 3161 * @param dev 3162 * Pointer to Ethernet device. 3163 * @param[out] error 3164 * Perform verbose error reporting if not NULL. 3165 * 3166 * @return 3167 * 0 for success, negative value otherwise and rte_errno is set. 3168 */ 3169 static int 3170 flow_mreg_add_default_copy_action(struct rte_eth_dev *dev, 3171 struct rte_flow_error *error) 3172 { 3173 struct mlx5_priv *priv = dev->data->dev_private; 3174 struct mlx5_flow_mreg_copy_resource *mcp_res; 3175 3176 /* Check whether extensive metadata feature is engaged. */ 3177 if (!priv->config.dv_flow_en || 3178 priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 3179 !mlx5_flow_ext_mreg_supported(dev) || 3180 !priv->sh->dv_regc0_mask) 3181 return 0; 3182 mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error); 3183 if (!mcp_res) 3184 return -rte_errno; 3185 return 0; 3186 } 3187 3188 /** 3189 * Add a flow of copying flow metadata registers in RX_CP_TBL. 3190 * 3191 * All the flow having Q/RSS action should be split by 3192 * flow_mreg_split_qrss_prep() to pass by RX_CP_TBL. A flow in the RX_CP_TBL 3193 * performs the following, 3194 * - CQE->flow_tag := reg_c[1] (MARK) 3195 * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META) 3196 * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1] 3197 * but there should be a flow per each MARK ID set by MARK action. 3198 * 3199 * For the aforementioned reason, if there's a MARK action in flow's action 3200 * list, a corresponding flow should be added to the RX_CP_TBL in order to copy 3201 * the MARK ID to CQE's flow_tag like, 3202 * - If reg_c[1] is mark_id, 3203 * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL. 3204 * 3205 * For SET_META action which stores value in reg_c[0], as the destination is 3206 * also a flow metadata register (reg_b), adding a default flow is enough. Zero 3207 * MARK ID means the default flow. The default flow looks like, 3208 * - For all flow, reg_b := reg_c[0] and jump to RX_ACT_TBL. 3209 * 3210 * @param dev 3211 * Pointer to Ethernet device. 3212 * @param flow 3213 * Pointer to flow structure. 3214 * @param[in] actions 3215 * Pointer to the list of actions. 3216 * @param[out] error 3217 * Perform verbose error reporting if not NULL. 3218 * 3219 * @return 3220 * 0 on success, negative value otherwise and rte_errno is set. 
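 *
 * A hypothetical caller-side sketch (error handling trimmed): the helper
 * is invoked with the original action list once the flow is validated:
 * @code
 * if (flow_mreg_update_copy_table(dev, flow, actions, error))
 *         goto error;
 * @endcode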
3221 */ 3222 static int 3223 flow_mreg_update_copy_table(struct rte_eth_dev *dev, 3224 struct rte_flow *flow, 3225 const struct rte_flow_action *actions, 3226 struct rte_flow_error *error) 3227 { 3228 struct mlx5_priv *priv = dev->data->dev_private; 3229 struct mlx5_dev_config *config = &priv->config; 3230 struct mlx5_flow_mreg_copy_resource *mcp_res; 3231 const struct rte_flow_action_mark *mark; 3232 3233 /* Check whether extensive metadata feature is engaged. */ 3234 if (!config->dv_flow_en || 3235 config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 3236 !mlx5_flow_ext_mreg_supported(dev) || 3237 !priv->sh->dv_regc0_mask) 3238 return 0; 3239 /* Find MARK action. */ 3240 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 3241 switch (actions->type) { 3242 case RTE_FLOW_ACTION_TYPE_FLAG: 3243 mcp_res = flow_mreg_add_copy_action 3244 (dev, MLX5_FLOW_MARK_DEFAULT, error); 3245 if (!mcp_res) 3246 return -rte_errno; 3247 flow->mreg_copy = mcp_res; 3248 if (dev->data->dev_started) { 3249 mcp_res->appcnt++; 3250 flow->copy_applied = 1; 3251 } 3252 return 0; 3253 case RTE_FLOW_ACTION_TYPE_MARK: 3254 mark = (const struct rte_flow_action_mark *) 3255 actions->conf; 3256 mcp_res = 3257 flow_mreg_add_copy_action(dev, mark->id, error); 3258 if (!mcp_res) 3259 return -rte_errno; 3260 flow->mreg_copy = mcp_res; 3261 if (dev->data->dev_started) { 3262 mcp_res->appcnt++; 3263 flow->copy_applied = 1; 3264 } 3265 return 0; 3266 default: 3267 break; 3268 } 3269 } 3270 return 0; 3271 } 3272 3273 #define MLX5_MAX_SPLIT_ACTIONS 24 3274 #define MLX5_MAX_SPLIT_ITEMS 24 3275 3276 /** 3277 * Split the hairpin flow. 3278 * Since HW can't support encap on Rx, we move the encap to Tx. 3279 * If the count action is after the encap then we also 3280 * move the count action. In this case the count will also measure 3281 * the outer bytes. 3282 * 3283 * @param dev 3284 * Pointer to Ethernet device. 3285 * @param[in] actions 3286 * Associated actions (list terminated by the END action). 3287 * @param[out] actions_rx 3288 * Rx flow actions. 3289 * @param[out] actions_tx 3290 * Tx flow actions. 3291 * @param[out] pattern_tx 3292 * The pattern items for the Tx flow. 3293 * @param[out] flow_id 3294 * The flow ID connected to this flow. 3295 * 3296 * @return 3297 * 0 on success.
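 *
 * A hypothetical invocation sketch (buffer sizes follow the
 * MLX5_MAX_SPLIT_ACTIONS/MLX5_MAX_SPLIT_ITEMS limits defined above, the
 * rest is illustrative):
 * @code
 * struct rte_flow_action actions_rx[MLX5_MAX_SPLIT_ACTIONS];
 * struct rte_flow_action actions_tx[MLX5_MAX_SPLIT_ACTIONS];
 * struct rte_flow_item pattern_tx[MLX5_MAX_SPLIT_ITEMS];
 * uint32_t flow_id;
 *
 * flow_hairpin_split(dev, actions, actions_rx, actions_tx,
 *                    pattern_tx, &flow_id);
 * @endcode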
3298 */ 3299 static int 3300 flow_hairpin_split(struct rte_eth_dev *dev, 3301 const struct rte_flow_action actions[], 3302 struct rte_flow_action actions_rx[], 3303 struct rte_flow_action actions_tx[], 3304 struct rte_flow_item pattern_tx[], 3305 uint32_t *flow_id) 3306 { 3307 struct mlx5_priv *priv = dev->data->dev_private; 3308 const struct rte_flow_action_raw_encap *raw_encap; 3309 const struct rte_flow_action_raw_decap *raw_decap; 3310 struct mlx5_rte_flow_action_set_tag *set_tag; 3311 struct rte_flow_action *tag_action; 3312 struct mlx5_rte_flow_item_tag *tag_item; 3313 struct rte_flow_item *item; 3314 char *addr; 3315 int encap = 0; 3316 3317 mlx5_flow_id_get(priv->sh->flow_id_pool, flow_id); 3318 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 3319 switch (actions->type) { 3320 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 3321 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 3322 rte_memcpy(actions_tx, actions, 3323 sizeof(struct rte_flow_action)); 3324 actions_tx++; 3325 break; 3326 case RTE_FLOW_ACTION_TYPE_COUNT: 3327 if (encap) { 3328 rte_memcpy(actions_tx, actions, 3329 sizeof(struct rte_flow_action)); 3330 actions_tx++; 3331 } else { 3332 rte_memcpy(actions_rx, actions, 3333 sizeof(struct rte_flow_action)); 3334 actions_rx++; 3335 } 3336 break; 3337 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 3338 raw_encap = actions->conf; 3339 if (raw_encap->size > 3340 (sizeof(struct rte_flow_item_eth) + 3341 sizeof(struct rte_flow_item_ipv4))) { 3342 memcpy(actions_tx, actions, 3343 sizeof(struct rte_flow_action)); 3344 actions_tx++; 3345 encap = 1; 3346 } else { 3347 rte_memcpy(actions_rx, actions, 3348 sizeof(struct rte_flow_action)); 3349 actions_rx++; 3350 } 3351 break; 3352 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 3353 raw_decap = actions->conf; 3354 if (raw_decap->size < 3355 (sizeof(struct rte_flow_item_eth) + 3356 sizeof(struct rte_flow_item_ipv4))) { 3357 memcpy(actions_tx, actions, 3358 sizeof(struct rte_flow_action)); 3359 actions_tx++; 3360 } else { 3361 rte_memcpy(actions_rx, actions, 3362 sizeof(struct rte_flow_action)); 3363 actions_rx++; 3364 } 3365 break; 3366 default: 3367 rte_memcpy(actions_rx, actions, 3368 sizeof(struct rte_flow_action)); 3369 actions_rx++; 3370 break; 3371 } 3372 } 3373 /* Add set meta action and end action for the Rx flow. */ 3374 tag_action = actions_rx; 3375 tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG; 3376 actions_rx++; 3377 rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action)); 3378 actions_rx++; 3379 set_tag = (void *)actions_rx; 3380 set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL); 3381 assert(set_tag->id > REG_NONE); 3382 set_tag->data = *flow_id; 3383 tag_action->conf = set_tag; 3384 /* Create Tx item list. */ 3385 rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action)); 3386 addr = (void *)&pattern_tx[2]; 3387 item = pattern_tx; 3388 item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG; 3389 tag_item = (void *)addr; 3390 tag_item->data = *flow_id; 3391 tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL); 3392 assert(set_tag->id > REG_NONE); 3393 item->spec = tag_item; 3394 addr += sizeof(struct mlx5_rte_flow_item_tag); 3395 tag_item = (void *)addr; 3396 tag_item->data = UINT32_MAX; 3397 tag_item->id = UINT16_MAX; 3398 item->mask = tag_item; 3399 addr += sizeof(struct mlx5_rte_flow_item_tag); 3400 item->last = NULL; 3401 item++; 3402 item->type = RTE_FLOW_ITEM_TYPE_END; 3403 return 0; 3404 } 3405 3406 /** 3407 * The last stage of splitting chain, just creates the subflow 3408 * without any modification. 
3409 * 3410 * @param dev 3411 * Pointer to Ethernet device. 3412 * @param[in] flow 3413 * Parent flow structure pointer. 3414 * @param[in, out] sub_flow 3415 * Pointer to return the created subflow, may be NULL. 3416 * @param[in] attr 3417 * Flow rule attributes. 3418 * @param[in] items 3419 * Pattern specification (list terminated by the END pattern item). 3420 * @param[in] actions 3421 * Associated actions (list terminated by the END action). 3422 * @param[in] external 3423 * This flow rule is created by request external to PMD. 3424 * @param[out] error 3425 * Perform verbose error reporting if not NULL. 3426 * @return 3427 * 0 on success, negative value otherwise 3428 */ 3429 static int 3430 flow_create_split_inner(struct rte_eth_dev *dev, 3431 struct rte_flow *flow, 3432 struct mlx5_flow **sub_flow, 3433 const struct rte_flow_attr *attr, 3434 const struct rte_flow_item items[], 3435 const struct rte_flow_action actions[], 3436 bool external, struct rte_flow_error *error) 3437 { 3438 struct mlx5_flow *dev_flow; 3439 3440 dev_flow = flow_drv_prepare(flow, attr, items, actions, error); 3441 if (!dev_flow) 3442 return -rte_errno; 3443 dev_flow->flow = flow; 3444 dev_flow->external = external; 3445 /* Subflow object was created, we must include it in the list. */ 3446 LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); 3447 if (sub_flow) 3448 *sub_flow = dev_flow; 3449 return flow_drv_translate(dev, dev_flow, attr, items, actions, error); 3450 } 3451 3452 /** 3453 * Split the meter flow. 3454 * 3455 * As the meter flow will be split into three sub flows, the actions 3456 * other than the meter action only make sense when the meter accepts 3457 * the packet. If the packet is to be dropped, no additional 3458 * actions should be taken. 3459 * 3460 * One special kind of action, the L3 tunnel decapsulation, is placed 3461 * in the prefix sub flow so that the L3 tunnel header is not 3462 * taken into account. 3463 * 3464 * @param dev 3465 * Pointer to Ethernet device. 3466 * @param[in] actions 3467 * Associated actions (list terminated by the END action). 3468 * @param[out] actions_sfx 3469 * Suffix flow actions. 3470 * @param[out] actions_pre 3471 * Prefix flow actions. 3476 * 3477 * @return 3478 * The tag ID shared by the prefix and suffix sub flows, 0 on failure. 3479 */ 3480 static int 3481 flow_meter_split_prep(struct rte_eth_dev *dev, 3482 const struct rte_flow_action actions[], 3483 struct rte_flow_action actions_sfx[], 3484 struct rte_flow_action actions_pre[]) 3485 { 3486 struct rte_flow_action *tag_action; 3487 struct mlx5_rte_flow_action_set_tag *set_tag; 3488 struct rte_flow_error error; 3489 const struct rte_flow_action_raw_encap *raw_encap; 3490 const struct rte_flow_action_raw_decap *raw_decap; 3491 uint32_t tag_id; 3492 3493 /* Add the extra tag action first. */ 3494 tag_action = actions_pre; 3495 tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG; 3496 actions_pre++; 3497 /* Prepare the actions for prefix and suffix flow.
*/ 3498 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 3499 switch (actions->type) { 3500 case RTE_FLOW_ACTION_TYPE_METER: 3501 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: 3502 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: 3503 memcpy(actions_pre, actions, 3504 sizeof(struct rte_flow_action)); 3505 actions_pre++; 3506 break; 3507 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 3508 raw_encap = actions->conf; 3509 if (raw_encap->size > 3510 (sizeof(struct rte_flow_item_eth) + 3511 sizeof(struct rte_flow_item_ipv4))) { 3512 memcpy(actions_sfx, actions, 3513 sizeof(struct rte_flow_action)); 3514 actions_sfx++; 3515 } else { 3516 rte_memcpy(actions_pre, actions, 3517 sizeof(struct rte_flow_action)); 3518 actions_pre++; 3519 } 3520 break; 3521 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 3522 raw_decap = actions->conf; 3523 /* Size 0 decap means 50 bytes as vxlan decap. */ 3524 if (raw_decap->size && (raw_decap->size < 3525 (sizeof(struct rte_flow_item_eth) + 3526 sizeof(struct rte_flow_item_ipv4)))) { 3527 memcpy(actions_sfx, actions, 3528 sizeof(struct rte_flow_action)); 3529 actions_sfx++; 3530 } else { 3531 rte_memcpy(actions_pre, actions, 3532 sizeof(struct rte_flow_action)); 3533 actions_pre++; 3534 } 3535 break; 3536 default: 3537 memcpy(actions_sfx, actions, 3538 sizeof(struct rte_flow_action)); 3539 actions_sfx++; 3540 break; 3541 } 3542 } 3543 /* Add end action to the actions. */ 3544 actions_sfx->type = RTE_FLOW_ACTION_TYPE_END; 3545 actions_pre->type = RTE_FLOW_ACTION_TYPE_END; 3546 actions_pre++; 3547 /* Set the tag. */ 3548 set_tag = (void *)actions_pre; 3549 set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error); 3550 /* 3551 * Get the id from the qrss_pool to make qrss share the id with meter. 3552 */ 3553 tag_id = flow_qrss_get_id(dev); 3554 set_tag->data = rte_cpu_to_be_32(tag_id); 3555 tag_action->conf = set_tag; 3556 return tag_id; 3557 } 3558 3559 /** 3560 * Split action list having QUEUE/RSS for metadata register copy. 3561 * 3562 * Once Q/RSS action is detected in user's action list, the flow action 3563 * should be split in order to copy metadata registers, which will happen in 3564 * RX_CP_TBL like, 3565 * - CQE->flow_tag := reg_c[1] (MARK) 3566 * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META) 3567 * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL. 3568 * This is because the last action of each flow must be a terminal action 3569 * (QUEUE, RSS or DROP). 3570 * 3571 * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is 3572 * stored and kept in the mlx5_flow structure per each sub_flow. 3573 * 3574 * The Q/RSS action is replaced with, 3575 * - SET_TAG, setting the allocated flow ID to reg_c[2]. 3576 * And the following JUMP action is added at the end, 3577 * - JUMP, to RX_CP_TBL. 3578 * 3579 * A flow to perform remained Q/RSS action will be created in RX_ACT_TBL by 3580 * flow_create_split_metadata() routine. The flow will look like, 3581 * - If flow ID matches (reg_c[2]), perform Q/RSS. 3582 * 3583 * @param dev 3584 * Pointer to Ethernet device. 3585 * @param[out] split_actions 3586 * Pointer to store split actions to jump to CP_TBL. 3587 * @param[in] actions 3588 * Pointer to the list of original flow actions. 3589 * @param[in] qrss 3590 * Pointer to the Q/RSS action. 3591 * @param[in] actions_n 3592 * Number of original actions. 3593 * @param[out] error 3594 * Perform verbose error reporting if not NULL. 3595 * 3596 * @return 3597 * non-zero unique flow_id on success, otherwise 0 and 3598 * error/rte_error are set. 
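 *
 * The caller must provide a buffer large enough for the extra SET_TAG and
 * JUMP actions; a sizing sketch matching the caller later in this file:
 * @code
 * size_t act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
 *                   sizeof(struct rte_flow_action_set_tag) +
 *                   sizeof(struct rte_flow_action_jump);
 * struct rte_flow_action *split_actions =
 *         rte_zmalloc(__func__, act_size, 0);
 * @endcode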
3599 */ 3600 static uint32_t 3601 flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, 3602 struct rte_flow_action *split_actions, 3603 const struct rte_flow_action *actions, 3604 const struct rte_flow_action *qrss, 3605 int actions_n, struct rte_flow_error *error) 3606 { 3607 struct mlx5_rte_flow_action_set_tag *set_tag; 3608 struct rte_flow_action_jump *jump; 3609 const int qrss_idx = qrss - actions; 3610 uint32_t flow_id = 0; 3611 int ret = 0; 3612 3613 /* 3614 * Given actions will be split 3615 * - Replace QUEUE/RSS action with SET_TAG to set flow ID. 3616 * - Add jump to mreg CP_TBL. 3617 * As a result, there will be one more action. 3618 */ 3619 ++actions_n; 3620 memcpy(split_actions, actions, sizeof(*split_actions) * actions_n); 3621 set_tag = (void *)(split_actions + actions_n); 3622 /* 3623 * If tag action is not set to void(it means we are not the meter 3624 * suffix flow), add the tag action. Since meter suffix flow already 3625 * has the tag added. 3626 */ 3627 if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) { 3628 /* 3629 * Allocate the new subflow ID. This one is unique within 3630 * device and not shared with representors. Otherwise, 3631 * we would have to resolve multi-thread access synch 3632 * issue. Each flow on the shared device is appended 3633 * with source vport identifier, so the resulting 3634 * flows will be unique in the shared (by master and 3635 * representors) domain even if they have coinciding 3636 * IDs. 3637 */ 3638 flow_id = flow_qrss_get_id(dev); 3639 if (!flow_id) 3640 return rte_flow_error_set(error, ENOMEM, 3641 RTE_FLOW_ERROR_TYPE_ACTION, 3642 NULL, "can't allocate id " 3643 "for split Q/RSS subflow"); 3644 /* Internal SET_TAG action to set flow ID. */ 3645 *set_tag = (struct mlx5_rte_flow_action_set_tag){ 3646 .data = flow_id, 3647 }; 3648 ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error); 3649 if (ret < 0) 3650 return ret; 3651 set_tag->id = ret; 3652 /* Construct new actions array. */ 3653 /* Replace QUEUE/RSS action. */ 3654 split_actions[qrss_idx] = (struct rte_flow_action){ 3655 .type = MLX5_RTE_FLOW_ACTION_TYPE_TAG, 3656 .conf = set_tag, 3657 }; 3658 } 3659 /* JUMP action to jump to mreg copy table (CP_TBL). */ 3660 jump = (void *)(set_tag + 1); 3661 *jump = (struct rte_flow_action_jump){ 3662 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, 3663 }; 3664 split_actions[actions_n - 2] = (struct rte_flow_action){ 3665 .type = RTE_FLOW_ACTION_TYPE_JUMP, 3666 .conf = jump, 3667 }; 3668 split_actions[actions_n - 1] = (struct rte_flow_action){ 3669 .type = RTE_FLOW_ACTION_TYPE_END, 3670 }; 3671 return flow_id; 3672 } 3673 3674 /** 3675 * Extend the given action list for Tx metadata copy. 3676 * 3677 * Copy the given action list to the ext_actions and add flow metadata register 3678 * copy action in order to copy reg_a set by WQE to reg_c[0]. 3679 * 3680 * @param[out] ext_actions 3681 * Pointer to the extended action list. 3682 * @param[in] actions 3683 * Pointer to the list of actions. 3684 * @param[in] actions_n 3685 * Number of actions in the list. 3686 * @param[out] error 3687 * Perform verbose error reporting if not NULL. 
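 *
 * The ext_actions buffer must also hold the trailing copy_mreg
 * configuration; a sizing sketch matching the caller later in this file:
 * @code
 * size_t act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
 *                   sizeof(struct mlx5_flow_action_copy_mreg);
 * struct rte_flow_action *ext_actions =
 *         rte_zmalloc(__func__, act_size, 0);
 * @endcode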
3688 * 3689 * @return 3690 * 0 on success, negative value otherwise 3691 */ 3692 static int 3693 flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, 3694 struct rte_flow_action *ext_actions, 3695 const struct rte_flow_action *actions, 3696 int actions_n, struct rte_flow_error *error) 3697 { 3698 struct mlx5_flow_action_copy_mreg *cp_mreg = 3699 (struct mlx5_flow_action_copy_mreg *) 3700 (ext_actions + actions_n + 1); 3701 int ret; 3702 3703 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error); 3704 if (ret < 0) 3705 return ret; 3706 cp_mreg->dst = ret; 3707 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error); 3708 if (ret < 0) 3709 return ret; 3710 cp_mreg->src = ret; 3711 memcpy(ext_actions, actions, 3712 sizeof(*ext_actions) * actions_n); 3713 ext_actions[actions_n - 1] = (struct rte_flow_action){ 3714 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 3715 .conf = cp_mreg, 3716 }; 3717 ext_actions[actions_n] = (struct rte_flow_action){ 3718 .type = RTE_FLOW_ACTION_TYPE_END, 3719 }; 3720 return 0; 3721 } 3722 3723 /** 3724 * The splitting for metadata feature. 3725 * 3726 * - Q/RSS action on NIC Rx should be split in order to pass by 3727 * the mreg copy table (RX_CP_TBL) and then it jumps to the 3728 * action table (RX_ACT_TBL) which has the split Q/RSS action. 3729 * 3730 * - All the actions on NIC Tx should have a mreg copy action to 3731 * copy reg_a from WQE to reg_c[0]. 3732 * 3733 * @param dev 3734 * Pointer to Ethernet device. 3735 * @param[in] flow 3736 * Parent flow structure pointer. 3737 * @param[in] attr 3738 * Flow rule attributes. 3739 * @param[in] items 3740 * Pattern specification (list terminated by the END pattern item). 3741 * @param[in] actions 3742 * Associated actions (list terminated by the END action). 3743 * @param[in] external 3744 * This flow rule is created by request external to PMD. 3745 * @param[out] error 3746 * Perform verbose error reporting if not NULL. 3747 * @return 3748 * 0 on success, negative value otherwise 3749 */ 3750 static int 3751 flow_create_split_metadata(struct rte_eth_dev *dev, 3752 struct rte_flow *flow, 3753 const struct rte_flow_attr *attr, 3754 const struct rte_flow_item items[], 3755 const struct rte_flow_action actions[], 3756 bool external, struct rte_flow_error *error) 3757 { 3758 struct mlx5_priv *priv = dev->data->dev_private; 3759 struct mlx5_dev_config *config = &priv->config; 3760 const struct rte_flow_action *qrss = NULL; 3761 struct rte_flow_action *ext_actions = NULL; 3762 struct mlx5_flow *dev_flow = NULL; 3763 uint32_t qrss_id = 0; 3764 int mtr_sfx = 0; 3765 size_t act_size; 3766 int actions_n; 3767 int ret; 3768 3769 /* Check whether extensive metadata feature is engaged. */ 3770 if (!config->dv_flow_en || 3771 config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 3772 !mlx5_flow_ext_mreg_supported(dev)) 3773 return flow_create_split_inner(dev, flow, NULL, attr, items, 3774 actions, external, error); 3775 actions_n = flow_parse_qrss_action(actions, &qrss); 3776 if (qrss) { 3777 /* Exclude hairpin flows from splitting. 
*/
3778 if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
3779 const struct rte_flow_action_queue *queue;
3780 
3781 queue = qrss->conf;
3782 if (mlx5_rxq_get_type(dev, queue->index) ==
3783 MLX5_RXQ_TYPE_HAIRPIN)
3784 qrss = NULL;
3785 } else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) {
3786 const struct rte_flow_action_rss *rss;
3787 
3788 rss = qrss->conf;
3789 if (mlx5_rxq_get_type(dev, rss->queue[0]) ==
3790 MLX5_RXQ_TYPE_HAIRPIN)
3791 qrss = NULL;
3792 }
3793 }
3794 if (qrss) {
3795 /* Check if it is in meter suffix table. */
3796 mtr_sfx = attr->group == (attr->transfer ?
3797 (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
3798 MLX5_FLOW_TABLE_LEVEL_SUFFIX);
3799 /*
3800 * Q/RSS action on NIC Rx should be split in order to pass by
3801 * the mreg copy table (RX_CP_TBL) and then it jumps to the
3802 * action table (RX_ACT_TBL) which has the split Q/RSS action.
3803 */
3804 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
3805 sizeof(struct rte_flow_action_set_tag) +
3806 sizeof(struct rte_flow_action_jump);
3807 ext_actions = rte_zmalloc(__func__, act_size, 0);
3808 if (!ext_actions)
3809 return rte_flow_error_set(error, ENOMEM,
3810 RTE_FLOW_ERROR_TYPE_ACTION,
3811 NULL, "no memory to split "
3812 "metadata flow");
3813 /*
3814 * If this is the meter suffix flow, the tag has already been
3815 * added, so set the tag action to void.
3816 */
3817 if (mtr_sfx)
3818 ext_actions[qrss - actions].type =
3819 RTE_FLOW_ACTION_TYPE_VOID;
3820 else
3821 ext_actions[qrss - actions].type =
3822 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
3823 /*
3824 * Create the new action list with the Q/RSS action removed and
3825 * set tag / jump to the register copy table (RX_CP_TBL)
3826 * appended. The unique tag ID must be preallocated here
3827 * because it is needed for the set tag action.
3828 */
3829 qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions,
3830 qrss, actions_n, error);
3831 if (!mtr_sfx && !qrss_id) {
3832 ret = -rte_errno;
3833 goto exit;
3834 }
3835 } else if (attr->egress && !attr->transfer) {
3836 /*
3837 * All the actions on NIC Tx should have a metadata register
3838 * copy action to copy reg_a from WQE to reg_c[meta].
3839 */
3840 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
3841 sizeof(struct mlx5_flow_action_copy_mreg);
3842 ext_actions = rte_zmalloc(__func__, act_size, 0);
3843 if (!ext_actions)
3844 return rte_flow_error_set(error, ENOMEM,
3845 RTE_FLOW_ERROR_TYPE_ACTION,
3846 NULL, "no memory to split "
3847 "metadata flow");
3848 /* Create the action list appended with copy register. */
3849 ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions,
3850 actions_n, error);
3851 if (ret < 0)
3852 goto exit;
3853 }
3854 /* Add the unmodified original or prefix subflow. */
3855 ret = flow_create_split_inner(dev, flow, &dev_flow, attr, items,
3856 ext_actions ? ext_actions : actions,
3857 external, error);
3858 if (ret < 0)
3859 goto exit;
3860 assert(dev_flow);
3861 if (qrss) {
3862 const struct rte_flow_attr q_attr = {
3863 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
3864 .ingress = 1,
3865 };
3866 /* Internal PMD action to set register.
*/
3867 struct mlx5_rte_flow_item_tag q_tag_spec = {
3868 .data = qrss_id,
3869 .id = 0,
3870 };
3871 struct rte_flow_item q_items[] = {
3872 {
3873 .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG,
3874 .spec = &q_tag_spec,
3875 .last = NULL,
3876 .mask = NULL,
3877 },
3878 {
3879 .type = RTE_FLOW_ITEM_TYPE_END,
3880 },
3881 };
3882 struct rte_flow_action q_actions[] = {
3883 {
3884 .type = qrss->type,
3885 .conf = qrss->conf,
3886 },
3887 {
3888 .type = RTE_FLOW_ACTION_TYPE_END,
3889 },
3890 };
3891 uint64_t hash_fields = dev_flow->hash_fields;
3892 
3893 /*
3894 * Configure the tag item only if there is no meter subflow.
3895 * Since the tag is already set in the meter suffix subflow,
3896 * we can simply use the meter suffix items as is.
3897 */
3898 if (qrss_id) {
3899 /* Not meter subflow. */
3900 assert(!mtr_sfx);
3901 /*
3902 * Store the unique ID in the prefix subflow: it is destroyed
3903 * after the suffix subflow, so the ID is freed only when no
3904 * actual flow uses it any more and identifier reallocation
3905 * becomes possible (for example, for other flows in other
3906 * threads).
3907 */
3908 dev_flow->qrss_id = qrss_id;
3909 qrss_id = 0;
3910 ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
3911 error);
3912 if (ret < 0)
3913 goto exit;
3914 q_tag_spec.id = ret;
3915 }
3916 dev_flow = NULL;
3917 /* Add suffix subflow to execute Q/RSS. */
3918 ret = flow_create_split_inner(dev, flow, &dev_flow,
3919 &q_attr, mtr_sfx ? items :
3920 q_items, q_actions,
3921 external, error);
3922 if (ret < 0)
3923 goto exit;
3924 assert(dev_flow);
3925 dev_flow->hash_fields = hash_fields;
3926 }
3927 
3928 exit:
3929 /*
3930 * We do not destroy the partially created sub_flows in case of error.
3931 * These are included in the parent flow list and will be destroyed
3932 * by flow_drv_destroy.
3933 */
3934 flow_qrss_free_id(dev, qrss_id);
3935 rte_free(ext_actions);
3936 return ret;
3937 }
3938 
3939 /**
3940 * The splitting for meter feature.
3941 *
3942 * - The meter flow will be split into two flows: a prefix flow and a
3943 * suffix flow. Packets continue to the suffix flow only if they pass
3944 * the prefix meter action.
3945 *
3946 * - Reg_C_5 is used to match the packet between the prefix and the
3947 * suffix flow.
3948 *
3949 * @param dev
3950 * Pointer to Ethernet device.
3951 * @param[in] flow
3952 * Parent flow structure pointer.
3953 * @param[in] attr
3954 * Flow rule attributes.
3955 * @param[in] items
3956 * Pattern specification (list terminated by the END pattern item).
3957 * @param[in] actions
3958 * Associated actions (list terminated by the END action).
3959 * @param[in] external
3960 * This flow rule is created by request external to PMD.
3961 * @param[out] error
3962 * Perform verbose error reporting if not NULL.
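 *
 * For illustration only, assuming an original rule with METER and QUEUE
 * actions, the split roughly looks like:
 *   prefix subflow (original table): original pattern,
 *     actions METER / SET_TAG(meter flow id) / END
 *   suffix subflow (meter suffix table): pattern TAG(meter flow id)
 *     [/ PORT_ID] / END, remaining actions QUEUE / END
 *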
3963 * @return
3964 * 0 on success, negative value otherwise
3965 */
3966 static int
3967 flow_create_split_meter(struct rte_eth_dev *dev,
3968 struct rte_flow *flow,
3969 const struct rte_flow_attr *attr,
3970 const struct rte_flow_item items[],
3971 const struct rte_flow_action actions[],
3972 bool external, struct rte_flow_error *error)
3973 {
3974 struct mlx5_priv *priv = dev->data->dev_private;
3975 struct rte_flow_action *sfx_actions = NULL;
3976 struct rte_flow_action *pre_actions = NULL;
3977 struct rte_flow_item *sfx_items = NULL;
3978 const struct rte_flow_item *sfx_port_id_item;
3979 struct mlx5_flow *dev_flow = NULL;
3980 struct rte_flow_attr sfx_attr = *attr;
3981 uint32_t mtr = 0;
3982 uint32_t mtr_tag_id = 0;
3983 size_t act_size;
3984 size_t item_size;
3985 int actions_n = 0;
3986 int ret;
3987 
3988 if (priv->mtr_en)
3989 actions_n = flow_check_meter_action(actions, &mtr);
3990 if (mtr) {
3991 struct mlx5_rte_flow_item_tag *tag_spec;
3992 /* The five prefix actions: meter, decap, encap, tag, end. */
3993 act_size = sizeof(struct rte_flow_action) * (actions_n + 5) +
3994 sizeof(struct rte_flow_action_set_tag);
3995 /* Suffix items: tag, port id (optional), end. */
3996 #define METER_SUFFIX_ITEM 3
3997 item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
3998 sizeof(struct mlx5_rte_flow_item_tag);
3999 sfx_actions = rte_zmalloc(__func__, (act_size + item_size), 0);
4000 if (!sfx_actions)
4001 return rte_flow_error_set(error, ENOMEM,
4002 RTE_FLOW_ERROR_TYPE_ACTION,
4003 NULL, "no memory to split "
4004 "meter flow");
4005 pre_actions = sfx_actions + actions_n;
4006 mtr_tag_id = flow_meter_split_prep(dev, actions, sfx_actions,
4007 pre_actions);
4008 if (!mtr_tag_id) {
4009 ret = -rte_errno;
4010 goto exit;
4011 }
4012 /* Add the prefix subflow. */
4013 ret = flow_create_split_inner(dev, flow, &dev_flow, attr, items,
4014 pre_actions, external, error);
4015 if (ret) {
4016 ret = -rte_errno;
4017 goto exit;
4018 }
4019 dev_flow->mtr_flow_id = mtr_tag_id;
4020 /* Prepare the suffix flow match pattern. */
4021 sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
4022 act_size);
4023 tag_spec = (struct mlx5_rte_flow_item_tag *)(sfx_items +
4024 METER_SUFFIX_ITEM);
4025 tag_spec->data = rte_cpu_to_be_32(dev_flow->mtr_flow_id);
4026 tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0,
4027 error);
4028 sfx_items->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG;
4029 sfx_items->spec = tag_spec;
4030 sfx_items->last = NULL;
4031 sfx_items->mask = NULL;
4032 sfx_items++;
4033 sfx_port_id_item = find_port_id_item(items);
4034 if (sfx_port_id_item) {
4035 memcpy(sfx_items, sfx_port_id_item,
4036 sizeof(*sfx_items));
4037 sfx_items++;
4038 }
4039 sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
4040 sfx_items -= sfx_port_id_item ? 2 : 1;
4041 /* Set the suffix group attribute. */
4042 sfx_attr.group = sfx_attr.transfer ?
4043 (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
4044 MLX5_FLOW_TABLE_LEVEL_SUFFIX;
4045 }
4046 /* Add the suffix subflow (or the original flow when no meter is used). */
4047 ret = flow_create_split_metadata(dev, flow, &sfx_attr,
4048 sfx_items ? sfx_items : items,
4049 sfx_actions ? sfx_actions : actions,
4050 external, error);
4051 exit:
4052 if (sfx_actions)
4053 rte_free(sfx_actions);
4054 return ret;
4055 }
4056 
4057 /**
4058 * Split the flow into a set of subflows.
The splitters might be linked
4059 * in the chain, like this:
4060 * flow_create_split_outer() calls:
4061 * flow_create_split_meter() calls:
4062 * flow_create_split_metadata(meter_subflow_0) calls:
4063 * flow_create_split_inner(metadata_subflow_0)
4064 * flow_create_split_inner(metadata_subflow_1)
4065 * flow_create_split_inner(metadata_subflow_2)
4066 * flow_create_split_metadata(meter_subflow_1) calls:
4067 * flow_create_split_inner(metadata_subflow_0)
4068 * flow_create_split_inner(metadata_subflow_1)
4069 * flow_create_split_inner(metadata_subflow_2)
4070 *
4071 * This provides a flexible way to add new levels of flow splitting.
4072 * All successfully created subflows are included in the
4073 * parent flow dev_flow list.
4074 *
4075 * @param dev
4076 * Pointer to Ethernet device.
4077 * @param[in] flow
4078 * Parent flow structure pointer.
4079 * @param[in] attr
4080 * Flow rule attributes.
4081 * @param[in] items
4082 * Pattern specification (list terminated by the END pattern item).
4083 * @param[in] actions
4084 * Associated actions (list terminated by the END action).
4085 * @param[in] external
4086 * This flow rule is created by request external to PMD.
4087 * @param[out] error
4088 * Perform verbose error reporting if not NULL.
4089 * @return
4090 * 0 on success, negative value otherwise
4091 */
4092 static int
4093 flow_create_split_outer(struct rte_eth_dev *dev,
4094 struct rte_flow *flow,
4095 const struct rte_flow_attr *attr,
4096 const struct rte_flow_item items[],
4097 const struct rte_flow_action actions[],
4098 bool external, struct rte_flow_error *error)
4099 {
4100 int ret;
4101 
4102 ret = flow_create_split_meter(dev, flow, attr, items,
4103 actions, external, error);
4104 assert(ret <= 0);
4105 return ret;
4106 }
4107 
4108 /**
4109 * Create a flow and add it to @p list.
4110 *
4111 * @param dev
4112 * Pointer to Ethernet device.
4113 * @param list
4114 * Pointer to a TAILQ flow list. If this parameter is NULL,
4115 * the flow is not inserted into any list; it is just created
4116 * and it is the caller's responsibility to track the
4117 * created flow.
4118 * @param[in] attr
4119 * Flow rule attributes.
4120 * @param[in] items
4121 * Pattern specification (list terminated by the END pattern item).
4122 * @param[in] actions
4123 * Associated actions (list terminated by the END action).
4124 * @param[in] external
4125 * This flow rule is created by request external to PMD.
4126 * @param[out] error
4127 * Perform verbose error reporting if not NULL.
4128 *
4129 * @return
4130 * A flow on success, NULL otherwise and rte_errno is set.
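 *
 * Informational outline of the steps performed below:
 * - Split hairpin actions into Rx and Tx parts, if any.
 * - Validate the (Rx) actions.
 * - Expand the pattern according to the requested RSS types.
 * - Create the subflow set for each expanded pattern via
 *   flow_create_split_outer().
 * - Create the Tx subflow for hairpin flows and update the metadata
 *   register copy table for ingress flows.
 * - Apply the flow to the hardware if the port is already started.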
4131 */ 4132 static struct rte_flow * 4133 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, 4134 const struct rte_flow_attr *attr, 4135 const struct rte_flow_item items[], 4136 const struct rte_flow_action actions[], 4137 bool external, struct rte_flow_error *error) 4138 { 4139 struct mlx5_priv *priv = dev->data->dev_private; 4140 struct rte_flow *flow = NULL; 4141 struct mlx5_flow *dev_flow; 4142 const struct rte_flow_action_rss *rss; 4143 union { 4144 struct rte_flow_expand_rss buf; 4145 uint8_t buffer[2048]; 4146 } expand_buffer; 4147 union { 4148 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; 4149 uint8_t buffer[2048]; 4150 } actions_rx; 4151 union { 4152 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; 4153 uint8_t buffer[2048]; 4154 } actions_hairpin_tx; 4155 union { 4156 struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS]; 4157 uint8_t buffer[2048]; 4158 } items_tx; 4159 struct rte_flow_expand_rss *buf = &expand_buffer.buf; 4160 const struct rte_flow_action *p_actions_rx = actions; 4161 int ret; 4162 uint32_t i; 4163 uint32_t flow_size; 4164 int hairpin_flow = 0; 4165 uint32_t hairpin_id = 0; 4166 struct rte_flow_attr attr_tx = { .priority = 0 }; 4167 4168 hairpin_flow = flow_check_hairpin_split(dev, attr, actions); 4169 if (hairpin_flow > 0) { 4170 if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) { 4171 rte_errno = EINVAL; 4172 return NULL; 4173 } 4174 flow_hairpin_split(dev, actions, actions_rx.actions, 4175 actions_hairpin_tx.actions, items_tx.items, 4176 &hairpin_id); 4177 p_actions_rx = actions_rx.actions; 4178 } 4179 ret = flow_drv_validate(dev, attr, items, p_actions_rx, external, 4180 error); 4181 if (ret < 0) 4182 goto error_before_flow; 4183 flow_size = sizeof(struct rte_flow); 4184 rss = flow_get_rss_action(p_actions_rx); 4185 if (rss) 4186 flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t), 4187 sizeof(void *)); 4188 else 4189 flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *)); 4190 flow = rte_calloc(__func__, 1, flow_size, 0); 4191 if (!flow) { 4192 rte_errno = ENOMEM; 4193 goto error_before_flow; 4194 } 4195 flow->drv_type = flow_get_drv_type(dev, attr); 4196 if (hairpin_id != 0) 4197 flow->hairpin_flow_id = hairpin_id; 4198 assert(flow->drv_type > MLX5_FLOW_TYPE_MIN && 4199 flow->drv_type < MLX5_FLOW_TYPE_MAX); 4200 flow->rss.queue = (void *)(flow + 1); 4201 if (rss) { 4202 /* 4203 * The following information is required by 4204 * mlx5_flow_hashfields_adjust() in advance. 4205 */ 4206 flow->rss.level = rss->level; 4207 /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */ 4208 flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types; 4209 } 4210 LIST_INIT(&flow->dev_flows); 4211 if (rss && rss->types) { 4212 unsigned int graph_root; 4213 4214 graph_root = find_graph_root(items, rss->level); 4215 ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer), 4216 items, rss->types, 4217 mlx5_support_expansion, 4218 graph_root); 4219 assert(ret > 0 && 4220 (unsigned int)ret < sizeof(expand_buffer.buffer)); 4221 } else { 4222 buf->entries = 1; 4223 buf->entry[0].pattern = (void *)(uintptr_t)items; 4224 } 4225 for (i = 0; i < buf->entries; ++i) { 4226 /* 4227 * The splitter may create multiple dev_flows, 4228 * depending on configuration. In the simplest 4229 * case it just creates unmodified original flow. 4230 */ 4231 ret = flow_create_split_outer(dev, flow, attr, 4232 buf->entry[i].pattern, 4233 p_actions_rx, external, 4234 error); 4235 if (ret < 0) 4236 goto error; 4237 } 4238 /* Create the tx flow. 
*/ 4239 if (hairpin_flow) { 4240 attr_tx.group = MLX5_HAIRPIN_TX_TABLE; 4241 attr_tx.ingress = 0; 4242 attr_tx.egress = 1; 4243 dev_flow = flow_drv_prepare(flow, &attr_tx, items_tx.items, 4244 actions_hairpin_tx.actions, error); 4245 if (!dev_flow) 4246 goto error; 4247 dev_flow->flow = flow; 4248 dev_flow->external = 0; 4249 LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); 4250 ret = flow_drv_translate(dev, dev_flow, &attr_tx, 4251 items_tx.items, 4252 actions_hairpin_tx.actions, error); 4253 if (ret < 0) 4254 goto error; 4255 } 4256 /* 4257 * Update the metadata register copy table. If extensive 4258 * metadata feature is enabled and registers are supported 4259 * we might create the extra rte_flow for each unique 4260 * MARK/FLAG action ID. 4261 * 4262 * The table is updated for ingress Flows only, because 4263 * the egress Flows belong to the different device and 4264 * copy table should be updated in peer NIC Rx domain. 4265 */ 4266 if (attr->ingress && 4267 (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) { 4268 ret = flow_mreg_update_copy_table(dev, flow, actions, error); 4269 if (ret) 4270 goto error; 4271 } 4272 if (dev->data->dev_started) { 4273 ret = flow_drv_apply(dev, flow, error); 4274 if (ret < 0) 4275 goto error; 4276 } 4277 if (list) 4278 TAILQ_INSERT_TAIL(list, flow, next); 4279 flow_rxq_flags_set(dev, flow); 4280 return flow; 4281 error_before_flow: 4282 if (hairpin_id) 4283 mlx5_flow_id_release(priv->sh->flow_id_pool, 4284 hairpin_id); 4285 return NULL; 4286 error: 4287 assert(flow); 4288 flow_mreg_del_copy_action(dev, flow); 4289 ret = rte_errno; /* Save rte_errno before cleanup. */ 4290 if (flow->hairpin_flow_id) 4291 mlx5_flow_id_release(priv->sh->flow_id_pool, 4292 flow->hairpin_flow_id); 4293 assert(flow); 4294 flow_drv_destroy(dev, flow); 4295 rte_free(flow); 4296 rte_errno = ret; /* Restore rte_errno. */ 4297 return NULL; 4298 } 4299 4300 /** 4301 * Create a dedicated flow rule on e-switch table 0 (root table), to direct all 4302 * incoming packets to table 1. 4303 * 4304 * Other flow rules, requested for group n, will be created in 4305 * e-switch table n+1. 4306 * Jump action to e-switch group n will be created to group n+1. 4307 * 4308 * Used when working in switchdev mode, to utilise advantages of table 1 4309 * and above. 4310 * 4311 * @param dev 4312 * Pointer to Ethernet device. 4313 * 4314 * @return 4315 * Pointer to flow on success, NULL otherwise and rte_errno is set. 4316 */ 4317 struct rte_flow * 4318 mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev) 4319 { 4320 const struct rte_flow_attr attr = { 4321 .group = 0, 4322 .priority = 0, 4323 .ingress = 1, 4324 .egress = 0, 4325 .transfer = 1, 4326 }; 4327 const struct rte_flow_item pattern = { 4328 .type = RTE_FLOW_ITEM_TYPE_END, 4329 }; 4330 struct rte_flow_action_jump jump = { 4331 .group = 1, 4332 }; 4333 const struct rte_flow_action actions[] = { 4334 { 4335 .type = RTE_FLOW_ACTION_TYPE_JUMP, 4336 .conf = &jump, 4337 }, 4338 { 4339 .type = RTE_FLOW_ACTION_TYPE_END, 4340 }, 4341 }; 4342 struct mlx5_priv *priv = dev->data->dev_private; 4343 struct rte_flow_error error; 4344 4345 return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern, 4346 actions, false, &error); 4347 } 4348 4349 /** 4350 * Create a flow. 
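 *
 * This is the rte_flow_ops entry point; an illustrative application-side
 * call (all names are placeholders) is:
 *   flow = rte_flow_create(port_id, &attr, pattern, actions, &error);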
4351 * 4352 * @see rte_flow_create() 4353 * @see rte_flow_ops 4354 */ 4355 struct rte_flow * 4356 mlx5_flow_create(struct rte_eth_dev *dev, 4357 const struct rte_flow_attr *attr, 4358 const struct rte_flow_item items[], 4359 const struct rte_flow_action actions[], 4360 struct rte_flow_error *error) 4361 { 4362 struct mlx5_priv *priv = dev->data->dev_private; 4363 4364 return flow_list_create(dev, &priv->flows, 4365 attr, items, actions, true, error); 4366 } 4367 4368 /** 4369 * Destroy a flow in a list. 4370 * 4371 * @param dev 4372 * Pointer to Ethernet device. 4373 * @param list 4374 * Pointer to a TAILQ flow list. If this parameter NULL, 4375 * there is no flow removal from the list. 4376 * @param[in] flow 4377 * Flow to destroy. 4378 */ 4379 static void 4380 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, 4381 struct rte_flow *flow) 4382 { 4383 struct mlx5_priv *priv = dev->data->dev_private; 4384 4385 /* 4386 * Update RX queue flags only if port is started, otherwise it is 4387 * already clean. 4388 */ 4389 if (dev->data->dev_started) 4390 flow_rxq_flags_trim(dev, flow); 4391 if (flow->hairpin_flow_id) 4392 mlx5_flow_id_release(priv->sh->flow_id_pool, 4393 flow->hairpin_flow_id); 4394 flow_drv_destroy(dev, flow); 4395 if (list) 4396 TAILQ_REMOVE(list, flow, next); 4397 flow_mreg_del_copy_action(dev, flow); 4398 rte_free(flow->fdir); 4399 rte_free(flow); 4400 } 4401 4402 /** 4403 * Destroy all flows. 4404 * 4405 * @param dev 4406 * Pointer to Ethernet device. 4407 * @param list 4408 * Pointer to a TAILQ flow list. 4409 */ 4410 void 4411 mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list) 4412 { 4413 while (!TAILQ_EMPTY(list)) { 4414 struct rte_flow *flow; 4415 4416 flow = TAILQ_FIRST(list); 4417 flow_list_destroy(dev, list, flow); 4418 } 4419 } 4420 4421 /** 4422 * Remove all flows. 4423 * 4424 * @param dev 4425 * Pointer to Ethernet device. 4426 * @param list 4427 * Pointer to a TAILQ flow list. 4428 */ 4429 void 4430 mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) 4431 { 4432 struct rte_flow *flow; 4433 4434 TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) { 4435 flow_drv_remove(dev, flow); 4436 flow_mreg_stop_copy_action(dev, flow); 4437 } 4438 flow_mreg_del_default_copy_action(dev); 4439 flow_rxq_flags_clear(dev); 4440 } 4441 4442 /** 4443 * Add all flows. 4444 * 4445 * @param dev 4446 * Pointer to Ethernet device. 4447 * @param list 4448 * Pointer to a TAILQ flow list. 4449 * 4450 * @return 4451 * 0 on success, a negative errno value otherwise and rte_errno is set. 4452 */ 4453 int 4454 mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) 4455 { 4456 struct rte_flow *flow; 4457 struct rte_flow_error error; 4458 int ret = 0; 4459 4460 /* Make sure default copy action (reg_c[0] -> reg_b) is created. */ 4461 ret = flow_mreg_add_default_copy_action(dev, &error); 4462 if (ret < 0) 4463 return -rte_errno; 4464 /* Apply Flows created by application. */ 4465 TAILQ_FOREACH(flow, list, next) { 4466 ret = flow_mreg_start_copy_action(dev, flow); 4467 if (ret < 0) 4468 goto error; 4469 ret = flow_drv_apply(dev, flow, &error); 4470 if (ret < 0) 4471 goto error; 4472 flow_rxq_flags_set(dev, flow); 4473 } 4474 return 0; 4475 error: 4476 ret = rte_errno; /* Save rte_errno before cleanup. */ 4477 mlx5_flow_stop(dev, list); 4478 rte_errno = ret; /* Restore rte_errno. */ 4479 return -rte_errno; 4480 } 4481 4482 /** 4483 * Verify the flow list is empty 4484 * 4485 * @param dev 4486 * Pointer to Ethernet device. 
4487 * 4488 * @return the number of flows not released. 4489 */ 4490 int 4491 mlx5_flow_verify(struct rte_eth_dev *dev) 4492 { 4493 struct mlx5_priv *priv = dev->data->dev_private; 4494 struct rte_flow *flow; 4495 int ret = 0; 4496 4497 TAILQ_FOREACH(flow, &priv->flows, next) { 4498 DRV_LOG(DEBUG, "port %u flow %p still referenced", 4499 dev->data->port_id, (void *)flow); 4500 ++ret; 4501 } 4502 return ret; 4503 } 4504 4505 /** 4506 * Enable default hairpin egress flow. 4507 * 4508 * @param dev 4509 * Pointer to Ethernet device. 4510 * @param queue 4511 * The queue index. 4512 * 4513 * @return 4514 * 0 on success, a negative errno value otherwise and rte_errno is set. 4515 */ 4516 int 4517 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, 4518 uint32_t queue) 4519 { 4520 struct mlx5_priv *priv = dev->data->dev_private; 4521 const struct rte_flow_attr attr = { 4522 .egress = 1, 4523 .priority = 0, 4524 }; 4525 struct mlx5_rte_flow_item_tx_queue queue_spec = { 4526 .queue = queue, 4527 }; 4528 struct mlx5_rte_flow_item_tx_queue queue_mask = { 4529 .queue = UINT32_MAX, 4530 }; 4531 struct rte_flow_item items[] = { 4532 { 4533 .type = MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE, 4534 .spec = &queue_spec, 4535 .last = NULL, 4536 .mask = &queue_mask, 4537 }, 4538 { 4539 .type = RTE_FLOW_ITEM_TYPE_END, 4540 }, 4541 }; 4542 struct rte_flow_action_jump jump = { 4543 .group = MLX5_HAIRPIN_TX_TABLE, 4544 }; 4545 struct rte_flow_action actions[2]; 4546 struct rte_flow *flow; 4547 struct rte_flow_error error; 4548 4549 actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP; 4550 actions[0].conf = &jump; 4551 actions[1].type = RTE_FLOW_ACTION_TYPE_END; 4552 flow = flow_list_create(dev, &priv->ctrl_flows, 4553 &attr, items, actions, false, &error); 4554 if (!flow) { 4555 DRV_LOG(DEBUG, 4556 "Failed to create ctrl flow: rte_errno(%d)," 4557 " type(%d), message(%s)", 4558 rte_errno, error.type, 4559 error.message ? error.message : " (no stated reason)"); 4560 return -rte_errno; 4561 } 4562 return 0; 4563 } 4564 4565 /** 4566 * Enable a control flow configured from the control plane. 4567 * 4568 * @param dev 4569 * Pointer to Ethernet device. 4570 * @param eth_spec 4571 * An Ethernet flow spec to apply. 4572 * @param eth_mask 4573 * An Ethernet flow mask to apply. 4574 * @param vlan_spec 4575 * A VLAN flow spec to apply. 4576 * @param vlan_mask 4577 * A VLAN flow mask to apply. 4578 * 4579 * @return 4580 * 0 on success, a negative errno value otherwise and rte_errno is set. 4581 */ 4582 int 4583 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, 4584 struct rte_flow_item_eth *eth_spec, 4585 struct rte_flow_item_eth *eth_mask, 4586 struct rte_flow_item_vlan *vlan_spec, 4587 struct rte_flow_item_vlan *vlan_mask) 4588 { 4589 struct mlx5_priv *priv = dev->data->dev_private; 4590 const struct rte_flow_attr attr = { 4591 .ingress = 1, 4592 .priority = MLX5_FLOW_PRIO_RSVD, 4593 }; 4594 struct rte_flow_item items[] = { 4595 { 4596 .type = RTE_FLOW_ITEM_TYPE_ETH, 4597 .spec = eth_spec, 4598 .last = NULL, 4599 .mask = eth_mask, 4600 }, 4601 { 4602 .type = (vlan_spec) ? 
RTE_FLOW_ITEM_TYPE_VLAN : 4603 RTE_FLOW_ITEM_TYPE_END, 4604 .spec = vlan_spec, 4605 .last = NULL, 4606 .mask = vlan_mask, 4607 }, 4608 { 4609 .type = RTE_FLOW_ITEM_TYPE_END, 4610 }, 4611 }; 4612 uint16_t queue[priv->reta_idx_n]; 4613 struct rte_flow_action_rss action_rss = { 4614 .func = RTE_ETH_HASH_FUNCTION_DEFAULT, 4615 .level = 0, 4616 .types = priv->rss_conf.rss_hf, 4617 .key_len = priv->rss_conf.rss_key_len, 4618 .queue_num = priv->reta_idx_n, 4619 .key = priv->rss_conf.rss_key, 4620 .queue = queue, 4621 }; 4622 struct rte_flow_action actions[] = { 4623 { 4624 .type = RTE_FLOW_ACTION_TYPE_RSS, 4625 .conf = &action_rss, 4626 }, 4627 { 4628 .type = RTE_FLOW_ACTION_TYPE_END, 4629 }, 4630 }; 4631 struct rte_flow *flow; 4632 struct rte_flow_error error; 4633 unsigned int i; 4634 4635 if (!priv->reta_idx_n || !priv->rxqs_n) { 4636 return 0; 4637 } 4638 for (i = 0; i != priv->reta_idx_n; ++i) 4639 queue[i] = (*priv->reta_idx)[i]; 4640 flow = flow_list_create(dev, &priv->ctrl_flows, 4641 &attr, items, actions, false, &error); 4642 if (!flow) 4643 return -rte_errno; 4644 return 0; 4645 } 4646 4647 /** 4648 * Enable a flow control configured from the control plane. 4649 * 4650 * @param dev 4651 * Pointer to Ethernet device. 4652 * @param eth_spec 4653 * An Ethernet flow spec to apply. 4654 * @param eth_mask 4655 * An Ethernet flow mask to apply. 4656 * 4657 * @return 4658 * 0 on success, a negative errno value otherwise and rte_errno is set. 4659 */ 4660 int 4661 mlx5_ctrl_flow(struct rte_eth_dev *dev, 4662 struct rte_flow_item_eth *eth_spec, 4663 struct rte_flow_item_eth *eth_mask) 4664 { 4665 return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL); 4666 } 4667 4668 /** 4669 * Destroy a flow. 4670 * 4671 * @see rte_flow_destroy() 4672 * @see rte_flow_ops 4673 */ 4674 int 4675 mlx5_flow_destroy(struct rte_eth_dev *dev, 4676 struct rte_flow *flow, 4677 struct rte_flow_error *error __rte_unused) 4678 { 4679 struct mlx5_priv *priv = dev->data->dev_private; 4680 4681 flow_list_destroy(dev, &priv->flows, flow); 4682 return 0; 4683 } 4684 4685 /** 4686 * Destroy all flows. 4687 * 4688 * @see rte_flow_flush() 4689 * @see rte_flow_ops 4690 */ 4691 int 4692 mlx5_flow_flush(struct rte_eth_dev *dev, 4693 struct rte_flow_error *error __rte_unused) 4694 { 4695 struct mlx5_priv *priv = dev->data->dev_private; 4696 4697 mlx5_flow_list_flush(dev, &priv->flows); 4698 return 0; 4699 } 4700 4701 /** 4702 * Isolated mode. 4703 * 4704 * @see rte_flow_isolate() 4705 * @see rte_flow_ops 4706 */ 4707 int 4708 mlx5_flow_isolate(struct rte_eth_dev *dev, 4709 int enable, 4710 struct rte_flow_error *error) 4711 { 4712 struct mlx5_priv *priv = dev->data->dev_private; 4713 4714 if (dev->data->dev_started) { 4715 rte_flow_error_set(error, EBUSY, 4716 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 4717 NULL, 4718 "port must be stopped first"); 4719 return -rte_errno; 4720 } 4721 priv->isolated = !!enable; 4722 if (enable) 4723 dev->dev_ops = &mlx5_dev_ops_isolate; 4724 else 4725 dev->dev_ops = &mlx5_dev_ops; 4726 return 0; 4727 } 4728 4729 /** 4730 * Query a flow. 
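 *
 * Dispatches to the query callback of the driver that created the flow
 * (flow->drv_type). For illustration only (names are placeholders), the
 * usual application-side path querying a COUNT action is:
 *   struct rte_flow_query_count count = { .reset = 0 };
 *   struct rte_flow_action action = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 *   rte_flow_query(port_id, flow, &action, &count, &error);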
4731 * 4732 * @see rte_flow_query() 4733 * @see rte_flow_ops 4734 */ 4735 static int 4736 flow_drv_query(struct rte_eth_dev *dev, 4737 struct rte_flow *flow, 4738 const struct rte_flow_action *actions, 4739 void *data, 4740 struct rte_flow_error *error) 4741 { 4742 const struct mlx5_flow_driver_ops *fops; 4743 enum mlx5_flow_drv_type ftype = flow->drv_type; 4744 4745 assert(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX); 4746 fops = flow_get_drv_ops(ftype); 4747 4748 return fops->query(dev, flow, actions, data, error); 4749 } 4750 4751 /** 4752 * Query a flow. 4753 * 4754 * @see rte_flow_query() 4755 * @see rte_flow_ops 4756 */ 4757 int 4758 mlx5_flow_query(struct rte_eth_dev *dev, 4759 struct rte_flow *flow, 4760 const struct rte_flow_action *actions, 4761 void *data, 4762 struct rte_flow_error *error) 4763 { 4764 int ret; 4765 4766 ret = flow_drv_query(dev, flow, actions, data, error); 4767 if (ret < 0) 4768 return ret; 4769 return 0; 4770 } 4771 4772 /** 4773 * Convert a flow director filter to a generic flow. 4774 * 4775 * @param dev 4776 * Pointer to Ethernet device. 4777 * @param fdir_filter 4778 * Flow director filter to add. 4779 * @param attributes 4780 * Generic flow parameters structure. 4781 * 4782 * @return 4783 * 0 on success, a negative errno value otherwise and rte_errno is set. 4784 */ 4785 static int 4786 flow_fdir_filter_convert(struct rte_eth_dev *dev, 4787 const struct rte_eth_fdir_filter *fdir_filter, 4788 struct mlx5_fdir *attributes) 4789 { 4790 struct mlx5_priv *priv = dev->data->dev_private; 4791 const struct rte_eth_fdir_input *input = &fdir_filter->input; 4792 const struct rte_eth_fdir_masks *mask = 4793 &dev->data->dev_conf.fdir_conf.mask; 4794 4795 /* Validate queue number. */ 4796 if (fdir_filter->action.rx_queue >= priv->rxqs_n) { 4797 DRV_LOG(ERR, "port %u invalid queue number %d", 4798 dev->data->port_id, fdir_filter->action.rx_queue); 4799 rte_errno = EINVAL; 4800 return -rte_errno; 4801 } 4802 attributes->attr.ingress = 1; 4803 attributes->items[0] = (struct rte_flow_item) { 4804 .type = RTE_FLOW_ITEM_TYPE_ETH, 4805 .spec = &attributes->l2, 4806 .mask = &attributes->l2_mask, 4807 }; 4808 switch (fdir_filter->action.behavior) { 4809 case RTE_ETH_FDIR_ACCEPT: 4810 attributes->actions[0] = (struct rte_flow_action){ 4811 .type = RTE_FLOW_ACTION_TYPE_QUEUE, 4812 .conf = &attributes->queue, 4813 }; 4814 break; 4815 case RTE_ETH_FDIR_REJECT: 4816 attributes->actions[0] = (struct rte_flow_action){ 4817 .type = RTE_FLOW_ACTION_TYPE_DROP, 4818 }; 4819 break; 4820 default: 4821 DRV_LOG(ERR, "port %u invalid behavior %d", 4822 dev->data->port_id, 4823 fdir_filter->action.behavior); 4824 rte_errno = ENOTSUP; 4825 return -rte_errno; 4826 } 4827 attributes->queue.index = fdir_filter->action.rx_queue; 4828 /* Handle L3. 
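 * The spec is taken from the filter input while the mask comes from the
 * port-wide fdir_conf mask configured at device setup.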
*/ 4829 switch (fdir_filter->input.flow_type) { 4830 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 4831 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 4832 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 4833 attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){ 4834 .src_addr = input->flow.ip4_flow.src_ip, 4835 .dst_addr = input->flow.ip4_flow.dst_ip, 4836 .time_to_live = input->flow.ip4_flow.ttl, 4837 .type_of_service = input->flow.ip4_flow.tos, 4838 }; 4839 attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){ 4840 .src_addr = mask->ipv4_mask.src_ip, 4841 .dst_addr = mask->ipv4_mask.dst_ip, 4842 .time_to_live = mask->ipv4_mask.ttl, 4843 .type_of_service = mask->ipv4_mask.tos, 4844 .next_proto_id = mask->ipv4_mask.proto, 4845 }; 4846 attributes->items[1] = (struct rte_flow_item){ 4847 .type = RTE_FLOW_ITEM_TYPE_IPV4, 4848 .spec = &attributes->l3, 4849 .mask = &attributes->l3_mask, 4850 }; 4851 break; 4852 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 4853 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 4854 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 4855 attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){ 4856 .hop_limits = input->flow.ipv6_flow.hop_limits, 4857 .proto = input->flow.ipv6_flow.proto, 4858 }; 4859 4860 memcpy(attributes->l3.ipv6.hdr.src_addr, 4861 input->flow.ipv6_flow.src_ip, 4862 RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); 4863 memcpy(attributes->l3.ipv6.hdr.dst_addr, 4864 input->flow.ipv6_flow.dst_ip, 4865 RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); 4866 memcpy(attributes->l3_mask.ipv6.hdr.src_addr, 4867 mask->ipv6_mask.src_ip, 4868 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); 4869 memcpy(attributes->l3_mask.ipv6.hdr.dst_addr, 4870 mask->ipv6_mask.dst_ip, 4871 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); 4872 attributes->items[1] = (struct rte_flow_item){ 4873 .type = RTE_FLOW_ITEM_TYPE_IPV6, 4874 .spec = &attributes->l3, 4875 .mask = &attributes->l3_mask, 4876 }; 4877 break; 4878 default: 4879 DRV_LOG(ERR, "port %u invalid flow type%d", 4880 dev->data->port_id, fdir_filter->input.flow_type); 4881 rte_errno = ENOTSUP; 4882 return -rte_errno; 4883 } 4884 /* Handle L4. 
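 * Source/destination ports come from the filter input and the masks from
 * the global FDIR port masks; the *_OTHER flow types carry no L4 item.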
*/ 4885 switch (fdir_filter->input.flow_type) { 4886 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 4887 attributes->l4.udp.hdr = (struct rte_udp_hdr){ 4888 .src_port = input->flow.udp4_flow.src_port, 4889 .dst_port = input->flow.udp4_flow.dst_port, 4890 }; 4891 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ 4892 .src_port = mask->src_port_mask, 4893 .dst_port = mask->dst_port_mask, 4894 }; 4895 attributes->items[2] = (struct rte_flow_item){ 4896 .type = RTE_FLOW_ITEM_TYPE_UDP, 4897 .spec = &attributes->l4, 4898 .mask = &attributes->l4_mask, 4899 }; 4900 break; 4901 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 4902 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ 4903 .src_port = input->flow.tcp4_flow.src_port, 4904 .dst_port = input->flow.tcp4_flow.dst_port, 4905 }; 4906 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ 4907 .src_port = mask->src_port_mask, 4908 .dst_port = mask->dst_port_mask, 4909 }; 4910 attributes->items[2] = (struct rte_flow_item){ 4911 .type = RTE_FLOW_ITEM_TYPE_TCP, 4912 .spec = &attributes->l4, 4913 .mask = &attributes->l4_mask, 4914 }; 4915 break; 4916 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 4917 attributes->l4.udp.hdr = (struct rte_udp_hdr){ 4918 .src_port = input->flow.udp6_flow.src_port, 4919 .dst_port = input->flow.udp6_flow.dst_port, 4920 }; 4921 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ 4922 .src_port = mask->src_port_mask, 4923 .dst_port = mask->dst_port_mask, 4924 }; 4925 attributes->items[2] = (struct rte_flow_item){ 4926 .type = RTE_FLOW_ITEM_TYPE_UDP, 4927 .spec = &attributes->l4, 4928 .mask = &attributes->l4_mask, 4929 }; 4930 break; 4931 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 4932 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ 4933 .src_port = input->flow.tcp6_flow.src_port, 4934 .dst_port = input->flow.tcp6_flow.dst_port, 4935 }; 4936 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ 4937 .src_port = mask->src_port_mask, 4938 .dst_port = mask->dst_port_mask, 4939 }; 4940 attributes->items[2] = (struct rte_flow_item){ 4941 .type = RTE_FLOW_ITEM_TYPE_TCP, 4942 .spec = &attributes->l4, 4943 .mask = &attributes->l4_mask, 4944 }; 4945 break; 4946 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 4947 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 4948 break; 4949 default: 4950 DRV_LOG(ERR, "port %u invalid flow type%d", 4951 dev->data->port_id, fdir_filter->input.flow_type); 4952 rte_errno = ENOTSUP; 4953 return -rte_errno; 4954 } 4955 return 0; 4956 } 4957 4958 #define FLOW_FDIR_CMP(f1, f2, fld) \ 4959 memcmp(&(f1)->fld, &(f2)->fld, sizeof(f1->fld)) 4960 4961 /** 4962 * Compare two FDIR flows. If items and actions are identical, the two flows are 4963 * regarded as same. 4964 * 4965 * @param dev 4966 * Pointer to Ethernet device. 4967 * @param f1 4968 * FDIR flow to compare. 4969 * @param f2 4970 * FDIR flow to compare. 4971 * 4972 * @return 4973 * Zero on match, 1 otherwise. 4974 */ 4975 static int 4976 flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2) 4977 { 4978 if (FLOW_FDIR_CMP(f1, f2, attr) || 4979 FLOW_FDIR_CMP(f1, f2, l2) || 4980 FLOW_FDIR_CMP(f1, f2, l2_mask) || 4981 FLOW_FDIR_CMP(f1, f2, l3) || 4982 FLOW_FDIR_CMP(f1, f2, l3_mask) || 4983 FLOW_FDIR_CMP(f1, f2, l4) || 4984 FLOW_FDIR_CMP(f1, f2, l4_mask) || 4985 FLOW_FDIR_CMP(f1, f2, actions[0].type)) 4986 return 1; 4987 if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE && 4988 FLOW_FDIR_CMP(f1, f2, queue)) 4989 return 1; 4990 return 0; 4991 } 4992 4993 /** 4994 * Search device flow list to find out a matched FDIR flow. 4995 * 4996 * @param dev 4997 * Pointer to Ethernet device. 
4998 * @param fdir_flow 4999 * FDIR flow to lookup. 5000 * 5001 * @return 5002 * Pointer of flow if found, NULL otherwise. 5003 */ 5004 static struct rte_flow * 5005 flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow) 5006 { 5007 struct mlx5_priv *priv = dev->data->dev_private; 5008 struct rte_flow *flow = NULL; 5009 5010 assert(fdir_flow); 5011 TAILQ_FOREACH(flow, &priv->flows, next) { 5012 if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) { 5013 DRV_LOG(DEBUG, "port %u found FDIR flow %p", 5014 dev->data->port_id, (void *)flow); 5015 break; 5016 } 5017 } 5018 return flow; 5019 } 5020 5021 /** 5022 * Add new flow director filter and store it in list. 5023 * 5024 * @param dev 5025 * Pointer to Ethernet device. 5026 * @param fdir_filter 5027 * Flow director filter to add. 5028 * 5029 * @return 5030 * 0 on success, a negative errno value otherwise and rte_errno is set. 5031 */ 5032 static int 5033 flow_fdir_filter_add(struct rte_eth_dev *dev, 5034 const struct rte_eth_fdir_filter *fdir_filter) 5035 { 5036 struct mlx5_priv *priv = dev->data->dev_private; 5037 struct mlx5_fdir *fdir_flow; 5038 struct rte_flow *flow; 5039 int ret; 5040 5041 fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0); 5042 if (!fdir_flow) { 5043 rte_errno = ENOMEM; 5044 return -rte_errno; 5045 } 5046 ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow); 5047 if (ret) 5048 goto error; 5049 flow = flow_fdir_filter_lookup(dev, fdir_flow); 5050 if (flow) { 5051 rte_errno = EEXIST; 5052 goto error; 5053 } 5054 flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr, 5055 fdir_flow->items, fdir_flow->actions, true, 5056 NULL); 5057 if (!flow) 5058 goto error; 5059 assert(!flow->fdir); 5060 flow->fdir = fdir_flow; 5061 DRV_LOG(DEBUG, "port %u created FDIR flow %p", 5062 dev->data->port_id, (void *)flow); 5063 return 0; 5064 error: 5065 rte_free(fdir_flow); 5066 return -rte_errno; 5067 } 5068 5069 /** 5070 * Delete specific filter. 5071 * 5072 * @param dev 5073 * Pointer to Ethernet device. 5074 * @param fdir_filter 5075 * Filter to be deleted. 5076 * 5077 * @return 5078 * 0 on success, a negative errno value otherwise and rte_errno is set. 5079 */ 5080 static int 5081 flow_fdir_filter_delete(struct rte_eth_dev *dev, 5082 const struct rte_eth_fdir_filter *fdir_filter) 5083 { 5084 struct mlx5_priv *priv = dev->data->dev_private; 5085 struct rte_flow *flow; 5086 struct mlx5_fdir fdir_flow = { 5087 .attr.group = 0, 5088 }; 5089 int ret; 5090 5091 ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow); 5092 if (ret) 5093 return -rte_errno; 5094 flow = flow_fdir_filter_lookup(dev, &fdir_flow); 5095 if (!flow) { 5096 rte_errno = ENOENT; 5097 return -rte_errno; 5098 } 5099 flow_list_destroy(dev, &priv->flows, flow); 5100 DRV_LOG(DEBUG, "port %u deleted FDIR flow %p", 5101 dev->data->port_id, (void *)flow); 5102 return 0; 5103 } 5104 5105 /** 5106 * Update queue for specific filter. 5107 * 5108 * @param dev 5109 * Pointer to Ethernet device. 5110 * @param fdir_filter 5111 * Filter to be updated. 5112 * 5113 * @return 5114 * 0 on success, a negative errno value otherwise and rte_errno is set. 5115 */ 5116 static int 5117 flow_fdir_filter_update(struct rte_eth_dev *dev, 5118 const struct rte_eth_fdir_filter *fdir_filter) 5119 { 5120 int ret; 5121 5122 ret = flow_fdir_filter_delete(dev, fdir_filter); 5123 if (ret) 5124 return ret; 5125 return flow_fdir_filter_add(dev, fdir_filter); 5126 } 5127 5128 /** 5129 * Flush all filters. 
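 *
 * Note that this flushes the whole flow list of the port, including
 * rte_flow rules created through the generic API, since FDIR flows are
 * kept in the same list.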
5130 * 5131 * @param dev 5132 * Pointer to Ethernet device. 5133 */ 5134 static void 5135 flow_fdir_filter_flush(struct rte_eth_dev *dev) 5136 { 5137 struct mlx5_priv *priv = dev->data->dev_private; 5138 5139 mlx5_flow_list_flush(dev, &priv->flows); 5140 } 5141 5142 /** 5143 * Get flow director information. 5144 * 5145 * @param dev 5146 * Pointer to Ethernet device. 5147 * @param[out] fdir_info 5148 * Resulting flow director information. 5149 */ 5150 static void 5151 flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) 5152 { 5153 struct rte_eth_fdir_masks *mask = 5154 &dev->data->dev_conf.fdir_conf.mask; 5155 5156 fdir_info->mode = dev->data->dev_conf.fdir_conf.mode; 5157 fdir_info->guarant_spc = 0; 5158 rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask)); 5159 fdir_info->max_flexpayload = 0; 5160 fdir_info->flow_types_mask[0] = 0; 5161 fdir_info->flex_payload_unit = 0; 5162 fdir_info->max_flex_payload_segment_num = 0; 5163 fdir_info->flex_payload_limit = 0; 5164 memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf)); 5165 } 5166 5167 /** 5168 * Deal with flow director operations. 5169 * 5170 * @param dev 5171 * Pointer to Ethernet device. 5172 * @param filter_op 5173 * Operation to perform. 5174 * @param arg 5175 * Pointer to operation-specific structure. 5176 * 5177 * @return 5178 * 0 on success, a negative errno value otherwise and rte_errno is set. 5179 */ 5180 static int 5181 flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, 5182 void *arg) 5183 { 5184 enum rte_fdir_mode fdir_mode = 5185 dev->data->dev_conf.fdir_conf.mode; 5186 5187 if (filter_op == RTE_ETH_FILTER_NOP) 5188 return 0; 5189 if (fdir_mode != RTE_FDIR_MODE_PERFECT && 5190 fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 5191 DRV_LOG(ERR, "port %u flow director mode %d not supported", 5192 dev->data->port_id, fdir_mode); 5193 rte_errno = EINVAL; 5194 return -rte_errno; 5195 } 5196 switch (filter_op) { 5197 case RTE_ETH_FILTER_ADD: 5198 return flow_fdir_filter_add(dev, arg); 5199 case RTE_ETH_FILTER_UPDATE: 5200 return flow_fdir_filter_update(dev, arg); 5201 case RTE_ETH_FILTER_DELETE: 5202 return flow_fdir_filter_delete(dev, arg); 5203 case RTE_ETH_FILTER_FLUSH: 5204 flow_fdir_filter_flush(dev); 5205 break; 5206 case RTE_ETH_FILTER_INFO: 5207 flow_fdir_info_get(dev, arg); 5208 break; 5209 default: 5210 DRV_LOG(DEBUG, "port %u unknown operation %u", 5211 dev->data->port_id, filter_op); 5212 rte_errno = EINVAL; 5213 return -rte_errno; 5214 } 5215 return 0; 5216 } 5217 5218 /** 5219 * Manage filter operations. 5220 * 5221 * @param dev 5222 * Pointer to Ethernet device structure. 5223 * @param filter_type 5224 * Filter type. 5225 * @param filter_op 5226 * Operation to perform. 5227 * @param arg 5228 * Pointer to operation-specific structure. 5229 * 5230 * @return 5231 * 0 on success, a negative errno value otherwise and rte_errno is set. 
5232 */ 5233 int 5234 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, 5235 enum rte_filter_type filter_type, 5236 enum rte_filter_op filter_op, 5237 void *arg) 5238 { 5239 switch (filter_type) { 5240 case RTE_ETH_FILTER_GENERIC: 5241 if (filter_op != RTE_ETH_FILTER_GET) { 5242 rte_errno = EINVAL; 5243 return -rte_errno; 5244 } 5245 *(const void **)arg = &mlx5_flow_ops; 5246 return 0; 5247 case RTE_ETH_FILTER_FDIR: 5248 return flow_fdir_ctrl_func(dev, filter_op, arg); 5249 default: 5250 DRV_LOG(ERR, "port %u filter type (%d) not supported", 5251 dev->data->port_id, filter_type); 5252 rte_errno = ENOTSUP; 5253 return -rte_errno; 5254 } 5255 return 0; 5256 } 5257 5258 /** 5259 * Create the needed meter and suffix tables. 5260 * 5261 * @param[in] dev 5262 * Pointer to Ethernet device. 5263 * @param[in] fm 5264 * Pointer to the flow meter. 5265 * 5266 * @return 5267 * Pointer to table set on success, NULL otherwise. 5268 */ 5269 struct mlx5_meter_domains_infos * 5270 mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev, 5271 const struct mlx5_flow_meter *fm) 5272 { 5273 const struct mlx5_flow_driver_ops *fops; 5274 5275 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5276 return fops->create_mtr_tbls(dev, fm); 5277 } 5278 5279 /** 5280 * Destroy the meter table set. 5281 * 5282 * @param[in] dev 5283 * Pointer to Ethernet device. 5284 * @param[in] tbl 5285 * Pointer to the meter table set. 5286 * 5287 * @return 5288 * 0 on success. 5289 */ 5290 int 5291 mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev, 5292 struct mlx5_meter_domains_infos *tbls) 5293 { 5294 const struct mlx5_flow_driver_ops *fops; 5295 5296 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5297 return fops->destroy_mtr_tbls(dev, tbls); 5298 } 5299 5300 /** 5301 * Create policer rules. 5302 * 5303 * @param[in] dev 5304 * Pointer to Ethernet device. 5305 * @param[in] fm 5306 * Pointer to flow meter structure. 5307 * @param[in] attr 5308 * Pointer to flow attributes. 5309 * 5310 * @return 5311 * 0 on success, -1 otherwise. 5312 */ 5313 int 5314 mlx5_flow_create_policer_rules(struct rte_eth_dev *dev, 5315 struct mlx5_flow_meter *fm, 5316 const struct rte_flow_attr *attr) 5317 { 5318 const struct mlx5_flow_driver_ops *fops; 5319 5320 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5321 return fops->create_policer_rules(dev, fm, attr); 5322 } 5323 5324 /** 5325 * Destroy policer rules. 5326 * 5327 * @param[in] fm 5328 * Pointer to flow meter structure. 5329 * @param[in] attr 5330 * Pointer to flow attributes. 5331 * 5332 * @return 5333 * 0 on success, -1 otherwise. 5334 */ 5335 int 5336 mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev, 5337 struct mlx5_flow_meter *fm, 5338 const struct rte_flow_attr *attr) 5339 { 5340 const struct mlx5_flow_driver_ops *fops; 5341 5342 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5343 return fops->destroy_policer_rules(dev, fm, attr); 5344 } 5345 5346 /** 5347 * Allocate a counter. 5348 * 5349 * @param[in] dev 5350 * Pointer to Ethernet device structure. 5351 * 5352 * @return 5353 * Pointer to allocated counter on success, NULL otherwise. 
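 *
 * A minimal usage sketch within the PMD (counters are only supported with
 * the DV flow engine, as checked below):
 *   struct mlx5_flow_counter *cnt = mlx5_counter_alloc(dev);
 *   uint64_t pkts, bytes;
 *
 *   if (cnt) {
 *           mlx5_counter_query(dev, cnt, false, &pkts, &bytes);
 *           mlx5_counter_free(dev, cnt);
 *   }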
5354 */ 5355 struct mlx5_flow_counter * 5356 mlx5_counter_alloc(struct rte_eth_dev *dev) 5357 { 5358 const struct mlx5_flow_driver_ops *fops; 5359 struct rte_flow_attr attr = { .transfer = 0 }; 5360 5361 if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) { 5362 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5363 return fops->counter_alloc(dev); 5364 } 5365 DRV_LOG(ERR, 5366 "port %u counter allocate is not supported.", 5367 dev->data->port_id); 5368 return NULL; 5369 } 5370 5371 /** 5372 * Free a counter. 5373 * 5374 * @param[in] dev 5375 * Pointer to Ethernet device structure. 5376 * @param[in] cnt 5377 * Pointer to counter to be free. 5378 */ 5379 void 5380 mlx5_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt) 5381 { 5382 const struct mlx5_flow_driver_ops *fops; 5383 struct rte_flow_attr attr = { .transfer = 0 }; 5384 5385 if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) { 5386 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5387 fops->counter_free(dev, cnt); 5388 return; 5389 } 5390 DRV_LOG(ERR, 5391 "port %u counter free is not supported.", 5392 dev->data->port_id); 5393 } 5394 5395 /** 5396 * Query counter statistics. 5397 * 5398 * @param[in] dev 5399 * Pointer to Ethernet device structure. 5400 * @param[in] cnt 5401 * Pointer to counter to query. 5402 * @param[in] clear 5403 * Set to clear counter statistics. 5404 * @param[out] pkts 5405 * The counter hits packets number to save. 5406 * @param[out] bytes 5407 * The counter hits bytes number to save. 5408 * 5409 * @return 5410 * 0 on success, a negative errno value otherwise. 5411 */ 5412 int 5413 mlx5_counter_query(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt, 5414 bool clear, uint64_t *pkts, uint64_t *bytes) 5415 { 5416 const struct mlx5_flow_driver_ops *fops; 5417 struct rte_flow_attr attr = { .transfer = 0 }; 5418 5419 if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) { 5420 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5421 return fops->counter_query(dev, cnt, clear, pkts, bytes); 5422 } 5423 DRV_LOG(ERR, 5424 "port %u counter query is not supported.", 5425 dev->data->port_id); 5426 return -ENOTSUP; 5427 } 5428 5429 #define MLX5_POOL_QUERY_FREQ_US 1000000 5430 5431 /** 5432 * Set the periodic procedure for triggering asynchronous batch queries for all 5433 * the counter pools. 5434 * 5435 * @param[in] sh 5436 * Pointer to mlx5_ibv_shared object. 5437 */ 5438 void 5439 mlx5_set_query_alarm(struct mlx5_ibv_shared *sh) 5440 { 5441 struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(sh, 0, 0); 5442 uint32_t pools_n = rte_atomic16_read(&cont->n_valid); 5443 uint32_t us; 5444 5445 cont = MLX5_CNT_CONTAINER(sh, 1, 0); 5446 pools_n += rte_atomic16_read(&cont->n_valid); 5447 us = MLX5_POOL_QUERY_FREQ_US / pools_n; 5448 DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us); 5449 if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) { 5450 sh->cmng.query_thread_on = 0; 5451 DRV_LOG(ERR, "Cannot reinitialize query alarm"); 5452 } else { 5453 sh->cmng.query_thread_on = 1; 5454 } 5455 } 5456 5457 /** 5458 * The periodic procedure for triggering asynchronous batch queries for all the 5459 * counter pools. This function is probably called by the host thread. 5460 * 5461 * @param[in] arg 5462 * The parameter for the alarm process. 
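 *
 * Each invocation issues at most one asynchronous counter query (for the
 * pool at the saved pool_index), advances to the next pool/container and
 * re-arms itself through mlx5_set_query_alarm().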
5463 */ 5464 void 5465 mlx5_flow_query_alarm(void *arg) 5466 { 5467 struct mlx5_ibv_shared *sh = arg; 5468 struct mlx5_devx_obj *dcs; 5469 uint16_t offset; 5470 int ret; 5471 uint8_t batch = sh->cmng.batch; 5472 uint16_t pool_index = sh->cmng.pool_index; 5473 struct mlx5_pools_container *cont; 5474 struct mlx5_pools_container *mcont; 5475 struct mlx5_flow_counter_pool *pool; 5476 5477 if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES) 5478 goto set_alarm; 5479 next_container: 5480 cont = MLX5_CNT_CONTAINER(sh, batch, 1); 5481 mcont = MLX5_CNT_CONTAINER(sh, batch, 0); 5482 /* Check if resize was done and need to flip a container. */ 5483 if (cont != mcont) { 5484 if (cont->pools) { 5485 /* Clean the old container. */ 5486 rte_free(cont->pools); 5487 memset(cont, 0, sizeof(*cont)); 5488 } 5489 rte_cio_wmb(); 5490 /* Flip the host container. */ 5491 sh->cmng.mhi[batch] ^= (uint8_t)2; 5492 cont = mcont; 5493 } 5494 if (!cont->pools) { 5495 /* 2 empty containers case is unexpected. */ 5496 if (unlikely(batch != sh->cmng.batch)) 5497 goto set_alarm; 5498 batch ^= 0x1; 5499 pool_index = 0; 5500 goto next_container; 5501 } 5502 pool = cont->pools[pool_index]; 5503 if (pool->raw_hw) 5504 /* There is a pool query in progress. */ 5505 goto set_alarm; 5506 pool->raw_hw = 5507 LIST_FIRST(&sh->cmng.free_stat_raws); 5508 if (!pool->raw_hw) 5509 /* No free counter statistics raw memory. */ 5510 goto set_alarm; 5511 dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read 5512 (&pool->a64_dcs); 5513 offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL; 5514 ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL - 5515 offset, NULL, NULL, 5516 pool->raw_hw->mem_mng->dm->id, 5517 (void *)(uintptr_t) 5518 (pool->raw_hw->data + offset), 5519 sh->devx_comp, 5520 (uint64_t)(uintptr_t)pool); 5521 if (ret) { 5522 DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID" 5523 " %d", pool->min_dcs->id); 5524 pool->raw_hw = NULL; 5525 goto set_alarm; 5526 } 5527 pool->raw_hw->min_dcs_id = dcs->id; 5528 LIST_REMOVE(pool->raw_hw, next); 5529 sh->cmng.pending_queries++; 5530 pool_index++; 5531 if (pool_index >= rte_atomic16_read(&cont->n_valid)) { 5532 batch ^= 0x1; 5533 pool_index = 0; 5534 } 5535 set_alarm: 5536 sh->cmng.batch = batch; 5537 sh->cmng.pool_index = pool_index; 5538 mlx5_set_query_alarm(sh); 5539 } 5540 5541 /** 5542 * Handler for the HW respond about ready values from an asynchronous batch 5543 * query. This function is probably called by the host thread. 5544 * 5545 * @param[in] sh 5546 * The pointer to the shared IB device context. 5547 * @param[in] async_id 5548 * The Devx async ID. 5549 * @param[in] status 5550 * The status of the completion. 5551 */ 5552 void 5553 mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh, 5554 uint64_t async_id, int status) 5555 { 5556 struct mlx5_flow_counter_pool *pool = 5557 (struct mlx5_flow_counter_pool *)(uintptr_t)async_id; 5558 struct mlx5_counter_stats_raw *raw_to_free; 5559 5560 if (unlikely(status)) { 5561 raw_to_free = pool->raw_hw; 5562 } else { 5563 raw_to_free = pool->raw; 5564 rte_spinlock_lock(&pool->sl); 5565 pool->raw = pool->raw_hw; 5566 rte_spinlock_unlock(&pool->sl); 5567 rte_atomic64_add(&pool->query_gen, 1); 5568 /* Be sure the new raw counters data is updated in memory. */ 5569 rte_cio_wmb(); 5570 } 5571 LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next); 5572 pool->raw_hw = NULL; 5573 sh->cmng.pending_queries--; 5574 } 5575 5576 /** 5577 * Translate the rte_flow group index to HW table value. 
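 *
 * For rules requested on the transfer (E-Switch) domain through the
 * external API the group is shifted by one (group N maps to table N + 1),
 * keeping root table 0 reserved for the jump rule created by
 * mlx5_flow_create_esw_table_zero_flow(); otherwise the group index is
 * used as the table value directly.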
5578 * 5579 * @param[in] attributes 5580 * Pointer to flow attributes 5581 * @param[in] external 5582 * Value is part of flow rule created by request external to PMD. 5583 * @param[in] group 5584 * rte_flow group index value. 5585 * @param[out] table 5586 * HW table value. 5587 * @param[out] error 5588 * Pointer to error structure. 5589 * 5590 * @return 5591 * 0 on success, a negative errno value otherwise and rte_errno is set. 5592 */ 5593 int 5594 mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external, 5595 uint32_t group, uint32_t *table, 5596 struct rte_flow_error *error) 5597 { 5598 if (attributes->transfer && external) { 5599 if (group == UINT32_MAX) 5600 return rte_flow_error_set 5601 (error, EINVAL, 5602 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, 5603 NULL, 5604 "group index not supported"); 5605 *table = group + 1; 5606 } else { 5607 *table = group; 5608 } 5609 return 0; 5610 } 5611 5612 /** 5613 * Discover availability of metadata reg_c's. 5614 * 5615 * Iteratively use test flows to check availability. 5616 * 5617 * @param[in] dev 5618 * Pointer to the Ethernet device structure. 5619 * 5620 * @return 5621 * 0 on success, a negative errno value otherwise and rte_errno is set. 5622 */ 5623 int 5624 mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev) 5625 { 5626 struct mlx5_priv *priv = dev->data->dev_private; 5627 struct mlx5_dev_config *config = &priv->config; 5628 enum modify_reg idx; 5629 int n = 0; 5630 5631 /* reg_c[0] and reg_c[1] are reserved. */ 5632 config->flow_mreg_c[n++] = REG_C_0; 5633 config->flow_mreg_c[n++] = REG_C_1; 5634 /* Discover availability of other reg_c's. */ 5635 for (idx = REG_C_2; idx <= REG_C_7; ++idx) { 5636 struct rte_flow_attr attr = { 5637 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, 5638 .priority = MLX5_FLOW_PRIO_RSVD, 5639 .ingress = 1, 5640 }; 5641 struct rte_flow_item items[] = { 5642 [0] = { 5643 .type = RTE_FLOW_ITEM_TYPE_END, 5644 }, 5645 }; 5646 struct rte_flow_action actions[] = { 5647 [0] = { 5648 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 5649 .conf = &(struct mlx5_flow_action_copy_mreg){ 5650 .src = REG_C_1, 5651 .dst = idx, 5652 }, 5653 }, 5654 [1] = { 5655 .type = RTE_FLOW_ACTION_TYPE_JUMP, 5656 .conf = &(struct rte_flow_action_jump){ 5657 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, 5658 }, 5659 }, 5660 [2] = { 5661 .type = RTE_FLOW_ACTION_TYPE_END, 5662 }, 5663 }; 5664 struct rte_flow *flow; 5665 struct rte_flow_error error; 5666 5667 if (!config->dv_flow_en) 5668 break; 5669 /* Create internal flow, validation skips copy action. */ 5670 flow = flow_list_create(dev, NULL, &attr, items, 5671 actions, false, &error); 5672 if (!flow) 5673 continue; 5674 if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL)) 5675 config->flow_mreg_c[n++] = idx; 5676 flow_list_destroy(dev, NULL, flow); 5677 } 5678 for (; n < MLX5_MREG_C_NUM; ++n) 5679 config->flow_mreg_c[n] = REG_NONE; 5680 return 0; 5681 } 5682