1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2016 6WIND S.A. 3 * Copyright 2016 Mellanox Technologies, Ltd 4 */ 5 6 #include <netinet/in.h> 7 #include <sys/queue.h> 8 #include <stdalign.h> 9 #include <stdint.h> 10 #include <string.h> 11 #include <stdbool.h> 12 13 /* Verbs header. */ 14 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ 15 #ifdef PEDANTIC 16 #pragma GCC diagnostic ignored "-Wpedantic" 17 #endif 18 #include <infiniband/verbs.h> 19 #ifdef PEDANTIC 20 #pragma GCC diagnostic error "-Wpedantic" 21 #endif 22 23 #include <rte_common.h> 24 #include <rte_ether.h> 25 #include <rte_ethdev_driver.h> 26 #include <rte_flow.h> 27 #include <rte_flow_driver.h> 28 #include <rte_malloc.h> 29 #include <rte_ip.h> 30 31 #include <mlx5_glue.h> 32 #include <mlx5_devx_cmds.h> 33 #include <mlx5_prm.h> 34 35 #include "mlx5_defs.h" 36 #include "mlx5.h" 37 #include "mlx5_flow.h" 38 #include "mlx5_rxtx.h" 39 40 /* Dev ops structure defined in mlx5.c */ 41 extern const struct eth_dev_ops mlx5_dev_ops; 42 extern const struct eth_dev_ops mlx5_dev_ops_isolate; 43 44 /** Device flow drivers. */ 45 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 46 extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops; 47 #endif 48 extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops; 49 50 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops; 51 52 const struct mlx5_flow_driver_ops *flow_drv_ops[] = { 53 [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops, 54 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 55 [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops, 56 #endif 57 [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops, 58 [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops 59 }; 60 61 enum mlx5_expansion { 62 MLX5_EXPANSION_ROOT, 63 MLX5_EXPANSION_ROOT_OUTER, 64 MLX5_EXPANSION_ROOT_ETH_VLAN, 65 MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN, 66 MLX5_EXPANSION_OUTER_ETH, 67 MLX5_EXPANSION_OUTER_ETH_VLAN, 68 MLX5_EXPANSION_OUTER_VLAN, 69 MLX5_EXPANSION_OUTER_IPV4, 70 MLX5_EXPANSION_OUTER_IPV4_UDP, 71 MLX5_EXPANSION_OUTER_IPV4_TCP, 72 MLX5_EXPANSION_OUTER_IPV6, 73 MLX5_EXPANSION_OUTER_IPV6_UDP, 74 MLX5_EXPANSION_OUTER_IPV6_TCP, 75 MLX5_EXPANSION_VXLAN, 76 MLX5_EXPANSION_VXLAN_GPE, 77 MLX5_EXPANSION_GRE, 78 MLX5_EXPANSION_MPLS, 79 MLX5_EXPANSION_ETH, 80 MLX5_EXPANSION_ETH_VLAN, 81 MLX5_EXPANSION_VLAN, 82 MLX5_EXPANSION_IPV4, 83 MLX5_EXPANSION_IPV4_UDP, 84 MLX5_EXPANSION_IPV4_TCP, 85 MLX5_EXPANSION_IPV6, 86 MLX5_EXPANSION_IPV6_UDP, 87 MLX5_EXPANSION_IPV6_TCP, 88 }; 89 90 /** Supported expansion of items. 
*/ 91 static const struct rte_flow_expand_node mlx5_support_expansion[] = { 92 [MLX5_EXPANSION_ROOT] = { 93 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, 94 MLX5_EXPANSION_IPV4, 95 MLX5_EXPANSION_IPV6), 96 .type = RTE_FLOW_ITEM_TYPE_END, 97 }, 98 [MLX5_EXPANSION_ROOT_OUTER] = { 99 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH, 100 MLX5_EXPANSION_OUTER_IPV4, 101 MLX5_EXPANSION_OUTER_IPV6), 102 .type = RTE_FLOW_ITEM_TYPE_END, 103 }, 104 [MLX5_EXPANSION_ROOT_ETH_VLAN] = { 105 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN), 106 .type = RTE_FLOW_ITEM_TYPE_END, 107 }, 108 [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = { 109 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN), 110 .type = RTE_FLOW_ITEM_TYPE_END, 111 }, 112 [MLX5_EXPANSION_OUTER_ETH] = { 113 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, 114 MLX5_EXPANSION_OUTER_IPV6, 115 MLX5_EXPANSION_MPLS), 116 .type = RTE_FLOW_ITEM_TYPE_ETH, 117 .rss_types = 0, 118 }, 119 [MLX5_EXPANSION_OUTER_ETH_VLAN] = { 120 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN), 121 .type = RTE_FLOW_ITEM_TYPE_ETH, 122 .rss_types = 0, 123 }, 124 [MLX5_EXPANSION_OUTER_VLAN] = { 125 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, 126 MLX5_EXPANSION_OUTER_IPV6), 127 .type = RTE_FLOW_ITEM_TYPE_VLAN, 128 }, 129 [MLX5_EXPANSION_OUTER_IPV4] = { 130 .next = RTE_FLOW_EXPAND_RSS_NEXT 131 (MLX5_EXPANSION_OUTER_IPV4_UDP, 132 MLX5_EXPANSION_OUTER_IPV4_TCP, 133 MLX5_EXPANSION_GRE, 134 MLX5_EXPANSION_IPV4, 135 MLX5_EXPANSION_IPV6), 136 .type = RTE_FLOW_ITEM_TYPE_IPV4, 137 .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | 138 ETH_RSS_NONFRAG_IPV4_OTHER, 139 }, 140 [MLX5_EXPANSION_OUTER_IPV4_UDP] = { 141 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, 142 MLX5_EXPANSION_VXLAN_GPE), 143 .type = RTE_FLOW_ITEM_TYPE_UDP, 144 .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, 145 }, 146 [MLX5_EXPANSION_OUTER_IPV4_TCP] = { 147 .type = RTE_FLOW_ITEM_TYPE_TCP, 148 .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, 149 }, 150 [MLX5_EXPANSION_OUTER_IPV6] = { 151 .next = RTE_FLOW_EXPAND_RSS_NEXT 152 (MLX5_EXPANSION_OUTER_IPV6_UDP, 153 MLX5_EXPANSION_OUTER_IPV6_TCP, 154 MLX5_EXPANSION_IPV4, 155 MLX5_EXPANSION_IPV6), 156 .type = RTE_FLOW_ITEM_TYPE_IPV6, 157 .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | 158 ETH_RSS_NONFRAG_IPV6_OTHER, 159 }, 160 [MLX5_EXPANSION_OUTER_IPV6_UDP] = { 161 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, 162 MLX5_EXPANSION_VXLAN_GPE), 163 .type = RTE_FLOW_ITEM_TYPE_UDP, 164 .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, 165 }, 166 [MLX5_EXPANSION_OUTER_IPV6_TCP] = { 167 .type = RTE_FLOW_ITEM_TYPE_TCP, 168 .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, 169 }, 170 [MLX5_EXPANSION_VXLAN] = { 171 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, 172 MLX5_EXPANSION_IPV4, 173 MLX5_EXPANSION_IPV6), 174 .type = RTE_FLOW_ITEM_TYPE_VXLAN, 175 }, 176 [MLX5_EXPANSION_VXLAN_GPE] = { 177 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, 178 MLX5_EXPANSION_IPV4, 179 MLX5_EXPANSION_IPV6), 180 .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE, 181 }, 182 [MLX5_EXPANSION_GRE] = { 183 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4), 184 .type = RTE_FLOW_ITEM_TYPE_GRE, 185 }, 186 [MLX5_EXPANSION_MPLS] = { 187 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 188 MLX5_EXPANSION_IPV6), 189 .type = RTE_FLOW_ITEM_TYPE_MPLS, 190 }, 191 [MLX5_EXPANSION_ETH] = { 192 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 193 MLX5_EXPANSION_IPV6), 194 .type = RTE_FLOW_ITEM_TYPE_ETH, 195 }, 196 [MLX5_EXPANSION_ETH_VLAN] = { 197 .next = 
RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN), 198 .type = RTE_FLOW_ITEM_TYPE_ETH, 199 }, 200 [MLX5_EXPANSION_VLAN] = { 201 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 202 MLX5_EXPANSION_IPV6), 203 .type = RTE_FLOW_ITEM_TYPE_VLAN, 204 }, 205 [MLX5_EXPANSION_IPV4] = { 206 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP, 207 MLX5_EXPANSION_IPV4_TCP), 208 .type = RTE_FLOW_ITEM_TYPE_IPV4, 209 .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | 210 ETH_RSS_NONFRAG_IPV4_OTHER, 211 }, 212 [MLX5_EXPANSION_IPV4_UDP] = { 213 .type = RTE_FLOW_ITEM_TYPE_UDP, 214 .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, 215 }, 216 [MLX5_EXPANSION_IPV4_TCP] = { 217 .type = RTE_FLOW_ITEM_TYPE_TCP, 218 .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, 219 }, 220 [MLX5_EXPANSION_IPV6] = { 221 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP, 222 MLX5_EXPANSION_IPV6_TCP), 223 .type = RTE_FLOW_ITEM_TYPE_IPV6, 224 .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | 225 ETH_RSS_NONFRAG_IPV6_OTHER, 226 }, 227 [MLX5_EXPANSION_IPV6_UDP] = { 228 .type = RTE_FLOW_ITEM_TYPE_UDP, 229 .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, 230 }, 231 [MLX5_EXPANSION_IPV6_TCP] = { 232 .type = RTE_FLOW_ITEM_TYPE_TCP, 233 .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, 234 }, 235 }; 236 237 static const struct rte_flow_ops mlx5_flow_ops = { 238 .validate = mlx5_flow_validate, 239 .create = mlx5_flow_create, 240 .destroy = mlx5_flow_destroy, 241 .flush = mlx5_flow_flush, 242 .isolate = mlx5_flow_isolate, 243 .query = mlx5_flow_query, 244 .dev_dump = mlx5_flow_dev_dump, 245 }; 246 247 /* Convert FDIR request to Generic flow. */ 248 struct mlx5_fdir { 249 struct rte_flow_attr attr; 250 struct rte_flow_item items[4]; 251 struct rte_flow_item_eth l2; 252 struct rte_flow_item_eth l2_mask; 253 union { 254 struct rte_flow_item_ipv4 ipv4; 255 struct rte_flow_item_ipv6 ipv6; 256 } l3; 257 union { 258 struct rte_flow_item_ipv4 ipv4; 259 struct rte_flow_item_ipv6 ipv6; 260 } l3_mask; 261 union { 262 struct rte_flow_item_udp udp; 263 struct rte_flow_item_tcp tcp; 264 } l4; 265 union { 266 struct rte_flow_item_udp udp; 267 struct rte_flow_item_tcp tcp; 268 } l4_mask; 269 struct rte_flow_action actions[2]; 270 struct rte_flow_action_queue queue; 271 }; 272 273 /* Map of Verbs to Flow priority with 8 Verbs priorities. */ 274 static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = { 275 { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 }, 276 }; 277 278 /* Map of Verbs to Flow priority with 16 Verbs priorities. */ 279 static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = { 280 { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 }, 281 { 9, 10, 11 }, { 12, 13, 14 }, 282 }; 283 284 /* Tunnel information. */ 285 struct mlx5_flow_tunnel_info { 286 uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */ 287 uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). 
 */
};

static struct mlx5_flow_tunnel_info tunnels_info[] = {
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GENEVE,
		.ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GRE,
		.ptype = RTE_PTYPE_TUNNEL_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_NVGRE,
		.ptype = RTE_PTYPE_TUNNEL_NVGRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPIP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GTP,
		.ptype = RTE_PTYPE_TUNNEL_GTPU,
	},
};

/**
 * Translate tag ID to register.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] feature
 *   The feature that requests the register.
 * @param[in] id
 *   The requested register ID.
 * @param[out] error
 *   Error description in case of failure.
 *
 * @return
 *   The requested register on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
		     enum mlx5_feature_name feature,
		     uint32_t id,
		     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	enum modify_reg start_reg;
	bool skip_mtr_reg = false;

	switch (feature) {
	case MLX5_HAIRPIN_RX:
		return REG_B;
	case MLX5_HAIRPIN_TX:
		return REG_A;
	case MLX5_METADATA_RX:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_B;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_METADATA_TX:
		return REG_A;
	case MLX5_METADATA_FDB:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NONE;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_FLOW_MARK:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NONE;
		case MLX5_XMETA_MODE_META16:
			return REG_C_1;
		case MLX5_XMETA_MODE_META32:
			return REG_C_0;
		}
		break;
	case MLX5_MTR_SFX:
		/*
		 * If meter color and flow match share one register, flow match
		 * should use the meter color register for match.
		 */
		if (priv->mtr_reg_share)
			return priv->mtr_color_reg;
		else
			return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
			       REG_C_3;
	case MLX5_MTR_COLOR:
		MLX5_ASSERT(priv->mtr_color_reg != REG_NONE);
		return priv->mtr_color_reg;
	case MLX5_COPY_MARK:
		/*
		 * The metadata copy mark register is only used in the meter
		 * suffix sub-flow when a meter is present, so it is safe to
		 * share the same register.
		 */
		return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
	case MLX5_APP_TAG:
		/*
		 * If a meter is enabled, it engages registers for both color
		 * match and flow match. If the meter color match does not use
		 * REG_C_2, the REG_C_x used by the meter color match must be
		 * skipped.
		 * If no meter is enabled, all available registers can be used.
		 */
		start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
			    (priv->mtr_reg_share ? REG_C_3 : REG_C_4);
		skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
		if (id > (REG_C_7 - start_reg))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "invalid tag id");
		if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NONE)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		/*
		 * This case means the meter is using a REG_C_x greater than 2.
		 * Take care not to conflict with the meter color REG_C_x.
		 * If the available index REG_C_y >= REG_C_x, skip the
		 * color register.
		 */
		if (skip_mtr_reg && config->flow_mreg_c
		    [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
			if (config->flow_mreg_c
			    [id + 1 + start_reg - REG_C_0] != REG_NONE)
				return config->flow_mreg_c
				       [id + 1 + start_reg - REG_C_0];
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		}
		return config->flow_mreg_c[id + start_reg - REG_C_0];
	}
	MLX5_ASSERT(false);
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, "invalid feature name");
}

/**
 * Check extensive flow metadata register support.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 * @return
 *   True if device supports extensive flow metadata register, otherwise false.
 */
bool
mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;

	/*
	 * Having an available reg_c can be regarded as supporting extensive
	 * flow metadata registers, which means:
	 * - metadata register copy action by modify header.
	 * - 16 modify header actions are supported.
	 * - reg_c's are preserved across different domains (FDB and NIC) on
	 *   packet loopback by flow lookup miss.
	 */
	return config->flow_mreg_c[2] != REG_NONE;
}

/**
 * Discover the maximum number of priorities available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
			.port = (uint8_t)priv->ibv_port,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
	uint16_t vprio[] = { 8, 16 };
	int i;
	int priority = 0;

	if (!drop) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	for (i = 0; i != RTE_DIM(vprio); i++) {
		flow_attr.attr.priority = vprio[i] - 1;
		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
		priority = vprio[i];
	}
	mlx5_hrxq_drop_release(dev);
	switch (priority) {
	case 8:
		priority = RTE_DIM(priority_map_3);
		break;
	case 16:
		priority = RTE_DIM(priority_map_5);
		break;
	default:
		rte_errno = ENOTSUP;
		DRV_LOG(ERR,
			"port %u verbs maximum priority: %d expected 8/16",
			dev->data->port_id, priority);
		return -rte_errno;
	}
	DRV_LOG(INFO, "port %u flow maximum priority: %d",
		dev->data->port_id, priority);
	return priority;
}

/**
 * Adjust flow priority based on the highest layer and the requested priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] priority
 *   The rule base priority.
 * @param[in] subpriority
 *   The priority based on the items.
 *
 * @return
 *   The new priority.
 */
uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
				   uint32_t subpriority)
{
	uint32_t res = 0;
	struct mlx5_priv *priv = dev->data->dev_private;

	switch (priv->config.flow_prio) {
	case RTE_DIM(priority_map_3):
		res = priority_map_3[priority][subpriority];
		break;
	case RTE_DIM(priority_map_5):
		res = priority_map_5[priority][subpriority];
		break;
	}
	return res;
}

/**
 * Verify the @p item specifications (spec, last, mask) are compatible with the
 * NIC capabilities.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] mask
 *   @p item->mask or flow default bit-masks.
 * @param[in] nic_mask
 *   Bit-masks covering the fields supported by the NIC, to compare with the
 *   user mask.
 * @param[in] size
 *   Bit-mask size in bytes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
602 */ 603 int 604 mlx5_flow_item_acceptable(const struct rte_flow_item *item, 605 const uint8_t *mask, 606 const uint8_t *nic_mask, 607 unsigned int size, 608 struct rte_flow_error *error) 609 { 610 unsigned int i; 611 612 MLX5_ASSERT(nic_mask); 613 for (i = 0; i < size; ++i) 614 if ((nic_mask[i] | mask[i]) != nic_mask[i]) 615 return rte_flow_error_set(error, ENOTSUP, 616 RTE_FLOW_ERROR_TYPE_ITEM, 617 item, 618 "mask enables non supported" 619 " bits"); 620 if (!item->spec && (item->mask || item->last)) 621 return rte_flow_error_set(error, EINVAL, 622 RTE_FLOW_ERROR_TYPE_ITEM, item, 623 "mask/last without a spec is not" 624 " supported"); 625 if (item->spec && item->last) { 626 uint8_t spec[size]; 627 uint8_t last[size]; 628 unsigned int i; 629 int ret; 630 631 for (i = 0; i < size; ++i) { 632 spec[i] = ((const uint8_t *)item->spec)[i] & mask[i]; 633 last[i] = ((const uint8_t *)item->last)[i] & mask[i]; 634 } 635 ret = memcmp(spec, last, size); 636 if (ret != 0) 637 return rte_flow_error_set(error, EINVAL, 638 RTE_FLOW_ERROR_TYPE_ITEM, 639 item, 640 "range is not valid"); 641 } 642 return 0; 643 } 644 645 /** 646 * Adjust the hash fields according to the @p flow information. 647 * 648 * @param[in] dev_flow. 649 * Pointer to the mlx5_flow. 650 * @param[in] tunnel 651 * 1 when the hash field is for a tunnel item. 652 * @param[in] layer_types 653 * ETH_RSS_* types. 654 * @param[in] hash_fields 655 * Item hash fields. 656 * 657 * @return 658 * The hash fields that should be used. 659 */ 660 uint64_t 661 mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow, 662 int tunnel __rte_unused, uint64_t layer_types, 663 uint64_t hash_fields) 664 { 665 struct rte_flow *flow = dev_flow->flow; 666 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 667 int rss_request_inner = flow->rss.level >= 2; 668 669 /* Check RSS hash level for tunnel. */ 670 if (tunnel && rss_request_inner) 671 hash_fields |= IBV_RX_HASH_INNER; 672 else if (tunnel || rss_request_inner) 673 return 0; 674 #endif 675 /* Check if requested layer matches RSS hash fields. */ 676 if (!(flow->rss.types & layer_types)) 677 return 0; 678 return hash_fields; 679 } 680 681 /** 682 * Lookup and set the ptype in the data Rx part. A single Ptype can be used, 683 * if several tunnel rules are used on this queue, the tunnel ptype will be 684 * cleared. 685 * 686 * @param rxq_ctrl 687 * Rx queue to update. 688 */ 689 static void 690 flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) 691 { 692 unsigned int i; 693 uint32_t tunnel_ptype = 0; 694 695 /* Look up for the ptype to use. */ 696 for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) { 697 if (!rxq_ctrl->flow_tunnels_n[i]) 698 continue; 699 if (!tunnel_ptype) { 700 tunnel_ptype = tunnels_info[i].ptype; 701 } else { 702 tunnel_ptype = 0; 703 break; 704 } 705 } 706 rxq_ctrl->rxq.tunnel = tunnel_ptype; 707 } 708 709 /** 710 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the devive 711 * flow. 712 * 713 * @param[in] dev 714 * Pointer to the Ethernet device structure. 715 * @param[in] flow 716 * Pointer to flow structure. 717 * @param[in] dev_handle 718 * Pointer to device flow handle structure. 
 */
static void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow,
		       struct mlx5_flow_handle *dev_handle)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const int mark = !!(dev_handle->act_flags &
			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	for (i = 0; i != flow->rss.queue_num; ++i) {
		int idx = (*flow->rss.queue)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		/*
		 * To support metadata register copy on Tx loopback,
		 * this must always be enabled (metadata may arrive
		 * from another port, not only from local flows).
		 */
		if (priv->config.dv_flow_en &&
		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    mlx5_flow_ext_mreg_supported(dev)) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n = 1;
		} else if (mark) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n++;
		}
		if (tunnel) {
			unsigned int j;

			/* Increase the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_handle->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]++;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Pointer to flow structure.
 */
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t handle_idx;
	struct mlx5_flow_handle *dev_handle;

	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, dev_handle, next)
		flow_drv_rxq_flags_set(dev, flow, dev_handle);
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * device flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to flow structure.
 * @param[in] dev_handle
 *   Pointer to the device flow handle structure.
 */
static void
flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow,
			struct mlx5_flow_handle *dev_handle)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const int mark = !!(dev_handle->act_flags &
			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	MLX5_ASSERT(dev->data->dev_started);
	for (i = 0; i != flow->rss.queue_num; ++i) {
		int idx = (*flow->rss.queue)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		if (priv->config.dv_flow_en &&
		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    mlx5_flow_ext_mreg_supported(dev)) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n = 1;
		} else if (mark) {
			rxq_ctrl->flow_mark_n--;
			rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
		}
		if (tunnel) {
			unsigned int j;

			/* Decrease the counter matching the flow.
*/ 828 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { 829 if ((tunnels_info[j].tunnel & 830 dev_handle->layers) == 831 tunnels_info[j].tunnel) { 832 rxq_ctrl->flow_tunnels_n[j]--; 833 break; 834 } 835 } 836 flow_rxq_tunnel_ptype_update(rxq_ctrl); 837 } 838 } 839 } 840 841 /** 842 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the 843 * @p flow if no other flow uses it with the same kind of request. 844 * 845 * @param dev 846 * Pointer to Ethernet device. 847 * @param[in] flow 848 * Pointer to the flow. 849 */ 850 static void 851 flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) 852 { 853 struct mlx5_priv *priv = dev->data->dev_private; 854 uint32_t handle_idx; 855 struct mlx5_flow_handle *dev_handle; 856 857 SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, 858 handle_idx, dev_handle, next) 859 flow_drv_rxq_flags_trim(dev, flow, dev_handle); 860 } 861 862 /** 863 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues. 864 * 865 * @param dev 866 * Pointer to Ethernet device. 867 */ 868 static void 869 flow_rxq_flags_clear(struct rte_eth_dev *dev) 870 { 871 struct mlx5_priv *priv = dev->data->dev_private; 872 unsigned int i; 873 874 for (i = 0; i != priv->rxqs_n; ++i) { 875 struct mlx5_rxq_ctrl *rxq_ctrl; 876 unsigned int j; 877 878 if (!(*priv->rxqs)[i]) 879 continue; 880 rxq_ctrl = container_of((*priv->rxqs)[i], 881 struct mlx5_rxq_ctrl, rxq); 882 rxq_ctrl->flow_mark_n = 0; 883 rxq_ctrl->rxq.mark = 0; 884 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) 885 rxq_ctrl->flow_tunnels_n[j] = 0; 886 rxq_ctrl->rxq.tunnel = 0; 887 } 888 } 889 890 /* 891 * return a pointer to the desired action in the list of actions. 892 * 893 * @param[in] actions 894 * The list of actions to search the action in. 895 * @param[in] action 896 * The action to find. 897 * 898 * @return 899 * Pointer to the action in the list, if found. NULL otherwise. 900 */ 901 const struct rte_flow_action * 902 mlx5_flow_find_action(const struct rte_flow_action *actions, 903 enum rte_flow_action_type action) 904 { 905 if (actions == NULL) 906 return NULL; 907 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) 908 if (actions->type == action) 909 return actions; 910 return NULL; 911 } 912 913 /* 914 * Validate the flag action. 915 * 916 * @param[in] action_flags 917 * Bit-fields that holds the actions detected until now. 918 * @param[in] attr 919 * Attributes of flow that includes this action. 920 * @param[out] error 921 * Pointer to error structure. 922 * 923 * @return 924 * 0 on success, a negative errno value otherwise and rte_errno is set. 925 */ 926 int 927 mlx5_flow_validate_action_flag(uint64_t action_flags, 928 const struct rte_flow_attr *attr, 929 struct rte_flow_error *error) 930 { 931 if (action_flags & MLX5_FLOW_ACTION_MARK) 932 return rte_flow_error_set(error, EINVAL, 933 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 934 "can't mark and flag in same flow"); 935 if (action_flags & MLX5_FLOW_ACTION_FLAG) 936 return rte_flow_error_set(error, EINVAL, 937 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 938 "can't have 2 flag" 939 " actions in same flow"); 940 if (attr->egress) 941 return rte_flow_error_set(error, ENOTSUP, 942 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 943 "flag action not supported for " 944 "egress"); 945 return 0; 946 } 947 948 /* 949 * Validate the mark action. 950 * 951 * @param[in] action 952 * Pointer to the queue action. 953 * @param[in] action_flags 954 * Bit-fields that holds the actions detected until now. 
955 * @param[in] attr 956 * Attributes of flow that includes this action. 957 * @param[out] error 958 * Pointer to error structure. 959 * 960 * @return 961 * 0 on success, a negative errno value otherwise and rte_errno is set. 962 */ 963 int 964 mlx5_flow_validate_action_mark(const struct rte_flow_action *action, 965 uint64_t action_flags, 966 const struct rte_flow_attr *attr, 967 struct rte_flow_error *error) 968 { 969 const struct rte_flow_action_mark *mark = action->conf; 970 971 if (!mark) 972 return rte_flow_error_set(error, EINVAL, 973 RTE_FLOW_ERROR_TYPE_ACTION, 974 action, 975 "configuration cannot be null"); 976 if (mark->id >= MLX5_FLOW_MARK_MAX) 977 return rte_flow_error_set(error, EINVAL, 978 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 979 &mark->id, 980 "mark id must in 0 <= id < " 981 RTE_STR(MLX5_FLOW_MARK_MAX)); 982 if (action_flags & MLX5_FLOW_ACTION_FLAG) 983 return rte_flow_error_set(error, EINVAL, 984 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 985 "can't flag and mark in same flow"); 986 if (action_flags & MLX5_FLOW_ACTION_MARK) 987 return rte_flow_error_set(error, EINVAL, 988 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 989 "can't have 2 mark actions in same" 990 " flow"); 991 if (attr->egress) 992 return rte_flow_error_set(error, ENOTSUP, 993 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 994 "mark action not supported for " 995 "egress"); 996 return 0; 997 } 998 999 /* 1000 * Validate the drop action. 1001 * 1002 * @param[in] action_flags 1003 * Bit-fields that holds the actions detected until now. 1004 * @param[in] attr 1005 * Attributes of flow that includes this action. 1006 * @param[out] error 1007 * Pointer to error structure. 1008 * 1009 * @return 1010 * 0 on success, a negative errno value otherwise and rte_errno is set. 1011 */ 1012 int 1013 mlx5_flow_validate_action_drop(uint64_t action_flags __rte_unused, 1014 const struct rte_flow_attr *attr, 1015 struct rte_flow_error *error) 1016 { 1017 if (attr->egress) 1018 return rte_flow_error_set(error, ENOTSUP, 1019 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1020 "drop action not supported for " 1021 "egress"); 1022 return 0; 1023 } 1024 1025 /* 1026 * Validate the queue action. 1027 * 1028 * @param[in] action 1029 * Pointer to the queue action. 1030 * @param[in] action_flags 1031 * Bit-fields that holds the actions detected until now. 1032 * @param[in] dev 1033 * Pointer to the Ethernet device structure. 1034 * @param[in] attr 1035 * Attributes of flow that includes this action. 1036 * @param[out] error 1037 * Pointer to error structure. 1038 * 1039 * @return 1040 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1041 */ 1042 int 1043 mlx5_flow_validate_action_queue(const struct rte_flow_action *action, 1044 uint64_t action_flags, 1045 struct rte_eth_dev *dev, 1046 const struct rte_flow_attr *attr, 1047 struct rte_flow_error *error) 1048 { 1049 struct mlx5_priv *priv = dev->data->dev_private; 1050 const struct rte_flow_action_queue *queue = action->conf; 1051 1052 if (action_flags & MLX5_FLOW_FATE_ACTIONS) 1053 return rte_flow_error_set(error, EINVAL, 1054 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1055 "can't have 2 fate actions in" 1056 " same flow"); 1057 if (!priv->rxqs_n) 1058 return rte_flow_error_set(error, EINVAL, 1059 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1060 NULL, "No Rx queues configured"); 1061 if (queue->index >= priv->rxqs_n) 1062 return rte_flow_error_set(error, EINVAL, 1063 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1064 &queue->index, 1065 "queue index out of range"); 1066 if (!(*priv->rxqs)[queue->index]) 1067 return rte_flow_error_set(error, EINVAL, 1068 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1069 &queue->index, 1070 "queue is not configured"); 1071 if (attr->egress) 1072 return rte_flow_error_set(error, ENOTSUP, 1073 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1074 "queue action not supported for " 1075 "egress"); 1076 return 0; 1077 } 1078 1079 /* 1080 * Validate the rss action. 1081 * 1082 * @param[in] action 1083 * Pointer to the queue action. 1084 * @param[in] action_flags 1085 * Bit-fields that holds the actions detected until now. 1086 * @param[in] dev 1087 * Pointer to the Ethernet device structure. 1088 * @param[in] attr 1089 * Attributes of flow that includes this action. 1090 * @param[in] item_flags 1091 * Items that were detected. 1092 * @param[out] error 1093 * Pointer to error structure. 1094 * 1095 * @return 1096 * 0 on success, a negative errno value otherwise and rte_errno is set. 1097 */ 1098 int 1099 mlx5_flow_validate_action_rss(const struct rte_flow_action *action, 1100 uint64_t action_flags, 1101 struct rte_eth_dev *dev, 1102 const struct rte_flow_attr *attr, 1103 uint64_t item_flags, 1104 struct rte_flow_error *error) 1105 { 1106 struct mlx5_priv *priv = dev->data->dev_private; 1107 const struct rte_flow_action_rss *rss = action->conf; 1108 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1109 unsigned int i; 1110 1111 if (action_flags & MLX5_FLOW_FATE_ACTIONS) 1112 return rte_flow_error_set(error, EINVAL, 1113 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1114 "can't have 2 fate actions" 1115 " in same flow"); 1116 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT && 1117 rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) 1118 return rte_flow_error_set(error, ENOTSUP, 1119 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1120 &rss->func, 1121 "RSS hash function not supported"); 1122 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 1123 if (rss->level > 2) 1124 #else 1125 if (rss->level > 1) 1126 #endif 1127 return rte_flow_error_set(error, ENOTSUP, 1128 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1129 &rss->level, 1130 "tunnel RSS is not supported"); 1131 /* allow RSS key_len 0 in case of NULL (default) RSS key. 
*/ 1132 if (rss->key_len == 0 && rss->key != NULL) 1133 return rte_flow_error_set(error, ENOTSUP, 1134 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1135 &rss->key_len, 1136 "RSS hash key length 0"); 1137 if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN) 1138 return rte_flow_error_set(error, ENOTSUP, 1139 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1140 &rss->key_len, 1141 "RSS hash key too small"); 1142 if (rss->key_len > MLX5_RSS_HASH_KEY_LEN) 1143 return rte_flow_error_set(error, ENOTSUP, 1144 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1145 &rss->key_len, 1146 "RSS hash key too large"); 1147 if (rss->queue_num > priv->config.ind_table_max_size) 1148 return rte_flow_error_set(error, ENOTSUP, 1149 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1150 &rss->queue_num, 1151 "number of queues too large"); 1152 if (rss->types & MLX5_RSS_HF_MASK) 1153 return rte_flow_error_set(error, ENOTSUP, 1154 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1155 &rss->types, 1156 "some RSS protocols are not" 1157 " supported"); 1158 if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) && 1159 !(rss->types & ETH_RSS_IP)) 1160 return rte_flow_error_set(error, EINVAL, 1161 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 1162 "L3 partial RSS requested but L3 RSS" 1163 " type not specified"); 1164 if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) && 1165 !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP))) 1166 return rte_flow_error_set(error, EINVAL, 1167 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 1168 "L4 partial RSS requested but L4 RSS" 1169 " type not specified"); 1170 if (!priv->rxqs_n) 1171 return rte_flow_error_set(error, EINVAL, 1172 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1173 NULL, "No Rx queues configured"); 1174 if (!rss->queue_num) 1175 return rte_flow_error_set(error, EINVAL, 1176 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1177 NULL, "No queues configured"); 1178 for (i = 0; i != rss->queue_num; ++i) { 1179 if (rss->queue[i] >= priv->rxqs_n) 1180 return rte_flow_error_set 1181 (error, EINVAL, 1182 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1183 &rss->queue[i], "queue index out of range"); 1184 if (!(*priv->rxqs)[rss->queue[i]]) 1185 return rte_flow_error_set 1186 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1187 &rss->queue[i], "queue is not configured"); 1188 } 1189 if (attr->egress) 1190 return rte_flow_error_set(error, ENOTSUP, 1191 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1192 "rss action not supported for " 1193 "egress"); 1194 if (rss->level > 1 && !tunnel) 1195 return rte_flow_error_set(error, EINVAL, 1196 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 1197 "inner RSS is not supported for " 1198 "non-tunnel flows"); 1199 return 0; 1200 } 1201 1202 /* 1203 * Validate the count action. 1204 * 1205 * @param[in] dev 1206 * Pointer to the Ethernet device structure. 1207 * @param[in] attr 1208 * Attributes of flow that includes this action. 1209 * @param[out] error 1210 * Pointer to error structure. 1211 * 1212 * @return 1213 * 0 on success, a negative errno value otherwise and rte_errno is set. 1214 */ 1215 int 1216 mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused, 1217 const struct rte_flow_attr *attr, 1218 struct rte_flow_error *error) 1219 { 1220 if (attr->egress) 1221 return rte_flow_error_set(error, ENOTSUP, 1222 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1223 "count action not supported for " 1224 "egress"); 1225 return 0; 1226 } 1227 1228 /** 1229 * Verify the @p attributes will be correctly understood by the NIC and store 1230 * them in the @p flow if everything is correct. 1231 * 1232 * @param[in] dev 1233 * Pointer to the Ethernet device structure. 
1234 * @param[in] attributes 1235 * Pointer to flow attributes 1236 * @param[out] error 1237 * Pointer to error structure. 1238 * 1239 * @return 1240 * 0 on success, a negative errno value otherwise and rte_errno is set. 1241 */ 1242 int 1243 mlx5_flow_validate_attributes(struct rte_eth_dev *dev, 1244 const struct rte_flow_attr *attributes, 1245 struct rte_flow_error *error) 1246 { 1247 struct mlx5_priv *priv = dev->data->dev_private; 1248 uint32_t priority_max = priv->config.flow_prio - 1; 1249 1250 if (attributes->group) 1251 return rte_flow_error_set(error, ENOTSUP, 1252 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, 1253 NULL, "groups is not supported"); 1254 if (attributes->priority != MLX5_FLOW_PRIO_RSVD && 1255 attributes->priority >= priority_max) 1256 return rte_flow_error_set(error, ENOTSUP, 1257 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, 1258 NULL, "priority out of range"); 1259 if (attributes->egress) 1260 return rte_flow_error_set(error, ENOTSUP, 1261 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1262 "egress is not supported"); 1263 if (attributes->transfer && !priv->config.dv_esw_en) 1264 return rte_flow_error_set(error, ENOTSUP, 1265 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, 1266 NULL, "transfer is not supported"); 1267 if (!attributes->ingress) 1268 return rte_flow_error_set(error, EINVAL, 1269 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, 1270 NULL, 1271 "ingress attribute is mandatory"); 1272 return 0; 1273 } 1274 1275 /** 1276 * Validate ICMP6 item. 1277 * 1278 * @param[in] item 1279 * Item specification. 1280 * @param[in] item_flags 1281 * Bit-fields that holds the items detected until now. 1282 * @param[out] error 1283 * Pointer to error structure. 1284 * 1285 * @return 1286 * 0 on success, a negative errno value otherwise and rte_errno is set. 1287 */ 1288 int 1289 mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item, 1290 uint64_t item_flags, 1291 uint8_t target_protocol, 1292 struct rte_flow_error *error) 1293 { 1294 const struct rte_flow_item_icmp6 *mask = item->mask; 1295 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1296 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : 1297 MLX5_FLOW_LAYER_OUTER_L3_IPV6; 1298 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1299 MLX5_FLOW_LAYER_OUTER_L4; 1300 int ret; 1301 1302 if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6) 1303 return rte_flow_error_set(error, EINVAL, 1304 RTE_FLOW_ERROR_TYPE_ITEM, item, 1305 "protocol filtering not compatible" 1306 " with ICMP6 layer"); 1307 if (!(item_flags & l3m)) 1308 return rte_flow_error_set(error, EINVAL, 1309 RTE_FLOW_ERROR_TYPE_ITEM, item, 1310 "IPv6 is mandatory to filter on" 1311 " ICMP6"); 1312 if (item_flags & l4m) 1313 return rte_flow_error_set(error, EINVAL, 1314 RTE_FLOW_ERROR_TYPE_ITEM, item, 1315 "multiple L4 layers not supported"); 1316 if (!mask) 1317 mask = &rte_flow_item_icmp6_mask; 1318 ret = mlx5_flow_item_acceptable 1319 (item, (const uint8_t *)mask, 1320 (const uint8_t *)&rte_flow_item_icmp6_mask, 1321 sizeof(struct rte_flow_item_icmp6), error); 1322 if (ret < 0) 1323 return ret; 1324 return 0; 1325 } 1326 1327 /** 1328 * Validate ICMP item. 1329 * 1330 * @param[in] item 1331 * Item specification. 1332 * @param[in] item_flags 1333 * Bit-fields that holds the items detected until now. 1334 * @param[out] error 1335 * Pointer to error structure. 1336 * 1337 * @return 1338 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1339 */ 1340 int 1341 mlx5_flow_validate_item_icmp(const struct rte_flow_item *item, 1342 uint64_t item_flags, 1343 uint8_t target_protocol, 1344 struct rte_flow_error *error) 1345 { 1346 const struct rte_flow_item_icmp *mask = item->mask; 1347 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1348 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : 1349 MLX5_FLOW_LAYER_OUTER_L3_IPV4; 1350 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1351 MLX5_FLOW_LAYER_OUTER_L4; 1352 int ret; 1353 1354 if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP) 1355 return rte_flow_error_set(error, EINVAL, 1356 RTE_FLOW_ERROR_TYPE_ITEM, item, 1357 "protocol filtering not compatible" 1358 " with ICMP layer"); 1359 if (!(item_flags & l3m)) 1360 return rte_flow_error_set(error, EINVAL, 1361 RTE_FLOW_ERROR_TYPE_ITEM, item, 1362 "IPv4 is mandatory to filter" 1363 " on ICMP"); 1364 if (item_flags & l4m) 1365 return rte_flow_error_set(error, EINVAL, 1366 RTE_FLOW_ERROR_TYPE_ITEM, item, 1367 "multiple L4 layers not supported"); 1368 if (!mask) 1369 mask = &rte_flow_item_icmp_mask; 1370 ret = mlx5_flow_item_acceptable 1371 (item, (const uint8_t *)mask, 1372 (const uint8_t *)&rte_flow_item_icmp_mask, 1373 sizeof(struct rte_flow_item_icmp), error); 1374 if (ret < 0) 1375 return ret; 1376 return 0; 1377 } 1378 1379 /** 1380 * Validate Ethernet item. 1381 * 1382 * @param[in] item 1383 * Item specification. 1384 * @param[in] item_flags 1385 * Bit-fields that holds the items detected until now. 1386 * @param[out] error 1387 * Pointer to error structure. 1388 * 1389 * @return 1390 * 0 on success, a negative errno value otherwise and rte_errno is set. 1391 */ 1392 int 1393 mlx5_flow_validate_item_eth(const struct rte_flow_item *item, 1394 uint64_t item_flags, 1395 struct rte_flow_error *error) 1396 { 1397 const struct rte_flow_item_eth *mask = item->mask; 1398 const struct rte_flow_item_eth nic_mask = { 1399 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", 1400 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", 1401 .type = RTE_BE16(0xffff), 1402 }; 1403 int ret; 1404 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1405 const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : 1406 MLX5_FLOW_LAYER_OUTER_L2; 1407 1408 if (item_flags & ethm) 1409 return rte_flow_error_set(error, ENOTSUP, 1410 RTE_FLOW_ERROR_TYPE_ITEM, item, 1411 "multiple L2 layers not supported"); 1412 if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) || 1413 (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3))) 1414 return rte_flow_error_set(error, EINVAL, 1415 RTE_FLOW_ERROR_TYPE_ITEM, item, 1416 "L2 layer should not follow " 1417 "L3 layers"); 1418 if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) || 1419 (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN))) 1420 return rte_flow_error_set(error, EINVAL, 1421 RTE_FLOW_ERROR_TYPE_ITEM, item, 1422 "L2 layer should not follow VLAN"); 1423 if (!mask) 1424 mask = &rte_flow_item_eth_mask; 1425 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1426 (const uint8_t *)&nic_mask, 1427 sizeof(struct rte_flow_item_eth), 1428 error); 1429 return ret; 1430 } 1431 1432 /** 1433 * Validate VLAN item. 1434 * 1435 * @param[in] item 1436 * Item specification. 1437 * @param[in] item_flags 1438 * Bit-fields that holds the items detected until now. 1439 * @param[in] dev 1440 * Ethernet device flow is being created on. 1441 * @param[out] error 1442 * Pointer to error structure. 
1443 * 1444 * @return 1445 * 0 on success, a negative errno value otherwise and rte_errno is set. 1446 */ 1447 int 1448 mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, 1449 uint64_t item_flags, 1450 struct rte_eth_dev *dev, 1451 struct rte_flow_error *error) 1452 { 1453 const struct rte_flow_item_vlan *spec = item->spec; 1454 const struct rte_flow_item_vlan *mask = item->mask; 1455 const struct rte_flow_item_vlan nic_mask = { 1456 .tci = RTE_BE16(UINT16_MAX), 1457 .inner_type = RTE_BE16(UINT16_MAX), 1458 }; 1459 uint16_t vlan_tag = 0; 1460 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1461 int ret; 1462 const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 | 1463 MLX5_FLOW_LAYER_INNER_L4) : 1464 (MLX5_FLOW_LAYER_OUTER_L3 | 1465 MLX5_FLOW_LAYER_OUTER_L4); 1466 const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN : 1467 MLX5_FLOW_LAYER_OUTER_VLAN; 1468 1469 if (item_flags & vlanm) 1470 return rte_flow_error_set(error, EINVAL, 1471 RTE_FLOW_ERROR_TYPE_ITEM, item, 1472 "multiple VLAN layers not supported"); 1473 else if ((item_flags & l34m) != 0) 1474 return rte_flow_error_set(error, EINVAL, 1475 RTE_FLOW_ERROR_TYPE_ITEM, item, 1476 "VLAN cannot follow L3/L4 layer"); 1477 if (!mask) 1478 mask = &rte_flow_item_vlan_mask; 1479 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1480 (const uint8_t *)&nic_mask, 1481 sizeof(struct rte_flow_item_vlan), 1482 error); 1483 if (ret) 1484 return ret; 1485 if (!tunnel && mask->tci != RTE_BE16(0x0fff)) { 1486 struct mlx5_priv *priv = dev->data->dev_private; 1487 1488 if (priv->vmwa_context) { 1489 /* 1490 * Non-NULL context means we have a virtual machine 1491 * and SR-IOV enabled, we have to create VLAN interface 1492 * to make hypervisor to setup E-Switch vport 1493 * context correctly. We avoid creating the multiple 1494 * VLAN interfaces, so we cannot support VLAN tag mask. 1495 */ 1496 return rte_flow_error_set(error, EINVAL, 1497 RTE_FLOW_ERROR_TYPE_ITEM, 1498 item, 1499 "VLAN tag mask is not" 1500 " supported in virtual" 1501 " environment"); 1502 } 1503 } 1504 if (spec) { 1505 vlan_tag = spec->tci; 1506 vlan_tag &= mask->tci; 1507 } 1508 /* 1509 * From verbs perspective an empty VLAN is equivalent 1510 * to a packet without VLAN layer. 1511 */ 1512 if (!vlan_tag) 1513 return rte_flow_error_set(error, EINVAL, 1514 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, 1515 item->spec, 1516 "VLAN cannot be empty"); 1517 return 0; 1518 } 1519 1520 /** 1521 * Validate IPV4 item. 1522 * 1523 * @param[in] item 1524 * Item specification. 1525 * @param[in] item_flags 1526 * Bit-fields that holds the items detected until now. 1527 * @param[in] acc_mask 1528 * Acceptable mask, if NULL default internal default mask 1529 * will be used to check whether item fields are supported. 1530 * @param[out] error 1531 * Pointer to error structure. 1532 * 1533 * @return 1534 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1535 */ 1536 int 1537 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, 1538 uint64_t item_flags, 1539 uint64_t last_item, 1540 uint16_t ether_type, 1541 const struct rte_flow_item_ipv4 *acc_mask, 1542 struct rte_flow_error *error) 1543 { 1544 const struct rte_flow_item_ipv4 *mask = item->mask; 1545 const struct rte_flow_item_ipv4 *spec = item->spec; 1546 const struct rte_flow_item_ipv4 nic_mask = { 1547 .hdr = { 1548 .src_addr = RTE_BE32(0xffffffff), 1549 .dst_addr = RTE_BE32(0xffffffff), 1550 .type_of_service = 0xff, 1551 .next_proto_id = 0xff, 1552 }, 1553 }; 1554 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1555 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1556 MLX5_FLOW_LAYER_OUTER_L3; 1557 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1558 MLX5_FLOW_LAYER_OUTER_L4; 1559 int ret; 1560 uint8_t next_proto = 0xFF; 1561 const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 | 1562 MLX5_FLOW_LAYER_OUTER_VLAN | 1563 MLX5_FLOW_LAYER_INNER_VLAN); 1564 1565 if ((last_item & l2_vlan) && ether_type && 1566 ether_type != RTE_ETHER_TYPE_IPV4) 1567 return rte_flow_error_set(error, EINVAL, 1568 RTE_FLOW_ERROR_TYPE_ITEM, item, 1569 "IPv4 cannot follow L2/VLAN layer " 1570 "which ether type is not IPv4"); 1571 if (item_flags & MLX5_FLOW_LAYER_IPIP) { 1572 if (mask && spec) 1573 next_proto = mask->hdr.next_proto_id & 1574 spec->hdr.next_proto_id; 1575 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) 1576 return rte_flow_error_set(error, EINVAL, 1577 RTE_FLOW_ERROR_TYPE_ITEM, 1578 item, 1579 "multiple tunnel " 1580 "not supported"); 1581 } 1582 if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) 1583 return rte_flow_error_set(error, EINVAL, 1584 RTE_FLOW_ERROR_TYPE_ITEM, item, 1585 "wrong tunnel type - IPv6 specified " 1586 "but IPv4 item provided"); 1587 if (item_flags & l3m) 1588 return rte_flow_error_set(error, ENOTSUP, 1589 RTE_FLOW_ERROR_TYPE_ITEM, item, 1590 "multiple L3 layers not supported"); 1591 else if (item_flags & l4m) 1592 return rte_flow_error_set(error, EINVAL, 1593 RTE_FLOW_ERROR_TYPE_ITEM, item, 1594 "L3 cannot follow an L4 layer."); 1595 else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && 1596 !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) 1597 return rte_flow_error_set(error, EINVAL, 1598 RTE_FLOW_ERROR_TYPE_ITEM, item, 1599 "L3 cannot follow an NVGRE layer."); 1600 if (!mask) 1601 mask = &rte_flow_item_ipv4_mask; 1602 else if (mask->hdr.next_proto_id != 0 && 1603 mask->hdr.next_proto_id != 0xff) 1604 return rte_flow_error_set(error, EINVAL, 1605 RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask, 1606 "partial mask is not supported" 1607 " for protocol"); 1608 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1609 acc_mask ? (const uint8_t *)acc_mask 1610 : (const uint8_t *)&nic_mask, 1611 sizeof(struct rte_flow_item_ipv4), 1612 error); 1613 if (ret < 0) 1614 return ret; 1615 return 0; 1616 } 1617 1618 /** 1619 * Validate IPV6 item. 1620 * 1621 * @param[in] item 1622 * Item specification. 1623 * @param[in] item_flags 1624 * Bit-fields that holds the items detected until now. 1625 * @param[in] acc_mask 1626 * Acceptable mask, if NULL default internal default mask 1627 * will be used to check whether item fields are supported. 1628 * @param[out] error 1629 * Pointer to error structure. 1630 * 1631 * @return 1632 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1633 */ 1634 int 1635 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, 1636 uint64_t item_flags, 1637 uint64_t last_item, 1638 uint16_t ether_type, 1639 const struct rte_flow_item_ipv6 *acc_mask, 1640 struct rte_flow_error *error) 1641 { 1642 const struct rte_flow_item_ipv6 *mask = item->mask; 1643 const struct rte_flow_item_ipv6 *spec = item->spec; 1644 const struct rte_flow_item_ipv6 nic_mask = { 1645 .hdr = { 1646 .src_addr = 1647 "\xff\xff\xff\xff\xff\xff\xff\xff" 1648 "\xff\xff\xff\xff\xff\xff\xff\xff", 1649 .dst_addr = 1650 "\xff\xff\xff\xff\xff\xff\xff\xff" 1651 "\xff\xff\xff\xff\xff\xff\xff\xff", 1652 .vtc_flow = RTE_BE32(0xffffffff), 1653 .proto = 0xff, 1654 }, 1655 }; 1656 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1657 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1658 MLX5_FLOW_LAYER_OUTER_L3; 1659 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1660 MLX5_FLOW_LAYER_OUTER_L4; 1661 int ret; 1662 uint8_t next_proto = 0xFF; 1663 const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 | 1664 MLX5_FLOW_LAYER_OUTER_VLAN | 1665 MLX5_FLOW_LAYER_INNER_VLAN); 1666 1667 if ((last_item & l2_vlan) && ether_type && 1668 ether_type != RTE_ETHER_TYPE_IPV6) 1669 return rte_flow_error_set(error, EINVAL, 1670 RTE_FLOW_ERROR_TYPE_ITEM, item, 1671 "IPv6 cannot follow L2/VLAN layer " 1672 "which ether type is not IPv6"); 1673 if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) { 1674 if (mask && spec) 1675 next_proto = mask->hdr.proto & spec->hdr.proto; 1676 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) 1677 return rte_flow_error_set(error, EINVAL, 1678 RTE_FLOW_ERROR_TYPE_ITEM, 1679 item, 1680 "multiple tunnel " 1681 "not supported"); 1682 } 1683 if (item_flags & MLX5_FLOW_LAYER_IPIP) 1684 return rte_flow_error_set(error, EINVAL, 1685 RTE_FLOW_ERROR_TYPE_ITEM, item, 1686 "wrong tunnel type - IPv4 specified " 1687 "but IPv6 item provided"); 1688 if (item_flags & l3m) 1689 return rte_flow_error_set(error, ENOTSUP, 1690 RTE_FLOW_ERROR_TYPE_ITEM, item, 1691 "multiple L3 layers not supported"); 1692 else if (item_flags & l4m) 1693 return rte_flow_error_set(error, EINVAL, 1694 RTE_FLOW_ERROR_TYPE_ITEM, item, 1695 "L3 cannot follow an L4 layer."); 1696 else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && 1697 !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) 1698 return rte_flow_error_set(error, EINVAL, 1699 RTE_FLOW_ERROR_TYPE_ITEM, item, 1700 "L3 cannot follow an NVGRE layer."); 1701 if (!mask) 1702 mask = &rte_flow_item_ipv6_mask; 1703 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1704 acc_mask ? (const uint8_t *)acc_mask 1705 : (const uint8_t *)&nic_mask, 1706 sizeof(struct rte_flow_item_ipv6), 1707 error); 1708 if (ret < 0) 1709 return ret; 1710 return 0; 1711 } 1712 1713 /** 1714 * Validate UDP item. 1715 * 1716 * @param[in] item 1717 * Item specification. 1718 * @param[in] item_flags 1719 * Bit-fields that holds the items detected until now. 1720 * @param[in] target_protocol 1721 * The next protocol in the previous item. 1722 * @param[in] flow_mask 1723 * mlx5 flow-specific (DV, verbs, etc.) supported header fields mask. 1724 * @param[out] error 1725 * Pointer to error structure. 1726 * 1727 * @return 1728 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1729 */ 1730 int 1731 mlx5_flow_validate_item_udp(const struct rte_flow_item *item, 1732 uint64_t item_flags, 1733 uint8_t target_protocol, 1734 struct rte_flow_error *error) 1735 { 1736 const struct rte_flow_item_udp *mask = item->mask; 1737 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1738 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1739 MLX5_FLOW_LAYER_OUTER_L3; 1740 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1741 MLX5_FLOW_LAYER_OUTER_L4; 1742 int ret; 1743 1744 if (target_protocol != 0xff && target_protocol != IPPROTO_UDP) 1745 return rte_flow_error_set(error, EINVAL, 1746 RTE_FLOW_ERROR_TYPE_ITEM, item, 1747 "protocol filtering not compatible" 1748 " with UDP layer"); 1749 if (!(item_flags & l3m)) 1750 return rte_flow_error_set(error, EINVAL, 1751 RTE_FLOW_ERROR_TYPE_ITEM, item, 1752 "L3 is mandatory to filter on L4"); 1753 if (item_flags & l4m) 1754 return rte_flow_error_set(error, EINVAL, 1755 RTE_FLOW_ERROR_TYPE_ITEM, item, 1756 "multiple L4 layers not supported"); 1757 if (!mask) 1758 mask = &rte_flow_item_udp_mask; 1759 ret = mlx5_flow_item_acceptable 1760 (item, (const uint8_t *)mask, 1761 (const uint8_t *)&rte_flow_item_udp_mask, 1762 sizeof(struct rte_flow_item_udp), error); 1763 if (ret < 0) 1764 return ret; 1765 return 0; 1766 } 1767 1768 /** 1769 * Validate TCP item. 1770 * 1771 * @param[in] item 1772 * Item specification. 1773 * @param[in] item_flags 1774 * Bit-fields that holds the items detected until now. 1775 * @param[in] target_protocol 1776 * The next protocol in the previous item. 1777 * @param[out] error 1778 * Pointer to error structure. 1779 * 1780 * @return 1781 * 0 on success, a negative errno value otherwise and rte_errno is set. 1782 */ 1783 int 1784 mlx5_flow_validate_item_tcp(const struct rte_flow_item *item, 1785 uint64_t item_flags, 1786 uint8_t target_protocol, 1787 const struct rte_flow_item_tcp *flow_mask, 1788 struct rte_flow_error *error) 1789 { 1790 const struct rte_flow_item_tcp *mask = item->mask; 1791 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1792 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1793 MLX5_FLOW_LAYER_OUTER_L3; 1794 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1795 MLX5_FLOW_LAYER_OUTER_L4; 1796 int ret; 1797 1798 MLX5_ASSERT(flow_mask); 1799 if (target_protocol != 0xff && target_protocol != IPPROTO_TCP) 1800 return rte_flow_error_set(error, EINVAL, 1801 RTE_FLOW_ERROR_TYPE_ITEM, item, 1802 "protocol filtering not compatible" 1803 " with TCP layer"); 1804 if (!(item_flags & l3m)) 1805 return rte_flow_error_set(error, EINVAL, 1806 RTE_FLOW_ERROR_TYPE_ITEM, item, 1807 "L3 is mandatory to filter on L4"); 1808 if (item_flags & l4m) 1809 return rte_flow_error_set(error, EINVAL, 1810 RTE_FLOW_ERROR_TYPE_ITEM, item, 1811 "multiple L4 layers not supported"); 1812 if (!mask) 1813 mask = &rte_flow_item_tcp_mask; 1814 ret = mlx5_flow_item_acceptable 1815 (item, (const uint8_t *)mask, 1816 (const uint8_t *)flow_mask, 1817 sizeof(struct rte_flow_item_tcp), error); 1818 if (ret < 0) 1819 return ret; 1820 return 0; 1821 } 1822 1823 /** 1824 * Validate VXLAN item. 1825 * 1826 * @param[in] item 1827 * Item specification. 1828 * @param[in] item_flags 1829 * Bit-fields that holds the items detected until now. 1830 * @param[in] target_protocol 1831 * The next protocol in the previous item. 1832 * @param[out] error 1833 * Pointer to error structure. 1834 * 1835 * @return 1836 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1837 */ 1838 int 1839 mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, 1840 uint64_t item_flags, 1841 struct rte_flow_error *error) 1842 { 1843 const struct rte_flow_item_vxlan *spec = item->spec; 1844 const struct rte_flow_item_vxlan *mask = item->mask; 1845 int ret; 1846 union vni { 1847 uint32_t vlan_id; 1848 uint8_t vni[4]; 1849 } id = { .vlan_id = 0, }; 1850 1851 1852 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 1853 return rte_flow_error_set(error, ENOTSUP, 1854 RTE_FLOW_ERROR_TYPE_ITEM, item, 1855 "multiple tunnel layers not" 1856 " supported"); 1857 /* 1858 * Verify only UDPv4 is present as defined in 1859 * https://tools.ietf.org/html/rfc7348 1860 */ 1861 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 1862 return rte_flow_error_set(error, EINVAL, 1863 RTE_FLOW_ERROR_TYPE_ITEM, item, 1864 "no outer UDP layer found"); 1865 if (!mask) 1866 mask = &rte_flow_item_vxlan_mask; 1867 ret = mlx5_flow_item_acceptable 1868 (item, (const uint8_t *)mask, 1869 (const uint8_t *)&rte_flow_item_vxlan_mask, 1870 sizeof(struct rte_flow_item_vxlan), 1871 error); 1872 if (ret < 0) 1873 return ret; 1874 if (spec) { 1875 memcpy(&id.vni[1], spec->vni, 3); 1876 memcpy(&id.vni[1], mask->vni, 3); 1877 } 1878 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 1879 return rte_flow_error_set(error, ENOTSUP, 1880 RTE_FLOW_ERROR_TYPE_ITEM, item, 1881 "VXLAN tunnel must be fully defined"); 1882 return 0; 1883 } 1884 1885 /** 1886 * Validate VXLAN_GPE item. 1887 * 1888 * @param[in] item 1889 * Item specification. 1890 * @param[in] item_flags 1891 * Bit-fields that holds the items detected until now. 1892 * @param[in] priv 1893 * Pointer to the private data structure. 1894 * @param[in] target_protocol 1895 * The next protocol in the previous item. 1896 * @param[out] error 1897 * Pointer to error structure. 1898 * 1899 * @return 1900 * 0 on success, a negative errno value otherwise and rte_errno is set. 
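 *
 * Unlike plain VXLAN, validation here needs the port (dev) because matching
 * VXLAN-GPE is only allowed when priv->config.l3_vxlan_en is set and the
 * firmware supports it. Illustrative call, assuming item and item_flags are
 * prepared as for the VXLAN case above:
 *
 *   int ret = mlx5_flow_validate_item_vxlan_gpe(&item, item_flags,
 *                                               dev, &err);
 *
 * A non-zero protocol field in the spec is rejected with ENOTSUP.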
1901 */ 1902 int 1903 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, 1904 uint64_t item_flags, 1905 struct rte_eth_dev *dev, 1906 struct rte_flow_error *error) 1907 { 1908 struct mlx5_priv *priv = dev->data->dev_private; 1909 const struct rte_flow_item_vxlan_gpe *spec = item->spec; 1910 const struct rte_flow_item_vxlan_gpe *mask = item->mask; 1911 int ret; 1912 union vni { 1913 uint32_t vlan_id; 1914 uint8_t vni[4]; 1915 } id = { .vlan_id = 0, }; 1916 1917 if (!priv->config.l3_vxlan_en) 1918 return rte_flow_error_set(error, ENOTSUP, 1919 RTE_FLOW_ERROR_TYPE_ITEM, item, 1920 "L3 VXLAN is not enabled by device" 1921 " parameter and/or not configured in" 1922 " firmware"); 1923 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 1924 return rte_flow_error_set(error, ENOTSUP, 1925 RTE_FLOW_ERROR_TYPE_ITEM, item, 1926 "multiple tunnel layers not" 1927 " supported"); 1928 /* 1929 * Verify only UDPv4 is present as defined in 1930 * https://tools.ietf.org/html/rfc7348 1931 */ 1932 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 1933 return rte_flow_error_set(error, EINVAL, 1934 RTE_FLOW_ERROR_TYPE_ITEM, item, 1935 "no outer UDP layer found"); 1936 if (!mask) 1937 mask = &rte_flow_item_vxlan_gpe_mask; 1938 ret = mlx5_flow_item_acceptable 1939 (item, (const uint8_t *)mask, 1940 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask, 1941 sizeof(struct rte_flow_item_vxlan_gpe), 1942 error); 1943 if (ret < 0) 1944 return ret; 1945 if (spec) { 1946 if (spec->protocol) 1947 return rte_flow_error_set(error, ENOTSUP, 1948 RTE_FLOW_ERROR_TYPE_ITEM, 1949 item, 1950 "VxLAN-GPE protocol" 1951 " not supported"); 1952 memcpy(&id.vni[1], spec->vni, 3); 1953 memcpy(&id.vni[1], mask->vni, 3); 1954 } 1955 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 1956 return rte_flow_error_set(error, ENOTSUP, 1957 RTE_FLOW_ERROR_TYPE_ITEM, item, 1958 "VXLAN-GPE tunnel must be fully" 1959 " defined"); 1960 return 0; 1961 } 1962 /** 1963 * Validate GRE Key item. 1964 * 1965 * @param[in] item 1966 * Item specification. 1967 * @param[in] item_flags 1968 * Bit flags to mark detected items. 1969 * @param[in] gre_item 1970 * Pointer to gre_item 1971 * @param[out] error 1972 * Pointer to error structure. 1973 * 1974 * @return 1975 * 0 on success, a negative errno value otherwise and rte_errno is set. 
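 *
 * A sketch of the item pair this helper expects (illustrative; the key value
 * and the item_flags below are assumptions). When the preceding GRE item
 * masks the K bit (0x2000 in c_rsvd0_ver), its spec must have that bit set
 * as well:
 *
 *   const struct rte_flow_item_gre gre_spec = {
 *           .c_rsvd0_ver = RTE_BE16(0x2000),
 *   };
 *   const struct rte_flow_item_gre gre_mask = {
 *           .c_rsvd0_ver = RTE_BE16(0x2000),
 *   };
 *   const struct rte_flow_item gre_item = {
 *           .type = RTE_FLOW_ITEM_TYPE_GRE,
 *           .spec = &gre_spec,
 *           .mask = &gre_mask,
 *   };
 *   rte_be32_t gre_key = RTE_BE32(0x1234);
 *   const struct rte_flow_item key_item = {
 *           .type = RTE_FLOW_ITEM_TYPE_GRE_KEY,
 *           .spec = &gre_key,
 *   };
 *   struct rte_flow_error err;
 *   uint64_t item_flags = MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
 *                         MLX5_FLOW_LAYER_GRE;
 *   int ret = mlx5_flow_validate_item_gre_key(&key_item, item_flags,
 *                                             &gre_item, &err);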
1976 */ 1977 int 1978 mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item, 1979 uint64_t item_flags, 1980 const struct rte_flow_item *gre_item, 1981 struct rte_flow_error *error) 1982 { 1983 const rte_be32_t *mask = item->mask; 1984 int ret = 0; 1985 rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX); 1986 const struct rte_flow_item_gre *gre_spec; 1987 const struct rte_flow_item_gre *gre_mask; 1988 1989 if (item_flags & MLX5_FLOW_LAYER_GRE_KEY) 1990 return rte_flow_error_set(error, ENOTSUP, 1991 RTE_FLOW_ERROR_TYPE_ITEM, item, 1992 "Multiple GRE key not support"); 1993 if (!(item_flags & MLX5_FLOW_LAYER_GRE)) 1994 return rte_flow_error_set(error, ENOTSUP, 1995 RTE_FLOW_ERROR_TYPE_ITEM, item, 1996 "No preceding GRE header"); 1997 if (item_flags & MLX5_FLOW_LAYER_INNER) 1998 return rte_flow_error_set(error, ENOTSUP, 1999 RTE_FLOW_ERROR_TYPE_ITEM, item, 2000 "GRE key following a wrong item"); 2001 gre_mask = gre_item->mask; 2002 if (!gre_mask) 2003 gre_mask = &rte_flow_item_gre_mask; 2004 gre_spec = gre_item->spec; 2005 if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) && 2006 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000))) 2007 return rte_flow_error_set(error, EINVAL, 2008 RTE_FLOW_ERROR_TYPE_ITEM, item, 2009 "Key bit must be on"); 2010 2011 if (!mask) 2012 mask = &gre_key_default_mask; 2013 ret = mlx5_flow_item_acceptable 2014 (item, (const uint8_t *)mask, 2015 (const uint8_t *)&gre_key_default_mask, 2016 sizeof(rte_be32_t), error); 2017 return ret; 2018 } 2019 2020 /** 2021 * Validate GRE item. 2022 * 2023 * @param[in] item 2024 * Item specification. 2025 * @param[in] item_flags 2026 * Bit flags to mark detected items. 2027 * @param[in] target_protocol 2028 * The next protocol in the previous item. 2029 * @param[out] error 2030 * Pointer to error structure. 2031 * 2032 * @return 2033 * 0 on success, a negative errno value otherwise and rte_errno is set. 
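 *
 * Only the checksum/key/sequence present bits (0xB000 in c_rsvd0_ver) and
 * the protocol field can be matched, as expressed by the nic_mask below.
 * An illustrative item matching GRE that carries IPv4 (the mask choice and
 * item_flags are assumptions):
 *
 *   const struct rte_flow_item_gre gre_spec = {
 *           .protocol = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *   };
 *   const struct rte_flow_item_gre gre_mask = {
 *           .protocol = RTE_BE16(0xffff),
 *   };
 *   const struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_GRE,
 *           .spec = &gre_spec,
 *           .mask = &gre_mask,
 *   };
 *   struct rte_flow_error err;
 *   uint64_t item_flags = MLX5_FLOW_LAYER_OUTER_L3_IPV4;
 *   int ret = mlx5_flow_validate_item_gre(&item, item_flags,
 *                                         IPPROTO_GRE, &err);
 *
 * On builds without DR and without MPLS support, a masked protocol in the
 * spec is rejected, see the #ifndef block at the end of the function.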
2034 */
2035 int
2036 mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
2037 uint64_t item_flags,
2038 uint8_t target_protocol,
2039 struct rte_flow_error *error)
2040 {
2041 const struct rte_flow_item_gre *spec __rte_unused = item->spec;
2042 const struct rte_flow_item_gre *mask = item->mask;
2043 int ret;
2044 const struct rte_flow_item_gre nic_mask = {
2045 .c_rsvd0_ver = RTE_BE16(0xB000),
2046 .protocol = RTE_BE16(UINT16_MAX),
2047 };
2048
2049 if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
2050 return rte_flow_error_set(error, EINVAL,
2051 RTE_FLOW_ERROR_TYPE_ITEM, item,
2052 "protocol filtering not compatible"
2053 " with this GRE layer");
2054 if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2055 return rte_flow_error_set(error, ENOTSUP,
2056 RTE_FLOW_ERROR_TYPE_ITEM, item,
2057 "multiple tunnel layers not"
2058 " supported");
2059 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
2060 return rte_flow_error_set(error, ENOTSUP,
2061 RTE_FLOW_ERROR_TYPE_ITEM, item,
2062 "L3 Layer is missing");
2063 if (!mask)
2064 mask = &rte_flow_item_gre_mask;
2065 ret = mlx5_flow_item_acceptable
2066 (item, (const uint8_t *)mask,
2067 (const uint8_t *)&nic_mask,
2068 sizeof(struct rte_flow_item_gre), error);
2069 if (ret < 0)
2070 return ret;
2071 #ifndef HAVE_MLX5DV_DR
2072 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
2073 if (spec && (spec->protocol & mask->protocol))
2074 return rte_flow_error_set(error, ENOTSUP,
2075 RTE_FLOW_ERROR_TYPE_ITEM, item,
2076 "without MPLS support the"
2077 " specification cannot be used for"
2078 " filtering");
2079 #endif
2080 #endif
2081 return 0;
2082 }
2083
2084 /**
2085 * Validate Geneve item.
2086 *
2087 * @param[in] item
2088 * Item specification.
2089 * @param[in] item_flags
2090 * Bit-fields that holds the items detected until now.
2091 * @param[in] dev
2092 * Pointer to the rte_eth_dev structure.
2093 * @param[out] error
2094 * Pointer to error structure.
2095 *
2096 * @return
2097 * 0 on success, a negative errno value otherwise and rte_errno is set.
2098 */
2099
2100 int
2101 mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
2102 uint64_t item_flags,
2103 struct rte_eth_dev *dev,
2104 struct rte_flow_error *error)
2105 {
2106 struct mlx5_priv *priv = dev->data->dev_private;
2107 const struct rte_flow_item_geneve *spec = item->spec;
2108 const struct rte_flow_item_geneve *mask = item->mask;
2109 int ret;
2110 uint16_t gbhdr;
2111 uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
2112 MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0; 2113 const struct rte_flow_item_geneve nic_mask = { 2114 .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80), 2115 .vni = "\xff\xff\xff", 2116 .protocol = RTE_BE16(UINT16_MAX), 2117 }; 2118 2119 if (!priv->config.hca_attr.tunnel_stateless_geneve_rx) 2120 return rte_flow_error_set(error, ENOTSUP, 2121 RTE_FLOW_ERROR_TYPE_ITEM, item, 2122 "L3 Geneve is not enabled by device" 2123 " parameter and/or not configured in" 2124 " firmware"); 2125 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 2126 return rte_flow_error_set(error, ENOTSUP, 2127 RTE_FLOW_ERROR_TYPE_ITEM, item, 2128 "multiple tunnel layers not" 2129 " supported"); 2130 /* 2131 * Verify only UDPv4 is present as defined in 2132 * https://tools.ietf.org/html/rfc7348 2133 */ 2134 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 2135 return rte_flow_error_set(error, EINVAL, 2136 RTE_FLOW_ERROR_TYPE_ITEM, item, 2137 "no outer UDP layer found"); 2138 if (!mask) 2139 mask = &rte_flow_item_geneve_mask; 2140 ret = mlx5_flow_item_acceptable 2141 (item, (const uint8_t *)mask, 2142 (const uint8_t *)&nic_mask, 2143 sizeof(struct rte_flow_item_geneve), error); 2144 if (ret) 2145 return ret; 2146 if (spec) { 2147 gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0); 2148 if (MLX5_GENEVE_VER_VAL(gbhdr) || 2149 MLX5_GENEVE_CRITO_VAL(gbhdr) || 2150 MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1) 2151 return rte_flow_error_set(error, ENOTSUP, 2152 RTE_FLOW_ERROR_TYPE_ITEM, 2153 item, 2154 "Geneve protocol unsupported" 2155 " fields are being used"); 2156 if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len) 2157 return rte_flow_error_set 2158 (error, ENOTSUP, 2159 RTE_FLOW_ERROR_TYPE_ITEM, 2160 item, 2161 "Unsupported Geneve options length"); 2162 } 2163 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 2164 return rte_flow_error_set 2165 (error, ENOTSUP, 2166 RTE_FLOW_ERROR_TYPE_ITEM, item, 2167 "Geneve tunnel must be fully defined"); 2168 return 0; 2169 } 2170 2171 /** 2172 * Validate MPLS item. 2173 * 2174 * @param[in] dev 2175 * Pointer to the rte_eth_dev structure. 2176 * @param[in] item 2177 * Item specification. 2178 * @param[in] item_flags 2179 * Bit-fields that holds the items detected until now. 2180 * @param[in] prev_layer 2181 * The protocol layer indicated in previous item. 2182 * @param[out] error 2183 * Pointer to error structure. 2184 * 2185 * @return 2186 * 0 on success, a negative errno value otherwise and rte_errno is set. 2187 */ 2188 int 2189 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, 2190 const struct rte_flow_item *item __rte_unused, 2191 uint64_t item_flags __rte_unused, 2192 uint64_t prev_layer __rte_unused, 2193 struct rte_flow_error *error) 2194 { 2195 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT 2196 const struct rte_flow_item_mpls *mask = item->mask; 2197 struct mlx5_priv *priv = dev->data->dev_private; 2198 int ret; 2199 2200 if (!priv->config.mpls_en) 2201 return rte_flow_error_set(error, ENOTSUP, 2202 RTE_FLOW_ERROR_TYPE_ITEM, item, 2203 "MPLS not supported or" 2204 " disabled in firmware" 2205 " configuration."); 2206 /* MPLS over IP, UDP, GRE is allowed */ 2207 if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 | 2208 MLX5_FLOW_LAYER_OUTER_L4_UDP | 2209 MLX5_FLOW_LAYER_GRE))) 2210 return rte_flow_error_set(error, EINVAL, 2211 RTE_FLOW_ERROR_TYPE_ITEM, item, 2212 "protocol filtering not compatible" 2213 " with MPLS layer"); 2214 /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. 
*/ 2215 if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) && 2216 !(item_flags & MLX5_FLOW_LAYER_GRE)) 2217 return rte_flow_error_set(error, ENOTSUP, 2218 RTE_FLOW_ERROR_TYPE_ITEM, item, 2219 "multiple tunnel layers not" 2220 " supported"); 2221 if (!mask) 2222 mask = &rte_flow_item_mpls_mask; 2223 ret = mlx5_flow_item_acceptable 2224 (item, (const uint8_t *)mask, 2225 (const uint8_t *)&rte_flow_item_mpls_mask, 2226 sizeof(struct rte_flow_item_mpls), error); 2227 if (ret < 0) 2228 return ret; 2229 return 0; 2230 #endif 2231 return rte_flow_error_set(error, ENOTSUP, 2232 RTE_FLOW_ERROR_TYPE_ITEM, item, 2233 "MPLS is not supported by Verbs, please" 2234 " update."); 2235 } 2236 2237 /** 2238 * Validate NVGRE item. 2239 * 2240 * @param[in] item 2241 * Item specification. 2242 * @param[in] item_flags 2243 * Bit flags to mark detected items. 2244 * @param[in] target_protocol 2245 * The next protocol in the previous item. 2246 * @param[out] error 2247 * Pointer to error structure. 2248 * 2249 * @return 2250 * 0 on success, a negative errno value otherwise and rte_errno is set. 2251 */ 2252 int 2253 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item, 2254 uint64_t item_flags, 2255 uint8_t target_protocol, 2256 struct rte_flow_error *error) 2257 { 2258 const struct rte_flow_item_nvgre *mask = item->mask; 2259 int ret; 2260 2261 if (target_protocol != 0xff && target_protocol != IPPROTO_GRE) 2262 return rte_flow_error_set(error, EINVAL, 2263 RTE_FLOW_ERROR_TYPE_ITEM, item, 2264 "protocol filtering not compatible" 2265 " with this GRE layer"); 2266 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 2267 return rte_flow_error_set(error, ENOTSUP, 2268 RTE_FLOW_ERROR_TYPE_ITEM, item, 2269 "multiple tunnel layers not" 2270 " supported"); 2271 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3)) 2272 return rte_flow_error_set(error, ENOTSUP, 2273 RTE_FLOW_ERROR_TYPE_ITEM, item, 2274 "L3 Layer is missing"); 2275 if (!mask) 2276 mask = &rte_flow_item_nvgre_mask; 2277 ret = mlx5_flow_item_acceptable 2278 (item, (const uint8_t *)mask, 2279 (const uint8_t *)&rte_flow_item_nvgre_mask, 2280 sizeof(struct rte_flow_item_nvgre), error); 2281 if (ret < 0) 2282 return ret; 2283 return 0; 2284 } 2285 2286 /* Allocate unique ID for the split Q/RSS subflows. */ 2287 static uint32_t 2288 flow_qrss_get_id(struct rte_eth_dev *dev) 2289 { 2290 struct mlx5_priv *priv = dev->data->dev_private; 2291 uint32_t qrss_id, ret; 2292 2293 ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id); 2294 if (ret) 2295 return 0; 2296 MLX5_ASSERT(qrss_id); 2297 return qrss_id; 2298 } 2299 2300 /* Free unique ID for the split Q/RSS subflows. */ 2301 static void 2302 flow_qrss_free_id(struct rte_eth_dev *dev, uint32_t qrss_id) 2303 { 2304 struct mlx5_priv *priv = dev->data->dev_private; 2305 2306 if (qrss_id) 2307 mlx5_flow_id_release(priv->qrss_id_pool, qrss_id); 2308 } 2309 2310 /** 2311 * Release resource related QUEUE/RSS action split. 2312 * 2313 * @param dev 2314 * Pointer to Ethernet device. 2315 * @param flow 2316 * Flow to release id's from. 
2317 */ 2318 static void 2319 flow_mreg_split_qrss_release(struct rte_eth_dev *dev, 2320 struct rte_flow *flow) 2321 { 2322 struct mlx5_priv *priv = dev->data->dev_private; 2323 uint32_t handle_idx; 2324 struct mlx5_flow_handle *dev_handle; 2325 2326 SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, 2327 handle_idx, dev_handle, next) 2328 if (dev_handle->qrss_id) 2329 flow_qrss_free_id(dev, dev_handle->qrss_id); 2330 } 2331 2332 static int 2333 flow_null_validate(struct rte_eth_dev *dev __rte_unused, 2334 const struct rte_flow_attr *attr __rte_unused, 2335 const struct rte_flow_item items[] __rte_unused, 2336 const struct rte_flow_action actions[] __rte_unused, 2337 bool external __rte_unused, 2338 struct rte_flow_error *error) 2339 { 2340 return rte_flow_error_set(error, ENOTSUP, 2341 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2342 } 2343 2344 static struct mlx5_flow * 2345 flow_null_prepare(struct rte_eth_dev *dev __rte_unused, 2346 const struct rte_flow_attr *attr __rte_unused, 2347 const struct rte_flow_item items[] __rte_unused, 2348 const struct rte_flow_action actions[] __rte_unused, 2349 struct rte_flow_error *error) 2350 { 2351 rte_flow_error_set(error, ENOTSUP, 2352 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2353 return NULL; 2354 } 2355 2356 static int 2357 flow_null_translate(struct rte_eth_dev *dev __rte_unused, 2358 struct mlx5_flow *dev_flow __rte_unused, 2359 const struct rte_flow_attr *attr __rte_unused, 2360 const struct rte_flow_item items[] __rte_unused, 2361 const struct rte_flow_action actions[] __rte_unused, 2362 struct rte_flow_error *error) 2363 { 2364 return rte_flow_error_set(error, ENOTSUP, 2365 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2366 } 2367 2368 static int 2369 flow_null_apply(struct rte_eth_dev *dev __rte_unused, 2370 struct rte_flow *flow __rte_unused, 2371 struct rte_flow_error *error) 2372 { 2373 return rte_flow_error_set(error, ENOTSUP, 2374 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2375 } 2376 2377 static void 2378 flow_null_remove(struct rte_eth_dev *dev __rte_unused, 2379 struct rte_flow *flow __rte_unused) 2380 { 2381 } 2382 2383 static void 2384 flow_null_destroy(struct rte_eth_dev *dev __rte_unused, 2385 struct rte_flow *flow __rte_unused) 2386 { 2387 } 2388 2389 static int 2390 flow_null_query(struct rte_eth_dev *dev __rte_unused, 2391 struct rte_flow *flow __rte_unused, 2392 const struct rte_flow_action *actions __rte_unused, 2393 void *data __rte_unused, 2394 struct rte_flow_error *error) 2395 { 2396 return rte_flow_error_set(error, ENOTSUP, 2397 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2398 } 2399 2400 /* Void driver to protect from null pointer reference. */ 2401 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = { 2402 .validate = flow_null_validate, 2403 .prepare = flow_null_prepare, 2404 .translate = flow_null_translate, 2405 .apply = flow_null_apply, 2406 .remove = flow_null_remove, 2407 .destroy = flow_null_destroy, 2408 .query = flow_null_query, 2409 }; 2410 2411 /** 2412 * Select flow driver type according to flow attributes and device 2413 * configuration. 2414 * 2415 * @param[in] dev 2416 * Pointer to the dev structure. 2417 * @param[in] attr 2418 * Pointer to the flow attributes. 2419 * 2420 * @return 2421 * flow driver type, MLX5_FLOW_TYPE_MAX otherwise. 
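 *
 * The selection reduces to the following (an illustrative restatement of
 * the checks in the function body):
 *
 *   if (attr->transfer)
 *           type = priv->config.dv_esw_en ? MLX5_FLOW_TYPE_DV
 *                                         : MLX5_FLOW_TYPE_MAX;
 *   else
 *           type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV
 *                                          : MLX5_FLOW_TYPE_VERBS;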
2422 */ 2423 static enum mlx5_flow_drv_type 2424 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) 2425 { 2426 struct mlx5_priv *priv = dev->data->dev_private; 2427 enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX; 2428 2429 if (attr->transfer && priv->config.dv_esw_en) 2430 type = MLX5_FLOW_TYPE_DV; 2431 if (!attr->transfer) 2432 type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV : 2433 MLX5_FLOW_TYPE_VERBS; 2434 return type; 2435 } 2436 2437 #define flow_get_drv_ops(type) flow_drv_ops[type] 2438 2439 /** 2440 * Flow driver validation API. This abstracts calling driver specific functions. 2441 * The type of flow driver is determined according to flow attributes. 2442 * 2443 * @param[in] dev 2444 * Pointer to the dev structure. 2445 * @param[in] attr 2446 * Pointer to the flow attributes. 2447 * @param[in] items 2448 * Pointer to the list of items. 2449 * @param[in] actions 2450 * Pointer to the list of actions. 2451 * @param[in] external 2452 * This flow rule is created by request external to PMD. 2453 * @param[out] error 2454 * Pointer to the error structure. 2455 * 2456 * @return 2457 * 0 on success, a negative errno value otherwise and rte_errno is set. 2458 */ 2459 static inline int 2460 flow_drv_validate(struct rte_eth_dev *dev, 2461 const struct rte_flow_attr *attr, 2462 const struct rte_flow_item items[], 2463 const struct rte_flow_action actions[], 2464 bool external, struct rte_flow_error *error) 2465 { 2466 const struct mlx5_flow_driver_ops *fops; 2467 enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr); 2468 2469 fops = flow_get_drv_ops(type); 2470 return fops->validate(dev, attr, items, actions, external, error); 2471 } 2472 2473 /** 2474 * Flow driver preparation API. This abstracts calling driver specific 2475 * functions. Parent flow (rte_flow) should have driver type (drv_type). It 2476 * calculates the size of memory required for device flow, allocates the memory, 2477 * initializes the device flow and returns the pointer. 2478 * 2479 * @note 2480 * This function initializes device flow structure such as dv or verbs in 2481 * struct mlx5_flow. However, it is caller's responsibility to initialize the 2482 * rest. For example, adding returning device flow to flow->dev_flow list and 2483 * setting backward reference to the flow should be done out of this function. 2484 * layers field is not filled either. 2485 * 2486 * @param[in] dev 2487 * Pointer to the dev structure. 2488 * @param[in] attr 2489 * Pointer to the flow attributes. 2490 * @param[in] items 2491 * Pointer to the list of items. 2492 * @param[in] actions 2493 * Pointer to the list of actions. 2494 * @param[out] error 2495 * Pointer to the error structure. 2496 * 2497 * @return 2498 * Pointer to device flow on success, otherwise NULL and rte_errno is set. 2499 */ 2500 static inline struct mlx5_flow * 2501 flow_drv_prepare(struct rte_eth_dev *dev, 2502 const struct rte_flow *flow, 2503 const struct rte_flow_attr *attr, 2504 const struct rte_flow_item items[], 2505 const struct rte_flow_action actions[], 2506 struct rte_flow_error *error) 2507 { 2508 const struct mlx5_flow_driver_ops *fops; 2509 enum mlx5_flow_drv_type type = flow->drv_type; 2510 2511 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2512 fops = flow_get_drv_ops(type); 2513 return fops->prepare(dev, attr, items, actions, error); 2514 } 2515 2516 /** 2517 * Flow driver translation API. This abstracts calling driver specific 2518 * functions. 
Parent flow (rte_flow) should have driver type (drv_type). It 2519 * translates a generic flow into a driver flow. flow_drv_prepare() must 2520 * precede. 2521 * 2522 * @note 2523 * dev_flow->layers could be filled as a result of parsing during translation 2524 * if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled 2525 * if necessary. As a flow can have multiple dev_flows by RSS flow expansion, 2526 * flow->actions could be overwritten even though all the expanded dev_flows 2527 * have the same actions. 2528 * 2529 * @param[in] dev 2530 * Pointer to the rte dev structure. 2531 * @param[in, out] dev_flow 2532 * Pointer to the mlx5 flow. 2533 * @param[in] attr 2534 * Pointer to the flow attributes. 2535 * @param[in] items 2536 * Pointer to the list of items. 2537 * @param[in] actions 2538 * Pointer to the list of actions. 2539 * @param[out] error 2540 * Pointer to the error structure. 2541 * 2542 * @return 2543 * 0 on success, a negative errno value otherwise and rte_errno is set. 2544 */ 2545 static inline int 2546 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow, 2547 const struct rte_flow_attr *attr, 2548 const struct rte_flow_item items[], 2549 const struct rte_flow_action actions[], 2550 struct rte_flow_error *error) 2551 { 2552 const struct mlx5_flow_driver_ops *fops; 2553 enum mlx5_flow_drv_type type = dev_flow->flow->drv_type; 2554 2555 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2556 fops = flow_get_drv_ops(type); 2557 return fops->translate(dev, dev_flow, attr, items, actions, error); 2558 } 2559 2560 /** 2561 * Flow driver apply API. This abstracts calling driver specific functions. 2562 * Parent flow (rte_flow) should have driver type (drv_type). It applies 2563 * translated driver flows on to device. flow_drv_translate() must precede. 2564 * 2565 * @param[in] dev 2566 * Pointer to Ethernet device structure. 2567 * @param[in, out] flow 2568 * Pointer to flow structure. 2569 * @param[out] error 2570 * Pointer to error structure. 2571 * 2572 * @return 2573 * 0 on success, a negative errno value otherwise and rte_errno is set. 2574 */ 2575 static inline int 2576 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, 2577 struct rte_flow_error *error) 2578 { 2579 const struct mlx5_flow_driver_ops *fops; 2580 enum mlx5_flow_drv_type type = flow->drv_type; 2581 2582 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2583 fops = flow_get_drv_ops(type); 2584 return fops->apply(dev, flow, error); 2585 } 2586 2587 /** 2588 * Flow driver remove API. This abstracts calling driver specific functions. 2589 * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow 2590 * on device. All the resources of the flow should be freed by calling 2591 * flow_drv_destroy(). 2592 * 2593 * @param[in] dev 2594 * Pointer to Ethernet device. 2595 * @param[in, out] flow 2596 * Pointer to flow structure. 2597 */ 2598 static inline void 2599 flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) 2600 { 2601 const struct mlx5_flow_driver_ops *fops; 2602 enum mlx5_flow_drv_type type = flow->drv_type; 2603 2604 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2605 fops = flow_get_drv_ops(type); 2606 fops->remove(dev, flow); 2607 } 2608 2609 /** 2610 * Flow driver destroy API. This abstracts calling driver specific functions. 2611 * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow 2612 * on device and releases resources of the flow. 
2613 * 2614 * @param[in] dev 2615 * Pointer to Ethernet device. 2616 * @param[in, out] flow 2617 * Pointer to flow structure. 2618 */ 2619 static inline void 2620 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) 2621 { 2622 const struct mlx5_flow_driver_ops *fops; 2623 enum mlx5_flow_drv_type type = flow->drv_type; 2624 2625 flow_mreg_split_qrss_release(dev, flow); 2626 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2627 fops = flow_get_drv_ops(type); 2628 fops->destroy(dev, flow); 2629 } 2630 2631 /** 2632 * Validate a flow supported by the NIC. 2633 * 2634 * @see rte_flow_validate() 2635 * @see rte_flow_ops 2636 */ 2637 int 2638 mlx5_flow_validate(struct rte_eth_dev *dev, 2639 const struct rte_flow_attr *attr, 2640 const struct rte_flow_item items[], 2641 const struct rte_flow_action actions[], 2642 struct rte_flow_error *error) 2643 { 2644 int ret; 2645 2646 ret = flow_drv_validate(dev, attr, items, actions, true, error); 2647 if (ret < 0) 2648 return ret; 2649 return 0; 2650 } 2651 2652 /** 2653 * Get RSS action from the action list. 2654 * 2655 * @param[in] actions 2656 * Pointer to the list of actions. 2657 * 2658 * @return 2659 * Pointer to the RSS action if exist, else return NULL. 2660 */ 2661 static const struct rte_flow_action_rss* 2662 flow_get_rss_action(const struct rte_flow_action actions[]) 2663 { 2664 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2665 switch (actions->type) { 2666 case RTE_FLOW_ACTION_TYPE_RSS: 2667 return (const struct rte_flow_action_rss *) 2668 actions->conf; 2669 default: 2670 break; 2671 } 2672 } 2673 return NULL; 2674 } 2675 2676 static unsigned int 2677 find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) 2678 { 2679 const struct rte_flow_item *item; 2680 unsigned int has_vlan = 0; 2681 2682 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { 2683 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { 2684 has_vlan = 1; 2685 break; 2686 } 2687 } 2688 if (has_vlan) 2689 return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN : 2690 MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN; 2691 return rss_level < 2 ? MLX5_EXPANSION_ROOT : 2692 MLX5_EXPANSION_ROOT_OUTER; 2693 } 2694 2695 /** 2696 * Get layer flags from the prefix flow. 2697 * 2698 * Some flows may be split to several subflows, the prefix subflow gets the 2699 * match items and the suffix sub flow gets the actions. 2700 * Some actions need the user defined match item flags to get the detail for 2701 * the action. 2702 * This function helps the suffix flow to get the item layer flags from prefix 2703 * subflow. 2704 * 2705 * @param[in] dev_flow 2706 * Pointer the created preifx subflow. 2707 * 2708 * @return 2709 * The layers get from prefix subflow. 2710 */ 2711 static inline uint64_t 2712 flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow) 2713 { 2714 uint64_t layers = 0; 2715 2716 /* 2717 * Layers bits could be localization, but usually the compiler will 2718 * help to do the optimization work for source code. 2719 * If no decap actions, use the layers directly. 2720 */ 2721 if (!(dev_flow->handle->act_flags & MLX5_FLOW_ACTION_DECAP)) 2722 return dev_flow->handle->layers; 2723 /* Convert L3 layers with decap action. */ 2724 if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4) 2725 layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4; 2726 else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6) 2727 layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6; 2728 /* Convert L4 layers with decap action. 
*/ 2729 if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP) 2730 layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP; 2731 else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP) 2732 layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP; 2733 return layers; 2734 } 2735 2736 /** 2737 * Get metadata split action information. 2738 * 2739 * @param[in] actions 2740 * Pointer to the list of actions. 2741 * @param[out] qrss 2742 * Pointer to the return pointer. 2743 * @param[out] qrss_type 2744 * Pointer to the action type to return. RTE_FLOW_ACTION_TYPE_END is returned 2745 * if no QUEUE/RSS is found. 2746 * @param[out] encap_idx 2747 * Pointer to the index of the encap action if exists, otherwise the last 2748 * action index. 2749 * 2750 * @return 2751 * Total number of actions. 2752 */ 2753 static int 2754 flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[], 2755 const struct rte_flow_action **qrss, 2756 int *encap_idx) 2757 { 2758 const struct rte_flow_action_raw_encap *raw_encap; 2759 int actions_n = 0; 2760 int raw_decap_idx = -1; 2761 2762 *encap_idx = -1; 2763 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2764 switch (actions->type) { 2765 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 2766 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 2767 *encap_idx = actions_n; 2768 break; 2769 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 2770 raw_decap_idx = actions_n; 2771 break; 2772 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 2773 raw_encap = actions->conf; 2774 if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) 2775 *encap_idx = raw_decap_idx != -1 ? 2776 raw_decap_idx : actions_n; 2777 break; 2778 case RTE_FLOW_ACTION_TYPE_QUEUE: 2779 case RTE_FLOW_ACTION_TYPE_RSS: 2780 *qrss = actions; 2781 break; 2782 default: 2783 break; 2784 } 2785 actions_n++; 2786 } 2787 if (*encap_idx == -1) 2788 *encap_idx = actions_n; 2789 /* Count RTE_FLOW_ACTION_TYPE_END. */ 2790 return actions_n + 1; 2791 } 2792 2793 /** 2794 * Check meter action from the action list. 2795 * 2796 * @param[in] actions 2797 * Pointer to the list of actions. 2798 * @param[out] mtr 2799 * Pointer to the meter exist flag. 2800 * 2801 * @return 2802 * Total number of actions. 2803 */ 2804 static int 2805 flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr) 2806 { 2807 int actions_n = 0; 2808 2809 MLX5_ASSERT(mtr); 2810 *mtr = 0; 2811 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2812 switch (actions->type) { 2813 case RTE_FLOW_ACTION_TYPE_METER: 2814 *mtr = 1; 2815 break; 2816 default: 2817 break; 2818 } 2819 actions_n++; 2820 } 2821 /* Count RTE_FLOW_ACTION_TYPE_END. */ 2822 return actions_n + 1; 2823 } 2824 2825 /** 2826 * Check if the flow should be splited due to hairpin. 2827 * The reason for the split is that in current HW we can't 2828 * support encap on Rx, so if a flow have encap we move it 2829 * to Tx. 2830 * 2831 * @param dev 2832 * Pointer to Ethernet device. 2833 * @param[in] attr 2834 * Flow rule attributes. 2835 * @param[in] actions 2836 * Associated actions (list terminated by the END action). 2837 * 2838 * @return 2839 * > 0 the number of actions and the flow should be split, 2840 * 0 when no split required. 
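 *
 * Example of an action list that triggers the split (illustrative only;
 * hairpin_queue and encap_conf are assumed to be prepared by the caller,
 * the queue index must belong to a hairpin Rx queue and attr->ingress must
 * be set):
 *
 *   struct rte_flow_action_queue queue = { .index = hairpin_queue };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP, .conf = &encap_conf },
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   int n = flow_check_hairpin_split(dev, attr, actions);
 *
 * Here n is 2 (the number of actions before END), telling the caller that
 * the rule has to be split into Rx and Tx parts.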
2841 */ 2842 static int 2843 flow_check_hairpin_split(struct rte_eth_dev *dev, 2844 const struct rte_flow_attr *attr, 2845 const struct rte_flow_action actions[]) 2846 { 2847 int queue_action = 0; 2848 int action_n = 0; 2849 int encap = 0; 2850 const struct rte_flow_action_queue *queue; 2851 const struct rte_flow_action_rss *rss; 2852 const struct rte_flow_action_raw_encap *raw_encap; 2853 2854 if (!attr->ingress) 2855 return 0; 2856 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2857 switch (actions->type) { 2858 case RTE_FLOW_ACTION_TYPE_QUEUE: 2859 queue = actions->conf; 2860 if (queue == NULL) 2861 return 0; 2862 if (mlx5_rxq_get_type(dev, queue->index) != 2863 MLX5_RXQ_TYPE_HAIRPIN) 2864 return 0; 2865 queue_action = 1; 2866 action_n++; 2867 break; 2868 case RTE_FLOW_ACTION_TYPE_RSS: 2869 rss = actions->conf; 2870 if (rss == NULL || rss->queue_num == 0) 2871 return 0; 2872 if (mlx5_rxq_get_type(dev, rss->queue[0]) != 2873 MLX5_RXQ_TYPE_HAIRPIN) 2874 return 0; 2875 queue_action = 1; 2876 action_n++; 2877 break; 2878 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 2879 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 2880 encap = 1; 2881 action_n++; 2882 break; 2883 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 2884 raw_encap = actions->conf; 2885 if (raw_encap->size > 2886 (sizeof(struct rte_flow_item_eth) + 2887 sizeof(struct rte_flow_item_ipv4))) 2888 encap = 1; 2889 action_n++; 2890 break; 2891 default: 2892 action_n++; 2893 break; 2894 } 2895 } 2896 if (encap == 1 && queue_action) 2897 return action_n; 2898 return 0; 2899 } 2900 2901 /* Declare flow create/destroy prototype in advance. */ 2902 static struct rte_flow * 2903 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, 2904 const struct rte_flow_attr *attr, 2905 const struct rte_flow_item items[], 2906 const struct rte_flow_action actions[], 2907 bool external, struct rte_flow_error *error); 2908 2909 static void 2910 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, 2911 struct rte_flow *flow); 2912 2913 /** 2914 * Add a flow of copying flow metadata registers in RX_CP_TBL. 2915 * 2916 * As mark_id is unique, if there's already a registered flow for the mark_id, 2917 * return by increasing the reference counter of the resource. Otherwise, create 2918 * the resource (mcp_res) and flow. 2919 * 2920 * Flow looks like, 2921 * - If ingress port is ANY and reg_c[1] is mark_id, 2922 * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL. 2923 * 2924 * For default flow (zero mark_id), flow is like, 2925 * - If ingress port is ANY, 2926 * reg_b := reg_c[0] and jump to RX_ACT_TBL. 2927 * 2928 * @param dev 2929 * Pointer to Ethernet device. 2930 * @param mark_id 2931 * ID of MARK action, zero means default flow for META. 2932 * @param[out] error 2933 * Perform verbose error reporting if not NULL. 2934 * 2935 * @return 2936 * Associated resource on success, NULL otherwise and rte_errno is set. 
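 *
 * Caller sketch (illustrative; the mark ID value 42 is an assumption):
 *
 *   struct mlx5_flow_mreg_copy_resource *mcp_res;
 *   struct rte_flow_error err;
 *
 *   mcp_res = flow_mreg_add_copy_action(dev, 42, &err);
 *
 * A second call with the same mark ID does not create another RX_CP_TBL
 * flow: it returns the entry cached in priv->mreg_cp_tbl with its reference
 * counter incremented (the default copy flow, MLX5_DEFAULT_COPY_ID, is kept
 * with a single reference instead).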
2937 */ 2938 static struct mlx5_flow_mreg_copy_resource * 2939 flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, 2940 struct rte_flow_error *error) 2941 { 2942 struct mlx5_priv *priv = dev->data->dev_private; 2943 struct rte_flow_attr attr = { 2944 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, 2945 .ingress = 1, 2946 }; 2947 struct mlx5_rte_flow_item_tag tag_spec = { 2948 .data = mark_id, 2949 }; 2950 struct rte_flow_item items[] = { 2951 [1] = { .type = RTE_FLOW_ITEM_TYPE_END, }, 2952 }; 2953 struct rte_flow_action_mark ftag = { 2954 .id = mark_id, 2955 }; 2956 struct mlx5_flow_action_copy_mreg cp_mreg = { 2957 .dst = REG_B, 2958 .src = 0, 2959 }; 2960 struct rte_flow_action_jump jump = { 2961 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, 2962 }; 2963 struct rte_flow_action actions[] = { 2964 [3] = { .type = RTE_FLOW_ACTION_TYPE_END, }, 2965 }; 2966 struct mlx5_flow_mreg_copy_resource *mcp_res; 2967 int ret; 2968 2969 /* Fill the register fileds in the flow. */ 2970 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); 2971 if (ret < 0) 2972 return NULL; 2973 tag_spec.id = ret; 2974 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error); 2975 if (ret < 0) 2976 return NULL; 2977 cp_mreg.src = ret; 2978 /* Check if already registered. */ 2979 MLX5_ASSERT(priv->mreg_cp_tbl); 2980 mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id); 2981 if (mcp_res) { 2982 /* For non-default rule. */ 2983 if (mark_id != MLX5_DEFAULT_COPY_ID) 2984 mcp_res->refcnt++; 2985 MLX5_ASSERT(mark_id != MLX5_DEFAULT_COPY_ID || 2986 mcp_res->refcnt == 1); 2987 return mcp_res; 2988 } 2989 /* Provide the full width of FLAG specific value. */ 2990 if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT)) 2991 tag_spec.data = MLX5_FLOW_MARK_DEFAULT; 2992 /* Build a new flow. */ 2993 if (mark_id != MLX5_DEFAULT_COPY_ID) { 2994 items[0] = (struct rte_flow_item){ 2995 .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG, 2996 .spec = &tag_spec, 2997 }; 2998 items[1] = (struct rte_flow_item){ 2999 .type = RTE_FLOW_ITEM_TYPE_END, 3000 }; 3001 actions[0] = (struct rte_flow_action){ 3002 .type = MLX5_RTE_FLOW_ACTION_TYPE_MARK, 3003 .conf = &ftag, 3004 }; 3005 actions[1] = (struct rte_flow_action){ 3006 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 3007 .conf = &cp_mreg, 3008 }; 3009 actions[2] = (struct rte_flow_action){ 3010 .type = RTE_FLOW_ACTION_TYPE_JUMP, 3011 .conf = &jump, 3012 }; 3013 actions[3] = (struct rte_flow_action){ 3014 .type = RTE_FLOW_ACTION_TYPE_END, 3015 }; 3016 } else { 3017 /* Default rule, wildcard match. */ 3018 attr.priority = MLX5_FLOW_PRIO_RSVD; 3019 items[0] = (struct rte_flow_item){ 3020 .type = RTE_FLOW_ITEM_TYPE_END, 3021 }; 3022 actions[0] = (struct rte_flow_action){ 3023 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 3024 .conf = &cp_mreg, 3025 }; 3026 actions[1] = (struct rte_flow_action){ 3027 .type = RTE_FLOW_ACTION_TYPE_JUMP, 3028 .conf = &jump, 3029 }; 3030 actions[2] = (struct rte_flow_action){ 3031 .type = RTE_FLOW_ACTION_TYPE_END, 3032 }; 3033 } 3034 /* Build a new entry. */ 3035 mcp_res = rte_zmalloc(__func__, sizeof(*mcp_res), 0); 3036 if (!mcp_res) { 3037 rte_errno = ENOMEM; 3038 return NULL; 3039 } 3040 /* 3041 * The copy Flows are not included in any list. There 3042 * ones are referenced from other Flows and can not 3043 * be applied, removed, deleted in ardbitrary order 3044 * by list traversing. 
3045 */ 3046 mcp_res->flow = flow_list_create(dev, NULL, &attr, items, 3047 actions, false, error); 3048 if (!mcp_res->flow) 3049 goto error; 3050 mcp_res->refcnt++; 3051 mcp_res->hlist_ent.key = mark_id; 3052 ret = mlx5_hlist_insert(priv->mreg_cp_tbl, 3053 &mcp_res->hlist_ent); 3054 MLX5_ASSERT(!ret); 3055 if (ret) 3056 goto error; 3057 return mcp_res; 3058 error: 3059 if (mcp_res->flow) 3060 flow_list_destroy(dev, NULL, mcp_res->flow); 3061 rte_free(mcp_res); 3062 return NULL; 3063 } 3064 3065 /** 3066 * Release flow in RX_CP_TBL. 3067 * 3068 * @param dev 3069 * Pointer to Ethernet device. 3070 * @flow 3071 * Parent flow for wich copying is provided. 3072 */ 3073 static void 3074 flow_mreg_del_copy_action(struct rte_eth_dev *dev, 3075 struct rte_flow *flow) 3076 { 3077 struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; 3078 struct mlx5_priv *priv = dev->data->dev_private; 3079 3080 if (!mcp_res || !priv->mreg_cp_tbl) 3081 return; 3082 if (flow->copy_applied) { 3083 MLX5_ASSERT(mcp_res->appcnt); 3084 flow->copy_applied = 0; 3085 --mcp_res->appcnt; 3086 if (!mcp_res->appcnt) 3087 flow_drv_remove(dev, mcp_res->flow); 3088 } 3089 /* 3090 * We do not check availability of metadata registers here, 3091 * because copy resources are not allocated in this case. 3092 */ 3093 if (--mcp_res->refcnt) 3094 return; 3095 MLX5_ASSERT(mcp_res->flow); 3096 flow_list_destroy(dev, NULL, mcp_res->flow); 3097 mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent); 3098 rte_free(mcp_res); 3099 flow->mreg_copy = NULL; 3100 } 3101 3102 /** 3103 * Start flow in RX_CP_TBL. 3104 * 3105 * @param dev 3106 * Pointer to Ethernet device. 3107 * @flow 3108 * Parent flow for wich copying is provided. 3109 * 3110 * @return 3111 * 0 on success, a negative errno value otherwise and rte_errno is set. 3112 */ 3113 static int 3114 flow_mreg_start_copy_action(struct rte_eth_dev *dev, 3115 struct rte_flow *flow) 3116 { 3117 struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; 3118 int ret; 3119 3120 if (!mcp_res || flow->copy_applied) 3121 return 0; 3122 if (!mcp_res->appcnt) { 3123 ret = flow_drv_apply(dev, mcp_res->flow, NULL); 3124 if (ret) 3125 return ret; 3126 } 3127 ++mcp_res->appcnt; 3128 flow->copy_applied = 1; 3129 return 0; 3130 } 3131 3132 /** 3133 * Stop flow in RX_CP_TBL. 3134 * 3135 * @param dev 3136 * Pointer to Ethernet device. 3137 * @flow 3138 * Parent flow for wich copying is provided. 3139 */ 3140 static void 3141 flow_mreg_stop_copy_action(struct rte_eth_dev *dev, 3142 struct rte_flow *flow) 3143 { 3144 struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; 3145 3146 if (!mcp_res || !flow->copy_applied) 3147 return; 3148 MLX5_ASSERT(mcp_res->appcnt); 3149 --mcp_res->appcnt; 3150 flow->copy_applied = 0; 3151 if (!mcp_res->appcnt) 3152 flow_drv_remove(dev, mcp_res->flow); 3153 } 3154 3155 /** 3156 * Remove the default copy action from RX_CP_TBL. 3157 * 3158 * @param dev 3159 * Pointer to Ethernet device. 3160 */ 3161 static void 3162 flow_mreg_del_default_copy_action(struct rte_eth_dev *dev) 3163 { 3164 struct mlx5_flow_mreg_copy_resource *mcp_res; 3165 struct mlx5_priv *priv = dev->data->dev_private; 3166 3167 /* Check if default flow is registered. 
*/ 3168 if (!priv->mreg_cp_tbl) 3169 return; 3170 mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, 3171 MLX5_DEFAULT_COPY_ID); 3172 if (!mcp_res) 3173 return; 3174 MLX5_ASSERT(mcp_res->flow); 3175 flow_list_destroy(dev, NULL, mcp_res->flow); 3176 mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent); 3177 rte_free(mcp_res); 3178 } 3179 3180 /** 3181 * Add the default copy action in in RX_CP_TBL. 3182 * 3183 * @param dev 3184 * Pointer to Ethernet device. 3185 * @param[out] error 3186 * Perform verbose error reporting if not NULL. 3187 * 3188 * @return 3189 * 0 for success, negative value otherwise and rte_errno is set. 3190 */ 3191 static int 3192 flow_mreg_add_default_copy_action(struct rte_eth_dev *dev, 3193 struct rte_flow_error *error) 3194 { 3195 struct mlx5_priv *priv = dev->data->dev_private; 3196 struct mlx5_flow_mreg_copy_resource *mcp_res; 3197 3198 /* Check whether extensive metadata feature is engaged. */ 3199 if (!priv->config.dv_flow_en || 3200 priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 3201 !mlx5_flow_ext_mreg_supported(dev) || 3202 !priv->sh->dv_regc0_mask) 3203 return 0; 3204 mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error); 3205 if (!mcp_res) 3206 return -rte_errno; 3207 return 0; 3208 } 3209 3210 /** 3211 * Add a flow of copying flow metadata registers in RX_CP_TBL. 3212 * 3213 * All the flow having Q/RSS action should be split by 3214 * flow_mreg_split_qrss_prep() to pass by RX_CP_TBL. A flow in the RX_CP_TBL 3215 * performs the following, 3216 * - CQE->flow_tag := reg_c[1] (MARK) 3217 * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META) 3218 * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1] 3219 * but there should be a flow per each MARK ID set by MARK action. 3220 * 3221 * For the aforementioned reason, if there's a MARK action in flow's action 3222 * list, a corresponding flow should be added to the RX_CP_TBL in order to copy 3223 * the MARK ID to CQE's flow_tag like, 3224 * - If reg_c[1] is mark_id, 3225 * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL. 3226 * 3227 * For SET_META action which stores value in reg_c[0], as the destination is 3228 * also a flow metadata register (reg_b), adding a default flow is enough. Zero 3229 * MARK ID means the default flow. The default flow looks like, 3230 * - For all flow, reg_b := reg_c[0] and jump to RX_ACT_TBL. 3231 * 3232 * @param dev 3233 * Pointer to Ethernet device. 3234 * @param flow 3235 * Pointer to flow structure. 3236 * @param[in] actions 3237 * Pointer to the list of actions. 3238 * @param[out] error 3239 * Perform verbose error reporting if not NULL. 3240 * 3241 * @return 3242 * 0 on success, negative value otherwise and rte_errno is set. 3243 */ 3244 static int 3245 flow_mreg_update_copy_table(struct rte_eth_dev *dev, 3246 struct rte_flow *flow, 3247 const struct rte_flow_action *actions, 3248 struct rte_flow_error *error) 3249 { 3250 struct mlx5_priv *priv = dev->data->dev_private; 3251 struct mlx5_dev_config *config = &priv->config; 3252 struct mlx5_flow_mreg_copy_resource *mcp_res; 3253 const struct rte_flow_action_mark *mark; 3254 3255 /* Check whether extensive metadata feature is engaged. */ 3256 if (!config->dv_flow_en || 3257 config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 3258 !mlx5_flow_ext_mreg_supported(dev) || 3259 !priv->sh->dv_regc0_mask) 3260 return 0; 3261 /* Find MARK action. 
*/ 3262 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 3263 switch (actions->type) { 3264 case RTE_FLOW_ACTION_TYPE_FLAG: 3265 mcp_res = flow_mreg_add_copy_action 3266 (dev, MLX5_FLOW_MARK_DEFAULT, error); 3267 if (!mcp_res) 3268 return -rte_errno; 3269 flow->mreg_copy = mcp_res; 3270 if (dev->data->dev_started) { 3271 mcp_res->appcnt++; 3272 flow->copy_applied = 1; 3273 } 3274 return 0; 3275 case RTE_FLOW_ACTION_TYPE_MARK: 3276 mark = (const struct rte_flow_action_mark *) 3277 actions->conf; 3278 mcp_res = 3279 flow_mreg_add_copy_action(dev, mark->id, error); 3280 if (!mcp_res) 3281 return -rte_errno; 3282 flow->mreg_copy = mcp_res; 3283 if (dev->data->dev_started) { 3284 mcp_res->appcnt++; 3285 flow->copy_applied = 1; 3286 } 3287 return 0; 3288 default: 3289 break; 3290 } 3291 } 3292 return 0; 3293 } 3294 3295 #define MLX5_MAX_SPLIT_ACTIONS 24 3296 #define MLX5_MAX_SPLIT_ITEMS 24 3297 3298 /** 3299 * Split the hairpin flow. 3300 * Since HW can't support encap on Rx we move the encap to Tx. 3301 * If the count action is after the encap then we also 3302 * move the count action. in this case the count will also measure 3303 * the outer bytes. 3304 * 3305 * @param dev 3306 * Pointer to Ethernet device. 3307 * @param[in] actions 3308 * Associated actions (list terminated by the END action). 3309 * @param[out] actions_rx 3310 * Rx flow actions. 3311 * @param[out] actions_tx 3312 * Tx flow actions.. 3313 * @param[out] pattern_tx 3314 * The pattern items for the Tx flow. 3315 * @param[out] flow_id 3316 * The flow ID connected to this flow. 3317 * 3318 * @return 3319 * 0 on success. 3320 */ 3321 static int 3322 flow_hairpin_split(struct rte_eth_dev *dev, 3323 const struct rte_flow_action actions[], 3324 struct rte_flow_action actions_rx[], 3325 struct rte_flow_action actions_tx[], 3326 struct rte_flow_item pattern_tx[], 3327 uint32_t *flow_id) 3328 { 3329 struct mlx5_priv *priv = dev->data->dev_private; 3330 const struct rte_flow_action_raw_encap *raw_encap; 3331 const struct rte_flow_action_raw_decap *raw_decap; 3332 struct mlx5_rte_flow_action_set_tag *set_tag; 3333 struct rte_flow_action *tag_action; 3334 struct mlx5_rte_flow_item_tag *tag_item; 3335 struct rte_flow_item *item; 3336 char *addr; 3337 int encap = 0; 3338 3339 mlx5_flow_id_get(priv->sh->flow_id_pool, flow_id); 3340 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 3341 switch (actions->type) { 3342 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 3343 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 3344 rte_memcpy(actions_tx, actions, 3345 sizeof(struct rte_flow_action)); 3346 actions_tx++; 3347 break; 3348 case RTE_FLOW_ACTION_TYPE_COUNT: 3349 if (encap) { 3350 rte_memcpy(actions_tx, actions, 3351 sizeof(struct rte_flow_action)); 3352 actions_tx++; 3353 } else { 3354 rte_memcpy(actions_rx, actions, 3355 sizeof(struct rte_flow_action)); 3356 actions_rx++; 3357 } 3358 break; 3359 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 3360 raw_encap = actions->conf; 3361 if (raw_encap->size > 3362 (sizeof(struct rte_flow_item_eth) + 3363 sizeof(struct rte_flow_item_ipv4))) { 3364 memcpy(actions_tx, actions, 3365 sizeof(struct rte_flow_action)); 3366 actions_tx++; 3367 encap = 1; 3368 } else { 3369 rte_memcpy(actions_rx, actions, 3370 sizeof(struct rte_flow_action)); 3371 actions_rx++; 3372 } 3373 break; 3374 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 3375 raw_decap = actions->conf; 3376 if (raw_decap->size < 3377 (sizeof(struct rte_flow_item_eth) + 3378 sizeof(struct rte_flow_item_ipv4))) { 3379 memcpy(actions_tx, actions, 3380 sizeof(struct 
rte_flow_action)); 3381 actions_tx++; 3382 } else { 3383 rte_memcpy(actions_rx, actions, 3384 sizeof(struct rte_flow_action)); 3385 actions_rx++; 3386 } 3387 break; 3388 default: 3389 rte_memcpy(actions_rx, actions, 3390 sizeof(struct rte_flow_action)); 3391 actions_rx++; 3392 break; 3393 } 3394 } 3395 /* Add set meta action and end action for the Rx flow. */ 3396 tag_action = actions_rx; 3397 tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG; 3398 actions_rx++; 3399 rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action)); 3400 actions_rx++; 3401 set_tag = (void *)actions_rx; 3402 set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL); 3403 MLX5_ASSERT(set_tag->id > REG_NONE); 3404 set_tag->data = *flow_id; 3405 tag_action->conf = set_tag; 3406 /* Create Tx item list. */ 3407 rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action)); 3408 addr = (void *)&pattern_tx[2]; 3409 item = pattern_tx; 3410 item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG; 3411 tag_item = (void *)addr; 3412 tag_item->data = *flow_id; 3413 tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL); 3414 MLX5_ASSERT(set_tag->id > REG_NONE); 3415 item->spec = tag_item; 3416 addr += sizeof(struct mlx5_rte_flow_item_tag); 3417 tag_item = (void *)addr; 3418 tag_item->data = UINT32_MAX; 3419 tag_item->id = UINT16_MAX; 3420 item->mask = tag_item; 3421 addr += sizeof(struct mlx5_rte_flow_item_tag); 3422 item->last = NULL; 3423 item++; 3424 item->type = RTE_FLOW_ITEM_TYPE_END; 3425 return 0; 3426 } 3427 3428 /** 3429 * The last stage of splitting chain, just creates the subflow 3430 * without any modification. 3431 * 3432 * @param[in] dev 3433 * Pointer to Ethernet device. 3434 * @param[in] flow 3435 * Parent flow structure pointer. 3436 * @param[in, out] sub_flow 3437 * Pointer to return the created subflow, may be NULL. 3438 * @param[in] prefix_layers 3439 * Prefix subflow layers, may be 0. 3440 * @param[in] attr 3441 * Flow rule attributes. 3442 * @param[in] items 3443 * Pattern specification (list terminated by the END pattern item). 3444 * @param[in] actions 3445 * Associated actions (list terminated by the END action). 3446 * @param[in] external 3447 * This flow rule is created by request external to PMD. 3448 * @param[out] error 3449 * Perform verbose error reporting if not NULL. 3450 * @return 3451 * 0 on success, negative value otherwise 3452 */ 3453 static int 3454 flow_create_split_inner(struct rte_eth_dev *dev, 3455 struct rte_flow *flow, 3456 struct mlx5_flow **sub_flow, 3457 uint64_t prefix_layers, 3458 const struct rte_flow_attr *attr, 3459 const struct rte_flow_item items[], 3460 const struct rte_flow_action actions[], 3461 bool external, struct rte_flow_error *error) 3462 { 3463 struct mlx5_flow *dev_flow; 3464 3465 dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, error); 3466 if (!dev_flow) 3467 return -rte_errno; 3468 dev_flow->flow = flow; 3469 dev_flow->external = external; 3470 /* Subflow object was created, we must include one in the list. */ 3471 SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx, 3472 dev_flow->handle, next); 3473 /* 3474 * If dev_flow is as one of the suffix flow, some actions in suffix 3475 * flow may need some user defined item layer flags. 3476 */ 3477 if (prefix_layers) 3478 dev_flow->handle->layers = prefix_layers; 3479 if (sub_flow) 3480 *sub_flow = dev_flow; 3481 return flow_drv_translate(dev, dev_flow, attr, items, actions, error); 3482 } 3483 3484 /** 3485 * Split the meter flow. 
3486 *
3487 * As the meter flow will be split into three subflows, the actions
3488 * other than the meter action itself make sense only when the meter
3489 * accepts the packet. If the packet is to be dropped, no additional
3490 * actions should be taken.
3491 *
3492 * One special case is an action decapsulating the L3 tunnel
3493 * header: it stays in the prefix subflow so that the L3 tunnel
3494 * header is not taken into account afterwards.
3495 *
3496 * @param dev
3497 * Pointer to Ethernet device.
3498 * @param[in] items
3499 * Pattern specification (list terminated by the END pattern item).
3500 * @param[out] sfx_items
3501 * Suffix flow match items (list terminated by the END pattern item).
3502 * @param[in] actions
3503 * Associated actions (list terminated by the END action).
3504 * @param[out] actions_sfx
3505 * Suffix flow actions.
3506 * @param[out] actions_pre
3507 * Prefix flow actions.
3512 *
3513 * @return
3514 * The flow tag ID for the meter suffix subflow on success, 0 otherwise.
3515 */
3516 static int
3517 flow_meter_split_prep(struct rte_eth_dev *dev,
3518 const struct rte_flow_item items[],
3519 struct rte_flow_item sfx_items[],
3520 const struct rte_flow_action actions[],
3521 struct rte_flow_action actions_sfx[],
3522 struct rte_flow_action actions_pre[])
3523 {
3524 struct rte_flow_action *tag_action = NULL;
3525 struct rte_flow_item *tag_item;
3526 struct mlx5_rte_flow_action_set_tag *set_tag;
3527 struct rte_flow_error error;
3528 const struct rte_flow_action_raw_encap *raw_encap;
3529 const struct rte_flow_action_raw_decap *raw_decap;
3530 struct mlx5_rte_flow_item_tag *tag_spec;
3531 struct mlx5_rte_flow_item_tag *tag_mask;
3532 uint32_t tag_id;
3533 bool copy_vlan = false;
3534
3535 /* Prepare the actions for prefix and suffix flow. */
3536 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3537 struct rte_flow_action **action_cur = NULL;
3538
3539 switch (actions->type) {
3540 case RTE_FLOW_ACTION_TYPE_METER:
3541 /* Add the extra tag action first. */
3542 tag_action = actions_pre;
3543 tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG;
3544 actions_pre++;
3545 action_cur = &actions_pre;
3546 break;
3547 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3548 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3549 action_cur = &actions_pre;
3550 break;
3551 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3552 raw_encap = actions->conf;
3553 if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE)
3554 action_cur = &actions_pre;
3555 break;
3556 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3557 raw_decap = actions->conf;
3558 if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3559 action_cur = &actions_pre;
3560 break;
3561 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3562 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3563 copy_vlan = true;
3564 break;
3565 default:
3566 break;
3567 }
3568 if (!action_cur)
3569 action_cur = &actions_sfx;
3570 memcpy(*action_cur, actions, sizeof(struct rte_flow_action));
3571 (*action_cur)++;
3572 }
3573 /* Add end action to the actions. */
3574 actions_sfx->type = RTE_FLOW_ACTION_TYPE_END;
3575 actions_pre->type = RTE_FLOW_ACTION_TYPE_END;
3576 actions_pre++;
3577 /* Set the tag. */
3578 set_tag = (void *)actions_pre;
3579 set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
3580 /*
3581 * Get the id from the qrss_pool to make qrss share the id with meter.
3582 */ 3583 tag_id = flow_qrss_get_id(dev); 3584 set_tag->data = tag_id << MLX5_MTR_COLOR_BITS; 3585 assert(tag_action); 3586 tag_action->conf = set_tag; 3587 /* Prepare the suffix subflow items. */ 3588 tag_item = sfx_items++; 3589 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { 3590 int item_type = items->type; 3591 3592 switch (item_type) { 3593 case RTE_FLOW_ITEM_TYPE_PORT_ID: 3594 memcpy(sfx_items, items, sizeof(*sfx_items)); 3595 sfx_items++; 3596 break; 3597 case RTE_FLOW_ITEM_TYPE_VLAN: 3598 if (copy_vlan) { 3599 memcpy(sfx_items, items, sizeof(*sfx_items)); 3600 /* 3601 * Convert to internal match item, it is used 3602 * for vlan push and set vid. 3603 */ 3604 sfx_items->type = MLX5_RTE_FLOW_ITEM_TYPE_VLAN; 3605 sfx_items++; 3606 } 3607 break; 3608 default: 3609 break; 3610 } 3611 } 3612 sfx_items->type = RTE_FLOW_ITEM_TYPE_END; 3613 sfx_items++; 3614 tag_spec = (struct mlx5_rte_flow_item_tag *)sfx_items; 3615 tag_spec->data = tag_id << MLX5_MTR_COLOR_BITS; 3616 tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error); 3617 tag_mask = tag_spec + 1; 3618 tag_mask->data = 0xffffff00; 3619 tag_item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG; 3620 tag_item->spec = tag_spec; 3621 tag_item->last = NULL; 3622 tag_item->mask = tag_mask; 3623 return tag_id; 3624 } 3625 3626 /** 3627 * Split action list having QUEUE/RSS for metadata register copy. 3628 * 3629 * Once Q/RSS action is detected in user's action list, the flow action 3630 * should be split in order to copy metadata registers, which will happen in 3631 * RX_CP_TBL like, 3632 * - CQE->flow_tag := reg_c[1] (MARK) 3633 * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META) 3634 * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL. 3635 * This is because the last action of each flow must be a terminal action 3636 * (QUEUE, RSS or DROP). 3637 * 3638 * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is 3639 * stored and kept in the mlx5_flow structure per each sub_flow. 3640 * 3641 * The Q/RSS action is replaced with, 3642 * - SET_TAG, setting the allocated flow ID to reg_c[2]. 3643 * And the following JUMP action is added at the end, 3644 * - JUMP, to RX_CP_TBL. 3645 * 3646 * A flow to perform remained Q/RSS action will be created in RX_ACT_TBL by 3647 * flow_create_split_metadata() routine. The flow will look like, 3648 * - If flow ID matches (reg_c[2]), perform Q/RSS. 3649 * 3650 * @param dev 3651 * Pointer to Ethernet device. 3652 * @param[out] split_actions 3653 * Pointer to store split actions to jump to CP_TBL. 3654 * @param[in] actions 3655 * Pointer to the list of original flow actions. 3656 * @param[in] qrss 3657 * Pointer to the Q/RSS action. 3658 * @param[in] actions_n 3659 * Number of original actions. 3660 * @param[out] error 3661 * Perform verbose error reporting if not NULL. 3662 * 3663 * @return 3664 * non-zero unique flow_id on success, otherwise 0 and 3665 * error/rte_error are set. 3666 */ 3667 static uint32_t 3668 flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, 3669 struct rte_flow_action *split_actions, 3670 const struct rte_flow_action *actions, 3671 const struct rte_flow_action *qrss, 3672 int actions_n, struct rte_flow_error *error) 3673 { 3674 struct mlx5_rte_flow_action_set_tag *set_tag; 3675 struct rte_flow_action_jump *jump; 3676 const int qrss_idx = qrss - actions; 3677 uint32_t flow_id = 0; 3678 int ret = 0; 3679 3680 /* 3681 * Given actions will be split 3682 * - Replace QUEUE/RSS action with SET_TAG to set flow ID. 
	 * - Add jump to mreg CP_TBL.
	 * As a result, there will be one more action.
	 */
	++actions_n;
	memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
	set_tag = (void *)(split_actions + actions_n);
	/*
	 * If the tag action is not set to void (i.e. we are not handling the
	 * meter suffix flow), add the tag action; the meter suffix flow
	 * already has the tag added.
	 */
	if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) {
		/*
		 * Allocate the new subflow ID. This one is unique within
		 * device and not shared with representors. Otherwise,
		 * we would have to resolve multi-thread access synch
		 * issue. Each flow on the shared device is appended
		 * with source vport identifier, so the resulting
		 * flows will be unique in the shared (by master and
		 * representors) domain even if they have coinciding
		 * IDs.
		 */
		flow_id = flow_qrss_get_id(dev);
		if (!flow_id)
			return rte_flow_error_set(error, ENOMEM,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "can't allocate id "
						  "for split Q/RSS subflow");
		/* Internal SET_TAG action to set flow ID. */
		*set_tag = (struct mlx5_rte_flow_action_set_tag){
			.data = flow_id,
		};
		ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error);
		if (ret < 0)
			return ret;
		set_tag->id = ret;
		/* Construct new actions array. */
		/* Replace QUEUE/RSS action. */
		split_actions[qrss_idx] = (struct rte_flow_action){
			.type = MLX5_RTE_FLOW_ACTION_TYPE_TAG,
			.conf = set_tag,
		};
	}
	/* JUMP action to jump to mreg copy table (CP_TBL). */
	jump = (void *)(set_tag + 1);
	*jump = (struct rte_flow_action_jump){
		.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
	};
	split_actions[actions_n - 2] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_JUMP,
		.conf = jump,
	};
	split_actions[actions_n - 1] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_END,
	};
	return flow_id;
}

/**
 * Extend the given action list for Tx metadata copy.
 *
 * Copy the given action list to ext_actions and add a flow metadata register
 * copy action in order to copy reg_a set by WQE to reg_c[0].
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] ext_actions
 *   Pointer to the extended action list.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[in] actions_n
 *   Number of actions in the list.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in] encap_idx
 *   The encap action index.
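 *
 * @note The caller is expected to size @p ext_actions for at least
 *   (actions_n + 1) rte_flow_action entries followed by one
 *   struct mlx5_flow_action_copy_mreg, since cp_mreg is carved out right
 *   after the action array, e.g. as flow_create_split_metadata() does:
 *
 *     act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
 *                sizeof(struct mlx5_flow_action_copy_mreg);
 *     ext_actions = rte_zmalloc(__func__, act_size, 0);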
3757 * 3758 * @return 3759 * 0 on success, negative value otherwise 3760 */ 3761 static int 3762 flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, 3763 struct rte_flow_action *ext_actions, 3764 const struct rte_flow_action *actions, 3765 int actions_n, struct rte_flow_error *error, 3766 int encap_idx) 3767 { 3768 struct mlx5_flow_action_copy_mreg *cp_mreg = 3769 (struct mlx5_flow_action_copy_mreg *) 3770 (ext_actions + actions_n + 1); 3771 int ret; 3772 3773 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error); 3774 if (ret < 0) 3775 return ret; 3776 cp_mreg->dst = ret; 3777 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error); 3778 if (ret < 0) 3779 return ret; 3780 cp_mreg->src = ret; 3781 if (encap_idx != 0) 3782 memcpy(ext_actions, actions, sizeof(*ext_actions) * encap_idx); 3783 if (encap_idx == actions_n - 1) { 3784 ext_actions[actions_n - 1] = (struct rte_flow_action){ 3785 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 3786 .conf = cp_mreg, 3787 }; 3788 ext_actions[actions_n] = (struct rte_flow_action){ 3789 .type = RTE_FLOW_ACTION_TYPE_END, 3790 }; 3791 } else { 3792 ext_actions[encap_idx] = (struct rte_flow_action){ 3793 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 3794 .conf = cp_mreg, 3795 }; 3796 memcpy(ext_actions + encap_idx + 1, actions + encap_idx, 3797 sizeof(*ext_actions) * (actions_n - encap_idx)); 3798 } 3799 return 0; 3800 } 3801 3802 /** 3803 * The splitting for metadata feature. 3804 * 3805 * - Q/RSS action on NIC Rx should be split in order to pass by 3806 * the mreg copy table (RX_CP_TBL) and then it jumps to the 3807 * action table (RX_ACT_TBL) which has the split Q/RSS action. 3808 * 3809 * - All the actions on NIC Tx should have a mreg copy action to 3810 * copy reg_a from WQE to reg_c[0]. 3811 * 3812 * @param dev 3813 * Pointer to Ethernet device. 3814 * @param[in] flow 3815 * Parent flow structure pointer. 3816 * @param[in] prefix_layers 3817 * Prefix flow layer flags. 3818 * @param[in] attr 3819 * Flow rule attributes. 3820 * @param[in] items 3821 * Pattern specification (list terminated by the END pattern item). 3822 * @param[in] actions 3823 * Associated actions (list terminated by the END action). 3824 * @param[in] external 3825 * This flow rule is created by request external to PMD. 3826 * @param[out] error 3827 * Perform verbose error reporting if not NULL. 3828 * @return 3829 * 0 on success, negative value otherwise 3830 */ 3831 static int 3832 flow_create_split_metadata(struct rte_eth_dev *dev, 3833 struct rte_flow *flow, 3834 uint64_t prefix_layers, 3835 const struct rte_flow_attr *attr, 3836 const struct rte_flow_item items[], 3837 const struct rte_flow_action actions[], 3838 bool external, struct rte_flow_error *error) 3839 { 3840 struct mlx5_priv *priv = dev->data->dev_private; 3841 struct mlx5_dev_config *config = &priv->config; 3842 const struct rte_flow_action *qrss = NULL; 3843 struct rte_flow_action *ext_actions = NULL; 3844 struct mlx5_flow *dev_flow = NULL; 3845 uint32_t qrss_id = 0; 3846 int mtr_sfx = 0; 3847 size_t act_size; 3848 int actions_n; 3849 int encap_idx; 3850 int ret; 3851 3852 /* Check whether extensive metadata feature is engaged. 
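	 * That is: dv_flow_en is set, dv_xmeta_en is not in legacy mode and
	 * the metadata reg_c registers are reported as available; otherwise
	 * fall back to creating a single, unsplit flow.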
*/ 3853 if (!config->dv_flow_en || 3854 config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 3855 !mlx5_flow_ext_mreg_supported(dev)) 3856 return flow_create_split_inner(dev, flow, NULL, prefix_layers, 3857 attr, items, actions, external, 3858 error); 3859 actions_n = flow_parse_metadata_split_actions_info(actions, &qrss, 3860 &encap_idx); 3861 if (qrss) { 3862 /* Exclude hairpin flows from splitting. */ 3863 if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) { 3864 const struct rte_flow_action_queue *queue; 3865 3866 queue = qrss->conf; 3867 if (mlx5_rxq_get_type(dev, queue->index) == 3868 MLX5_RXQ_TYPE_HAIRPIN) 3869 qrss = NULL; 3870 } else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) { 3871 const struct rte_flow_action_rss *rss; 3872 3873 rss = qrss->conf; 3874 if (mlx5_rxq_get_type(dev, rss->queue[0]) == 3875 MLX5_RXQ_TYPE_HAIRPIN) 3876 qrss = NULL; 3877 } 3878 } 3879 if (qrss) { 3880 /* Check if it is in meter suffix table. */ 3881 mtr_sfx = attr->group == (attr->transfer ? 3882 (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) : 3883 MLX5_FLOW_TABLE_LEVEL_SUFFIX); 3884 /* 3885 * Q/RSS action on NIC Rx should be split in order to pass by 3886 * the mreg copy table (RX_CP_TBL) and then it jumps to the 3887 * action table (RX_ACT_TBL) which has the split Q/RSS action. 3888 */ 3889 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) + 3890 sizeof(struct rte_flow_action_set_tag) + 3891 sizeof(struct rte_flow_action_jump); 3892 ext_actions = rte_zmalloc(__func__, act_size, 0); 3893 if (!ext_actions) 3894 return rte_flow_error_set(error, ENOMEM, 3895 RTE_FLOW_ERROR_TYPE_ACTION, 3896 NULL, "no memory to split " 3897 "metadata flow"); 3898 /* 3899 * If we are the suffix flow of meter, tag already exist. 3900 * Set the tag action to void. 3901 */ 3902 if (mtr_sfx) 3903 ext_actions[qrss - actions].type = 3904 RTE_FLOW_ACTION_TYPE_VOID; 3905 else 3906 ext_actions[qrss - actions].type = 3907 MLX5_RTE_FLOW_ACTION_TYPE_TAG; 3908 /* 3909 * Create the new actions list with removed Q/RSS action 3910 * and appended set tag and jump to register copy table 3911 * (RX_CP_TBL). We should preallocate unique tag ID here 3912 * in advance, because it is needed for set tag action. 3913 */ 3914 qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions, 3915 qrss, actions_n, error); 3916 if (!mtr_sfx && !qrss_id) { 3917 ret = -rte_errno; 3918 goto exit; 3919 } 3920 } else if (attr->egress && !attr->transfer) { 3921 /* 3922 * All the actions on NIC Tx should have a metadata register 3923 * copy action to copy reg_a from WQE to reg_c[meta] 3924 */ 3925 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) + 3926 sizeof(struct mlx5_flow_action_copy_mreg); 3927 ext_actions = rte_zmalloc(__func__, act_size, 0); 3928 if (!ext_actions) 3929 return rte_flow_error_set(error, ENOMEM, 3930 RTE_FLOW_ERROR_TYPE_ACTION, 3931 NULL, "no memory to split " 3932 "metadata flow"); 3933 /* Create the action list appended with copy register. */ 3934 ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions, 3935 actions_n, error, encap_idx); 3936 if (ret < 0) 3937 goto exit; 3938 } 3939 /* Add the unmodified original or prefix subflow. */ 3940 ret = flow_create_split_inner(dev, flow, &dev_flow, prefix_layers, attr, 3941 items, ext_actions ? ext_actions : 3942 actions, external, error); 3943 if (ret < 0) 3944 goto exit; 3945 MLX5_ASSERT(dev_flow); 3946 if (qrss) { 3947 const struct rte_flow_attr q_attr = { 3948 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, 3949 .ingress = 1, 3950 }; 3951 /* Internal PMD action to set register. 
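	 * The suffix subflow matches this TAG item against the flow ID that
	 * the prefix subflow writes into the MLX5_COPY_MARK register;
	 * q_tag_spec.id is filled in below once the register is resolved.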
		 */
		struct mlx5_rte_flow_item_tag q_tag_spec = {
			.data = qrss_id,
			.id = 0,
		};
		struct rte_flow_item q_items[] = {
			{
				.type = MLX5_RTE_FLOW_ITEM_TYPE_TAG,
				.spec = &q_tag_spec,
				.last = NULL,
				.mask = NULL,
			},
			{
				.type = RTE_FLOW_ITEM_TYPE_END,
			},
		};
		struct rte_flow_action q_actions[] = {
			{
				.type = qrss->type,
				.conf = qrss->conf,
			},
			{
				.type = RTE_FLOW_ACTION_TYPE_END,
			},
		};
		uint64_t layers = flow_get_prefix_layer_flags(dev_flow);

		/*
		 * Configure the tag item only if there is no meter subflow.
		 * Since the tag is already set in the meter suffix subflow
		 * we can just use the meter suffix items as they are.
		 */
		if (qrss_id) {
			/* Not meter subflow. */
			MLX5_ASSERT(!mtr_sfx);
			/*
			 * Put the unique id in the prefix flow: the prefix is
			 * destroyed after the suffix flow, and the id is
			 * freed only when no actual flows use it anymore,
			 * which makes identifier reallocation possible (for
			 * example, for other flows in other threads).
			 */
			dev_flow->handle->qrss_id = qrss_id;
			ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
						   error);
			if (ret < 0)
				goto exit;
			q_tag_spec.id = ret;
		}
		dev_flow = NULL;
		/* Add suffix subflow to execute Q/RSS. */
		ret = flow_create_split_inner(dev, flow, &dev_flow, layers,
					      &q_attr, mtr_sfx ? items :
					      q_items, q_actions,
					      external, error);
		if (ret < 0)
			goto exit;
		/* The qrss ID should only be freed on failure; clear it. */
		qrss_id = 0;
		MLX5_ASSERT(dev_flow);
	}

exit:
	/*
	 * We do not destroy the partially created sub_flows in case of error.
	 * They are included in the parent flow list and will be destroyed by
	 * flow_drv_destroy().
	 */
	flow_qrss_free_id(dev, qrss_id);
	rte_free(ext_actions);
	return ret;
}

/**
 * The splitting for meter feature.
 *
 * - The meter flow is split into two flows: a prefix flow and a
 *   suffix flow. The packets are only meaningful if they pass the
 *   prefix meter action.
 *
 * - Reg_C_5 is used to match the packet between the prefix and
 *   suffix flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Parent flow structure pointer.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[in] external
 *   This flow rule is created by request external to PMD.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
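 *
 * @note Rough shape of the result, as implemented below: with a meter action
 *   present, a prefix subflow is created in the original group carrying the
 *   original items plus the meter/decap/tag prefix actions, and the remaining
 *   actions go to a suffix subflow in group MLX5_FLOW_TABLE_LEVEL_SUFFIX
 *   (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1 for transfer rules) which matches the
 *   tag written by the prefix. Without a meter, the original flow is passed
 *   unchanged to the metadata splitter.
 *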
 * @return
 *   0 on success, negative value otherwise.
 */
static int
flow_create_split_meter(struct rte_eth_dev *dev,
			struct rte_flow *flow,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item items[],
			const struct rte_flow_action actions[],
			bool external, struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_action *sfx_actions = NULL;
	struct rte_flow_action *pre_actions = NULL;
	struct rte_flow_item *sfx_items = NULL;
	struct mlx5_flow *dev_flow = NULL;
	struct rte_flow_attr sfx_attr = *attr;
	uint32_t mtr = 0;
	uint32_t mtr_tag_id = 0;
	size_t act_size;
	size_t item_size;
	int actions_n = 0;
	int ret;

	if (priv->mtr_en)
		actions_n = flow_check_meter_action(actions, &mtr);
	if (mtr) {
		/* The five prefix actions: meter, decap, encap, tag, end. */
		act_size = sizeof(struct rte_flow_action) * (actions_n + 5) +
			   sizeof(struct mlx5_rte_flow_action_set_tag);
		/* tag, vlan, port id, end. */
#define METER_SUFFIX_ITEM 4
		item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
			    sizeof(struct mlx5_rte_flow_item_tag) * 2;
		sfx_actions = rte_zmalloc(__func__, (act_size + item_size), 0);
		if (!sfx_actions)
			return rte_flow_error_set(error, ENOMEM,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "no memory to split "
						  "meter flow");
		sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
						     act_size);
		pre_actions = sfx_actions + actions_n;
		mtr_tag_id = flow_meter_split_prep(dev, items, sfx_items,
						   actions, sfx_actions,
						   pre_actions);
		if (!mtr_tag_id) {
			ret = -rte_errno;
			goto exit;
		}
		/* Add the prefix subflow. */
		ret = flow_create_split_inner(dev, flow, &dev_flow, 0, attr,
					      items, pre_actions, external,
					      error);
		if (ret) {
			ret = -rte_errno;
			goto exit;
		}
		dev_flow->handle->mtr_flow_id = mtr_tag_id;
		/* Set the suffix group attribute. */
		sfx_attr.group = sfx_attr.transfer ?
				 (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
				 MLX5_FLOW_TABLE_LEVEL_SUFFIX;
	}
	/* Add the meter suffix or the original (non-meter) subflow. */
	ret = flow_create_split_metadata(dev, flow, dev_flow ?
					 flow_get_prefix_layer_flags(dev_flow) :
					 0, &sfx_attr,
					 sfx_items ? sfx_items : items,
					 sfx_actions ? sfx_actions : actions,
					 external, error);
exit:
	if (sfx_actions)
		rte_free(sfx_actions);
	return ret;
}

/**
 * Split the flow into a set of subflows. The splitters might be linked
 * in the chain, like this:
 *   flow_create_split_outer() calls:
 *     flow_create_split_meter() calls:
 *       flow_create_split_metadata(meter_subflow_0) calls:
 *         flow_create_split_inner(metadata_subflow_0)
 *         flow_create_split_inner(metadata_subflow_1)
 *         flow_create_split_inner(metadata_subflow_2)
 *       flow_create_split_metadata(meter_subflow_1) calls:
 *         flow_create_split_inner(metadata_subflow_0)
 *         flow_create_split_inner(metadata_subflow_1)
 *         flow_create_split_inner(metadata_subflow_2)
 *
 * This provides a flexible way to add new levels of flow splitting.
 * All successfully created subflows are included in the parent flow
 * dev_flow list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Parent flow structure pointer.
 * @param[in] attr
 *   Flow rule attributes.
4149 * @param[in] items 4150 * Pattern specification (list terminated by the END pattern item). 4151 * @param[in] actions 4152 * Associated actions (list terminated by the END action). 4153 * @param[in] external 4154 * This flow rule is created by request external to PMD. 4155 * @param[out] error 4156 * Perform verbose error reporting if not NULL. 4157 * @return 4158 * 0 on success, negative value otherwise 4159 */ 4160 static int 4161 flow_create_split_outer(struct rte_eth_dev *dev, 4162 struct rte_flow *flow, 4163 const struct rte_flow_attr *attr, 4164 const struct rte_flow_item items[], 4165 const struct rte_flow_action actions[], 4166 bool external, struct rte_flow_error *error) 4167 { 4168 int ret; 4169 4170 ret = flow_create_split_meter(dev, flow, attr, items, 4171 actions, external, error); 4172 MLX5_ASSERT(ret <= 0); 4173 return ret; 4174 } 4175 4176 /** 4177 * Create a flow and add it to @p list. 4178 * 4179 * @param dev 4180 * Pointer to Ethernet device. 4181 * @param list 4182 * Pointer to a TAILQ flow list. If this parameter NULL, 4183 * no list insertion occurred, flow is just created, 4184 * this is caller's responsibility to track the 4185 * created flow. 4186 * @param[in] attr 4187 * Flow rule attributes. 4188 * @param[in] items 4189 * Pattern specification (list terminated by the END pattern item). 4190 * @param[in] actions 4191 * Associated actions (list terminated by the END action). 4192 * @param[in] external 4193 * This flow rule is created by request external to PMD. 4194 * @param[out] error 4195 * Perform verbose error reporting if not NULL. 4196 * 4197 * @return 4198 * A flow on success, NULL otherwise and rte_errno is set. 4199 */ 4200 static struct rte_flow * 4201 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, 4202 const struct rte_flow_attr *attr, 4203 const struct rte_flow_item items[], 4204 const struct rte_flow_action actions[], 4205 bool external, struct rte_flow_error *error) 4206 { 4207 struct mlx5_priv *priv = dev->data->dev_private; 4208 struct rte_flow *flow = NULL; 4209 struct mlx5_flow *dev_flow; 4210 const struct rte_flow_action_rss *rss; 4211 union { 4212 struct rte_flow_expand_rss buf; 4213 uint8_t buffer[2048]; 4214 } expand_buffer; 4215 union { 4216 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; 4217 uint8_t buffer[2048]; 4218 } actions_rx; 4219 union { 4220 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; 4221 uint8_t buffer[2048]; 4222 } actions_hairpin_tx; 4223 union { 4224 struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS]; 4225 uint8_t buffer[2048]; 4226 } items_tx; 4227 struct rte_flow_expand_rss *buf = &expand_buffer.buf; 4228 const struct rte_flow_action *p_actions_rx = actions; 4229 uint32_t i; 4230 uint32_t flow_size; 4231 int hairpin_flow = 0; 4232 uint32_t hairpin_id = 0; 4233 struct rte_flow_attr attr_tx = { .priority = 0 }; 4234 int ret = flow_drv_validate(dev, attr, items, p_actions_rx, external, 4235 error); 4236 4237 if (ret < 0) 4238 return NULL; 4239 hairpin_flow = flow_check_hairpin_split(dev, attr, actions); 4240 if (hairpin_flow > 0) { 4241 if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) { 4242 rte_errno = EINVAL; 4243 return NULL; 4244 } 4245 flow_hairpin_split(dev, actions, actions_rx.actions, 4246 actions_hairpin_tx.actions, items_tx.items, 4247 &hairpin_id); 4248 p_actions_rx = actions_rx.actions; 4249 } 4250 flow_size = sizeof(struct rte_flow); 4251 rss = flow_get_rss_action(p_actions_rx); 4252 if (rss) 4253 flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t), 4254 sizeof(void *)); 
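	/*
	 * Even without an RSS action one queue slot is still reserved after
	 * the rte_flow structure; flow->rss.queue below always points right
	 * past it ((void *)(flow + 1)).
	 */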
4255 else 4256 flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *)); 4257 flow = rte_calloc(__func__, 1, flow_size, 0); 4258 if (!flow) { 4259 rte_errno = ENOMEM; 4260 goto error_before_flow; 4261 } 4262 flow->drv_type = flow_get_drv_type(dev, attr); 4263 if (hairpin_id != 0) 4264 flow->hairpin_flow_id = hairpin_id; 4265 MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN && 4266 flow->drv_type < MLX5_FLOW_TYPE_MAX); 4267 flow->rss.queue = (void *)(flow + 1); 4268 if (rss) { 4269 /* 4270 * The following information is required by 4271 * mlx5_flow_hashfields_adjust() in advance. 4272 */ 4273 flow->rss.level = rss->level; 4274 /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */ 4275 flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types; 4276 } 4277 flow->dev_handles = 0; 4278 if (rss && rss->types) { 4279 unsigned int graph_root; 4280 4281 graph_root = find_graph_root(items, rss->level); 4282 ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer), 4283 items, rss->types, 4284 mlx5_support_expansion, 4285 graph_root); 4286 MLX5_ASSERT(ret > 0 && 4287 (unsigned int)ret < sizeof(expand_buffer.buffer)); 4288 } else { 4289 buf->entries = 1; 4290 buf->entry[0].pattern = (void *)(uintptr_t)items; 4291 } 4292 /* 4293 * Record the start index when there is a nested call. All sub-flows 4294 * need to be translated before another calling. 4295 * No need to use ping-pong buffer to save memory here. 4296 */ 4297 if (priv->flow_idx) { 4298 MLX5_ASSERT(!priv->flow_nested_idx); 4299 priv->flow_nested_idx = priv->flow_idx; 4300 } 4301 for (i = 0; i < buf->entries; ++i) { 4302 /* 4303 * The splitter may create multiple dev_flows, 4304 * depending on configuration. In the simplest 4305 * case it just creates unmodified original flow. 4306 */ 4307 ret = flow_create_split_outer(dev, flow, attr, 4308 buf->entry[i].pattern, 4309 p_actions_rx, external, 4310 error); 4311 if (ret < 0) 4312 goto error; 4313 } 4314 /* Create the tx flow. */ 4315 if (hairpin_flow) { 4316 attr_tx.group = MLX5_HAIRPIN_TX_TABLE; 4317 attr_tx.ingress = 0; 4318 attr_tx.egress = 1; 4319 dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items, 4320 actions_hairpin_tx.actions, error); 4321 if (!dev_flow) 4322 goto error; 4323 dev_flow->flow = flow; 4324 dev_flow->external = 0; 4325 SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx, 4326 dev_flow->handle, next); 4327 ret = flow_drv_translate(dev, dev_flow, &attr_tx, 4328 items_tx.items, 4329 actions_hairpin_tx.actions, error); 4330 if (ret < 0) 4331 goto error; 4332 } 4333 /* 4334 * Update the metadata register copy table. If extensive 4335 * metadata feature is enabled and registers are supported 4336 * we might create the extra rte_flow for each unique 4337 * MARK/FLAG action ID. 4338 * 4339 * The table is updated for ingress Flows only, because 4340 * the egress Flows belong to the different device and 4341 * copy table should be updated in peer NIC Rx domain. 4342 */ 4343 if (attr->ingress && 4344 (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) { 4345 ret = flow_mreg_update_copy_table(dev, flow, actions, error); 4346 if (ret) 4347 goto error; 4348 } 4349 /* 4350 * If the flow is external (from application) OR device is started, then 4351 * the flow will be applied immediately. 
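	 * Internal flows created while the port is stopped are only linked
	 * into the flow list here; flow_drv_apply() is skipped for them and
	 * they are expected to be applied later, e.g. via mlx5_flow_start().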
4352 */ 4353 if (external || dev->data->dev_started) { 4354 ret = flow_drv_apply(dev, flow, error); 4355 if (ret < 0) 4356 goto error; 4357 } 4358 if (list) 4359 TAILQ_INSERT_TAIL(list, flow, next); 4360 flow_rxq_flags_set(dev, flow); 4361 /* Nested flow creation index recovery. */ 4362 priv->flow_idx = priv->flow_nested_idx; 4363 if (priv->flow_nested_idx) 4364 priv->flow_nested_idx = 0; 4365 return flow; 4366 error: 4367 MLX5_ASSERT(flow); 4368 ret = rte_errno; /* Save rte_errno before cleanup. */ 4369 flow_mreg_del_copy_action(dev, flow); 4370 flow_drv_destroy(dev, flow); 4371 rte_free(flow); 4372 rte_errno = ret; /* Restore rte_errno. */ 4373 error_before_flow: 4374 ret = rte_errno; 4375 if (hairpin_id) 4376 mlx5_flow_id_release(priv->sh->flow_id_pool, 4377 hairpin_id); 4378 rte_errno = ret; 4379 priv->flow_idx = priv->flow_nested_idx; 4380 if (priv->flow_nested_idx) 4381 priv->flow_nested_idx = 0; 4382 return NULL; 4383 } 4384 4385 /** 4386 * Create a dedicated flow rule on e-switch table 0 (root table), to direct all 4387 * incoming packets to table 1. 4388 * 4389 * Other flow rules, requested for group n, will be created in 4390 * e-switch table n+1. 4391 * Jump action to e-switch group n will be created to group n+1. 4392 * 4393 * Used when working in switchdev mode, to utilise advantages of table 1 4394 * and above. 4395 * 4396 * @param dev 4397 * Pointer to Ethernet device. 4398 * 4399 * @return 4400 * Pointer to flow on success, NULL otherwise and rte_errno is set. 4401 */ 4402 struct rte_flow * 4403 mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev) 4404 { 4405 const struct rte_flow_attr attr = { 4406 .group = 0, 4407 .priority = 0, 4408 .ingress = 1, 4409 .egress = 0, 4410 .transfer = 1, 4411 }; 4412 const struct rte_flow_item pattern = { 4413 .type = RTE_FLOW_ITEM_TYPE_END, 4414 }; 4415 struct rte_flow_action_jump jump = { 4416 .group = 1, 4417 }; 4418 const struct rte_flow_action actions[] = { 4419 { 4420 .type = RTE_FLOW_ACTION_TYPE_JUMP, 4421 .conf = &jump, 4422 }, 4423 { 4424 .type = RTE_FLOW_ACTION_TYPE_END, 4425 }, 4426 }; 4427 struct mlx5_priv *priv = dev->data->dev_private; 4428 struct rte_flow_error error; 4429 4430 return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern, 4431 actions, false, &error); 4432 } 4433 4434 /** 4435 * Create a flow. 4436 * 4437 * @see rte_flow_create() 4438 * @see rte_flow_ops 4439 */ 4440 struct rte_flow * 4441 mlx5_flow_create(struct rte_eth_dev *dev, 4442 const struct rte_flow_attr *attr, 4443 const struct rte_flow_item items[], 4444 const struct rte_flow_action actions[], 4445 struct rte_flow_error *error) 4446 { 4447 struct mlx5_priv *priv = dev->data->dev_private; 4448 4449 /* 4450 * If the device is not started yet, it is not allowed to created a 4451 * flow from application. PMD default flows and traffic control flows 4452 * are not affected. 4453 */ 4454 if (unlikely(!dev->data->dev_started)) { 4455 rte_errno = ENODEV; 4456 DRV_LOG(DEBUG, "port %u is not started when " 4457 "inserting a flow", dev->data->port_id); 4458 return NULL; 4459 } 4460 return flow_list_create(dev, &priv->flows, 4461 attr, items, actions, true, error); 4462 } 4463 4464 /** 4465 * Destroy a flow in a list. 4466 * 4467 * @param dev 4468 * Pointer to Ethernet device. 4469 * @param list 4470 * Pointer to a TAILQ flow list. If this parameter NULL, 4471 * there is no flow removal from the list. 4472 * @param[in] flow 4473 * Flow to destroy. 
4474 */ 4475 static void 4476 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, 4477 struct rte_flow *flow) 4478 { 4479 struct mlx5_priv *priv = dev->data->dev_private; 4480 4481 /* 4482 * Update RX queue flags only if port is started, otherwise it is 4483 * already clean. 4484 */ 4485 if (dev->data->dev_started) 4486 flow_rxq_flags_trim(dev, flow); 4487 if (flow->hairpin_flow_id) 4488 mlx5_flow_id_release(priv->sh->flow_id_pool, 4489 flow->hairpin_flow_id); 4490 flow_drv_destroy(dev, flow); 4491 if (list) 4492 TAILQ_REMOVE(list, flow, next); 4493 flow_mreg_del_copy_action(dev, flow); 4494 rte_free(flow->fdir); 4495 rte_free(flow); 4496 } 4497 4498 /** 4499 * Destroy all flows. 4500 * 4501 * @param dev 4502 * Pointer to Ethernet device. 4503 * @param list 4504 * Pointer to a TAILQ flow list. 4505 * @param active 4506 * If flushing is called avtively. 4507 */ 4508 void 4509 mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list, 4510 bool active) 4511 { 4512 uint32_t num_flushed = 0; 4513 4514 while (!TAILQ_EMPTY(list)) { 4515 struct rte_flow *flow; 4516 4517 flow = TAILQ_FIRST(list); 4518 flow_list_destroy(dev, list, flow); 4519 num_flushed++; 4520 } 4521 if (active) { 4522 DRV_LOG(INFO, "port %u: %u flows flushed before stopping", 4523 dev->data->port_id, num_flushed); 4524 } 4525 } 4526 4527 /** 4528 * Remove all flows. 4529 * 4530 * @param dev 4531 * Pointer to Ethernet device. 4532 * @param list 4533 * Pointer to a TAILQ flow list. 4534 */ 4535 void 4536 mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) 4537 { 4538 struct rte_flow *flow; 4539 4540 TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) { 4541 flow_drv_remove(dev, flow); 4542 flow_mreg_stop_copy_action(dev, flow); 4543 } 4544 flow_mreg_del_default_copy_action(dev); 4545 flow_rxq_flags_clear(dev); 4546 } 4547 4548 /** 4549 * Add all flows. 4550 * 4551 * @param dev 4552 * Pointer to Ethernet device. 4553 * @param list 4554 * Pointer to a TAILQ flow list. 4555 * 4556 * @return 4557 * 0 on success, a negative errno value otherwise and rte_errno is set. 4558 */ 4559 int 4560 mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) 4561 { 4562 struct rte_flow *flow; 4563 struct rte_flow_error error; 4564 int ret = 0; 4565 4566 /* Make sure default copy action (reg_c[0] -> reg_b) is created. */ 4567 ret = flow_mreg_add_default_copy_action(dev, &error); 4568 if (ret < 0) 4569 return -rte_errno; 4570 /* Apply Flows created by application. */ 4571 TAILQ_FOREACH(flow, list, next) { 4572 ret = flow_mreg_start_copy_action(dev, flow); 4573 if (ret < 0) 4574 goto error; 4575 ret = flow_drv_apply(dev, flow, &error); 4576 if (ret < 0) 4577 goto error; 4578 flow_rxq_flags_set(dev, flow); 4579 } 4580 return 0; 4581 error: 4582 ret = rte_errno; /* Save rte_errno before cleanup. */ 4583 mlx5_flow_stop(dev, list); 4584 rte_errno = ret; /* Restore rte_errno. */ 4585 return -rte_errno; 4586 } 4587 4588 /** 4589 * Stop all default actions for flows. 4590 * 4591 * @param dev 4592 * Pointer to Ethernet device. 4593 */ 4594 void 4595 mlx5_flow_stop_default(struct rte_eth_dev *dev) 4596 { 4597 flow_mreg_del_default_copy_action(dev); 4598 } 4599 4600 /** 4601 * Start all default actions for flows. 4602 * 4603 * @param dev 4604 * Pointer to Ethernet device. 4605 * @return 4606 * 0 on success, a negative errno value otherwise and rte_errno is set. 
4607 */ 4608 int 4609 mlx5_flow_start_default(struct rte_eth_dev *dev) 4610 { 4611 struct rte_flow_error error; 4612 4613 /* Make sure default copy action (reg_c[0] -> reg_b) is created. */ 4614 return flow_mreg_add_default_copy_action(dev, &error); 4615 } 4616 4617 /** 4618 * Allocate intermediate resources for flow creation. 4619 * 4620 * @param dev 4621 * Pointer to Ethernet device. 4622 */ 4623 void 4624 mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev) 4625 { 4626 struct mlx5_priv *priv = dev->data->dev_private; 4627 4628 if (!priv->inter_flows) 4629 priv->inter_flows = rte_calloc(__func__, MLX5_NUM_MAX_DEV_FLOWS, 4630 sizeof(struct mlx5_flow), 0); 4631 /* Reset the index. */ 4632 priv->flow_idx = 0; 4633 priv->flow_nested_idx = 0; 4634 } 4635 4636 /** 4637 * Free intermediate resources for flows. 4638 * 4639 * @param dev 4640 * Pointer to Ethernet device. 4641 */ 4642 void 4643 mlx5_flow_free_intermediate(struct rte_eth_dev *dev) 4644 { 4645 struct mlx5_priv *priv = dev->data->dev_private; 4646 4647 rte_free(priv->inter_flows); 4648 priv->inter_flows = NULL; 4649 } 4650 4651 /** 4652 * Verify the flow list is empty 4653 * 4654 * @param dev 4655 * Pointer to Ethernet device. 4656 * 4657 * @return the number of flows not released. 4658 */ 4659 int 4660 mlx5_flow_verify(struct rte_eth_dev *dev) 4661 { 4662 struct mlx5_priv *priv = dev->data->dev_private; 4663 struct rte_flow *flow; 4664 int ret = 0; 4665 4666 TAILQ_FOREACH(flow, &priv->flows, next) { 4667 DRV_LOG(DEBUG, "port %u flow %p still referenced", 4668 dev->data->port_id, (void *)flow); 4669 ++ret; 4670 } 4671 return ret; 4672 } 4673 4674 /** 4675 * Enable default hairpin egress flow. 4676 * 4677 * @param dev 4678 * Pointer to Ethernet device. 4679 * @param queue 4680 * The queue index. 4681 * 4682 * @return 4683 * 0 on success, a negative errno value otherwise and rte_errno is set. 4684 */ 4685 int 4686 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, 4687 uint32_t queue) 4688 { 4689 struct mlx5_priv *priv = dev->data->dev_private; 4690 const struct rte_flow_attr attr = { 4691 .egress = 1, 4692 .priority = 0, 4693 }; 4694 struct mlx5_rte_flow_item_tx_queue queue_spec = { 4695 .queue = queue, 4696 }; 4697 struct mlx5_rte_flow_item_tx_queue queue_mask = { 4698 .queue = UINT32_MAX, 4699 }; 4700 struct rte_flow_item items[] = { 4701 { 4702 .type = MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE, 4703 .spec = &queue_spec, 4704 .last = NULL, 4705 .mask = &queue_mask, 4706 }, 4707 { 4708 .type = RTE_FLOW_ITEM_TYPE_END, 4709 }, 4710 }; 4711 struct rte_flow_action_jump jump = { 4712 .group = MLX5_HAIRPIN_TX_TABLE, 4713 }; 4714 struct rte_flow_action actions[2]; 4715 struct rte_flow *flow; 4716 struct rte_flow_error error; 4717 4718 actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP; 4719 actions[0].conf = &jump; 4720 actions[1].type = RTE_FLOW_ACTION_TYPE_END; 4721 flow = flow_list_create(dev, &priv->ctrl_flows, 4722 &attr, items, actions, false, &error); 4723 if (!flow) { 4724 DRV_LOG(DEBUG, 4725 "Failed to create ctrl flow: rte_errno(%d)," 4726 " type(%d), message(%s)", 4727 rte_errno, error.type, 4728 error.message ? error.message : " (no stated reason)"); 4729 return -rte_errno; 4730 } 4731 return 0; 4732 } 4733 4734 /** 4735 * Enable a control flow configured from the control plane. 4736 * 4737 * @param dev 4738 * Pointer to Ethernet device. 4739 * @param eth_spec 4740 * An Ethernet flow spec to apply. 4741 * @param eth_mask 4742 * An Ethernet flow mask to apply. 4743 * @param vlan_spec 4744 * A VLAN flow spec to apply. 
4745 * @param vlan_mask 4746 * A VLAN flow mask to apply. 4747 * 4748 * @return 4749 * 0 on success, a negative errno value otherwise and rte_errno is set. 4750 */ 4751 int 4752 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, 4753 struct rte_flow_item_eth *eth_spec, 4754 struct rte_flow_item_eth *eth_mask, 4755 struct rte_flow_item_vlan *vlan_spec, 4756 struct rte_flow_item_vlan *vlan_mask) 4757 { 4758 struct mlx5_priv *priv = dev->data->dev_private; 4759 const struct rte_flow_attr attr = { 4760 .ingress = 1, 4761 .priority = MLX5_FLOW_PRIO_RSVD, 4762 }; 4763 struct rte_flow_item items[] = { 4764 { 4765 .type = RTE_FLOW_ITEM_TYPE_ETH, 4766 .spec = eth_spec, 4767 .last = NULL, 4768 .mask = eth_mask, 4769 }, 4770 { 4771 .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN : 4772 RTE_FLOW_ITEM_TYPE_END, 4773 .spec = vlan_spec, 4774 .last = NULL, 4775 .mask = vlan_mask, 4776 }, 4777 { 4778 .type = RTE_FLOW_ITEM_TYPE_END, 4779 }, 4780 }; 4781 uint16_t queue[priv->reta_idx_n]; 4782 struct rte_flow_action_rss action_rss = { 4783 .func = RTE_ETH_HASH_FUNCTION_DEFAULT, 4784 .level = 0, 4785 .types = priv->rss_conf.rss_hf, 4786 .key_len = priv->rss_conf.rss_key_len, 4787 .queue_num = priv->reta_idx_n, 4788 .key = priv->rss_conf.rss_key, 4789 .queue = queue, 4790 }; 4791 struct rte_flow_action actions[] = { 4792 { 4793 .type = RTE_FLOW_ACTION_TYPE_RSS, 4794 .conf = &action_rss, 4795 }, 4796 { 4797 .type = RTE_FLOW_ACTION_TYPE_END, 4798 }, 4799 }; 4800 struct rte_flow *flow; 4801 struct rte_flow_error error; 4802 unsigned int i; 4803 4804 if (!priv->reta_idx_n || !priv->rxqs_n) { 4805 return 0; 4806 } 4807 for (i = 0; i != priv->reta_idx_n; ++i) 4808 queue[i] = (*priv->reta_idx)[i]; 4809 flow = flow_list_create(dev, &priv->ctrl_flows, 4810 &attr, items, actions, false, &error); 4811 if (!flow) 4812 return -rte_errno; 4813 return 0; 4814 } 4815 4816 /** 4817 * Enable a flow control configured from the control plane. 4818 * 4819 * @param dev 4820 * Pointer to Ethernet device. 4821 * @param eth_spec 4822 * An Ethernet flow spec to apply. 4823 * @param eth_mask 4824 * An Ethernet flow mask to apply. 4825 * 4826 * @return 4827 * 0 on success, a negative errno value otherwise and rte_errno is set. 4828 */ 4829 int 4830 mlx5_ctrl_flow(struct rte_eth_dev *dev, 4831 struct rte_flow_item_eth *eth_spec, 4832 struct rte_flow_item_eth *eth_mask) 4833 { 4834 return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL); 4835 } 4836 4837 /** 4838 * Destroy a flow. 4839 * 4840 * @see rte_flow_destroy() 4841 * @see rte_flow_ops 4842 */ 4843 int 4844 mlx5_flow_destroy(struct rte_eth_dev *dev, 4845 struct rte_flow *flow, 4846 struct rte_flow_error *error __rte_unused) 4847 { 4848 struct mlx5_priv *priv = dev->data->dev_private; 4849 4850 flow_list_destroy(dev, &priv->flows, flow); 4851 return 0; 4852 } 4853 4854 /** 4855 * Destroy all flows. 4856 * 4857 * @see rte_flow_flush() 4858 * @see rte_flow_ops 4859 */ 4860 int 4861 mlx5_flow_flush(struct rte_eth_dev *dev, 4862 struct rte_flow_error *error __rte_unused) 4863 { 4864 struct mlx5_priv *priv = dev->data->dev_private; 4865 4866 mlx5_flow_list_flush(dev, &priv->flows, false); 4867 return 0; 4868 } 4869 4870 /** 4871 * Isolated mode. 
4872 * 4873 * @see rte_flow_isolate() 4874 * @see rte_flow_ops 4875 */ 4876 int 4877 mlx5_flow_isolate(struct rte_eth_dev *dev, 4878 int enable, 4879 struct rte_flow_error *error) 4880 { 4881 struct mlx5_priv *priv = dev->data->dev_private; 4882 4883 if (dev->data->dev_started) { 4884 rte_flow_error_set(error, EBUSY, 4885 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 4886 NULL, 4887 "port must be stopped first"); 4888 return -rte_errno; 4889 } 4890 priv->isolated = !!enable; 4891 if (enable) 4892 dev->dev_ops = &mlx5_dev_ops_isolate; 4893 else 4894 dev->dev_ops = &mlx5_dev_ops; 4895 return 0; 4896 } 4897 4898 /** 4899 * Query a flow. 4900 * 4901 * @see rte_flow_query() 4902 * @see rte_flow_ops 4903 */ 4904 static int 4905 flow_drv_query(struct rte_eth_dev *dev, 4906 struct rte_flow *flow, 4907 const struct rte_flow_action *actions, 4908 void *data, 4909 struct rte_flow_error *error) 4910 { 4911 const struct mlx5_flow_driver_ops *fops; 4912 enum mlx5_flow_drv_type ftype = flow->drv_type; 4913 4914 MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX); 4915 fops = flow_get_drv_ops(ftype); 4916 4917 return fops->query(dev, flow, actions, data, error); 4918 } 4919 4920 /** 4921 * Query a flow. 4922 * 4923 * @see rte_flow_query() 4924 * @see rte_flow_ops 4925 */ 4926 int 4927 mlx5_flow_query(struct rte_eth_dev *dev, 4928 struct rte_flow *flow, 4929 const struct rte_flow_action *actions, 4930 void *data, 4931 struct rte_flow_error *error) 4932 { 4933 int ret; 4934 4935 ret = flow_drv_query(dev, flow, actions, data, error); 4936 if (ret < 0) 4937 return ret; 4938 return 0; 4939 } 4940 4941 /** 4942 * Convert a flow director filter to a generic flow. 4943 * 4944 * @param dev 4945 * Pointer to Ethernet device. 4946 * @param fdir_filter 4947 * Flow director filter to add. 4948 * @param attributes 4949 * Generic flow parameters structure. 4950 * 4951 * @return 4952 * 0 on success, a negative errno value otherwise and rte_errno is set. 4953 */ 4954 static int 4955 flow_fdir_filter_convert(struct rte_eth_dev *dev, 4956 const struct rte_eth_fdir_filter *fdir_filter, 4957 struct mlx5_fdir *attributes) 4958 { 4959 struct mlx5_priv *priv = dev->data->dev_private; 4960 const struct rte_eth_fdir_input *input = &fdir_filter->input; 4961 const struct rte_eth_fdir_masks *mask = 4962 &dev->data->dev_conf.fdir_conf.mask; 4963 4964 /* Validate queue number. */ 4965 if (fdir_filter->action.rx_queue >= priv->rxqs_n) { 4966 DRV_LOG(ERR, "port %u invalid queue number %d", 4967 dev->data->port_id, fdir_filter->action.rx_queue); 4968 rte_errno = EINVAL; 4969 return -rte_errno; 4970 } 4971 attributes->attr.ingress = 1; 4972 attributes->items[0] = (struct rte_flow_item) { 4973 .type = RTE_FLOW_ITEM_TYPE_ETH, 4974 .spec = &attributes->l2, 4975 .mask = &attributes->l2_mask, 4976 }; 4977 switch (fdir_filter->action.behavior) { 4978 case RTE_ETH_FDIR_ACCEPT: 4979 attributes->actions[0] = (struct rte_flow_action){ 4980 .type = RTE_FLOW_ACTION_TYPE_QUEUE, 4981 .conf = &attributes->queue, 4982 }; 4983 break; 4984 case RTE_ETH_FDIR_REJECT: 4985 attributes->actions[0] = (struct rte_flow_action){ 4986 .type = RTE_FLOW_ACTION_TYPE_DROP, 4987 }; 4988 break; 4989 default: 4990 DRV_LOG(ERR, "port %u invalid behavior %d", 4991 dev->data->port_id, 4992 fdir_filter->action.behavior); 4993 rte_errno = ENOTSUP; 4994 return -rte_errno; 4995 } 4996 attributes->queue.index = fdir_filter->action.rx_queue; 4997 /* Handle L3. 
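	 * Translate the FDIR input/mask pair into an IPv4 or IPv6 flow item
	 * stored in attributes->items[1].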
*/ 4998 switch (fdir_filter->input.flow_type) { 4999 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 5000 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 5001 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 5002 attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){ 5003 .src_addr = input->flow.ip4_flow.src_ip, 5004 .dst_addr = input->flow.ip4_flow.dst_ip, 5005 .time_to_live = input->flow.ip4_flow.ttl, 5006 .type_of_service = input->flow.ip4_flow.tos, 5007 }; 5008 attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){ 5009 .src_addr = mask->ipv4_mask.src_ip, 5010 .dst_addr = mask->ipv4_mask.dst_ip, 5011 .time_to_live = mask->ipv4_mask.ttl, 5012 .type_of_service = mask->ipv4_mask.tos, 5013 .next_proto_id = mask->ipv4_mask.proto, 5014 }; 5015 attributes->items[1] = (struct rte_flow_item){ 5016 .type = RTE_FLOW_ITEM_TYPE_IPV4, 5017 .spec = &attributes->l3, 5018 .mask = &attributes->l3_mask, 5019 }; 5020 break; 5021 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 5022 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 5023 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 5024 attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){ 5025 .hop_limits = input->flow.ipv6_flow.hop_limits, 5026 .proto = input->flow.ipv6_flow.proto, 5027 }; 5028 5029 memcpy(attributes->l3.ipv6.hdr.src_addr, 5030 input->flow.ipv6_flow.src_ip, 5031 RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); 5032 memcpy(attributes->l3.ipv6.hdr.dst_addr, 5033 input->flow.ipv6_flow.dst_ip, 5034 RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); 5035 memcpy(attributes->l3_mask.ipv6.hdr.src_addr, 5036 mask->ipv6_mask.src_ip, 5037 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); 5038 memcpy(attributes->l3_mask.ipv6.hdr.dst_addr, 5039 mask->ipv6_mask.dst_ip, 5040 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); 5041 attributes->items[1] = (struct rte_flow_item){ 5042 .type = RTE_FLOW_ITEM_TYPE_IPV6, 5043 .spec = &attributes->l3, 5044 .mask = &attributes->l3_mask, 5045 }; 5046 break; 5047 default: 5048 DRV_LOG(ERR, "port %u invalid flow type%d", 5049 dev->data->port_id, fdir_filter->input.flow_type); 5050 rte_errno = ENOTSUP; 5051 return -rte_errno; 5052 } 5053 /* Handle L4. 
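	 * Translate the UDP/TCP source and destination ports into a flow
	 * item stored in attributes->items[2]; the *_OTHER flow types carry
	 * no L4 item.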
*/ 5054 switch (fdir_filter->input.flow_type) { 5055 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 5056 attributes->l4.udp.hdr = (struct rte_udp_hdr){ 5057 .src_port = input->flow.udp4_flow.src_port, 5058 .dst_port = input->flow.udp4_flow.dst_port, 5059 }; 5060 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ 5061 .src_port = mask->src_port_mask, 5062 .dst_port = mask->dst_port_mask, 5063 }; 5064 attributes->items[2] = (struct rte_flow_item){ 5065 .type = RTE_FLOW_ITEM_TYPE_UDP, 5066 .spec = &attributes->l4, 5067 .mask = &attributes->l4_mask, 5068 }; 5069 break; 5070 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 5071 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ 5072 .src_port = input->flow.tcp4_flow.src_port, 5073 .dst_port = input->flow.tcp4_flow.dst_port, 5074 }; 5075 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ 5076 .src_port = mask->src_port_mask, 5077 .dst_port = mask->dst_port_mask, 5078 }; 5079 attributes->items[2] = (struct rte_flow_item){ 5080 .type = RTE_FLOW_ITEM_TYPE_TCP, 5081 .spec = &attributes->l4, 5082 .mask = &attributes->l4_mask, 5083 }; 5084 break; 5085 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 5086 attributes->l4.udp.hdr = (struct rte_udp_hdr){ 5087 .src_port = input->flow.udp6_flow.src_port, 5088 .dst_port = input->flow.udp6_flow.dst_port, 5089 }; 5090 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ 5091 .src_port = mask->src_port_mask, 5092 .dst_port = mask->dst_port_mask, 5093 }; 5094 attributes->items[2] = (struct rte_flow_item){ 5095 .type = RTE_FLOW_ITEM_TYPE_UDP, 5096 .spec = &attributes->l4, 5097 .mask = &attributes->l4_mask, 5098 }; 5099 break; 5100 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 5101 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ 5102 .src_port = input->flow.tcp6_flow.src_port, 5103 .dst_port = input->flow.tcp6_flow.dst_port, 5104 }; 5105 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ 5106 .src_port = mask->src_port_mask, 5107 .dst_port = mask->dst_port_mask, 5108 }; 5109 attributes->items[2] = (struct rte_flow_item){ 5110 .type = RTE_FLOW_ITEM_TYPE_TCP, 5111 .spec = &attributes->l4, 5112 .mask = &attributes->l4_mask, 5113 }; 5114 break; 5115 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 5116 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 5117 break; 5118 default: 5119 DRV_LOG(ERR, "port %u invalid flow type%d", 5120 dev->data->port_id, fdir_filter->input.flow_type); 5121 rte_errno = ENOTSUP; 5122 return -rte_errno; 5123 } 5124 return 0; 5125 } 5126 5127 #define FLOW_FDIR_CMP(f1, f2, fld) \ 5128 memcmp(&(f1)->fld, &(f2)->fld, sizeof(f1->fld)) 5129 5130 /** 5131 * Compare two FDIR flows. If items and actions are identical, the two flows are 5132 * regarded as same. 5133 * 5134 * @param dev 5135 * Pointer to Ethernet device. 5136 * @param f1 5137 * FDIR flow to compare. 5138 * @param f2 5139 * FDIR flow to compare. 5140 * 5141 * @return 5142 * Zero on match, 1 otherwise. 5143 */ 5144 static int 5145 flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2) 5146 { 5147 if (FLOW_FDIR_CMP(f1, f2, attr) || 5148 FLOW_FDIR_CMP(f1, f2, l2) || 5149 FLOW_FDIR_CMP(f1, f2, l2_mask) || 5150 FLOW_FDIR_CMP(f1, f2, l3) || 5151 FLOW_FDIR_CMP(f1, f2, l3_mask) || 5152 FLOW_FDIR_CMP(f1, f2, l4) || 5153 FLOW_FDIR_CMP(f1, f2, l4_mask) || 5154 FLOW_FDIR_CMP(f1, f2, actions[0].type)) 5155 return 1; 5156 if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE && 5157 FLOW_FDIR_CMP(f1, f2, queue)) 5158 return 1; 5159 return 0; 5160 } 5161 5162 /** 5163 * Search device flow list to find out a matched FDIR flow. 5164 * 5165 * @param dev 5166 * Pointer to Ethernet device. 
5167 * @param fdir_flow 5168 * FDIR flow to lookup. 5169 * 5170 * @return 5171 * Pointer of flow if found, NULL otherwise. 5172 */ 5173 static struct rte_flow * 5174 flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow) 5175 { 5176 struct mlx5_priv *priv = dev->data->dev_private; 5177 struct rte_flow *flow = NULL; 5178 5179 MLX5_ASSERT(fdir_flow); 5180 TAILQ_FOREACH(flow, &priv->flows, next) { 5181 if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) { 5182 DRV_LOG(DEBUG, "port %u found FDIR flow %p", 5183 dev->data->port_id, (void *)flow); 5184 break; 5185 } 5186 } 5187 return flow; 5188 } 5189 5190 /** 5191 * Add new flow director filter and store it in list. 5192 * 5193 * @param dev 5194 * Pointer to Ethernet device. 5195 * @param fdir_filter 5196 * Flow director filter to add. 5197 * 5198 * @return 5199 * 0 on success, a negative errno value otherwise and rte_errno is set. 5200 */ 5201 static int 5202 flow_fdir_filter_add(struct rte_eth_dev *dev, 5203 const struct rte_eth_fdir_filter *fdir_filter) 5204 { 5205 struct mlx5_priv *priv = dev->data->dev_private; 5206 struct mlx5_fdir *fdir_flow; 5207 struct rte_flow *flow; 5208 int ret; 5209 5210 fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0); 5211 if (!fdir_flow) { 5212 rte_errno = ENOMEM; 5213 return -rte_errno; 5214 } 5215 ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow); 5216 if (ret) 5217 goto error; 5218 flow = flow_fdir_filter_lookup(dev, fdir_flow); 5219 if (flow) { 5220 rte_errno = EEXIST; 5221 goto error; 5222 } 5223 flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr, 5224 fdir_flow->items, fdir_flow->actions, true, 5225 NULL); 5226 if (!flow) 5227 goto error; 5228 MLX5_ASSERT(!flow->fdir); 5229 flow->fdir = fdir_flow; 5230 DRV_LOG(DEBUG, "port %u created FDIR flow %p", 5231 dev->data->port_id, (void *)flow); 5232 return 0; 5233 error: 5234 rte_free(fdir_flow); 5235 return -rte_errno; 5236 } 5237 5238 /** 5239 * Delete specific filter. 5240 * 5241 * @param dev 5242 * Pointer to Ethernet device. 5243 * @param fdir_filter 5244 * Filter to be deleted. 5245 * 5246 * @return 5247 * 0 on success, a negative errno value otherwise and rte_errno is set. 5248 */ 5249 static int 5250 flow_fdir_filter_delete(struct rte_eth_dev *dev, 5251 const struct rte_eth_fdir_filter *fdir_filter) 5252 { 5253 struct mlx5_priv *priv = dev->data->dev_private; 5254 struct rte_flow *flow; 5255 struct mlx5_fdir fdir_flow = { 5256 .attr.group = 0, 5257 }; 5258 int ret; 5259 5260 ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow); 5261 if (ret) 5262 return -rte_errno; 5263 flow = flow_fdir_filter_lookup(dev, &fdir_flow); 5264 if (!flow) { 5265 rte_errno = ENOENT; 5266 return -rte_errno; 5267 } 5268 flow_list_destroy(dev, &priv->flows, flow); 5269 DRV_LOG(DEBUG, "port %u deleted FDIR flow %p", 5270 dev->data->port_id, (void *)flow); 5271 return 0; 5272 } 5273 5274 /** 5275 * Update queue for specific filter. 5276 * 5277 * @param dev 5278 * Pointer to Ethernet device. 5279 * @param fdir_filter 5280 * Filter to be updated. 5281 * 5282 * @return 5283 * 0 on success, a negative errno value otherwise and rte_errno is set. 5284 */ 5285 static int 5286 flow_fdir_filter_update(struct rte_eth_dev *dev, 5287 const struct rte_eth_fdir_filter *fdir_filter) 5288 { 5289 int ret; 5290 5291 ret = flow_fdir_filter_delete(dev, fdir_filter); 5292 if (ret) 5293 return ret; 5294 return flow_fdir_filter_add(dev, fdir_filter); 5295 } 5296 5297 /** 5298 * Flush all filters. 
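 * This flushes the whole priv->flows list, i.e. the same path that is used
 * by rte_flow_flush().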
5299 * 5300 * @param dev 5301 * Pointer to Ethernet device. 5302 */ 5303 static void 5304 flow_fdir_filter_flush(struct rte_eth_dev *dev) 5305 { 5306 struct mlx5_priv *priv = dev->data->dev_private; 5307 5308 mlx5_flow_list_flush(dev, &priv->flows, false); 5309 } 5310 5311 /** 5312 * Get flow director information. 5313 * 5314 * @param dev 5315 * Pointer to Ethernet device. 5316 * @param[out] fdir_info 5317 * Resulting flow director information. 5318 */ 5319 static void 5320 flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) 5321 { 5322 struct rte_eth_fdir_masks *mask = 5323 &dev->data->dev_conf.fdir_conf.mask; 5324 5325 fdir_info->mode = dev->data->dev_conf.fdir_conf.mode; 5326 fdir_info->guarant_spc = 0; 5327 rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask)); 5328 fdir_info->max_flexpayload = 0; 5329 fdir_info->flow_types_mask[0] = 0; 5330 fdir_info->flex_payload_unit = 0; 5331 fdir_info->max_flex_payload_segment_num = 0; 5332 fdir_info->flex_payload_limit = 0; 5333 memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf)); 5334 } 5335 5336 /** 5337 * Deal with flow director operations. 5338 * 5339 * @param dev 5340 * Pointer to Ethernet device. 5341 * @param filter_op 5342 * Operation to perform. 5343 * @param arg 5344 * Pointer to operation-specific structure. 5345 * 5346 * @return 5347 * 0 on success, a negative errno value otherwise and rte_errno is set. 5348 */ 5349 static int 5350 flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, 5351 void *arg) 5352 { 5353 enum rte_fdir_mode fdir_mode = 5354 dev->data->dev_conf.fdir_conf.mode; 5355 5356 if (filter_op == RTE_ETH_FILTER_NOP) 5357 return 0; 5358 if (fdir_mode != RTE_FDIR_MODE_PERFECT && 5359 fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 5360 DRV_LOG(ERR, "port %u flow director mode %d not supported", 5361 dev->data->port_id, fdir_mode); 5362 rte_errno = EINVAL; 5363 return -rte_errno; 5364 } 5365 switch (filter_op) { 5366 case RTE_ETH_FILTER_ADD: 5367 return flow_fdir_filter_add(dev, arg); 5368 case RTE_ETH_FILTER_UPDATE: 5369 return flow_fdir_filter_update(dev, arg); 5370 case RTE_ETH_FILTER_DELETE: 5371 return flow_fdir_filter_delete(dev, arg); 5372 case RTE_ETH_FILTER_FLUSH: 5373 flow_fdir_filter_flush(dev); 5374 break; 5375 case RTE_ETH_FILTER_INFO: 5376 flow_fdir_info_get(dev, arg); 5377 break; 5378 default: 5379 DRV_LOG(DEBUG, "port %u unknown operation %u", 5380 dev->data->port_id, filter_op); 5381 rte_errno = EINVAL; 5382 return -rte_errno; 5383 } 5384 return 0; 5385 } 5386 5387 /** 5388 * Manage filter operations. 5389 * 5390 * @param dev 5391 * Pointer to Ethernet device structure. 5392 * @param filter_type 5393 * Filter type. 5394 * @param filter_op 5395 * Operation to perform. 5396 * @param arg 5397 * Pointer to operation-specific structure. 5398 * 5399 * @return 5400 * 0 on success, a negative errno value otherwise and rte_errno is set. 
5401 */ 5402 int 5403 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, 5404 enum rte_filter_type filter_type, 5405 enum rte_filter_op filter_op, 5406 void *arg) 5407 { 5408 switch (filter_type) { 5409 case RTE_ETH_FILTER_GENERIC: 5410 if (filter_op != RTE_ETH_FILTER_GET) { 5411 rte_errno = EINVAL; 5412 return -rte_errno; 5413 } 5414 *(const void **)arg = &mlx5_flow_ops; 5415 return 0; 5416 case RTE_ETH_FILTER_FDIR: 5417 return flow_fdir_ctrl_func(dev, filter_op, arg); 5418 default: 5419 DRV_LOG(ERR, "port %u filter type (%d) not supported", 5420 dev->data->port_id, filter_type); 5421 rte_errno = ENOTSUP; 5422 return -rte_errno; 5423 } 5424 return 0; 5425 } 5426 5427 /** 5428 * Create the needed meter and suffix tables. 5429 * 5430 * @param[in] dev 5431 * Pointer to Ethernet device. 5432 * @param[in] fm 5433 * Pointer to the flow meter. 5434 * 5435 * @return 5436 * Pointer to table set on success, NULL otherwise. 5437 */ 5438 struct mlx5_meter_domains_infos * 5439 mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev, 5440 const struct mlx5_flow_meter *fm) 5441 { 5442 const struct mlx5_flow_driver_ops *fops; 5443 5444 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5445 return fops->create_mtr_tbls(dev, fm); 5446 } 5447 5448 /** 5449 * Destroy the meter table set. 5450 * 5451 * @param[in] dev 5452 * Pointer to Ethernet device. 5453 * @param[in] tbl 5454 * Pointer to the meter table set. 5455 * 5456 * @return 5457 * 0 on success. 5458 */ 5459 int 5460 mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev, 5461 struct mlx5_meter_domains_infos *tbls) 5462 { 5463 const struct mlx5_flow_driver_ops *fops; 5464 5465 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5466 return fops->destroy_mtr_tbls(dev, tbls); 5467 } 5468 5469 /** 5470 * Create policer rules. 5471 * 5472 * @param[in] dev 5473 * Pointer to Ethernet device. 5474 * @param[in] fm 5475 * Pointer to flow meter structure. 5476 * @param[in] attr 5477 * Pointer to flow attributes. 5478 * 5479 * @return 5480 * 0 on success, -1 otherwise. 5481 */ 5482 int 5483 mlx5_flow_create_policer_rules(struct rte_eth_dev *dev, 5484 struct mlx5_flow_meter *fm, 5485 const struct rte_flow_attr *attr) 5486 { 5487 const struct mlx5_flow_driver_ops *fops; 5488 5489 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5490 return fops->create_policer_rules(dev, fm, attr); 5491 } 5492 5493 /** 5494 * Destroy policer rules. 5495 * 5496 * @param[in] fm 5497 * Pointer to flow meter structure. 5498 * @param[in] attr 5499 * Pointer to flow attributes. 5500 * 5501 * @return 5502 * 0 on success, -1 otherwise. 5503 */ 5504 int 5505 mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev, 5506 struct mlx5_flow_meter *fm, 5507 const struct rte_flow_attr *attr) 5508 { 5509 const struct mlx5_flow_driver_ops *fops; 5510 5511 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5512 return fops->destroy_policer_rules(dev, fm, attr); 5513 } 5514 5515 /** 5516 * Allocate a counter. 5517 * 5518 * @param[in] dev 5519 * Pointer to Ethernet device structure. 5520 * 5521 * @return 5522 * Index to allocated counter on success, 0 otherwise. 
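 *
 * A minimal usage sketch (illustrative only, assuming a DV-capable port;
 * "cnt", "pkts" and "bytes" below are local variables, not part of this API):
 *
 *   uint64_t pkts, bytes;
 *   uint32_t cnt = mlx5_counter_alloc(dev);
 *
 *   if (cnt) {
 *       mlx5_counter_query(dev, cnt, false, &pkts, &bytes);
 *       mlx5_counter_free(dev, cnt);
 *   }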
5523 */ 5524 uint32_t 5525 mlx5_counter_alloc(struct rte_eth_dev *dev) 5526 { 5527 const struct mlx5_flow_driver_ops *fops; 5528 struct rte_flow_attr attr = { .transfer = 0 }; 5529 5530 if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) { 5531 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5532 return fops->counter_alloc(dev); 5533 } 5534 DRV_LOG(ERR, 5535 "port %u counter allocate is not supported.", 5536 dev->data->port_id); 5537 return 0; 5538 } 5539 5540 /** 5541 * Free a counter. 5542 * 5543 * @param[in] dev 5544 * Pointer to Ethernet device structure. 5545 * @param[in] cnt 5546 * Index to counter to be free. 5547 */ 5548 void 5549 mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt) 5550 { 5551 const struct mlx5_flow_driver_ops *fops; 5552 struct rte_flow_attr attr = { .transfer = 0 }; 5553 5554 if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) { 5555 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5556 fops->counter_free(dev, cnt); 5557 return; 5558 } 5559 DRV_LOG(ERR, 5560 "port %u counter free is not supported.", 5561 dev->data->port_id); 5562 } 5563 5564 /** 5565 * Query counter statistics. 5566 * 5567 * @param[in] dev 5568 * Pointer to Ethernet device structure. 5569 * @param[in] cnt 5570 * Index to counter to query. 5571 * @param[in] clear 5572 * Set to clear counter statistics. 5573 * @param[out] pkts 5574 * The counter hits packets number to save. 5575 * @param[out] bytes 5576 * The counter hits bytes number to save. 5577 * 5578 * @return 5579 * 0 on success, a negative errno value otherwise. 5580 */ 5581 int 5582 mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt, 5583 bool clear, uint64_t *pkts, uint64_t *bytes) 5584 { 5585 const struct mlx5_flow_driver_ops *fops; 5586 struct rte_flow_attr attr = { .transfer = 0 }; 5587 5588 if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) { 5589 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5590 return fops->counter_query(dev, cnt, clear, pkts, bytes); 5591 } 5592 DRV_LOG(ERR, 5593 "port %u counter query is not supported.", 5594 dev->data->port_id); 5595 return -ENOTSUP; 5596 } 5597 5598 #define MLX5_POOL_QUERY_FREQ_US 1000000 5599 5600 /** 5601 * Set the periodic procedure for triggering asynchronous batch queries for all 5602 * the counter pools. 5603 * 5604 * @param[in] sh 5605 * Pointer to mlx5_ibv_shared object. 5606 */ 5607 void 5608 mlx5_set_query_alarm(struct mlx5_ibv_shared *sh) 5609 { 5610 struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(sh, 0, 0); 5611 uint32_t pools_n = rte_atomic16_read(&cont->n_valid); 5612 uint32_t us; 5613 5614 cont = MLX5_CNT_CONTAINER(sh, 1, 0); 5615 pools_n += rte_atomic16_read(&cont->n_valid); 5616 us = MLX5_POOL_QUERY_FREQ_US / pools_n; 5617 DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us); 5618 if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) { 5619 sh->cmng.query_thread_on = 0; 5620 DRV_LOG(ERR, "Cannot reinitialize query alarm"); 5621 } else { 5622 sh->cmng.query_thread_on = 1; 5623 } 5624 } 5625 5626 /** 5627 * The periodic procedure for triggering asynchronous batch queries for all the 5628 * counter pools. This function is probably called by the host thread. 5629 * 5630 * @param[in] arg 5631 * The parameter for the alarm process. 
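 *
 * Each invocation queries at most one counter pool and then re-arms itself
 * through mlx5_set_query_alarm(), so the pool containers are walked
 * incrementally across alarm ticks.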
/**
 * The periodic procedure for triggering asynchronous batch queries for all the
 * counter pools. This function is expected to run in the host thread.
 *
 * @param[in] arg
 *   The parameter for the alarm process.
 */
void
mlx5_flow_query_alarm(void *arg)
{
	struct mlx5_ibv_shared *sh = arg;
	struct mlx5_devx_obj *dcs;
	uint16_t offset;
	int ret;
	uint8_t batch = sh->cmng.batch;
	uint16_t pool_index = sh->cmng.pool_index;
	struct mlx5_pools_container *cont;
	struct mlx5_pools_container *mcont;
	struct mlx5_flow_counter_pool *pool;

	if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
		goto set_alarm;
next_container:
	cont = MLX5_CNT_CONTAINER(sh, batch, 1);
	mcont = MLX5_CNT_CONTAINER(sh, batch, 0);
	/* Check if resize was done and need to flip a container. */
	if (cont != mcont) {
		if (cont->pools) {
			/* Clean the old container. */
			rte_free(cont->pools);
			memset(cont, 0, sizeof(*cont));
		}
		rte_cio_wmb();
		/* Flip the host container. */
		sh->cmng.mhi[batch] ^= (uint8_t)2;
		cont = mcont;
	}
	if (!cont->pools) {
		/* 2 empty containers case is unexpected. */
		if (unlikely(batch != sh->cmng.batch))
			goto set_alarm;
		batch ^= 0x1;
		pool_index = 0;
		goto next_container;
	}
	pool = cont->pools[pool_index];
	if (pool->raw_hw)
		/* There is a pool query in progress. */
		goto set_alarm;
	pool->raw_hw = LIST_FIRST(&sh->cmng.free_stat_raws);
	if (!pool->raw_hw)
		/* No free counter statistics raw memory. */
		goto set_alarm;
	dcs = (struct mlx5_devx_obj *)(uintptr_t)
					rte_atomic64_read(&pool->a64_dcs);
	offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL;
	/*
	 * Identify more efficiently the counters released between the query
	 * trigger and the query handling. A counter released in this gap
	 * must wait for a new query round, as the newly arrived packets are
	 * not yet taken into account.
	 */
	rte_atomic64_add(&pool->start_query_gen, 1);
	ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL -
					       offset, NULL, NULL,
					       pool->raw_hw->mem_mng->dm->id,
					       (void *)(uintptr_t)
					       (pool->raw_hw->data + offset),
					       sh->devx_comp,
					       (uint64_t)(uintptr_t)pool);
	if (ret) {
		rte_atomic64_sub(&pool->start_query_gen, 1);
		DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
			" %d", pool->min_dcs->id);
		pool->raw_hw = NULL;
		goto set_alarm;
	}
	pool->raw_hw->min_dcs_id = dcs->id;
	LIST_REMOVE(pool->raw_hw, next);
	sh->cmng.pending_queries++;
	pool_index++;
	if (pool_index >= rte_atomic16_read(&cont->n_valid)) {
		batch ^= 0x1;
		pool_index = 0;
	}
set_alarm:
	sh->cmng.batch = batch;
	sh->cmng.pool_index = pool_index;
	mlx5_set_query_alarm(sh);
}
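/*
 * Illustrative sketch, not part of the driver: the pacing computed by
 * mlx5_set_query_alarm() spreads one pass over all counter pools across
 * MLX5_POOL_QUERY_FREQ_US, so with 4 valid pools the alarm fires every
 * 1000000 / 4 = 250000 us and each pool is queried about once per second.
 * The helper name is hypothetical and it assumes a non-zero pool count.
 */
static __rte_unused uint32_t
example_query_interval_us(uint32_t pools_n)
{
	/* Per-pool alarm interval in microseconds. */
	return MLX5_POOL_QUERY_FREQ_US / pools_n;
}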
/**
 * Handler for the HW response with ready values from an asynchronous batch
 * query. This function is expected to run in the host thread.
 *
 * @param[in] sh
 *   The pointer to the shared IB device context.
 * @param[in] async_id
 *   The Devx async ID.
 * @param[in] status
 *   The status of the completion.
 */
void
mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh,
				  uint64_t async_id, int status)
{
	struct mlx5_flow_counter_pool *pool =
		(struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
	struct mlx5_counter_stats_raw *raw_to_free;

	if (unlikely(status)) {
		rte_atomic64_sub(&pool->start_query_gen, 1);
		raw_to_free = pool->raw_hw;
	} else {
		raw_to_free = pool->raw;
		rte_spinlock_lock(&pool->sl);
		pool->raw = pool->raw_hw;
		rte_spinlock_unlock(&pool->sl);
		MLX5_ASSERT(rte_atomic64_read(&pool->end_query_gen) + 1 ==
			    rte_atomic64_read(&pool->start_query_gen));
		rte_atomic64_set(&pool->end_query_gen,
				 rte_atomic64_read(&pool->start_query_gen));
		/* Be sure the new raw counters data is updated in memory. */
		rte_cio_wmb();
	}
	LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
	pool->raw_hw = NULL;
	sh->cmng.pending_queries--;
}

/**
 * Translate the rte_flow group index to the HW table value.
 *
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[in] external
 *   Value is part of a flow rule created by a request external to the PMD.
 * @param[in] group
 *   rte_flow group index value.
 * @param[in] fdb_def_rule
 *   Whether the FDB jump to table 1 is configured.
 * @param[out] table
 *   HW table value.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external,
			 uint32_t group, bool fdb_def_rule, uint32_t *table,
			 struct rte_flow_error *error)
{
	if (attributes->transfer && external && fdb_def_rule) {
		if (group == UINT32_MAX)
			return rte_flow_error_set
					(error, EINVAL,
					 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					 NULL,
					 "group index not supported");
		*table = group + 1;
	} else {
		*table = group;
	}
	return 0;
}
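/*
 * Illustrative sketch, not part of the driver: expected mapping of
 * mlx5_flow_group_to_table() for an external transfer rule with the FDB
 * default rule enabled - group N maps to HW table N + 1 (here 5 -> 6),
 * while group UINT32_MAX is rejected. Non-transfer or internal rules keep
 * an identity mapping. The function name is hypothetical.
 */
static __rte_unused uint32_t
example_group_to_table(void)
{
	struct rte_flow_attr attr = { .transfer = 1 };
	struct rte_flow_error error;
	uint32_t table = 0;

	/* external = true, group = 5, fdb_def_rule = true. */
	if (mlx5_flow_group_to_table(&attr, true, 5, true, &table, &error))
		return 0;
	return table; /* Expected to be 6. */
}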
/**
 * Discover availability of metadata reg_c's.
 *
 * Iteratively use test flows to check availability.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	enum modify_reg idx;
	int n = 0;

	/* reg_c[0] and reg_c[1] are reserved. */
	config->flow_mreg_c[n++] = REG_C_0;
	config->flow_mreg_c[n++] = REG_C_1;
	/* Discover availability of other reg_c's. */
	for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
		struct rte_flow_attr attr = {
			.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
			.priority = MLX5_FLOW_PRIO_RSVD,
			.ingress = 1,
		};
		struct rte_flow_item items[] = {
			[0] = {
				.type = RTE_FLOW_ITEM_TYPE_END,
			},
		};
		struct rte_flow_action actions[] = {
			[0] = {
				.type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
				.conf = &(struct mlx5_flow_action_copy_mreg){
					.src = REG_C_1,
					.dst = idx,
				},
			},
			[1] = {
				.type = RTE_FLOW_ACTION_TYPE_JUMP,
				.conf = &(struct rte_flow_action_jump){
					.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
				},
			},
			[2] = {
				.type = RTE_FLOW_ACTION_TYPE_END,
			},
		};
		struct rte_flow *flow;
		struct rte_flow_error error;

		if (!config->dv_flow_en)
			break;
		/* Create internal flow, validation skips copy action. */
		flow = flow_list_create(dev, NULL, &attr, items,
					actions, false, &error);
		if (!flow)
			continue;
		if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL))
			config->flow_mreg_c[n++] = idx;
		flow_list_destroy(dev, NULL, flow);
	}
	for (; n < MLX5_MREG_C_NUM; ++n)
		config->flow_mreg_c[n] = REG_NONE;
	return 0;
}

/**
 * Dump flow raw HW data to a file.
 *
 * @param[in] dev
 *   The pointer to Ethernet device.
 * @param[in] file
 *   A pointer to a file for output.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_flow_dev_dump(struct rte_eth_dev *dev,
		   FILE *file,
		   struct rte_flow_error *error __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;

	return mlx5_devx_cmd_flow_dump(sh->fdb_domain, sh->rx_domain,
				       sh->tx_domain, file);
}
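/*
 * Illustrative sketch, not part of the driver: an application normally
 * reaches the dump callback above through the generic rte_flow API, e.g.
 * dumping the HW steering tables of a port to stderr. The function name and
 * the port_id parameter are hypothetical; it assumes the experimental
 * rte_flow_dev_dump() API of this DPDK release is enabled in the build.
 */
static __rte_unused int
example_flow_dump(uint16_t port_id)
{
	struct rte_flow_error error;

	/* The PMD ignores the error argument and writes directly to file. */
	return rte_flow_dev_dump(port_id, stderr, &error);
}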