1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2016 6WIND S.A. 3 * Copyright 2016 Mellanox Technologies, Ltd 4 */ 5 6 #include <netinet/in.h> 7 #include <sys/queue.h> 8 #include <stdalign.h> 9 #include <stdint.h> 10 #include <string.h> 11 #include <stdbool.h> 12 13 /* Verbs header. */ 14 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ 15 #ifdef PEDANTIC 16 #pragma GCC diagnostic ignored "-Wpedantic" 17 #endif 18 #include <infiniband/verbs.h> 19 #ifdef PEDANTIC 20 #pragma GCC diagnostic error "-Wpedantic" 21 #endif 22 23 #include <rte_common.h> 24 #include <rte_ether.h> 25 #include <rte_ethdev_driver.h> 26 #include <rte_flow.h> 27 #include <rte_flow_driver.h> 28 #include <rte_malloc.h> 29 #include <rte_ip.h> 30 31 #include <mlx5_glue.h> 32 #include <mlx5_devx_cmds.h> 33 #include <mlx5_prm.h> 34 35 #include "mlx5_defs.h" 36 #include "mlx5.h" 37 #include "mlx5_flow.h" 38 #include "mlx5_rxtx.h" 39 40 /* Dev ops structure defined in mlx5.c */ 41 extern const struct eth_dev_ops mlx5_dev_ops; 42 extern const struct eth_dev_ops mlx5_dev_ops_isolate; 43 44 /** Device flow drivers. */ 45 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 46 extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops; 47 #endif 48 extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops; 49 50 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops; 51 52 const struct mlx5_flow_driver_ops *flow_drv_ops[] = { 53 [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops, 54 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 55 [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops, 56 #endif 57 [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops, 58 [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops 59 }; 60 61 enum mlx5_expansion { 62 MLX5_EXPANSION_ROOT, 63 MLX5_EXPANSION_ROOT_OUTER, 64 MLX5_EXPANSION_ROOT_ETH_VLAN, 65 MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN, 66 MLX5_EXPANSION_OUTER_ETH, 67 MLX5_EXPANSION_OUTER_ETH_VLAN, 68 MLX5_EXPANSION_OUTER_VLAN, 69 MLX5_EXPANSION_OUTER_IPV4, 70 MLX5_EXPANSION_OUTER_IPV4_UDP, 71 MLX5_EXPANSION_OUTER_IPV4_TCP, 72 MLX5_EXPANSION_OUTER_IPV6, 73 MLX5_EXPANSION_OUTER_IPV6_UDP, 74 MLX5_EXPANSION_OUTER_IPV6_TCP, 75 MLX5_EXPANSION_VXLAN, 76 MLX5_EXPANSION_VXLAN_GPE, 77 MLX5_EXPANSION_GRE, 78 MLX5_EXPANSION_MPLS, 79 MLX5_EXPANSION_ETH, 80 MLX5_EXPANSION_ETH_VLAN, 81 MLX5_EXPANSION_VLAN, 82 MLX5_EXPANSION_IPV4, 83 MLX5_EXPANSION_IPV4_UDP, 84 MLX5_EXPANSION_IPV4_TCP, 85 MLX5_EXPANSION_IPV6, 86 MLX5_EXPANSION_IPV6_UDP, 87 MLX5_EXPANSION_IPV6_TCP, 88 }; 89 90 /** Supported expansion of items. 
*/ 91 static const struct rte_flow_expand_node mlx5_support_expansion[] = { 92 [MLX5_EXPANSION_ROOT] = { 93 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, 94 MLX5_EXPANSION_IPV4, 95 MLX5_EXPANSION_IPV6), 96 .type = RTE_FLOW_ITEM_TYPE_END, 97 }, 98 [MLX5_EXPANSION_ROOT_OUTER] = { 99 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH, 100 MLX5_EXPANSION_OUTER_IPV4, 101 MLX5_EXPANSION_OUTER_IPV6), 102 .type = RTE_FLOW_ITEM_TYPE_END, 103 }, 104 [MLX5_EXPANSION_ROOT_ETH_VLAN] = { 105 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN), 106 .type = RTE_FLOW_ITEM_TYPE_END, 107 }, 108 [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = { 109 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN), 110 .type = RTE_FLOW_ITEM_TYPE_END, 111 }, 112 [MLX5_EXPANSION_OUTER_ETH] = { 113 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, 114 MLX5_EXPANSION_OUTER_IPV6, 115 MLX5_EXPANSION_MPLS), 116 .type = RTE_FLOW_ITEM_TYPE_ETH, 117 .rss_types = 0, 118 }, 119 [MLX5_EXPANSION_OUTER_ETH_VLAN] = { 120 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN), 121 .type = RTE_FLOW_ITEM_TYPE_ETH, 122 .rss_types = 0, 123 }, 124 [MLX5_EXPANSION_OUTER_VLAN] = { 125 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, 126 MLX5_EXPANSION_OUTER_IPV6), 127 .type = RTE_FLOW_ITEM_TYPE_VLAN, 128 }, 129 [MLX5_EXPANSION_OUTER_IPV4] = { 130 .next = RTE_FLOW_EXPAND_RSS_NEXT 131 (MLX5_EXPANSION_OUTER_IPV4_UDP, 132 MLX5_EXPANSION_OUTER_IPV4_TCP, 133 MLX5_EXPANSION_GRE, 134 MLX5_EXPANSION_IPV4, 135 MLX5_EXPANSION_IPV6), 136 .type = RTE_FLOW_ITEM_TYPE_IPV4, 137 .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | 138 ETH_RSS_NONFRAG_IPV4_OTHER, 139 }, 140 [MLX5_EXPANSION_OUTER_IPV4_UDP] = { 141 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, 142 MLX5_EXPANSION_VXLAN_GPE), 143 .type = RTE_FLOW_ITEM_TYPE_UDP, 144 .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, 145 }, 146 [MLX5_EXPANSION_OUTER_IPV4_TCP] = { 147 .type = RTE_FLOW_ITEM_TYPE_TCP, 148 .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, 149 }, 150 [MLX5_EXPANSION_OUTER_IPV6] = { 151 .next = RTE_FLOW_EXPAND_RSS_NEXT 152 (MLX5_EXPANSION_OUTER_IPV6_UDP, 153 MLX5_EXPANSION_OUTER_IPV6_TCP, 154 MLX5_EXPANSION_IPV4, 155 MLX5_EXPANSION_IPV6), 156 .type = RTE_FLOW_ITEM_TYPE_IPV6, 157 .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | 158 ETH_RSS_NONFRAG_IPV6_OTHER, 159 }, 160 [MLX5_EXPANSION_OUTER_IPV6_UDP] = { 161 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, 162 MLX5_EXPANSION_VXLAN_GPE), 163 .type = RTE_FLOW_ITEM_TYPE_UDP, 164 .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, 165 }, 166 [MLX5_EXPANSION_OUTER_IPV6_TCP] = { 167 .type = RTE_FLOW_ITEM_TYPE_TCP, 168 .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, 169 }, 170 [MLX5_EXPANSION_VXLAN] = { 171 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, 172 MLX5_EXPANSION_IPV4, 173 MLX5_EXPANSION_IPV6), 174 .type = RTE_FLOW_ITEM_TYPE_VXLAN, 175 }, 176 [MLX5_EXPANSION_VXLAN_GPE] = { 177 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, 178 MLX5_EXPANSION_IPV4, 179 MLX5_EXPANSION_IPV6), 180 .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE, 181 }, 182 [MLX5_EXPANSION_GRE] = { 183 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4), 184 .type = RTE_FLOW_ITEM_TYPE_GRE, 185 }, 186 [MLX5_EXPANSION_MPLS] = { 187 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 188 MLX5_EXPANSION_IPV6), 189 .type = RTE_FLOW_ITEM_TYPE_MPLS, 190 }, 191 [MLX5_EXPANSION_ETH] = { 192 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 193 MLX5_EXPANSION_IPV6), 194 .type = RTE_FLOW_ITEM_TYPE_ETH, 195 }, 196 [MLX5_EXPANSION_ETH_VLAN] = { 197 .next = 
RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN), 198 .type = RTE_FLOW_ITEM_TYPE_ETH, 199 }, 200 [MLX5_EXPANSION_VLAN] = { 201 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 202 MLX5_EXPANSION_IPV6), 203 .type = RTE_FLOW_ITEM_TYPE_VLAN, 204 }, 205 [MLX5_EXPANSION_IPV4] = { 206 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP, 207 MLX5_EXPANSION_IPV4_TCP), 208 .type = RTE_FLOW_ITEM_TYPE_IPV4, 209 .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | 210 ETH_RSS_NONFRAG_IPV4_OTHER, 211 }, 212 [MLX5_EXPANSION_IPV4_UDP] = { 213 .type = RTE_FLOW_ITEM_TYPE_UDP, 214 .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, 215 }, 216 [MLX5_EXPANSION_IPV4_TCP] = { 217 .type = RTE_FLOW_ITEM_TYPE_TCP, 218 .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, 219 }, 220 [MLX5_EXPANSION_IPV6] = { 221 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP, 222 MLX5_EXPANSION_IPV6_TCP), 223 .type = RTE_FLOW_ITEM_TYPE_IPV6, 224 .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | 225 ETH_RSS_NONFRAG_IPV6_OTHER, 226 }, 227 [MLX5_EXPANSION_IPV6_UDP] = { 228 .type = RTE_FLOW_ITEM_TYPE_UDP, 229 .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, 230 }, 231 [MLX5_EXPANSION_IPV6_TCP] = { 232 .type = RTE_FLOW_ITEM_TYPE_TCP, 233 .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, 234 }, 235 }; 236 237 static const struct rte_flow_ops mlx5_flow_ops = { 238 .validate = mlx5_flow_validate, 239 .create = mlx5_flow_create, 240 .destroy = mlx5_flow_destroy, 241 .flush = mlx5_flow_flush, 242 .isolate = mlx5_flow_isolate, 243 .query = mlx5_flow_query, 244 .dev_dump = mlx5_flow_dev_dump, 245 }; 246 247 /* Convert FDIR request to Generic flow. */ 248 struct mlx5_fdir { 249 struct rte_flow_attr attr; 250 struct rte_flow_item items[4]; 251 struct rte_flow_item_eth l2; 252 struct rte_flow_item_eth l2_mask; 253 union { 254 struct rte_flow_item_ipv4 ipv4; 255 struct rte_flow_item_ipv6 ipv6; 256 } l3; 257 union { 258 struct rte_flow_item_ipv4 ipv4; 259 struct rte_flow_item_ipv6 ipv6; 260 } l3_mask; 261 union { 262 struct rte_flow_item_udp udp; 263 struct rte_flow_item_tcp tcp; 264 } l4; 265 union { 266 struct rte_flow_item_udp udp; 267 struct rte_flow_item_tcp tcp; 268 } l4_mask; 269 struct rte_flow_action actions[2]; 270 struct rte_flow_action_queue queue; 271 }; 272 273 /* Map of Verbs to Flow priority with 8 Verbs priorities. */ 274 static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = { 275 { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 }, 276 }; 277 278 /* Map of Verbs to Flow priority with 16 Verbs priorities. */ 279 static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = { 280 { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 }, 281 { 9, 10, 11 }, { 12, 13, 14 }, 282 }; 283 284 /* Tunnel information. */ 285 struct mlx5_flow_tunnel_info { 286 uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */ 287 uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). 
*/ 288 }; 289 290 static struct mlx5_flow_tunnel_info tunnels_info[] = { 291 { 292 .tunnel = MLX5_FLOW_LAYER_VXLAN, 293 .ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP, 294 }, 295 { 296 .tunnel = MLX5_FLOW_LAYER_GENEVE, 297 .ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP, 298 }, 299 { 300 .tunnel = MLX5_FLOW_LAYER_VXLAN_GPE, 301 .ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP, 302 }, 303 { 304 .tunnel = MLX5_FLOW_LAYER_GRE, 305 .ptype = RTE_PTYPE_TUNNEL_GRE, 306 }, 307 { 308 .tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP, 309 .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP, 310 }, 311 { 312 .tunnel = MLX5_FLOW_LAYER_MPLS, 313 .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE, 314 }, 315 { 316 .tunnel = MLX5_FLOW_LAYER_NVGRE, 317 .ptype = RTE_PTYPE_TUNNEL_NVGRE, 318 }, 319 { 320 .tunnel = MLX5_FLOW_LAYER_IPIP, 321 .ptype = RTE_PTYPE_TUNNEL_IP, 322 }, 323 { 324 .tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP, 325 .ptype = RTE_PTYPE_TUNNEL_IP, 326 }, 327 { 328 .tunnel = MLX5_FLOW_LAYER_GTP, 329 .ptype = RTE_PTYPE_TUNNEL_GTPU, 330 }, 331 }; 332 333 /** 334 * Translate tag ID to register. 335 * 336 * @param[in] dev 337 * Pointer to the Ethernet device structure. 338 * @param[in] feature 339 * The feature that request the register. 340 * @param[in] id 341 * The request register ID. 342 * @param[out] error 343 * Error description in case of any. 344 * 345 * @return 346 * The request register on success, a negative errno 347 * value otherwise and rte_errno is set. 348 */ 349 int 350 mlx5_flow_get_reg_id(struct rte_eth_dev *dev, 351 enum mlx5_feature_name feature, 352 uint32_t id, 353 struct rte_flow_error *error) 354 { 355 struct mlx5_priv *priv = dev->data->dev_private; 356 struct mlx5_dev_config *config = &priv->config; 357 enum modify_reg start_reg; 358 bool skip_mtr_reg = false; 359 360 switch (feature) { 361 case MLX5_HAIRPIN_RX: 362 return REG_B; 363 case MLX5_HAIRPIN_TX: 364 return REG_A; 365 case MLX5_METADATA_RX: 366 switch (config->dv_xmeta_en) { 367 case MLX5_XMETA_MODE_LEGACY: 368 return REG_B; 369 case MLX5_XMETA_MODE_META16: 370 return REG_C_0; 371 case MLX5_XMETA_MODE_META32: 372 return REG_C_1; 373 } 374 break; 375 case MLX5_METADATA_TX: 376 return REG_A; 377 case MLX5_METADATA_FDB: 378 switch (config->dv_xmeta_en) { 379 case MLX5_XMETA_MODE_LEGACY: 380 return REG_NONE; 381 case MLX5_XMETA_MODE_META16: 382 return REG_C_0; 383 case MLX5_XMETA_MODE_META32: 384 return REG_C_1; 385 } 386 break; 387 case MLX5_FLOW_MARK: 388 switch (config->dv_xmeta_en) { 389 case MLX5_XMETA_MODE_LEGACY: 390 return REG_NONE; 391 case MLX5_XMETA_MODE_META16: 392 return REG_C_1; 393 case MLX5_XMETA_MODE_META32: 394 return REG_C_0; 395 } 396 break; 397 case MLX5_MTR_SFX: 398 /* 399 * If meter color and flow match share one register, flow match 400 * should use the meter color register for match. 401 */ 402 if (priv->mtr_reg_share) 403 return priv->mtr_color_reg; 404 else 405 return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : 406 REG_C_3; 407 case MLX5_MTR_COLOR: 408 MLX5_ASSERT(priv->mtr_color_reg != REG_NONE); 409 return priv->mtr_color_reg; 410 case MLX5_COPY_MARK: 411 /* 412 * Metadata COPY_MARK register using is in meter suffix sub 413 * flow while with meter. It's safe to share the same register. 414 */ 415 return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3; 416 case MLX5_APP_TAG: 417 /* 418 * If meter is enable, it will engage the register for color 419 * match and flow match. 
If meter color match is not using the 420 * REG_C_2, need to skip the REG_C_x be used by meter color 421 * match. 422 * If meter is disable, free to use all available registers. 423 */ 424 start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 : 425 (priv->mtr_reg_share ? REG_C_3 : REG_C_4); 426 skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2); 427 if (id > (REG_C_7 - start_reg)) 428 return rte_flow_error_set(error, EINVAL, 429 RTE_FLOW_ERROR_TYPE_ITEM, 430 NULL, "invalid tag id"); 431 if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NONE) 432 return rte_flow_error_set(error, ENOTSUP, 433 RTE_FLOW_ERROR_TYPE_ITEM, 434 NULL, "unsupported tag id"); 435 /* 436 * This case means meter is using the REG_C_x great than 2. 437 * Take care not to conflict with meter color REG_C_x. 438 * If the available index REG_C_y >= REG_C_x, skip the 439 * color register. 440 */ 441 if (skip_mtr_reg && config->flow_mreg_c 442 [id + start_reg - REG_C_0] >= priv->mtr_color_reg) { 443 if (config->flow_mreg_c 444 [id + 1 + start_reg - REG_C_0] != REG_NONE) 445 return config->flow_mreg_c 446 [id + 1 + start_reg - REG_C_0]; 447 return rte_flow_error_set(error, ENOTSUP, 448 RTE_FLOW_ERROR_TYPE_ITEM, 449 NULL, "unsupported tag id"); 450 } 451 return config->flow_mreg_c[id + start_reg - REG_C_0]; 452 } 453 MLX5_ASSERT(false); 454 return rte_flow_error_set(error, EINVAL, 455 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 456 NULL, "invalid feature name"); 457 } 458 459 /** 460 * Check extensive flow metadata register support. 461 * 462 * @param dev 463 * Pointer to rte_eth_dev structure. 464 * 465 * @return 466 * True if device supports extensive flow metadata register, otherwise false. 467 */ 468 bool 469 mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev) 470 { 471 struct mlx5_priv *priv = dev->data->dev_private; 472 struct mlx5_dev_config *config = &priv->config; 473 474 /* 475 * Having available reg_c can be regarded inclusively as supporting 476 * extensive flow metadata register, which could mean, 477 * - metadata register copy action by modify header. 478 * - 16 modify header actions is supported. 479 * - reg_c's are preserved across different domain (FDB and NIC) on 480 * packet loopback by flow lookup miss. 481 */ 482 return config->flow_mreg_c[2] != REG_NONE; 483 } 484 485 /** 486 * Discover the maximum number of priority available. 487 * 488 * @param[in] dev 489 * Pointer to the Ethernet device structure. 490 * 491 * @return 492 * number of supported flow priority on success, a negative errno 493 * value otherwise and rte_errno is set. 
494 */ 495 int 496 mlx5_flow_discover_priorities(struct rte_eth_dev *dev) 497 { 498 struct mlx5_priv *priv = dev->data->dev_private; 499 struct { 500 struct ibv_flow_attr attr; 501 struct ibv_flow_spec_eth eth; 502 struct ibv_flow_spec_action_drop drop; 503 } flow_attr = { 504 .attr = { 505 .num_of_specs = 2, 506 .port = (uint8_t)priv->ibv_port, 507 }, 508 .eth = { 509 .type = IBV_FLOW_SPEC_ETH, 510 .size = sizeof(struct ibv_flow_spec_eth), 511 }, 512 .drop = { 513 .size = sizeof(struct ibv_flow_spec_action_drop), 514 .type = IBV_FLOW_SPEC_ACTION_DROP, 515 }, 516 }; 517 struct ibv_flow *flow; 518 struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev); 519 uint16_t vprio[] = { 8, 16 }; 520 int i; 521 int priority = 0; 522 523 if (!drop) { 524 rte_errno = ENOTSUP; 525 return -rte_errno; 526 } 527 for (i = 0; i != RTE_DIM(vprio); i++) { 528 flow_attr.attr.priority = vprio[i] - 1; 529 flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr); 530 if (!flow) 531 break; 532 claim_zero(mlx5_glue->destroy_flow(flow)); 533 priority = vprio[i]; 534 } 535 mlx5_hrxq_drop_release(dev); 536 switch (priority) { 537 case 8: 538 priority = RTE_DIM(priority_map_3); 539 break; 540 case 16: 541 priority = RTE_DIM(priority_map_5); 542 break; 543 default: 544 rte_errno = ENOTSUP; 545 DRV_LOG(ERR, 546 "port %u verbs maximum priority: %d expected 8/16", 547 dev->data->port_id, priority); 548 return -rte_errno; 549 } 550 DRV_LOG(INFO, "port %u flow maximum priority: %d", 551 dev->data->port_id, priority); 552 return priority; 553 } 554 555 /** 556 * Adjust flow priority based on the highest layer and the request priority. 557 * 558 * @param[in] dev 559 * Pointer to the Ethernet device structure. 560 * @param[in] priority 561 * The rule base priority. 562 * @param[in] subpriority 563 * The priority based on the items. 564 * 565 * @return 566 * The new priority. 567 */ 568 uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, 569 uint32_t subpriority) 570 { 571 uint32_t res = 0; 572 struct mlx5_priv *priv = dev->data->dev_private; 573 574 switch (priv->config.flow_prio) { 575 case RTE_DIM(priority_map_3): 576 res = priority_map_3[priority][subpriority]; 577 break; 578 case RTE_DIM(priority_map_5): 579 res = priority_map_5[priority][subpriority]; 580 break; 581 } 582 return res; 583 } 584 585 /** 586 * Verify the @p item specifications (spec, last, mask) are compatible with the 587 * NIC capabilities. 588 * 589 * @param[in] item 590 * Item specification. 591 * @param[in] mask 592 * @p item->mask or flow default bit-masks. 593 * @param[in] nic_mask 594 * Bit-masks covering supported fields by the NIC to compare with user mask. 595 * @param[in] size 596 * Bit-masks size in bytes. 597 * @param[out] error 598 * Pointer to error structure. 599 * 600 * @return 601 * 0 on success, a negative errno value otherwise and rte_errno is set. 
602 */ 603 int 604 mlx5_flow_item_acceptable(const struct rte_flow_item *item, 605 const uint8_t *mask, 606 const uint8_t *nic_mask, 607 unsigned int size, 608 struct rte_flow_error *error) 609 { 610 unsigned int i; 611 612 MLX5_ASSERT(nic_mask); 613 for (i = 0; i < size; ++i) 614 if ((nic_mask[i] | mask[i]) != nic_mask[i]) 615 return rte_flow_error_set(error, ENOTSUP, 616 RTE_FLOW_ERROR_TYPE_ITEM, 617 item, 618 "mask enables non supported" 619 " bits"); 620 if (!item->spec && (item->mask || item->last)) 621 return rte_flow_error_set(error, EINVAL, 622 RTE_FLOW_ERROR_TYPE_ITEM, item, 623 "mask/last without a spec is not" 624 " supported"); 625 if (item->spec && item->last) { 626 uint8_t spec[size]; 627 uint8_t last[size]; 628 unsigned int i; 629 int ret; 630 631 for (i = 0; i < size; ++i) { 632 spec[i] = ((const uint8_t *)item->spec)[i] & mask[i]; 633 last[i] = ((const uint8_t *)item->last)[i] & mask[i]; 634 } 635 ret = memcmp(spec, last, size); 636 if (ret != 0) 637 return rte_flow_error_set(error, EINVAL, 638 RTE_FLOW_ERROR_TYPE_ITEM, 639 item, 640 "range is not valid"); 641 } 642 return 0; 643 } 644 645 /** 646 * Adjust the hash fields according to the @p flow information. 647 * 648 * @param[in] dev_flow. 649 * Pointer to the mlx5_flow. 650 * @param[in] tunnel 651 * 1 when the hash field is for a tunnel item. 652 * @param[in] layer_types 653 * ETH_RSS_* types. 654 * @param[in] hash_fields 655 * Item hash fields. 656 * 657 * @return 658 * The hash fields that should be used. 659 */ 660 uint64_t 661 mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc, 662 int tunnel __rte_unused, uint64_t layer_types, 663 uint64_t hash_fields) 664 { 665 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 666 int rss_request_inner = rss_desc->level >= 2; 667 668 /* Check RSS hash level for tunnel. */ 669 if (tunnel && rss_request_inner) 670 hash_fields |= IBV_RX_HASH_INNER; 671 else if (tunnel || rss_request_inner) 672 return 0; 673 #endif 674 /* Check if requested layer matches RSS hash fields. */ 675 if (!(rss_desc->types & layer_types)) 676 return 0; 677 return hash_fields; 678 } 679 680 /** 681 * Lookup and set the ptype in the data Rx part. A single Ptype can be used, 682 * if several tunnel rules are used on this queue, the tunnel ptype will be 683 * cleared. 684 * 685 * @param rxq_ctrl 686 * Rx queue to update. 687 */ 688 static void 689 flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) 690 { 691 unsigned int i; 692 uint32_t tunnel_ptype = 0; 693 694 /* Look up for the ptype to use. */ 695 for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) { 696 if (!rxq_ctrl->flow_tunnels_n[i]) 697 continue; 698 if (!tunnel_ptype) { 699 tunnel_ptype = tunnels_info[i].ptype; 700 } else { 701 tunnel_ptype = 0; 702 break; 703 } 704 } 705 rxq_ctrl->rxq.tunnel = tunnel_ptype; 706 } 707 708 /** 709 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the devive 710 * flow. 711 * 712 * @param[in] dev 713 * Pointer to the Ethernet device structure. 714 * @param[in] dev_handle 715 * Pointer to device flow handle structure. 
716 */ 717 static void 718 flow_drv_rxq_flags_set(struct rte_eth_dev *dev, 719 struct mlx5_flow_handle *dev_handle) 720 { 721 struct mlx5_priv *priv = dev->data->dev_private; 722 const int mark = dev_handle->mark; 723 const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL); 724 struct mlx5_hrxq *hrxq; 725 unsigned int i; 726 727 if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE) 728 return; 729 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], 730 dev_handle->rix_hrxq); 731 if (!hrxq) 732 return; 733 for (i = 0; i != hrxq->ind_table->queues_n; ++i) { 734 int idx = hrxq->ind_table->queues[i]; 735 struct mlx5_rxq_ctrl *rxq_ctrl = 736 container_of((*priv->rxqs)[idx], 737 struct mlx5_rxq_ctrl, rxq); 738 739 /* 740 * To support metadata register copy on Tx loopback, 741 * this must be always enabled (metadata may arive 742 * from other port - not from local flows only. 743 */ 744 if (priv->config.dv_flow_en && 745 priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && 746 mlx5_flow_ext_mreg_supported(dev)) { 747 rxq_ctrl->rxq.mark = 1; 748 rxq_ctrl->flow_mark_n = 1; 749 } else if (mark) { 750 rxq_ctrl->rxq.mark = 1; 751 rxq_ctrl->flow_mark_n++; 752 } 753 if (tunnel) { 754 unsigned int j; 755 756 /* Increase the counter matching the flow. */ 757 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { 758 if ((tunnels_info[j].tunnel & 759 dev_handle->layers) == 760 tunnels_info[j].tunnel) { 761 rxq_ctrl->flow_tunnels_n[j]++; 762 break; 763 } 764 } 765 flow_rxq_tunnel_ptype_update(rxq_ctrl); 766 } 767 } 768 } 769 770 /** 771 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow 772 * 773 * @param[in] dev 774 * Pointer to the Ethernet device structure. 775 * @param[in] flow 776 * Pointer to flow structure. 777 */ 778 static void 779 flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) 780 { 781 struct mlx5_priv *priv = dev->data->dev_private; 782 uint32_t handle_idx; 783 struct mlx5_flow_handle *dev_handle; 784 785 SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, 786 handle_idx, dev_handle, next) 787 flow_drv_rxq_flags_set(dev, dev_handle); 788 } 789 790 /** 791 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the 792 * device flow if no other flow uses it with the same kind of request. 793 * 794 * @param dev 795 * Pointer to Ethernet device. 796 * @param[in] dev_handle 797 * Pointer to the device flow handle structure. 
798 */ 799 static void 800 flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, 801 struct mlx5_flow_handle *dev_handle) 802 { 803 struct mlx5_priv *priv = dev->data->dev_private; 804 const int mark = dev_handle->mark; 805 const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL); 806 struct mlx5_hrxq *hrxq; 807 unsigned int i; 808 809 if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE) 810 return; 811 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], 812 dev_handle->rix_hrxq); 813 if (!hrxq) 814 return; 815 MLX5_ASSERT(dev->data->dev_started); 816 for (i = 0; i != hrxq->ind_table->queues_n; ++i) { 817 int idx = hrxq->ind_table->queues[i]; 818 struct mlx5_rxq_ctrl *rxq_ctrl = 819 container_of((*priv->rxqs)[idx], 820 struct mlx5_rxq_ctrl, rxq); 821 822 if (priv->config.dv_flow_en && 823 priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && 824 mlx5_flow_ext_mreg_supported(dev)) { 825 rxq_ctrl->rxq.mark = 1; 826 rxq_ctrl->flow_mark_n = 1; 827 } else if (mark) { 828 rxq_ctrl->flow_mark_n--; 829 rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n; 830 } 831 if (tunnel) { 832 unsigned int j; 833 834 /* Decrease the counter matching the flow. */ 835 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { 836 if ((tunnels_info[j].tunnel & 837 dev_handle->layers) == 838 tunnels_info[j].tunnel) { 839 rxq_ctrl->flow_tunnels_n[j]--; 840 break; 841 } 842 } 843 flow_rxq_tunnel_ptype_update(rxq_ctrl); 844 } 845 } 846 } 847 848 /** 849 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the 850 * @p flow if no other flow uses it with the same kind of request. 851 * 852 * @param dev 853 * Pointer to Ethernet device. 854 * @param[in] flow 855 * Pointer to the flow. 856 */ 857 static void 858 flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) 859 { 860 struct mlx5_priv *priv = dev->data->dev_private; 861 uint32_t handle_idx; 862 struct mlx5_flow_handle *dev_handle; 863 864 SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, 865 handle_idx, dev_handle, next) 866 flow_drv_rxq_flags_trim(dev, dev_handle); 867 } 868 869 /** 870 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues. 871 * 872 * @param dev 873 * Pointer to Ethernet device. 874 */ 875 static void 876 flow_rxq_flags_clear(struct rte_eth_dev *dev) 877 { 878 struct mlx5_priv *priv = dev->data->dev_private; 879 unsigned int i; 880 881 for (i = 0; i != priv->rxqs_n; ++i) { 882 struct mlx5_rxq_ctrl *rxq_ctrl; 883 unsigned int j; 884 885 if (!(*priv->rxqs)[i]) 886 continue; 887 rxq_ctrl = container_of((*priv->rxqs)[i], 888 struct mlx5_rxq_ctrl, rxq); 889 rxq_ctrl->flow_mark_n = 0; 890 rxq_ctrl->rxq.mark = 0; 891 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) 892 rxq_ctrl->flow_tunnels_n[j] = 0; 893 rxq_ctrl->rxq.tunnel = 0; 894 } 895 } 896 897 /* 898 * return a pointer to the desired action in the list of actions. 899 * 900 * @param[in] actions 901 * The list of actions to search the action in. 902 * @param[in] action 903 * The action to find. 904 * 905 * @return 906 * Pointer to the action in the list, if found. NULL otherwise. 907 */ 908 const struct rte_flow_action * 909 mlx5_flow_find_action(const struct rte_flow_action *actions, 910 enum rte_flow_action_type action) 911 { 912 if (actions == NULL) 913 return NULL; 914 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) 915 if (actions->type == action) 916 return actions; 917 return NULL; 918 } 919 920 /* 921 * Validate the flag action. 922 * 923 * @param[in] action_flags 924 * Bit-fields that holds the actions detected until now. 
925 * @param[in] attr 926 * Attributes of flow that includes this action. 927 * @param[out] error 928 * Pointer to error structure. 929 * 930 * @return 931 * 0 on success, a negative errno value otherwise and rte_errno is set. 932 */ 933 int 934 mlx5_flow_validate_action_flag(uint64_t action_flags, 935 const struct rte_flow_attr *attr, 936 struct rte_flow_error *error) 937 { 938 if (action_flags & MLX5_FLOW_ACTION_MARK) 939 return rte_flow_error_set(error, EINVAL, 940 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 941 "can't mark and flag in same flow"); 942 if (action_flags & MLX5_FLOW_ACTION_FLAG) 943 return rte_flow_error_set(error, EINVAL, 944 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 945 "can't have 2 flag" 946 " actions in same flow"); 947 if (attr->egress) 948 return rte_flow_error_set(error, ENOTSUP, 949 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 950 "flag action not supported for " 951 "egress"); 952 return 0; 953 } 954 955 /* 956 * Validate the mark action. 957 * 958 * @param[in] action 959 * Pointer to the queue action. 960 * @param[in] action_flags 961 * Bit-fields that holds the actions detected until now. 962 * @param[in] attr 963 * Attributes of flow that includes this action. 964 * @param[out] error 965 * Pointer to error structure. 966 * 967 * @return 968 * 0 on success, a negative errno value otherwise and rte_errno is set. 969 */ 970 int 971 mlx5_flow_validate_action_mark(const struct rte_flow_action *action, 972 uint64_t action_flags, 973 const struct rte_flow_attr *attr, 974 struct rte_flow_error *error) 975 { 976 const struct rte_flow_action_mark *mark = action->conf; 977 978 if (!mark) 979 return rte_flow_error_set(error, EINVAL, 980 RTE_FLOW_ERROR_TYPE_ACTION, 981 action, 982 "configuration cannot be null"); 983 if (mark->id >= MLX5_FLOW_MARK_MAX) 984 return rte_flow_error_set(error, EINVAL, 985 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 986 &mark->id, 987 "mark id must in 0 <= id < " 988 RTE_STR(MLX5_FLOW_MARK_MAX)); 989 if (action_flags & MLX5_FLOW_ACTION_FLAG) 990 return rte_flow_error_set(error, EINVAL, 991 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 992 "can't flag and mark in same flow"); 993 if (action_flags & MLX5_FLOW_ACTION_MARK) 994 return rte_flow_error_set(error, EINVAL, 995 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 996 "can't have 2 mark actions in same" 997 " flow"); 998 if (attr->egress) 999 return rte_flow_error_set(error, ENOTSUP, 1000 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1001 "mark action not supported for " 1002 "egress"); 1003 return 0; 1004 } 1005 1006 /* 1007 * Validate the drop action. 1008 * 1009 * @param[in] action_flags 1010 * Bit-fields that holds the actions detected until now. 1011 * @param[in] attr 1012 * Attributes of flow that includes this action. 1013 * @param[out] error 1014 * Pointer to error structure. 1015 * 1016 * @return 1017 * 0 on success, a negative errno value otherwise and rte_errno is set. 1018 */ 1019 int 1020 mlx5_flow_validate_action_drop(uint64_t action_flags __rte_unused, 1021 const struct rte_flow_attr *attr, 1022 struct rte_flow_error *error) 1023 { 1024 if (attr->egress) 1025 return rte_flow_error_set(error, ENOTSUP, 1026 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1027 "drop action not supported for " 1028 "egress"); 1029 return 0; 1030 } 1031 1032 /* 1033 * Validate the queue action. 1034 * 1035 * @param[in] action 1036 * Pointer to the queue action. 1037 * @param[in] action_flags 1038 * Bit-fields that holds the actions detected until now. 1039 * @param[in] dev 1040 * Pointer to the Ethernet device structure. 
1041 * @param[in] attr 1042 * Attributes of flow that includes this action. 1043 * @param[out] error 1044 * Pointer to error structure. 1045 * 1046 * @return 1047 * 0 on success, a negative errno value otherwise and rte_errno is set. 1048 */ 1049 int 1050 mlx5_flow_validate_action_queue(const struct rte_flow_action *action, 1051 uint64_t action_flags, 1052 struct rte_eth_dev *dev, 1053 const struct rte_flow_attr *attr, 1054 struct rte_flow_error *error) 1055 { 1056 struct mlx5_priv *priv = dev->data->dev_private; 1057 const struct rte_flow_action_queue *queue = action->conf; 1058 1059 if (action_flags & MLX5_FLOW_FATE_ACTIONS) 1060 return rte_flow_error_set(error, EINVAL, 1061 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1062 "can't have 2 fate actions in" 1063 " same flow"); 1064 if (!priv->rxqs_n) 1065 return rte_flow_error_set(error, EINVAL, 1066 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1067 NULL, "No Rx queues configured"); 1068 if (queue->index >= priv->rxqs_n) 1069 return rte_flow_error_set(error, EINVAL, 1070 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1071 &queue->index, 1072 "queue index out of range"); 1073 if (!(*priv->rxqs)[queue->index]) 1074 return rte_flow_error_set(error, EINVAL, 1075 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1076 &queue->index, 1077 "queue is not configured"); 1078 if (attr->egress) 1079 return rte_flow_error_set(error, ENOTSUP, 1080 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1081 "queue action not supported for " 1082 "egress"); 1083 return 0; 1084 } 1085 1086 /* 1087 * Validate the rss action. 1088 * 1089 * @param[in] action 1090 * Pointer to the queue action. 1091 * @param[in] action_flags 1092 * Bit-fields that holds the actions detected until now. 1093 * @param[in] dev 1094 * Pointer to the Ethernet device structure. 1095 * @param[in] attr 1096 * Attributes of flow that includes this action. 1097 * @param[in] item_flags 1098 * Items that were detected. 1099 * @param[out] error 1100 * Pointer to error structure. 1101 * 1102 * @return 1103 * 0 on success, a negative errno value otherwise and rte_errno is set. 1104 */ 1105 int 1106 mlx5_flow_validate_action_rss(const struct rte_flow_action *action, 1107 uint64_t action_flags, 1108 struct rte_eth_dev *dev, 1109 const struct rte_flow_attr *attr, 1110 uint64_t item_flags, 1111 struct rte_flow_error *error) 1112 { 1113 struct mlx5_priv *priv = dev->data->dev_private; 1114 const struct rte_flow_action_rss *rss = action->conf; 1115 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1116 unsigned int i; 1117 1118 if (action_flags & MLX5_FLOW_FATE_ACTIONS) 1119 return rte_flow_error_set(error, EINVAL, 1120 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1121 "can't have 2 fate actions" 1122 " in same flow"); 1123 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT && 1124 rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) 1125 return rte_flow_error_set(error, ENOTSUP, 1126 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1127 &rss->func, 1128 "RSS hash function not supported"); 1129 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 1130 if (rss->level > 2) 1131 #else 1132 if (rss->level > 1) 1133 #endif 1134 return rte_flow_error_set(error, ENOTSUP, 1135 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1136 &rss->level, 1137 "tunnel RSS is not supported"); 1138 /* allow RSS key_len 0 in case of NULL (default) RSS key. 
*/ 1139 if (rss->key_len == 0 && rss->key != NULL) 1140 return rte_flow_error_set(error, ENOTSUP, 1141 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1142 &rss->key_len, 1143 "RSS hash key length 0"); 1144 if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN) 1145 return rte_flow_error_set(error, ENOTSUP, 1146 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1147 &rss->key_len, 1148 "RSS hash key too small"); 1149 if (rss->key_len > MLX5_RSS_HASH_KEY_LEN) 1150 return rte_flow_error_set(error, ENOTSUP, 1151 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1152 &rss->key_len, 1153 "RSS hash key too large"); 1154 if (rss->queue_num > priv->config.ind_table_max_size) 1155 return rte_flow_error_set(error, ENOTSUP, 1156 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1157 &rss->queue_num, 1158 "number of queues too large"); 1159 if (rss->types & MLX5_RSS_HF_MASK) 1160 return rte_flow_error_set(error, ENOTSUP, 1161 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1162 &rss->types, 1163 "some RSS protocols are not" 1164 " supported"); 1165 if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) && 1166 !(rss->types & ETH_RSS_IP)) 1167 return rte_flow_error_set(error, EINVAL, 1168 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 1169 "L3 partial RSS requested but L3 RSS" 1170 " type not specified"); 1171 if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) && 1172 !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP))) 1173 return rte_flow_error_set(error, EINVAL, 1174 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 1175 "L4 partial RSS requested but L4 RSS" 1176 " type not specified"); 1177 if (!priv->rxqs_n) 1178 return rte_flow_error_set(error, EINVAL, 1179 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1180 NULL, "No Rx queues configured"); 1181 if (!rss->queue_num) 1182 return rte_flow_error_set(error, EINVAL, 1183 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1184 NULL, "No queues configured"); 1185 for (i = 0; i != rss->queue_num; ++i) { 1186 if (rss->queue[i] >= priv->rxqs_n) 1187 return rte_flow_error_set 1188 (error, EINVAL, 1189 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1190 &rss->queue[i], "queue index out of range"); 1191 if (!(*priv->rxqs)[rss->queue[i]]) 1192 return rte_flow_error_set 1193 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1194 &rss->queue[i], "queue is not configured"); 1195 } 1196 if (attr->egress) 1197 return rte_flow_error_set(error, ENOTSUP, 1198 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1199 "rss action not supported for " 1200 "egress"); 1201 if (rss->level > 1 && !tunnel) 1202 return rte_flow_error_set(error, EINVAL, 1203 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 1204 "inner RSS is not supported for " 1205 "non-tunnel flows"); 1206 return 0; 1207 } 1208 1209 /* 1210 * Validate the count action. 1211 * 1212 * @param[in] dev 1213 * Pointer to the Ethernet device structure. 1214 * @param[in] attr 1215 * Attributes of flow that includes this action. 1216 * @param[out] error 1217 * Pointer to error structure. 1218 * 1219 * @return 1220 * 0 on success, a negative errno value otherwise and rte_errno is set. 1221 */ 1222 int 1223 mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused, 1224 const struct rte_flow_attr *attr, 1225 struct rte_flow_error *error) 1226 { 1227 if (attr->egress) 1228 return rte_flow_error_set(error, ENOTSUP, 1229 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1230 "count action not supported for " 1231 "egress"); 1232 return 0; 1233 } 1234 1235 /** 1236 * Verify the @p attributes will be correctly understood by the NIC and store 1237 * them in the @p flow if everything is correct. 1238 * 1239 * @param[in] dev 1240 * Pointer to the Ethernet device structure. 
1241 * @param[in] attributes 1242 * Pointer to flow attributes 1243 * @param[out] error 1244 * Pointer to error structure. 1245 * 1246 * @return 1247 * 0 on success, a negative errno value otherwise and rte_errno is set. 1248 */ 1249 int 1250 mlx5_flow_validate_attributes(struct rte_eth_dev *dev, 1251 const struct rte_flow_attr *attributes, 1252 struct rte_flow_error *error) 1253 { 1254 struct mlx5_priv *priv = dev->data->dev_private; 1255 uint32_t priority_max = priv->config.flow_prio - 1; 1256 1257 if (attributes->group) 1258 return rte_flow_error_set(error, ENOTSUP, 1259 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, 1260 NULL, "groups is not supported"); 1261 if (attributes->priority != MLX5_FLOW_PRIO_RSVD && 1262 attributes->priority >= priority_max) 1263 return rte_flow_error_set(error, ENOTSUP, 1264 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, 1265 NULL, "priority out of range"); 1266 if (attributes->egress) 1267 return rte_flow_error_set(error, ENOTSUP, 1268 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1269 "egress is not supported"); 1270 if (attributes->transfer && !priv->config.dv_esw_en) 1271 return rte_flow_error_set(error, ENOTSUP, 1272 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, 1273 NULL, "transfer is not supported"); 1274 if (!attributes->ingress) 1275 return rte_flow_error_set(error, EINVAL, 1276 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, 1277 NULL, 1278 "ingress attribute is mandatory"); 1279 return 0; 1280 } 1281 1282 /** 1283 * Validate ICMP6 item. 1284 * 1285 * @param[in] item 1286 * Item specification. 1287 * @param[in] item_flags 1288 * Bit-fields that holds the items detected until now. 1289 * @param[out] error 1290 * Pointer to error structure. 1291 * 1292 * @return 1293 * 0 on success, a negative errno value otherwise and rte_errno is set. 1294 */ 1295 int 1296 mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item, 1297 uint64_t item_flags, 1298 uint8_t target_protocol, 1299 struct rte_flow_error *error) 1300 { 1301 const struct rte_flow_item_icmp6 *mask = item->mask; 1302 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1303 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : 1304 MLX5_FLOW_LAYER_OUTER_L3_IPV6; 1305 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1306 MLX5_FLOW_LAYER_OUTER_L4; 1307 int ret; 1308 1309 if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6) 1310 return rte_flow_error_set(error, EINVAL, 1311 RTE_FLOW_ERROR_TYPE_ITEM, item, 1312 "protocol filtering not compatible" 1313 " with ICMP6 layer"); 1314 if (!(item_flags & l3m)) 1315 return rte_flow_error_set(error, EINVAL, 1316 RTE_FLOW_ERROR_TYPE_ITEM, item, 1317 "IPv6 is mandatory to filter on" 1318 " ICMP6"); 1319 if (item_flags & l4m) 1320 return rte_flow_error_set(error, EINVAL, 1321 RTE_FLOW_ERROR_TYPE_ITEM, item, 1322 "multiple L4 layers not supported"); 1323 if (!mask) 1324 mask = &rte_flow_item_icmp6_mask; 1325 ret = mlx5_flow_item_acceptable 1326 (item, (const uint8_t *)mask, 1327 (const uint8_t *)&rte_flow_item_icmp6_mask, 1328 sizeof(struct rte_flow_item_icmp6), error); 1329 if (ret < 0) 1330 return ret; 1331 return 0; 1332 } 1333 1334 /** 1335 * Validate ICMP item. 1336 * 1337 * @param[in] item 1338 * Item specification. 1339 * @param[in] item_flags 1340 * Bit-fields that holds the items detected until now. 1341 * @param[out] error 1342 * Pointer to error structure. 1343 * 1344 * @return 1345 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1346 */ 1347 int 1348 mlx5_flow_validate_item_icmp(const struct rte_flow_item *item, 1349 uint64_t item_flags, 1350 uint8_t target_protocol, 1351 struct rte_flow_error *error) 1352 { 1353 const struct rte_flow_item_icmp *mask = item->mask; 1354 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1355 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : 1356 MLX5_FLOW_LAYER_OUTER_L3_IPV4; 1357 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1358 MLX5_FLOW_LAYER_OUTER_L4; 1359 int ret; 1360 1361 if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP) 1362 return rte_flow_error_set(error, EINVAL, 1363 RTE_FLOW_ERROR_TYPE_ITEM, item, 1364 "protocol filtering not compatible" 1365 " with ICMP layer"); 1366 if (!(item_flags & l3m)) 1367 return rte_flow_error_set(error, EINVAL, 1368 RTE_FLOW_ERROR_TYPE_ITEM, item, 1369 "IPv4 is mandatory to filter" 1370 " on ICMP"); 1371 if (item_flags & l4m) 1372 return rte_flow_error_set(error, EINVAL, 1373 RTE_FLOW_ERROR_TYPE_ITEM, item, 1374 "multiple L4 layers not supported"); 1375 if (!mask) 1376 mask = &rte_flow_item_icmp_mask; 1377 ret = mlx5_flow_item_acceptable 1378 (item, (const uint8_t *)mask, 1379 (const uint8_t *)&rte_flow_item_icmp_mask, 1380 sizeof(struct rte_flow_item_icmp), error); 1381 if (ret < 0) 1382 return ret; 1383 return 0; 1384 } 1385 1386 /** 1387 * Validate Ethernet item. 1388 * 1389 * @param[in] item 1390 * Item specification. 1391 * @param[in] item_flags 1392 * Bit-fields that holds the items detected until now. 1393 * @param[out] error 1394 * Pointer to error structure. 1395 * 1396 * @return 1397 * 0 on success, a negative errno value otherwise and rte_errno is set. 1398 */ 1399 int 1400 mlx5_flow_validate_item_eth(const struct rte_flow_item *item, 1401 uint64_t item_flags, 1402 struct rte_flow_error *error) 1403 { 1404 const struct rte_flow_item_eth *mask = item->mask; 1405 const struct rte_flow_item_eth nic_mask = { 1406 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", 1407 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", 1408 .type = RTE_BE16(0xffff), 1409 }; 1410 int ret; 1411 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1412 const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : 1413 MLX5_FLOW_LAYER_OUTER_L2; 1414 1415 if (item_flags & ethm) 1416 return rte_flow_error_set(error, ENOTSUP, 1417 RTE_FLOW_ERROR_TYPE_ITEM, item, 1418 "multiple L2 layers not supported"); 1419 if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) || 1420 (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3))) 1421 return rte_flow_error_set(error, EINVAL, 1422 RTE_FLOW_ERROR_TYPE_ITEM, item, 1423 "L2 layer should not follow " 1424 "L3 layers"); 1425 if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) || 1426 (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN))) 1427 return rte_flow_error_set(error, EINVAL, 1428 RTE_FLOW_ERROR_TYPE_ITEM, item, 1429 "L2 layer should not follow VLAN"); 1430 if (!mask) 1431 mask = &rte_flow_item_eth_mask; 1432 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1433 (const uint8_t *)&nic_mask, 1434 sizeof(struct rte_flow_item_eth), 1435 error); 1436 return ret; 1437 } 1438 1439 /** 1440 * Validate VLAN item. 1441 * 1442 * @param[in] item 1443 * Item specification. 1444 * @param[in] item_flags 1445 * Bit-fields that holds the items detected until now. 1446 * @param[in] dev 1447 * Ethernet device flow is being created on. 1448 * @param[out] error 1449 * Pointer to error structure. 
1450 * 1451 * @return 1452 * 0 on success, a negative errno value otherwise and rte_errno is set. 1453 */ 1454 int 1455 mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, 1456 uint64_t item_flags, 1457 struct rte_eth_dev *dev, 1458 struct rte_flow_error *error) 1459 { 1460 const struct rte_flow_item_vlan *spec = item->spec; 1461 const struct rte_flow_item_vlan *mask = item->mask; 1462 const struct rte_flow_item_vlan nic_mask = { 1463 .tci = RTE_BE16(UINT16_MAX), 1464 .inner_type = RTE_BE16(UINT16_MAX), 1465 }; 1466 uint16_t vlan_tag = 0; 1467 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1468 int ret; 1469 const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 | 1470 MLX5_FLOW_LAYER_INNER_L4) : 1471 (MLX5_FLOW_LAYER_OUTER_L3 | 1472 MLX5_FLOW_LAYER_OUTER_L4); 1473 const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN : 1474 MLX5_FLOW_LAYER_OUTER_VLAN; 1475 1476 if (item_flags & vlanm) 1477 return rte_flow_error_set(error, EINVAL, 1478 RTE_FLOW_ERROR_TYPE_ITEM, item, 1479 "multiple VLAN layers not supported"); 1480 else if ((item_flags & l34m) != 0) 1481 return rte_flow_error_set(error, EINVAL, 1482 RTE_FLOW_ERROR_TYPE_ITEM, item, 1483 "VLAN cannot follow L3/L4 layer"); 1484 if (!mask) 1485 mask = &rte_flow_item_vlan_mask; 1486 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1487 (const uint8_t *)&nic_mask, 1488 sizeof(struct rte_flow_item_vlan), 1489 error); 1490 if (ret) 1491 return ret; 1492 if (!tunnel && mask->tci != RTE_BE16(0x0fff)) { 1493 struct mlx5_priv *priv = dev->data->dev_private; 1494 1495 if (priv->vmwa_context) { 1496 /* 1497 * Non-NULL context means we have a virtual machine 1498 * and SR-IOV enabled, we have to create VLAN interface 1499 * to make hypervisor to setup E-Switch vport 1500 * context correctly. We avoid creating the multiple 1501 * VLAN interfaces, so we cannot support VLAN tag mask. 1502 */ 1503 return rte_flow_error_set(error, EINVAL, 1504 RTE_FLOW_ERROR_TYPE_ITEM, 1505 item, 1506 "VLAN tag mask is not" 1507 " supported in virtual" 1508 " environment"); 1509 } 1510 } 1511 if (spec) { 1512 vlan_tag = spec->tci; 1513 vlan_tag &= mask->tci; 1514 } 1515 /* 1516 * From verbs perspective an empty VLAN is equivalent 1517 * to a packet without VLAN layer. 1518 */ 1519 if (!vlan_tag) 1520 return rte_flow_error_set(error, EINVAL, 1521 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, 1522 item->spec, 1523 "VLAN cannot be empty"); 1524 return 0; 1525 } 1526 1527 /** 1528 * Validate IPV4 item. 1529 * 1530 * @param[in] item 1531 * Item specification. 1532 * @param[in] item_flags 1533 * Bit-fields that holds the items detected until now. 1534 * @param[in] acc_mask 1535 * Acceptable mask, if NULL default internal default mask 1536 * will be used to check whether item fields are supported. 1537 * @param[out] error 1538 * Pointer to error structure. 1539 * 1540 * @return 1541 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1542 */ 1543 int 1544 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, 1545 uint64_t item_flags, 1546 uint64_t last_item, 1547 uint16_t ether_type, 1548 const struct rte_flow_item_ipv4 *acc_mask, 1549 struct rte_flow_error *error) 1550 { 1551 const struct rte_flow_item_ipv4 *mask = item->mask; 1552 const struct rte_flow_item_ipv4 *spec = item->spec; 1553 const struct rte_flow_item_ipv4 nic_mask = { 1554 .hdr = { 1555 .src_addr = RTE_BE32(0xffffffff), 1556 .dst_addr = RTE_BE32(0xffffffff), 1557 .type_of_service = 0xff, 1558 .next_proto_id = 0xff, 1559 }, 1560 }; 1561 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1562 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1563 MLX5_FLOW_LAYER_OUTER_L3; 1564 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1565 MLX5_FLOW_LAYER_OUTER_L4; 1566 int ret; 1567 uint8_t next_proto = 0xFF; 1568 const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 | 1569 MLX5_FLOW_LAYER_OUTER_VLAN | 1570 MLX5_FLOW_LAYER_INNER_VLAN); 1571 1572 if ((last_item & l2_vlan) && ether_type && 1573 ether_type != RTE_ETHER_TYPE_IPV4) 1574 return rte_flow_error_set(error, EINVAL, 1575 RTE_FLOW_ERROR_TYPE_ITEM, item, 1576 "IPv4 cannot follow L2/VLAN layer " 1577 "which ether type is not IPv4"); 1578 if (item_flags & MLX5_FLOW_LAYER_IPIP) { 1579 if (mask && spec) 1580 next_proto = mask->hdr.next_proto_id & 1581 spec->hdr.next_proto_id; 1582 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) 1583 return rte_flow_error_set(error, EINVAL, 1584 RTE_FLOW_ERROR_TYPE_ITEM, 1585 item, 1586 "multiple tunnel " 1587 "not supported"); 1588 } 1589 if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) 1590 return rte_flow_error_set(error, EINVAL, 1591 RTE_FLOW_ERROR_TYPE_ITEM, item, 1592 "wrong tunnel type - IPv6 specified " 1593 "but IPv4 item provided"); 1594 if (item_flags & l3m) 1595 return rte_flow_error_set(error, ENOTSUP, 1596 RTE_FLOW_ERROR_TYPE_ITEM, item, 1597 "multiple L3 layers not supported"); 1598 else if (item_flags & l4m) 1599 return rte_flow_error_set(error, EINVAL, 1600 RTE_FLOW_ERROR_TYPE_ITEM, item, 1601 "L3 cannot follow an L4 layer."); 1602 else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && 1603 !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) 1604 return rte_flow_error_set(error, EINVAL, 1605 RTE_FLOW_ERROR_TYPE_ITEM, item, 1606 "L3 cannot follow an NVGRE layer."); 1607 if (!mask) 1608 mask = &rte_flow_item_ipv4_mask; 1609 else if (mask->hdr.next_proto_id != 0 && 1610 mask->hdr.next_proto_id != 0xff) 1611 return rte_flow_error_set(error, EINVAL, 1612 RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask, 1613 "partial mask is not supported" 1614 " for protocol"); 1615 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1616 acc_mask ? (const uint8_t *)acc_mask 1617 : (const uint8_t *)&nic_mask, 1618 sizeof(struct rte_flow_item_ipv4), 1619 error); 1620 if (ret < 0) 1621 return ret; 1622 return 0; 1623 } 1624 1625 /** 1626 * Validate IPV6 item. 1627 * 1628 * @param[in] item 1629 * Item specification. 1630 * @param[in] item_flags 1631 * Bit-fields that holds the items detected until now. 1632 * @param[in] acc_mask 1633 * Acceptable mask, if NULL default internal default mask 1634 * will be used to check whether item fields are supported. 1635 * @param[out] error 1636 * Pointer to error structure. 1637 * 1638 * @return 1639 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1640 */ 1641 int 1642 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, 1643 uint64_t item_flags, 1644 uint64_t last_item, 1645 uint16_t ether_type, 1646 const struct rte_flow_item_ipv6 *acc_mask, 1647 struct rte_flow_error *error) 1648 { 1649 const struct rte_flow_item_ipv6 *mask = item->mask; 1650 const struct rte_flow_item_ipv6 *spec = item->spec; 1651 const struct rte_flow_item_ipv6 nic_mask = { 1652 .hdr = { 1653 .src_addr = 1654 "\xff\xff\xff\xff\xff\xff\xff\xff" 1655 "\xff\xff\xff\xff\xff\xff\xff\xff", 1656 .dst_addr = 1657 "\xff\xff\xff\xff\xff\xff\xff\xff" 1658 "\xff\xff\xff\xff\xff\xff\xff\xff", 1659 .vtc_flow = RTE_BE32(0xffffffff), 1660 .proto = 0xff, 1661 }, 1662 }; 1663 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1664 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1665 MLX5_FLOW_LAYER_OUTER_L3; 1666 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1667 MLX5_FLOW_LAYER_OUTER_L4; 1668 int ret; 1669 uint8_t next_proto = 0xFF; 1670 const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 | 1671 MLX5_FLOW_LAYER_OUTER_VLAN | 1672 MLX5_FLOW_LAYER_INNER_VLAN); 1673 1674 if ((last_item & l2_vlan) && ether_type && 1675 ether_type != RTE_ETHER_TYPE_IPV6) 1676 return rte_flow_error_set(error, EINVAL, 1677 RTE_FLOW_ERROR_TYPE_ITEM, item, 1678 "IPv6 cannot follow L2/VLAN layer " 1679 "which ether type is not IPv6"); 1680 if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) { 1681 if (mask && spec) 1682 next_proto = mask->hdr.proto & spec->hdr.proto; 1683 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) 1684 return rte_flow_error_set(error, EINVAL, 1685 RTE_FLOW_ERROR_TYPE_ITEM, 1686 item, 1687 "multiple tunnel " 1688 "not supported"); 1689 } 1690 if (item_flags & MLX5_FLOW_LAYER_IPIP) 1691 return rte_flow_error_set(error, EINVAL, 1692 RTE_FLOW_ERROR_TYPE_ITEM, item, 1693 "wrong tunnel type - IPv4 specified " 1694 "but IPv6 item provided"); 1695 if (item_flags & l3m) 1696 return rte_flow_error_set(error, ENOTSUP, 1697 RTE_FLOW_ERROR_TYPE_ITEM, item, 1698 "multiple L3 layers not supported"); 1699 else if (item_flags & l4m) 1700 return rte_flow_error_set(error, EINVAL, 1701 RTE_FLOW_ERROR_TYPE_ITEM, item, 1702 "L3 cannot follow an L4 layer."); 1703 else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && 1704 !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) 1705 return rte_flow_error_set(error, EINVAL, 1706 RTE_FLOW_ERROR_TYPE_ITEM, item, 1707 "L3 cannot follow an NVGRE layer."); 1708 if (!mask) 1709 mask = &rte_flow_item_ipv6_mask; 1710 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1711 acc_mask ? (const uint8_t *)acc_mask 1712 : (const uint8_t *)&nic_mask, 1713 sizeof(struct rte_flow_item_ipv6), 1714 error); 1715 if (ret < 0) 1716 return ret; 1717 return 0; 1718 } 1719 1720 /** 1721 * Validate UDP item. 1722 * 1723 * @param[in] item 1724 * Item specification. 1725 * @param[in] item_flags 1726 * Bit-fields that holds the items detected until now. 1727 * @param[in] target_protocol 1728 * The next protocol in the previous item. 1729 * @param[in] flow_mask 1730 * mlx5 flow-specific (DV, verbs, etc.) supported header fields mask. 1731 * @param[out] error 1732 * Pointer to error structure. 1733 * 1734 * @return 1735 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1736 */ 1737 int 1738 mlx5_flow_validate_item_udp(const struct rte_flow_item *item, 1739 uint64_t item_flags, 1740 uint8_t target_protocol, 1741 struct rte_flow_error *error) 1742 { 1743 const struct rte_flow_item_udp *mask = item->mask; 1744 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1745 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1746 MLX5_FLOW_LAYER_OUTER_L3; 1747 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1748 MLX5_FLOW_LAYER_OUTER_L4; 1749 int ret; 1750 1751 if (target_protocol != 0xff && target_protocol != IPPROTO_UDP) 1752 return rte_flow_error_set(error, EINVAL, 1753 RTE_FLOW_ERROR_TYPE_ITEM, item, 1754 "protocol filtering not compatible" 1755 " with UDP layer"); 1756 if (!(item_flags & l3m)) 1757 return rte_flow_error_set(error, EINVAL, 1758 RTE_FLOW_ERROR_TYPE_ITEM, item, 1759 "L3 is mandatory to filter on L4"); 1760 if (item_flags & l4m) 1761 return rte_flow_error_set(error, EINVAL, 1762 RTE_FLOW_ERROR_TYPE_ITEM, item, 1763 "multiple L4 layers not supported"); 1764 if (!mask) 1765 mask = &rte_flow_item_udp_mask; 1766 ret = mlx5_flow_item_acceptable 1767 (item, (const uint8_t *)mask, 1768 (const uint8_t *)&rte_flow_item_udp_mask, 1769 sizeof(struct rte_flow_item_udp), error); 1770 if (ret < 0) 1771 return ret; 1772 return 0; 1773 } 1774 1775 /** 1776 * Validate TCP item. 1777 * 1778 * @param[in] item 1779 * Item specification. 1780 * @param[in] item_flags 1781 * Bit-fields that holds the items detected until now. 1782 * @param[in] target_protocol 1783 * The next protocol in the previous item. 1784 * @param[out] error 1785 * Pointer to error structure. 1786 * 1787 * @return 1788 * 0 on success, a negative errno value otherwise and rte_errno is set. 1789 */ 1790 int 1791 mlx5_flow_validate_item_tcp(const struct rte_flow_item *item, 1792 uint64_t item_flags, 1793 uint8_t target_protocol, 1794 const struct rte_flow_item_tcp *flow_mask, 1795 struct rte_flow_error *error) 1796 { 1797 const struct rte_flow_item_tcp *mask = item->mask; 1798 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1799 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1800 MLX5_FLOW_LAYER_OUTER_L3; 1801 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1802 MLX5_FLOW_LAYER_OUTER_L4; 1803 int ret; 1804 1805 MLX5_ASSERT(flow_mask); 1806 if (target_protocol != 0xff && target_protocol != IPPROTO_TCP) 1807 return rte_flow_error_set(error, EINVAL, 1808 RTE_FLOW_ERROR_TYPE_ITEM, item, 1809 "protocol filtering not compatible" 1810 " with TCP layer"); 1811 if (!(item_flags & l3m)) 1812 return rte_flow_error_set(error, EINVAL, 1813 RTE_FLOW_ERROR_TYPE_ITEM, item, 1814 "L3 is mandatory to filter on L4"); 1815 if (item_flags & l4m) 1816 return rte_flow_error_set(error, EINVAL, 1817 RTE_FLOW_ERROR_TYPE_ITEM, item, 1818 "multiple L4 layers not supported"); 1819 if (!mask) 1820 mask = &rte_flow_item_tcp_mask; 1821 ret = mlx5_flow_item_acceptable 1822 (item, (const uint8_t *)mask, 1823 (const uint8_t *)flow_mask, 1824 sizeof(struct rte_flow_item_tcp), error); 1825 if (ret < 0) 1826 return ret; 1827 return 0; 1828 } 1829 1830 /** 1831 * Validate VXLAN item. 1832 * 1833 * @param[in] item 1834 * Item specification. 1835 * @param[in] item_flags 1836 * Bit-fields that holds the items detected until now. 1837 * @param[in] target_protocol 1838 * The next protocol in the previous item. 1839 * @param[out] error 1840 * Pointer to error structure. 1841 * 1842 * @return 1843 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1844 */ 1845 int 1846 mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, 1847 uint64_t item_flags, 1848 struct rte_flow_error *error) 1849 { 1850 const struct rte_flow_item_vxlan *spec = item->spec; 1851 const struct rte_flow_item_vxlan *mask = item->mask; 1852 int ret; 1853 union vni { 1854 uint32_t vlan_id; 1855 uint8_t vni[4]; 1856 } id = { .vlan_id = 0, }; 1857 1858 1859 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 1860 return rte_flow_error_set(error, ENOTSUP, 1861 RTE_FLOW_ERROR_TYPE_ITEM, item, 1862 "multiple tunnel layers not" 1863 " supported"); 1864 /* 1865 * Verify only UDPv4 is present as defined in 1866 * https://tools.ietf.org/html/rfc7348 1867 */ 1868 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 1869 return rte_flow_error_set(error, EINVAL, 1870 RTE_FLOW_ERROR_TYPE_ITEM, item, 1871 "no outer UDP layer found"); 1872 if (!mask) 1873 mask = &rte_flow_item_vxlan_mask; 1874 ret = mlx5_flow_item_acceptable 1875 (item, (const uint8_t *)mask, 1876 (const uint8_t *)&rte_flow_item_vxlan_mask, 1877 sizeof(struct rte_flow_item_vxlan), 1878 error); 1879 if (ret < 0) 1880 return ret; 1881 if (spec) { 1882 memcpy(&id.vni[1], spec->vni, 3); 1883 memcpy(&id.vni[1], mask->vni, 3); 1884 } 1885 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 1886 return rte_flow_error_set(error, ENOTSUP, 1887 RTE_FLOW_ERROR_TYPE_ITEM, item, 1888 "VXLAN tunnel must be fully defined"); 1889 return 0; 1890 } 1891 1892 /** 1893 * Validate VXLAN_GPE item. 1894 * 1895 * @param[in] item 1896 * Item specification. 1897 * @param[in] item_flags 1898 * Bit-fields that holds the items detected until now. 1899 * @param[in] priv 1900 * Pointer to the private data structure. 1901 * @param[in] target_protocol 1902 * The next protocol in the previous item. 1903 * @param[out] error 1904 * Pointer to error structure. 1905 * 1906 * @return 1907 * 0 on success, a negative errno value otherwise and rte_errno is set. 
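 *
 * Matching VXLAN-GPE additionally requires the "l3_vxlan_en=1" device
 * argument (reflected in priv->config.l3_vxlan_en) and, like plain VXLAN,
 * an already validated outer UDP item, e.g. a pattern of the form
 *
 *   eth / ipv4 / udp / vxlan-gpe / ...
 *
 * so MLX5_FLOW_LAYER_OUTER_L4_UDP must already be set in item_flags.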
1908 */ 1909 int 1910 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, 1911 uint64_t item_flags, 1912 struct rte_eth_dev *dev, 1913 struct rte_flow_error *error) 1914 { 1915 struct mlx5_priv *priv = dev->data->dev_private; 1916 const struct rte_flow_item_vxlan_gpe *spec = item->spec; 1917 const struct rte_flow_item_vxlan_gpe *mask = item->mask; 1918 int ret; 1919 union vni { 1920 uint32_t vlan_id; 1921 uint8_t vni[4]; 1922 } id = { .vlan_id = 0, }; 1923 1924 if (!priv->config.l3_vxlan_en) 1925 return rte_flow_error_set(error, ENOTSUP, 1926 RTE_FLOW_ERROR_TYPE_ITEM, item, 1927 "L3 VXLAN is not enabled by device" 1928 " parameter and/or not configured in" 1929 " firmware"); 1930 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 1931 return rte_flow_error_set(error, ENOTSUP, 1932 RTE_FLOW_ERROR_TYPE_ITEM, item, 1933 "multiple tunnel layers not" 1934 " supported"); 1935 /* 1936 * Verify only UDPv4 is present as defined in 1937 * https://tools.ietf.org/html/rfc7348 1938 */ 1939 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 1940 return rte_flow_error_set(error, EINVAL, 1941 RTE_FLOW_ERROR_TYPE_ITEM, item, 1942 "no outer UDP layer found"); 1943 if (!mask) 1944 mask = &rte_flow_item_vxlan_gpe_mask; 1945 ret = mlx5_flow_item_acceptable 1946 (item, (const uint8_t *)mask, 1947 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask, 1948 sizeof(struct rte_flow_item_vxlan_gpe), 1949 error); 1950 if (ret < 0) 1951 return ret; 1952 if (spec) { 1953 if (spec->protocol) 1954 return rte_flow_error_set(error, ENOTSUP, 1955 RTE_FLOW_ERROR_TYPE_ITEM, 1956 item, 1957 "VxLAN-GPE protocol" 1958 " not supported"); 1959 memcpy(&id.vni[1], spec->vni, 3); 1960 memcpy(&id.vni[1], mask->vni, 3); 1961 } 1962 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 1963 return rte_flow_error_set(error, ENOTSUP, 1964 RTE_FLOW_ERROR_TYPE_ITEM, item, 1965 "VXLAN-GPE tunnel must be fully" 1966 " defined"); 1967 return 0; 1968 } 1969 /** 1970 * Validate GRE Key item. 1971 * 1972 * @param[in] item 1973 * Item specification. 1974 * @param[in] item_flags 1975 * Bit flags to mark detected items. 1976 * @param[in] gre_item 1977 * Pointer to gre_item 1978 * @param[out] error 1979 * Pointer to error structure. 1980 * 1981 * @return 1982 * 0 on success, a negative errno value otherwise and rte_errno is set. 
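 *
 * A hedged sketch of a GRE + GRE key item pair that passes this check
 * (values and the items[] array are illustrative assumptions):
 *
 *   struct rte_flow_item_gre gre_spec = {
 *       .c_rsvd0_ver = RTE_BE16(0x2000),
 *   };
 *   rte_be32_t gre_key_spec = RTE_BE32(0x1234);
 *   items[n].type = RTE_FLOW_ITEM_TYPE_GRE;
 *   items[n].spec = &gre_spec;
 *   items[n + 1].type = RTE_FLOW_ITEM_TYPE_GRE_KEY;
 *   items[n + 1].spec = &gre_key_spec;
 *
 * The 0x2000 bit of c_rsvd0_ver is the key-present flag; if the GRE mask
 * covers that bit while the GRE spec clears it, validation fails with
 * "Key bit must be on".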
1983 */ 1984 int 1985 mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item, 1986 uint64_t item_flags, 1987 const struct rte_flow_item *gre_item, 1988 struct rte_flow_error *error) 1989 { 1990 const rte_be32_t *mask = item->mask; 1991 int ret = 0; 1992 rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX); 1993 const struct rte_flow_item_gre *gre_spec; 1994 const struct rte_flow_item_gre *gre_mask; 1995 1996 if (item_flags & MLX5_FLOW_LAYER_GRE_KEY) 1997 return rte_flow_error_set(error, ENOTSUP, 1998 RTE_FLOW_ERROR_TYPE_ITEM, item, 1999 "Multiple GRE keys not supported"); 2000 if (!(item_flags & MLX5_FLOW_LAYER_GRE)) 2001 return rte_flow_error_set(error, ENOTSUP, 2002 RTE_FLOW_ERROR_TYPE_ITEM, item, 2003 "No preceding GRE header"); 2004 if (item_flags & MLX5_FLOW_LAYER_INNER) 2005 return rte_flow_error_set(error, ENOTSUP, 2006 RTE_FLOW_ERROR_TYPE_ITEM, item, 2007 "GRE key following a wrong item"); 2008 gre_mask = gre_item->mask; 2009 if (!gre_mask) 2010 gre_mask = &rte_flow_item_gre_mask; 2011 gre_spec = gre_item->spec; 2012 if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) && 2013 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000))) 2014 return rte_flow_error_set(error, EINVAL, 2015 RTE_FLOW_ERROR_TYPE_ITEM, item, 2016 "Key bit must be on"); 2017 2018 if (!mask) 2019 mask = &gre_key_default_mask; 2020 ret = mlx5_flow_item_acceptable 2021 (item, (const uint8_t *)mask, 2022 (const uint8_t *)&gre_key_default_mask, 2023 sizeof(rte_be32_t), error); 2024 return ret; 2025 } 2026 2027 /** 2028 * Validate GRE item. 2029 * 2030 * @param[in] item 2031 * Item specification. 2032 * @param[in] item_flags 2033 * Bit flags to mark detected items. 2034 * @param[in] target_protocol 2035 * The next protocol in the previous item. 2036 * @param[out] error 2037 * Pointer to error structure. 2038 * 2039 * @return 2040 * 0 on success, a negative errno value otherwise and rte_errno is set.
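 *
 * The nic_mask below restricts c_rsvd0_ver matching to the C, K and S
 * flag bits (0xB000) plus the protocol field. A spec staying within that
 * mask could look like this (values are illustrative):
 *
 *   const struct rte_flow_item_gre gre_spec = {
 *       .c_rsvd0_ver = RTE_BE16(0x2000),
 *       .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB),
 *   };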
2041 */ 2042 int 2043 mlx5_flow_validate_item_gre(const struct rte_flow_item *item, 2044 uint64_t item_flags, 2045 uint8_t target_protocol, 2046 struct rte_flow_error *error) 2047 { 2048 const struct rte_flow_item_gre *spec __rte_unused = item->spec; 2049 const struct rte_flow_item_gre *mask = item->mask; 2050 int ret; 2051 const struct rte_flow_item_gre nic_mask = { 2052 .c_rsvd0_ver = RTE_BE16(0xB000), 2053 .protocol = RTE_BE16(UINT16_MAX), 2054 }; 2055 2056 if (target_protocol != 0xff && target_protocol != IPPROTO_GRE) 2057 return rte_flow_error_set(error, EINVAL, 2058 RTE_FLOW_ERROR_TYPE_ITEM, item, 2059 "protocol filtering not compatible" 2060 " with this GRE layer"); 2061 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 2062 return rte_flow_error_set(error, ENOTSUP, 2063 RTE_FLOW_ERROR_TYPE_ITEM, item, 2064 "multiple tunnel layers not" 2065 " supported"); 2066 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3)) 2067 return rte_flow_error_set(error, ENOTSUP, 2068 RTE_FLOW_ERROR_TYPE_ITEM, item, 2069 "L3 Layer is missing"); 2070 if (!mask) 2071 mask = &rte_flow_item_gre_mask; 2072 ret = mlx5_flow_item_acceptable 2073 (item, (const uint8_t *)mask, 2074 (const uint8_t *)&nic_mask, 2075 sizeof(struct rte_flow_item_gre), error); 2076 if (ret < 0) 2077 return ret; 2078 #ifndef HAVE_MLX5DV_DR 2079 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT 2080 if (spec && (spec->protocol & mask->protocol)) 2081 return rte_flow_error_set(error, ENOTSUP, 2082 RTE_FLOW_ERROR_TYPE_ITEM, item, 2083 "without MPLS support the" 2084 " specification cannot be used for" 2085 " filtering"); 2086 #endif 2087 #endif 2088 return 0; 2089 } 2090 2091 /** 2092 * Validate Geneve item. 2093 * 2094 * @param[in] item 2095 * Item specification. 2096 * @param[in] item_flags 2097 * Bit-fields that holds the items detected until now. 2098 * @param[in] dev 2099 * Pointer to the rte_eth_dev structure. 2100 * @param[out] error 2101 * Pointer to error structure. 2102 * 2103 * @return 2104 * 0 on success, a negative errno value otherwise and rte_errno is set. 2105 */ 2106 2107 int 2108 mlx5_flow_validate_item_geneve(const struct rte_flow_item *item, 2109 uint64_t item_flags, 2110 struct rte_eth_dev *dev, 2111 struct rte_flow_error *error) 2112 { 2113 struct mlx5_priv *priv = dev->data->dev_private; 2114 const struct rte_flow_item_geneve *spec = item->spec; 2115 const struct rte_flow_item_geneve *mask = item->mask; 2116 int ret; 2117 uint16_t gbhdr; 2118 uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
2119 MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0; 2120 const struct rte_flow_item_geneve nic_mask = { 2121 .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80), 2122 .vni = "\xff\xff\xff", 2123 .protocol = RTE_BE16(UINT16_MAX), 2124 }; 2125 2126 if (!priv->config.hca_attr.tunnel_stateless_geneve_rx) 2127 return rte_flow_error_set(error, ENOTSUP, 2128 RTE_FLOW_ERROR_TYPE_ITEM, item, 2129 "L3 Geneve is not enabled by device" 2130 " parameter and/or not configured in" 2131 " firmware"); 2132 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 2133 return rte_flow_error_set(error, ENOTSUP, 2134 RTE_FLOW_ERROR_TYPE_ITEM, item, 2135 "multiple tunnel layers not" 2136 " supported"); 2137 /* 2138 * Verify only UDPv4 is present as defined in 2139 * https://tools.ietf.org/html/rfc7348 2140 */ 2141 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 2142 return rte_flow_error_set(error, EINVAL, 2143 RTE_FLOW_ERROR_TYPE_ITEM, item, 2144 "no outer UDP layer found"); 2145 if (!mask) 2146 mask = &rte_flow_item_geneve_mask; 2147 ret = mlx5_flow_item_acceptable 2148 (item, (const uint8_t *)mask, 2149 (const uint8_t *)&nic_mask, 2150 sizeof(struct rte_flow_item_geneve), error); 2151 if (ret) 2152 return ret; 2153 if (spec) { 2154 gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0); 2155 if (MLX5_GENEVE_VER_VAL(gbhdr) || 2156 MLX5_GENEVE_CRITO_VAL(gbhdr) || 2157 MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1) 2158 return rte_flow_error_set(error, ENOTSUP, 2159 RTE_FLOW_ERROR_TYPE_ITEM, 2160 item, 2161 "Geneve protocol unsupported" 2162 " fields are being used"); 2163 if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len) 2164 return rte_flow_error_set 2165 (error, ENOTSUP, 2166 RTE_FLOW_ERROR_TYPE_ITEM, 2167 item, 2168 "Unsupported Geneve options length"); 2169 } 2170 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 2171 return rte_flow_error_set 2172 (error, ENOTSUP, 2173 RTE_FLOW_ERROR_TYPE_ITEM, item, 2174 "Geneve tunnel must be fully defined"); 2175 return 0; 2176 } 2177 2178 /** 2179 * Validate MPLS item. 2180 * 2181 * @param[in] dev 2182 * Pointer to the rte_eth_dev structure. 2183 * @param[in] item 2184 * Item specification. 2185 * @param[in] item_flags 2186 * Bit-fields that holds the items detected until now. 2187 * @param[in] prev_layer 2188 * The protocol layer indicated in previous item. 2189 * @param[out] error 2190 * Pointer to error structure. 2191 * 2192 * @return 2193 * 0 on success, a negative errno value otherwise and rte_errno is set. 2194 */ 2195 int 2196 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, 2197 const struct rte_flow_item *item __rte_unused, 2198 uint64_t item_flags __rte_unused, 2199 uint64_t prev_layer __rte_unused, 2200 struct rte_flow_error *error) 2201 { 2202 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT 2203 const struct rte_flow_item_mpls *mask = item->mask; 2204 struct mlx5_priv *priv = dev->data->dev_private; 2205 int ret; 2206 2207 if (!priv->config.mpls_en) 2208 return rte_flow_error_set(error, ENOTSUP, 2209 RTE_FLOW_ERROR_TYPE_ITEM, item, 2210 "MPLS not supported or" 2211 " disabled in firmware" 2212 " configuration."); 2213 /* MPLS over IP, UDP, GRE is allowed */ 2214 if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 | 2215 MLX5_FLOW_LAYER_OUTER_L4_UDP | 2216 MLX5_FLOW_LAYER_GRE))) 2217 return rte_flow_error_set(error, EINVAL, 2218 RTE_FLOW_ERROR_TYPE_ITEM, item, 2219 "protocol filtering not compatible" 2220 " with MPLS layer"); 2221 /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. 
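	 * An MPLS-over-GRE pattern such as "eth / ipv4 / gre / mpls" keeps
	 * MLX5_FLOW_LAYER_GRE set in item_flags, so a previously seen tunnel
	 * layer is only tolerated here when it is GRE.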
*/ 2222 if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) && 2223 !(item_flags & MLX5_FLOW_LAYER_GRE)) 2224 return rte_flow_error_set(error, ENOTSUP, 2225 RTE_FLOW_ERROR_TYPE_ITEM, item, 2226 "multiple tunnel layers not" 2227 " supported"); 2228 if (!mask) 2229 mask = &rte_flow_item_mpls_mask; 2230 ret = mlx5_flow_item_acceptable 2231 (item, (const uint8_t *)mask, 2232 (const uint8_t *)&rte_flow_item_mpls_mask, 2233 sizeof(struct rte_flow_item_mpls), error); 2234 if (ret < 0) 2235 return ret; 2236 return 0; 2237 #endif 2238 return rte_flow_error_set(error, ENOTSUP, 2239 RTE_FLOW_ERROR_TYPE_ITEM, item, 2240 "MPLS is not supported by Verbs, please" 2241 " update."); 2242 } 2243 2244 /** 2245 * Validate NVGRE item. 2246 * 2247 * @param[in] item 2248 * Item specification. 2249 * @param[in] item_flags 2250 * Bit flags to mark detected items. 2251 * @param[in] target_protocol 2252 * The next protocol in the previous item. 2253 * @param[out] error 2254 * Pointer to error structure. 2255 * 2256 * @return 2257 * 0 on success, a negative errno value otherwise and rte_errno is set. 2258 */ 2259 int 2260 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item, 2261 uint64_t item_flags, 2262 uint8_t target_protocol, 2263 struct rte_flow_error *error) 2264 { 2265 const struct rte_flow_item_nvgre *mask = item->mask; 2266 int ret; 2267 2268 if (target_protocol != 0xff && target_protocol != IPPROTO_GRE) 2269 return rte_flow_error_set(error, EINVAL, 2270 RTE_FLOW_ERROR_TYPE_ITEM, item, 2271 "protocol filtering not compatible" 2272 " with this GRE layer"); 2273 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 2274 return rte_flow_error_set(error, ENOTSUP, 2275 RTE_FLOW_ERROR_TYPE_ITEM, item, 2276 "multiple tunnel layers not" 2277 " supported"); 2278 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3)) 2279 return rte_flow_error_set(error, ENOTSUP, 2280 RTE_FLOW_ERROR_TYPE_ITEM, item, 2281 "L3 Layer is missing"); 2282 if (!mask) 2283 mask = &rte_flow_item_nvgre_mask; 2284 ret = mlx5_flow_item_acceptable 2285 (item, (const uint8_t *)mask, 2286 (const uint8_t *)&rte_flow_item_nvgre_mask, 2287 sizeof(struct rte_flow_item_nvgre), error); 2288 if (ret < 0) 2289 return ret; 2290 return 0; 2291 } 2292 2293 /* Allocate unique ID for the split Q/RSS subflows. */ 2294 static uint32_t 2295 flow_qrss_get_id(struct rte_eth_dev *dev) 2296 { 2297 struct mlx5_priv *priv = dev->data->dev_private; 2298 uint32_t qrss_id, ret; 2299 2300 ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id); 2301 if (ret) 2302 return 0; 2303 MLX5_ASSERT(qrss_id); 2304 return qrss_id; 2305 } 2306 2307 /* Free unique ID for the split Q/RSS subflows. */ 2308 static void 2309 flow_qrss_free_id(struct rte_eth_dev *dev, uint32_t qrss_id) 2310 { 2311 struct mlx5_priv *priv = dev->data->dev_private; 2312 2313 if (qrss_id) 2314 mlx5_flow_id_release(priv->qrss_id_pool, qrss_id); 2315 } 2316 2317 /** 2318 * Release resource related QUEUE/RSS action split. 2319 * 2320 * @param dev 2321 * Pointer to Ethernet device. 2322 * @param flow 2323 * Flow to release id's from. 
2324 */ 2325 static void 2326 flow_mreg_split_qrss_release(struct rte_eth_dev *dev, 2327 struct rte_flow *flow) 2328 { 2329 struct mlx5_priv *priv = dev->data->dev_private; 2330 uint32_t handle_idx; 2331 struct mlx5_flow_handle *dev_handle; 2332 2333 SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, 2334 handle_idx, dev_handle, next) 2335 if (dev_handle->split_flow_id) 2336 flow_qrss_free_id(dev, dev_handle->split_flow_id); 2337 } 2338 2339 static int 2340 flow_null_validate(struct rte_eth_dev *dev __rte_unused, 2341 const struct rte_flow_attr *attr __rte_unused, 2342 const struct rte_flow_item items[] __rte_unused, 2343 const struct rte_flow_action actions[] __rte_unused, 2344 bool external __rte_unused, 2345 int hairpin __rte_unused, 2346 struct rte_flow_error *error) 2347 { 2348 return rte_flow_error_set(error, ENOTSUP, 2349 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2350 } 2351 2352 static struct mlx5_flow * 2353 flow_null_prepare(struct rte_eth_dev *dev __rte_unused, 2354 const struct rte_flow_attr *attr __rte_unused, 2355 const struct rte_flow_item items[] __rte_unused, 2356 const struct rte_flow_action actions[] __rte_unused, 2357 struct rte_flow_error *error) 2358 { 2359 rte_flow_error_set(error, ENOTSUP, 2360 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2361 return NULL; 2362 } 2363 2364 static int 2365 flow_null_translate(struct rte_eth_dev *dev __rte_unused, 2366 struct mlx5_flow *dev_flow __rte_unused, 2367 const struct rte_flow_attr *attr __rte_unused, 2368 const struct rte_flow_item items[] __rte_unused, 2369 const struct rte_flow_action actions[] __rte_unused, 2370 struct rte_flow_error *error) 2371 { 2372 return rte_flow_error_set(error, ENOTSUP, 2373 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2374 } 2375 2376 static int 2377 flow_null_apply(struct rte_eth_dev *dev __rte_unused, 2378 struct rte_flow *flow __rte_unused, 2379 struct rte_flow_error *error) 2380 { 2381 return rte_flow_error_set(error, ENOTSUP, 2382 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2383 } 2384 2385 static void 2386 flow_null_remove(struct rte_eth_dev *dev __rte_unused, 2387 struct rte_flow *flow __rte_unused) 2388 { 2389 } 2390 2391 static void 2392 flow_null_destroy(struct rte_eth_dev *dev __rte_unused, 2393 struct rte_flow *flow __rte_unused) 2394 { 2395 } 2396 2397 static int 2398 flow_null_query(struct rte_eth_dev *dev __rte_unused, 2399 struct rte_flow *flow __rte_unused, 2400 const struct rte_flow_action *actions __rte_unused, 2401 void *data __rte_unused, 2402 struct rte_flow_error *error) 2403 { 2404 return rte_flow_error_set(error, ENOTSUP, 2405 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2406 } 2407 2408 /* Void driver to protect from null pointer reference. */ 2409 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = { 2410 .validate = flow_null_validate, 2411 .prepare = flow_null_prepare, 2412 .translate = flow_null_translate, 2413 .apply = flow_null_apply, 2414 .remove = flow_null_remove, 2415 .destroy = flow_null_destroy, 2416 .query = flow_null_query, 2417 }; 2418 2419 /** 2420 * Select flow driver type according to flow attributes and device 2421 * configuration. 2422 * 2423 * @param[in] dev 2424 * Pointer to the dev structure. 2425 * @param[in] attr 2426 * Pointer to the flow attributes. 2427 * 2428 * @return 2429 * flow driver type, MLX5_FLOW_TYPE_MAX otherwise. 
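 *
 * The selection below reduces to (a summary of the code, not a new rule):
 *
 *   attr->transfer && priv->config.dv_esw_en    -> MLX5_FLOW_TYPE_DV
 *   !attr->transfer && priv->config.dv_flow_en  -> MLX5_FLOW_TYPE_DV
 *   !attr->transfer && !priv->config.dv_flow_en -> MLX5_FLOW_TYPE_VERBS
 *   attr->transfer && !priv->config.dv_esw_en   -> MLX5_FLOW_TYPE_MAX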
2430 */ 2431 static enum mlx5_flow_drv_type 2432 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) 2433 { 2434 struct mlx5_priv *priv = dev->data->dev_private; 2435 enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX; 2436 2437 if (attr->transfer && priv->config.dv_esw_en) 2438 type = MLX5_FLOW_TYPE_DV; 2439 if (!attr->transfer) 2440 type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV : 2441 MLX5_FLOW_TYPE_VERBS; 2442 return type; 2443 } 2444 2445 #define flow_get_drv_ops(type) flow_drv_ops[type] 2446 2447 /** 2448 * Flow driver validation API. This abstracts calling driver specific functions. 2449 * The type of flow driver is determined according to flow attributes. 2450 * 2451 * @param[in] dev 2452 * Pointer to the dev structure. 2453 * @param[in] attr 2454 * Pointer to the flow attributes. 2455 * @param[in] items 2456 * Pointer to the list of items. 2457 * @param[in] actions 2458 * Pointer to the list of actions. 2459 * @param[in] external 2460 * This flow rule is created by request external to PMD. 2461 * @param[in] hairpin 2462 * Number of hairpin TX actions, 0 means classic flow. 2463 * @param[out] error 2464 * Pointer to the error structure. 2465 * 2466 * @return 2467 * 0 on success, a negative errno value otherwise and rte_errno is set. 2468 */ 2469 static inline int 2470 flow_drv_validate(struct rte_eth_dev *dev, 2471 const struct rte_flow_attr *attr, 2472 const struct rte_flow_item items[], 2473 const struct rte_flow_action actions[], 2474 bool external, int hairpin, struct rte_flow_error *error) 2475 { 2476 const struct mlx5_flow_driver_ops *fops; 2477 enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr); 2478 2479 fops = flow_get_drv_ops(type); 2480 return fops->validate(dev, attr, items, actions, external, 2481 hairpin, error); 2482 } 2483 2484 /** 2485 * Flow driver preparation API. This abstracts calling driver specific 2486 * functions. Parent flow (rte_flow) should have driver type (drv_type). It 2487 * calculates the size of memory required for device flow, allocates the memory, 2488 * initializes the device flow and returns the pointer. 2489 * 2490 * @note 2491 * This function initializes device flow structure such as dv or verbs in 2492 * struct mlx5_flow. However, it is caller's responsibility to initialize the 2493 * rest. For example, adding returning device flow to flow->dev_flow list and 2494 * setting backward reference to the flow should be done out of this function. 2495 * layers field is not filled either. 2496 * 2497 * @param[in] dev 2498 * Pointer to the dev structure. 2499 * @param[in] attr 2500 * Pointer to the flow attributes. 2501 * @param[in] items 2502 * Pointer to the list of items. 2503 * @param[in] actions 2504 * Pointer to the list of actions. 2505 * @param[out] error 2506 * Pointer to the error structure. 2507 * 2508 * @return 2509 * Pointer to device flow on success, otherwise NULL and rte_errno is set. 2510 */ 2511 static inline struct mlx5_flow * 2512 flow_drv_prepare(struct rte_eth_dev *dev, 2513 const struct rte_flow *flow, 2514 const struct rte_flow_attr *attr, 2515 const struct rte_flow_item items[], 2516 const struct rte_flow_action actions[], 2517 struct rte_flow_error *error) 2518 { 2519 const struct mlx5_flow_driver_ops *fops; 2520 enum mlx5_flow_drv_type type = flow->drv_type; 2521 2522 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2523 fops = flow_get_drv_ops(type); 2524 return fops->prepare(dev, attr, items, actions, error); 2525 } 2526 2527 /** 2528 * Flow driver translation API. 
This abstracts calling driver specific 2529 * functions. Parent flow (rte_flow) should have driver type (drv_type). It 2530 * translates a generic flow into a driver flow. flow_drv_prepare() must 2531 * precede. 2532 * 2533 * @note 2534 * dev_flow->layers could be filled as a result of parsing during translation 2535 * if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled 2536 * if necessary. As a flow can have multiple dev_flows by RSS flow expansion, 2537 * flow->actions could be overwritten even though all the expanded dev_flows 2538 * have the same actions. 2539 * 2540 * @param[in] dev 2541 * Pointer to the rte dev structure. 2542 * @param[in, out] dev_flow 2543 * Pointer to the mlx5 flow. 2544 * @param[in] attr 2545 * Pointer to the flow attributes. 2546 * @param[in] items 2547 * Pointer to the list of items. 2548 * @param[in] actions 2549 * Pointer to the list of actions. 2550 * @param[out] error 2551 * Pointer to the error structure. 2552 * 2553 * @return 2554 * 0 on success, a negative errno value otherwise and rte_errno is set. 2555 */ 2556 static inline int 2557 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow, 2558 const struct rte_flow_attr *attr, 2559 const struct rte_flow_item items[], 2560 const struct rte_flow_action actions[], 2561 struct rte_flow_error *error) 2562 { 2563 const struct mlx5_flow_driver_ops *fops; 2564 enum mlx5_flow_drv_type type = dev_flow->flow->drv_type; 2565 2566 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2567 fops = flow_get_drv_ops(type); 2568 return fops->translate(dev, dev_flow, attr, items, actions, error); 2569 } 2570 2571 /** 2572 * Flow driver apply API. This abstracts calling driver specific functions. 2573 * Parent flow (rte_flow) should have driver type (drv_type). It applies 2574 * translated driver flows on to device. flow_drv_translate() must precede. 2575 * 2576 * @param[in] dev 2577 * Pointer to Ethernet device structure. 2578 * @param[in, out] flow 2579 * Pointer to flow structure. 2580 * @param[out] error 2581 * Pointer to error structure. 2582 * 2583 * @return 2584 * 0 on success, a negative errno value otherwise and rte_errno is set. 2585 */ 2586 static inline int 2587 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, 2588 struct rte_flow_error *error) 2589 { 2590 const struct mlx5_flow_driver_ops *fops; 2591 enum mlx5_flow_drv_type type = flow->drv_type; 2592 2593 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2594 fops = flow_get_drv_ops(type); 2595 return fops->apply(dev, flow, error); 2596 } 2597 2598 /** 2599 * Flow driver remove API. This abstracts calling driver specific functions. 2600 * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow 2601 * on device. All the resources of the flow should be freed by calling 2602 * flow_drv_destroy(). 2603 * 2604 * @param[in] dev 2605 * Pointer to Ethernet device. 2606 * @param[in, out] flow 2607 * Pointer to flow structure. 2608 */ 2609 static inline void 2610 flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) 2611 { 2612 const struct mlx5_flow_driver_ops *fops; 2613 enum mlx5_flow_drv_type type = flow->drv_type; 2614 2615 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2616 fops = flow_get_drv_ops(type); 2617 fops->remove(dev, flow); 2618 } 2619 2620 /** 2621 * Flow driver destroy API. This abstracts calling driver specific functions. 2622 * Parent flow (rte_flow) should have driver type (drv_type). 
It removes a flow 2623 * on device and releases resources of the flow. 2624 * 2625 * @param[in] dev 2626 * Pointer to Ethernet device. 2627 * @param[in, out] flow 2628 * Pointer to flow structure. 2629 */ 2630 static inline void 2631 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) 2632 { 2633 const struct mlx5_flow_driver_ops *fops; 2634 enum mlx5_flow_drv_type type = flow->drv_type; 2635 2636 flow_mreg_split_qrss_release(dev, flow); 2637 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2638 fops = flow_get_drv_ops(type); 2639 fops->destroy(dev, flow); 2640 } 2641 2642 /** 2643 * Get RSS action from the action list. 2644 * 2645 * @param[in] actions 2646 * Pointer to the list of actions. 2647 * 2648 * @return 2649 * Pointer to the RSS action if exist, else return NULL. 2650 */ 2651 static const struct rte_flow_action_rss* 2652 flow_get_rss_action(const struct rte_flow_action actions[]) 2653 { 2654 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2655 switch (actions->type) { 2656 case RTE_FLOW_ACTION_TYPE_RSS: 2657 return (const struct rte_flow_action_rss *) 2658 actions->conf; 2659 default: 2660 break; 2661 } 2662 } 2663 return NULL; 2664 } 2665 2666 static unsigned int 2667 find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) 2668 { 2669 const struct rte_flow_item *item; 2670 unsigned int has_vlan = 0; 2671 2672 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { 2673 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { 2674 has_vlan = 1; 2675 break; 2676 } 2677 } 2678 if (has_vlan) 2679 return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN : 2680 MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN; 2681 return rss_level < 2 ? MLX5_EXPANSION_ROOT : 2682 MLX5_EXPANSION_ROOT_OUTER; 2683 } 2684 2685 /** 2686 * Get layer flags from the prefix flow. 2687 * 2688 * Some flows may be split to several subflows, the prefix subflow gets the 2689 * match items and the suffix sub flow gets the actions. 2690 * Some actions need the user defined match item flags to get the detail for 2691 * the action. 2692 * This function helps the suffix flow to get the item layer flags from prefix 2693 * subflow. 2694 * 2695 * @param[in] dev_flow 2696 * Pointer the created preifx subflow. 2697 * 2698 * @return 2699 * The layers get from prefix subflow. 2700 */ 2701 static inline uint64_t 2702 flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow) 2703 { 2704 uint64_t layers = 0; 2705 2706 /* 2707 * Layers bits could be localization, but usually the compiler will 2708 * help to do the optimization work for source code. 2709 * If no decap actions, use the layers directly. 2710 */ 2711 if (!(dev_flow->act_flags & MLX5_FLOW_ACTION_DECAP)) 2712 return dev_flow->handle->layers; 2713 /* Convert L3 layers with decap action. */ 2714 if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4) 2715 layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4; 2716 else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6) 2717 layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6; 2718 /* Convert L4 layers with decap action. */ 2719 if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP) 2720 layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP; 2721 else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP) 2722 layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP; 2723 return layers; 2724 } 2725 2726 /** 2727 * Get metadata split action information. 2728 * 2729 * @param[in] actions 2730 * Pointer to the list of actions. 2731 * @param[out] qrss 2732 * Pointer to the return pointer. 
2733 * @param[out] qrss_type 2734 * Pointer to the action type to return. RTE_FLOW_ACTION_TYPE_END is returned 2735 * if no QUEUE/RSS is found. 2736 * @param[out] encap_idx 2737 * Pointer to the index of the encap action if exists, otherwise the last 2738 * action index. 2739 * 2740 * @return 2741 * Total number of actions. 2742 */ 2743 static int 2744 flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[], 2745 const struct rte_flow_action **qrss, 2746 int *encap_idx) 2747 { 2748 const struct rte_flow_action_raw_encap *raw_encap; 2749 int actions_n = 0; 2750 int raw_decap_idx = -1; 2751 2752 *encap_idx = -1; 2753 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2754 switch (actions->type) { 2755 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 2756 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 2757 *encap_idx = actions_n; 2758 break; 2759 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 2760 raw_decap_idx = actions_n; 2761 break; 2762 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 2763 raw_encap = actions->conf; 2764 if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) 2765 *encap_idx = raw_decap_idx != -1 ? 2766 raw_decap_idx : actions_n; 2767 break; 2768 case RTE_FLOW_ACTION_TYPE_QUEUE: 2769 case RTE_FLOW_ACTION_TYPE_RSS: 2770 *qrss = actions; 2771 break; 2772 default: 2773 break; 2774 } 2775 actions_n++; 2776 } 2777 if (*encap_idx == -1) 2778 *encap_idx = actions_n; 2779 /* Count RTE_FLOW_ACTION_TYPE_END. */ 2780 return actions_n + 1; 2781 } 2782 2783 /** 2784 * Check meter action from the action list. 2785 * 2786 * @param[in] actions 2787 * Pointer to the list of actions. 2788 * @param[out] mtr 2789 * Pointer to the meter exist flag. 2790 * 2791 * @return 2792 * Total number of actions. 2793 */ 2794 static int 2795 flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr) 2796 { 2797 int actions_n = 0; 2798 2799 MLX5_ASSERT(mtr); 2800 *mtr = 0; 2801 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2802 switch (actions->type) { 2803 case RTE_FLOW_ACTION_TYPE_METER: 2804 *mtr = 1; 2805 break; 2806 default: 2807 break; 2808 } 2809 actions_n++; 2810 } 2811 /* Count RTE_FLOW_ACTION_TYPE_END. */ 2812 return actions_n + 1; 2813 } 2814 2815 /** 2816 * Check if the flow should be splited due to hairpin. 2817 * The reason for the split is that in current HW we can't 2818 * support encap on Rx, so if a flow have encap we move it 2819 * to Tx. 2820 * 2821 * @param dev 2822 * Pointer to Ethernet device. 2823 * @param[in] attr 2824 * Flow rule attributes. 2825 * @param[in] actions 2826 * Associated actions (list terminated by the END action). 2827 * 2828 * @return 2829 * > 0 the number of actions and the flow should be split, 2830 * 0 when no split required. 
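 *
 * Hedged examples of the return value (queue 3 is assumed to be a hairpin
 * queue, queue 0 a regular one):
 *
 *   actions: vxlan_encap / queue index 3 / end  -> 2 (split required)
 *   actions: queue index 3 / end                -> 0 (no encap action)
 *   actions: vxlan_encap / queue index 0 / end  -> 0 (not a hairpin queue)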
2831 */ 2832 static int 2833 flow_check_hairpin_split(struct rte_eth_dev *dev, 2834 const struct rte_flow_attr *attr, 2835 const struct rte_flow_action actions[]) 2836 { 2837 int queue_action = 0; 2838 int action_n = 0; 2839 int encap = 0; 2840 const struct rte_flow_action_queue *queue; 2841 const struct rte_flow_action_rss *rss; 2842 const struct rte_flow_action_raw_encap *raw_encap; 2843 2844 if (!attr->ingress) 2845 return 0; 2846 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2847 switch (actions->type) { 2848 case RTE_FLOW_ACTION_TYPE_QUEUE: 2849 queue = actions->conf; 2850 if (queue == NULL) 2851 return 0; 2852 if (mlx5_rxq_get_type(dev, queue->index) != 2853 MLX5_RXQ_TYPE_HAIRPIN) 2854 return 0; 2855 queue_action = 1; 2856 action_n++; 2857 break; 2858 case RTE_FLOW_ACTION_TYPE_RSS: 2859 rss = actions->conf; 2860 if (rss == NULL || rss->queue_num == 0) 2861 return 0; 2862 if (mlx5_rxq_get_type(dev, rss->queue[0]) != 2863 MLX5_RXQ_TYPE_HAIRPIN) 2864 return 0; 2865 queue_action = 1; 2866 action_n++; 2867 break; 2868 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 2869 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 2870 encap = 1; 2871 action_n++; 2872 break; 2873 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 2874 raw_encap = actions->conf; 2875 if (raw_encap->size > 2876 (sizeof(struct rte_flow_item_eth) + 2877 sizeof(struct rte_flow_item_ipv4))) 2878 encap = 1; 2879 action_n++; 2880 break; 2881 default: 2882 action_n++; 2883 break; 2884 } 2885 } 2886 if (encap == 1 && queue_action) 2887 return action_n; 2888 return 0; 2889 } 2890 2891 /* Declare flow create/destroy prototype in advance. */ 2892 static uint32_t 2893 flow_list_create(struct rte_eth_dev *dev, uint32_t *list, 2894 const struct rte_flow_attr *attr, 2895 const struct rte_flow_item items[], 2896 const struct rte_flow_action actions[], 2897 bool external, struct rte_flow_error *error); 2898 2899 static void 2900 flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list, 2901 uint32_t flow_idx); 2902 2903 /** 2904 * Add a flow of copying flow metadata registers in RX_CP_TBL. 2905 * 2906 * As mark_id is unique, if there's already a registered flow for the mark_id, 2907 * return by increasing the reference counter of the resource. Otherwise, create 2908 * the resource (mcp_res) and flow. 2909 * 2910 * Flow looks like, 2911 * - If ingress port is ANY and reg_c[1] is mark_id, 2912 * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL. 2913 * 2914 * For default flow (zero mark_id), flow is like, 2915 * - If ingress port is ANY, 2916 * reg_b := reg_c[0] and jump to RX_ACT_TBL. 2917 * 2918 * @param dev 2919 * Pointer to Ethernet device. 2920 * @param mark_id 2921 * ID of MARK action, zero means default flow for META. 2922 * @param[out] error 2923 * Perform verbose error reporting if not NULL. 2924 * 2925 * @return 2926 * Associated resource on success, NULL otherwise and rte_errno is set. 
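 *
 * Note on reference counting (describing the code below): a repeated
 * request for an already registered non-default mark_id only increments
 * mcp_res->refcnt, while the default copy flow (MLX5_DEFAULT_COPY_ID) is
 * requested once and keeps refcnt == 1.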
2927 */ 2928 static struct mlx5_flow_mreg_copy_resource * 2929 flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, 2930 struct rte_flow_error *error) 2931 { 2932 struct mlx5_priv *priv = dev->data->dev_private; 2933 struct rte_flow_attr attr = { 2934 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, 2935 .ingress = 1, 2936 }; 2937 struct mlx5_rte_flow_item_tag tag_spec = { 2938 .data = mark_id, 2939 }; 2940 struct rte_flow_item items[] = { 2941 [1] = { .type = RTE_FLOW_ITEM_TYPE_END, }, 2942 }; 2943 struct rte_flow_action_mark ftag = { 2944 .id = mark_id, 2945 }; 2946 struct mlx5_flow_action_copy_mreg cp_mreg = { 2947 .dst = REG_B, 2948 .src = 0, 2949 }; 2950 struct rte_flow_action_jump jump = { 2951 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, 2952 }; 2953 struct rte_flow_action actions[] = { 2954 [3] = { .type = RTE_FLOW_ACTION_TYPE_END, }, 2955 }; 2956 struct mlx5_flow_mreg_copy_resource *mcp_res; 2957 uint32_t idx = 0; 2958 int ret; 2959 2960 /* Fill the register fileds in the flow. */ 2961 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); 2962 if (ret < 0) 2963 return NULL; 2964 tag_spec.id = ret; 2965 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error); 2966 if (ret < 0) 2967 return NULL; 2968 cp_mreg.src = ret; 2969 /* Check if already registered. */ 2970 MLX5_ASSERT(priv->mreg_cp_tbl); 2971 mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id); 2972 if (mcp_res) { 2973 /* For non-default rule. */ 2974 if (mark_id != MLX5_DEFAULT_COPY_ID) 2975 mcp_res->refcnt++; 2976 MLX5_ASSERT(mark_id != MLX5_DEFAULT_COPY_ID || 2977 mcp_res->refcnt == 1); 2978 return mcp_res; 2979 } 2980 /* Provide the full width of FLAG specific value. */ 2981 if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT)) 2982 tag_spec.data = MLX5_FLOW_MARK_DEFAULT; 2983 /* Build a new flow. */ 2984 if (mark_id != MLX5_DEFAULT_COPY_ID) { 2985 items[0] = (struct rte_flow_item){ 2986 .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG, 2987 .spec = &tag_spec, 2988 }; 2989 items[1] = (struct rte_flow_item){ 2990 .type = RTE_FLOW_ITEM_TYPE_END, 2991 }; 2992 actions[0] = (struct rte_flow_action){ 2993 .type = MLX5_RTE_FLOW_ACTION_TYPE_MARK, 2994 .conf = &ftag, 2995 }; 2996 actions[1] = (struct rte_flow_action){ 2997 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 2998 .conf = &cp_mreg, 2999 }; 3000 actions[2] = (struct rte_flow_action){ 3001 .type = RTE_FLOW_ACTION_TYPE_JUMP, 3002 .conf = &jump, 3003 }; 3004 actions[3] = (struct rte_flow_action){ 3005 .type = RTE_FLOW_ACTION_TYPE_END, 3006 }; 3007 } else { 3008 /* Default rule, wildcard match. */ 3009 attr.priority = MLX5_FLOW_PRIO_RSVD; 3010 items[0] = (struct rte_flow_item){ 3011 .type = RTE_FLOW_ITEM_TYPE_END, 3012 }; 3013 actions[0] = (struct rte_flow_action){ 3014 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 3015 .conf = &cp_mreg, 3016 }; 3017 actions[1] = (struct rte_flow_action){ 3018 .type = RTE_FLOW_ACTION_TYPE_JUMP, 3019 .conf = &jump, 3020 }; 3021 actions[2] = (struct rte_flow_action){ 3022 .type = RTE_FLOW_ACTION_TYPE_END, 3023 }; 3024 } 3025 /* Build a new entry. */ 3026 mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx); 3027 if (!mcp_res) { 3028 rte_errno = ENOMEM; 3029 return NULL; 3030 } 3031 mcp_res->idx = idx; 3032 /* 3033 * The copy Flows are not included in any list. There 3034 * ones are referenced from other Flows and can not 3035 * be applied, removed, deleted in ardbitrary order 3036 * by list traversing. 
3037 */ 3038 mcp_res->rix_flow = flow_list_create(dev, NULL, &attr, items, 3039 actions, false, error); 3040 if (!mcp_res->rix_flow) 3041 goto error; 3042 mcp_res->refcnt++; 3043 mcp_res->hlist_ent.key = mark_id; 3044 ret = mlx5_hlist_insert(priv->mreg_cp_tbl, 3045 &mcp_res->hlist_ent); 3046 MLX5_ASSERT(!ret); 3047 if (ret) 3048 goto error; 3049 return mcp_res; 3050 error: 3051 if (mcp_res->rix_flow) 3052 flow_list_destroy(dev, NULL, mcp_res->rix_flow); 3053 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx); 3054 return NULL; 3055 } 3056 3057 /** 3058 * Release flow in RX_CP_TBL. 3059 * 3060 * @param dev 3061 * Pointer to Ethernet device. 3062 * @flow 3063 * Parent flow for wich copying is provided. 3064 */ 3065 static void 3066 flow_mreg_del_copy_action(struct rte_eth_dev *dev, 3067 struct rte_flow *flow) 3068 { 3069 struct mlx5_flow_mreg_copy_resource *mcp_res; 3070 struct mlx5_priv *priv = dev->data->dev_private; 3071 3072 if (!flow->rix_mreg_copy) 3073 return; 3074 mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP], 3075 flow->rix_mreg_copy); 3076 if (!mcp_res || !priv->mreg_cp_tbl) 3077 return; 3078 if (flow->copy_applied) { 3079 MLX5_ASSERT(mcp_res->appcnt); 3080 flow->copy_applied = 0; 3081 --mcp_res->appcnt; 3082 if (!mcp_res->appcnt) { 3083 struct rte_flow *mcp_flow = mlx5_ipool_get 3084 (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], 3085 mcp_res->rix_flow); 3086 3087 if (mcp_flow) 3088 flow_drv_remove(dev, mcp_flow); 3089 } 3090 } 3091 /* 3092 * We do not check availability of metadata registers here, 3093 * because copy resources are not allocated in this case. 3094 */ 3095 if (--mcp_res->refcnt) 3096 return; 3097 MLX5_ASSERT(mcp_res->rix_flow); 3098 flow_list_destroy(dev, NULL, mcp_res->rix_flow); 3099 mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent); 3100 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx); 3101 flow->rix_mreg_copy = 0; 3102 } 3103 3104 /** 3105 * Start flow in RX_CP_TBL. 3106 * 3107 * @param dev 3108 * Pointer to Ethernet device. 3109 * @flow 3110 * Parent flow for wich copying is provided. 3111 * 3112 * @return 3113 * 0 on success, a negative errno value otherwise and rte_errno is set. 3114 */ 3115 static int 3116 flow_mreg_start_copy_action(struct rte_eth_dev *dev, 3117 struct rte_flow *flow) 3118 { 3119 struct mlx5_flow_mreg_copy_resource *mcp_res; 3120 struct mlx5_priv *priv = dev->data->dev_private; 3121 int ret; 3122 3123 if (!flow->rix_mreg_copy || flow->copy_applied) 3124 return 0; 3125 mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP], 3126 flow->rix_mreg_copy); 3127 if (!mcp_res) 3128 return 0; 3129 if (!mcp_res->appcnt) { 3130 struct rte_flow *mcp_flow = mlx5_ipool_get 3131 (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], 3132 mcp_res->rix_flow); 3133 3134 if (mcp_flow) { 3135 ret = flow_drv_apply(dev, mcp_flow, NULL); 3136 if (ret) 3137 return ret; 3138 } 3139 } 3140 ++mcp_res->appcnt; 3141 flow->copy_applied = 1; 3142 return 0; 3143 } 3144 3145 /** 3146 * Stop flow in RX_CP_TBL. 3147 * 3148 * @param dev 3149 * Pointer to Ethernet device. 3150 * @flow 3151 * Parent flow for wich copying is provided. 
3152 */ 3153 static void 3154 flow_mreg_stop_copy_action(struct rte_eth_dev *dev, 3155 struct rte_flow *flow) 3156 { 3157 struct mlx5_flow_mreg_copy_resource *mcp_res; 3158 struct mlx5_priv *priv = dev->data->dev_private; 3159 3160 if (!flow->rix_mreg_copy || !flow->copy_applied) 3161 return; 3162 mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP], 3163 flow->rix_mreg_copy); 3164 if (!mcp_res) 3165 return; 3166 MLX5_ASSERT(mcp_res->appcnt); 3167 --mcp_res->appcnt; 3168 flow->copy_applied = 0; 3169 if (!mcp_res->appcnt) { 3170 struct rte_flow *mcp_flow = mlx5_ipool_get 3171 (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], 3172 mcp_res->rix_flow); 3173 3174 if (mcp_flow) 3175 flow_drv_remove(dev, mcp_flow); 3176 } 3177 } 3178 3179 /** 3180 * Remove the default copy action from RX_CP_TBL. 3181 * 3182 * @param dev 3183 * Pointer to Ethernet device. 3184 */ 3185 static void 3186 flow_mreg_del_default_copy_action(struct rte_eth_dev *dev) 3187 { 3188 struct mlx5_flow_mreg_copy_resource *mcp_res; 3189 struct mlx5_priv *priv = dev->data->dev_private; 3190 3191 /* Check if default flow is registered. */ 3192 if (!priv->mreg_cp_tbl) 3193 return; 3194 mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, 3195 MLX5_DEFAULT_COPY_ID); 3196 if (!mcp_res) 3197 return; 3198 MLX5_ASSERT(mcp_res->rix_flow); 3199 flow_list_destroy(dev, NULL, mcp_res->rix_flow); 3200 mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent); 3201 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx); 3202 } 3203 3204 /** 3205 * Add the default copy action in in RX_CP_TBL. 3206 * 3207 * @param dev 3208 * Pointer to Ethernet device. 3209 * @param[out] error 3210 * Perform verbose error reporting if not NULL. 3211 * 3212 * @return 3213 * 0 for success, negative value otherwise and rte_errno is set. 3214 */ 3215 static int 3216 flow_mreg_add_default_copy_action(struct rte_eth_dev *dev, 3217 struct rte_flow_error *error) 3218 { 3219 struct mlx5_priv *priv = dev->data->dev_private; 3220 struct mlx5_flow_mreg_copy_resource *mcp_res; 3221 3222 /* Check whether extensive metadata feature is engaged. */ 3223 if (!priv->config.dv_flow_en || 3224 priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 3225 !mlx5_flow_ext_mreg_supported(dev) || 3226 !priv->sh->dv_regc0_mask) 3227 return 0; 3228 mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error); 3229 if (!mcp_res) 3230 return -rte_errno; 3231 return 0; 3232 } 3233 3234 /** 3235 * Add a flow of copying flow metadata registers in RX_CP_TBL. 3236 * 3237 * All the flow having Q/RSS action should be split by 3238 * flow_mreg_split_qrss_prep() to pass by RX_CP_TBL. A flow in the RX_CP_TBL 3239 * performs the following, 3240 * - CQE->flow_tag := reg_c[1] (MARK) 3241 * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META) 3242 * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1] 3243 * but there should be a flow per each MARK ID set by MARK action. 3244 * 3245 * For the aforementioned reason, if there's a MARK action in flow's action 3246 * list, a corresponding flow should be added to the RX_CP_TBL in order to copy 3247 * the MARK ID to CQE's flow_tag like, 3248 * - If reg_c[1] is mark_id, 3249 * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL. 3250 * 3251 * For SET_META action which stores value in reg_c[0], as the destination is 3252 * also a flow metadata register (reg_b), adding a default flow is enough. Zero 3253 * MARK ID means the default flow. 
The default flow looks like, 3254 * - For all flow, reg_b := reg_c[0] and jump to RX_ACT_TBL. 3255 * 3256 * @param dev 3257 * Pointer to Ethernet device. 3258 * @param flow 3259 * Pointer to flow structure. 3260 * @param[in] actions 3261 * Pointer to the list of actions. 3262 * @param[out] error 3263 * Perform verbose error reporting if not NULL. 3264 * 3265 * @return 3266 * 0 on success, negative value otherwise and rte_errno is set. 3267 */ 3268 static int 3269 flow_mreg_update_copy_table(struct rte_eth_dev *dev, 3270 struct rte_flow *flow, 3271 const struct rte_flow_action *actions, 3272 struct rte_flow_error *error) 3273 { 3274 struct mlx5_priv *priv = dev->data->dev_private; 3275 struct mlx5_dev_config *config = &priv->config; 3276 struct mlx5_flow_mreg_copy_resource *mcp_res; 3277 const struct rte_flow_action_mark *mark; 3278 3279 /* Check whether extensive metadata feature is engaged. */ 3280 if (!config->dv_flow_en || 3281 config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 3282 !mlx5_flow_ext_mreg_supported(dev) || 3283 !priv->sh->dv_regc0_mask) 3284 return 0; 3285 /* Find MARK action. */ 3286 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 3287 switch (actions->type) { 3288 case RTE_FLOW_ACTION_TYPE_FLAG: 3289 mcp_res = flow_mreg_add_copy_action 3290 (dev, MLX5_FLOW_MARK_DEFAULT, error); 3291 if (!mcp_res) 3292 return -rte_errno; 3293 flow->rix_mreg_copy = mcp_res->idx; 3294 if (dev->data->dev_started) { 3295 mcp_res->appcnt++; 3296 flow->copy_applied = 1; 3297 } 3298 return 0; 3299 case RTE_FLOW_ACTION_TYPE_MARK: 3300 mark = (const struct rte_flow_action_mark *) 3301 actions->conf; 3302 mcp_res = 3303 flow_mreg_add_copy_action(dev, mark->id, error); 3304 if (!mcp_res) 3305 return -rte_errno; 3306 flow->rix_mreg_copy = mcp_res->idx; 3307 if (dev->data->dev_started) { 3308 mcp_res->appcnt++; 3309 flow->copy_applied = 1; 3310 } 3311 return 0; 3312 default: 3313 break; 3314 } 3315 } 3316 return 0; 3317 } 3318 3319 #define MLX5_MAX_SPLIT_ACTIONS 24 3320 #define MLX5_MAX_SPLIT_ITEMS 24 3321 3322 /** 3323 * Split the hairpin flow. 3324 * Since HW can't support encap on Rx we move the encap to Tx. 3325 * If the count action is after the encap then we also 3326 * move the count action. in this case the count will also measure 3327 * the outer bytes. 3328 * 3329 * @param dev 3330 * Pointer to Ethernet device. 3331 * @param[in] actions 3332 * Associated actions (list terminated by the END action). 3333 * @param[out] actions_rx 3334 * Rx flow actions. 3335 * @param[out] actions_tx 3336 * Tx flow actions.. 3337 * @param[out] pattern_tx 3338 * The pattern items for the Tx flow. 3339 * @param[out] flow_id 3340 * The flow ID connected to this flow. 3341 * 3342 * @return 3343 * 0 on success. 
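 *
 * Illustrative outcome for an ingress "raw_encap / queue / end" rule whose
 * raw_encap data is larger than plain L2+L3 headers and whose queue is a
 * hairpin queue (a sketch of what the code produces, not literal output):
 *
 *   actions_rx: queue / set_tag(flow_id) / end
 *   actions_tx: raw_encap / end
 *   pattern_tx: tag(flow_id) / end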
3344 */ 3345 static int 3346 flow_hairpin_split(struct rte_eth_dev *dev, 3347 const struct rte_flow_action actions[], 3348 struct rte_flow_action actions_rx[], 3349 struct rte_flow_action actions_tx[], 3350 struct rte_flow_item pattern_tx[], 3351 uint32_t *flow_id) 3352 { 3353 struct mlx5_priv *priv = dev->data->dev_private; 3354 const struct rte_flow_action_raw_encap *raw_encap; 3355 const struct rte_flow_action_raw_decap *raw_decap; 3356 struct mlx5_rte_flow_action_set_tag *set_tag; 3357 struct rte_flow_action *tag_action; 3358 struct mlx5_rte_flow_item_tag *tag_item; 3359 struct rte_flow_item *item; 3360 char *addr; 3361 int encap = 0; 3362 3363 mlx5_flow_id_get(priv->sh->flow_id_pool, flow_id); 3364 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 3365 switch (actions->type) { 3366 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 3367 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 3368 rte_memcpy(actions_tx, actions, 3369 sizeof(struct rte_flow_action)); 3370 actions_tx++; 3371 break; 3372 case RTE_FLOW_ACTION_TYPE_COUNT: 3373 if (encap) { 3374 rte_memcpy(actions_tx, actions, 3375 sizeof(struct rte_flow_action)); 3376 actions_tx++; 3377 } else { 3378 rte_memcpy(actions_rx, actions, 3379 sizeof(struct rte_flow_action)); 3380 actions_rx++; 3381 } 3382 break; 3383 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 3384 raw_encap = actions->conf; 3385 if (raw_encap->size > 3386 (sizeof(struct rte_flow_item_eth) + 3387 sizeof(struct rte_flow_item_ipv4))) { 3388 memcpy(actions_tx, actions, 3389 sizeof(struct rte_flow_action)); 3390 actions_tx++; 3391 encap = 1; 3392 } else { 3393 rte_memcpy(actions_rx, actions, 3394 sizeof(struct rte_flow_action)); 3395 actions_rx++; 3396 } 3397 break; 3398 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 3399 raw_decap = actions->conf; 3400 if (raw_decap->size < 3401 (sizeof(struct rte_flow_item_eth) + 3402 sizeof(struct rte_flow_item_ipv4))) { 3403 memcpy(actions_tx, actions, 3404 sizeof(struct rte_flow_action)); 3405 actions_tx++; 3406 } else { 3407 rte_memcpy(actions_rx, actions, 3408 sizeof(struct rte_flow_action)); 3409 actions_rx++; 3410 } 3411 break; 3412 default: 3413 rte_memcpy(actions_rx, actions, 3414 sizeof(struct rte_flow_action)); 3415 actions_rx++; 3416 break; 3417 } 3418 } 3419 /* Add set meta action and end action for the Rx flow. */ 3420 tag_action = actions_rx; 3421 tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG; 3422 actions_rx++; 3423 rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action)); 3424 actions_rx++; 3425 set_tag = (void *)actions_rx; 3426 set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL); 3427 MLX5_ASSERT(set_tag->id > REG_NONE); 3428 set_tag->data = *flow_id; 3429 tag_action->conf = set_tag; 3430 /* Create Tx item list. 
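	 * The TAG item's spec and mask structures are carved out of the spare
	 * space of the caller-supplied pattern_tx[] array starting at
	 * pattern_tx[2], so the array must provide that extra room.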
*/ 3431 rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action)); 3432 addr = (void *)&pattern_tx[2]; 3433 item = pattern_tx; 3434 item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG; 3435 tag_item = (void *)addr; 3436 tag_item->data = *flow_id; 3437 tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL); 3438 MLX5_ASSERT(set_tag->id > REG_NONE); 3439 item->spec = tag_item; 3440 addr += sizeof(struct mlx5_rte_flow_item_tag); 3441 tag_item = (void *)addr; 3442 tag_item->data = UINT32_MAX; 3443 tag_item->id = UINT16_MAX; 3444 item->mask = tag_item; 3445 addr += sizeof(struct mlx5_rte_flow_item_tag); 3446 item->last = NULL; 3447 item++; 3448 item->type = RTE_FLOW_ITEM_TYPE_END; 3449 return 0; 3450 } 3451 3452 /** 3453 * The last stage of splitting chain, just creates the subflow 3454 * without any modification. 3455 * 3456 * @param[in] dev 3457 * Pointer to Ethernet device. 3458 * @param[in] flow 3459 * Parent flow structure pointer. 3460 * @param[in, out] sub_flow 3461 * Pointer to return the created subflow, may be NULL. 3462 * @param[in] prefix_layers 3463 * Prefix subflow layers, may be 0. 3464 * @param[in] attr 3465 * Flow rule attributes. 3466 * @param[in] items 3467 * Pattern specification (list terminated by the END pattern item). 3468 * @param[in] actions 3469 * Associated actions (list terminated by the END action). 3470 * @param[in] external 3471 * This flow rule is created by request external to PMD. 3472 * @param[out] error 3473 * Perform verbose error reporting if not NULL. 3474 * @return 3475 * 0 on success, negative value otherwise 3476 */ 3477 static int 3478 flow_create_split_inner(struct rte_eth_dev *dev, 3479 struct rte_flow *flow, 3480 struct mlx5_flow **sub_flow, 3481 uint64_t prefix_layers, 3482 const struct rte_flow_attr *attr, 3483 const struct rte_flow_item items[], 3484 const struct rte_flow_action actions[], 3485 bool external, struct rte_flow_error *error) 3486 { 3487 struct mlx5_flow *dev_flow; 3488 3489 dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, error); 3490 if (!dev_flow) 3491 return -rte_errno; 3492 dev_flow->flow = flow; 3493 dev_flow->external = external; 3494 /* Subflow object was created, we must include one in the list. */ 3495 SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx, 3496 dev_flow->handle, next); 3497 /* 3498 * If dev_flow is as one of the suffix flow, some actions in suffix 3499 * flow may need some user defined item layer flags. 3500 */ 3501 if (prefix_layers) 3502 dev_flow->handle->layers = prefix_layers; 3503 if (sub_flow) 3504 *sub_flow = dev_flow; 3505 return flow_drv_translate(dev, dev_flow, attr, items, actions, error); 3506 } 3507 3508 /** 3509 * Split the meter flow. 3510 * 3511 * As meter flow will split to three sub flow, other than meter 3512 * action, the other actions make sense to only meter accepts 3513 * the packet. If it need to be dropped, no other additional 3514 * actions should be take. 3515 * 3516 * One kind of special action which decapsulates the L3 tunnel 3517 * header will be in the prefix sub flow, as not to take the 3518 * L3 tunnel header into account. 3519 * 3520 * @param dev 3521 * Pointer to Ethernet device. 3522 * @param[in] items 3523 * Pattern specification (list terminated by the END pattern item). 3524 * @param[out] sfx_items 3525 * Suffix flow match items (list terminated by the END pattern item). 3526 * @param[in] actions 3527 * Associated actions (list terminated by the END action). 3528 * @param[out] actions_sfx 3529 * Suffix flow actions. 
3530 * @param[out] actions_pre 3531 * Prefix flow actions. 3532 * @param[out] pattern_sfx 3533 * The pattern items for the suffix flow. 3534 * @param[out] tag_sfx 3535 * Pointer to suffix flow tag. 3536 * 3537 * @return 3538 * 0 on success. 3539 */ 3540 static int 3541 flow_meter_split_prep(struct rte_eth_dev *dev, 3542 const struct rte_flow_item items[], 3543 struct rte_flow_item sfx_items[], 3544 const struct rte_flow_action actions[], 3545 struct rte_flow_action actions_sfx[], 3546 struct rte_flow_action actions_pre[]) 3547 { 3548 struct rte_flow_action *tag_action = NULL; 3549 struct rte_flow_item *tag_item; 3550 struct mlx5_rte_flow_action_set_tag *set_tag; 3551 struct rte_flow_error error; 3552 const struct rte_flow_action_raw_encap *raw_encap; 3553 const struct rte_flow_action_raw_decap *raw_decap; 3554 struct mlx5_rte_flow_item_tag *tag_spec; 3555 struct mlx5_rte_flow_item_tag *tag_mask; 3556 uint32_t tag_id; 3557 bool copy_vlan = false; 3558 3559 /* Prepare the actions for prefix and suffix flow. */ 3560 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 3561 struct rte_flow_action **action_cur = NULL; 3562 3563 switch (actions->type) { 3564 case RTE_FLOW_ACTION_TYPE_METER: 3565 /* Add the extra tag action first. */ 3566 tag_action = actions_pre; 3567 tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG; 3568 actions_pre++; 3569 action_cur = &actions_pre; 3570 break; 3571 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: 3572 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: 3573 action_cur = &actions_pre; 3574 break; 3575 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 3576 raw_encap = actions->conf; 3577 if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE) 3578 action_cur = &actions_pre; 3579 break; 3580 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 3581 raw_decap = actions->conf; 3582 if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE) 3583 action_cur = &actions_pre; 3584 break; 3585 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 3586 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 3587 copy_vlan = true; 3588 break; 3589 default: 3590 break; 3591 } 3592 if (!action_cur) 3593 action_cur = &actions_sfx; 3594 memcpy(*action_cur, actions, sizeof(struct rte_flow_action)); 3595 (*action_cur)++; 3596 } 3597 /* Add end action to the actions. */ 3598 actions_sfx->type = RTE_FLOW_ACTION_TYPE_END; 3599 actions_pre->type = RTE_FLOW_ACTION_TYPE_END; 3600 actions_pre++; 3601 /* Set the tag. */ 3602 set_tag = (void *)actions_pre; 3603 set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error); 3604 /* 3605 * Get the id from the qrss_pool to make qrss share the id with meter. 3606 */ 3607 tag_id = flow_qrss_get_id(dev); 3608 set_tag->data = tag_id << MLX5_MTR_COLOR_BITS; 3609 assert(tag_action); 3610 tag_action->conf = set_tag; 3611 /* Prepare the suffix subflow items. */ 3612 tag_item = sfx_items++; 3613 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { 3614 int item_type = items->type; 3615 3616 switch (item_type) { 3617 case RTE_FLOW_ITEM_TYPE_PORT_ID: 3618 memcpy(sfx_items, items, sizeof(*sfx_items)); 3619 sfx_items++; 3620 break; 3621 case RTE_FLOW_ITEM_TYPE_VLAN: 3622 if (copy_vlan) { 3623 memcpy(sfx_items, items, sizeof(*sfx_items)); 3624 /* 3625 * Convert to internal match item, it is used 3626 * for vlan push and set vid. 
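				 * The copied item keeps the original spec/last/mask
				 * pointers from the user pattern; only the type is
				 * rewritten to the internal VLAN item so the suffix
				 * flow can reuse the same VID information.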
3627 */ 3628 sfx_items->type = MLX5_RTE_FLOW_ITEM_TYPE_VLAN; 3629 sfx_items++; 3630 } 3631 break; 3632 default: 3633 break; 3634 } 3635 } 3636 sfx_items->type = RTE_FLOW_ITEM_TYPE_END; 3637 sfx_items++; 3638 tag_spec = (struct mlx5_rte_flow_item_tag *)sfx_items; 3639 tag_spec->data = tag_id << MLX5_MTR_COLOR_BITS; 3640 tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error); 3641 tag_mask = tag_spec + 1; 3642 tag_mask->data = 0xffffff00; 3643 tag_item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG; 3644 tag_item->spec = tag_spec; 3645 tag_item->last = NULL; 3646 tag_item->mask = tag_mask; 3647 return tag_id; 3648 } 3649 3650 /** 3651 * Split action list having QUEUE/RSS for metadata register copy. 3652 * 3653 * Once Q/RSS action is detected in user's action list, the flow action 3654 * should be split in order to copy metadata registers, which will happen in 3655 * RX_CP_TBL like, 3656 * - CQE->flow_tag := reg_c[1] (MARK) 3657 * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META) 3658 * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL. 3659 * This is because the last action of each flow must be a terminal action 3660 * (QUEUE, RSS or DROP). 3661 * 3662 * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is 3663 * stored and kept in the mlx5_flow structure per each sub_flow. 3664 * 3665 * The Q/RSS action is replaced with, 3666 * - SET_TAG, setting the allocated flow ID to reg_c[2]. 3667 * And the following JUMP action is added at the end, 3668 * - JUMP, to RX_CP_TBL. 3669 * 3670 * A flow to perform remained Q/RSS action will be created in RX_ACT_TBL by 3671 * flow_create_split_metadata() routine. The flow will look like, 3672 * - If flow ID matches (reg_c[2]), perform Q/RSS. 3673 * 3674 * @param dev 3675 * Pointer to Ethernet device. 3676 * @param[out] split_actions 3677 * Pointer to store split actions to jump to CP_TBL. 3678 * @param[in] actions 3679 * Pointer to the list of original flow actions. 3680 * @param[in] qrss 3681 * Pointer to the Q/RSS action. 3682 * @param[in] actions_n 3683 * Number of original actions. 3684 * @param[out] error 3685 * Perform verbose error reporting if not NULL. 3686 * 3687 * @return 3688 * non-zero unique flow_id on success, otherwise 0 and 3689 * error/rte_error are set. 3690 */ 3691 static uint32_t 3692 flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, 3693 struct rte_flow_action *split_actions, 3694 const struct rte_flow_action *actions, 3695 const struct rte_flow_action *qrss, 3696 int actions_n, struct rte_flow_error *error) 3697 { 3698 struct mlx5_rte_flow_action_set_tag *set_tag; 3699 struct rte_flow_action_jump *jump; 3700 const int qrss_idx = qrss - actions; 3701 uint32_t flow_id = 0; 3702 int ret = 0; 3703 3704 /* 3705 * Given actions will be split 3706 * - Replace QUEUE/RSS action with SET_TAG to set flow ID. 3707 * - Add jump to mreg CP_TBL. 3708 * As a result, there will be one more action. 3709 */ 3710 ++actions_n; 3711 memcpy(split_actions, actions, sizeof(*split_actions) * actions_n); 3712 set_tag = (void *)(split_actions + actions_n); 3713 /* 3714 * If tag action is not set to void(it means we are not the meter 3715 * suffix flow), add the tag action. Since meter suffix flow already 3716 * has the tag added. 3717 */ 3718 if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) { 3719 /* 3720 * Allocate the new subflow ID. This one is unique within 3721 * device and not shared with representors. Otherwise, 3722 * we would have to resolve multi-thread access synch 3723 * issue. 
Each flow on the shared device is appended 3724 * with source vport identifier, so the resulting 3725 * flows will be unique in the shared (by master and 3726 * representors) domain even if they have coinciding 3727 * IDs. 3728 */ 3729 flow_id = flow_qrss_get_id(dev); 3730 if (!flow_id) 3731 return rte_flow_error_set(error, ENOMEM, 3732 RTE_FLOW_ERROR_TYPE_ACTION, 3733 NULL, "can't allocate id " 3734 "for split Q/RSS subflow"); 3735 /* Internal SET_TAG action to set flow ID. */ 3736 *set_tag = (struct mlx5_rte_flow_action_set_tag){ 3737 .data = flow_id, 3738 }; 3739 ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error); 3740 if (ret < 0) 3741 return ret; 3742 set_tag->id = ret; 3743 /* Construct new actions array. */ 3744 /* Replace QUEUE/RSS action. */ 3745 split_actions[qrss_idx] = (struct rte_flow_action){ 3746 .type = MLX5_RTE_FLOW_ACTION_TYPE_TAG, 3747 .conf = set_tag, 3748 }; 3749 } 3750 /* JUMP action to jump to mreg copy table (CP_TBL). */ 3751 jump = (void *)(set_tag + 1); 3752 *jump = (struct rte_flow_action_jump){ 3753 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, 3754 }; 3755 split_actions[actions_n - 2] = (struct rte_flow_action){ 3756 .type = RTE_FLOW_ACTION_TYPE_JUMP, 3757 .conf = jump, 3758 }; 3759 split_actions[actions_n - 1] = (struct rte_flow_action){ 3760 .type = RTE_FLOW_ACTION_TYPE_END, 3761 }; 3762 return flow_id; 3763 } 3764 3765 /** 3766 * Extend the given action list for Tx metadata copy. 3767 * 3768 * Copy the given action list to the ext_actions and add flow metadata register 3769 * copy action in order to copy reg_a set by WQE to reg_c[0]. 3770 * 3771 * @param[out] ext_actions 3772 * Pointer to the extended action list. 3773 * @param[in] actions 3774 * Pointer to the list of actions. 3775 * @param[in] actions_n 3776 * Number of actions in the list. 3777 * @param[out] error 3778 * Perform verbose error reporting if not NULL. 3779 * @param[in] encap_idx 3780 * The encap action inndex. 3781 * 3782 * @return 3783 * 0 on success, negative value otherwise 3784 */ 3785 static int 3786 flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, 3787 struct rte_flow_action *ext_actions, 3788 const struct rte_flow_action *actions, 3789 int actions_n, struct rte_flow_error *error, 3790 int encap_idx) 3791 { 3792 struct mlx5_flow_action_copy_mreg *cp_mreg = 3793 (struct mlx5_flow_action_copy_mreg *) 3794 (ext_actions + actions_n + 1); 3795 int ret; 3796 3797 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error); 3798 if (ret < 0) 3799 return ret; 3800 cp_mreg->dst = ret; 3801 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error); 3802 if (ret < 0) 3803 return ret; 3804 cp_mreg->src = ret; 3805 if (encap_idx != 0) 3806 memcpy(ext_actions, actions, sizeof(*ext_actions) * encap_idx); 3807 if (encap_idx == actions_n - 1) { 3808 ext_actions[actions_n - 1] = (struct rte_flow_action){ 3809 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 3810 .conf = cp_mreg, 3811 }; 3812 ext_actions[actions_n] = (struct rte_flow_action){ 3813 .type = RTE_FLOW_ACTION_TYPE_END, 3814 }; 3815 } else { 3816 ext_actions[encap_idx] = (struct rte_flow_action){ 3817 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 3818 .conf = cp_mreg, 3819 }; 3820 memcpy(ext_actions + encap_idx + 1, actions + encap_idx, 3821 sizeof(*ext_actions) * (actions_n - encap_idx)); 3822 } 3823 return 0; 3824 } 3825 3826 /** 3827 * The splitting for metadata feature. 
3828 * 3829 * - Q/RSS action on NIC Rx should be split in order to pass by 3830 * the mreg copy table (RX_CP_TBL) and then it jumps to the 3831 * action table (RX_ACT_TBL) which has the split Q/RSS action. 3832 * 3833 * - All the actions on NIC Tx should have a mreg copy action to 3834 * copy reg_a from WQE to reg_c[0]. 3835 * 3836 * @param dev 3837 * Pointer to Ethernet device. 3838 * @param[in] flow 3839 * Parent flow structure pointer. 3840 * @param[in] prefix_layers 3841 * Prefix flow layer flags. 3842 * @param[in] attr 3843 * Flow rule attributes. 3844 * @param[in] items 3845 * Pattern specification (list terminated by the END pattern item). 3846 * @param[in] actions 3847 * Associated actions (list terminated by the END action). 3848 * @param[in] external 3849 * This flow rule is created by request external to PMD. 3850 * @param[out] error 3851 * Perform verbose error reporting if not NULL. 3852 * @return 3853 * 0 on success, negative value otherwise 3854 */ 3855 static int 3856 flow_create_split_metadata(struct rte_eth_dev *dev, 3857 struct rte_flow *flow, 3858 uint64_t prefix_layers, 3859 const struct rte_flow_attr *attr, 3860 const struct rte_flow_item items[], 3861 const struct rte_flow_action actions[], 3862 bool external, struct rte_flow_error *error) 3863 { 3864 struct mlx5_priv *priv = dev->data->dev_private; 3865 struct mlx5_dev_config *config = &priv->config; 3866 const struct rte_flow_action *qrss = NULL; 3867 struct rte_flow_action *ext_actions = NULL; 3868 struct mlx5_flow *dev_flow = NULL; 3869 uint32_t qrss_id = 0; 3870 int mtr_sfx = 0; 3871 size_t act_size; 3872 int actions_n; 3873 int encap_idx; 3874 int ret; 3875 3876 /* Check whether extensive metadata feature is engaged. */ 3877 if (!config->dv_flow_en || 3878 config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 3879 !mlx5_flow_ext_mreg_supported(dev)) 3880 return flow_create_split_inner(dev, flow, NULL, prefix_layers, 3881 attr, items, actions, external, 3882 error); 3883 actions_n = flow_parse_metadata_split_actions_info(actions, &qrss, 3884 &encap_idx); 3885 if (qrss) { 3886 /* Exclude hairpin flows from splitting. */ 3887 if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) { 3888 const struct rte_flow_action_queue *queue; 3889 3890 queue = qrss->conf; 3891 if (mlx5_rxq_get_type(dev, queue->index) == 3892 MLX5_RXQ_TYPE_HAIRPIN) 3893 qrss = NULL; 3894 } else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) { 3895 const struct rte_flow_action_rss *rss; 3896 3897 rss = qrss->conf; 3898 if (mlx5_rxq_get_type(dev, rss->queue[0]) == 3899 MLX5_RXQ_TYPE_HAIRPIN) 3900 qrss = NULL; 3901 } 3902 } 3903 if (qrss) { 3904 /* Check if it is in meter suffix table. */ 3905 mtr_sfx = attr->group == (attr->transfer ? 3906 (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) : 3907 MLX5_FLOW_TABLE_LEVEL_SUFFIX); 3908 /* 3909 * Q/RSS action on NIC Rx should be split in order to pass by 3910 * the mreg copy table (RX_CP_TBL) and then it jumps to the 3911 * action table (RX_ACT_TBL) which has the split Q/RSS action. 3912 */ 3913 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) + 3914 sizeof(struct rte_flow_action_set_tag) + 3915 sizeof(struct rte_flow_action_jump); 3916 ext_actions = rte_zmalloc(__func__, act_size, 0); 3917 if (!ext_actions) 3918 return rte_flow_error_set(error, ENOMEM, 3919 RTE_FLOW_ERROR_TYPE_ACTION, 3920 NULL, "no memory to split " 3921 "metadata flow"); 3922 /* 3923 * If we are the suffix flow of meter, tag already exist. 3924 * Set the tag action to void. 
3925 */ 3926 if (mtr_sfx) 3927 ext_actions[qrss - actions].type = 3928 RTE_FLOW_ACTION_TYPE_VOID; 3929 else 3930 ext_actions[qrss - actions].type = 3931 MLX5_RTE_FLOW_ACTION_TYPE_TAG; 3932 /* 3933 * Create the new actions list with removed Q/RSS action 3934 * and appended set tag and jump to register copy table 3935 * (RX_CP_TBL). We should preallocate unique tag ID here 3936 * in advance, because it is needed for set tag action. 3937 */ 3938 qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions, 3939 qrss, actions_n, error); 3940 if (!mtr_sfx && !qrss_id) { 3941 ret = -rte_errno; 3942 goto exit; 3943 } 3944 } else if (attr->egress && !attr->transfer) { 3945 /* 3946 * All the actions on NIC Tx should have a metadata register 3947 * copy action to copy reg_a from WQE to reg_c[meta] 3948 */ 3949 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) + 3950 sizeof(struct mlx5_flow_action_copy_mreg); 3951 ext_actions = rte_zmalloc(__func__, act_size, 0); 3952 if (!ext_actions) 3953 return rte_flow_error_set(error, ENOMEM, 3954 RTE_FLOW_ERROR_TYPE_ACTION, 3955 NULL, "no memory to split " 3956 "metadata flow"); 3957 /* Create the action list appended with copy register. */ 3958 ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions, 3959 actions_n, error, encap_idx); 3960 if (ret < 0) 3961 goto exit; 3962 } 3963 /* Add the unmodified original or prefix subflow. */ 3964 ret = flow_create_split_inner(dev, flow, &dev_flow, prefix_layers, attr, 3965 items, ext_actions ? ext_actions : 3966 actions, external, error); 3967 if (ret < 0) 3968 goto exit; 3969 MLX5_ASSERT(dev_flow); 3970 if (qrss) { 3971 const struct rte_flow_attr q_attr = { 3972 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, 3973 .ingress = 1, 3974 }; 3975 /* Internal PMD action to set register. */ 3976 struct mlx5_rte_flow_item_tag q_tag_spec = { 3977 .data = qrss_id, 3978 .id = 0, 3979 }; 3980 struct rte_flow_item q_items[] = { 3981 { 3982 .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG, 3983 .spec = &q_tag_spec, 3984 .last = NULL, 3985 .mask = NULL, 3986 }, 3987 { 3988 .type = RTE_FLOW_ITEM_TYPE_END, 3989 }, 3990 }; 3991 struct rte_flow_action q_actions[] = { 3992 { 3993 .type = qrss->type, 3994 .conf = qrss->conf, 3995 }, 3996 { 3997 .type = RTE_FLOW_ACTION_TYPE_END, 3998 }, 3999 }; 4000 uint64_t layers = flow_get_prefix_layer_flags(dev_flow); 4001 4002 /* 4003 * Configure the tag item only if there is no meter subflow. 4004 * Since tag is already marked in the meter suffix subflow 4005 * we can just use the meter suffix items as is. 4006 */ 4007 if (qrss_id) { 4008 /* Not meter subflow. */ 4009 MLX5_ASSERT(!mtr_sfx); 4010 /* 4011 * Put unique id in prefix flow due to it is destroyed 4012 * after suffix flow and id will be freed after there 4013 * is no actual flows with this id and identifier 4014 * reallocation becomes possible (for example, for 4015 * other flows in other threads). 4016 */ 4017 dev_flow->handle->split_flow_id = qrss_id; 4018 ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, 4019 error); 4020 if (ret < 0) 4021 goto exit; 4022 q_tag_spec.id = ret; 4023 } 4024 dev_flow = NULL; 4025 /* Add suffix subflow to execute Q/RSS. */ 4026 ret = flow_create_split_inner(dev, flow, &dev_flow, layers, 4027 &q_attr, mtr_sfx ? items : 4028 q_items, q_actions, 4029 external, error); 4030 if (ret < 0) 4031 goto exit; 4032 /* qrss ID should be freed if failed. */ 4033 qrss_id = 0; 4034 MLX5_ASSERT(dev_flow); 4035 } 4036 4037 exit: 4038 /* 4039 * We do not destroy the partially created sub_flows in case of error. 
4040 * These are included in the parent flow list and will be destroyed
4041 * by flow_drv_destroy.
4042 */
4043 flow_qrss_free_id(dev, qrss_id);
4044 rte_free(ext_actions);
4045 return ret;
4046 }
4047
4048 /**
4049 * The splitting for meter feature.
4050 *
4051 * - The meter flow will be split into two flows as prefix and
4052 * suffix flow. The packets only make sense if they pass the prefix
4053 * meter action.
4054 *
4055 * - Reg_C_5 is used for the packet to match between the prefix and
4056 * suffix flows.
4057 *
4058 * @param dev
4059 * Pointer to Ethernet device.
4060 * @param[in] flow
4061 * Parent flow structure pointer.
4062 * @param[in] attr
4063 * Flow rule attributes.
4064 * @param[in] items
4065 * Pattern specification (list terminated by the END pattern item).
4066 * @param[in] actions
4067 * Associated actions (list terminated by the END action).
4068 * @param[in] external
4069 * This flow rule is created by request external to PMD.
4070 * @param[out] error
4071 * Perform verbose error reporting if not NULL.
4072 * @return
4073 * 0 on success, negative value otherwise
4074 */
4075 static int
4076 flow_create_split_meter(struct rte_eth_dev *dev,
4077 struct rte_flow *flow,
4078 const struct rte_flow_attr *attr,
4079 const struct rte_flow_item items[],
4080 const struct rte_flow_action actions[],
4081 bool external, struct rte_flow_error *error)
4082 {
4083 struct mlx5_priv *priv = dev->data->dev_private;
4084 struct rte_flow_action *sfx_actions = NULL;
4085 struct rte_flow_action *pre_actions = NULL;
4086 struct rte_flow_item *sfx_items = NULL;
4087 struct mlx5_flow *dev_flow = NULL;
4088 struct rte_flow_attr sfx_attr = *attr;
4089 uint32_t mtr = 0;
4090 uint32_t mtr_tag_id = 0;
4091 size_t act_size;
4092 size_t item_size;
4093 int actions_n = 0;
4094 int ret;
4095
4096 if (priv->mtr_en)
4097 actions_n = flow_check_meter_action(actions, &mtr);
4098 if (mtr) {
4099 /* The five prefix actions: meter, decap, encap, tag, end. */
4100 act_size = sizeof(struct rte_flow_action) * (actions_n + 5) +
4101 sizeof(struct mlx5_rte_flow_action_set_tag);
4102 /* tag, vlan, port id, end. */
4103 #define METER_SUFFIX_ITEM 4
4104 item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
4105 sizeof(struct mlx5_rte_flow_item_tag) * 2;
4106 sfx_actions = rte_zmalloc(__func__, (act_size + item_size), 0);
4107 if (!sfx_actions)
4108 return rte_flow_error_set(error, ENOMEM,
4109 RTE_FLOW_ERROR_TYPE_ACTION,
4110 NULL, "no memory to split "
4111 "meter flow");
4112 sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
4113 act_size);
4114 pre_actions = sfx_actions + actions_n;
4115 mtr_tag_id = flow_meter_split_prep(dev, items, sfx_items,
4116 actions, sfx_actions,
4117 pre_actions);
4118 if (!mtr_tag_id) {
4119 ret = -rte_errno;
4120 goto exit;
4121 }
4122 /* Add the prefix subflow. */
4123 ret = flow_create_split_inner(dev, flow, &dev_flow, 0, attr,
4124 items, pre_actions, external,
4125 error);
4126 if (ret) {
4127 ret = -rte_errno;
4128 goto exit;
4129 }
4130 dev_flow->handle->split_flow_id = mtr_tag_id;
4131 /* Setting the sfx group attr. */
4132 sfx_attr.group = sfx_attr.transfer ?
4133 (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
4134 MLX5_FLOW_TABLE_LEVEL_SUFFIX;
4135 }
4136 /* Add the suffix subflow (or the original flow if there is no meter). */
4137 ret = flow_create_split_metadata(dev, flow, dev_flow ?
4138 flow_get_prefix_layer_flags(dev_flow) :
4139 0, &sfx_attr,
4140 sfx_items ? sfx_items : items,
4141 sfx_actions ?
sfx_actions : actions,
4142 external, error);
4143 exit:
4144 if (sfx_actions)
4145 rte_free(sfx_actions);
4146 return ret;
4147 }
4148
4149 /**
4150 * Split the flow into a set of subflows. The splitters might be linked
4151 * in a chain, like this:
4152 * flow_create_split_outer() calls:
4153 * flow_create_split_meter() calls:
4154 * flow_create_split_metadata(meter_subflow_0) calls:
4155 * flow_create_split_inner(metadata_subflow_0)
4156 * flow_create_split_inner(metadata_subflow_1)
4157 * flow_create_split_inner(metadata_subflow_2)
4158 * flow_create_split_metadata(meter_subflow_1) calls:
4159 * flow_create_split_inner(metadata_subflow_0)
4160 * flow_create_split_inner(metadata_subflow_1)
4161 * flow_create_split_inner(metadata_subflow_2)
4162 *
4163 * This provides a flexible way to add new levels of flow splitting.
4164 * All successfully created subflows are included in the
4165 * parent flow dev_flow list.
4166 *
4167 * @param dev
4168 * Pointer to Ethernet device.
4169 * @param[in] flow
4170 * Parent flow structure pointer.
4171 * @param[in] attr
4172 * Flow rule attributes.
4173 * @param[in] items
4174 * Pattern specification (list terminated by the END pattern item).
4175 * @param[in] actions
4176 * Associated actions (list terminated by the END action).
4177 * @param[in] external
4178 * This flow rule is created by request external to PMD.
4179 * @param[out] error
4180 * Perform verbose error reporting if not NULL.
4181 * @return
4182 * 0 on success, negative value otherwise
4183 */
4184 static int
4185 flow_create_split_outer(struct rte_eth_dev *dev,
4186 struct rte_flow *flow,
4187 const struct rte_flow_attr *attr,
4188 const struct rte_flow_item items[],
4189 const struct rte_flow_action actions[],
4190 bool external, struct rte_flow_error *error)
4191 {
4192 int ret;
4193
4194 ret = flow_create_split_meter(dev, flow, attr, items,
4195 actions, external, error);
4196 MLX5_ASSERT(ret <= 0);
4197 return ret;
4198 }
4199
4200 /**
4201 * Create a flow and add it to @p list.
4202 *
4203 * @param dev
4204 * Pointer to Ethernet device.
4205 * @param list
4206 * Pointer to a TAILQ flow list. If this parameter is NULL,
4207 * no list insertion occurs, the flow is just created, and
4208 * it is the caller's responsibility to track the
4209 * created flow.
4210 * @param[in] attr
4211 * Flow rule attributes.
4212 * @param[in] items
4213 * Pattern specification (list terminated by the END pattern item).
4214 * @param[in] actions
4215 * Associated actions (list terminated by the END action).
4216 * @param[in] external
4217 * This flow rule is created by request external to PMD.
4218 * @param[out] error
4219 * Perform verbose error reporting if not NULL.
4220 *
4221 * @return
4222 * A flow index on success, 0 otherwise and rte_errno is set.
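*
* Usage sketch (illustrative only): mlx5_flow_create() below is the regular
* caller for application flows and passes priv->flows as the list, e.g.:
*
*   uint32_t idx = flow_list_create(dev, &priv->flows, attr, items,
*                                   actions, true, error);
*   if (!idx)
*       ... handle the failure, rte_errno has already been set ...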
4223 */ 4224 static uint32_t 4225 flow_list_create(struct rte_eth_dev *dev, uint32_t *list, 4226 const struct rte_flow_attr *attr, 4227 const struct rte_flow_item items[], 4228 const struct rte_flow_action actions[], 4229 bool external, struct rte_flow_error *error) 4230 { 4231 struct mlx5_priv *priv = dev->data->dev_private; 4232 struct rte_flow *flow = NULL; 4233 struct mlx5_flow *dev_flow; 4234 const struct rte_flow_action_rss *rss; 4235 union { 4236 struct rte_flow_expand_rss buf; 4237 uint8_t buffer[2048]; 4238 } expand_buffer; 4239 union { 4240 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; 4241 uint8_t buffer[2048]; 4242 } actions_rx; 4243 union { 4244 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; 4245 uint8_t buffer[2048]; 4246 } actions_hairpin_tx; 4247 union { 4248 struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS]; 4249 uint8_t buffer[2048]; 4250 } items_tx; 4251 struct rte_flow_expand_rss *buf = &expand_buffer.buf; 4252 struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *) 4253 priv->rss_desc)[!!priv->flow_idx]; 4254 const struct rte_flow_action *p_actions_rx = actions; 4255 uint32_t i; 4256 uint32_t idx = 0; 4257 int hairpin_flow; 4258 uint32_t hairpin_id = 0; 4259 struct rte_flow_attr attr_tx = { .priority = 0 }; 4260 int ret; 4261 4262 hairpin_flow = flow_check_hairpin_split(dev, attr, actions); 4263 ret = flow_drv_validate(dev, attr, items, p_actions_rx, 4264 external, hairpin_flow, error); 4265 if (ret < 0) 4266 return 0; 4267 if (hairpin_flow > 0) { 4268 if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) { 4269 rte_errno = EINVAL; 4270 return 0; 4271 } 4272 flow_hairpin_split(dev, actions, actions_rx.actions, 4273 actions_hairpin_tx.actions, items_tx.items, 4274 &hairpin_id); 4275 p_actions_rx = actions_rx.actions; 4276 } 4277 flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx); 4278 if (!flow) { 4279 rte_errno = ENOMEM; 4280 goto error_before_flow; 4281 } 4282 flow->drv_type = flow_get_drv_type(dev, attr); 4283 if (hairpin_id != 0) 4284 flow->hairpin_flow_id = hairpin_id; 4285 MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN && 4286 flow->drv_type < MLX5_FLOW_TYPE_MAX); 4287 memset(rss_desc, 0, sizeof(*rss_desc)); 4288 rss = flow_get_rss_action(p_actions_rx); 4289 if (rss) { 4290 /* 4291 * The following information is required by 4292 * mlx5_flow_hashfields_adjust() in advance. 4293 */ 4294 rss_desc->level = rss->level; 4295 /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */ 4296 rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types; 4297 } 4298 flow->dev_handles = 0; 4299 if (rss && rss->types) { 4300 unsigned int graph_root; 4301 4302 graph_root = find_graph_root(items, rss->level); 4303 ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer), 4304 items, rss->types, 4305 mlx5_support_expansion, 4306 graph_root); 4307 MLX5_ASSERT(ret > 0 && 4308 (unsigned int)ret < sizeof(expand_buffer.buffer)); 4309 } else { 4310 buf->entries = 1; 4311 buf->entry[0].pattern = (void *)(uintptr_t)items; 4312 } 4313 /* 4314 * Record the start index when there is a nested call. All sub-flows 4315 * need to be translated before another calling. 4316 * No need to use ping-pong buffer to save memory here. 4317 */ 4318 if (priv->flow_idx) { 4319 MLX5_ASSERT(!priv->flow_nested_idx); 4320 priv->flow_nested_idx = priv->flow_idx; 4321 } 4322 for (i = 0; i < buf->entries; ++i) { 4323 /* 4324 * The splitter may create multiple dev_flows, 4325 * depending on configuration. In the simplest 4326 * case it just creates unmodified original flow. 
4327 */ 4328 ret = flow_create_split_outer(dev, flow, attr, 4329 buf->entry[i].pattern, 4330 p_actions_rx, external, 4331 error); 4332 if (ret < 0) 4333 goto error; 4334 } 4335 /* Create the tx flow. */ 4336 if (hairpin_flow) { 4337 attr_tx.group = MLX5_HAIRPIN_TX_TABLE; 4338 attr_tx.ingress = 0; 4339 attr_tx.egress = 1; 4340 dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items, 4341 actions_hairpin_tx.actions, error); 4342 if (!dev_flow) 4343 goto error; 4344 dev_flow->flow = flow; 4345 dev_flow->external = 0; 4346 SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx, 4347 dev_flow->handle, next); 4348 ret = flow_drv_translate(dev, dev_flow, &attr_tx, 4349 items_tx.items, 4350 actions_hairpin_tx.actions, error); 4351 if (ret < 0) 4352 goto error; 4353 } 4354 /* 4355 * Update the metadata register copy table. If extensive 4356 * metadata feature is enabled and registers are supported 4357 * we might create the extra rte_flow for each unique 4358 * MARK/FLAG action ID. 4359 * 4360 * The table is updated for ingress Flows only, because 4361 * the egress Flows belong to the different device and 4362 * copy table should be updated in peer NIC Rx domain. 4363 */ 4364 if (attr->ingress && 4365 (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) { 4366 ret = flow_mreg_update_copy_table(dev, flow, actions, error); 4367 if (ret) 4368 goto error; 4369 } 4370 /* 4371 * If the flow is external (from application) OR device is started, then 4372 * the flow will be applied immediately. 4373 */ 4374 if (external || dev->data->dev_started) { 4375 ret = flow_drv_apply(dev, flow, error); 4376 if (ret < 0) 4377 goto error; 4378 } 4379 if (list) 4380 ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx, 4381 flow, next); 4382 flow_rxq_flags_set(dev, flow); 4383 /* Nested flow creation index recovery. */ 4384 priv->flow_idx = priv->flow_nested_idx; 4385 if (priv->flow_nested_idx) 4386 priv->flow_nested_idx = 0; 4387 return idx; 4388 error: 4389 MLX5_ASSERT(flow); 4390 ret = rte_errno; /* Save rte_errno before cleanup. */ 4391 flow_mreg_del_copy_action(dev, flow); 4392 flow_drv_destroy(dev, flow); 4393 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx); 4394 rte_errno = ret; /* Restore rte_errno. */ 4395 error_before_flow: 4396 ret = rte_errno; 4397 if (hairpin_id) 4398 mlx5_flow_id_release(priv->sh->flow_id_pool, 4399 hairpin_id); 4400 rte_errno = ret; 4401 priv->flow_idx = priv->flow_nested_idx; 4402 if (priv->flow_nested_idx) 4403 priv->flow_nested_idx = 0; 4404 return 0; 4405 } 4406 4407 /** 4408 * Create a dedicated flow rule on e-switch table 0 (root table), to direct all 4409 * incoming packets to table 1. 4410 * 4411 * Other flow rules, requested for group n, will be created in 4412 * e-switch table n+1. 4413 * Jump action to e-switch group n will be created to group n+1. 4414 * 4415 * Used when working in switchdev mode, to utilise advantages of table 1 4416 * and above. 4417 * 4418 * @param dev 4419 * Pointer to Ethernet device. 4420 * 4421 * @return 4422 * Pointer to flow on success, NULL otherwise and rte_errno is set. 
4423 */ 4424 struct rte_flow * 4425 mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev) 4426 { 4427 const struct rte_flow_attr attr = { 4428 .group = 0, 4429 .priority = 0, 4430 .ingress = 1, 4431 .egress = 0, 4432 .transfer = 1, 4433 }; 4434 const struct rte_flow_item pattern = { 4435 .type = RTE_FLOW_ITEM_TYPE_END, 4436 }; 4437 struct rte_flow_action_jump jump = { 4438 .group = 1, 4439 }; 4440 const struct rte_flow_action actions[] = { 4441 { 4442 .type = RTE_FLOW_ACTION_TYPE_JUMP, 4443 .conf = &jump, 4444 }, 4445 { 4446 .type = RTE_FLOW_ACTION_TYPE_END, 4447 }, 4448 }; 4449 struct mlx5_priv *priv = dev->data->dev_private; 4450 struct rte_flow_error error; 4451 4452 return (void *)(uintptr_t)flow_list_create(dev, &priv->ctrl_flows, 4453 &attr, &pattern, 4454 actions, false, &error); 4455 } 4456 4457 /** 4458 * Validate a flow supported by the NIC. 4459 * 4460 * @see rte_flow_validate() 4461 * @see rte_flow_ops 4462 */ 4463 int 4464 mlx5_flow_validate(struct rte_eth_dev *dev, 4465 const struct rte_flow_attr *attr, 4466 const struct rte_flow_item items[], 4467 const struct rte_flow_action actions[], 4468 struct rte_flow_error *error) 4469 { 4470 int hairpin_flow; 4471 4472 hairpin_flow = flow_check_hairpin_split(dev, attr, actions); 4473 return flow_drv_validate(dev, attr, items, actions, 4474 true, hairpin_flow, error); 4475 } 4476 4477 /** 4478 * Create a flow. 4479 * 4480 * @see rte_flow_create() 4481 * @see rte_flow_ops 4482 */ 4483 struct rte_flow * 4484 mlx5_flow_create(struct rte_eth_dev *dev, 4485 const struct rte_flow_attr *attr, 4486 const struct rte_flow_item items[], 4487 const struct rte_flow_action actions[], 4488 struct rte_flow_error *error) 4489 { 4490 struct mlx5_priv *priv = dev->data->dev_private; 4491 4492 /* 4493 * If the device is not started yet, it is not allowed to created a 4494 * flow from application. PMD default flows and traffic control flows 4495 * are not affected. 4496 */ 4497 if (unlikely(!dev->data->dev_started)) { 4498 rte_errno = ENODEV; 4499 DRV_LOG(DEBUG, "port %u is not started when " 4500 "inserting a flow", dev->data->port_id); 4501 return NULL; 4502 } 4503 return (void *)(uintptr_t)flow_list_create(dev, &priv->flows, 4504 attr, items, actions, true, error); 4505 } 4506 4507 /** 4508 * Destroy a flow in a list. 4509 * 4510 * @param dev 4511 * Pointer to Ethernet device. 4512 * @param list 4513 * Pointer to the Indexed flow list. If this parameter NULL, 4514 * there is no flow removal from the list. Be noted that as 4515 * flow is add to the indexed list, memory of the indexed 4516 * list points to maybe changed as flow destroyed. 4517 * @param[in] flow_idx 4518 * Index of flow to destroy. 4519 */ 4520 static void 4521 flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list, 4522 uint32_t flow_idx) 4523 { 4524 struct mlx5_priv *priv = dev->data->dev_private; 4525 struct mlx5_fdir_flow *priv_fdir_flow = NULL; 4526 struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool 4527 [MLX5_IPOOL_RTE_FLOW], flow_idx); 4528 4529 if (!flow) 4530 return; 4531 /* 4532 * Update RX queue flags only if port is started, otherwise it is 4533 * already clean. 
4534 */ 4535 if (dev->data->dev_started) 4536 flow_rxq_flags_trim(dev, flow); 4537 if (flow->hairpin_flow_id) 4538 mlx5_flow_id_release(priv->sh->flow_id_pool, 4539 flow->hairpin_flow_id); 4540 flow_drv_destroy(dev, flow); 4541 if (list) 4542 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, 4543 flow_idx, flow, next); 4544 flow_mreg_del_copy_action(dev, flow); 4545 if (flow->fdir) { 4546 LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) { 4547 if (priv_fdir_flow->rix_flow == flow_idx) 4548 break; 4549 } 4550 if (priv_fdir_flow) { 4551 LIST_REMOVE(priv_fdir_flow, next); 4552 rte_free(priv_fdir_flow->fdir); 4553 rte_free(priv_fdir_flow); 4554 } 4555 } 4556 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx); 4557 } 4558 4559 /** 4560 * Destroy all flows. 4561 * 4562 * @param dev 4563 * Pointer to Ethernet device. 4564 * @param list 4565 * Pointer to the Indexed flow list. 4566 * @param active 4567 * If flushing is called avtively. 4568 */ 4569 void 4570 mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active) 4571 { 4572 uint32_t num_flushed = 0; 4573 4574 while (*list) { 4575 flow_list_destroy(dev, list, *list); 4576 num_flushed++; 4577 } 4578 if (active) { 4579 DRV_LOG(INFO, "port %u: %u flows flushed before stopping", 4580 dev->data->port_id, num_flushed); 4581 } 4582 } 4583 4584 /** 4585 * Remove all flows. 4586 * 4587 * @param dev 4588 * Pointer to Ethernet device. 4589 * @param list 4590 * Pointer to the Indexed flow list. 4591 */ 4592 void 4593 mlx5_flow_stop(struct rte_eth_dev *dev, uint32_t *list) 4594 { 4595 struct mlx5_priv *priv = dev->data->dev_private; 4596 struct rte_flow *flow = NULL; 4597 uint32_t idx; 4598 4599 ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], *list, idx, 4600 flow, next) { 4601 flow_drv_remove(dev, flow); 4602 flow_mreg_stop_copy_action(dev, flow); 4603 } 4604 flow_mreg_del_default_copy_action(dev); 4605 flow_rxq_flags_clear(dev); 4606 } 4607 4608 /** 4609 * Add all flows. 4610 * 4611 * @param dev 4612 * Pointer to Ethernet device. 4613 * @param list 4614 * Pointer to the Indexed flow list. 4615 * 4616 * @return 4617 * 0 on success, a negative errno value otherwise and rte_errno is set. 4618 */ 4619 int 4620 mlx5_flow_start(struct rte_eth_dev *dev, uint32_t *list) 4621 { 4622 struct mlx5_priv *priv = dev->data->dev_private; 4623 struct rte_flow *flow = NULL; 4624 struct rte_flow_error error; 4625 uint32_t idx; 4626 int ret = 0; 4627 4628 /* Make sure default copy action (reg_c[0] -> reg_b) is created. */ 4629 ret = flow_mreg_add_default_copy_action(dev, &error); 4630 if (ret < 0) 4631 return -rte_errno; 4632 /* Apply Flows created by application. */ 4633 ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], *list, idx, 4634 flow, next) { 4635 ret = flow_mreg_start_copy_action(dev, flow); 4636 if (ret < 0) 4637 goto error; 4638 ret = flow_drv_apply(dev, flow, &error); 4639 if (ret < 0) 4640 goto error; 4641 flow_rxq_flags_set(dev, flow); 4642 } 4643 return 0; 4644 error: 4645 ret = rte_errno; /* Save rte_errno before cleanup. */ 4646 mlx5_flow_stop(dev, list); 4647 rte_errno = ret; /* Restore rte_errno. */ 4648 return -rte_errno; 4649 } 4650 4651 /** 4652 * Stop all default actions for flows. 4653 * 4654 * @param dev 4655 * Pointer to Ethernet device. 4656 */ 4657 void 4658 mlx5_flow_stop_default(struct rte_eth_dev *dev) 4659 { 4660 flow_mreg_del_default_copy_action(dev); 4661 } 4662 4663 /** 4664 * Start all default actions for flows. 4665 * 4666 * @param dev 4667 * Pointer to Ethernet device. 
4668 * @return 4669 * 0 on success, a negative errno value otherwise and rte_errno is set. 4670 */ 4671 int 4672 mlx5_flow_start_default(struct rte_eth_dev *dev) 4673 { 4674 struct rte_flow_error error; 4675 4676 /* Make sure default copy action (reg_c[0] -> reg_b) is created. */ 4677 return flow_mreg_add_default_copy_action(dev, &error); 4678 } 4679 4680 /** 4681 * Allocate intermediate resources for flow creation. 4682 * 4683 * @param dev 4684 * Pointer to Ethernet device. 4685 */ 4686 void 4687 mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev) 4688 { 4689 struct mlx5_priv *priv = dev->data->dev_private; 4690 4691 if (!priv->inter_flows) { 4692 priv->inter_flows = rte_calloc(__func__, 1, 4693 MLX5_NUM_MAX_DEV_FLOWS * 4694 sizeof(struct mlx5_flow) + 4695 (sizeof(struct mlx5_flow_rss_desc) + 4696 sizeof(uint16_t) * UINT16_MAX) * 2, 0); 4697 if (!priv->inter_flows) { 4698 DRV_LOG(ERR, "can't allocate intermediate memory."); 4699 return; 4700 } 4701 } 4702 priv->rss_desc = &((struct mlx5_flow *)priv->inter_flows) 4703 [MLX5_NUM_MAX_DEV_FLOWS]; 4704 /* Reset the index. */ 4705 priv->flow_idx = 0; 4706 priv->flow_nested_idx = 0; 4707 } 4708 4709 /** 4710 * Free intermediate resources for flows. 4711 * 4712 * @param dev 4713 * Pointer to Ethernet device. 4714 */ 4715 void 4716 mlx5_flow_free_intermediate(struct rte_eth_dev *dev) 4717 { 4718 struct mlx5_priv *priv = dev->data->dev_private; 4719 4720 rte_free(priv->inter_flows); 4721 priv->inter_flows = NULL; 4722 } 4723 4724 /** 4725 * Verify the flow list is empty 4726 * 4727 * @param dev 4728 * Pointer to Ethernet device. 4729 * 4730 * @return the number of flows not released. 4731 */ 4732 int 4733 mlx5_flow_verify(struct rte_eth_dev *dev) 4734 { 4735 struct mlx5_priv *priv = dev->data->dev_private; 4736 struct rte_flow *flow; 4737 uint32_t idx; 4738 int ret = 0; 4739 4740 ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], priv->flows, idx, 4741 flow, next) { 4742 DRV_LOG(DEBUG, "port %u flow %p still referenced", 4743 dev->data->port_id, (void *)flow); 4744 ++ret; 4745 } 4746 return ret; 4747 } 4748 4749 /** 4750 * Enable default hairpin egress flow. 4751 * 4752 * @param dev 4753 * Pointer to Ethernet device. 4754 * @param queue 4755 * The queue index. 4756 * 4757 * @return 4758 * 0 on success, a negative errno value otherwise and rte_errno is set. 
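*
* Sketch of the rule built below: the pattern matches the internal
* MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE item against @p queue and the actions
* are a JUMP to group MLX5_HAIRPIN_TX_TABLE followed by END.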
4759 */ 4760 int 4761 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, 4762 uint32_t queue) 4763 { 4764 struct mlx5_priv *priv = dev->data->dev_private; 4765 const struct rte_flow_attr attr = { 4766 .egress = 1, 4767 .priority = 0, 4768 }; 4769 struct mlx5_rte_flow_item_tx_queue queue_spec = { 4770 .queue = queue, 4771 }; 4772 struct mlx5_rte_flow_item_tx_queue queue_mask = { 4773 .queue = UINT32_MAX, 4774 }; 4775 struct rte_flow_item items[] = { 4776 { 4777 .type = MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE, 4778 .spec = &queue_spec, 4779 .last = NULL, 4780 .mask = &queue_mask, 4781 }, 4782 { 4783 .type = RTE_FLOW_ITEM_TYPE_END, 4784 }, 4785 }; 4786 struct rte_flow_action_jump jump = { 4787 .group = MLX5_HAIRPIN_TX_TABLE, 4788 }; 4789 struct rte_flow_action actions[2]; 4790 uint32_t flow_idx; 4791 struct rte_flow_error error; 4792 4793 actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP; 4794 actions[0].conf = &jump; 4795 actions[1].type = RTE_FLOW_ACTION_TYPE_END; 4796 flow_idx = flow_list_create(dev, &priv->ctrl_flows, 4797 &attr, items, actions, false, &error); 4798 if (!flow_idx) { 4799 DRV_LOG(DEBUG, 4800 "Failed to create ctrl flow: rte_errno(%d)," 4801 " type(%d), message(%s)", 4802 rte_errno, error.type, 4803 error.message ? error.message : " (no stated reason)"); 4804 return -rte_errno; 4805 } 4806 return 0; 4807 } 4808 4809 /** 4810 * Enable a control flow configured from the control plane. 4811 * 4812 * @param dev 4813 * Pointer to Ethernet device. 4814 * @param eth_spec 4815 * An Ethernet flow spec to apply. 4816 * @param eth_mask 4817 * An Ethernet flow mask to apply. 4818 * @param vlan_spec 4819 * A VLAN flow spec to apply. 4820 * @param vlan_mask 4821 * A VLAN flow mask to apply. 4822 * 4823 * @return 4824 * 0 on success, a negative errno value otherwise and rte_errno is set. 4825 */ 4826 int 4827 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, 4828 struct rte_flow_item_eth *eth_spec, 4829 struct rte_flow_item_eth *eth_mask, 4830 struct rte_flow_item_vlan *vlan_spec, 4831 struct rte_flow_item_vlan *vlan_mask) 4832 { 4833 struct mlx5_priv *priv = dev->data->dev_private; 4834 const struct rte_flow_attr attr = { 4835 .ingress = 1, 4836 .priority = MLX5_FLOW_PRIO_RSVD, 4837 }; 4838 struct rte_flow_item items[] = { 4839 { 4840 .type = RTE_FLOW_ITEM_TYPE_ETH, 4841 .spec = eth_spec, 4842 .last = NULL, 4843 .mask = eth_mask, 4844 }, 4845 { 4846 .type = (vlan_spec) ? 
RTE_FLOW_ITEM_TYPE_VLAN : 4847 RTE_FLOW_ITEM_TYPE_END, 4848 .spec = vlan_spec, 4849 .last = NULL, 4850 .mask = vlan_mask, 4851 }, 4852 { 4853 .type = RTE_FLOW_ITEM_TYPE_END, 4854 }, 4855 }; 4856 uint16_t queue[priv->reta_idx_n]; 4857 struct rte_flow_action_rss action_rss = { 4858 .func = RTE_ETH_HASH_FUNCTION_DEFAULT, 4859 .level = 0, 4860 .types = priv->rss_conf.rss_hf, 4861 .key_len = priv->rss_conf.rss_key_len, 4862 .queue_num = priv->reta_idx_n, 4863 .key = priv->rss_conf.rss_key, 4864 .queue = queue, 4865 }; 4866 struct rte_flow_action actions[] = { 4867 { 4868 .type = RTE_FLOW_ACTION_TYPE_RSS, 4869 .conf = &action_rss, 4870 }, 4871 { 4872 .type = RTE_FLOW_ACTION_TYPE_END, 4873 }, 4874 }; 4875 uint32_t flow_idx; 4876 struct rte_flow_error error; 4877 unsigned int i; 4878 4879 if (!priv->reta_idx_n || !priv->rxqs_n) { 4880 return 0; 4881 } 4882 for (i = 0; i != priv->reta_idx_n; ++i) 4883 queue[i] = (*priv->reta_idx)[i]; 4884 flow_idx = flow_list_create(dev, &priv->ctrl_flows, 4885 &attr, items, actions, false, &error); 4886 if (!flow_idx) 4887 return -rte_errno; 4888 return 0; 4889 } 4890 4891 /** 4892 * Enable a flow control configured from the control plane. 4893 * 4894 * @param dev 4895 * Pointer to Ethernet device. 4896 * @param eth_spec 4897 * An Ethernet flow spec to apply. 4898 * @param eth_mask 4899 * An Ethernet flow mask to apply. 4900 * 4901 * @return 4902 * 0 on success, a negative errno value otherwise and rte_errno is set. 4903 */ 4904 int 4905 mlx5_ctrl_flow(struct rte_eth_dev *dev, 4906 struct rte_flow_item_eth *eth_spec, 4907 struct rte_flow_item_eth *eth_mask) 4908 { 4909 return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL); 4910 } 4911 4912 /** 4913 * Destroy a flow. 4914 * 4915 * @see rte_flow_destroy() 4916 * @see rte_flow_ops 4917 */ 4918 int 4919 mlx5_flow_destroy(struct rte_eth_dev *dev, 4920 struct rte_flow *flow, 4921 struct rte_flow_error *error __rte_unused) 4922 { 4923 struct mlx5_priv *priv = dev->data->dev_private; 4924 4925 flow_list_destroy(dev, &priv->flows, (uintptr_t)(void *)flow); 4926 return 0; 4927 } 4928 4929 /** 4930 * Destroy all flows. 4931 * 4932 * @see rte_flow_flush() 4933 * @see rte_flow_ops 4934 */ 4935 int 4936 mlx5_flow_flush(struct rte_eth_dev *dev, 4937 struct rte_flow_error *error __rte_unused) 4938 { 4939 struct mlx5_priv *priv = dev->data->dev_private; 4940 4941 mlx5_flow_list_flush(dev, &priv->flows, false); 4942 return 0; 4943 } 4944 4945 /** 4946 * Isolated mode. 4947 * 4948 * @see rte_flow_isolate() 4949 * @see rte_flow_ops 4950 */ 4951 int 4952 mlx5_flow_isolate(struct rte_eth_dev *dev, 4953 int enable, 4954 struct rte_flow_error *error) 4955 { 4956 struct mlx5_priv *priv = dev->data->dev_private; 4957 4958 if (dev->data->dev_started) { 4959 rte_flow_error_set(error, EBUSY, 4960 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 4961 NULL, 4962 "port must be stopped first"); 4963 return -rte_errno; 4964 } 4965 priv->isolated = !!enable; 4966 if (enable) 4967 dev->dev_ops = &mlx5_dev_ops_isolate; 4968 else 4969 dev->dev_ops = &mlx5_dev_ops; 4970 return 0; 4971 } 4972 4973 /** 4974 * Query a flow. 
4975 * 4976 * @see rte_flow_query() 4977 * @see rte_flow_ops 4978 */ 4979 static int 4980 flow_drv_query(struct rte_eth_dev *dev, 4981 uint32_t flow_idx, 4982 const struct rte_flow_action *actions, 4983 void *data, 4984 struct rte_flow_error *error) 4985 { 4986 struct mlx5_priv *priv = dev->data->dev_private; 4987 const struct mlx5_flow_driver_ops *fops; 4988 struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool 4989 [MLX5_IPOOL_RTE_FLOW], 4990 flow_idx); 4991 enum mlx5_flow_drv_type ftype; 4992 4993 if (!flow) { 4994 return rte_flow_error_set(error, ENOENT, 4995 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 4996 NULL, 4997 "invalid flow handle"); 4998 } 4999 ftype = flow->drv_type; 5000 MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX); 5001 fops = flow_get_drv_ops(ftype); 5002 5003 return fops->query(dev, flow, actions, data, error); 5004 } 5005 5006 /** 5007 * Query a flow. 5008 * 5009 * @see rte_flow_query() 5010 * @see rte_flow_ops 5011 */ 5012 int 5013 mlx5_flow_query(struct rte_eth_dev *dev, 5014 struct rte_flow *flow, 5015 const struct rte_flow_action *actions, 5016 void *data, 5017 struct rte_flow_error *error) 5018 { 5019 int ret; 5020 5021 ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data, 5022 error); 5023 if (ret < 0) 5024 return ret; 5025 return 0; 5026 } 5027 5028 /** 5029 * Convert a flow director filter to a generic flow. 5030 * 5031 * @param dev 5032 * Pointer to Ethernet device. 5033 * @param fdir_filter 5034 * Flow director filter to add. 5035 * @param attributes 5036 * Generic flow parameters structure. 5037 * 5038 * @return 5039 * 0 on success, a negative errno value otherwise and rte_errno is set. 5040 */ 5041 static int 5042 flow_fdir_filter_convert(struct rte_eth_dev *dev, 5043 const struct rte_eth_fdir_filter *fdir_filter, 5044 struct mlx5_fdir *attributes) 5045 { 5046 struct mlx5_priv *priv = dev->data->dev_private; 5047 const struct rte_eth_fdir_input *input = &fdir_filter->input; 5048 const struct rte_eth_fdir_masks *mask = 5049 &dev->data->dev_conf.fdir_conf.mask; 5050 5051 /* Validate queue number. */ 5052 if (fdir_filter->action.rx_queue >= priv->rxqs_n) { 5053 DRV_LOG(ERR, "port %u invalid queue number %d", 5054 dev->data->port_id, fdir_filter->action.rx_queue); 5055 rte_errno = EINVAL; 5056 return -rte_errno; 5057 } 5058 attributes->attr.ingress = 1; 5059 attributes->items[0] = (struct rte_flow_item) { 5060 .type = RTE_FLOW_ITEM_TYPE_ETH, 5061 .spec = &attributes->l2, 5062 .mask = &attributes->l2_mask, 5063 }; 5064 switch (fdir_filter->action.behavior) { 5065 case RTE_ETH_FDIR_ACCEPT: 5066 attributes->actions[0] = (struct rte_flow_action){ 5067 .type = RTE_FLOW_ACTION_TYPE_QUEUE, 5068 .conf = &attributes->queue, 5069 }; 5070 break; 5071 case RTE_ETH_FDIR_REJECT: 5072 attributes->actions[0] = (struct rte_flow_action){ 5073 .type = RTE_FLOW_ACTION_TYPE_DROP, 5074 }; 5075 break; 5076 default: 5077 DRV_LOG(ERR, "port %u invalid behavior %d", 5078 dev->data->port_id, 5079 fdir_filter->action.behavior); 5080 rte_errno = ENOTSUP; 5081 return -rte_errno; 5082 } 5083 attributes->queue.index = fdir_filter->action.rx_queue; 5084 /* Handle L3. 
*/ 5085 switch (fdir_filter->input.flow_type) { 5086 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 5087 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 5088 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 5089 attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){ 5090 .src_addr = input->flow.ip4_flow.src_ip, 5091 .dst_addr = input->flow.ip4_flow.dst_ip, 5092 .time_to_live = input->flow.ip4_flow.ttl, 5093 .type_of_service = input->flow.ip4_flow.tos, 5094 }; 5095 attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){ 5096 .src_addr = mask->ipv4_mask.src_ip, 5097 .dst_addr = mask->ipv4_mask.dst_ip, 5098 .time_to_live = mask->ipv4_mask.ttl, 5099 .type_of_service = mask->ipv4_mask.tos, 5100 .next_proto_id = mask->ipv4_mask.proto, 5101 }; 5102 attributes->items[1] = (struct rte_flow_item){ 5103 .type = RTE_FLOW_ITEM_TYPE_IPV4, 5104 .spec = &attributes->l3, 5105 .mask = &attributes->l3_mask, 5106 }; 5107 break; 5108 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 5109 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 5110 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 5111 attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){ 5112 .hop_limits = input->flow.ipv6_flow.hop_limits, 5113 .proto = input->flow.ipv6_flow.proto, 5114 }; 5115 5116 memcpy(attributes->l3.ipv6.hdr.src_addr, 5117 input->flow.ipv6_flow.src_ip, 5118 RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); 5119 memcpy(attributes->l3.ipv6.hdr.dst_addr, 5120 input->flow.ipv6_flow.dst_ip, 5121 RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); 5122 memcpy(attributes->l3_mask.ipv6.hdr.src_addr, 5123 mask->ipv6_mask.src_ip, 5124 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); 5125 memcpy(attributes->l3_mask.ipv6.hdr.dst_addr, 5126 mask->ipv6_mask.dst_ip, 5127 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); 5128 attributes->items[1] = (struct rte_flow_item){ 5129 .type = RTE_FLOW_ITEM_TYPE_IPV6, 5130 .spec = &attributes->l3, 5131 .mask = &attributes->l3_mask, 5132 }; 5133 break; 5134 default: 5135 DRV_LOG(ERR, "port %u invalid flow type%d", 5136 dev->data->port_id, fdir_filter->input.flow_type); 5137 rte_errno = ENOTSUP; 5138 return -rte_errno; 5139 } 5140 /* Handle L4. 
*/ 5141 switch (fdir_filter->input.flow_type) { 5142 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 5143 attributes->l4.udp.hdr = (struct rte_udp_hdr){ 5144 .src_port = input->flow.udp4_flow.src_port, 5145 .dst_port = input->flow.udp4_flow.dst_port, 5146 }; 5147 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ 5148 .src_port = mask->src_port_mask, 5149 .dst_port = mask->dst_port_mask, 5150 }; 5151 attributes->items[2] = (struct rte_flow_item){ 5152 .type = RTE_FLOW_ITEM_TYPE_UDP, 5153 .spec = &attributes->l4, 5154 .mask = &attributes->l4_mask, 5155 }; 5156 break; 5157 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 5158 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ 5159 .src_port = input->flow.tcp4_flow.src_port, 5160 .dst_port = input->flow.tcp4_flow.dst_port, 5161 }; 5162 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ 5163 .src_port = mask->src_port_mask, 5164 .dst_port = mask->dst_port_mask, 5165 }; 5166 attributes->items[2] = (struct rte_flow_item){ 5167 .type = RTE_FLOW_ITEM_TYPE_TCP, 5168 .spec = &attributes->l4, 5169 .mask = &attributes->l4_mask, 5170 }; 5171 break; 5172 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 5173 attributes->l4.udp.hdr = (struct rte_udp_hdr){ 5174 .src_port = input->flow.udp6_flow.src_port, 5175 .dst_port = input->flow.udp6_flow.dst_port, 5176 }; 5177 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ 5178 .src_port = mask->src_port_mask, 5179 .dst_port = mask->dst_port_mask, 5180 }; 5181 attributes->items[2] = (struct rte_flow_item){ 5182 .type = RTE_FLOW_ITEM_TYPE_UDP, 5183 .spec = &attributes->l4, 5184 .mask = &attributes->l4_mask, 5185 }; 5186 break; 5187 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 5188 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ 5189 .src_port = input->flow.tcp6_flow.src_port, 5190 .dst_port = input->flow.tcp6_flow.dst_port, 5191 }; 5192 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ 5193 .src_port = mask->src_port_mask, 5194 .dst_port = mask->dst_port_mask, 5195 }; 5196 attributes->items[2] = (struct rte_flow_item){ 5197 .type = RTE_FLOW_ITEM_TYPE_TCP, 5198 .spec = &attributes->l4, 5199 .mask = &attributes->l4_mask, 5200 }; 5201 break; 5202 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 5203 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 5204 break; 5205 default: 5206 DRV_LOG(ERR, "port %u invalid flow type%d", 5207 dev->data->port_id, fdir_filter->input.flow_type); 5208 rte_errno = ENOTSUP; 5209 return -rte_errno; 5210 } 5211 return 0; 5212 } 5213 5214 #define FLOW_FDIR_CMP(f1, f2, fld) \ 5215 memcmp(&(f1)->fld, &(f2)->fld, sizeof(f1->fld)) 5216 5217 /** 5218 * Compare two FDIR flows. If items and actions are identical, the two flows are 5219 * regarded as same. 5220 * 5221 * @param dev 5222 * Pointer to Ethernet device. 5223 * @param f1 5224 * FDIR flow to compare. 5225 * @param f2 5226 * FDIR flow to compare. 5227 * 5228 * @return 5229 * Zero on match, 1 otherwise. 5230 */ 5231 static int 5232 flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2) 5233 { 5234 if (FLOW_FDIR_CMP(f1, f2, attr) || 5235 FLOW_FDIR_CMP(f1, f2, l2) || 5236 FLOW_FDIR_CMP(f1, f2, l2_mask) || 5237 FLOW_FDIR_CMP(f1, f2, l3) || 5238 FLOW_FDIR_CMP(f1, f2, l3_mask) || 5239 FLOW_FDIR_CMP(f1, f2, l4) || 5240 FLOW_FDIR_CMP(f1, f2, l4_mask) || 5241 FLOW_FDIR_CMP(f1, f2, actions[0].type)) 5242 return 1; 5243 if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE && 5244 FLOW_FDIR_CMP(f1, f2, queue)) 5245 return 1; 5246 return 0; 5247 } 5248 5249 /** 5250 * Search device flow list to find out a matched FDIR flow. 5251 * 5252 * @param dev 5253 * Pointer to Ethernet device. 
5254 * @param fdir_flow 5255 * FDIR flow to lookup. 5256 * 5257 * @return 5258 * Index of flow if found, 0 otherwise. 5259 */ 5260 static uint32_t 5261 flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow) 5262 { 5263 struct mlx5_priv *priv = dev->data->dev_private; 5264 uint32_t flow_idx = 0; 5265 struct mlx5_fdir_flow *priv_fdir_flow = NULL; 5266 5267 MLX5_ASSERT(fdir_flow); 5268 LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) { 5269 if (!flow_fdir_cmp(priv_fdir_flow->fdir, fdir_flow)) { 5270 DRV_LOG(DEBUG, "port %u found FDIR flow %u", 5271 dev->data->port_id, flow_idx); 5272 flow_idx = priv_fdir_flow->rix_flow; 5273 break; 5274 } 5275 } 5276 return flow_idx; 5277 } 5278 5279 /** 5280 * Add new flow director filter and store it in list. 5281 * 5282 * @param dev 5283 * Pointer to Ethernet device. 5284 * @param fdir_filter 5285 * Flow director filter to add. 5286 * 5287 * @return 5288 * 0 on success, a negative errno value otherwise and rte_errno is set. 5289 */ 5290 static int 5291 flow_fdir_filter_add(struct rte_eth_dev *dev, 5292 const struct rte_eth_fdir_filter *fdir_filter) 5293 { 5294 struct mlx5_priv *priv = dev->data->dev_private; 5295 struct mlx5_fdir *fdir_flow; 5296 struct rte_flow *flow; 5297 struct mlx5_fdir_flow *priv_fdir_flow = NULL; 5298 uint32_t flow_idx; 5299 int ret; 5300 5301 fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0); 5302 if (!fdir_flow) { 5303 rte_errno = ENOMEM; 5304 return -rte_errno; 5305 } 5306 ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow); 5307 if (ret) 5308 goto error; 5309 flow_idx = flow_fdir_filter_lookup(dev, fdir_flow); 5310 if (flow_idx) { 5311 rte_errno = EEXIST; 5312 goto error; 5313 } 5314 priv_fdir_flow = rte_zmalloc(__func__, sizeof(struct mlx5_fdir_flow), 5315 0); 5316 if (!priv_fdir_flow) { 5317 rte_errno = ENOMEM; 5318 goto error; 5319 } 5320 flow_idx = flow_list_create(dev, &priv->flows, &fdir_flow->attr, 5321 fdir_flow->items, fdir_flow->actions, true, 5322 NULL); 5323 flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx); 5324 if (!flow) 5325 goto error; 5326 flow->fdir = 1; 5327 priv_fdir_flow->fdir = fdir_flow; 5328 priv_fdir_flow->rix_flow = flow_idx; 5329 LIST_INSERT_HEAD(&priv->fdir_flows, priv_fdir_flow, next); 5330 DRV_LOG(DEBUG, "port %u created FDIR flow %p", 5331 dev->data->port_id, (void *)flow); 5332 return 0; 5333 error: 5334 rte_free(priv_fdir_flow); 5335 rte_free(fdir_flow); 5336 return -rte_errno; 5337 } 5338 5339 /** 5340 * Delete specific filter. 5341 * 5342 * @param dev 5343 * Pointer to Ethernet device. 5344 * @param fdir_filter 5345 * Filter to be deleted. 5346 * 5347 * @return 5348 * 0 on success, a negative errno value otherwise and rte_errno is set. 
5349 */ 5350 static int 5351 flow_fdir_filter_delete(struct rte_eth_dev *dev, 5352 const struct rte_eth_fdir_filter *fdir_filter) 5353 { 5354 struct mlx5_priv *priv = dev->data->dev_private; 5355 uint32_t flow_idx; 5356 struct mlx5_fdir fdir_flow = { 5357 .attr.group = 0, 5358 }; 5359 struct mlx5_fdir_flow *priv_fdir_flow = NULL; 5360 int ret; 5361 5362 ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow); 5363 if (ret) 5364 return -rte_errno; 5365 LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) { 5366 /* Find the fdir in priv list */ 5367 if (!flow_fdir_cmp(priv_fdir_flow->fdir, &fdir_flow)) 5368 break; 5369 } 5370 if (!priv_fdir_flow) 5371 return 0; 5372 LIST_REMOVE(priv_fdir_flow, next); 5373 flow_idx = priv_fdir_flow->rix_flow; 5374 flow_list_destroy(dev, &priv->flows, flow_idx); 5375 rte_free(priv_fdir_flow->fdir); 5376 rte_free(priv_fdir_flow); 5377 DRV_LOG(DEBUG, "port %u deleted FDIR flow %u", 5378 dev->data->port_id, flow_idx); 5379 return 0; 5380 } 5381 5382 /** 5383 * Update queue for specific filter. 5384 * 5385 * @param dev 5386 * Pointer to Ethernet device. 5387 * @param fdir_filter 5388 * Filter to be updated. 5389 * 5390 * @return 5391 * 0 on success, a negative errno value otherwise and rte_errno is set. 5392 */ 5393 static int 5394 flow_fdir_filter_update(struct rte_eth_dev *dev, 5395 const struct rte_eth_fdir_filter *fdir_filter) 5396 { 5397 int ret; 5398 5399 ret = flow_fdir_filter_delete(dev, fdir_filter); 5400 if (ret) 5401 return ret; 5402 return flow_fdir_filter_add(dev, fdir_filter); 5403 } 5404 5405 /** 5406 * Flush all filters. 5407 * 5408 * @param dev 5409 * Pointer to Ethernet device. 5410 */ 5411 static void 5412 flow_fdir_filter_flush(struct rte_eth_dev *dev) 5413 { 5414 struct mlx5_priv *priv = dev->data->dev_private; 5415 struct mlx5_fdir_flow *priv_fdir_flow = NULL; 5416 5417 while (!LIST_EMPTY(&priv->fdir_flows)) { 5418 priv_fdir_flow = LIST_FIRST(&priv->fdir_flows); 5419 LIST_REMOVE(priv_fdir_flow, next); 5420 flow_list_destroy(dev, &priv->flows, priv_fdir_flow->rix_flow); 5421 rte_free(priv_fdir_flow->fdir); 5422 rte_free(priv_fdir_flow); 5423 } 5424 } 5425 5426 /** 5427 * Get flow director information. 5428 * 5429 * @param dev 5430 * Pointer to Ethernet device. 5431 * @param[out] fdir_info 5432 * Resulting flow director information. 5433 */ 5434 static void 5435 flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) 5436 { 5437 struct rte_eth_fdir_masks *mask = 5438 &dev->data->dev_conf.fdir_conf.mask; 5439 5440 fdir_info->mode = dev->data->dev_conf.fdir_conf.mode; 5441 fdir_info->guarant_spc = 0; 5442 rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask)); 5443 fdir_info->max_flexpayload = 0; 5444 fdir_info->flow_types_mask[0] = 0; 5445 fdir_info->flex_payload_unit = 0; 5446 fdir_info->max_flex_payload_segment_num = 0; 5447 fdir_info->flex_payload_limit = 0; 5448 memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf)); 5449 } 5450 5451 /** 5452 * Deal with flow director operations. 5453 * 5454 * @param dev 5455 * Pointer to Ethernet device. 5456 * @param filter_op 5457 * Operation to perform. 5458 * @param arg 5459 * Pointer to operation-specific structure. 5460 * 5461 * @return 5462 * 0 on success, a negative errno value otherwise and rte_errno is set. 
5463 */ 5464 static int 5465 flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, 5466 void *arg) 5467 { 5468 enum rte_fdir_mode fdir_mode = 5469 dev->data->dev_conf.fdir_conf.mode; 5470 5471 if (filter_op == RTE_ETH_FILTER_NOP) 5472 return 0; 5473 if (fdir_mode != RTE_FDIR_MODE_PERFECT && 5474 fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 5475 DRV_LOG(ERR, "port %u flow director mode %d not supported", 5476 dev->data->port_id, fdir_mode); 5477 rte_errno = EINVAL; 5478 return -rte_errno; 5479 } 5480 switch (filter_op) { 5481 case RTE_ETH_FILTER_ADD: 5482 return flow_fdir_filter_add(dev, arg); 5483 case RTE_ETH_FILTER_UPDATE: 5484 return flow_fdir_filter_update(dev, arg); 5485 case RTE_ETH_FILTER_DELETE: 5486 return flow_fdir_filter_delete(dev, arg); 5487 case RTE_ETH_FILTER_FLUSH: 5488 flow_fdir_filter_flush(dev); 5489 break; 5490 case RTE_ETH_FILTER_INFO: 5491 flow_fdir_info_get(dev, arg); 5492 break; 5493 default: 5494 DRV_LOG(DEBUG, "port %u unknown operation %u", 5495 dev->data->port_id, filter_op); 5496 rte_errno = EINVAL; 5497 return -rte_errno; 5498 } 5499 return 0; 5500 } 5501 5502 /** 5503 * Manage filter operations. 5504 * 5505 * @param dev 5506 * Pointer to Ethernet device structure. 5507 * @param filter_type 5508 * Filter type. 5509 * @param filter_op 5510 * Operation to perform. 5511 * @param arg 5512 * Pointer to operation-specific structure. 5513 * 5514 * @return 5515 * 0 on success, a negative errno value otherwise and rte_errno is set. 5516 */ 5517 int 5518 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, 5519 enum rte_filter_type filter_type, 5520 enum rte_filter_op filter_op, 5521 void *arg) 5522 { 5523 switch (filter_type) { 5524 case RTE_ETH_FILTER_GENERIC: 5525 if (filter_op != RTE_ETH_FILTER_GET) { 5526 rte_errno = EINVAL; 5527 return -rte_errno; 5528 } 5529 *(const void **)arg = &mlx5_flow_ops; 5530 return 0; 5531 case RTE_ETH_FILTER_FDIR: 5532 return flow_fdir_ctrl_func(dev, filter_op, arg); 5533 default: 5534 DRV_LOG(ERR, "port %u filter type (%d) not supported", 5535 dev->data->port_id, filter_type); 5536 rte_errno = ENOTSUP; 5537 return -rte_errno; 5538 } 5539 return 0; 5540 } 5541 5542 /** 5543 * Create the needed meter and suffix tables. 5544 * 5545 * @param[in] dev 5546 * Pointer to Ethernet device. 5547 * @param[in] fm 5548 * Pointer to the flow meter. 5549 * 5550 * @return 5551 * Pointer to table set on success, NULL otherwise. 5552 */ 5553 struct mlx5_meter_domains_infos * 5554 mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev, 5555 const struct mlx5_flow_meter *fm) 5556 { 5557 const struct mlx5_flow_driver_ops *fops; 5558 5559 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5560 return fops->create_mtr_tbls(dev, fm); 5561 } 5562 5563 /** 5564 * Destroy the meter table set. 5565 * 5566 * @param[in] dev 5567 * Pointer to Ethernet device. 5568 * @param[in] tbl 5569 * Pointer to the meter table set. 5570 * 5571 * @return 5572 * 0 on success. 5573 */ 5574 int 5575 mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev, 5576 struct mlx5_meter_domains_infos *tbls) 5577 { 5578 const struct mlx5_flow_driver_ops *fops; 5579 5580 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5581 return fops->destroy_mtr_tbls(dev, tbls); 5582 } 5583 5584 /** 5585 * Create policer rules. 5586 * 5587 * @param[in] dev 5588 * Pointer to Ethernet device. 5589 * @param[in] fm 5590 * Pointer to flow meter structure. 5591 * @param[in] attr 5592 * Pointer to flow attributes. 5593 * 5594 * @return 5595 * 0 on success, -1 otherwise. 

/**
 * Create the needed meter and suffix tables.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] fm
 *   Pointer to the flow meter.
 *
 * @return
 *   Pointer to table set on success, NULL otherwise.
 */
struct mlx5_meter_domains_infos *
mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
			  const struct mlx5_flow_meter *fm)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	return fops->create_mtr_tbls(dev, fm);
}

/**
 * Destroy the meter table set.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] tbls
 *   Pointer to the meter table set.
 *
 * @return
 *   0 on success.
 */
int
mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
			   struct mlx5_meter_domains_infos *tbls)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	return fops->destroy_mtr_tbls(dev, tbls);
}

/**
 * Create policer rules.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] fm
 *   Pointer to flow meter structure.
 * @param[in] attr
 *   Pointer to flow attributes.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
int
mlx5_flow_create_policer_rules(struct rte_eth_dev *dev,
			       struct mlx5_flow_meter *fm,
			       const struct rte_flow_attr *attr)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	return fops->create_policer_rules(dev, fm, attr);
}

/**
 * Destroy policer rules.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] fm
 *   Pointer to flow meter structure.
 * @param[in] attr
 *   Pointer to flow attributes.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
int
mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev,
				struct mlx5_flow_meter *fm,
				const struct rte_flow_attr *attr)
{
	const struct mlx5_flow_driver_ops *fops;

	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
	return fops->destroy_policer_rules(dev, fm, attr);
}

/**
 * Allocate a counter.
 *
 * @param[in] dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Index to allocated counter on success, 0 otherwise.
 */
uint32_t
mlx5_counter_alloc(struct rte_eth_dev *dev)
{
	const struct mlx5_flow_driver_ops *fops;
	struct rte_flow_attr attr = { .transfer = 0 };

	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
		return fops->counter_alloc(dev);
	}
	DRV_LOG(ERR,
		"port %u counter allocate is not supported.",
		dev->data->port_id);
	return 0;
}

/**
 * Free a counter.
 *
 * @param[in] dev
 *   Pointer to Ethernet device structure.
 * @param[in] cnt
 *   Index of the counter to be freed.
 */
void
mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
{
	const struct mlx5_flow_driver_ops *fops;
	struct rte_flow_attr attr = { .transfer = 0 };

	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
		fops->counter_free(dev, cnt);
		return;
	}
	DRV_LOG(ERR,
		"port %u counter free is not supported.",
		dev->data->port_id);
}

/**
 * Query counter statistics.
 *
 * @param[in] dev
 *   Pointer to Ethernet device structure.
 * @param[in] cnt
 *   Index of the counter to query.
 * @param[in] clear
 *   Set to clear counter statistics.
 * @param[out] pkts
 *   Where to store the number of packets that hit the counter.
 * @param[out] bytes
 *   Where to store the number of bytes that hit the counter.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
int
mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
		   bool clear, uint64_t *pkts, uint64_t *bytes)
{
	const struct mlx5_flow_driver_ops *fops;
	struct rte_flow_attr attr = { .transfer = 0 };

	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
		return fops->counter_query(dev, cnt, clear, pkts, bytes);
	}
	DRV_LOG(ERR,
		"port %u counter query is not supported.",
		dev->data->port_id);
	return -ENOTSUP;
}
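
/*
 * Illustrative sketch (not part of the driver): how a caller inside the PMD
 * could combine the counter helpers above.  "dev" is assumed to be an mlx5
 * port running with the DV flow engine; otherwise mlx5_counter_alloc()
 * returns 0.
 *
 *	uint64_t pkts = 0, bytes = 0;
 *	uint32_t cnt = mlx5_counter_alloc(dev);
 *
 *	if (cnt) {
 *		if (!mlx5_counter_query(dev, cnt, false, &pkts, &bytes))
 *			DRV_LOG(DEBUG, "counter %u: %" PRIu64 " packets, %"
 *				PRIu64 " bytes", cnt, pkts, bytes);
 *		mlx5_counter_free(dev, cnt);
 *	}
 */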

#define MLX5_POOL_QUERY_FREQ_US 1000000

/**
 * Set the periodic procedure for triggering asynchronous batch queries for all
 * the counter pools.
 *
 * @param[in] sh
 *   Pointer to mlx5_ibv_shared object.
 */
void
mlx5_set_query_alarm(struct mlx5_ibv_shared *sh)
{
	struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(sh, 0, 0);
	uint32_t pools_n = rte_atomic16_read(&cont->n_valid);
	uint32_t us;

	cont = MLX5_CNT_CONTAINER(sh, 1, 0);
	pools_n += rte_atomic16_read(&cont->n_valid);
	us = MLX5_POOL_QUERY_FREQ_US / pools_n;
	DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
	if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
		sh->cmng.query_thread_on = 0;
		DRV_LOG(ERR, "Cannot reinitialize query alarm");
	} else {
		sh->cmng.query_thread_on = 1;
	}
}

/**
 * The periodic procedure for triggering asynchronous batch queries for all the
 * counter pools. This function is probably called by the host thread.
 *
 * @param[in] arg
 *   The parameter for the alarm process.
 */
void
mlx5_flow_query_alarm(void *arg)
{
	struct mlx5_ibv_shared *sh = arg;
	struct mlx5_devx_obj *dcs;
	uint16_t offset;
	int ret;
	uint8_t batch = sh->cmng.batch;
	uint16_t pool_index = sh->cmng.pool_index;
	struct mlx5_pools_container *cont;
	struct mlx5_pools_container *mcont;
	struct mlx5_flow_counter_pool *pool;

	if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
		goto set_alarm;
next_container:
	cont = MLX5_CNT_CONTAINER(sh, batch, 1);
	mcont = MLX5_CNT_CONTAINER(sh, batch, 0);
	/* Check if resize was done and need to flip a container. */
	if (cont != mcont) {
		if (cont->pools) {
			/* Clean the old container. */
			rte_free(cont->pools);
			memset(cont, 0, sizeof(*cont));
		}
		rte_cio_wmb();
		/* Flip the host container. */
		sh->cmng.mhi[batch] ^= (uint8_t)2;
		cont = mcont;
	}
	if (!cont->pools) {
		/* 2 empty containers case is unexpected. */
		if (unlikely(batch != sh->cmng.batch))
			goto set_alarm;
		batch ^= 0x1;
		pool_index = 0;
		goto next_container;
	}
	pool = cont->pools[pool_index];
	if (pool->raw_hw)
		/* There is a pool query in progress. */
		goto set_alarm;
	pool->raw_hw =
		LIST_FIRST(&sh->cmng.free_stat_raws);
	if (!pool->raw_hw)
		/* No free counter statistics raw memory. */
		goto set_alarm;
	dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read
							      (&pool->a64_dcs);
	offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL;
	/*
	 * Identify the counters released between query trigger and query
	 * handle more efficiently. A counter released in this gap period
	 * should wait for a new round of query, as the newly arrived packets
	 * will not be taken into account.
	 */
	rte_atomic64_add(&pool->start_query_gen, 1);
	ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL -
					       offset, NULL, NULL,
					       pool->raw_hw->mem_mng->dm->id,
					       (void *)(uintptr_t)
					       (pool->raw_hw->data + offset),
					       sh->devx_comp,
					       (uint64_t)(uintptr_t)pool);
	if (ret) {
		rte_atomic64_sub(&pool->start_query_gen, 1);
		DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
			" %d", pool->min_dcs->id);
		pool->raw_hw = NULL;
		goto set_alarm;
	}
	pool->raw_hw->min_dcs_id = dcs->id;
	LIST_REMOVE(pool->raw_hw, next);
	sh->cmng.pending_queries++;
	pool_index++;
	if (pool_index >= rte_atomic16_read(&cont->n_valid)) {
		batch ^= 0x1;
		pool_index = 0;
	}
set_alarm:
	sh->cmng.batch = batch;
	sh->cmng.pool_index = pool_index;
	mlx5_set_query_alarm(sh);
}
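
/*
 * Worked example of the re-arm period above (numbers are illustrative):
 * with MLX5_POOL_QUERY_FREQ_US = 1000000 and, say, 4 valid pools summed over
 * both host containers, mlx5_set_query_alarm() re-arms the alarm every
 * 1000000 / 4 = 250000 us. Since mlx5_flow_query_alarm() advances by one
 * pool per invocation, each pool ends up being queried roughly once per
 * second.
 */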

/**
 * Handler for the HW response with the ready values from an asynchronous
 * batch query. This function is probably called by the host thread.
 *
 * @param[in] sh
 *   The pointer to the shared IB device context.
 * @param[in] async_id
 *   The Devx async ID.
 * @param[in] status
 *   The status of the completion.
 */
void
mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh,
				  uint64_t async_id, int status)
{
	struct mlx5_flow_counter_pool *pool =
		(struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
	struct mlx5_counter_stats_raw *raw_to_free;

	if (unlikely(status)) {
		rte_atomic64_sub(&pool->start_query_gen, 1);
		raw_to_free = pool->raw_hw;
	} else {
		raw_to_free = pool->raw;
		rte_spinlock_lock(&pool->sl);
		pool->raw = pool->raw_hw;
		rte_spinlock_unlock(&pool->sl);
		MLX5_ASSERT(rte_atomic64_read(&pool->end_query_gen) + 1 ==
			    rte_atomic64_read(&pool->start_query_gen));
		rte_atomic64_set(&pool->end_query_gen,
				 rte_atomic64_read(&pool->start_query_gen));
		/* Be sure the new raw counters data is updated in memory. */
		rte_cio_wmb();
	}
	LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
	pool->raw_hw = NULL;
	sh->cmng.pending_queries--;
}

/**
 * Translate the rte_flow group index to HW table value.
 *
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[in] external
 *   Set when the flow rule is created by a request external to the PMD.
 * @param[in] group
 *   rte_flow group index value.
 * @param[in] fdb_def_rule
 *   Whether the FDB jump to table 1 is configured.
 * @param[out] table
 *   HW table value.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external,
			 uint32_t group, bool fdb_def_rule, uint32_t *table,
			 struct rte_flow_error *error)
{
	if (attributes->transfer && external && fdb_def_rule) {
		if (group == UINT32_MAX)
			return rte_flow_error_set
					(error, EINVAL,
					 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					 NULL,
					 "group index not supported");
		*table = group + 1;
	} else {
		*table = group;
	}
	return 0;
}
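
/*
 * Worked example for the translation above: for a transfer rule created by an
 * external request while the FDB default jump rule is installed
 * (fdb_def_rule), group 0 maps to HW table 1 and group N to table N + 1,
 * since table 0 is taken by the default jump rule; in every other case the
 * group index is used as the HW table value unchanged.
 */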

/**
 * Discover availability of metadata reg_c's.
 *
 * Iteratively use test flows to check availability.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	enum modify_reg idx;
	int n = 0;

	/* reg_c[0] and reg_c[1] are reserved. */
	config->flow_mreg_c[n++] = REG_C_0;
	config->flow_mreg_c[n++] = REG_C_1;
	/* Discover availability of other reg_c's. */
	for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
		struct rte_flow_attr attr = {
			.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
			.priority = MLX5_FLOW_PRIO_RSVD,
			.ingress = 1,
		};
		struct rte_flow_item items[] = {
			[0] = {
				.type = RTE_FLOW_ITEM_TYPE_END,
			},
		};
		struct rte_flow_action actions[] = {
			[0] = {
				.type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
				.conf = &(struct mlx5_flow_action_copy_mreg){
					.src = REG_C_1,
					.dst = idx,
				},
			},
			[1] = {
				.type = RTE_FLOW_ACTION_TYPE_JUMP,
				.conf = &(struct rte_flow_action_jump){
					.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
				},
			},
			[2] = {
				.type = RTE_FLOW_ACTION_TYPE_END,
			},
		};
		uint32_t flow_idx;
		struct rte_flow *flow;
		struct rte_flow_error error;

		if (!config->dv_flow_en)
			break;
		/* Create internal flow, validation skips copy action. */
		flow_idx = flow_list_create(dev, NULL, &attr, items,
					    actions, false, &error);
		flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
				      flow_idx);
		if (!flow)
			continue;
		if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL))
			config->flow_mreg_c[n++] = idx;
		flow_list_destroy(dev, NULL, flow_idx);
	}
	for (; n < MLX5_MREG_C_NUM; ++n)
		config->flow_mreg_c[n] = REG_NONE;
	return 0;
}

/**
 * Dump flow raw hw data to file.
 *
 * @param[in] dev
 *   The pointer to Ethernet device.
 * @param[in] file
 *   A pointer to a file for output.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_flow_dev_dump(struct rte_eth_dev *dev,
		   FILE *file,
		   struct rte_flow_error *error __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;

	return mlx5_devx_cmd_flow_dump(sh->fdb_domain, sh->rx_domain,
				       sh->tx_domain, file);
}
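
/*
 * Illustrative sketch (not part of the driver): dumping the HW steering
 * information of a port from an application through the generic rte_flow
 * API, which lands in mlx5_flow_dev_dump() above.  The port_id value and the
 * output path are assumptions made for the example.
 *
 *	struct rte_flow_error error;
 *	FILE *f = fopen("/tmp/mlx5_flow_dump.txt", "w");
 *
 *	if (f != NULL) {
 *		if (rte_flow_dev_dump(port_id, f, &error))
 *			printf("flow dump failed: %s\n",
 *			       error.message ? error.message : "(none)");
 *		fclose(f);
 *	}
 */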