1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2016 6WIND S.A. 3 * Copyright 2016 Mellanox Technologies, Ltd 4 */ 5 6 #include <netinet/in.h> 7 #include <sys/queue.h> 8 #include <stdalign.h> 9 #include <stdint.h> 10 #include <string.h> 11 #include <stdbool.h> 12 13 /* Verbs header. */ 14 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ 15 #ifdef PEDANTIC 16 #pragma GCC diagnostic ignored "-Wpedantic" 17 #endif 18 #include <infiniband/verbs.h> 19 #ifdef PEDANTIC 20 #pragma GCC diagnostic error "-Wpedantic" 21 #endif 22 23 #include <rte_common.h> 24 #include <rte_ether.h> 25 #include <rte_ethdev_driver.h> 26 #include <rte_flow.h> 27 #include <rte_flow_driver.h> 28 #include <rte_malloc.h> 29 #include <rte_ip.h> 30 31 #include <mlx5_glue.h> 32 #include <mlx5_devx_cmds.h> 33 #include <mlx5_prm.h> 34 35 #include "mlx5_defs.h" 36 #include "mlx5.h" 37 #include "mlx5_flow.h" 38 #include "mlx5_rxtx.h" 39 40 /* Dev ops structure defined in mlx5.c */ 41 extern const struct eth_dev_ops mlx5_dev_ops; 42 extern const struct eth_dev_ops mlx5_dev_ops_isolate; 43 44 /** Device flow drivers. */ 45 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 46 extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops; 47 #endif 48 extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops; 49 50 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops; 51 52 const struct mlx5_flow_driver_ops *flow_drv_ops[] = { 53 [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops, 54 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 55 [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops, 56 #endif 57 [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops, 58 [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops 59 }; 60 61 enum mlx5_expansion { 62 MLX5_EXPANSION_ROOT, 63 MLX5_EXPANSION_ROOT_OUTER, 64 MLX5_EXPANSION_ROOT_ETH_VLAN, 65 MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN, 66 MLX5_EXPANSION_OUTER_ETH, 67 MLX5_EXPANSION_OUTER_ETH_VLAN, 68 MLX5_EXPANSION_OUTER_VLAN, 69 MLX5_EXPANSION_OUTER_IPV4, 70 MLX5_EXPANSION_OUTER_IPV4_UDP, 71 MLX5_EXPANSION_OUTER_IPV4_TCP, 72 MLX5_EXPANSION_OUTER_IPV6, 73 MLX5_EXPANSION_OUTER_IPV6_UDP, 74 MLX5_EXPANSION_OUTER_IPV6_TCP, 75 MLX5_EXPANSION_VXLAN, 76 MLX5_EXPANSION_VXLAN_GPE, 77 MLX5_EXPANSION_GRE, 78 MLX5_EXPANSION_MPLS, 79 MLX5_EXPANSION_ETH, 80 MLX5_EXPANSION_ETH_VLAN, 81 MLX5_EXPANSION_VLAN, 82 MLX5_EXPANSION_IPV4, 83 MLX5_EXPANSION_IPV4_UDP, 84 MLX5_EXPANSION_IPV4_TCP, 85 MLX5_EXPANSION_IPV6, 86 MLX5_EXPANSION_IPV6_UDP, 87 MLX5_EXPANSION_IPV6_TCP, 88 }; 89 90 /** Supported expansion of items. 
*/ 91 static const struct rte_flow_expand_node mlx5_support_expansion[] = { 92 [MLX5_EXPANSION_ROOT] = { 93 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, 94 MLX5_EXPANSION_IPV4, 95 MLX5_EXPANSION_IPV6), 96 .type = RTE_FLOW_ITEM_TYPE_END, 97 }, 98 [MLX5_EXPANSION_ROOT_OUTER] = { 99 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH, 100 MLX5_EXPANSION_OUTER_IPV4, 101 MLX5_EXPANSION_OUTER_IPV6), 102 .type = RTE_FLOW_ITEM_TYPE_END, 103 }, 104 [MLX5_EXPANSION_ROOT_ETH_VLAN] = { 105 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN), 106 .type = RTE_FLOW_ITEM_TYPE_END, 107 }, 108 [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = { 109 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN), 110 .type = RTE_FLOW_ITEM_TYPE_END, 111 }, 112 [MLX5_EXPANSION_OUTER_ETH] = { 113 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, 114 MLX5_EXPANSION_OUTER_IPV6, 115 MLX5_EXPANSION_MPLS), 116 .type = RTE_FLOW_ITEM_TYPE_ETH, 117 .rss_types = 0, 118 }, 119 [MLX5_EXPANSION_OUTER_ETH_VLAN] = { 120 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN), 121 .type = RTE_FLOW_ITEM_TYPE_ETH, 122 .rss_types = 0, 123 }, 124 [MLX5_EXPANSION_OUTER_VLAN] = { 125 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, 126 MLX5_EXPANSION_OUTER_IPV6), 127 .type = RTE_FLOW_ITEM_TYPE_VLAN, 128 }, 129 [MLX5_EXPANSION_OUTER_IPV4] = { 130 .next = RTE_FLOW_EXPAND_RSS_NEXT 131 (MLX5_EXPANSION_OUTER_IPV4_UDP, 132 MLX5_EXPANSION_OUTER_IPV4_TCP, 133 MLX5_EXPANSION_GRE, 134 MLX5_EXPANSION_IPV4, 135 MLX5_EXPANSION_IPV6), 136 .type = RTE_FLOW_ITEM_TYPE_IPV4, 137 .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | 138 ETH_RSS_NONFRAG_IPV4_OTHER, 139 }, 140 [MLX5_EXPANSION_OUTER_IPV4_UDP] = { 141 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, 142 MLX5_EXPANSION_VXLAN_GPE), 143 .type = RTE_FLOW_ITEM_TYPE_UDP, 144 .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, 145 }, 146 [MLX5_EXPANSION_OUTER_IPV4_TCP] = { 147 .type = RTE_FLOW_ITEM_TYPE_TCP, 148 .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, 149 }, 150 [MLX5_EXPANSION_OUTER_IPV6] = { 151 .next = RTE_FLOW_EXPAND_RSS_NEXT 152 (MLX5_EXPANSION_OUTER_IPV6_UDP, 153 MLX5_EXPANSION_OUTER_IPV6_TCP, 154 MLX5_EXPANSION_IPV4, 155 MLX5_EXPANSION_IPV6), 156 .type = RTE_FLOW_ITEM_TYPE_IPV6, 157 .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | 158 ETH_RSS_NONFRAG_IPV6_OTHER, 159 }, 160 [MLX5_EXPANSION_OUTER_IPV6_UDP] = { 161 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, 162 MLX5_EXPANSION_VXLAN_GPE), 163 .type = RTE_FLOW_ITEM_TYPE_UDP, 164 .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, 165 }, 166 [MLX5_EXPANSION_OUTER_IPV6_TCP] = { 167 .type = RTE_FLOW_ITEM_TYPE_TCP, 168 .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, 169 }, 170 [MLX5_EXPANSION_VXLAN] = { 171 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, 172 MLX5_EXPANSION_IPV4, 173 MLX5_EXPANSION_IPV6), 174 .type = RTE_FLOW_ITEM_TYPE_VXLAN, 175 }, 176 [MLX5_EXPANSION_VXLAN_GPE] = { 177 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, 178 MLX5_EXPANSION_IPV4, 179 MLX5_EXPANSION_IPV6), 180 .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE, 181 }, 182 [MLX5_EXPANSION_GRE] = { 183 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4), 184 .type = RTE_FLOW_ITEM_TYPE_GRE, 185 }, 186 [MLX5_EXPANSION_MPLS] = { 187 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 188 MLX5_EXPANSION_IPV6), 189 .type = RTE_FLOW_ITEM_TYPE_MPLS, 190 }, 191 [MLX5_EXPANSION_ETH] = { 192 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 193 MLX5_EXPANSION_IPV6), 194 .type = RTE_FLOW_ITEM_TYPE_ETH, 195 }, 196 [MLX5_EXPANSION_ETH_VLAN] = { 197 .next = 
RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN), 198 .type = RTE_FLOW_ITEM_TYPE_ETH, 199 }, 200 [MLX5_EXPANSION_VLAN] = { 201 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 202 MLX5_EXPANSION_IPV6), 203 .type = RTE_FLOW_ITEM_TYPE_VLAN, 204 }, 205 [MLX5_EXPANSION_IPV4] = { 206 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP, 207 MLX5_EXPANSION_IPV4_TCP), 208 .type = RTE_FLOW_ITEM_TYPE_IPV4, 209 .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | 210 ETH_RSS_NONFRAG_IPV4_OTHER, 211 }, 212 [MLX5_EXPANSION_IPV4_UDP] = { 213 .type = RTE_FLOW_ITEM_TYPE_UDP, 214 .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, 215 }, 216 [MLX5_EXPANSION_IPV4_TCP] = { 217 .type = RTE_FLOW_ITEM_TYPE_TCP, 218 .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, 219 }, 220 [MLX5_EXPANSION_IPV6] = { 221 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP, 222 MLX5_EXPANSION_IPV6_TCP), 223 .type = RTE_FLOW_ITEM_TYPE_IPV6, 224 .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | 225 ETH_RSS_NONFRAG_IPV6_OTHER, 226 }, 227 [MLX5_EXPANSION_IPV6_UDP] = { 228 .type = RTE_FLOW_ITEM_TYPE_UDP, 229 .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, 230 }, 231 [MLX5_EXPANSION_IPV6_TCP] = { 232 .type = RTE_FLOW_ITEM_TYPE_TCP, 233 .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, 234 }, 235 }; 236 237 static const struct rte_flow_ops mlx5_flow_ops = { 238 .validate = mlx5_flow_validate, 239 .create = mlx5_flow_create, 240 .destroy = mlx5_flow_destroy, 241 .flush = mlx5_flow_flush, 242 .isolate = mlx5_flow_isolate, 243 .query = mlx5_flow_query, 244 .dev_dump = mlx5_flow_dev_dump, 245 }; 246 247 /* Convert FDIR request to Generic flow. */ 248 struct mlx5_fdir { 249 struct rte_flow_attr attr; 250 struct rte_flow_item items[4]; 251 struct rte_flow_item_eth l2; 252 struct rte_flow_item_eth l2_mask; 253 union { 254 struct rte_flow_item_ipv4 ipv4; 255 struct rte_flow_item_ipv6 ipv6; 256 } l3; 257 union { 258 struct rte_flow_item_ipv4 ipv4; 259 struct rte_flow_item_ipv6 ipv6; 260 } l3_mask; 261 union { 262 struct rte_flow_item_udp udp; 263 struct rte_flow_item_tcp tcp; 264 } l4; 265 union { 266 struct rte_flow_item_udp udp; 267 struct rte_flow_item_tcp tcp; 268 } l4_mask; 269 struct rte_flow_action actions[2]; 270 struct rte_flow_action_queue queue; 271 }; 272 273 /* Map of Verbs to Flow priority with 8 Verbs priorities. */ 274 static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = { 275 { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 }, 276 }; 277 278 /* Map of Verbs to Flow priority with 16 Verbs priorities. */ 279 static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = { 280 { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 }, 281 { 9, 10, 11 }, { 12, 13, 14 }, 282 }; 283 284 /* Tunnel information. */ 285 struct mlx5_flow_tunnel_info { 286 uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */ 287 uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). 
*/ 288 }; 289 290 static struct mlx5_flow_tunnel_info tunnels_info[] = { 291 { 292 .tunnel = MLX5_FLOW_LAYER_VXLAN, 293 .ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP, 294 }, 295 { 296 .tunnel = MLX5_FLOW_LAYER_GENEVE, 297 .ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP, 298 }, 299 { 300 .tunnel = MLX5_FLOW_LAYER_VXLAN_GPE, 301 .ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP, 302 }, 303 { 304 .tunnel = MLX5_FLOW_LAYER_GRE, 305 .ptype = RTE_PTYPE_TUNNEL_GRE, 306 }, 307 { 308 .tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP, 309 .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP, 310 }, 311 { 312 .tunnel = MLX5_FLOW_LAYER_MPLS, 313 .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE, 314 }, 315 { 316 .tunnel = MLX5_FLOW_LAYER_NVGRE, 317 .ptype = RTE_PTYPE_TUNNEL_NVGRE, 318 }, 319 { 320 .tunnel = MLX5_FLOW_LAYER_IPIP, 321 .ptype = RTE_PTYPE_TUNNEL_IP, 322 }, 323 { 324 .tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP, 325 .ptype = RTE_PTYPE_TUNNEL_IP, 326 }, 327 { 328 .tunnel = MLX5_FLOW_LAYER_GTP, 329 .ptype = RTE_PTYPE_TUNNEL_GTPU, 330 }, 331 }; 332 333 /** 334 * Translate tag ID to register. 335 * 336 * @param[in] dev 337 * Pointer to the Ethernet device structure. 338 * @param[in] feature 339 * The feature that request the register. 340 * @param[in] id 341 * The request register ID. 342 * @param[out] error 343 * Error description in case of any. 344 * 345 * @return 346 * The request register on success, a negative errno 347 * value otherwise and rte_errno is set. 348 */ 349 int 350 mlx5_flow_get_reg_id(struct rte_eth_dev *dev, 351 enum mlx5_feature_name feature, 352 uint32_t id, 353 struct rte_flow_error *error) 354 { 355 struct mlx5_priv *priv = dev->data->dev_private; 356 struct mlx5_dev_config *config = &priv->config; 357 enum modify_reg start_reg; 358 bool skip_mtr_reg = false; 359 360 switch (feature) { 361 case MLX5_HAIRPIN_RX: 362 return REG_B; 363 case MLX5_HAIRPIN_TX: 364 return REG_A; 365 case MLX5_METADATA_RX: 366 switch (config->dv_xmeta_en) { 367 case MLX5_XMETA_MODE_LEGACY: 368 return REG_B; 369 case MLX5_XMETA_MODE_META16: 370 return REG_C_0; 371 case MLX5_XMETA_MODE_META32: 372 return REG_C_1; 373 } 374 break; 375 case MLX5_METADATA_TX: 376 return REG_A; 377 case MLX5_METADATA_FDB: 378 switch (config->dv_xmeta_en) { 379 case MLX5_XMETA_MODE_LEGACY: 380 return REG_NONE; 381 case MLX5_XMETA_MODE_META16: 382 return REG_C_0; 383 case MLX5_XMETA_MODE_META32: 384 return REG_C_1; 385 } 386 break; 387 case MLX5_FLOW_MARK: 388 switch (config->dv_xmeta_en) { 389 case MLX5_XMETA_MODE_LEGACY: 390 return REG_NONE; 391 case MLX5_XMETA_MODE_META16: 392 return REG_C_1; 393 case MLX5_XMETA_MODE_META32: 394 return REG_C_0; 395 } 396 break; 397 case MLX5_MTR_SFX: 398 /* 399 * If meter color and flow match share one register, flow match 400 * should use the meter color register for match. 401 */ 402 if (priv->mtr_reg_share) 403 return priv->mtr_color_reg; 404 else 405 return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : 406 REG_C_3; 407 case MLX5_MTR_COLOR: 408 MLX5_ASSERT(priv->mtr_color_reg != REG_NONE); 409 return priv->mtr_color_reg; 410 case MLX5_COPY_MARK: 411 /* 412 * Metadata COPY_MARK register using is in meter suffix sub 413 * flow while with meter. It's safe to share the same register. 414 */ 415 return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3; 416 case MLX5_APP_TAG: 417 /* 418 * If meter is enable, it will engage the register for color 419 * match and flow match. 
If meter color match is not using the 420 * REG_C_2, need to skip the REG_C_x be used by meter color 421 * match. 422 * If meter is disable, free to use all available registers. 423 */ 424 start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 : 425 (priv->mtr_reg_share ? REG_C_3 : REG_C_4); 426 skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2); 427 if (id > (REG_C_7 - start_reg)) 428 return rte_flow_error_set(error, EINVAL, 429 RTE_FLOW_ERROR_TYPE_ITEM, 430 NULL, "invalid tag id"); 431 if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NONE) 432 return rte_flow_error_set(error, ENOTSUP, 433 RTE_FLOW_ERROR_TYPE_ITEM, 434 NULL, "unsupported tag id"); 435 /* 436 * This case means meter is using the REG_C_x great than 2. 437 * Take care not to conflict with meter color REG_C_x. 438 * If the available index REG_C_y >= REG_C_x, skip the 439 * color register. 440 */ 441 if (skip_mtr_reg && config->flow_mreg_c 442 [id + start_reg - REG_C_0] >= priv->mtr_color_reg) { 443 if (config->flow_mreg_c 444 [id + 1 + start_reg - REG_C_0] != REG_NONE) 445 return config->flow_mreg_c 446 [id + 1 + start_reg - REG_C_0]; 447 return rte_flow_error_set(error, ENOTSUP, 448 RTE_FLOW_ERROR_TYPE_ITEM, 449 NULL, "unsupported tag id"); 450 } 451 return config->flow_mreg_c[id + start_reg - REG_C_0]; 452 } 453 MLX5_ASSERT(false); 454 return rte_flow_error_set(error, EINVAL, 455 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 456 NULL, "invalid feature name"); 457 } 458 459 /** 460 * Check extensive flow metadata register support. 461 * 462 * @param dev 463 * Pointer to rte_eth_dev structure. 464 * 465 * @return 466 * True if device supports extensive flow metadata register, otherwise false. 467 */ 468 bool 469 mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev) 470 { 471 struct mlx5_priv *priv = dev->data->dev_private; 472 struct mlx5_dev_config *config = &priv->config; 473 474 /* 475 * Having available reg_c can be regarded inclusively as supporting 476 * extensive flow metadata register, which could mean, 477 * - metadata register copy action by modify header. 478 * - 16 modify header actions is supported. 479 * - reg_c's are preserved across different domain (FDB and NIC) on 480 * packet loopback by flow lookup miss. 481 */ 482 return config->flow_mreg_c[2] != REG_NONE; 483 } 484 485 /** 486 * Discover the maximum number of priority available. 487 * 488 * @param[in] dev 489 * Pointer to the Ethernet device structure. 490 * 491 * @return 492 * number of supported flow priority on success, a negative errno 493 * value otherwise and rte_errno is set. 
494 */ 495 int 496 mlx5_flow_discover_priorities(struct rte_eth_dev *dev) 497 { 498 struct mlx5_priv *priv = dev->data->dev_private; 499 struct { 500 struct ibv_flow_attr attr; 501 struct ibv_flow_spec_eth eth; 502 struct ibv_flow_spec_action_drop drop; 503 } flow_attr = { 504 .attr = { 505 .num_of_specs = 2, 506 .port = (uint8_t)priv->ibv_port, 507 }, 508 .eth = { 509 .type = IBV_FLOW_SPEC_ETH, 510 .size = sizeof(struct ibv_flow_spec_eth), 511 }, 512 .drop = { 513 .size = sizeof(struct ibv_flow_spec_action_drop), 514 .type = IBV_FLOW_SPEC_ACTION_DROP, 515 }, 516 }; 517 struct ibv_flow *flow; 518 struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev); 519 uint16_t vprio[] = { 8, 16 }; 520 int i; 521 int priority = 0; 522 523 if (!drop) { 524 rte_errno = ENOTSUP; 525 return -rte_errno; 526 } 527 for (i = 0; i != RTE_DIM(vprio); i++) { 528 flow_attr.attr.priority = vprio[i] - 1; 529 flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr); 530 if (!flow) 531 break; 532 claim_zero(mlx5_glue->destroy_flow(flow)); 533 priority = vprio[i]; 534 } 535 mlx5_hrxq_drop_release(dev); 536 switch (priority) { 537 case 8: 538 priority = RTE_DIM(priority_map_3); 539 break; 540 case 16: 541 priority = RTE_DIM(priority_map_5); 542 break; 543 default: 544 rte_errno = ENOTSUP; 545 DRV_LOG(ERR, 546 "port %u verbs maximum priority: %d expected 8/16", 547 dev->data->port_id, priority); 548 return -rte_errno; 549 } 550 DRV_LOG(INFO, "port %u flow maximum priority: %d", 551 dev->data->port_id, priority); 552 return priority; 553 } 554 555 /** 556 * Adjust flow priority based on the highest layer and the request priority. 557 * 558 * @param[in] dev 559 * Pointer to the Ethernet device structure. 560 * @param[in] priority 561 * The rule base priority. 562 * @param[in] subpriority 563 * The priority based on the items. 564 * 565 * @return 566 * The new priority. 567 */ 568 uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, 569 uint32_t subpriority) 570 { 571 uint32_t res = 0; 572 struct mlx5_priv *priv = dev->data->dev_private; 573 574 switch (priv->config.flow_prio) { 575 case RTE_DIM(priority_map_3): 576 res = priority_map_3[priority][subpriority]; 577 break; 578 case RTE_DIM(priority_map_5): 579 res = priority_map_5[priority][subpriority]; 580 break; 581 } 582 return res; 583 } 584 585 /** 586 * Verify the @p item specifications (spec, last, mask) are compatible with the 587 * NIC capabilities. 588 * 589 * @param[in] item 590 * Item specification. 591 * @param[in] mask 592 * @p item->mask or flow default bit-masks. 593 * @param[in] nic_mask 594 * Bit-masks covering supported fields by the NIC to compare with user mask. 595 * @param[in] size 596 * Bit-masks size in bytes. 597 * @param[out] error 598 * Pointer to error structure. 599 * 600 * @return 601 * 0 on success, a negative errno value otherwise and rte_errno is set. 
602 */ 603 int 604 mlx5_flow_item_acceptable(const struct rte_flow_item *item, 605 const uint8_t *mask, 606 const uint8_t *nic_mask, 607 unsigned int size, 608 struct rte_flow_error *error) 609 { 610 unsigned int i; 611 612 MLX5_ASSERT(nic_mask); 613 for (i = 0; i < size; ++i) 614 if ((nic_mask[i] | mask[i]) != nic_mask[i]) 615 return rte_flow_error_set(error, ENOTSUP, 616 RTE_FLOW_ERROR_TYPE_ITEM, 617 item, 618 "mask enables non supported" 619 " bits"); 620 if (!item->spec && (item->mask || item->last)) 621 return rte_flow_error_set(error, EINVAL, 622 RTE_FLOW_ERROR_TYPE_ITEM, item, 623 "mask/last without a spec is not" 624 " supported"); 625 if (item->spec && item->last) { 626 uint8_t spec[size]; 627 uint8_t last[size]; 628 unsigned int i; 629 int ret; 630 631 for (i = 0; i < size; ++i) { 632 spec[i] = ((const uint8_t *)item->spec)[i] & mask[i]; 633 last[i] = ((const uint8_t *)item->last)[i] & mask[i]; 634 } 635 ret = memcmp(spec, last, size); 636 if (ret != 0) 637 return rte_flow_error_set(error, EINVAL, 638 RTE_FLOW_ERROR_TYPE_ITEM, 639 item, 640 "range is not valid"); 641 } 642 return 0; 643 } 644 645 /** 646 * Adjust the hash fields according to the @p flow information. 647 * 648 * @param[in] dev_flow. 649 * Pointer to the mlx5_flow. 650 * @param[in] tunnel 651 * 1 when the hash field is for a tunnel item. 652 * @param[in] layer_types 653 * ETH_RSS_* types. 654 * @param[in] hash_fields 655 * Item hash fields. 656 * 657 * @return 658 * The hash fields that should be used. 659 */ 660 uint64_t 661 mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow, 662 int tunnel __rte_unused, uint64_t layer_types, 663 uint64_t hash_fields) 664 { 665 struct rte_flow *flow = dev_flow->flow; 666 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 667 int rss_request_inner = flow->rss.level >= 2; 668 669 /* Check RSS hash level for tunnel. */ 670 if (tunnel && rss_request_inner) 671 hash_fields |= IBV_RX_HASH_INNER; 672 else if (tunnel || rss_request_inner) 673 return 0; 674 #endif 675 /* Check if requested layer matches RSS hash fields. */ 676 if (!(flow->rss.types & layer_types)) 677 return 0; 678 return hash_fields; 679 } 680 681 /** 682 * Lookup and set the ptype in the data Rx part. A single Ptype can be used, 683 * if several tunnel rules are used on this queue, the tunnel ptype will be 684 * cleared. 685 * 686 * @param rxq_ctrl 687 * Rx queue to update. 688 */ 689 static void 690 flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) 691 { 692 unsigned int i; 693 uint32_t tunnel_ptype = 0; 694 695 /* Look up for the ptype to use. */ 696 for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) { 697 if (!rxq_ctrl->flow_tunnels_n[i]) 698 continue; 699 if (!tunnel_ptype) { 700 tunnel_ptype = tunnels_info[i].ptype; 701 } else { 702 tunnel_ptype = 0; 703 break; 704 } 705 } 706 rxq_ctrl->rxq.tunnel = tunnel_ptype; 707 } 708 709 /** 710 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the devive 711 * flow. 712 * 713 * @param[in] dev 714 * Pointer to the Ethernet device structure. 715 * @param[in] dev_flow 716 * Pointer to device flow structure. 
717 */ 718 static void 719 flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) 720 { 721 struct mlx5_priv *priv = dev->data->dev_private; 722 struct rte_flow *flow = dev_flow->flow; 723 const int mark = !!(dev_flow->handle.act_flags & 724 (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); 725 const int tunnel = !!(dev_flow->handle.layers & MLX5_FLOW_LAYER_TUNNEL); 726 unsigned int i; 727 728 for (i = 0; i != flow->rss.queue_num; ++i) { 729 int idx = (*flow->rss.queue)[i]; 730 struct mlx5_rxq_ctrl *rxq_ctrl = 731 container_of((*priv->rxqs)[idx], 732 struct mlx5_rxq_ctrl, rxq); 733 734 /* 735 * To support metadata register copy on Tx loopback, 736 * this must be always enabled (metadata may arive 737 * from other port - not from local flows only. 738 */ 739 if (priv->config.dv_flow_en && 740 priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && 741 mlx5_flow_ext_mreg_supported(dev)) { 742 rxq_ctrl->rxq.mark = 1; 743 rxq_ctrl->flow_mark_n = 1; 744 } else if (mark) { 745 rxq_ctrl->rxq.mark = 1; 746 rxq_ctrl->flow_mark_n++; 747 } 748 if (tunnel) { 749 unsigned int j; 750 751 /* Increase the counter matching the flow. */ 752 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { 753 if ((tunnels_info[j].tunnel & 754 dev_flow->handle.layers) == 755 tunnels_info[j].tunnel) { 756 rxq_ctrl->flow_tunnels_n[j]++; 757 break; 758 } 759 } 760 flow_rxq_tunnel_ptype_update(rxq_ctrl); 761 } 762 } 763 } 764 765 /** 766 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow 767 * 768 * @param[in] dev 769 * Pointer to the Ethernet device structure. 770 * @param[in] flow 771 * Pointer to flow structure. 772 */ 773 static void 774 flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) 775 { 776 struct mlx5_flow *dev_flow; 777 778 LIST_FOREACH(dev_flow, &flow->dev_flows, next) 779 flow_drv_rxq_flags_set(dev, dev_flow); 780 } 781 782 /** 783 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the 784 * device flow if no other flow uses it with the same kind of request. 785 * 786 * @param dev 787 * Pointer to Ethernet device. 788 * @param[in] dev_flow 789 * Pointer to the device flow. 790 */ 791 static void 792 flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) 793 { 794 struct mlx5_priv *priv = dev->data->dev_private; 795 struct rte_flow *flow = dev_flow->flow; 796 const int mark = !!(dev_flow->handle.act_flags & 797 (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); 798 const int tunnel = !!(dev_flow->handle.layers & MLX5_FLOW_LAYER_TUNNEL); 799 unsigned int i; 800 801 MLX5_ASSERT(dev->data->dev_started); 802 for (i = 0; i != flow->rss.queue_num; ++i) { 803 int idx = (*flow->rss.queue)[i]; 804 struct mlx5_rxq_ctrl *rxq_ctrl = 805 container_of((*priv->rxqs)[idx], 806 struct mlx5_rxq_ctrl, rxq); 807 808 if (priv->config.dv_flow_en && 809 priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && 810 mlx5_flow_ext_mreg_supported(dev)) { 811 rxq_ctrl->rxq.mark = 1; 812 rxq_ctrl->flow_mark_n = 1; 813 } else if (mark) { 814 rxq_ctrl->flow_mark_n--; 815 rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n; 816 } 817 if (tunnel) { 818 unsigned int j; 819 820 /* Decrease the counter matching the flow. 
*/ 821 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { 822 if ((tunnels_info[j].tunnel & 823 dev_flow->handle.layers) == 824 tunnels_info[j].tunnel) { 825 rxq_ctrl->flow_tunnels_n[j]--; 826 break; 827 } 828 } 829 flow_rxq_tunnel_ptype_update(rxq_ctrl); 830 } 831 } 832 } 833 834 /** 835 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the 836 * @p flow if no other flow uses it with the same kind of request. 837 * 838 * @param dev 839 * Pointer to Ethernet device. 840 * @param[in] flow 841 * Pointer to the flow. 842 */ 843 static void 844 flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) 845 { 846 struct mlx5_flow *dev_flow; 847 848 LIST_FOREACH(dev_flow, &flow->dev_flows, next) 849 flow_drv_rxq_flags_trim(dev, dev_flow); 850 } 851 852 /** 853 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues. 854 * 855 * @param dev 856 * Pointer to Ethernet device. 857 */ 858 static void 859 flow_rxq_flags_clear(struct rte_eth_dev *dev) 860 { 861 struct mlx5_priv *priv = dev->data->dev_private; 862 unsigned int i; 863 864 for (i = 0; i != priv->rxqs_n; ++i) { 865 struct mlx5_rxq_ctrl *rxq_ctrl; 866 unsigned int j; 867 868 if (!(*priv->rxqs)[i]) 869 continue; 870 rxq_ctrl = container_of((*priv->rxqs)[i], 871 struct mlx5_rxq_ctrl, rxq); 872 rxq_ctrl->flow_mark_n = 0; 873 rxq_ctrl->rxq.mark = 0; 874 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) 875 rxq_ctrl->flow_tunnels_n[j] = 0; 876 rxq_ctrl->rxq.tunnel = 0; 877 } 878 } 879 880 /* 881 * return a pointer to the desired action in the list of actions. 882 * 883 * @param[in] actions 884 * The list of actions to search the action in. 885 * @param[in] action 886 * The action to find. 887 * 888 * @return 889 * Pointer to the action in the list, if found. NULL otherwise. 890 */ 891 const struct rte_flow_action * 892 mlx5_flow_find_action(const struct rte_flow_action *actions, 893 enum rte_flow_action_type action) 894 { 895 if (actions == NULL) 896 return NULL; 897 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) 898 if (actions->type == action) 899 return actions; 900 return NULL; 901 } 902 903 /* 904 * Validate the flag action. 905 * 906 * @param[in] action_flags 907 * Bit-fields that holds the actions detected until now. 908 * @param[in] attr 909 * Attributes of flow that includes this action. 910 * @param[out] error 911 * Pointer to error structure. 912 * 913 * @return 914 * 0 on success, a negative errno value otherwise and rte_errno is set. 915 */ 916 int 917 mlx5_flow_validate_action_flag(uint64_t action_flags, 918 const struct rte_flow_attr *attr, 919 struct rte_flow_error *error) 920 { 921 if (action_flags & MLX5_FLOW_ACTION_MARK) 922 return rte_flow_error_set(error, EINVAL, 923 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 924 "can't mark and flag in same flow"); 925 if (action_flags & MLX5_FLOW_ACTION_FLAG) 926 return rte_flow_error_set(error, EINVAL, 927 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 928 "can't have 2 flag" 929 " actions in same flow"); 930 if (attr->egress) 931 return rte_flow_error_set(error, ENOTSUP, 932 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 933 "flag action not supported for " 934 "egress"); 935 return 0; 936 } 937 938 /* 939 * Validate the mark action. 940 * 941 * @param[in] action 942 * Pointer to the queue action. 943 * @param[in] action_flags 944 * Bit-fields that holds the actions detected until now. 945 * @param[in] attr 946 * Attributes of flow that includes this action. 947 * @param[out] error 948 * Pointer to error structure. 
949 * 950 * @return 951 * 0 on success, a negative errno value otherwise and rte_errno is set. 952 */ 953 int 954 mlx5_flow_validate_action_mark(const struct rte_flow_action *action, 955 uint64_t action_flags, 956 const struct rte_flow_attr *attr, 957 struct rte_flow_error *error) 958 { 959 const struct rte_flow_action_mark *mark = action->conf; 960 961 if (!mark) 962 return rte_flow_error_set(error, EINVAL, 963 RTE_FLOW_ERROR_TYPE_ACTION, 964 action, 965 "configuration cannot be null"); 966 if (mark->id >= MLX5_FLOW_MARK_MAX) 967 return rte_flow_error_set(error, EINVAL, 968 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 969 &mark->id, 970 "mark id must in 0 <= id < " 971 RTE_STR(MLX5_FLOW_MARK_MAX)); 972 if (action_flags & MLX5_FLOW_ACTION_FLAG) 973 return rte_flow_error_set(error, EINVAL, 974 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 975 "can't flag and mark in same flow"); 976 if (action_flags & MLX5_FLOW_ACTION_MARK) 977 return rte_flow_error_set(error, EINVAL, 978 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 979 "can't have 2 mark actions in same" 980 " flow"); 981 if (attr->egress) 982 return rte_flow_error_set(error, ENOTSUP, 983 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 984 "mark action not supported for " 985 "egress"); 986 return 0; 987 } 988 989 /* 990 * Validate the drop action. 991 * 992 * @param[in] action_flags 993 * Bit-fields that holds the actions detected until now. 994 * @param[in] attr 995 * Attributes of flow that includes this action. 996 * @param[out] error 997 * Pointer to error structure. 998 * 999 * @return 1000 * 0 on success, a negative errno value otherwise and rte_errno is set. 1001 */ 1002 int 1003 mlx5_flow_validate_action_drop(uint64_t action_flags __rte_unused, 1004 const struct rte_flow_attr *attr, 1005 struct rte_flow_error *error) 1006 { 1007 if (attr->egress) 1008 return rte_flow_error_set(error, ENOTSUP, 1009 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1010 "drop action not supported for " 1011 "egress"); 1012 return 0; 1013 } 1014 1015 /* 1016 * Validate the queue action. 1017 * 1018 * @param[in] action 1019 * Pointer to the queue action. 1020 * @param[in] action_flags 1021 * Bit-fields that holds the actions detected until now. 1022 * @param[in] dev 1023 * Pointer to the Ethernet device structure. 1024 * @param[in] attr 1025 * Attributes of flow that includes this action. 1026 * @param[out] error 1027 * Pointer to error structure. 1028 * 1029 * @return 1030 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1031 */ 1032 int 1033 mlx5_flow_validate_action_queue(const struct rte_flow_action *action, 1034 uint64_t action_flags, 1035 struct rte_eth_dev *dev, 1036 const struct rte_flow_attr *attr, 1037 struct rte_flow_error *error) 1038 { 1039 struct mlx5_priv *priv = dev->data->dev_private; 1040 const struct rte_flow_action_queue *queue = action->conf; 1041 1042 if (action_flags & MLX5_FLOW_FATE_ACTIONS) 1043 return rte_flow_error_set(error, EINVAL, 1044 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1045 "can't have 2 fate actions in" 1046 " same flow"); 1047 if (!priv->rxqs_n) 1048 return rte_flow_error_set(error, EINVAL, 1049 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1050 NULL, "No Rx queues configured"); 1051 if (queue->index >= priv->rxqs_n) 1052 return rte_flow_error_set(error, EINVAL, 1053 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1054 &queue->index, 1055 "queue index out of range"); 1056 if (!(*priv->rxqs)[queue->index]) 1057 return rte_flow_error_set(error, EINVAL, 1058 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1059 &queue->index, 1060 "queue is not configured"); 1061 if (attr->egress) 1062 return rte_flow_error_set(error, ENOTSUP, 1063 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1064 "queue action not supported for " 1065 "egress"); 1066 return 0; 1067 } 1068 1069 /* 1070 * Validate the rss action. 1071 * 1072 * @param[in] action 1073 * Pointer to the queue action. 1074 * @param[in] action_flags 1075 * Bit-fields that holds the actions detected until now. 1076 * @param[in] dev 1077 * Pointer to the Ethernet device structure. 1078 * @param[in] attr 1079 * Attributes of flow that includes this action. 1080 * @param[in] item_flags 1081 * Items that were detected. 1082 * @param[out] error 1083 * Pointer to error structure. 1084 * 1085 * @return 1086 * 0 on success, a negative errno value otherwise and rte_errno is set. 1087 */ 1088 int 1089 mlx5_flow_validate_action_rss(const struct rte_flow_action *action, 1090 uint64_t action_flags, 1091 struct rte_eth_dev *dev, 1092 const struct rte_flow_attr *attr, 1093 uint64_t item_flags, 1094 struct rte_flow_error *error) 1095 { 1096 struct mlx5_priv *priv = dev->data->dev_private; 1097 const struct rte_flow_action_rss *rss = action->conf; 1098 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1099 unsigned int i; 1100 1101 if (action_flags & MLX5_FLOW_FATE_ACTIONS) 1102 return rte_flow_error_set(error, EINVAL, 1103 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1104 "can't have 2 fate actions" 1105 " in same flow"); 1106 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT && 1107 rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) 1108 return rte_flow_error_set(error, ENOTSUP, 1109 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1110 &rss->func, 1111 "RSS hash function not supported"); 1112 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 1113 if (rss->level > 2) 1114 #else 1115 if (rss->level > 1) 1116 #endif 1117 return rte_flow_error_set(error, ENOTSUP, 1118 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1119 &rss->level, 1120 "tunnel RSS is not supported"); 1121 /* allow RSS key_len 0 in case of NULL (default) RSS key. 
*/ 1122 if (rss->key_len == 0 && rss->key != NULL) 1123 return rte_flow_error_set(error, ENOTSUP, 1124 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1125 &rss->key_len, 1126 "RSS hash key length 0"); 1127 if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN) 1128 return rte_flow_error_set(error, ENOTSUP, 1129 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1130 &rss->key_len, 1131 "RSS hash key too small"); 1132 if (rss->key_len > MLX5_RSS_HASH_KEY_LEN) 1133 return rte_flow_error_set(error, ENOTSUP, 1134 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1135 &rss->key_len, 1136 "RSS hash key too large"); 1137 if (rss->queue_num > priv->config.ind_table_max_size) 1138 return rte_flow_error_set(error, ENOTSUP, 1139 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1140 &rss->queue_num, 1141 "number of queues too large"); 1142 if (rss->types & MLX5_RSS_HF_MASK) 1143 return rte_flow_error_set(error, ENOTSUP, 1144 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1145 &rss->types, 1146 "some RSS protocols are not" 1147 " supported"); 1148 if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) && 1149 !(rss->types & ETH_RSS_IP)) 1150 return rte_flow_error_set(error, EINVAL, 1151 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 1152 "L3 partial RSS requested but L3 RSS" 1153 " type not specified"); 1154 if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) && 1155 !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP))) 1156 return rte_flow_error_set(error, EINVAL, 1157 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 1158 "L4 partial RSS requested but L4 RSS" 1159 " type not specified"); 1160 if (!priv->rxqs_n) 1161 return rte_flow_error_set(error, EINVAL, 1162 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1163 NULL, "No Rx queues configured"); 1164 if (!rss->queue_num) 1165 return rte_flow_error_set(error, EINVAL, 1166 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1167 NULL, "No queues configured"); 1168 for (i = 0; i != rss->queue_num; ++i) { 1169 if (rss->queue[i] >= priv->rxqs_n) 1170 return rte_flow_error_set 1171 (error, EINVAL, 1172 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1173 &rss->queue[i], "queue index out of range"); 1174 if (!(*priv->rxqs)[rss->queue[i]]) 1175 return rte_flow_error_set 1176 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1177 &rss->queue[i], "queue is not configured"); 1178 } 1179 if (attr->egress) 1180 return rte_flow_error_set(error, ENOTSUP, 1181 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1182 "rss action not supported for " 1183 "egress"); 1184 if (rss->level > 1 && !tunnel) 1185 return rte_flow_error_set(error, EINVAL, 1186 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 1187 "inner RSS is not supported for " 1188 "non-tunnel flows"); 1189 return 0; 1190 } 1191 1192 /* 1193 * Validate the count action. 1194 * 1195 * @param[in] dev 1196 * Pointer to the Ethernet device structure. 1197 * @param[in] attr 1198 * Attributes of flow that includes this action. 1199 * @param[out] error 1200 * Pointer to error structure. 1201 * 1202 * @return 1203 * 0 on success, a negative errno value otherwise and rte_errno is set. 1204 */ 1205 int 1206 mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused, 1207 const struct rte_flow_attr *attr, 1208 struct rte_flow_error *error) 1209 { 1210 if (attr->egress) 1211 return rte_flow_error_set(error, ENOTSUP, 1212 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1213 "count action not supported for " 1214 "egress"); 1215 return 0; 1216 } 1217 1218 /** 1219 * Verify the @p attributes will be correctly understood by the NIC and store 1220 * them in the @p flow if everything is correct. 1221 * 1222 * @param[in] dev 1223 * Pointer to the Ethernet device structure. 
1224 * @param[in] attributes 1225 * Pointer to flow attributes 1226 * @param[out] error 1227 * Pointer to error structure. 1228 * 1229 * @return 1230 * 0 on success, a negative errno value otherwise and rte_errno is set. 1231 */ 1232 int 1233 mlx5_flow_validate_attributes(struct rte_eth_dev *dev, 1234 const struct rte_flow_attr *attributes, 1235 struct rte_flow_error *error) 1236 { 1237 struct mlx5_priv *priv = dev->data->dev_private; 1238 uint32_t priority_max = priv->config.flow_prio - 1; 1239 1240 if (attributes->group) 1241 return rte_flow_error_set(error, ENOTSUP, 1242 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, 1243 NULL, "groups is not supported"); 1244 if (attributes->priority != MLX5_FLOW_PRIO_RSVD && 1245 attributes->priority >= priority_max) 1246 return rte_flow_error_set(error, ENOTSUP, 1247 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, 1248 NULL, "priority out of range"); 1249 if (attributes->egress) 1250 return rte_flow_error_set(error, ENOTSUP, 1251 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1252 "egress is not supported"); 1253 if (attributes->transfer && !priv->config.dv_esw_en) 1254 return rte_flow_error_set(error, ENOTSUP, 1255 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, 1256 NULL, "transfer is not supported"); 1257 if (!attributes->ingress) 1258 return rte_flow_error_set(error, EINVAL, 1259 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, 1260 NULL, 1261 "ingress attribute is mandatory"); 1262 return 0; 1263 } 1264 1265 /** 1266 * Validate ICMP6 item. 1267 * 1268 * @param[in] item 1269 * Item specification. 1270 * @param[in] item_flags 1271 * Bit-fields that holds the items detected until now. 1272 * @param[out] error 1273 * Pointer to error structure. 1274 * 1275 * @return 1276 * 0 on success, a negative errno value otherwise and rte_errno is set. 1277 */ 1278 int 1279 mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item, 1280 uint64_t item_flags, 1281 uint8_t target_protocol, 1282 struct rte_flow_error *error) 1283 { 1284 const struct rte_flow_item_icmp6 *mask = item->mask; 1285 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1286 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : 1287 MLX5_FLOW_LAYER_OUTER_L3_IPV6; 1288 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1289 MLX5_FLOW_LAYER_OUTER_L4; 1290 int ret; 1291 1292 if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6) 1293 return rte_flow_error_set(error, EINVAL, 1294 RTE_FLOW_ERROR_TYPE_ITEM, item, 1295 "protocol filtering not compatible" 1296 " with ICMP6 layer"); 1297 if (!(item_flags & l3m)) 1298 return rte_flow_error_set(error, EINVAL, 1299 RTE_FLOW_ERROR_TYPE_ITEM, item, 1300 "IPv6 is mandatory to filter on" 1301 " ICMP6"); 1302 if (item_flags & l4m) 1303 return rte_flow_error_set(error, EINVAL, 1304 RTE_FLOW_ERROR_TYPE_ITEM, item, 1305 "multiple L4 layers not supported"); 1306 if (!mask) 1307 mask = &rte_flow_item_icmp6_mask; 1308 ret = mlx5_flow_item_acceptable 1309 (item, (const uint8_t *)mask, 1310 (const uint8_t *)&rte_flow_item_icmp6_mask, 1311 sizeof(struct rte_flow_item_icmp6), error); 1312 if (ret < 0) 1313 return ret; 1314 return 0; 1315 } 1316 1317 /** 1318 * Validate ICMP item. 1319 * 1320 * @param[in] item 1321 * Item specification. 1322 * @param[in] item_flags 1323 * Bit-fields that holds the items detected until now. 1324 * @param[out] error 1325 * Pointer to error structure. 1326 * 1327 * @return 1328 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1329 */ 1330 int 1331 mlx5_flow_validate_item_icmp(const struct rte_flow_item *item, 1332 uint64_t item_flags, 1333 uint8_t target_protocol, 1334 struct rte_flow_error *error) 1335 { 1336 const struct rte_flow_item_icmp *mask = item->mask; 1337 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1338 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : 1339 MLX5_FLOW_LAYER_OUTER_L3_IPV4; 1340 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1341 MLX5_FLOW_LAYER_OUTER_L4; 1342 int ret; 1343 1344 if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP) 1345 return rte_flow_error_set(error, EINVAL, 1346 RTE_FLOW_ERROR_TYPE_ITEM, item, 1347 "protocol filtering not compatible" 1348 " with ICMP layer"); 1349 if (!(item_flags & l3m)) 1350 return rte_flow_error_set(error, EINVAL, 1351 RTE_FLOW_ERROR_TYPE_ITEM, item, 1352 "IPv4 is mandatory to filter" 1353 " on ICMP"); 1354 if (item_flags & l4m) 1355 return rte_flow_error_set(error, EINVAL, 1356 RTE_FLOW_ERROR_TYPE_ITEM, item, 1357 "multiple L4 layers not supported"); 1358 if (!mask) 1359 mask = &rte_flow_item_icmp_mask; 1360 ret = mlx5_flow_item_acceptable 1361 (item, (const uint8_t *)mask, 1362 (const uint8_t *)&rte_flow_item_icmp_mask, 1363 sizeof(struct rte_flow_item_icmp), error); 1364 if (ret < 0) 1365 return ret; 1366 return 0; 1367 } 1368 1369 /** 1370 * Validate Ethernet item. 1371 * 1372 * @param[in] item 1373 * Item specification. 1374 * @param[in] item_flags 1375 * Bit-fields that holds the items detected until now. 1376 * @param[out] error 1377 * Pointer to error structure. 1378 * 1379 * @return 1380 * 0 on success, a negative errno value otherwise and rte_errno is set. 1381 */ 1382 int 1383 mlx5_flow_validate_item_eth(const struct rte_flow_item *item, 1384 uint64_t item_flags, 1385 struct rte_flow_error *error) 1386 { 1387 const struct rte_flow_item_eth *mask = item->mask; 1388 const struct rte_flow_item_eth nic_mask = { 1389 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", 1390 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", 1391 .type = RTE_BE16(0xffff), 1392 }; 1393 int ret; 1394 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1395 const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : 1396 MLX5_FLOW_LAYER_OUTER_L2; 1397 1398 if (item_flags & ethm) 1399 return rte_flow_error_set(error, ENOTSUP, 1400 RTE_FLOW_ERROR_TYPE_ITEM, item, 1401 "multiple L2 layers not supported"); 1402 if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) || 1403 (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3))) 1404 return rte_flow_error_set(error, EINVAL, 1405 RTE_FLOW_ERROR_TYPE_ITEM, item, 1406 "L2 layer should not follow " 1407 "L3 layers"); 1408 if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) || 1409 (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN))) 1410 return rte_flow_error_set(error, EINVAL, 1411 RTE_FLOW_ERROR_TYPE_ITEM, item, 1412 "L2 layer should not follow VLAN"); 1413 if (!mask) 1414 mask = &rte_flow_item_eth_mask; 1415 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1416 (const uint8_t *)&nic_mask, 1417 sizeof(struct rte_flow_item_eth), 1418 error); 1419 return ret; 1420 } 1421 1422 /** 1423 * Validate VLAN item. 1424 * 1425 * @param[in] item 1426 * Item specification. 1427 * @param[in] item_flags 1428 * Bit-fields that holds the items detected until now. 1429 * @param[in] dev 1430 * Ethernet device flow is being created on. 1431 * @param[out] error 1432 * Pointer to error structure. 
1433 * 1434 * @return 1435 * 0 on success, a negative errno value otherwise and rte_errno is set. 1436 */ 1437 int 1438 mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, 1439 uint64_t item_flags, 1440 struct rte_eth_dev *dev, 1441 struct rte_flow_error *error) 1442 { 1443 const struct rte_flow_item_vlan *spec = item->spec; 1444 const struct rte_flow_item_vlan *mask = item->mask; 1445 const struct rte_flow_item_vlan nic_mask = { 1446 .tci = RTE_BE16(UINT16_MAX), 1447 .inner_type = RTE_BE16(UINT16_MAX), 1448 }; 1449 uint16_t vlan_tag = 0; 1450 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1451 int ret; 1452 const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 | 1453 MLX5_FLOW_LAYER_INNER_L4) : 1454 (MLX5_FLOW_LAYER_OUTER_L3 | 1455 MLX5_FLOW_LAYER_OUTER_L4); 1456 const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN : 1457 MLX5_FLOW_LAYER_OUTER_VLAN; 1458 1459 if (item_flags & vlanm) 1460 return rte_flow_error_set(error, EINVAL, 1461 RTE_FLOW_ERROR_TYPE_ITEM, item, 1462 "multiple VLAN layers not supported"); 1463 else if ((item_flags & l34m) != 0) 1464 return rte_flow_error_set(error, EINVAL, 1465 RTE_FLOW_ERROR_TYPE_ITEM, item, 1466 "VLAN cannot follow L3/L4 layer"); 1467 if (!mask) 1468 mask = &rte_flow_item_vlan_mask; 1469 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1470 (const uint8_t *)&nic_mask, 1471 sizeof(struct rte_flow_item_vlan), 1472 error); 1473 if (ret) 1474 return ret; 1475 if (!tunnel && mask->tci != RTE_BE16(0x0fff)) { 1476 struct mlx5_priv *priv = dev->data->dev_private; 1477 1478 if (priv->vmwa_context) { 1479 /* 1480 * Non-NULL context means we have a virtual machine 1481 * and SR-IOV enabled, we have to create VLAN interface 1482 * to make hypervisor to setup E-Switch vport 1483 * context correctly. We avoid creating the multiple 1484 * VLAN interfaces, so we cannot support VLAN tag mask. 1485 */ 1486 return rte_flow_error_set(error, EINVAL, 1487 RTE_FLOW_ERROR_TYPE_ITEM, 1488 item, 1489 "VLAN tag mask is not" 1490 " supported in virtual" 1491 " environment"); 1492 } 1493 } 1494 if (spec) { 1495 vlan_tag = spec->tci; 1496 vlan_tag &= mask->tci; 1497 } 1498 /* 1499 * From verbs perspective an empty VLAN is equivalent 1500 * to a packet without VLAN layer. 1501 */ 1502 if (!vlan_tag) 1503 return rte_flow_error_set(error, EINVAL, 1504 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, 1505 item->spec, 1506 "VLAN cannot be empty"); 1507 return 0; 1508 } 1509 1510 /** 1511 * Validate IPV4 item. 1512 * 1513 * @param[in] item 1514 * Item specification. 1515 * @param[in] item_flags 1516 * Bit-fields that holds the items detected until now. 1517 * @param[in] acc_mask 1518 * Acceptable mask, if NULL default internal default mask 1519 * will be used to check whether item fields are supported. 1520 * @param[out] error 1521 * Pointer to error structure. 1522 * 1523 * @return 1524 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1525 */ 1526 int 1527 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, 1528 uint64_t item_flags, 1529 uint64_t last_item, 1530 uint16_t ether_type, 1531 const struct rte_flow_item_ipv4 *acc_mask, 1532 struct rte_flow_error *error) 1533 { 1534 const struct rte_flow_item_ipv4 *mask = item->mask; 1535 const struct rte_flow_item_ipv4 *spec = item->spec; 1536 const struct rte_flow_item_ipv4 nic_mask = { 1537 .hdr = { 1538 .src_addr = RTE_BE32(0xffffffff), 1539 .dst_addr = RTE_BE32(0xffffffff), 1540 .type_of_service = 0xff, 1541 .next_proto_id = 0xff, 1542 }, 1543 }; 1544 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1545 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1546 MLX5_FLOW_LAYER_OUTER_L3; 1547 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1548 MLX5_FLOW_LAYER_OUTER_L4; 1549 int ret; 1550 uint8_t next_proto = 0xFF; 1551 const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 | 1552 MLX5_FLOW_LAYER_OUTER_VLAN | 1553 MLX5_FLOW_LAYER_INNER_VLAN); 1554 1555 if ((last_item & l2_vlan) && ether_type && 1556 ether_type != RTE_ETHER_TYPE_IPV4) 1557 return rte_flow_error_set(error, EINVAL, 1558 RTE_FLOW_ERROR_TYPE_ITEM, item, 1559 "IPv4 cannot follow L2/VLAN layer " 1560 "which ether type is not IPv4"); 1561 if (item_flags & MLX5_FLOW_LAYER_IPIP) { 1562 if (mask && spec) 1563 next_proto = mask->hdr.next_proto_id & 1564 spec->hdr.next_proto_id; 1565 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) 1566 return rte_flow_error_set(error, EINVAL, 1567 RTE_FLOW_ERROR_TYPE_ITEM, 1568 item, 1569 "multiple tunnel " 1570 "not supported"); 1571 } 1572 if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) 1573 return rte_flow_error_set(error, EINVAL, 1574 RTE_FLOW_ERROR_TYPE_ITEM, item, 1575 "wrong tunnel type - IPv6 specified " 1576 "but IPv4 item provided"); 1577 if (item_flags & l3m) 1578 return rte_flow_error_set(error, ENOTSUP, 1579 RTE_FLOW_ERROR_TYPE_ITEM, item, 1580 "multiple L3 layers not supported"); 1581 else if (item_flags & l4m) 1582 return rte_flow_error_set(error, EINVAL, 1583 RTE_FLOW_ERROR_TYPE_ITEM, item, 1584 "L3 cannot follow an L4 layer."); 1585 else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && 1586 !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) 1587 return rte_flow_error_set(error, EINVAL, 1588 RTE_FLOW_ERROR_TYPE_ITEM, item, 1589 "L3 cannot follow an NVGRE layer."); 1590 if (!mask) 1591 mask = &rte_flow_item_ipv4_mask; 1592 else if (mask->hdr.next_proto_id != 0 && 1593 mask->hdr.next_proto_id != 0xff) 1594 return rte_flow_error_set(error, EINVAL, 1595 RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask, 1596 "partial mask is not supported" 1597 " for protocol"); 1598 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1599 acc_mask ? (const uint8_t *)acc_mask 1600 : (const uint8_t *)&nic_mask, 1601 sizeof(struct rte_flow_item_ipv4), 1602 error); 1603 if (ret < 0) 1604 return ret; 1605 return 0; 1606 } 1607 1608 /** 1609 * Validate IPV6 item. 1610 * 1611 * @param[in] item 1612 * Item specification. 1613 * @param[in] item_flags 1614 * Bit-fields that holds the items detected until now. 1615 * @param[in] acc_mask 1616 * Acceptable mask, if NULL default internal default mask 1617 * will be used to check whether item fields are supported. 1618 * @param[out] error 1619 * Pointer to error structure. 1620 * 1621 * @return 1622 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1623 */ 1624 int 1625 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, 1626 uint64_t item_flags, 1627 uint64_t last_item, 1628 uint16_t ether_type, 1629 const struct rte_flow_item_ipv6 *acc_mask, 1630 struct rte_flow_error *error) 1631 { 1632 const struct rte_flow_item_ipv6 *mask = item->mask; 1633 const struct rte_flow_item_ipv6 *spec = item->spec; 1634 const struct rte_flow_item_ipv6 nic_mask = { 1635 .hdr = { 1636 .src_addr = 1637 "\xff\xff\xff\xff\xff\xff\xff\xff" 1638 "\xff\xff\xff\xff\xff\xff\xff\xff", 1639 .dst_addr = 1640 "\xff\xff\xff\xff\xff\xff\xff\xff" 1641 "\xff\xff\xff\xff\xff\xff\xff\xff", 1642 .vtc_flow = RTE_BE32(0xffffffff), 1643 .proto = 0xff, 1644 }, 1645 }; 1646 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1647 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1648 MLX5_FLOW_LAYER_OUTER_L3; 1649 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1650 MLX5_FLOW_LAYER_OUTER_L4; 1651 int ret; 1652 uint8_t next_proto = 0xFF; 1653 const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 | 1654 MLX5_FLOW_LAYER_OUTER_VLAN | 1655 MLX5_FLOW_LAYER_INNER_VLAN); 1656 1657 if ((last_item & l2_vlan) && ether_type && 1658 ether_type != RTE_ETHER_TYPE_IPV6) 1659 return rte_flow_error_set(error, EINVAL, 1660 RTE_FLOW_ERROR_TYPE_ITEM, item, 1661 "IPv6 cannot follow L2/VLAN layer " 1662 "which ether type is not IPv6"); 1663 if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) { 1664 if (mask && spec) 1665 next_proto = mask->hdr.proto & spec->hdr.proto; 1666 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) 1667 return rte_flow_error_set(error, EINVAL, 1668 RTE_FLOW_ERROR_TYPE_ITEM, 1669 item, 1670 "multiple tunnel " 1671 "not supported"); 1672 } 1673 if (item_flags & MLX5_FLOW_LAYER_IPIP) 1674 return rte_flow_error_set(error, EINVAL, 1675 RTE_FLOW_ERROR_TYPE_ITEM, item, 1676 "wrong tunnel type - IPv4 specified " 1677 "but IPv6 item provided"); 1678 if (item_flags & l3m) 1679 return rte_flow_error_set(error, ENOTSUP, 1680 RTE_FLOW_ERROR_TYPE_ITEM, item, 1681 "multiple L3 layers not supported"); 1682 else if (item_flags & l4m) 1683 return rte_flow_error_set(error, EINVAL, 1684 RTE_FLOW_ERROR_TYPE_ITEM, item, 1685 "L3 cannot follow an L4 layer."); 1686 else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && 1687 !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) 1688 return rte_flow_error_set(error, EINVAL, 1689 RTE_FLOW_ERROR_TYPE_ITEM, item, 1690 "L3 cannot follow an NVGRE layer."); 1691 if (!mask) 1692 mask = &rte_flow_item_ipv6_mask; 1693 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1694 acc_mask ? (const uint8_t *)acc_mask 1695 : (const uint8_t *)&nic_mask, 1696 sizeof(struct rte_flow_item_ipv6), 1697 error); 1698 if (ret < 0) 1699 return ret; 1700 return 0; 1701 } 1702 1703 /** 1704 * Validate UDP item. 1705 * 1706 * @param[in] item 1707 * Item specification. 1708 * @param[in] item_flags 1709 * Bit-fields that holds the items detected until now. 1710 * @param[in] target_protocol 1711 * The next protocol in the previous item. 1712 * @param[in] flow_mask 1713 * mlx5 flow-specific (DV, verbs, etc.) supported header fields mask. 1714 * @param[out] error 1715 * Pointer to error structure. 1716 * 1717 * @return 1718 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1719 */ 1720 int 1721 mlx5_flow_validate_item_udp(const struct rte_flow_item *item, 1722 uint64_t item_flags, 1723 uint8_t target_protocol, 1724 struct rte_flow_error *error) 1725 { 1726 const struct rte_flow_item_udp *mask = item->mask; 1727 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1728 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1729 MLX5_FLOW_LAYER_OUTER_L3; 1730 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1731 MLX5_FLOW_LAYER_OUTER_L4; 1732 int ret; 1733 1734 if (target_protocol != 0xff && target_protocol != IPPROTO_UDP) 1735 return rte_flow_error_set(error, EINVAL, 1736 RTE_FLOW_ERROR_TYPE_ITEM, item, 1737 "protocol filtering not compatible" 1738 " with UDP layer"); 1739 if (!(item_flags & l3m)) 1740 return rte_flow_error_set(error, EINVAL, 1741 RTE_FLOW_ERROR_TYPE_ITEM, item, 1742 "L3 is mandatory to filter on L4"); 1743 if (item_flags & l4m) 1744 return rte_flow_error_set(error, EINVAL, 1745 RTE_FLOW_ERROR_TYPE_ITEM, item, 1746 "multiple L4 layers not supported"); 1747 if (!mask) 1748 mask = &rte_flow_item_udp_mask; 1749 ret = mlx5_flow_item_acceptable 1750 (item, (const uint8_t *)mask, 1751 (const uint8_t *)&rte_flow_item_udp_mask, 1752 sizeof(struct rte_flow_item_udp), error); 1753 if (ret < 0) 1754 return ret; 1755 return 0; 1756 } 1757 1758 /** 1759 * Validate TCP item. 1760 * 1761 * @param[in] item 1762 * Item specification. 1763 * @param[in] item_flags 1764 * Bit-fields that holds the items detected until now. 1765 * @param[in] target_protocol 1766 * The next protocol in the previous item. 1767 * @param[out] error 1768 * Pointer to error structure. 1769 * 1770 * @return 1771 * 0 on success, a negative errno value otherwise and rte_errno is set. 1772 */ 1773 int 1774 mlx5_flow_validate_item_tcp(const struct rte_flow_item *item, 1775 uint64_t item_flags, 1776 uint8_t target_protocol, 1777 const struct rte_flow_item_tcp *flow_mask, 1778 struct rte_flow_error *error) 1779 { 1780 const struct rte_flow_item_tcp *mask = item->mask; 1781 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1782 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1783 MLX5_FLOW_LAYER_OUTER_L3; 1784 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1785 MLX5_FLOW_LAYER_OUTER_L4; 1786 int ret; 1787 1788 MLX5_ASSERT(flow_mask); 1789 if (target_protocol != 0xff && target_protocol != IPPROTO_TCP) 1790 return rte_flow_error_set(error, EINVAL, 1791 RTE_FLOW_ERROR_TYPE_ITEM, item, 1792 "protocol filtering not compatible" 1793 " with TCP layer"); 1794 if (!(item_flags & l3m)) 1795 return rte_flow_error_set(error, EINVAL, 1796 RTE_FLOW_ERROR_TYPE_ITEM, item, 1797 "L3 is mandatory to filter on L4"); 1798 if (item_flags & l4m) 1799 return rte_flow_error_set(error, EINVAL, 1800 RTE_FLOW_ERROR_TYPE_ITEM, item, 1801 "multiple L4 layers not supported"); 1802 if (!mask) 1803 mask = &rte_flow_item_tcp_mask; 1804 ret = mlx5_flow_item_acceptable 1805 (item, (const uint8_t *)mask, 1806 (const uint8_t *)flow_mask, 1807 sizeof(struct rte_flow_item_tcp), error); 1808 if (ret < 0) 1809 return ret; 1810 return 0; 1811 } 1812 1813 /** 1814 * Validate VXLAN item. 1815 * 1816 * @param[in] item 1817 * Item specification. 1818 * @param[in] item_flags 1819 * Bit-fields that holds the items detected until now. 1820 * @param[in] target_protocol 1821 * The next protocol in the previous item. 1822 * @param[out] error 1823 * Pointer to error structure. 1824 * 1825 * @return 1826 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1827 */ 1828 int 1829 mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, 1830 uint64_t item_flags, 1831 struct rte_flow_error *error) 1832 { 1833 const struct rte_flow_item_vxlan *spec = item->spec; 1834 const struct rte_flow_item_vxlan *mask = item->mask; 1835 int ret; 1836 union vni { 1837 uint32_t vlan_id; 1838 uint8_t vni[4]; 1839 } id = { .vlan_id = 0, }; 1840 1841 1842 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 1843 return rte_flow_error_set(error, ENOTSUP, 1844 RTE_FLOW_ERROR_TYPE_ITEM, item, 1845 "multiple tunnel layers not" 1846 " supported"); 1847 /* 1848 * Verify only UDPv4 is present as defined in 1849 * https://tools.ietf.org/html/rfc7348 1850 */ 1851 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 1852 return rte_flow_error_set(error, EINVAL, 1853 RTE_FLOW_ERROR_TYPE_ITEM, item, 1854 "no outer UDP layer found"); 1855 if (!mask) 1856 mask = &rte_flow_item_vxlan_mask; 1857 ret = mlx5_flow_item_acceptable 1858 (item, (const uint8_t *)mask, 1859 (const uint8_t *)&rte_flow_item_vxlan_mask, 1860 sizeof(struct rte_flow_item_vxlan), 1861 error); 1862 if (ret < 0) 1863 return ret; 1864 if (spec) { 1865 memcpy(&id.vni[1], spec->vni, 3); 1866 memcpy(&id.vni[1], mask->vni, 3); 1867 } 1868 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 1869 return rte_flow_error_set(error, ENOTSUP, 1870 RTE_FLOW_ERROR_TYPE_ITEM, item, 1871 "VXLAN tunnel must be fully defined"); 1872 return 0; 1873 } 1874 1875 /** 1876 * Validate VXLAN_GPE item. 1877 * 1878 * @param[in] item 1879 * Item specification. 1880 * @param[in] item_flags 1881 * Bit-fields that holds the items detected until now. 1882 * @param[in] priv 1883 * Pointer to the private data structure. 1884 * @param[in] target_protocol 1885 * The next protocol in the previous item. 1886 * @param[out] error 1887 * Pointer to error structure. 1888 * 1889 * @return 1890 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1891 */ 1892 int 1893 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, 1894 uint64_t item_flags, 1895 struct rte_eth_dev *dev, 1896 struct rte_flow_error *error) 1897 { 1898 struct mlx5_priv *priv = dev->data->dev_private; 1899 const struct rte_flow_item_vxlan_gpe *spec = item->spec; 1900 const struct rte_flow_item_vxlan_gpe *mask = item->mask; 1901 int ret; 1902 union vni { 1903 uint32_t vlan_id; 1904 uint8_t vni[4]; 1905 } id = { .vlan_id = 0, }; 1906 1907 if (!priv->config.l3_vxlan_en) 1908 return rte_flow_error_set(error, ENOTSUP, 1909 RTE_FLOW_ERROR_TYPE_ITEM, item, 1910 "L3 VXLAN is not enabled by device" 1911 " parameter and/or not configured in" 1912 " firmware"); 1913 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 1914 return rte_flow_error_set(error, ENOTSUP, 1915 RTE_FLOW_ERROR_TYPE_ITEM, item, 1916 "multiple tunnel layers not" 1917 " supported"); 1918 /* 1919 * Verify only UDPv4 is present as defined in 1920 * https://tools.ietf.org/html/rfc7348 1921 */ 1922 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 1923 return rte_flow_error_set(error, EINVAL, 1924 RTE_FLOW_ERROR_TYPE_ITEM, item, 1925 "no outer UDP layer found"); 1926 if (!mask) 1927 mask = &rte_flow_item_vxlan_gpe_mask; 1928 ret = mlx5_flow_item_acceptable 1929 (item, (const uint8_t *)mask, 1930 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask, 1931 sizeof(struct rte_flow_item_vxlan_gpe), 1932 error); 1933 if (ret < 0) 1934 return ret; 1935 if (spec) { 1936 if (spec->protocol) 1937 return rte_flow_error_set(error, ENOTSUP, 1938 RTE_FLOW_ERROR_TYPE_ITEM, 1939 item, 1940 "VxLAN-GPE protocol" 1941 " not supported"); 1942 memcpy(&id.vni[1], spec->vni, 3); 1943 memcpy(&id.vni[1], mask->vni, 3); 1944 } 1945 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 1946 return rte_flow_error_set(error, ENOTSUP, 1947 RTE_FLOW_ERROR_TYPE_ITEM, item, 1948 "VXLAN-GPE tunnel must be fully" 1949 " defined"); 1950 return 0; 1951 } 1952 /** 1953 * Validate GRE Key item. 1954 * 1955 * @param[in] item 1956 * Item specification. 1957 * @param[in] item_flags 1958 * Bit flags to mark detected items. 1959 * @param[in] gre_item 1960 * Pointer to gre_item 1961 * @param[out] error 1962 * Pointer to error structure. 1963 * 1964 * @return 1965 * 0 on success, a negative errno value otherwise and rte_errno is set. 
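 *
 * Illustrative-only sketch (the pattern indices and the running
 * "item_flags" accumulator are assumptions about the caller; items[i - 1]
 * is assumed to be the preceding GRE item):
 * @code
 *	item_flags |= MLX5_FLOW_LAYER_GRE;
 *	ret = mlx5_flow_validate_item_gre_key(&items[i], item_flags,
 *					      &items[i - 1], error);
 *	if (ret < 0)
 *		return ret;
 *	item_flags |= MLX5_FLOW_LAYER_GRE_KEY;
 * @endcode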
1966 */ 1967 int 1968 mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item, 1969 uint64_t item_flags, 1970 const struct rte_flow_item *gre_item, 1971 struct rte_flow_error *error) 1972 { 1973 const rte_be32_t *mask = item->mask; 1974 int ret = 0; 1975 rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX); 1976 const struct rte_flow_item_gre *gre_spec; 1977 const struct rte_flow_item_gre *gre_mask; 1978 1979 if (item_flags & MLX5_FLOW_LAYER_GRE_KEY) 1980 return rte_flow_error_set(error, ENOTSUP, 1981 RTE_FLOW_ERROR_TYPE_ITEM, item, 1982 "Multiple GRE key not support"); 1983 if (!(item_flags & MLX5_FLOW_LAYER_GRE)) 1984 return rte_flow_error_set(error, ENOTSUP, 1985 RTE_FLOW_ERROR_TYPE_ITEM, item, 1986 "No preceding GRE header"); 1987 if (item_flags & MLX5_FLOW_LAYER_INNER) 1988 return rte_flow_error_set(error, ENOTSUP, 1989 RTE_FLOW_ERROR_TYPE_ITEM, item, 1990 "GRE key following a wrong item"); 1991 gre_mask = gre_item->mask; 1992 if (!gre_mask) 1993 gre_mask = &rte_flow_item_gre_mask; 1994 gre_spec = gre_item->spec; 1995 if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) && 1996 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000))) 1997 return rte_flow_error_set(error, EINVAL, 1998 RTE_FLOW_ERROR_TYPE_ITEM, item, 1999 "Key bit must be on"); 2000 2001 if (!mask) 2002 mask = &gre_key_default_mask; 2003 ret = mlx5_flow_item_acceptable 2004 (item, (const uint8_t *)mask, 2005 (const uint8_t *)&gre_key_default_mask, 2006 sizeof(rte_be32_t), error); 2007 return ret; 2008 } 2009 2010 /** 2011 * Validate GRE item. 2012 * 2013 * @param[in] item 2014 * Item specification. 2015 * @param[in] item_flags 2016 * Bit flags to mark detected items. 2017 * @param[in] target_protocol 2018 * The next protocol in the previous item. 2019 * @param[out] error 2020 * Pointer to error structure. 2021 * 2022 * @return 2023 * 0 on success, a negative errno value otherwise and rte_errno is set. 
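 *
 * A minimal caller sketch (names are assumptions; 0xff means the preceding
 * L3 item did not constrain the next protocol):
 * @code
 *	uint8_t next_protocol = 0xff;
 *	int ret = mlx5_flow_validate_item_gre(item, item_flags, next_protocol,
 *					      error);
 *
 *	if (ret < 0)
 *		return ret;
 *	item_flags |= MLX5_FLOW_LAYER_GRE;
 * @endcode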
 */
int
mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    uint8_t target_protocol,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_gre *spec __rte_unused = item->spec;
	const struct rte_flow_item_gre *mask = item->mask;
	int ret;
	const struct rte_flow_item_gre nic_mask = {
		.c_rsvd0_ver = RTE_BE16(0xB000),
		.protocol = RTE_BE16(UINT16_MAX),
	};

	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with this GRE layer");
	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple tunnel layers not"
					  " supported");
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 Layer is missing");
	if (!mask)
		mask = &rte_flow_item_gre_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&nic_mask,
		 sizeof(struct rte_flow_item_gre), error);
	if (ret < 0)
		return ret;
#ifndef HAVE_MLX5DV_DR
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
	if (spec && (spec->protocol & mask->protocol))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "without MPLS support the"
					  " specification cannot be used for"
					  " filtering");
#endif
#endif
	return 0;
}

/**
 * Validate Geneve item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
			       uint64_t item_flags,
			       struct rte_eth_dev *dev,
			       struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_item_geneve *spec = item->spec;
	const struct rte_flow_item_geneve *mask = item->mask;
	int ret;
	uint16_t gbhdr;
	uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
2102 MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0; 2103 const struct rte_flow_item_geneve nic_mask = { 2104 .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80), 2105 .vni = "\xff\xff\xff", 2106 .protocol = RTE_BE16(UINT16_MAX), 2107 }; 2108 2109 if (!priv->config.hca_attr.tunnel_stateless_geneve_rx) 2110 return rte_flow_error_set(error, ENOTSUP, 2111 RTE_FLOW_ERROR_TYPE_ITEM, item, 2112 "L3 Geneve is not enabled by device" 2113 " parameter and/or not configured in" 2114 " firmware"); 2115 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 2116 return rte_flow_error_set(error, ENOTSUP, 2117 RTE_FLOW_ERROR_TYPE_ITEM, item, 2118 "multiple tunnel layers not" 2119 " supported"); 2120 /* 2121 * Verify only UDPv4 is present as defined in 2122 * https://tools.ietf.org/html/rfc7348 2123 */ 2124 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 2125 return rte_flow_error_set(error, EINVAL, 2126 RTE_FLOW_ERROR_TYPE_ITEM, item, 2127 "no outer UDP layer found"); 2128 if (!mask) 2129 mask = &rte_flow_item_geneve_mask; 2130 ret = mlx5_flow_item_acceptable 2131 (item, (const uint8_t *)mask, 2132 (const uint8_t *)&nic_mask, 2133 sizeof(struct rte_flow_item_geneve), error); 2134 if (ret) 2135 return ret; 2136 if (spec) { 2137 gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0); 2138 if (MLX5_GENEVE_VER_VAL(gbhdr) || 2139 MLX5_GENEVE_CRITO_VAL(gbhdr) || 2140 MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1) 2141 return rte_flow_error_set(error, ENOTSUP, 2142 RTE_FLOW_ERROR_TYPE_ITEM, 2143 item, 2144 "Geneve protocol unsupported" 2145 " fields are being used"); 2146 if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len) 2147 return rte_flow_error_set 2148 (error, ENOTSUP, 2149 RTE_FLOW_ERROR_TYPE_ITEM, 2150 item, 2151 "Unsupported Geneve options length"); 2152 } 2153 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 2154 return rte_flow_error_set 2155 (error, ENOTSUP, 2156 RTE_FLOW_ERROR_TYPE_ITEM, item, 2157 "Geneve tunnel must be fully defined"); 2158 return 0; 2159 } 2160 2161 /** 2162 * Validate MPLS item. 2163 * 2164 * @param[in] dev 2165 * Pointer to the rte_eth_dev structure. 2166 * @param[in] item 2167 * Item specification. 2168 * @param[in] item_flags 2169 * Bit-fields that holds the items detected until now. 2170 * @param[in] prev_layer 2171 * The protocol layer indicated in previous item. 2172 * @param[out] error 2173 * Pointer to error structure. 2174 * 2175 * @return 2176 * 0 on success, a negative errno value otherwise and rte_errno is set. 2177 */ 2178 int 2179 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, 2180 const struct rte_flow_item *item __rte_unused, 2181 uint64_t item_flags __rte_unused, 2182 uint64_t prev_layer __rte_unused, 2183 struct rte_flow_error *error) 2184 { 2185 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT 2186 const struct rte_flow_item_mpls *mask = item->mask; 2187 struct mlx5_priv *priv = dev->data->dev_private; 2188 int ret; 2189 2190 if (!priv->config.mpls_en) 2191 return rte_flow_error_set(error, ENOTSUP, 2192 RTE_FLOW_ERROR_TYPE_ITEM, item, 2193 "MPLS not supported or" 2194 " disabled in firmware" 2195 " configuration."); 2196 /* MPLS over IP, UDP, GRE is allowed */ 2197 if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 | 2198 MLX5_FLOW_LAYER_OUTER_L4_UDP | 2199 MLX5_FLOW_LAYER_GRE))) 2200 return rte_flow_error_set(error, EINVAL, 2201 RTE_FLOW_ERROR_TYPE_ITEM, item, 2202 "protocol filtering not compatible" 2203 " with MPLS layer"); 2204 /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. 
*/ 2205 if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) && 2206 !(item_flags & MLX5_FLOW_LAYER_GRE)) 2207 return rte_flow_error_set(error, ENOTSUP, 2208 RTE_FLOW_ERROR_TYPE_ITEM, item, 2209 "multiple tunnel layers not" 2210 " supported"); 2211 if (!mask) 2212 mask = &rte_flow_item_mpls_mask; 2213 ret = mlx5_flow_item_acceptable 2214 (item, (const uint8_t *)mask, 2215 (const uint8_t *)&rte_flow_item_mpls_mask, 2216 sizeof(struct rte_flow_item_mpls), error); 2217 if (ret < 0) 2218 return ret; 2219 return 0; 2220 #endif 2221 return rte_flow_error_set(error, ENOTSUP, 2222 RTE_FLOW_ERROR_TYPE_ITEM, item, 2223 "MPLS is not supported by Verbs, please" 2224 " update."); 2225 } 2226 2227 /** 2228 * Validate NVGRE item. 2229 * 2230 * @param[in] item 2231 * Item specification. 2232 * @param[in] item_flags 2233 * Bit flags to mark detected items. 2234 * @param[in] target_protocol 2235 * The next protocol in the previous item. 2236 * @param[out] error 2237 * Pointer to error structure. 2238 * 2239 * @return 2240 * 0 on success, a negative errno value otherwise and rte_errno is set. 2241 */ 2242 int 2243 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item, 2244 uint64_t item_flags, 2245 uint8_t target_protocol, 2246 struct rte_flow_error *error) 2247 { 2248 const struct rte_flow_item_nvgre *mask = item->mask; 2249 int ret; 2250 2251 if (target_protocol != 0xff && target_protocol != IPPROTO_GRE) 2252 return rte_flow_error_set(error, EINVAL, 2253 RTE_FLOW_ERROR_TYPE_ITEM, item, 2254 "protocol filtering not compatible" 2255 " with this GRE layer"); 2256 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 2257 return rte_flow_error_set(error, ENOTSUP, 2258 RTE_FLOW_ERROR_TYPE_ITEM, item, 2259 "multiple tunnel layers not" 2260 " supported"); 2261 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3)) 2262 return rte_flow_error_set(error, ENOTSUP, 2263 RTE_FLOW_ERROR_TYPE_ITEM, item, 2264 "L3 Layer is missing"); 2265 if (!mask) 2266 mask = &rte_flow_item_nvgre_mask; 2267 ret = mlx5_flow_item_acceptable 2268 (item, (const uint8_t *)mask, 2269 (const uint8_t *)&rte_flow_item_nvgre_mask, 2270 sizeof(struct rte_flow_item_nvgre), error); 2271 if (ret < 0) 2272 return ret; 2273 return 0; 2274 } 2275 2276 /* Allocate unique ID for the split Q/RSS subflows. */ 2277 static uint32_t 2278 flow_qrss_get_id(struct rte_eth_dev *dev) 2279 { 2280 struct mlx5_priv *priv = dev->data->dev_private; 2281 uint32_t qrss_id, ret; 2282 2283 ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id); 2284 if (ret) 2285 return 0; 2286 MLX5_ASSERT(qrss_id); 2287 return qrss_id; 2288 } 2289 2290 /* Free unique ID for the split Q/RSS subflows. */ 2291 static void 2292 flow_qrss_free_id(struct rte_eth_dev *dev, uint32_t qrss_id) 2293 { 2294 struct mlx5_priv *priv = dev->data->dev_private; 2295 2296 if (qrss_id) 2297 mlx5_flow_id_release(priv->qrss_id_pool, qrss_id); 2298 } 2299 2300 /** 2301 * Release resource related QUEUE/RSS action split. 2302 * 2303 * @param dev 2304 * Pointer to Ethernet device. 2305 * @param flow 2306 * Flow to release id's from. 
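 *
 * Each released ID was previously taken from the Q/RSS ID pool, e.g.
 * (a pairing sketch only, not a verbatim caller):
 * @code
 *	dev_flow->handle.qrss_id = flow_qrss_get_id(dev);
 * @endcode
 * and is returned to the pool here through flow_qrss_free_id().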
2307 */ 2308 static void 2309 flow_mreg_split_qrss_release(struct rte_eth_dev *dev, 2310 struct rte_flow *flow) 2311 { 2312 struct mlx5_flow *dev_flow; 2313 2314 LIST_FOREACH(dev_flow, &flow->dev_flows, next) 2315 if (dev_flow->handle.qrss_id) 2316 flow_qrss_free_id(dev, dev_flow->handle.qrss_id); 2317 } 2318 2319 static int 2320 flow_null_validate(struct rte_eth_dev *dev __rte_unused, 2321 const struct rte_flow_attr *attr __rte_unused, 2322 const struct rte_flow_item items[] __rte_unused, 2323 const struct rte_flow_action actions[] __rte_unused, 2324 bool external __rte_unused, 2325 struct rte_flow_error *error) 2326 { 2327 return rte_flow_error_set(error, ENOTSUP, 2328 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2329 } 2330 2331 static struct mlx5_flow * 2332 flow_null_prepare(const struct rte_flow_attr *attr __rte_unused, 2333 const struct rte_flow_item items[] __rte_unused, 2334 const struct rte_flow_action actions[] __rte_unused, 2335 struct rte_flow_error *error) 2336 { 2337 rte_flow_error_set(error, ENOTSUP, 2338 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2339 return NULL; 2340 } 2341 2342 static int 2343 flow_null_translate(struct rte_eth_dev *dev __rte_unused, 2344 struct mlx5_flow *dev_flow __rte_unused, 2345 const struct rte_flow_attr *attr __rte_unused, 2346 const struct rte_flow_item items[] __rte_unused, 2347 const struct rte_flow_action actions[] __rte_unused, 2348 struct rte_flow_error *error) 2349 { 2350 return rte_flow_error_set(error, ENOTSUP, 2351 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2352 } 2353 2354 static int 2355 flow_null_apply(struct rte_eth_dev *dev __rte_unused, 2356 struct rte_flow *flow __rte_unused, 2357 struct rte_flow_error *error) 2358 { 2359 return rte_flow_error_set(error, ENOTSUP, 2360 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2361 } 2362 2363 static void 2364 flow_null_remove(struct rte_eth_dev *dev __rte_unused, 2365 struct rte_flow *flow __rte_unused) 2366 { 2367 } 2368 2369 static void 2370 flow_null_destroy(struct rte_eth_dev *dev __rte_unused, 2371 struct rte_flow *flow __rte_unused) 2372 { 2373 } 2374 2375 static int 2376 flow_null_query(struct rte_eth_dev *dev __rte_unused, 2377 struct rte_flow *flow __rte_unused, 2378 const struct rte_flow_action *actions __rte_unused, 2379 void *data __rte_unused, 2380 struct rte_flow_error *error) 2381 { 2382 return rte_flow_error_set(error, ENOTSUP, 2383 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2384 } 2385 2386 /* Void driver to protect from null pointer reference. */ 2387 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = { 2388 .validate = flow_null_validate, 2389 .prepare = flow_null_prepare, 2390 .translate = flow_null_translate, 2391 .apply = flow_null_apply, 2392 .remove = flow_null_remove, 2393 .destroy = flow_null_destroy, 2394 .query = flow_null_query, 2395 }; 2396 2397 /** 2398 * Select flow driver type according to flow attributes and device 2399 * configuration. 2400 * 2401 * @param[in] dev 2402 * Pointer to the dev structure. 2403 * @param[in] attr 2404 * Pointer to the flow attributes. 2405 * 2406 * @return 2407 * flow driver type, MLX5_FLOW_TYPE_MAX otherwise. 2408 */ 2409 static enum mlx5_flow_drv_type 2410 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) 2411 { 2412 struct mlx5_priv *priv = dev->data->dev_private; 2413 enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX; 2414 2415 if (attr->transfer && priv->config.dv_esw_en) 2416 type = MLX5_FLOW_TYPE_DV; 2417 if (!attr->transfer) 2418 type = priv->config.dv_flow_en ? 
MLX5_FLOW_TYPE_DV : 2419 MLX5_FLOW_TYPE_VERBS; 2420 return type; 2421 } 2422 2423 #define flow_get_drv_ops(type) flow_drv_ops[type] 2424 2425 /** 2426 * Flow driver validation API. This abstracts calling driver specific functions. 2427 * The type of flow driver is determined according to flow attributes. 2428 * 2429 * @param[in] dev 2430 * Pointer to the dev structure. 2431 * @param[in] attr 2432 * Pointer to the flow attributes. 2433 * @param[in] items 2434 * Pointer to the list of items. 2435 * @param[in] actions 2436 * Pointer to the list of actions. 2437 * @param[in] external 2438 * This flow rule is created by request external to PMD. 2439 * @param[out] error 2440 * Pointer to the error structure. 2441 * 2442 * @return 2443 * 0 on success, a negative errno value otherwise and rte_errno is set. 2444 */ 2445 static inline int 2446 flow_drv_validate(struct rte_eth_dev *dev, 2447 const struct rte_flow_attr *attr, 2448 const struct rte_flow_item items[], 2449 const struct rte_flow_action actions[], 2450 bool external, struct rte_flow_error *error) 2451 { 2452 const struct mlx5_flow_driver_ops *fops; 2453 enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr); 2454 2455 fops = flow_get_drv_ops(type); 2456 return fops->validate(dev, attr, items, actions, external, error); 2457 } 2458 2459 /** 2460 * Flow driver preparation API. This abstracts calling driver specific 2461 * functions. Parent flow (rte_flow) should have driver type (drv_type). It 2462 * calculates the size of memory required for device flow, allocates the memory, 2463 * initializes the device flow and returns the pointer. 2464 * 2465 * @note 2466 * This function initializes device flow structure such as dv or verbs in 2467 * struct mlx5_flow. However, it is caller's responsibility to initialize the 2468 * rest. For example, adding returning device flow to flow->dev_flow list and 2469 * setting backward reference to the flow should be done out of this function. 2470 * layers field is not filled either. 2471 * 2472 * @param[in] attr 2473 * Pointer to the flow attributes. 2474 * @param[in] items 2475 * Pointer to the list of items. 2476 * @param[in] actions 2477 * Pointer to the list of actions. 2478 * @param[out] error 2479 * Pointer to the error structure. 2480 * 2481 * @return 2482 * Pointer to device flow on success, otherwise NULL and rte_errno is set. 2483 */ 2484 static inline struct mlx5_flow * 2485 flow_drv_prepare(const struct rte_flow *flow, 2486 const struct rte_flow_attr *attr, 2487 const struct rte_flow_item items[], 2488 const struct rte_flow_action actions[], 2489 struct rte_flow_error *error) 2490 { 2491 const struct mlx5_flow_driver_ops *fops; 2492 enum mlx5_flow_drv_type type = flow->drv_type; 2493 2494 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2495 fops = flow_get_drv_ops(type); 2496 return fops->prepare(attr, items, actions, error); 2497 } 2498 2499 /** 2500 * Flow driver translation API. This abstracts calling driver specific 2501 * functions. Parent flow (rte_flow) should have driver type (drv_type). It 2502 * translates a generic flow into a driver flow. flow_drv_prepare() must 2503 * precede. 2504 * 2505 * @note 2506 * dev_flow->layers could be filled as a result of parsing during translation 2507 * if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled 2508 * if necessary. As a flow can have multiple dev_flows by RSS flow expansion, 2509 * flow->actions could be overwritten even though all the expanded dev_flows 2510 * have the same actions. 
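 *
 * The typical calling sequence, sketched for illustration only (caller-side
 * variable names are assumptions and the splitting logic of the real
 * callers is omitted):
 * @code
 *	dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
 *	if (!dev_flow)
 *		return -rte_errno;
 *	dev_flow->flow = flow;
 *	LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
 *	ret = flow_drv_translate(dev, dev_flow, attr, items, actions, error);
 *	if (ret < 0)
 *		return ret;
 *	ret = flow_drv_apply(dev, flow, error);
 * @endcode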
2511 * 2512 * @param[in] dev 2513 * Pointer to the rte dev structure. 2514 * @param[in, out] dev_flow 2515 * Pointer to the mlx5 flow. 2516 * @param[in] attr 2517 * Pointer to the flow attributes. 2518 * @param[in] items 2519 * Pointer to the list of items. 2520 * @param[in] actions 2521 * Pointer to the list of actions. 2522 * @param[out] error 2523 * Pointer to the error structure. 2524 * 2525 * @return 2526 * 0 on success, a negative errno value otherwise and rte_errno is set. 2527 */ 2528 static inline int 2529 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow, 2530 const struct rte_flow_attr *attr, 2531 const struct rte_flow_item items[], 2532 const struct rte_flow_action actions[], 2533 struct rte_flow_error *error) 2534 { 2535 const struct mlx5_flow_driver_ops *fops; 2536 enum mlx5_flow_drv_type type = dev_flow->flow->drv_type; 2537 2538 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2539 fops = flow_get_drv_ops(type); 2540 return fops->translate(dev, dev_flow, attr, items, actions, error); 2541 } 2542 2543 /** 2544 * Flow driver apply API. This abstracts calling driver specific functions. 2545 * Parent flow (rte_flow) should have driver type (drv_type). It applies 2546 * translated driver flows on to device. flow_drv_translate() must precede. 2547 * 2548 * @param[in] dev 2549 * Pointer to Ethernet device structure. 2550 * @param[in, out] flow 2551 * Pointer to flow structure. 2552 * @param[out] error 2553 * Pointer to error structure. 2554 * 2555 * @return 2556 * 0 on success, a negative errno value otherwise and rte_errno is set. 2557 */ 2558 static inline int 2559 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, 2560 struct rte_flow_error *error) 2561 { 2562 const struct mlx5_flow_driver_ops *fops; 2563 enum mlx5_flow_drv_type type = flow->drv_type; 2564 2565 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2566 fops = flow_get_drv_ops(type); 2567 return fops->apply(dev, flow, error); 2568 } 2569 2570 /** 2571 * Flow driver remove API. This abstracts calling driver specific functions. 2572 * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow 2573 * on device. All the resources of the flow should be freed by calling 2574 * flow_drv_destroy(). 2575 * 2576 * @param[in] dev 2577 * Pointer to Ethernet device. 2578 * @param[in, out] flow 2579 * Pointer to flow structure. 2580 */ 2581 static inline void 2582 flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) 2583 { 2584 const struct mlx5_flow_driver_ops *fops; 2585 enum mlx5_flow_drv_type type = flow->drv_type; 2586 2587 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2588 fops = flow_get_drv_ops(type); 2589 fops->remove(dev, flow); 2590 } 2591 2592 /** 2593 * Flow driver destroy API. This abstracts calling driver specific functions. 2594 * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow 2595 * on device and releases resources of the flow. 2596 * 2597 * @param[in] dev 2598 * Pointer to Ethernet device. 2599 * @param[in, out] flow 2600 * Pointer to flow structure. 
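 *
 * Dispatch sketch (mirrors the body below; shown only to make the
 * per-driver indirection explicit):
 * @code
 *	fops = flow_get_drv_ops(flow->drv_type);
 *	fops->destroy(dev, flow);
 * @endcode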
2601 */ 2602 static inline void 2603 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) 2604 { 2605 const struct mlx5_flow_driver_ops *fops; 2606 enum mlx5_flow_drv_type type = flow->drv_type; 2607 2608 flow_mreg_split_qrss_release(dev, flow); 2609 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2610 fops = flow_get_drv_ops(type); 2611 fops->destroy(dev, flow); 2612 } 2613 2614 /** 2615 * Validate a flow supported by the NIC. 2616 * 2617 * @see rte_flow_validate() 2618 * @see rte_flow_ops 2619 */ 2620 int 2621 mlx5_flow_validate(struct rte_eth_dev *dev, 2622 const struct rte_flow_attr *attr, 2623 const struct rte_flow_item items[], 2624 const struct rte_flow_action actions[], 2625 struct rte_flow_error *error) 2626 { 2627 int ret; 2628 2629 ret = flow_drv_validate(dev, attr, items, actions, true, error); 2630 if (ret < 0) 2631 return ret; 2632 return 0; 2633 } 2634 2635 /** 2636 * Get RSS action from the action list. 2637 * 2638 * @param[in] actions 2639 * Pointer to the list of actions. 2640 * 2641 * @return 2642 * Pointer to the RSS action if exist, else return NULL. 2643 */ 2644 static const struct rte_flow_action_rss* 2645 flow_get_rss_action(const struct rte_flow_action actions[]) 2646 { 2647 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2648 switch (actions->type) { 2649 case RTE_FLOW_ACTION_TYPE_RSS: 2650 return (const struct rte_flow_action_rss *) 2651 actions->conf; 2652 default: 2653 break; 2654 } 2655 } 2656 return NULL; 2657 } 2658 2659 static unsigned int 2660 find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) 2661 { 2662 const struct rte_flow_item *item; 2663 unsigned int has_vlan = 0; 2664 2665 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { 2666 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { 2667 has_vlan = 1; 2668 break; 2669 } 2670 } 2671 if (has_vlan) 2672 return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN : 2673 MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN; 2674 return rss_level < 2 ? MLX5_EXPANSION_ROOT : 2675 MLX5_EXPANSION_ROOT_OUTER; 2676 } 2677 2678 /** 2679 * Get layer flags from the prefix flow. 2680 * 2681 * Some flows may be split to several subflows, the prefix subflow gets the 2682 * match items and the suffix sub flow gets the actions. 2683 * Some actions need the user defined match item flags to get the detail for 2684 * the action. 2685 * This function helps the suffix flow to get the item layer flags from prefix 2686 * subflow. 2687 * 2688 * @param[in] dev_flow 2689 * Pointer the created preifx subflow. 2690 * 2691 * @return 2692 * The layers get from prefix subflow. 2693 */ 2694 static inline uint64_t 2695 flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow) 2696 { 2697 uint64_t layers = 0; 2698 2699 /* 2700 * Layers bits could be localization, but usually the compiler will 2701 * help to do the optimization work for source code. 2702 * If no decap actions, use the layers directly. 2703 */ 2704 if (!(dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DECAP)) 2705 return dev_flow->handle.layers; 2706 /* Convert L3 layers with decap action. */ 2707 if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L3_IPV4) 2708 layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4; 2709 else if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L3_IPV6) 2710 layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6; 2711 /* Convert L4 layers with decap action. 
*/ 2712 if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L4_TCP) 2713 layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP; 2714 else if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L4_UDP) 2715 layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP; 2716 return layers; 2717 } 2718 2719 /** 2720 * Get metadata split action information. 2721 * 2722 * @param[in] actions 2723 * Pointer to the list of actions. 2724 * @param[out] qrss 2725 * Pointer to the return pointer. 2726 * @param[out] qrss_type 2727 * Pointer to the action type to return. RTE_FLOW_ACTION_TYPE_END is returned 2728 * if no QUEUE/RSS is found. 2729 * @param[out] encap_idx 2730 * Pointer to the index of the encap action if exists, otherwise the last 2731 * action index. 2732 * 2733 * @return 2734 * Total number of actions. 2735 */ 2736 static int 2737 flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[], 2738 const struct rte_flow_action **qrss, 2739 int *encap_idx) 2740 { 2741 const struct rte_flow_action_raw_encap *raw_encap; 2742 int actions_n = 0; 2743 int raw_decap_idx = -1; 2744 2745 *encap_idx = -1; 2746 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2747 switch (actions->type) { 2748 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 2749 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 2750 *encap_idx = actions_n; 2751 break; 2752 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 2753 raw_decap_idx = actions_n; 2754 break; 2755 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 2756 raw_encap = actions->conf; 2757 if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) 2758 *encap_idx = raw_decap_idx != -1 ? 2759 raw_decap_idx : actions_n; 2760 break; 2761 case RTE_FLOW_ACTION_TYPE_QUEUE: 2762 case RTE_FLOW_ACTION_TYPE_RSS: 2763 *qrss = actions; 2764 break; 2765 default: 2766 break; 2767 } 2768 actions_n++; 2769 } 2770 if (*encap_idx == -1) 2771 *encap_idx = actions_n; 2772 /* Count RTE_FLOW_ACTION_TYPE_END. */ 2773 return actions_n + 1; 2774 } 2775 2776 /** 2777 * Check meter action from the action list. 2778 * 2779 * @param[in] actions 2780 * Pointer to the list of actions. 2781 * @param[out] mtr 2782 * Pointer to the meter exist flag. 2783 * 2784 * @return 2785 * Total number of actions. 2786 */ 2787 static int 2788 flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr) 2789 { 2790 int actions_n = 0; 2791 2792 MLX5_ASSERT(mtr); 2793 *mtr = 0; 2794 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2795 switch (actions->type) { 2796 case RTE_FLOW_ACTION_TYPE_METER: 2797 *mtr = 1; 2798 break; 2799 default: 2800 break; 2801 } 2802 actions_n++; 2803 } 2804 /* Count RTE_FLOW_ACTION_TYPE_END. */ 2805 return actions_n + 1; 2806 } 2807 2808 /** 2809 * Check if the flow should be splited due to hairpin. 2810 * The reason for the split is that in current HW we can't 2811 * support encap on Rx, so if a flow have encap we move it 2812 * to Tx. 2813 * 2814 * @param dev 2815 * Pointer to Ethernet device. 2816 * @param[in] attr 2817 * Flow rule attributes. 2818 * @param[in] actions 2819 * Associated actions (list terminated by the END action). 2820 * 2821 * @return 2822 * > 0 the number of actions and the flow should be split, 2823 * 0 when no split required. 
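 *
 * A decision sketch (the caller-side buffers are assumptions; the real
 * caller sizes and offsets them differently):
 * @code
 *	struct rte_flow_action actions_rx[MLX5_MAX_SPLIT_ACTIONS];
 *	struct rte_flow_action actions_tx[MLX5_MAX_SPLIT_ACTIONS];
 *	struct rte_flow_item pattern_tx[MLX5_MAX_SPLIT_ITEMS];
 *	uint32_t flow_id;
 *
 *	if (flow_check_hairpin_split(dev, attr, actions) > 0)
 *		flow_hairpin_split(dev, actions, actions_rx, actions_tx,
 *				   pattern_tx, &flow_id);
 * @endcode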
2824 */ 2825 static int 2826 flow_check_hairpin_split(struct rte_eth_dev *dev, 2827 const struct rte_flow_attr *attr, 2828 const struct rte_flow_action actions[]) 2829 { 2830 int queue_action = 0; 2831 int action_n = 0; 2832 int encap = 0; 2833 const struct rte_flow_action_queue *queue; 2834 const struct rte_flow_action_rss *rss; 2835 const struct rte_flow_action_raw_encap *raw_encap; 2836 2837 if (!attr->ingress) 2838 return 0; 2839 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2840 switch (actions->type) { 2841 case RTE_FLOW_ACTION_TYPE_QUEUE: 2842 queue = actions->conf; 2843 if (queue == NULL) 2844 return 0; 2845 if (mlx5_rxq_get_type(dev, queue->index) != 2846 MLX5_RXQ_TYPE_HAIRPIN) 2847 return 0; 2848 queue_action = 1; 2849 action_n++; 2850 break; 2851 case RTE_FLOW_ACTION_TYPE_RSS: 2852 rss = actions->conf; 2853 if (rss == NULL || rss->queue_num == 0) 2854 return 0; 2855 if (mlx5_rxq_get_type(dev, rss->queue[0]) != 2856 MLX5_RXQ_TYPE_HAIRPIN) 2857 return 0; 2858 queue_action = 1; 2859 action_n++; 2860 break; 2861 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 2862 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 2863 encap = 1; 2864 action_n++; 2865 break; 2866 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 2867 raw_encap = actions->conf; 2868 if (raw_encap->size > 2869 (sizeof(struct rte_flow_item_eth) + 2870 sizeof(struct rte_flow_item_ipv4))) 2871 encap = 1; 2872 action_n++; 2873 break; 2874 default: 2875 action_n++; 2876 break; 2877 } 2878 } 2879 if (encap == 1 && queue_action) 2880 return action_n; 2881 return 0; 2882 } 2883 2884 /* Declare flow create/destroy prototype in advance. */ 2885 static struct rte_flow * 2886 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, 2887 const struct rte_flow_attr *attr, 2888 const struct rte_flow_item items[], 2889 const struct rte_flow_action actions[], 2890 bool external, struct rte_flow_error *error); 2891 2892 static void 2893 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, 2894 struct rte_flow *flow); 2895 2896 /** 2897 * Add a flow of copying flow metadata registers in RX_CP_TBL. 2898 * 2899 * As mark_id is unique, if there's already a registered flow for the mark_id, 2900 * return by increasing the reference counter of the resource. Otherwise, create 2901 * the resource (mcp_res) and flow. 2902 * 2903 * Flow looks like, 2904 * - If ingress port is ANY and reg_c[1] is mark_id, 2905 * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL. 2906 * 2907 * For default flow (zero mark_id), flow is like, 2908 * - If ingress port is ANY, 2909 * reg_b := reg_c[0] and jump to RX_ACT_TBL. 2910 * 2911 * @param dev 2912 * Pointer to Ethernet device. 2913 * @param mark_id 2914 * ID of MARK action, zero means default flow for META. 2915 * @param[out] error 2916 * Perform verbose error reporting if not NULL. 2917 * 2918 * @return 2919 * Associated resource on success, NULL otherwise and rte_errno is set. 
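 *
 * Typical call sketch, as done for a MARK action (illustrative only; the
 * bookkeeping of the real caller is omitted):
 * @code
 *	mcp_res = flow_mreg_add_copy_action(dev, mark->id, error);
 *	if (!mcp_res)
 *		return -rte_errno;
 *	flow->mreg_copy = mcp_res;
 * @endcode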
2920 */ 2921 static struct mlx5_flow_mreg_copy_resource * 2922 flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, 2923 struct rte_flow_error *error) 2924 { 2925 struct mlx5_priv *priv = dev->data->dev_private; 2926 struct rte_flow_attr attr = { 2927 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, 2928 .ingress = 1, 2929 }; 2930 struct mlx5_rte_flow_item_tag tag_spec = { 2931 .data = mark_id, 2932 }; 2933 struct rte_flow_item items[] = { 2934 [1] = { .type = RTE_FLOW_ITEM_TYPE_END, }, 2935 }; 2936 struct rte_flow_action_mark ftag = { 2937 .id = mark_id, 2938 }; 2939 struct mlx5_flow_action_copy_mreg cp_mreg = { 2940 .dst = REG_B, 2941 .src = 0, 2942 }; 2943 struct rte_flow_action_jump jump = { 2944 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, 2945 }; 2946 struct rte_flow_action actions[] = { 2947 [3] = { .type = RTE_FLOW_ACTION_TYPE_END, }, 2948 }; 2949 struct mlx5_flow_mreg_copy_resource *mcp_res; 2950 int ret; 2951 2952 /* Fill the register fileds in the flow. */ 2953 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); 2954 if (ret < 0) 2955 return NULL; 2956 tag_spec.id = ret; 2957 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error); 2958 if (ret < 0) 2959 return NULL; 2960 cp_mreg.src = ret; 2961 /* Check if already registered. */ 2962 MLX5_ASSERT(priv->mreg_cp_tbl); 2963 mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id); 2964 if (mcp_res) { 2965 /* For non-default rule. */ 2966 if (mark_id != MLX5_DEFAULT_COPY_ID) 2967 mcp_res->refcnt++; 2968 MLX5_ASSERT(mark_id != MLX5_DEFAULT_COPY_ID || 2969 mcp_res->refcnt == 1); 2970 return mcp_res; 2971 } 2972 /* Provide the full width of FLAG specific value. */ 2973 if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT)) 2974 tag_spec.data = MLX5_FLOW_MARK_DEFAULT; 2975 /* Build a new flow. */ 2976 if (mark_id != MLX5_DEFAULT_COPY_ID) { 2977 items[0] = (struct rte_flow_item){ 2978 .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG, 2979 .spec = &tag_spec, 2980 }; 2981 items[1] = (struct rte_flow_item){ 2982 .type = RTE_FLOW_ITEM_TYPE_END, 2983 }; 2984 actions[0] = (struct rte_flow_action){ 2985 .type = MLX5_RTE_FLOW_ACTION_TYPE_MARK, 2986 .conf = &ftag, 2987 }; 2988 actions[1] = (struct rte_flow_action){ 2989 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 2990 .conf = &cp_mreg, 2991 }; 2992 actions[2] = (struct rte_flow_action){ 2993 .type = RTE_FLOW_ACTION_TYPE_JUMP, 2994 .conf = &jump, 2995 }; 2996 actions[3] = (struct rte_flow_action){ 2997 .type = RTE_FLOW_ACTION_TYPE_END, 2998 }; 2999 } else { 3000 /* Default rule, wildcard match. */ 3001 attr.priority = MLX5_FLOW_PRIO_RSVD; 3002 items[0] = (struct rte_flow_item){ 3003 .type = RTE_FLOW_ITEM_TYPE_END, 3004 }; 3005 actions[0] = (struct rte_flow_action){ 3006 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 3007 .conf = &cp_mreg, 3008 }; 3009 actions[1] = (struct rte_flow_action){ 3010 .type = RTE_FLOW_ACTION_TYPE_JUMP, 3011 .conf = &jump, 3012 }; 3013 actions[2] = (struct rte_flow_action){ 3014 .type = RTE_FLOW_ACTION_TYPE_END, 3015 }; 3016 } 3017 /* Build a new entry. */ 3018 mcp_res = rte_zmalloc(__func__, sizeof(*mcp_res), 0); 3019 if (!mcp_res) { 3020 rte_errno = ENOMEM; 3021 return NULL; 3022 } 3023 /* 3024 * The copy Flows are not included in any list. There 3025 * ones are referenced from other Flows and can not 3026 * be applied, removed, deleted in ardbitrary order 3027 * by list traversing. 
3028 */ 3029 mcp_res->flow = flow_list_create(dev, NULL, &attr, items, 3030 actions, false, error); 3031 if (!mcp_res->flow) 3032 goto error; 3033 mcp_res->refcnt++; 3034 mcp_res->hlist_ent.key = mark_id; 3035 ret = mlx5_hlist_insert(priv->mreg_cp_tbl, 3036 &mcp_res->hlist_ent); 3037 MLX5_ASSERT(!ret); 3038 if (ret) 3039 goto error; 3040 return mcp_res; 3041 error: 3042 if (mcp_res->flow) 3043 flow_list_destroy(dev, NULL, mcp_res->flow); 3044 rte_free(mcp_res); 3045 return NULL; 3046 } 3047 3048 /** 3049 * Release flow in RX_CP_TBL. 3050 * 3051 * @param dev 3052 * Pointer to Ethernet device. 3053 * @flow 3054 * Parent flow for wich copying is provided. 3055 */ 3056 static void 3057 flow_mreg_del_copy_action(struct rte_eth_dev *dev, 3058 struct rte_flow *flow) 3059 { 3060 struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; 3061 struct mlx5_priv *priv = dev->data->dev_private; 3062 3063 if (!mcp_res || !priv->mreg_cp_tbl) 3064 return; 3065 if (flow->copy_applied) { 3066 MLX5_ASSERT(mcp_res->appcnt); 3067 flow->copy_applied = 0; 3068 --mcp_res->appcnt; 3069 if (!mcp_res->appcnt) 3070 flow_drv_remove(dev, mcp_res->flow); 3071 } 3072 /* 3073 * We do not check availability of metadata registers here, 3074 * because copy resources are not allocated in this case. 3075 */ 3076 if (--mcp_res->refcnt) 3077 return; 3078 MLX5_ASSERT(mcp_res->flow); 3079 flow_list_destroy(dev, NULL, mcp_res->flow); 3080 mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent); 3081 rte_free(mcp_res); 3082 flow->mreg_copy = NULL; 3083 } 3084 3085 /** 3086 * Start flow in RX_CP_TBL. 3087 * 3088 * @param dev 3089 * Pointer to Ethernet device. 3090 * @flow 3091 * Parent flow for wich copying is provided. 3092 * 3093 * @return 3094 * 0 on success, a negative errno value otherwise and rte_errno is set. 3095 */ 3096 static int 3097 flow_mreg_start_copy_action(struct rte_eth_dev *dev, 3098 struct rte_flow *flow) 3099 { 3100 struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; 3101 int ret; 3102 3103 if (!mcp_res || flow->copy_applied) 3104 return 0; 3105 if (!mcp_res->appcnt) { 3106 ret = flow_drv_apply(dev, mcp_res->flow, NULL); 3107 if (ret) 3108 return ret; 3109 } 3110 ++mcp_res->appcnt; 3111 flow->copy_applied = 1; 3112 return 0; 3113 } 3114 3115 /** 3116 * Stop flow in RX_CP_TBL. 3117 * 3118 * @param dev 3119 * Pointer to Ethernet device. 3120 * @flow 3121 * Parent flow for wich copying is provided. 3122 */ 3123 static void 3124 flow_mreg_stop_copy_action(struct rte_eth_dev *dev, 3125 struct rte_flow *flow) 3126 { 3127 struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; 3128 3129 if (!mcp_res || !flow->copy_applied) 3130 return; 3131 MLX5_ASSERT(mcp_res->appcnt); 3132 --mcp_res->appcnt; 3133 flow->copy_applied = 0; 3134 if (!mcp_res->appcnt) 3135 flow_drv_remove(dev, mcp_res->flow); 3136 } 3137 3138 /** 3139 * Remove the default copy action from RX_CP_TBL. 3140 * 3141 * @param dev 3142 * Pointer to Ethernet device. 3143 */ 3144 static void 3145 flow_mreg_del_default_copy_action(struct rte_eth_dev *dev) 3146 { 3147 struct mlx5_flow_mreg_copy_resource *mcp_res; 3148 struct mlx5_priv *priv = dev->data->dev_private; 3149 3150 /* Check if default flow is registered. 
*/ 3151 if (!priv->mreg_cp_tbl) 3152 return; 3153 mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, 3154 MLX5_DEFAULT_COPY_ID); 3155 if (!mcp_res) 3156 return; 3157 MLX5_ASSERT(mcp_res->flow); 3158 flow_list_destroy(dev, NULL, mcp_res->flow); 3159 mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent); 3160 rte_free(mcp_res); 3161 } 3162 3163 /** 3164 * Add the default copy action in in RX_CP_TBL. 3165 * 3166 * @param dev 3167 * Pointer to Ethernet device. 3168 * @param[out] error 3169 * Perform verbose error reporting if not NULL. 3170 * 3171 * @return 3172 * 0 for success, negative value otherwise and rte_errno is set. 3173 */ 3174 static int 3175 flow_mreg_add_default_copy_action(struct rte_eth_dev *dev, 3176 struct rte_flow_error *error) 3177 { 3178 struct mlx5_priv *priv = dev->data->dev_private; 3179 struct mlx5_flow_mreg_copy_resource *mcp_res; 3180 3181 /* Check whether extensive metadata feature is engaged. */ 3182 if (!priv->config.dv_flow_en || 3183 priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 3184 !mlx5_flow_ext_mreg_supported(dev) || 3185 !priv->sh->dv_regc0_mask) 3186 return 0; 3187 mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error); 3188 if (!mcp_res) 3189 return -rte_errno; 3190 return 0; 3191 } 3192 3193 /** 3194 * Add a flow of copying flow metadata registers in RX_CP_TBL. 3195 * 3196 * All the flow having Q/RSS action should be split by 3197 * flow_mreg_split_qrss_prep() to pass by RX_CP_TBL. A flow in the RX_CP_TBL 3198 * performs the following, 3199 * - CQE->flow_tag := reg_c[1] (MARK) 3200 * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META) 3201 * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1] 3202 * but there should be a flow per each MARK ID set by MARK action. 3203 * 3204 * For the aforementioned reason, if there's a MARK action in flow's action 3205 * list, a corresponding flow should be added to the RX_CP_TBL in order to copy 3206 * the MARK ID to CQE's flow_tag like, 3207 * - If reg_c[1] is mark_id, 3208 * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL. 3209 * 3210 * For SET_META action which stores value in reg_c[0], as the destination is 3211 * also a flow metadata register (reg_b), adding a default flow is enough. Zero 3212 * MARK ID means the default flow. The default flow looks like, 3213 * - For all flow, reg_b := reg_c[0] and jump to RX_ACT_TBL. 3214 * 3215 * @param dev 3216 * Pointer to Ethernet device. 3217 * @param flow 3218 * Pointer to flow structure. 3219 * @param[in] actions 3220 * Pointer to the list of actions. 3221 * @param[out] error 3222 * Perform verbose error reporting if not NULL. 3223 * 3224 * @return 3225 * 0 on success, negative value otherwise and rte_errno is set. 3226 */ 3227 static int 3228 flow_mreg_update_copy_table(struct rte_eth_dev *dev, 3229 struct rte_flow *flow, 3230 const struct rte_flow_action *actions, 3231 struct rte_flow_error *error) 3232 { 3233 struct mlx5_priv *priv = dev->data->dev_private; 3234 struct mlx5_dev_config *config = &priv->config; 3235 struct mlx5_flow_mreg_copy_resource *mcp_res; 3236 const struct rte_flow_action_mark *mark; 3237 3238 /* Check whether extensive metadata feature is engaged. */ 3239 if (!config->dv_flow_en || 3240 config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 3241 !mlx5_flow_ext_mreg_supported(dev) || 3242 !priv->sh->dv_regc0_mask) 3243 return 0; 3244 /* Find MARK action. 
*/ 3245 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 3246 switch (actions->type) { 3247 case RTE_FLOW_ACTION_TYPE_FLAG: 3248 mcp_res = flow_mreg_add_copy_action 3249 (dev, MLX5_FLOW_MARK_DEFAULT, error); 3250 if (!mcp_res) 3251 return -rte_errno; 3252 flow->mreg_copy = mcp_res; 3253 if (dev->data->dev_started) { 3254 mcp_res->appcnt++; 3255 flow->copy_applied = 1; 3256 } 3257 return 0; 3258 case RTE_FLOW_ACTION_TYPE_MARK: 3259 mark = (const struct rte_flow_action_mark *) 3260 actions->conf; 3261 mcp_res = 3262 flow_mreg_add_copy_action(dev, mark->id, error); 3263 if (!mcp_res) 3264 return -rte_errno; 3265 flow->mreg_copy = mcp_res; 3266 if (dev->data->dev_started) { 3267 mcp_res->appcnt++; 3268 flow->copy_applied = 1; 3269 } 3270 return 0; 3271 default: 3272 break; 3273 } 3274 } 3275 return 0; 3276 } 3277 3278 #define MLX5_MAX_SPLIT_ACTIONS 24 3279 #define MLX5_MAX_SPLIT_ITEMS 24 3280 3281 /** 3282 * Split the hairpin flow. 3283 * Since HW can't support encap on Rx we move the encap to Tx. 3284 * If the count action is after the encap then we also 3285 * move the count action. in this case the count will also measure 3286 * the outer bytes. 3287 * 3288 * @param dev 3289 * Pointer to Ethernet device. 3290 * @param[in] actions 3291 * Associated actions (list terminated by the END action). 3292 * @param[out] actions_rx 3293 * Rx flow actions. 3294 * @param[out] actions_tx 3295 * Tx flow actions.. 3296 * @param[out] pattern_tx 3297 * The pattern items for the Tx flow. 3298 * @param[out] flow_id 3299 * The flow ID connected to this flow. 3300 * 3301 * @return 3302 * 0 on success. 3303 */ 3304 static int 3305 flow_hairpin_split(struct rte_eth_dev *dev, 3306 const struct rte_flow_action actions[], 3307 struct rte_flow_action actions_rx[], 3308 struct rte_flow_action actions_tx[], 3309 struct rte_flow_item pattern_tx[], 3310 uint32_t *flow_id) 3311 { 3312 struct mlx5_priv *priv = dev->data->dev_private; 3313 const struct rte_flow_action_raw_encap *raw_encap; 3314 const struct rte_flow_action_raw_decap *raw_decap; 3315 struct mlx5_rte_flow_action_set_tag *set_tag; 3316 struct rte_flow_action *tag_action; 3317 struct mlx5_rte_flow_item_tag *tag_item; 3318 struct rte_flow_item *item; 3319 char *addr; 3320 int encap = 0; 3321 3322 mlx5_flow_id_get(priv->sh->flow_id_pool, flow_id); 3323 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 3324 switch (actions->type) { 3325 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 3326 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 3327 rte_memcpy(actions_tx, actions, 3328 sizeof(struct rte_flow_action)); 3329 actions_tx++; 3330 break; 3331 case RTE_FLOW_ACTION_TYPE_COUNT: 3332 if (encap) { 3333 rte_memcpy(actions_tx, actions, 3334 sizeof(struct rte_flow_action)); 3335 actions_tx++; 3336 } else { 3337 rte_memcpy(actions_rx, actions, 3338 sizeof(struct rte_flow_action)); 3339 actions_rx++; 3340 } 3341 break; 3342 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 3343 raw_encap = actions->conf; 3344 if (raw_encap->size > 3345 (sizeof(struct rte_flow_item_eth) + 3346 sizeof(struct rte_flow_item_ipv4))) { 3347 memcpy(actions_tx, actions, 3348 sizeof(struct rte_flow_action)); 3349 actions_tx++; 3350 encap = 1; 3351 } else { 3352 rte_memcpy(actions_rx, actions, 3353 sizeof(struct rte_flow_action)); 3354 actions_rx++; 3355 } 3356 break; 3357 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 3358 raw_decap = actions->conf; 3359 if (raw_decap->size < 3360 (sizeof(struct rte_flow_item_eth) + 3361 sizeof(struct rte_flow_item_ipv4))) { 3362 memcpy(actions_tx, actions, 3363 sizeof(struct 
rte_flow_action)); 3364 actions_tx++; 3365 } else { 3366 rte_memcpy(actions_rx, actions, 3367 sizeof(struct rte_flow_action)); 3368 actions_rx++; 3369 } 3370 break; 3371 default: 3372 rte_memcpy(actions_rx, actions, 3373 sizeof(struct rte_flow_action)); 3374 actions_rx++; 3375 break; 3376 } 3377 } 3378 /* Add set meta action and end action for the Rx flow. */ 3379 tag_action = actions_rx; 3380 tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG; 3381 actions_rx++; 3382 rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action)); 3383 actions_rx++; 3384 set_tag = (void *)actions_rx; 3385 set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL); 3386 MLX5_ASSERT(set_tag->id > REG_NONE); 3387 set_tag->data = *flow_id; 3388 tag_action->conf = set_tag; 3389 /* Create Tx item list. */ 3390 rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action)); 3391 addr = (void *)&pattern_tx[2]; 3392 item = pattern_tx; 3393 item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG; 3394 tag_item = (void *)addr; 3395 tag_item->data = *flow_id; 3396 tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL); 3397 MLX5_ASSERT(set_tag->id > REG_NONE); 3398 item->spec = tag_item; 3399 addr += sizeof(struct mlx5_rte_flow_item_tag); 3400 tag_item = (void *)addr; 3401 tag_item->data = UINT32_MAX; 3402 tag_item->id = UINT16_MAX; 3403 item->mask = tag_item; 3404 addr += sizeof(struct mlx5_rte_flow_item_tag); 3405 item->last = NULL; 3406 item++; 3407 item->type = RTE_FLOW_ITEM_TYPE_END; 3408 return 0; 3409 } 3410 3411 /** 3412 * The last stage of splitting chain, just creates the subflow 3413 * without any modification. 3414 * 3415 * @param dev 3416 * Pointer to Ethernet device. 3417 * @param[in] flow 3418 * Parent flow structure pointer. 3419 * @param[in, out] sub_flow 3420 * Pointer to return the created subflow, may be NULL. 3421 * @param[in] prefix_layers 3422 * Prefix subflow layers, may be 0. 3423 * @param[in] attr 3424 * Flow rule attributes. 3425 * @param[in] items 3426 * Pattern specification (list terminated by the END pattern item). 3427 * @param[in] actions 3428 * Associated actions (list terminated by the END action). 3429 * @param[in] external 3430 * This flow rule is created by request external to PMD. 3431 * @param[out] error 3432 * Perform verbose error reporting if not NULL. 3433 * @return 3434 * 0 on success, negative value otherwise 3435 */ 3436 static int 3437 flow_create_split_inner(struct rte_eth_dev *dev, 3438 struct rte_flow *flow, 3439 struct mlx5_flow **sub_flow, 3440 uint64_t prefix_layers, 3441 const struct rte_flow_attr *attr, 3442 const struct rte_flow_item items[], 3443 const struct rte_flow_action actions[], 3444 bool external, struct rte_flow_error *error) 3445 { 3446 struct mlx5_flow *dev_flow; 3447 3448 dev_flow = flow_drv_prepare(flow, attr, items, actions, error); 3449 if (!dev_flow) 3450 return -rte_errno; 3451 dev_flow->flow = flow; 3452 dev_flow->external = external; 3453 /* Subflow object was created, we must include one in the list. */ 3454 LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); 3455 /* 3456 * If dev_flow is as one of the suffix flow, some actions in suffix 3457 * flow may need some user defined item layer flags. 3458 */ 3459 if (prefix_layers) 3460 dev_flow->handle.layers = prefix_layers; 3461 if (sub_flow) 3462 *sub_flow = dev_flow; 3463 return flow_drv_translate(dev, dev_flow, attr, items, actions, error); 3464 } 3465 3466 /** 3467 * Split the meter flow. 
 *
 * As a meter flow is split into three subflows, any action other than
 * the meter action itself only makes sense when the meter accepts the
 * packet. If the packet is to be dropped, no additional actions should
 * be taken.
 *
 * One special kind of action, the decapsulation of the L3 tunnel
 * header, is kept in the prefix subflow so that the L3 tunnel header
 * is not taken into account afterwards.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[out] sfx_items
 *   Suffix flow match items (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] actions_sfx
 *   Suffix flow actions.
 * @param[out] actions_pre
 *   Prefix flow actions.
 *
 * @return
 *   The tag ID allocated for the suffix subflow match, shared with the
 *   Q/RSS split.
 */
static int
flow_meter_split_prep(struct rte_eth_dev *dev,
		      const struct rte_flow_item items[],
		      struct rte_flow_item sfx_items[],
		      const struct rte_flow_action actions[],
		      struct rte_flow_action actions_sfx[],
		      struct rte_flow_action actions_pre[])
{
	struct rte_flow_action *tag_action = NULL;
	struct rte_flow_item *tag_item;
	struct mlx5_rte_flow_action_set_tag *set_tag;
	struct rte_flow_error error;
	const struct rte_flow_action_raw_encap *raw_encap;
	const struct rte_flow_action_raw_decap *raw_decap;
	struct mlx5_rte_flow_item_tag *tag_spec;
	struct mlx5_rte_flow_item_tag *tag_mask;
	uint32_t tag_id;
	bool copy_vlan = false;

	/* Prepare the actions for prefix and suffix flow. */
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		struct rte_flow_action **action_cur = NULL;

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_METER:
			/* Add the extra tag action first. */
			tag_action = actions_pre;
			tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG;
			actions_pre++;
			action_cur = &actions_pre;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
			action_cur = &actions_pre;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			raw_encap = actions->conf;
			if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE)
				action_cur = &actions_pre;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
			raw_decap = actions->conf;
			if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
				action_cur = &actions_pre;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
			copy_vlan = true;
			break;
		default:
			break;
		}
		if (!action_cur)
			action_cur = &actions_sfx;
		memcpy(*action_cur, actions, sizeof(struct rte_flow_action));
		(*action_cur)++;
	}
	/* Add end action to the actions. */
	actions_sfx->type = RTE_FLOW_ACTION_TYPE_END;
	actions_pre->type = RTE_FLOW_ACTION_TYPE_END;
	actions_pre++;
	/* Set the tag. */
	set_tag = (void *)actions_pre;
	set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
	/*
	 * Get the id from the qrss_pool so that the Q/RSS split shares the
	 * same id with the meter.
3564 */ 3565 tag_id = flow_qrss_get_id(dev); 3566 set_tag->data = tag_id << MLX5_MTR_COLOR_BITS; 3567 assert(tag_action); 3568 tag_action->conf = set_tag; 3569 /* Prepare the suffix subflow items. */ 3570 tag_item = sfx_items++; 3571 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { 3572 int item_type = items->type; 3573 3574 switch (item_type) { 3575 case RTE_FLOW_ITEM_TYPE_PORT_ID: 3576 memcpy(sfx_items, items, sizeof(*sfx_items)); 3577 sfx_items++; 3578 break; 3579 case RTE_FLOW_ITEM_TYPE_VLAN: 3580 if (copy_vlan) { 3581 memcpy(sfx_items, items, sizeof(*sfx_items)); 3582 /* 3583 * Convert to internal match item, it is used 3584 * for vlan push and set vid. 3585 */ 3586 sfx_items->type = MLX5_RTE_FLOW_ITEM_TYPE_VLAN; 3587 sfx_items++; 3588 } 3589 break; 3590 default: 3591 break; 3592 } 3593 } 3594 sfx_items->type = RTE_FLOW_ITEM_TYPE_END; 3595 sfx_items++; 3596 tag_spec = (struct mlx5_rte_flow_item_tag *)sfx_items; 3597 tag_spec->data = tag_id << MLX5_MTR_COLOR_BITS; 3598 tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error); 3599 tag_mask = tag_spec + 1; 3600 tag_mask->data = 0xffffff00; 3601 tag_item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG; 3602 tag_item->spec = tag_spec; 3603 tag_item->last = NULL; 3604 tag_item->mask = tag_mask; 3605 return tag_id; 3606 } 3607 3608 /** 3609 * Split action list having QUEUE/RSS for metadata register copy. 3610 * 3611 * Once Q/RSS action is detected in user's action list, the flow action 3612 * should be split in order to copy metadata registers, which will happen in 3613 * RX_CP_TBL like, 3614 * - CQE->flow_tag := reg_c[1] (MARK) 3615 * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META) 3616 * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL. 3617 * This is because the last action of each flow must be a terminal action 3618 * (QUEUE, RSS or DROP). 3619 * 3620 * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is 3621 * stored and kept in the mlx5_flow structure per each sub_flow. 3622 * 3623 * The Q/RSS action is replaced with, 3624 * - SET_TAG, setting the allocated flow ID to reg_c[2]. 3625 * And the following JUMP action is added at the end, 3626 * - JUMP, to RX_CP_TBL. 3627 * 3628 * A flow to perform remained Q/RSS action will be created in RX_ACT_TBL by 3629 * flow_create_split_metadata() routine. The flow will look like, 3630 * - If flow ID matches (reg_c[2]), perform Q/RSS. 3631 * 3632 * @param dev 3633 * Pointer to Ethernet device. 3634 * @param[out] split_actions 3635 * Pointer to store split actions to jump to CP_TBL. 3636 * @param[in] actions 3637 * Pointer to the list of original flow actions. 3638 * @param[in] qrss 3639 * Pointer to the Q/RSS action. 3640 * @param[in] actions_n 3641 * Number of original actions. 3642 * @param[out] error 3643 * Perform verbose error reporting if not NULL. 3644 * 3645 * @return 3646 * non-zero unique flow_id on success, otherwise 0 and 3647 * error/rte_error are set. 3648 */ 3649 static uint32_t 3650 flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, 3651 struct rte_flow_action *split_actions, 3652 const struct rte_flow_action *actions, 3653 const struct rte_flow_action *qrss, 3654 int actions_n, struct rte_flow_error *error) 3655 { 3656 struct mlx5_rte_flow_action_set_tag *set_tag; 3657 struct rte_flow_action_jump *jump; 3658 const int qrss_idx = qrss - actions; 3659 uint32_t flow_id = 0; 3660 int ret = 0; 3661 3662 /* 3663 * Given actions will be split 3664 * - Replace QUEUE/RSS action with SET_TAG to set flow ID. 
 * - Add jump to mreg CP_TBL.
 * As a result, there will be one more action.
 */
	++actions_n;
	memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
	set_tag = (void *)(split_actions + actions_n);
	/*
	 * If the tag action is not set to VOID (i.e. this is not the meter
	 * suffix flow), add the tag action; the meter suffix flow already
	 * has its tag added.
	 */
	if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) {
		/*
		 * Allocate the new subflow ID. This one is unique within
		 * device and not shared with representors. Otherwise,
		 * we would have to resolve multi-thread access synch
		 * issue. Each flow on the shared device is appended
		 * with source vport identifier, so the resulting
		 * flows will be unique in the shared (by master and
		 * representors) domain even if they have coinciding
		 * IDs.
		 */
		flow_id = flow_qrss_get_id(dev);
		if (!flow_id)
			return rte_flow_error_set(error, ENOMEM,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "can't allocate id "
						  "for split Q/RSS subflow");
		/* Internal SET_TAG action to set flow ID. */
		*set_tag = (struct mlx5_rte_flow_action_set_tag){
			.data = flow_id,
		};
		ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error);
		if (ret < 0)
			return ret;
		set_tag->id = ret;
		/* Replace the QUEUE/RSS action in the new actions array. */
		split_actions[qrss_idx] = (struct rte_flow_action){
			.type = MLX5_RTE_FLOW_ACTION_TYPE_TAG,
			.conf = set_tag,
		};
	}
	/* JUMP action to jump to mreg copy table (CP_TBL). */
	jump = (void *)(set_tag + 1);
	*jump = (struct rte_flow_action_jump){
		.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
	};
	split_actions[actions_n - 2] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_JUMP,
		.conf = jump,
	};
	split_actions[actions_n - 1] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_END,
	};
	return flow_id;
}

/**
 * Extend the given action list for Tx metadata copy.
 *
 * Copy the given action list to ext_actions and add a flow metadata
 * register copy action in order to copy reg_a set by WQE to reg_c[0].
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] ext_actions
 *   Pointer to the extended action list.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[in] actions_n
 *   Number of actions in the list.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in] encap_idx
 *   The encap action index.
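 *
 * For illustration only (the action lists are hypothetical): with actions
 * { RAW_ENCAP, END } and encap_idx == 0, the extended list becomes
 * { COPY_MREG, RAW_ENCAP, END }, i.e. the register copy is placed right
 * before the encapsulation; when there is no encap action in the list,
 * the copy action is simply appended before END.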
3739 * 3740 * @return 3741 * 0 on success, negative value otherwise 3742 */ 3743 static int 3744 flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, 3745 struct rte_flow_action *ext_actions, 3746 const struct rte_flow_action *actions, 3747 int actions_n, struct rte_flow_error *error, 3748 int encap_idx) 3749 { 3750 struct mlx5_flow_action_copy_mreg *cp_mreg = 3751 (struct mlx5_flow_action_copy_mreg *) 3752 (ext_actions + actions_n + 1); 3753 int ret; 3754 3755 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error); 3756 if (ret < 0) 3757 return ret; 3758 cp_mreg->dst = ret; 3759 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error); 3760 if (ret < 0) 3761 return ret; 3762 cp_mreg->src = ret; 3763 if (encap_idx != 0) 3764 memcpy(ext_actions, actions, sizeof(*ext_actions) * encap_idx); 3765 if (encap_idx == actions_n - 1) { 3766 ext_actions[actions_n - 1] = (struct rte_flow_action){ 3767 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 3768 .conf = cp_mreg, 3769 }; 3770 ext_actions[actions_n] = (struct rte_flow_action){ 3771 .type = RTE_FLOW_ACTION_TYPE_END, 3772 }; 3773 } else { 3774 ext_actions[encap_idx] = (struct rte_flow_action){ 3775 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 3776 .conf = cp_mreg, 3777 }; 3778 memcpy(ext_actions + encap_idx + 1, actions + encap_idx, 3779 sizeof(*ext_actions) * (actions_n - encap_idx)); 3780 } 3781 return 0; 3782 } 3783 3784 /** 3785 * The splitting for metadata feature. 3786 * 3787 * - Q/RSS action on NIC Rx should be split in order to pass by 3788 * the mreg copy table (RX_CP_TBL) and then it jumps to the 3789 * action table (RX_ACT_TBL) which has the split Q/RSS action. 3790 * 3791 * - All the actions on NIC Tx should have a mreg copy action to 3792 * copy reg_a from WQE to reg_c[0]. 3793 * 3794 * @param dev 3795 * Pointer to Ethernet device. 3796 * @param[in] flow 3797 * Parent flow structure pointer. 3798 * @param[in] prefix_layers 3799 * Prefix flow layer flags. 3800 * @param[in] attr 3801 * Flow rule attributes. 3802 * @param[in] items 3803 * Pattern specification (list terminated by the END pattern item). 3804 * @param[in] actions 3805 * Associated actions (list terminated by the END action). 3806 * @param[in] external 3807 * This flow rule is created by request external to PMD. 3808 * @param[out] error 3809 * Perform verbose error reporting if not NULL. 3810 * @return 3811 * 0 on success, negative value otherwise 3812 */ 3813 static int 3814 flow_create_split_metadata(struct rte_eth_dev *dev, 3815 struct rte_flow *flow, 3816 uint64_t prefix_layers, 3817 const struct rte_flow_attr *attr, 3818 const struct rte_flow_item items[], 3819 const struct rte_flow_action actions[], 3820 bool external, struct rte_flow_error *error) 3821 { 3822 struct mlx5_priv *priv = dev->data->dev_private; 3823 struct mlx5_dev_config *config = &priv->config; 3824 const struct rte_flow_action *qrss = NULL; 3825 struct rte_flow_action *ext_actions = NULL; 3826 struct mlx5_flow *dev_flow = NULL; 3827 uint32_t qrss_id = 0; 3828 int mtr_sfx = 0; 3829 size_t act_size; 3830 int actions_n; 3831 int encap_idx; 3832 int ret; 3833 3834 /* Check whether extensive metadata feature is engaged. 
*/ 3835 if (!config->dv_flow_en || 3836 config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 3837 !mlx5_flow_ext_mreg_supported(dev)) 3838 return flow_create_split_inner(dev, flow, NULL, prefix_layers, 3839 attr, items, actions, external, 3840 error); 3841 actions_n = flow_parse_metadata_split_actions_info(actions, &qrss, 3842 &encap_idx); 3843 if (qrss) { 3844 /* Exclude hairpin flows from splitting. */ 3845 if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) { 3846 const struct rte_flow_action_queue *queue; 3847 3848 queue = qrss->conf; 3849 if (mlx5_rxq_get_type(dev, queue->index) == 3850 MLX5_RXQ_TYPE_HAIRPIN) 3851 qrss = NULL; 3852 } else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) { 3853 const struct rte_flow_action_rss *rss; 3854 3855 rss = qrss->conf; 3856 if (mlx5_rxq_get_type(dev, rss->queue[0]) == 3857 MLX5_RXQ_TYPE_HAIRPIN) 3858 qrss = NULL; 3859 } 3860 } 3861 if (qrss) { 3862 /* Check if it is in meter suffix table. */ 3863 mtr_sfx = attr->group == (attr->transfer ? 3864 (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) : 3865 MLX5_FLOW_TABLE_LEVEL_SUFFIX); 3866 /* 3867 * Q/RSS action on NIC Rx should be split in order to pass by 3868 * the mreg copy table (RX_CP_TBL) and then it jumps to the 3869 * action table (RX_ACT_TBL) which has the split Q/RSS action. 3870 */ 3871 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) + 3872 sizeof(struct rte_flow_action_set_tag) + 3873 sizeof(struct rte_flow_action_jump); 3874 ext_actions = rte_zmalloc(__func__, act_size, 0); 3875 if (!ext_actions) 3876 return rte_flow_error_set(error, ENOMEM, 3877 RTE_FLOW_ERROR_TYPE_ACTION, 3878 NULL, "no memory to split " 3879 "metadata flow"); 3880 /* 3881 * If we are the suffix flow of meter, tag already exist. 3882 * Set the tag action to void. 3883 */ 3884 if (mtr_sfx) 3885 ext_actions[qrss - actions].type = 3886 RTE_FLOW_ACTION_TYPE_VOID; 3887 else 3888 ext_actions[qrss - actions].type = 3889 MLX5_RTE_FLOW_ACTION_TYPE_TAG; 3890 /* 3891 * Create the new actions list with removed Q/RSS action 3892 * and appended set tag and jump to register copy table 3893 * (RX_CP_TBL). We should preallocate unique tag ID here 3894 * in advance, because it is needed for set tag action. 3895 */ 3896 qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions, 3897 qrss, actions_n, error); 3898 if (!mtr_sfx && !qrss_id) { 3899 ret = -rte_errno; 3900 goto exit; 3901 } 3902 } else if (attr->egress && !attr->transfer) { 3903 /* 3904 * All the actions on NIC Tx should have a metadata register 3905 * copy action to copy reg_a from WQE to reg_c[meta] 3906 */ 3907 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) + 3908 sizeof(struct mlx5_flow_action_copy_mreg); 3909 ext_actions = rte_zmalloc(__func__, act_size, 0); 3910 if (!ext_actions) 3911 return rte_flow_error_set(error, ENOMEM, 3912 RTE_FLOW_ERROR_TYPE_ACTION, 3913 NULL, "no memory to split " 3914 "metadata flow"); 3915 /* Create the action list appended with copy register. */ 3916 ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions, 3917 actions_n, error, encap_idx); 3918 if (ret < 0) 3919 goto exit; 3920 } 3921 /* Add the unmodified original or prefix subflow. */ 3922 ret = flow_create_split_inner(dev, flow, &dev_flow, prefix_layers, attr, 3923 items, ext_actions ? ext_actions : 3924 actions, external, error); 3925 if (ret < 0) 3926 goto exit; 3927 MLX5_ASSERT(dev_flow); 3928 if (qrss) { 3929 const struct rte_flow_attr q_attr = { 3930 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, 3931 .ingress = 1, 3932 }; 3933 /* Internal PMD action to set register. 
		 */
		struct mlx5_rte_flow_item_tag q_tag_spec = {
			.data = qrss_id,
			.id = 0,
		};
		struct rte_flow_item q_items[] = {
			{
				.type = MLX5_RTE_FLOW_ITEM_TYPE_TAG,
				.spec = &q_tag_spec,
				.last = NULL,
				.mask = NULL,
			},
			{
				.type = RTE_FLOW_ITEM_TYPE_END,
			},
		};
		struct rte_flow_action q_actions[] = {
			{
				.type = qrss->type,
				.conf = qrss->conf,
			},
			{
				.type = RTE_FLOW_ACTION_TYPE_END,
			},
		};
		uint64_t layers = flow_get_prefix_layer_flags(dev_flow);

		/*
		 * Configure the tag item only if there is no meter subflow.
		 * The meter suffix subflow already carries the tag, so its
		 * items can be used as they are.
		 */
		if (qrss_id) {
			/* Not a meter subflow. */
			MLX5_ASSERT(!mtr_sfx);
			/*
			 * Keep the unique id in the prefix flow because the
			 * prefix flow is destroyed after the suffix flow.
			 * The id is freed only when no actual flow uses it
			 * anymore and only then can it be reallocated (for
			 * example, for other flows in other threads).
			 */
			dev_flow->handle.qrss_id = qrss_id;
			ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
						   error);
			if (ret < 0)
				goto exit;
			q_tag_spec.id = ret;
		}
		dev_flow = NULL;
		/* Add suffix subflow to execute Q/RSS. */
		ret = flow_create_split_inner(dev, flow, &dev_flow, layers,
					      &q_attr, mtr_sfx ? items :
					      q_items, q_actions,
					      external, error);
		if (ret < 0)
			goto exit;
		/*
		 * Clear qrss_id; it is released at exit only when the
		 * subflow creation has failed.
		 */
		qrss_id = 0;
		MLX5_ASSERT(dev_flow);
	}

exit:
	/*
	 * We do not destroy the partially created sub_flows in case of error.
	 * They are already included in the parent flow list and will be
	 * destroyed by flow_drv_destroy().
	 */
	flow_qrss_free_id(dev, qrss_id);
	rte_free(ext_actions);
	return ret;
}

/**
 * The splitting for meter feature.
 *
 * - The meter flow is split into two flows, a prefix and a suffix flow.
 *   The packets are only meaningful if they pass the prefix meter action.
 *
 * - Reg_C_5 is used for the packets to match between the prefix and the
 *   suffix flow.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Parent flow structure pointer.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[in] external
 *   This flow rule is created by request external to PMD.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
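 *
 * A rough sketch of the split (the action list is hypothetical): a rule
 * with actions { METER, RSS, END } becomes a prefix flow carrying
 * { TAG, METER, END } in the original table and a suffix flow in the
 * meter suffix table that matches the tag item prepared by
 * flow_meter_split_prep() and carries { RSS, END }.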
 * @return
 *   0 on success, negative value otherwise
 */
static int
flow_create_split_meter(struct rte_eth_dev *dev,
			struct rte_flow *flow,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item items[],
			const struct rte_flow_action actions[],
			bool external, struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_action *sfx_actions = NULL;
	struct rte_flow_action *pre_actions = NULL;
	struct rte_flow_item *sfx_items = NULL;
	struct mlx5_flow *dev_flow = NULL;
	struct rte_flow_attr sfx_attr = *attr;
	uint32_t mtr = 0;
	uint32_t mtr_tag_id = 0;
	size_t act_size;
	size_t item_size;
	int actions_n = 0;
	int ret;

	if (priv->mtr_en)
		actions_n = flow_check_meter_action(actions, &mtr);
	if (mtr) {
		/* The five prefix actions: meter, decap, encap, tag, end. */
		act_size = sizeof(struct rte_flow_action) * (actions_n + 5) +
			   sizeof(struct mlx5_rte_flow_action_set_tag);
		/* The suffix items: tag, vlan, port id, end. */
#define METER_SUFFIX_ITEM 4
		item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
			    sizeof(struct mlx5_rte_flow_item_tag) * 2;
		sfx_actions = rte_zmalloc(__func__, (act_size + item_size), 0);
		if (!sfx_actions)
			return rte_flow_error_set(error, ENOMEM,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "no memory to split "
						  "meter flow");
		sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
			     act_size);
		pre_actions = sfx_actions + actions_n;
		mtr_tag_id = flow_meter_split_prep(dev, items, sfx_items,
						   actions, sfx_actions,
						   pre_actions);
		if (!mtr_tag_id) {
			ret = -rte_errno;
			goto exit;
		}
		/* Add the prefix subflow. */
		ret = flow_create_split_inner(dev, flow, &dev_flow, 0, attr,
					      items, pre_actions, external,
					      error);
		if (ret) {
			ret = -rte_errno;
			goto exit;
		}
		dev_flow->handle.mtr_flow_id = mtr_tag_id;
		/* Set the suffix group attribute. */
		sfx_attr.group = sfx_attr.transfer ?
				(MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
				 MLX5_FLOW_TABLE_LEVEL_SUFFIX;
	}
	/* Add the suffix or, when there is no meter, the original subflow. */
	ret = flow_create_split_metadata(dev, flow, dev_flow ?
					 flow_get_prefix_layer_flags(dev_flow) :
					 0, &sfx_attr,
					 sfx_items ? sfx_items : items,
					 sfx_actions ? sfx_actions : actions,
					 external, error);
exit:
	if (sfx_actions)
		rte_free(sfx_actions);
	return ret;
}

/**
 * Split the flow into a set of subflows. The splitters might be linked
 * in a chain, like this:
 * flow_create_split_outer() calls:
 *   flow_create_split_meter() calls:
 *     flow_create_split_metadata(meter_subflow_0) calls:
 *       flow_create_split_inner(metadata_subflow_0)
 *       flow_create_split_inner(metadata_subflow_1)
 *       flow_create_split_inner(metadata_subflow_2)
 *     flow_create_split_metadata(meter_subflow_1) calls:
 *       flow_create_split_inner(metadata_subflow_0)
 *       flow_create_split_inner(metadata_subflow_1)
 *       flow_create_split_inner(metadata_subflow_2)
 *
 * This provides a flexible way to add new levels of flow splitting.
 * All the successfully created subflows are included in the parent
 * flow dev_flow list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Parent flow structure pointer.
 * @param[in] attr
 *   Flow rule attributes.
4131 * @param[in] items 4132 * Pattern specification (list terminated by the END pattern item). 4133 * @param[in] actions 4134 * Associated actions (list terminated by the END action). 4135 * @param[in] external 4136 * This flow rule is created by request external to PMD. 4137 * @param[out] error 4138 * Perform verbose error reporting if not NULL. 4139 * @return 4140 * 0 on success, negative value otherwise 4141 */ 4142 static int 4143 flow_create_split_outer(struct rte_eth_dev *dev, 4144 struct rte_flow *flow, 4145 const struct rte_flow_attr *attr, 4146 const struct rte_flow_item items[], 4147 const struct rte_flow_action actions[], 4148 bool external, struct rte_flow_error *error) 4149 { 4150 int ret; 4151 4152 ret = flow_create_split_meter(dev, flow, attr, items, 4153 actions, external, error); 4154 MLX5_ASSERT(ret <= 0); 4155 return ret; 4156 } 4157 4158 /** 4159 * Create a flow and add it to @p list. 4160 * 4161 * @param dev 4162 * Pointer to Ethernet device. 4163 * @param list 4164 * Pointer to a TAILQ flow list. If this parameter NULL, 4165 * no list insertion occurred, flow is just created, 4166 * this is caller's responsibility to track the 4167 * created flow. 4168 * @param[in] attr 4169 * Flow rule attributes. 4170 * @param[in] items 4171 * Pattern specification (list terminated by the END pattern item). 4172 * @param[in] actions 4173 * Associated actions (list terminated by the END action). 4174 * @param[in] external 4175 * This flow rule is created by request external to PMD. 4176 * @param[out] error 4177 * Perform verbose error reporting if not NULL. 4178 * 4179 * @return 4180 * A flow on success, NULL otherwise and rte_errno is set. 4181 */ 4182 static struct rte_flow * 4183 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, 4184 const struct rte_flow_attr *attr, 4185 const struct rte_flow_item items[], 4186 const struct rte_flow_action actions[], 4187 bool external, struct rte_flow_error *error) 4188 { 4189 struct mlx5_priv *priv = dev->data->dev_private; 4190 struct rte_flow *flow = NULL; 4191 struct mlx5_flow *dev_flow; 4192 const struct rte_flow_action_rss *rss; 4193 union { 4194 struct rte_flow_expand_rss buf; 4195 uint8_t buffer[2048]; 4196 } expand_buffer; 4197 union { 4198 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; 4199 uint8_t buffer[2048]; 4200 } actions_rx; 4201 union { 4202 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; 4203 uint8_t buffer[2048]; 4204 } actions_hairpin_tx; 4205 union { 4206 struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS]; 4207 uint8_t buffer[2048]; 4208 } items_tx; 4209 struct rte_flow_expand_rss *buf = &expand_buffer.buf; 4210 const struct rte_flow_action *p_actions_rx = actions; 4211 uint32_t i; 4212 uint32_t flow_size; 4213 int hairpin_flow = 0; 4214 uint32_t hairpin_id = 0; 4215 struct rte_flow_attr attr_tx = { .priority = 0 }; 4216 int ret = flow_drv_validate(dev, attr, items, p_actions_rx, external, 4217 error); 4218 4219 if (ret < 0) 4220 return NULL; 4221 hairpin_flow = flow_check_hairpin_split(dev, attr, actions); 4222 if (hairpin_flow > 0) { 4223 if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) { 4224 rte_errno = EINVAL; 4225 return NULL; 4226 } 4227 flow_hairpin_split(dev, actions, actions_rx.actions, 4228 actions_hairpin_tx.actions, items_tx.items, 4229 &hairpin_id); 4230 p_actions_rx = actions_rx.actions; 4231 } 4232 flow_size = sizeof(struct rte_flow); 4233 rss = flow_get_rss_action(p_actions_rx); 4234 if (rss) 4235 flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t), 4236 sizeof(void *)); 
4237 else 4238 flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *)); 4239 flow = rte_calloc(__func__, 1, flow_size, 0); 4240 if (!flow) { 4241 rte_errno = ENOMEM; 4242 goto error_before_flow; 4243 } 4244 flow->drv_type = flow_get_drv_type(dev, attr); 4245 if (hairpin_id != 0) 4246 flow->hairpin_flow_id = hairpin_id; 4247 MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN && 4248 flow->drv_type < MLX5_FLOW_TYPE_MAX); 4249 flow->rss.queue = (void *)(flow + 1); 4250 if (rss) { 4251 /* 4252 * The following information is required by 4253 * mlx5_flow_hashfields_adjust() in advance. 4254 */ 4255 flow->rss.level = rss->level; 4256 /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */ 4257 flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types; 4258 } 4259 LIST_INIT(&flow->dev_flows); 4260 if (rss && rss->types) { 4261 unsigned int graph_root; 4262 4263 graph_root = find_graph_root(items, rss->level); 4264 ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer), 4265 items, rss->types, 4266 mlx5_support_expansion, 4267 graph_root); 4268 MLX5_ASSERT(ret > 0 && 4269 (unsigned int)ret < sizeof(expand_buffer.buffer)); 4270 } else { 4271 buf->entries = 1; 4272 buf->entry[0].pattern = (void *)(uintptr_t)items; 4273 } 4274 for (i = 0; i < buf->entries; ++i) { 4275 /* 4276 * The splitter may create multiple dev_flows, 4277 * depending on configuration. In the simplest 4278 * case it just creates unmodified original flow. 4279 */ 4280 ret = flow_create_split_outer(dev, flow, attr, 4281 buf->entry[i].pattern, 4282 p_actions_rx, external, 4283 error); 4284 if (ret < 0) 4285 goto error; 4286 } 4287 /* Create the tx flow. */ 4288 if (hairpin_flow) { 4289 attr_tx.group = MLX5_HAIRPIN_TX_TABLE; 4290 attr_tx.ingress = 0; 4291 attr_tx.egress = 1; 4292 dev_flow = flow_drv_prepare(flow, &attr_tx, items_tx.items, 4293 actions_hairpin_tx.actions, error); 4294 if (!dev_flow) 4295 goto error; 4296 dev_flow->flow = flow; 4297 dev_flow->external = 0; 4298 LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); 4299 ret = flow_drv_translate(dev, dev_flow, &attr_tx, 4300 items_tx.items, 4301 actions_hairpin_tx.actions, error); 4302 if (ret < 0) 4303 goto error; 4304 } 4305 /* 4306 * Update the metadata register copy table. If extensive 4307 * metadata feature is enabled and registers are supported 4308 * we might create the extra rte_flow for each unique 4309 * MARK/FLAG action ID. 4310 * 4311 * The table is updated for ingress Flows only, because 4312 * the egress Flows belong to the different device and 4313 * copy table should be updated in peer NIC Rx domain. 4314 */ 4315 if (attr->ingress && 4316 (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) { 4317 ret = flow_mreg_update_copy_table(dev, flow, actions, error); 4318 if (ret) 4319 goto error; 4320 } 4321 if (dev->data->dev_started) { 4322 ret = flow_drv_apply(dev, flow, error); 4323 if (ret < 0) 4324 goto error; 4325 } 4326 if (list) 4327 TAILQ_INSERT_TAIL(list, flow, next); 4328 flow_rxq_flags_set(dev, flow); 4329 return flow; 4330 error_before_flow: 4331 if (hairpin_id) 4332 mlx5_flow_id_release(priv->sh->flow_id_pool, 4333 hairpin_id); 4334 return NULL; 4335 error: 4336 MLX5_ASSERT(flow); 4337 flow_mreg_del_copy_action(dev, flow); 4338 ret = rte_errno; /* Save rte_errno before cleanup. */ 4339 if (flow->hairpin_flow_id) 4340 mlx5_flow_id_release(priv->sh->flow_id_pool, 4341 flow->hairpin_flow_id); 4342 MLX5_ASSERT(flow); 4343 flow_drv_destroy(dev, flow); 4344 rte_free(flow); 4345 rte_errno = ret; /* Restore rte_errno. 
*/ 4346 return NULL; 4347 } 4348 4349 /** 4350 * Create a dedicated flow rule on e-switch table 0 (root table), to direct all 4351 * incoming packets to table 1. 4352 * 4353 * Other flow rules, requested for group n, will be created in 4354 * e-switch table n+1. 4355 * Jump action to e-switch group n will be created to group n+1. 4356 * 4357 * Used when working in switchdev mode, to utilise advantages of table 1 4358 * and above. 4359 * 4360 * @param dev 4361 * Pointer to Ethernet device. 4362 * 4363 * @return 4364 * Pointer to flow on success, NULL otherwise and rte_errno is set. 4365 */ 4366 struct rte_flow * 4367 mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev) 4368 { 4369 const struct rte_flow_attr attr = { 4370 .group = 0, 4371 .priority = 0, 4372 .ingress = 1, 4373 .egress = 0, 4374 .transfer = 1, 4375 }; 4376 const struct rte_flow_item pattern = { 4377 .type = RTE_FLOW_ITEM_TYPE_END, 4378 }; 4379 struct rte_flow_action_jump jump = { 4380 .group = 1, 4381 }; 4382 const struct rte_flow_action actions[] = { 4383 { 4384 .type = RTE_FLOW_ACTION_TYPE_JUMP, 4385 .conf = &jump, 4386 }, 4387 { 4388 .type = RTE_FLOW_ACTION_TYPE_END, 4389 }, 4390 }; 4391 struct mlx5_priv *priv = dev->data->dev_private; 4392 struct rte_flow_error error; 4393 4394 return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern, 4395 actions, false, &error); 4396 } 4397 4398 /** 4399 * Create a flow. 4400 * 4401 * @see rte_flow_create() 4402 * @see rte_flow_ops 4403 */ 4404 struct rte_flow * 4405 mlx5_flow_create(struct rte_eth_dev *dev, 4406 const struct rte_flow_attr *attr, 4407 const struct rte_flow_item items[], 4408 const struct rte_flow_action actions[], 4409 struct rte_flow_error *error) 4410 { 4411 struct mlx5_priv *priv = dev->data->dev_private; 4412 4413 return flow_list_create(dev, &priv->flows, 4414 attr, items, actions, true, error); 4415 } 4416 4417 /** 4418 * Destroy a flow in a list. 4419 * 4420 * @param dev 4421 * Pointer to Ethernet device. 4422 * @param list 4423 * Pointer to a TAILQ flow list. If this parameter NULL, 4424 * there is no flow removal from the list. 4425 * @param[in] flow 4426 * Flow to destroy. 4427 */ 4428 static void 4429 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, 4430 struct rte_flow *flow) 4431 { 4432 struct mlx5_priv *priv = dev->data->dev_private; 4433 4434 /* 4435 * Update RX queue flags only if port is started, otherwise it is 4436 * already clean. 4437 */ 4438 if (dev->data->dev_started) 4439 flow_rxq_flags_trim(dev, flow); 4440 if (flow->hairpin_flow_id) 4441 mlx5_flow_id_release(priv->sh->flow_id_pool, 4442 flow->hairpin_flow_id); 4443 flow_drv_destroy(dev, flow); 4444 if (list) 4445 TAILQ_REMOVE(list, flow, next); 4446 flow_mreg_del_copy_action(dev, flow); 4447 rte_free(flow->fdir); 4448 rte_free(flow); 4449 } 4450 4451 /** 4452 * Destroy all flows. 4453 * 4454 * @param dev 4455 * Pointer to Ethernet device. 4456 * @param list 4457 * Pointer to a TAILQ flow list. 4458 * @param active 4459 * If flushing is called avtively. 4460 */ 4461 void 4462 mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list, 4463 bool active) 4464 { 4465 uint32_t num_flushed = 0; 4466 4467 while (!TAILQ_EMPTY(list)) { 4468 struct rte_flow *flow; 4469 4470 flow = TAILQ_FIRST(list); 4471 flow_list_destroy(dev, list, flow); 4472 num_flushed++; 4473 } 4474 if (active) { 4475 DRV_LOG(INFO, "port %u: %u flows flushed before stopping", 4476 dev->data->port_id, num_flushed); 4477 } 4478 } 4479 4480 /** 4481 * Remove all flows. 
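 *
 * The flows are only removed from the hardware; they stay on the list so
 * that mlx5_flow_start() can re-apply them later. A typical (illustrative)
 * call from the port stop path:
 *
 * @code
 * struct mlx5_priv *priv = dev->data->dev_private;
 *
 * mlx5_flow_stop(dev, &priv->flows);
 * @endcode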
4482 * 4483 * @param dev 4484 * Pointer to Ethernet device. 4485 * @param list 4486 * Pointer to a TAILQ flow list. 4487 */ 4488 void 4489 mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) 4490 { 4491 struct rte_flow *flow; 4492 4493 TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) { 4494 flow_drv_remove(dev, flow); 4495 flow_mreg_stop_copy_action(dev, flow); 4496 } 4497 flow_mreg_del_default_copy_action(dev); 4498 flow_rxq_flags_clear(dev); 4499 } 4500 4501 /** 4502 * Add all flows. 4503 * 4504 * @param dev 4505 * Pointer to Ethernet device. 4506 * @param list 4507 * Pointer to a TAILQ flow list. 4508 * 4509 * @return 4510 * 0 on success, a negative errno value otherwise and rte_errno is set. 4511 */ 4512 int 4513 mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) 4514 { 4515 struct rte_flow *flow; 4516 struct rte_flow_error error; 4517 int ret = 0; 4518 4519 /* Make sure default copy action (reg_c[0] -> reg_b) is created. */ 4520 ret = flow_mreg_add_default_copy_action(dev, &error); 4521 if (ret < 0) 4522 return -rte_errno; 4523 /* Apply Flows created by application. */ 4524 TAILQ_FOREACH(flow, list, next) { 4525 ret = flow_mreg_start_copy_action(dev, flow); 4526 if (ret < 0) 4527 goto error; 4528 ret = flow_drv_apply(dev, flow, &error); 4529 if (ret < 0) 4530 goto error; 4531 flow_rxq_flags_set(dev, flow); 4532 } 4533 return 0; 4534 error: 4535 ret = rte_errno; /* Save rte_errno before cleanup. */ 4536 mlx5_flow_stop(dev, list); 4537 rte_errno = ret; /* Restore rte_errno. */ 4538 return -rte_errno; 4539 } 4540 4541 /** 4542 * Stop all default actions for flows. 4543 * 4544 * @param dev 4545 * Pointer to Ethernet device. 4546 * @param list 4547 * Pointer to a TAILQ flow list. 4548 */ 4549 void 4550 mlx5_flow_stop_default(struct rte_eth_dev *dev) 4551 { 4552 flow_mreg_del_default_copy_action(dev); 4553 } 4554 4555 /** 4556 * Start all default actions for flows. 4557 * 4558 * @param dev 4559 * Pointer to Ethernet device. 4560 * @return 4561 * 0 on success, a negative errno value otherwise and rte_errno is set. 4562 */ 4563 int 4564 mlx5_flow_start_default(struct rte_eth_dev *dev) 4565 { 4566 struct rte_flow_error error; 4567 4568 /* Make sure default copy action (reg_c[0] -> reg_b) is created. */ 4569 return flow_mreg_add_default_copy_action(dev, &error); 4570 } 4571 4572 /** 4573 * Verify the flow list is empty 4574 * 4575 * @param dev 4576 * Pointer to Ethernet device. 4577 * 4578 * @return the number of flows not released. 4579 */ 4580 int 4581 mlx5_flow_verify(struct rte_eth_dev *dev) 4582 { 4583 struct mlx5_priv *priv = dev->data->dev_private; 4584 struct rte_flow *flow; 4585 int ret = 0; 4586 4587 TAILQ_FOREACH(flow, &priv->flows, next) { 4588 DRV_LOG(DEBUG, "port %u flow %p still referenced", 4589 dev->data->port_id, (void *)flow); 4590 ++ret; 4591 } 4592 return ret; 4593 } 4594 4595 /** 4596 * Enable default hairpin egress flow. 4597 * 4598 * @param dev 4599 * Pointer to Ethernet device. 4600 * @param queue 4601 * The queue index. 4602 * 4603 * @return 4604 * 0 on success, a negative errno value otherwise and rte_errno is set. 
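 *
 * The rule matches on the Tx queue index and jumps to the
 * MLX5_HAIRPIN_TX_TABLE group where the Tx part of the hairpin flow is
 * installed. It is typically created once per hairpin Tx queue when the
 * port is started (a descriptive note; the exact call site is outside
 * this file).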
4605 */ 4606 int 4607 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, 4608 uint32_t queue) 4609 { 4610 struct mlx5_priv *priv = dev->data->dev_private; 4611 const struct rte_flow_attr attr = { 4612 .egress = 1, 4613 .priority = 0, 4614 }; 4615 struct mlx5_rte_flow_item_tx_queue queue_spec = { 4616 .queue = queue, 4617 }; 4618 struct mlx5_rte_flow_item_tx_queue queue_mask = { 4619 .queue = UINT32_MAX, 4620 }; 4621 struct rte_flow_item items[] = { 4622 { 4623 .type = MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE, 4624 .spec = &queue_spec, 4625 .last = NULL, 4626 .mask = &queue_mask, 4627 }, 4628 { 4629 .type = RTE_FLOW_ITEM_TYPE_END, 4630 }, 4631 }; 4632 struct rte_flow_action_jump jump = { 4633 .group = MLX5_HAIRPIN_TX_TABLE, 4634 }; 4635 struct rte_flow_action actions[2]; 4636 struct rte_flow *flow; 4637 struct rte_flow_error error; 4638 4639 actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP; 4640 actions[0].conf = &jump; 4641 actions[1].type = RTE_FLOW_ACTION_TYPE_END; 4642 flow = flow_list_create(dev, &priv->ctrl_flows, 4643 &attr, items, actions, false, &error); 4644 if (!flow) { 4645 DRV_LOG(DEBUG, 4646 "Failed to create ctrl flow: rte_errno(%d)," 4647 " type(%d), message(%s)", 4648 rte_errno, error.type, 4649 error.message ? error.message : " (no stated reason)"); 4650 return -rte_errno; 4651 } 4652 return 0; 4653 } 4654 4655 /** 4656 * Enable a control flow configured from the control plane. 4657 * 4658 * @param dev 4659 * Pointer to Ethernet device. 4660 * @param eth_spec 4661 * An Ethernet flow spec to apply. 4662 * @param eth_mask 4663 * An Ethernet flow mask to apply. 4664 * @param vlan_spec 4665 * A VLAN flow spec to apply. 4666 * @param vlan_mask 4667 * A VLAN flow mask to apply. 4668 * 4669 * @return 4670 * 0 on success, a negative errno value otherwise and rte_errno is set. 4671 */ 4672 int 4673 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, 4674 struct rte_flow_item_eth *eth_spec, 4675 struct rte_flow_item_eth *eth_mask, 4676 struct rte_flow_item_vlan *vlan_spec, 4677 struct rte_flow_item_vlan *vlan_mask) 4678 { 4679 struct mlx5_priv *priv = dev->data->dev_private; 4680 const struct rte_flow_attr attr = { 4681 .ingress = 1, 4682 .priority = MLX5_FLOW_PRIO_RSVD, 4683 }; 4684 struct rte_flow_item items[] = { 4685 { 4686 .type = RTE_FLOW_ITEM_TYPE_ETH, 4687 .spec = eth_spec, 4688 .last = NULL, 4689 .mask = eth_mask, 4690 }, 4691 { 4692 .type = (vlan_spec) ? 
RTE_FLOW_ITEM_TYPE_VLAN : 4693 RTE_FLOW_ITEM_TYPE_END, 4694 .spec = vlan_spec, 4695 .last = NULL, 4696 .mask = vlan_mask, 4697 }, 4698 { 4699 .type = RTE_FLOW_ITEM_TYPE_END, 4700 }, 4701 }; 4702 uint16_t queue[priv->reta_idx_n]; 4703 struct rte_flow_action_rss action_rss = { 4704 .func = RTE_ETH_HASH_FUNCTION_DEFAULT, 4705 .level = 0, 4706 .types = priv->rss_conf.rss_hf, 4707 .key_len = priv->rss_conf.rss_key_len, 4708 .queue_num = priv->reta_idx_n, 4709 .key = priv->rss_conf.rss_key, 4710 .queue = queue, 4711 }; 4712 struct rte_flow_action actions[] = { 4713 { 4714 .type = RTE_FLOW_ACTION_TYPE_RSS, 4715 .conf = &action_rss, 4716 }, 4717 { 4718 .type = RTE_FLOW_ACTION_TYPE_END, 4719 }, 4720 }; 4721 struct rte_flow *flow; 4722 struct rte_flow_error error; 4723 unsigned int i; 4724 4725 if (!priv->reta_idx_n || !priv->rxqs_n) { 4726 return 0; 4727 } 4728 for (i = 0; i != priv->reta_idx_n; ++i) 4729 queue[i] = (*priv->reta_idx)[i]; 4730 flow = flow_list_create(dev, &priv->ctrl_flows, 4731 &attr, items, actions, false, &error); 4732 if (!flow) 4733 return -rte_errno; 4734 return 0; 4735 } 4736 4737 /** 4738 * Enable a flow control configured from the control plane. 4739 * 4740 * @param dev 4741 * Pointer to Ethernet device. 4742 * @param eth_spec 4743 * An Ethernet flow spec to apply. 4744 * @param eth_mask 4745 * An Ethernet flow mask to apply. 4746 * 4747 * @return 4748 * 0 on success, a negative errno value otherwise and rte_errno is set. 4749 */ 4750 int 4751 mlx5_ctrl_flow(struct rte_eth_dev *dev, 4752 struct rte_flow_item_eth *eth_spec, 4753 struct rte_flow_item_eth *eth_mask) 4754 { 4755 return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL); 4756 } 4757 4758 /** 4759 * Destroy a flow. 4760 * 4761 * @see rte_flow_destroy() 4762 * @see rte_flow_ops 4763 */ 4764 int 4765 mlx5_flow_destroy(struct rte_eth_dev *dev, 4766 struct rte_flow *flow, 4767 struct rte_flow_error *error __rte_unused) 4768 { 4769 struct mlx5_priv *priv = dev->data->dev_private; 4770 4771 flow_list_destroy(dev, &priv->flows, flow); 4772 return 0; 4773 } 4774 4775 /** 4776 * Destroy all flows. 4777 * 4778 * @see rte_flow_flush() 4779 * @see rte_flow_ops 4780 */ 4781 int 4782 mlx5_flow_flush(struct rte_eth_dev *dev, 4783 struct rte_flow_error *error __rte_unused) 4784 { 4785 struct mlx5_priv *priv = dev->data->dev_private; 4786 4787 mlx5_flow_list_flush(dev, &priv->flows, false); 4788 return 0; 4789 } 4790 4791 /** 4792 * Isolated mode. 4793 * 4794 * @see rte_flow_isolate() 4795 * @see rte_flow_ops 4796 */ 4797 int 4798 mlx5_flow_isolate(struct rte_eth_dev *dev, 4799 int enable, 4800 struct rte_flow_error *error) 4801 { 4802 struct mlx5_priv *priv = dev->data->dev_private; 4803 4804 if (dev->data->dev_started) { 4805 rte_flow_error_set(error, EBUSY, 4806 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 4807 NULL, 4808 "port must be stopped first"); 4809 return -rte_errno; 4810 } 4811 priv->isolated = !!enable; 4812 if (enable) 4813 dev->dev_ops = &mlx5_dev_ops_isolate; 4814 else 4815 dev->dev_ops = &mlx5_dev_ops; 4816 return 0; 4817 } 4818 4819 /** 4820 * Query a flow. 
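 *
 * A minimal sketch of a query as issued from the application side; the
 * request reaches this dispatcher through mlx5_flow_query(). The COUNT
 * action, port_id, flow and the error handling are illustrative
 * assumptions:
 *
 * @code
 * struct rte_flow_query_count count = { .reset = 1 };
 * struct rte_flow_action count_action = {
 *	.type = RTE_FLOW_ACTION_TYPE_COUNT,
 * };
 * struct rte_flow_error error;
 *
 * if (!rte_flow_query(port_id, flow, &count_action, &count, &error))
 *	printf("hits=%llu bytes=%llu\n",
 *	       (unsigned long long)count.hits,
 *	       (unsigned long long)count.bytes);
 * @endcode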
4821 * 4822 * @see rte_flow_query() 4823 * @see rte_flow_ops 4824 */ 4825 static int 4826 flow_drv_query(struct rte_eth_dev *dev, 4827 struct rte_flow *flow, 4828 const struct rte_flow_action *actions, 4829 void *data, 4830 struct rte_flow_error *error) 4831 { 4832 const struct mlx5_flow_driver_ops *fops; 4833 enum mlx5_flow_drv_type ftype = flow->drv_type; 4834 4835 MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX); 4836 fops = flow_get_drv_ops(ftype); 4837 4838 return fops->query(dev, flow, actions, data, error); 4839 } 4840 4841 /** 4842 * Query a flow. 4843 * 4844 * @see rte_flow_query() 4845 * @see rte_flow_ops 4846 */ 4847 int 4848 mlx5_flow_query(struct rte_eth_dev *dev, 4849 struct rte_flow *flow, 4850 const struct rte_flow_action *actions, 4851 void *data, 4852 struct rte_flow_error *error) 4853 { 4854 int ret; 4855 4856 ret = flow_drv_query(dev, flow, actions, data, error); 4857 if (ret < 0) 4858 return ret; 4859 return 0; 4860 } 4861 4862 /** 4863 * Convert a flow director filter to a generic flow. 4864 * 4865 * @param dev 4866 * Pointer to Ethernet device. 4867 * @param fdir_filter 4868 * Flow director filter to add. 4869 * @param attributes 4870 * Generic flow parameters structure. 4871 * 4872 * @return 4873 * 0 on success, a negative errno value otherwise and rte_errno is set. 4874 */ 4875 static int 4876 flow_fdir_filter_convert(struct rte_eth_dev *dev, 4877 const struct rte_eth_fdir_filter *fdir_filter, 4878 struct mlx5_fdir *attributes) 4879 { 4880 struct mlx5_priv *priv = dev->data->dev_private; 4881 const struct rte_eth_fdir_input *input = &fdir_filter->input; 4882 const struct rte_eth_fdir_masks *mask = 4883 &dev->data->dev_conf.fdir_conf.mask; 4884 4885 /* Validate queue number. */ 4886 if (fdir_filter->action.rx_queue >= priv->rxqs_n) { 4887 DRV_LOG(ERR, "port %u invalid queue number %d", 4888 dev->data->port_id, fdir_filter->action.rx_queue); 4889 rte_errno = EINVAL; 4890 return -rte_errno; 4891 } 4892 attributes->attr.ingress = 1; 4893 attributes->items[0] = (struct rte_flow_item) { 4894 .type = RTE_FLOW_ITEM_TYPE_ETH, 4895 .spec = &attributes->l2, 4896 .mask = &attributes->l2_mask, 4897 }; 4898 switch (fdir_filter->action.behavior) { 4899 case RTE_ETH_FDIR_ACCEPT: 4900 attributes->actions[0] = (struct rte_flow_action){ 4901 .type = RTE_FLOW_ACTION_TYPE_QUEUE, 4902 .conf = &attributes->queue, 4903 }; 4904 break; 4905 case RTE_ETH_FDIR_REJECT: 4906 attributes->actions[0] = (struct rte_flow_action){ 4907 .type = RTE_FLOW_ACTION_TYPE_DROP, 4908 }; 4909 break; 4910 default: 4911 DRV_LOG(ERR, "port %u invalid behavior %d", 4912 dev->data->port_id, 4913 fdir_filter->action.behavior); 4914 rte_errno = ENOTSUP; 4915 return -rte_errno; 4916 } 4917 attributes->queue.index = fdir_filter->action.rx_queue; 4918 /* Handle L3. 
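	 * The FDIR flow_type selects an IPv4 or IPv6 pattern item below;
	 * its spec is filled from the filter input and its mask from the
	 * global FDIR masks in dev_conf.fdir_conf.mask.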
*/ 4919 switch (fdir_filter->input.flow_type) { 4920 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 4921 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 4922 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 4923 attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){ 4924 .src_addr = input->flow.ip4_flow.src_ip, 4925 .dst_addr = input->flow.ip4_flow.dst_ip, 4926 .time_to_live = input->flow.ip4_flow.ttl, 4927 .type_of_service = input->flow.ip4_flow.tos, 4928 }; 4929 attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){ 4930 .src_addr = mask->ipv4_mask.src_ip, 4931 .dst_addr = mask->ipv4_mask.dst_ip, 4932 .time_to_live = mask->ipv4_mask.ttl, 4933 .type_of_service = mask->ipv4_mask.tos, 4934 .next_proto_id = mask->ipv4_mask.proto, 4935 }; 4936 attributes->items[1] = (struct rte_flow_item){ 4937 .type = RTE_FLOW_ITEM_TYPE_IPV4, 4938 .spec = &attributes->l3, 4939 .mask = &attributes->l3_mask, 4940 }; 4941 break; 4942 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 4943 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 4944 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 4945 attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){ 4946 .hop_limits = input->flow.ipv6_flow.hop_limits, 4947 .proto = input->flow.ipv6_flow.proto, 4948 }; 4949 4950 memcpy(attributes->l3.ipv6.hdr.src_addr, 4951 input->flow.ipv6_flow.src_ip, 4952 RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); 4953 memcpy(attributes->l3.ipv6.hdr.dst_addr, 4954 input->flow.ipv6_flow.dst_ip, 4955 RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); 4956 memcpy(attributes->l3_mask.ipv6.hdr.src_addr, 4957 mask->ipv6_mask.src_ip, 4958 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); 4959 memcpy(attributes->l3_mask.ipv6.hdr.dst_addr, 4960 mask->ipv6_mask.dst_ip, 4961 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); 4962 attributes->items[1] = (struct rte_flow_item){ 4963 .type = RTE_FLOW_ITEM_TYPE_IPV6, 4964 .spec = &attributes->l3, 4965 .mask = &attributes->l3_mask, 4966 }; 4967 break; 4968 default: 4969 DRV_LOG(ERR, "port %u invalid flow type%d", 4970 dev->data->port_id, fdir_filter->input.flow_type); 4971 rte_errno = ENOTSUP; 4972 return -rte_errno; 4973 } 4974 /* Handle L4. 
*/ 4975 switch (fdir_filter->input.flow_type) { 4976 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 4977 attributes->l4.udp.hdr = (struct rte_udp_hdr){ 4978 .src_port = input->flow.udp4_flow.src_port, 4979 .dst_port = input->flow.udp4_flow.dst_port, 4980 }; 4981 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ 4982 .src_port = mask->src_port_mask, 4983 .dst_port = mask->dst_port_mask, 4984 }; 4985 attributes->items[2] = (struct rte_flow_item){ 4986 .type = RTE_FLOW_ITEM_TYPE_UDP, 4987 .spec = &attributes->l4, 4988 .mask = &attributes->l4_mask, 4989 }; 4990 break; 4991 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 4992 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ 4993 .src_port = input->flow.tcp4_flow.src_port, 4994 .dst_port = input->flow.tcp4_flow.dst_port, 4995 }; 4996 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ 4997 .src_port = mask->src_port_mask, 4998 .dst_port = mask->dst_port_mask, 4999 }; 5000 attributes->items[2] = (struct rte_flow_item){ 5001 .type = RTE_FLOW_ITEM_TYPE_TCP, 5002 .spec = &attributes->l4, 5003 .mask = &attributes->l4_mask, 5004 }; 5005 break; 5006 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 5007 attributes->l4.udp.hdr = (struct rte_udp_hdr){ 5008 .src_port = input->flow.udp6_flow.src_port, 5009 .dst_port = input->flow.udp6_flow.dst_port, 5010 }; 5011 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ 5012 .src_port = mask->src_port_mask, 5013 .dst_port = mask->dst_port_mask, 5014 }; 5015 attributes->items[2] = (struct rte_flow_item){ 5016 .type = RTE_FLOW_ITEM_TYPE_UDP, 5017 .spec = &attributes->l4, 5018 .mask = &attributes->l4_mask, 5019 }; 5020 break; 5021 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 5022 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ 5023 .src_port = input->flow.tcp6_flow.src_port, 5024 .dst_port = input->flow.tcp6_flow.dst_port, 5025 }; 5026 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ 5027 .src_port = mask->src_port_mask, 5028 .dst_port = mask->dst_port_mask, 5029 }; 5030 attributes->items[2] = (struct rte_flow_item){ 5031 .type = RTE_FLOW_ITEM_TYPE_TCP, 5032 .spec = &attributes->l4, 5033 .mask = &attributes->l4_mask, 5034 }; 5035 break; 5036 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 5037 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 5038 break; 5039 default: 5040 DRV_LOG(ERR, "port %u invalid flow type%d", 5041 dev->data->port_id, fdir_filter->input.flow_type); 5042 rte_errno = ENOTSUP; 5043 return -rte_errno; 5044 } 5045 return 0; 5046 } 5047 5048 #define FLOW_FDIR_CMP(f1, f2, fld) \ 5049 memcmp(&(f1)->fld, &(f2)->fld, sizeof(f1->fld)) 5050 5051 /** 5052 * Compare two FDIR flows. If items and actions are identical, the two flows are 5053 * regarded as same. 5054 * 5055 * @param dev 5056 * Pointer to Ethernet device. 5057 * @param f1 5058 * FDIR flow to compare. 5059 * @param f2 5060 * FDIR flow to compare. 5061 * 5062 * @return 5063 * Zero on match, 1 otherwise. 5064 */ 5065 static int 5066 flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2) 5067 { 5068 if (FLOW_FDIR_CMP(f1, f2, attr) || 5069 FLOW_FDIR_CMP(f1, f2, l2) || 5070 FLOW_FDIR_CMP(f1, f2, l2_mask) || 5071 FLOW_FDIR_CMP(f1, f2, l3) || 5072 FLOW_FDIR_CMP(f1, f2, l3_mask) || 5073 FLOW_FDIR_CMP(f1, f2, l4) || 5074 FLOW_FDIR_CMP(f1, f2, l4_mask) || 5075 FLOW_FDIR_CMP(f1, f2, actions[0].type)) 5076 return 1; 5077 if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE && 5078 FLOW_FDIR_CMP(f1, f2, queue)) 5079 return 1; 5080 return 0; 5081 } 5082 5083 /** 5084 * Search device flow list to find out a matched FDIR flow. 5085 * 5086 * @param dev 5087 * Pointer to Ethernet device. 
5088 * @param fdir_flow 5089 * FDIR flow to lookup. 5090 * 5091 * @return 5092 * Pointer of flow if found, NULL otherwise. 5093 */ 5094 static struct rte_flow * 5095 flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow) 5096 { 5097 struct mlx5_priv *priv = dev->data->dev_private; 5098 struct rte_flow *flow = NULL; 5099 5100 MLX5_ASSERT(fdir_flow); 5101 TAILQ_FOREACH(flow, &priv->flows, next) { 5102 if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) { 5103 DRV_LOG(DEBUG, "port %u found FDIR flow %p", 5104 dev->data->port_id, (void *)flow); 5105 break; 5106 } 5107 } 5108 return flow; 5109 } 5110 5111 /** 5112 * Add new flow director filter and store it in list. 5113 * 5114 * @param dev 5115 * Pointer to Ethernet device. 5116 * @param fdir_filter 5117 * Flow director filter to add. 5118 * 5119 * @return 5120 * 0 on success, a negative errno value otherwise and rte_errno is set. 5121 */ 5122 static int 5123 flow_fdir_filter_add(struct rte_eth_dev *dev, 5124 const struct rte_eth_fdir_filter *fdir_filter) 5125 { 5126 struct mlx5_priv *priv = dev->data->dev_private; 5127 struct mlx5_fdir *fdir_flow; 5128 struct rte_flow *flow; 5129 int ret; 5130 5131 fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0); 5132 if (!fdir_flow) { 5133 rte_errno = ENOMEM; 5134 return -rte_errno; 5135 } 5136 ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow); 5137 if (ret) 5138 goto error; 5139 flow = flow_fdir_filter_lookup(dev, fdir_flow); 5140 if (flow) { 5141 rte_errno = EEXIST; 5142 goto error; 5143 } 5144 flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr, 5145 fdir_flow->items, fdir_flow->actions, true, 5146 NULL); 5147 if (!flow) 5148 goto error; 5149 MLX5_ASSERT(!flow->fdir); 5150 flow->fdir = fdir_flow; 5151 DRV_LOG(DEBUG, "port %u created FDIR flow %p", 5152 dev->data->port_id, (void *)flow); 5153 return 0; 5154 error: 5155 rte_free(fdir_flow); 5156 return -rte_errno; 5157 } 5158 5159 /** 5160 * Delete specific filter. 5161 * 5162 * @param dev 5163 * Pointer to Ethernet device. 5164 * @param fdir_filter 5165 * Filter to be deleted. 5166 * 5167 * @return 5168 * 0 on success, a negative errno value otherwise and rte_errno is set. 5169 */ 5170 static int 5171 flow_fdir_filter_delete(struct rte_eth_dev *dev, 5172 const struct rte_eth_fdir_filter *fdir_filter) 5173 { 5174 struct mlx5_priv *priv = dev->data->dev_private; 5175 struct rte_flow *flow; 5176 struct mlx5_fdir fdir_flow = { 5177 .attr.group = 0, 5178 }; 5179 int ret; 5180 5181 ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow); 5182 if (ret) 5183 return -rte_errno; 5184 flow = flow_fdir_filter_lookup(dev, &fdir_flow); 5185 if (!flow) { 5186 rte_errno = ENOENT; 5187 return -rte_errno; 5188 } 5189 flow_list_destroy(dev, &priv->flows, flow); 5190 DRV_LOG(DEBUG, "port %u deleted FDIR flow %p", 5191 dev->data->port_id, (void *)flow); 5192 return 0; 5193 } 5194 5195 /** 5196 * Update queue for specific filter. 5197 * 5198 * @param dev 5199 * Pointer to Ethernet device. 5200 * @param fdir_filter 5201 * Filter to be updated. 5202 * 5203 * @return 5204 * 0 on success, a negative errno value otherwise and rte_errno is set. 5205 */ 5206 static int 5207 flow_fdir_filter_update(struct rte_eth_dev *dev, 5208 const struct rte_eth_fdir_filter *fdir_filter) 5209 { 5210 int ret; 5211 5212 ret = flow_fdir_filter_delete(dev, fdir_filter); 5213 if (ret) 5214 return ret; 5215 return flow_fdir_filter_add(dev, fdir_filter); 5216 } 5217 5218 /** 5219 * Flush all filters. 
5220 * 5221 * @param dev 5222 * Pointer to Ethernet device. 5223 */ 5224 static void 5225 flow_fdir_filter_flush(struct rte_eth_dev *dev) 5226 { 5227 struct mlx5_priv *priv = dev->data->dev_private; 5228 5229 mlx5_flow_list_flush(dev, &priv->flows, false); 5230 } 5231 5232 /** 5233 * Get flow director information. 5234 * 5235 * @param dev 5236 * Pointer to Ethernet device. 5237 * @param[out] fdir_info 5238 * Resulting flow director information. 5239 */ 5240 static void 5241 flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) 5242 { 5243 struct rte_eth_fdir_masks *mask = 5244 &dev->data->dev_conf.fdir_conf.mask; 5245 5246 fdir_info->mode = dev->data->dev_conf.fdir_conf.mode; 5247 fdir_info->guarant_spc = 0; 5248 rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask)); 5249 fdir_info->max_flexpayload = 0; 5250 fdir_info->flow_types_mask[0] = 0; 5251 fdir_info->flex_payload_unit = 0; 5252 fdir_info->max_flex_payload_segment_num = 0; 5253 fdir_info->flex_payload_limit = 0; 5254 memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf)); 5255 } 5256 5257 /** 5258 * Deal with flow director operations. 5259 * 5260 * @param dev 5261 * Pointer to Ethernet device. 5262 * @param filter_op 5263 * Operation to perform. 5264 * @param arg 5265 * Pointer to operation-specific structure. 5266 * 5267 * @return 5268 * 0 on success, a negative errno value otherwise and rte_errno is set. 5269 */ 5270 static int 5271 flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, 5272 void *arg) 5273 { 5274 enum rte_fdir_mode fdir_mode = 5275 dev->data->dev_conf.fdir_conf.mode; 5276 5277 if (filter_op == RTE_ETH_FILTER_NOP) 5278 return 0; 5279 if (fdir_mode != RTE_FDIR_MODE_PERFECT && 5280 fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 5281 DRV_LOG(ERR, "port %u flow director mode %d not supported", 5282 dev->data->port_id, fdir_mode); 5283 rte_errno = EINVAL; 5284 return -rte_errno; 5285 } 5286 switch (filter_op) { 5287 case RTE_ETH_FILTER_ADD: 5288 return flow_fdir_filter_add(dev, arg); 5289 case RTE_ETH_FILTER_UPDATE: 5290 return flow_fdir_filter_update(dev, arg); 5291 case RTE_ETH_FILTER_DELETE: 5292 return flow_fdir_filter_delete(dev, arg); 5293 case RTE_ETH_FILTER_FLUSH: 5294 flow_fdir_filter_flush(dev); 5295 break; 5296 case RTE_ETH_FILTER_INFO: 5297 flow_fdir_info_get(dev, arg); 5298 break; 5299 default: 5300 DRV_LOG(DEBUG, "port %u unknown operation %u", 5301 dev->data->port_id, filter_op); 5302 rte_errno = EINVAL; 5303 return -rte_errno; 5304 } 5305 return 0; 5306 } 5307 5308 /** 5309 * Manage filter operations. 5310 * 5311 * @param dev 5312 * Pointer to Ethernet device structure. 5313 * @param filter_type 5314 * Filter type. 5315 * @param filter_op 5316 * Operation to perform. 5317 * @param arg 5318 * Pointer to operation-specific structure. 5319 * 5320 * @return 5321 * 0 on success, a negative errno value otherwise and rte_errno is set. 
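 *
 * A minimal sketch of how the generic flow ops are retrieved through this
 * entry point (normally done by the rte_flow layer; the local variables
 * are illustrative):
 *
 * @code
 * const struct rte_flow_ops *ops = NULL;
 * int rc = mlx5_dev_filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
 *				 RTE_ETH_FILTER_GET, &ops);
 * @endcode
 *
 * On success, ops points to mlx5_flow_ops.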
5322 */ 5323 int 5324 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, 5325 enum rte_filter_type filter_type, 5326 enum rte_filter_op filter_op, 5327 void *arg) 5328 { 5329 switch (filter_type) { 5330 case RTE_ETH_FILTER_GENERIC: 5331 if (filter_op != RTE_ETH_FILTER_GET) { 5332 rte_errno = EINVAL; 5333 return -rte_errno; 5334 } 5335 *(const void **)arg = &mlx5_flow_ops; 5336 return 0; 5337 case RTE_ETH_FILTER_FDIR: 5338 return flow_fdir_ctrl_func(dev, filter_op, arg); 5339 default: 5340 DRV_LOG(ERR, "port %u filter type (%d) not supported", 5341 dev->data->port_id, filter_type); 5342 rte_errno = ENOTSUP; 5343 return -rte_errno; 5344 } 5345 return 0; 5346 } 5347 5348 /** 5349 * Create the needed meter and suffix tables. 5350 * 5351 * @param[in] dev 5352 * Pointer to Ethernet device. 5353 * @param[in] fm 5354 * Pointer to the flow meter. 5355 * 5356 * @return 5357 * Pointer to table set on success, NULL otherwise. 5358 */ 5359 struct mlx5_meter_domains_infos * 5360 mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev, 5361 const struct mlx5_flow_meter *fm) 5362 { 5363 const struct mlx5_flow_driver_ops *fops; 5364 5365 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5366 return fops->create_mtr_tbls(dev, fm); 5367 } 5368 5369 /** 5370 * Destroy the meter table set. 5371 * 5372 * @param[in] dev 5373 * Pointer to Ethernet device. 5374 * @param[in] tbl 5375 * Pointer to the meter table set. 5376 * 5377 * @return 5378 * 0 on success. 5379 */ 5380 int 5381 mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev, 5382 struct mlx5_meter_domains_infos *tbls) 5383 { 5384 const struct mlx5_flow_driver_ops *fops; 5385 5386 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5387 return fops->destroy_mtr_tbls(dev, tbls); 5388 } 5389 5390 /** 5391 * Create policer rules. 5392 * 5393 * @param[in] dev 5394 * Pointer to Ethernet device. 5395 * @param[in] fm 5396 * Pointer to flow meter structure. 5397 * @param[in] attr 5398 * Pointer to flow attributes. 5399 * 5400 * @return 5401 * 0 on success, -1 otherwise. 5402 */ 5403 int 5404 mlx5_flow_create_policer_rules(struct rte_eth_dev *dev, 5405 struct mlx5_flow_meter *fm, 5406 const struct rte_flow_attr *attr) 5407 { 5408 const struct mlx5_flow_driver_ops *fops; 5409 5410 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5411 return fops->create_policer_rules(dev, fm, attr); 5412 } 5413 5414 /** 5415 * Destroy policer rules. 5416 * 5417 * @param[in] fm 5418 * Pointer to flow meter structure. 5419 * @param[in] attr 5420 * Pointer to flow attributes. 5421 * 5422 * @return 5423 * 0 on success, -1 otherwise. 5424 */ 5425 int 5426 mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev, 5427 struct mlx5_flow_meter *fm, 5428 const struct rte_flow_attr *attr) 5429 { 5430 const struct mlx5_flow_driver_ops *fops; 5431 5432 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5433 return fops->destroy_policer_rules(dev, fm, attr); 5434 } 5435 5436 /** 5437 * Allocate a counter. 5438 * 5439 * @param[in] dev 5440 * Pointer to Ethernet device structure. 5441 * 5442 * @return 5443 * Pointer to allocated counter on success, NULL otherwise. 
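 *
 * Counter allocation is only routed to the DV flow engine; with the Verbs
 * engine this wrapper logs an error and returns NULL. Illustrative use:
 *
 * @code
 * struct mlx5_flow_counter *cnt = mlx5_counter_alloc(dev);
 *
 * if (cnt != NULL)
 *	mlx5_counter_free(dev, cnt);
 * @endcode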
5444 */ 5445 struct mlx5_flow_counter * 5446 mlx5_counter_alloc(struct rte_eth_dev *dev) 5447 { 5448 const struct mlx5_flow_driver_ops *fops; 5449 struct rte_flow_attr attr = { .transfer = 0 }; 5450 5451 if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) { 5452 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5453 return fops->counter_alloc(dev); 5454 } 5455 DRV_LOG(ERR, 5456 "port %u counter allocate is not supported.", 5457 dev->data->port_id); 5458 return NULL; 5459 } 5460 5461 /** 5462 * Free a counter. 5463 * 5464 * @param[in] dev 5465 * Pointer to Ethernet device structure. 5466 * @param[in] cnt 5467 * Pointer to counter to be free. 5468 */ 5469 void 5470 mlx5_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt) 5471 { 5472 const struct mlx5_flow_driver_ops *fops; 5473 struct rte_flow_attr attr = { .transfer = 0 }; 5474 5475 if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) { 5476 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5477 fops->counter_free(dev, cnt); 5478 return; 5479 } 5480 DRV_LOG(ERR, 5481 "port %u counter free is not supported.", 5482 dev->data->port_id); 5483 } 5484 5485 /** 5486 * Query counter statistics. 5487 * 5488 * @param[in] dev 5489 * Pointer to Ethernet device structure. 5490 * @param[in] cnt 5491 * Pointer to counter to query. 5492 * @param[in] clear 5493 * Set to clear counter statistics. 5494 * @param[out] pkts 5495 * The counter hits packets number to save. 5496 * @param[out] bytes 5497 * The counter hits bytes number to save. 5498 * 5499 * @return 5500 * 0 on success, a negative errno value otherwise. 5501 */ 5502 int 5503 mlx5_counter_query(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt, 5504 bool clear, uint64_t *pkts, uint64_t *bytes) 5505 { 5506 const struct mlx5_flow_driver_ops *fops; 5507 struct rte_flow_attr attr = { .transfer = 0 }; 5508 5509 if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) { 5510 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5511 return fops->counter_query(dev, cnt, clear, pkts, bytes); 5512 } 5513 DRV_LOG(ERR, 5514 "port %u counter query is not supported.", 5515 dev->data->port_id); 5516 return -ENOTSUP; 5517 } 5518 5519 #define MLX5_POOL_QUERY_FREQ_US 1000000 5520 5521 /** 5522 * Set the periodic procedure for triggering asynchronous batch queries for all 5523 * the counter pools. 5524 * 5525 * @param[in] sh 5526 * Pointer to mlx5_ibv_shared object. 5527 */ 5528 void 5529 mlx5_set_query_alarm(struct mlx5_ibv_shared *sh) 5530 { 5531 struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(sh, 0, 0); 5532 uint32_t pools_n = rte_atomic16_read(&cont->n_valid); 5533 uint32_t us; 5534 5535 cont = MLX5_CNT_CONTAINER(sh, 1, 0); 5536 pools_n += rte_atomic16_read(&cont->n_valid); 5537 us = MLX5_POOL_QUERY_FREQ_US / pools_n; 5538 DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us); 5539 if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) { 5540 sh->cmng.query_thread_on = 0; 5541 DRV_LOG(ERR, "Cannot reinitialize query alarm"); 5542 } else { 5543 sh->cmng.query_thread_on = 1; 5544 } 5545 } 5546 5547 /** 5548 * The periodic procedure for triggering asynchronous batch queries for all the 5549 * counter pools. This function is probably called by the host thread. 5550 * 5551 * @param[in] arg 5552 * The parameter for the alarm process. 
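 *
 * Each invocation queries at most one counter pool and then re-arms the
 * alarm through mlx5_set_query_alarm(). Since the alarm period is
 * MLX5_POOL_QUERY_FREQ_US divided by the number of valid pools, every
 * pool ends up being refreshed roughly once per MLX5_POOL_QUERY_FREQ_US;
 * for example, with 4 valid pools the alarm fires every 250000 us and a
 * given pool is queried about once per second.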
5553 */ 5554 void 5555 mlx5_flow_query_alarm(void *arg) 5556 { 5557 struct mlx5_ibv_shared *sh = arg; 5558 struct mlx5_devx_obj *dcs; 5559 uint16_t offset; 5560 int ret; 5561 uint8_t batch = sh->cmng.batch; 5562 uint16_t pool_index = sh->cmng.pool_index; 5563 struct mlx5_pools_container *cont; 5564 struct mlx5_pools_container *mcont; 5565 struct mlx5_flow_counter_pool *pool; 5566 5567 if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES) 5568 goto set_alarm; 5569 next_container: 5570 cont = MLX5_CNT_CONTAINER(sh, batch, 1); 5571 mcont = MLX5_CNT_CONTAINER(sh, batch, 0); 5572 /* Check if resize was done and need to flip a container. */ 5573 if (cont != mcont) { 5574 if (cont->pools) { 5575 /* Clean the old container. */ 5576 rte_free(cont->pools); 5577 memset(cont, 0, sizeof(*cont)); 5578 } 5579 rte_cio_wmb(); 5580 /* Flip the host container. */ 5581 sh->cmng.mhi[batch] ^= (uint8_t)2; 5582 cont = mcont; 5583 } 5584 if (!cont->pools) { 5585 /* 2 empty containers case is unexpected. */ 5586 if (unlikely(batch != sh->cmng.batch)) 5587 goto set_alarm; 5588 batch ^= 0x1; 5589 pool_index = 0; 5590 goto next_container; 5591 } 5592 pool = cont->pools[pool_index]; 5593 if (pool->raw_hw) 5594 /* There is a pool query in progress. */ 5595 goto set_alarm; 5596 pool->raw_hw = 5597 LIST_FIRST(&sh->cmng.free_stat_raws); 5598 if (!pool->raw_hw) 5599 /* No free counter statistics raw memory. */ 5600 goto set_alarm; 5601 dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read 5602 (&pool->a64_dcs); 5603 offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL; 5604 ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL - 5605 offset, NULL, NULL, 5606 pool->raw_hw->mem_mng->dm->id, 5607 (void *)(uintptr_t) 5608 (pool->raw_hw->data + offset), 5609 sh->devx_comp, 5610 (uint64_t)(uintptr_t)pool); 5611 if (ret) { 5612 DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID" 5613 " %d", pool->min_dcs->id); 5614 pool->raw_hw = NULL; 5615 goto set_alarm; 5616 } 5617 pool->raw_hw->min_dcs_id = dcs->id; 5618 LIST_REMOVE(pool->raw_hw, next); 5619 sh->cmng.pending_queries++; 5620 pool_index++; 5621 if (pool_index >= rte_atomic16_read(&cont->n_valid)) { 5622 batch ^= 0x1; 5623 pool_index = 0; 5624 } 5625 set_alarm: 5626 sh->cmng.batch = batch; 5627 sh->cmng.pool_index = pool_index; 5628 mlx5_set_query_alarm(sh); 5629 } 5630 5631 /** 5632 * Handler for the HW respond about ready values from an asynchronous batch 5633 * query. This function is probably called by the host thread. 5634 * 5635 * @param[in] sh 5636 * The pointer to the shared IB device context. 5637 * @param[in] async_id 5638 * The Devx async ID. 5639 * @param[in] status 5640 * The status of the completion. 5641 */ 5642 void 5643 mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh, 5644 uint64_t async_id, int status) 5645 { 5646 struct mlx5_flow_counter_pool *pool = 5647 (struct mlx5_flow_counter_pool *)(uintptr_t)async_id; 5648 struct mlx5_counter_stats_raw *raw_to_free; 5649 5650 if (unlikely(status)) { 5651 raw_to_free = pool->raw_hw; 5652 } else { 5653 raw_to_free = pool->raw; 5654 rte_spinlock_lock(&pool->sl); 5655 pool->raw = pool->raw_hw; 5656 rte_spinlock_unlock(&pool->sl); 5657 rte_atomic64_add(&pool->query_gen, 1); 5658 /* Be sure the new raw counters data is updated in memory. */ 5659 rte_cio_wmb(); 5660 } 5661 LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next); 5662 pool->raw_hw = NULL; 5663 sh->cmng.pending_queries--; 5664 } 5665 5666 /** 5667 * Translate the rte_flow group index to HW table value. 
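 *
 * For example (the values follow directly from the code below): a transfer
 * rule coming from an external request with the FDB default rule enabled
 * maps group 0 to table 1 and group N to table N + 1, while any other
 * combination keeps the group index unchanged; group UINT32_MAX is
 * rejected because it cannot be translated by the + 1 offset.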

/**
 * Translate the rte_flow group index to the HW table value.
 *
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[in] external
 *   True if the flow rule was created by a request external to the PMD.
 * @param[in] group
 *   rte_flow group index value.
 * @param[in] fdb_def_rule
 *   Whether the FDB default jump to table 1 is configured.
 * @param[out] table
 *   HW table value.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external,
			 uint32_t group, bool fdb_def_rule, uint32_t *table,
			 struct rte_flow_error *error)
{
	if (attributes->transfer && external && fdb_def_rule) {
		if (group == UINT32_MAX)
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				 NULL,
				 "group index not supported");
		*table = group + 1;
	} else {
		*table = group;
	}
	return 0;
}

/**
 * Discover availability of metadata reg_c's.
 *
 * Iteratively use test flows to check availability.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	enum modify_reg idx;
	int n = 0;

	/* reg_c[0] and reg_c[1] are reserved. */
	config->flow_mreg_c[n++] = REG_C_0;
	config->flow_mreg_c[n++] = REG_C_1;
	/* Discover availability of the other reg_c's. */
	for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
		struct rte_flow_attr attr = {
			.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
			.priority = MLX5_FLOW_PRIO_RSVD,
			.ingress = 1,
		};
		struct rte_flow_item items[] = {
			[0] = {
				.type = RTE_FLOW_ITEM_TYPE_END,
			},
		};
		struct rte_flow_action actions[] = {
			[0] = {
				.type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
				.conf = &(struct mlx5_flow_action_copy_mreg){
					.src = REG_C_1,
					.dst = idx,
				},
			},
			[1] = {
				.type = RTE_FLOW_ACTION_TYPE_JUMP,
				.conf = &(struct rte_flow_action_jump){
					.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
				},
			},
			[2] = {
				.type = RTE_FLOW_ACTION_TYPE_END,
			},
		};
		struct rte_flow *flow;
		struct rte_flow_error error;

		if (!config->dv_flow_en)
			break;
		/* Create an internal flow; validation skips the copy action. */
		flow = flow_list_create(dev, NULL, &attr, items,
					actions, false, &error);
		if (!flow)
			continue;
		if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL))
			config->flow_mreg_c[n++] = idx;
		flow_list_destroy(dev, NULL, flow);
	}
	for (; n < MLX5_MREG_C_NUM; ++n)
		config->flow_mreg_c[n] = REG_NONE;
	return 0;
}
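
/*
 * Illustrative sketch, not part of the driver: the mapping performed by
 * mlx5_flow_group_to_table() above. For external transfer rules (with the
 * FDB default rule enabled) the group index is shifted by one so that HW
 * table 0 stays reserved for the default jump rule; all other rules keep the
 * group index as the table value. The guard macro and helper name are
 * hypothetical.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static void
example_group_to_table_mapping(void)
{
	struct rte_flow_attr attr = { .transfer = 1 };
	struct rte_flow_error error;
	uint32_t table = 0;

	/* External transfer rule on group 0 lands in HW table 1. */
	mlx5_flow_group_to_table(&attr, true, 0, true, &table, &error);
	/* Internal (PMD-created) rule keeps its group index: table == 5. */
	mlx5_flow_group_to_table(&attr, false, 5, true, &table, &error);
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */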

/**
 * Dump flow raw HW data to a file.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device.
 * @param[in] file
 *   A pointer to a file for output.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_flow_dev_dump(struct rte_eth_dev *dev,
		   FILE *file,
		   struct rte_flow_error *error __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;

	return mlx5_devx_cmd_flow_dump(sh->fdb_domain, sh->rx_domain,
				       sh->tx_domain, file);
}
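
/*
 * Illustrative sketch, not part of the driver: dumping the HW steering state
 * of a port to a file through mlx5_flow_dev_dump() above. The guard macro,
 * helper name, and output path handling are hypothetical.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static int
example_dump_flows_to_path(struct rte_eth_dev *dev, const char *path)
{
	FILE *file = fopen(path, "w");
	int ret;

	if (!file)
		return -errno;
	ret = mlx5_flow_dev_dump(dev, file, NULL);
	fclose(file);
	return ret;
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */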