/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_rxtx.h"

/* Dev ops structure defined in mlx5.c */
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;

/** Device flow drivers. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;
#endif
extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;

const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;

const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
	[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
#endif
	[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
	[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
};

enum mlx5_expansion {
	MLX5_EXPANSION_ROOT,
	MLX5_EXPANSION_ROOT_OUTER,
	MLX5_EXPANSION_ROOT_ETH_VLAN,
	MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_ETH,
	MLX5_EXPANSION_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_VLAN,
	MLX5_EXPANSION_OUTER_IPV4,
	MLX5_EXPANSION_OUTER_IPV4_UDP,
	MLX5_EXPANSION_OUTER_IPV4_TCP,
	MLX5_EXPANSION_OUTER_IPV6,
	MLX5_EXPANSION_OUTER_IPV6_UDP,
	MLX5_EXPANSION_OUTER_IPV6_TCP,
	MLX5_EXPANSION_VXLAN,
	MLX5_EXPANSION_VXLAN_GPE,
	MLX5_EXPANSION_GRE,
	MLX5_EXPANSION_MPLS,
	MLX5_EXPANSION_ETH,
	MLX5_EXPANSION_ETH_VLAN,
	MLX5_EXPANSION_VLAN,
	MLX5_EXPANSION_IPV4,
	MLX5_EXPANSION_IPV4_UDP,
	MLX5_EXPANSION_IPV4_TCP,
	MLX5_EXPANSION_IPV6,
	MLX5_EXPANSION_IPV6_UDP,
	MLX5_EXPANSION_IPV6_TCP,
};

/** Supported expansion of items.
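 *
 * This graph drives RSS flow expansion: each node lists the pattern items
 * that may follow it (.next) and the ETH_RSS_* types it can hash on
 * (.rss_types), so a single pattern with an RSS action may be expanded into
 * several device flows covering the combinations compatible with the
 * requested RSS types.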
*/ 90 static const struct rte_flow_expand_node mlx5_support_expansion[] = { 91 [MLX5_EXPANSION_ROOT] = { 92 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, 93 MLX5_EXPANSION_IPV4, 94 MLX5_EXPANSION_IPV6), 95 .type = RTE_FLOW_ITEM_TYPE_END, 96 }, 97 [MLX5_EXPANSION_ROOT_OUTER] = { 98 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH, 99 MLX5_EXPANSION_OUTER_IPV4, 100 MLX5_EXPANSION_OUTER_IPV6), 101 .type = RTE_FLOW_ITEM_TYPE_END, 102 }, 103 [MLX5_EXPANSION_ROOT_ETH_VLAN] = { 104 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN), 105 .type = RTE_FLOW_ITEM_TYPE_END, 106 }, 107 [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = { 108 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN), 109 .type = RTE_FLOW_ITEM_TYPE_END, 110 }, 111 [MLX5_EXPANSION_OUTER_ETH] = { 112 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, 113 MLX5_EXPANSION_OUTER_IPV6, 114 MLX5_EXPANSION_MPLS), 115 .type = RTE_FLOW_ITEM_TYPE_ETH, 116 .rss_types = 0, 117 }, 118 [MLX5_EXPANSION_OUTER_ETH_VLAN] = { 119 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN), 120 .type = RTE_FLOW_ITEM_TYPE_ETH, 121 .rss_types = 0, 122 }, 123 [MLX5_EXPANSION_OUTER_VLAN] = { 124 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, 125 MLX5_EXPANSION_OUTER_IPV6), 126 .type = RTE_FLOW_ITEM_TYPE_VLAN, 127 }, 128 [MLX5_EXPANSION_OUTER_IPV4] = { 129 .next = RTE_FLOW_EXPAND_RSS_NEXT 130 (MLX5_EXPANSION_OUTER_IPV4_UDP, 131 MLX5_EXPANSION_OUTER_IPV4_TCP, 132 MLX5_EXPANSION_GRE, 133 MLX5_EXPANSION_IPV4, 134 MLX5_EXPANSION_IPV6), 135 .type = RTE_FLOW_ITEM_TYPE_IPV4, 136 .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | 137 ETH_RSS_NONFRAG_IPV4_OTHER, 138 }, 139 [MLX5_EXPANSION_OUTER_IPV4_UDP] = { 140 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, 141 MLX5_EXPANSION_VXLAN_GPE), 142 .type = RTE_FLOW_ITEM_TYPE_UDP, 143 .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, 144 }, 145 [MLX5_EXPANSION_OUTER_IPV4_TCP] = { 146 .type = RTE_FLOW_ITEM_TYPE_TCP, 147 .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, 148 }, 149 [MLX5_EXPANSION_OUTER_IPV6] = { 150 .next = RTE_FLOW_EXPAND_RSS_NEXT 151 (MLX5_EXPANSION_OUTER_IPV6_UDP, 152 MLX5_EXPANSION_OUTER_IPV6_TCP, 153 MLX5_EXPANSION_IPV4, 154 MLX5_EXPANSION_IPV6), 155 .type = RTE_FLOW_ITEM_TYPE_IPV6, 156 .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | 157 ETH_RSS_NONFRAG_IPV6_OTHER, 158 }, 159 [MLX5_EXPANSION_OUTER_IPV6_UDP] = { 160 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, 161 MLX5_EXPANSION_VXLAN_GPE), 162 .type = RTE_FLOW_ITEM_TYPE_UDP, 163 .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, 164 }, 165 [MLX5_EXPANSION_OUTER_IPV6_TCP] = { 166 .type = RTE_FLOW_ITEM_TYPE_TCP, 167 .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, 168 }, 169 [MLX5_EXPANSION_VXLAN] = { 170 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH), 171 .type = RTE_FLOW_ITEM_TYPE_VXLAN, 172 }, 173 [MLX5_EXPANSION_VXLAN_GPE] = { 174 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, 175 MLX5_EXPANSION_IPV4, 176 MLX5_EXPANSION_IPV6), 177 .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE, 178 }, 179 [MLX5_EXPANSION_GRE] = { 180 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4), 181 .type = RTE_FLOW_ITEM_TYPE_GRE, 182 }, 183 [MLX5_EXPANSION_MPLS] = { 184 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 185 MLX5_EXPANSION_IPV6), 186 .type = RTE_FLOW_ITEM_TYPE_MPLS, 187 }, 188 [MLX5_EXPANSION_ETH] = { 189 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 190 MLX5_EXPANSION_IPV6), 191 .type = RTE_FLOW_ITEM_TYPE_ETH, 192 }, 193 [MLX5_EXPANSION_ETH_VLAN] = { 194 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN), 195 .type = 
RTE_FLOW_ITEM_TYPE_ETH, 196 }, 197 [MLX5_EXPANSION_VLAN] = { 198 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 199 MLX5_EXPANSION_IPV6), 200 .type = RTE_FLOW_ITEM_TYPE_VLAN, 201 }, 202 [MLX5_EXPANSION_IPV4] = { 203 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP, 204 MLX5_EXPANSION_IPV4_TCP), 205 .type = RTE_FLOW_ITEM_TYPE_IPV4, 206 .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | 207 ETH_RSS_NONFRAG_IPV4_OTHER, 208 }, 209 [MLX5_EXPANSION_IPV4_UDP] = { 210 .type = RTE_FLOW_ITEM_TYPE_UDP, 211 .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, 212 }, 213 [MLX5_EXPANSION_IPV4_TCP] = { 214 .type = RTE_FLOW_ITEM_TYPE_TCP, 215 .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, 216 }, 217 [MLX5_EXPANSION_IPV6] = { 218 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP, 219 MLX5_EXPANSION_IPV6_TCP), 220 .type = RTE_FLOW_ITEM_TYPE_IPV6, 221 .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | 222 ETH_RSS_NONFRAG_IPV6_OTHER, 223 }, 224 [MLX5_EXPANSION_IPV6_UDP] = { 225 .type = RTE_FLOW_ITEM_TYPE_UDP, 226 .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, 227 }, 228 [MLX5_EXPANSION_IPV6_TCP] = { 229 .type = RTE_FLOW_ITEM_TYPE_TCP, 230 .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, 231 }, 232 }; 233 234 static const struct rte_flow_ops mlx5_flow_ops = { 235 .validate = mlx5_flow_validate, 236 .create = mlx5_flow_create, 237 .destroy = mlx5_flow_destroy, 238 .flush = mlx5_flow_flush, 239 .isolate = mlx5_flow_isolate, 240 .query = mlx5_flow_query, 241 .dev_dump = mlx5_flow_dev_dump, 242 }; 243 244 /* Convert FDIR request to Generic flow. */ 245 struct mlx5_fdir { 246 struct rte_flow_attr attr; 247 struct rte_flow_item items[4]; 248 struct rte_flow_item_eth l2; 249 struct rte_flow_item_eth l2_mask; 250 union { 251 struct rte_flow_item_ipv4 ipv4; 252 struct rte_flow_item_ipv6 ipv6; 253 } l3; 254 union { 255 struct rte_flow_item_ipv4 ipv4; 256 struct rte_flow_item_ipv6 ipv6; 257 } l3_mask; 258 union { 259 struct rte_flow_item_udp udp; 260 struct rte_flow_item_tcp tcp; 261 } l4; 262 union { 263 struct rte_flow_item_udp udp; 264 struct rte_flow_item_tcp tcp; 265 } l4_mask; 266 struct rte_flow_action actions[2]; 267 struct rte_flow_action_queue queue; 268 }; 269 270 /* Map of Verbs to Flow priority with 8 Verbs priorities. */ 271 static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = { 272 { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 }, 273 }; 274 275 /* Map of Verbs to Flow priority with 16 Verbs priorities. */ 276 static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = { 277 { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 }, 278 { 9, 10, 11 }, { 12, 13, 14 }, 279 }; 280 281 /* Tunnel information. */ 282 struct mlx5_flow_tunnel_info { 283 uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */ 284 uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). 
 */
};

static struct mlx5_flow_tunnel_info tunnels_info[] = {
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GENEVE,
		.ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GRE,
		.ptype = RTE_PTYPE_TUNNEL_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_NVGRE,
		.ptype = RTE_PTYPE_TUNNEL_NVGRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPIP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GTP,
		.ptype = RTE_PTYPE_TUNNEL_GTPU,
	},
};

/**
 * Translate tag ID to register.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] feature
 *   The feature that requests the register.
 * @param[in] id
 *   The requested register ID.
 * @param[out] error
 *   Error description in case of failure.
 *
 * @return
 *   The requested register on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
		     enum mlx5_feature_name feature,
		     uint32_t id,
		     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	enum modify_reg start_reg;
	bool skip_mtr_reg = false;

	switch (feature) {
	case MLX5_HAIRPIN_RX:
		return REG_B;
	case MLX5_HAIRPIN_TX:
		return REG_A;
	case MLX5_METADATA_RX:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_B;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_METADATA_TX:
		return REG_A;
	case MLX5_METADATA_FDB:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NONE;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_FLOW_MARK:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NONE;
		case MLX5_XMETA_MODE_META16:
			return REG_C_1;
		case MLX5_XMETA_MODE_META32:
			return REG_C_0;
		}
		break;
	case MLX5_MTR_SFX:
		/*
		 * If meter color and flow match share one register, flow match
		 * should use the meter color register for match.
		 */
		if (priv->mtr_reg_share)
			return priv->mtr_color_reg;
		else
			return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
			       REG_C_3;
	case MLX5_MTR_COLOR:
		MLX5_ASSERT(priv->mtr_color_reg != REG_NONE);
		return priv->mtr_color_reg;
	case MLX5_COPY_MARK:
		/*
		 * Metadata COPY_MARK is only used in the meter suffix sub-flow
		 * when a meter is present, so it is safe to share the same
		 * register.
		 */
		return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
	case MLX5_APP_TAG:
		/*
		 * If the meter is enabled, it engages registers for color
		 * match and flow match. If meter color match does not use
		 * REG_C_2, the REG_C_x used by meter color match must be
		 * skipped.
		 * If the meter is disabled, all available registers can be
		 * used.
		 */
		start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
			    (priv->mtr_reg_share ? REG_C_3 : REG_C_4);
		skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
		if (id > (REG_C_7 - start_reg))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "invalid tag id");
		if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NONE)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		/*
		 * This case means the meter is using a REG_C_x greater than 2.
		 * Take care not to conflict with the meter color REG_C_x.
		 * If the available index REG_C_y >= REG_C_x, skip the
		 * color register.
		 */
		if (skip_mtr_reg && config->flow_mreg_c
		    [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
			if (config->flow_mreg_c
			    [id + 1 + start_reg - REG_C_0] != REG_NONE)
				return config->flow_mreg_c
				       [id + 1 + start_reg - REG_C_0];
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		}
		return config->flow_mreg_c[id + start_reg - REG_C_0];
	}
	MLX5_ASSERT(false);
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, "invalid feature name");
}

/**
 * Check extensive flow metadata register support.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 * @return
 *   True if device supports extensive flow metadata register, otherwise false.
 */
bool
mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;

	/*
	 * Having an available reg_c can be regarded as support for extensive
	 * flow metadata registers, which means:
	 * - metadata register copy action by modify header.
	 * - 16 modify header actions are supported.
	 * - reg_c's are preserved across different domains (FDB and NIC) on
	 *   packet loopback by flow lookup miss.
	 */
	return config->flow_mreg_c[2] != REG_NONE;
}

/**
 * Discover the maximum number of priorities available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
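 *
 *   The value is probed empirically: a drop flow is created at Verbs
 *   priority 7 and then 15, and the highest probe that succeeds selects
 *   between the 8-priority and 16-priority maps above.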
491 */ 492 int 493 mlx5_flow_discover_priorities(struct rte_eth_dev *dev) 494 { 495 struct mlx5_priv *priv = dev->data->dev_private; 496 struct { 497 struct ibv_flow_attr attr; 498 struct ibv_flow_spec_eth eth; 499 struct ibv_flow_spec_action_drop drop; 500 } flow_attr = { 501 .attr = { 502 .num_of_specs = 2, 503 .port = (uint8_t)priv->ibv_port, 504 }, 505 .eth = { 506 .type = IBV_FLOW_SPEC_ETH, 507 .size = sizeof(struct ibv_flow_spec_eth), 508 }, 509 .drop = { 510 .size = sizeof(struct ibv_flow_spec_action_drop), 511 .type = IBV_FLOW_SPEC_ACTION_DROP, 512 }, 513 }; 514 struct ibv_flow *flow; 515 struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev); 516 uint16_t vprio[] = { 8, 16 }; 517 int i; 518 int priority = 0; 519 520 if (!drop) { 521 rte_errno = ENOTSUP; 522 return -rte_errno; 523 } 524 for (i = 0; i != RTE_DIM(vprio); i++) { 525 flow_attr.attr.priority = vprio[i] - 1; 526 flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr); 527 if (!flow) 528 break; 529 claim_zero(mlx5_glue->destroy_flow(flow)); 530 priority = vprio[i]; 531 } 532 mlx5_hrxq_drop_release(dev); 533 switch (priority) { 534 case 8: 535 priority = RTE_DIM(priority_map_3); 536 break; 537 case 16: 538 priority = RTE_DIM(priority_map_5); 539 break; 540 default: 541 rte_errno = ENOTSUP; 542 DRV_LOG(ERR, 543 "port %u verbs maximum priority: %d expected 8/16", 544 dev->data->port_id, priority); 545 return -rte_errno; 546 } 547 DRV_LOG(INFO, "port %u flow maximum priority: %d", 548 dev->data->port_id, priority); 549 return priority; 550 } 551 552 /** 553 * Adjust flow priority based on the highest layer and the request priority. 554 * 555 * @param[in] dev 556 * Pointer to the Ethernet device structure. 557 * @param[in] priority 558 * The rule base priority. 559 * @param[in] subpriority 560 * The priority based on the items. 561 * 562 * @return 563 * The new priority. 564 */ 565 uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, 566 uint32_t subpriority) 567 { 568 uint32_t res = 0; 569 struct mlx5_priv *priv = dev->data->dev_private; 570 571 switch (priv->config.flow_prio) { 572 case RTE_DIM(priority_map_3): 573 res = priority_map_3[priority][subpriority]; 574 break; 575 case RTE_DIM(priority_map_5): 576 res = priority_map_5[priority][subpriority]; 577 break; 578 } 579 return res; 580 } 581 582 /** 583 * Verify the @p item specifications (spec, last, mask) are compatible with the 584 * NIC capabilities. 585 * 586 * @param[in] item 587 * Item specification. 588 * @param[in] mask 589 * @p item->mask or flow default bit-masks. 590 * @param[in] nic_mask 591 * Bit-masks covering supported fields by the NIC to compare with user mask. 592 * @param[in] size 593 * Bit-masks size in bytes. 594 * @param[out] error 595 * Pointer to error structure. 596 * 597 * @return 598 * 0 on success, a negative errno value otherwise and rte_errno is set. 
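 *
 * Typical usage, mirroring the UDP item validator later in this file:
 * @code
 * if (!mask)
 *         mask = &rte_flow_item_udp_mask;
 * ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
 *                                 (const uint8_t *)&rte_flow_item_udp_mask,
 *                                 sizeof(struct rte_flow_item_udp), error);
 * @endcode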
 */
int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
			  const uint8_t *mask,
			  const uint8_t *nic_mask,
			  unsigned int size,
			  struct rte_flow_error *error)
{
	unsigned int i;

	MLX5_ASSERT(nic_mask);
	for (i = 0; i < size; ++i)
		if ((nic_mask[i] | mask[i]) != nic_mask[i])
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "mask enables non supported"
						  " bits");
	if (!item->spec && (item->mask || item->last))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "mask/last without a spec is not"
					  " supported");
	if (item->spec && item->last) {
		uint8_t spec[size];
		uint8_t last[size];
		unsigned int i;
		int ret;

		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
			last[i] = ((const uint8_t *)item->last)[i] & mask[i];
		}
		ret = memcmp(spec, last, size);
		if (ret != 0)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "range is not valid");
	}
	return 0;
}

/**
 * Adjust the hash fields according to the @p flow information.
 *
 * @param[in] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] tunnel
 *   1 when the hash field is for a tunnel item.
 * @param[in] layer_types
 *   ETH_RSS_* types.
 * @param[in] hash_fields
 *   Item hash fields.
 *
 * @return
 *   The hash fields that should be used.
 */
uint64_t
mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
			    int tunnel __rte_unused, uint64_t layer_types,
			    uint64_t hash_fields)
{
	struct rte_flow *flow = dev_flow->flow;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	int rss_request_inner = flow->rss.level >= 2;

	/* Check RSS hash level for tunnel. */
	if (tunnel && rss_request_inner)
		hash_fields |= IBV_RX_HASH_INNER;
	else if (tunnel || rss_request_inner)
		return 0;
#endif
	/* Check if requested layer matches RSS hash fields. */
	if (!(flow->rss.types & layer_types))
		return 0;
	return hash_fields;
}

/**
 * Look up and set the tunnel ptype in the Rx queue data. Only a single ptype
 * can be used; if several tunnel rules are used on this queue, the tunnel
 * ptype is cleared.
 *
 * @param rxq_ctrl
 *   Rx queue to update.
 */
static void
flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	unsigned int i;
	uint32_t tunnel_ptype = 0;

	/* Look up for the ptype to use. */
	for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
		if (!rxq_ctrl->flow_tunnels_n[i])
			continue;
		if (!tunnel_ptype) {
			tunnel_ptype = tunnels_info[i].ptype;
		} else {
			tunnel_ptype = 0;
			break;
		}
	}
	rxq_ctrl->rxq.tunnel = tunnel_ptype;
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
 * flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] dev_flow
 *   Pointer to device flow structure.
 */
static void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow *flow = dev_flow->flow;
	const int mark = !!(dev_flow->actions &
			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	for (i = 0; i != flow->rss.queue_num; ++i) {
		int idx = (*flow->rss.queue)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		/*
		 * To support metadata register copy on Tx loopback,
		 * this must be always enabled (metadata may arrive
		 * from another port - not from local flows only).
		 */
		if (priv->config.dv_flow_en &&
		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    mlx5_flow_ext_mreg_supported(dev)) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n = 1;
		} else if (mark) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n++;
		}
		if (tunnel) {
			unsigned int j;

			/* Increase the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_flow->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]++;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Pointer to flow structure.
 */
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow *dev_flow;

	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
		flow_drv_rxq_flags_set(dev, dev_flow);
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * device flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] dev_flow
 *   Pointer to the device flow.
 */
static void
flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow *flow = dev_flow->flow;
	const int mark = !!(dev_flow->actions &
			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	MLX5_ASSERT(dev->data->dev_started);
	for (i = 0; i != flow->rss.queue_num; ++i) {
		int idx = (*flow->rss.queue)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		if (priv->config.dv_flow_en &&
		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    mlx5_flow_ext_mreg_supported(dev)) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n = 1;
		} else if (mark) {
			rxq_ctrl->flow_mark_n--;
			rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
		}
		if (tunnel) {
			unsigned int j;

			/* Decrease the counter matching the flow.
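			 * The per-tunnel counters mirror the increments done
			 * in flow_drv_rxq_flags_set(); the queue tunnel ptype
			 * is then recomputed below.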
*/ 818 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { 819 if ((tunnels_info[j].tunnel & 820 dev_flow->layers) == 821 tunnels_info[j].tunnel) { 822 rxq_ctrl->flow_tunnels_n[j]--; 823 break; 824 } 825 } 826 flow_rxq_tunnel_ptype_update(rxq_ctrl); 827 } 828 } 829 } 830 831 /** 832 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the 833 * @p flow if no other flow uses it with the same kind of request. 834 * 835 * @param dev 836 * Pointer to Ethernet device. 837 * @param[in] flow 838 * Pointer to the flow. 839 */ 840 static void 841 flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) 842 { 843 struct mlx5_flow *dev_flow; 844 845 LIST_FOREACH(dev_flow, &flow->dev_flows, next) 846 flow_drv_rxq_flags_trim(dev, dev_flow); 847 } 848 849 /** 850 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues. 851 * 852 * @param dev 853 * Pointer to Ethernet device. 854 */ 855 static void 856 flow_rxq_flags_clear(struct rte_eth_dev *dev) 857 { 858 struct mlx5_priv *priv = dev->data->dev_private; 859 unsigned int i; 860 861 for (i = 0; i != priv->rxqs_n; ++i) { 862 struct mlx5_rxq_ctrl *rxq_ctrl; 863 unsigned int j; 864 865 if (!(*priv->rxqs)[i]) 866 continue; 867 rxq_ctrl = container_of((*priv->rxqs)[i], 868 struct mlx5_rxq_ctrl, rxq); 869 rxq_ctrl->flow_mark_n = 0; 870 rxq_ctrl->rxq.mark = 0; 871 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) 872 rxq_ctrl->flow_tunnels_n[j] = 0; 873 rxq_ctrl->rxq.tunnel = 0; 874 } 875 } 876 877 /* 878 * return a pointer to the desired action in the list of actions. 879 * 880 * @param[in] actions 881 * The list of actions to search the action in. 882 * @param[in] action 883 * The action to find. 884 * 885 * @return 886 * Pointer to the action in the list, if found. NULL otherwise. 887 */ 888 const struct rte_flow_action * 889 mlx5_flow_find_action(const struct rte_flow_action *actions, 890 enum rte_flow_action_type action) 891 { 892 if (actions == NULL) 893 return NULL; 894 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) 895 if (actions->type == action) 896 return actions; 897 return NULL; 898 } 899 900 /* 901 * Validate the flag action. 902 * 903 * @param[in] action_flags 904 * Bit-fields that holds the actions detected until now. 905 * @param[in] attr 906 * Attributes of flow that includes this action. 907 * @param[out] error 908 * Pointer to error structure. 909 * 910 * @return 911 * 0 on success, a negative errno value otherwise and rte_errno is set. 912 */ 913 int 914 mlx5_flow_validate_action_flag(uint64_t action_flags, 915 const struct rte_flow_attr *attr, 916 struct rte_flow_error *error) 917 { 918 if (action_flags & MLX5_FLOW_ACTION_MARK) 919 return rte_flow_error_set(error, EINVAL, 920 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 921 "can't mark and flag in same flow"); 922 if (action_flags & MLX5_FLOW_ACTION_FLAG) 923 return rte_flow_error_set(error, EINVAL, 924 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 925 "can't have 2 flag" 926 " actions in same flow"); 927 if (attr->egress) 928 return rte_flow_error_set(error, ENOTSUP, 929 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 930 "flag action not supported for " 931 "egress"); 932 return 0; 933 } 934 935 /* 936 * Validate the mark action. 937 * 938 * @param[in] action 939 * Pointer to the queue action. 940 * @param[in] action_flags 941 * Bit-fields that holds the actions detected until now. 942 * @param[in] attr 943 * Attributes of flow that includes this action. 944 * @param[out] error 945 * Pointer to error structure. 
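 *
 * A flow may carry at most one MARK or FLAG action, and not both at once;
 * the checks below reject the unsupported combinations.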
946 * 947 * @return 948 * 0 on success, a negative errno value otherwise and rte_errno is set. 949 */ 950 int 951 mlx5_flow_validate_action_mark(const struct rte_flow_action *action, 952 uint64_t action_flags, 953 const struct rte_flow_attr *attr, 954 struct rte_flow_error *error) 955 { 956 const struct rte_flow_action_mark *mark = action->conf; 957 958 if (!mark) 959 return rte_flow_error_set(error, EINVAL, 960 RTE_FLOW_ERROR_TYPE_ACTION, 961 action, 962 "configuration cannot be null"); 963 if (mark->id >= MLX5_FLOW_MARK_MAX) 964 return rte_flow_error_set(error, EINVAL, 965 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 966 &mark->id, 967 "mark id must in 0 <= id < " 968 RTE_STR(MLX5_FLOW_MARK_MAX)); 969 if (action_flags & MLX5_FLOW_ACTION_FLAG) 970 return rte_flow_error_set(error, EINVAL, 971 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 972 "can't flag and mark in same flow"); 973 if (action_flags & MLX5_FLOW_ACTION_MARK) 974 return rte_flow_error_set(error, EINVAL, 975 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 976 "can't have 2 mark actions in same" 977 " flow"); 978 if (attr->egress) 979 return rte_flow_error_set(error, ENOTSUP, 980 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 981 "mark action not supported for " 982 "egress"); 983 return 0; 984 } 985 986 /* 987 * Validate the drop action. 988 * 989 * @param[in] action_flags 990 * Bit-fields that holds the actions detected until now. 991 * @param[in] attr 992 * Attributes of flow that includes this action. 993 * @param[out] error 994 * Pointer to error structure. 995 * 996 * @return 997 * 0 on success, a negative errno value otherwise and rte_errno is set. 998 */ 999 int 1000 mlx5_flow_validate_action_drop(uint64_t action_flags __rte_unused, 1001 const struct rte_flow_attr *attr, 1002 struct rte_flow_error *error) 1003 { 1004 if (attr->egress) 1005 return rte_flow_error_set(error, ENOTSUP, 1006 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1007 "drop action not supported for " 1008 "egress"); 1009 return 0; 1010 } 1011 1012 /* 1013 * Validate the queue action. 1014 * 1015 * @param[in] action 1016 * Pointer to the queue action. 1017 * @param[in] action_flags 1018 * Bit-fields that holds the actions detected until now. 1019 * @param[in] dev 1020 * Pointer to the Ethernet device structure. 1021 * @param[in] attr 1022 * Attributes of flow that includes this action. 1023 * @param[out] error 1024 * Pointer to error structure. 1025 * 1026 * @return 1027 * 0 on success, a negative errno value otherwise and rte_errno is set. 
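 *
 *   Note that QUEUE, RSS and DROP are mutually exclusive fate actions: the
 *   MLX5_FLOW_FATE_ACTIONS check below rejects a flow that already carries
 *   another fate action.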
1028 */ 1029 int 1030 mlx5_flow_validate_action_queue(const struct rte_flow_action *action, 1031 uint64_t action_flags, 1032 struct rte_eth_dev *dev, 1033 const struct rte_flow_attr *attr, 1034 struct rte_flow_error *error) 1035 { 1036 struct mlx5_priv *priv = dev->data->dev_private; 1037 const struct rte_flow_action_queue *queue = action->conf; 1038 1039 if (action_flags & MLX5_FLOW_FATE_ACTIONS) 1040 return rte_flow_error_set(error, EINVAL, 1041 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1042 "can't have 2 fate actions in" 1043 " same flow"); 1044 if (!priv->rxqs_n) 1045 return rte_flow_error_set(error, EINVAL, 1046 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1047 NULL, "No Rx queues configured"); 1048 if (queue->index >= priv->rxqs_n) 1049 return rte_flow_error_set(error, EINVAL, 1050 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1051 &queue->index, 1052 "queue index out of range"); 1053 if (!(*priv->rxqs)[queue->index]) 1054 return rte_flow_error_set(error, EINVAL, 1055 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1056 &queue->index, 1057 "queue is not configured"); 1058 if (attr->egress) 1059 return rte_flow_error_set(error, ENOTSUP, 1060 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1061 "queue action not supported for " 1062 "egress"); 1063 return 0; 1064 } 1065 1066 /* 1067 * Validate the rss action. 1068 * 1069 * @param[in] action 1070 * Pointer to the queue action. 1071 * @param[in] action_flags 1072 * Bit-fields that holds the actions detected until now. 1073 * @param[in] dev 1074 * Pointer to the Ethernet device structure. 1075 * @param[in] attr 1076 * Attributes of flow that includes this action. 1077 * @param[in] item_flags 1078 * Items that were detected. 1079 * @param[out] error 1080 * Pointer to error structure. 1081 * 1082 * @return 1083 * 0 on success, a negative errno value otherwise and rte_errno is set. 1084 */ 1085 int 1086 mlx5_flow_validate_action_rss(const struct rte_flow_action *action, 1087 uint64_t action_flags, 1088 struct rte_eth_dev *dev, 1089 const struct rte_flow_attr *attr, 1090 uint64_t item_flags, 1091 struct rte_flow_error *error) 1092 { 1093 struct mlx5_priv *priv = dev->data->dev_private; 1094 const struct rte_flow_action_rss *rss = action->conf; 1095 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1096 unsigned int i; 1097 1098 if (action_flags & MLX5_FLOW_FATE_ACTIONS) 1099 return rte_flow_error_set(error, EINVAL, 1100 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1101 "can't have 2 fate actions" 1102 " in same flow"); 1103 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT && 1104 rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) 1105 return rte_flow_error_set(error, ENOTSUP, 1106 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1107 &rss->func, 1108 "RSS hash function not supported"); 1109 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 1110 if (rss->level > 2) 1111 #else 1112 if (rss->level > 1) 1113 #endif 1114 return rte_flow_error_set(error, ENOTSUP, 1115 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1116 &rss->level, 1117 "tunnel RSS is not supported"); 1118 /* allow RSS key_len 0 in case of NULL (default) RSS key. 
*/ 1119 if (rss->key_len == 0 && rss->key != NULL) 1120 return rte_flow_error_set(error, ENOTSUP, 1121 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1122 &rss->key_len, 1123 "RSS hash key length 0"); 1124 if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN) 1125 return rte_flow_error_set(error, ENOTSUP, 1126 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1127 &rss->key_len, 1128 "RSS hash key too small"); 1129 if (rss->key_len > MLX5_RSS_HASH_KEY_LEN) 1130 return rte_flow_error_set(error, ENOTSUP, 1131 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1132 &rss->key_len, 1133 "RSS hash key too large"); 1134 if (rss->queue_num > priv->config.ind_table_max_size) 1135 return rte_flow_error_set(error, ENOTSUP, 1136 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1137 &rss->queue_num, 1138 "number of queues too large"); 1139 if (rss->types & MLX5_RSS_HF_MASK) 1140 return rte_flow_error_set(error, ENOTSUP, 1141 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1142 &rss->types, 1143 "some RSS protocols are not" 1144 " supported"); 1145 if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) && 1146 !(rss->types & ETH_RSS_IP)) 1147 return rte_flow_error_set(error, EINVAL, 1148 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 1149 "L3 partial RSS requested but L3 RSS" 1150 " type not specified"); 1151 if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) && 1152 !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP))) 1153 return rte_flow_error_set(error, EINVAL, 1154 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 1155 "L4 partial RSS requested but L4 RSS" 1156 " type not specified"); 1157 if (!priv->rxqs_n) 1158 return rte_flow_error_set(error, EINVAL, 1159 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1160 NULL, "No Rx queues configured"); 1161 if (!rss->queue_num) 1162 return rte_flow_error_set(error, EINVAL, 1163 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1164 NULL, "No queues configured"); 1165 for (i = 0; i != rss->queue_num; ++i) { 1166 if (rss->queue[i] >= priv->rxqs_n) 1167 return rte_flow_error_set 1168 (error, EINVAL, 1169 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1170 &rss->queue[i], "queue index out of range"); 1171 if (!(*priv->rxqs)[rss->queue[i]]) 1172 return rte_flow_error_set 1173 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1174 &rss->queue[i], "queue is not configured"); 1175 } 1176 if (attr->egress) 1177 return rte_flow_error_set(error, ENOTSUP, 1178 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1179 "rss action not supported for " 1180 "egress"); 1181 if (rss->level > 1 && !tunnel) 1182 return rte_flow_error_set(error, EINVAL, 1183 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 1184 "inner RSS is not supported for " 1185 "non-tunnel flows"); 1186 return 0; 1187 } 1188 1189 /* 1190 * Validate the count action. 1191 * 1192 * @param[in] dev 1193 * Pointer to the Ethernet device structure. 1194 * @param[in] attr 1195 * Attributes of flow that includes this action. 1196 * @param[out] error 1197 * Pointer to error structure. 1198 * 1199 * @return 1200 * 0 on success, a negative errno value otherwise and rte_errno is set. 1201 */ 1202 int 1203 mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused, 1204 const struct rte_flow_attr *attr, 1205 struct rte_flow_error *error) 1206 { 1207 if (attr->egress) 1208 return rte_flow_error_set(error, ENOTSUP, 1209 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1210 "count action not supported for " 1211 "egress"); 1212 return 0; 1213 } 1214 1215 /** 1216 * Verify the @p attributes will be correctly understood by the NIC and store 1217 * them in the @p flow if everything is correct. 1218 * 1219 * @param[in] dev 1220 * Pointer to the Ethernet device structure. 
1221 * @param[in] attributes 1222 * Pointer to flow attributes 1223 * @param[out] error 1224 * Pointer to error structure. 1225 * 1226 * @return 1227 * 0 on success, a negative errno value otherwise and rte_errno is set. 1228 */ 1229 int 1230 mlx5_flow_validate_attributes(struct rte_eth_dev *dev, 1231 const struct rte_flow_attr *attributes, 1232 struct rte_flow_error *error) 1233 { 1234 struct mlx5_priv *priv = dev->data->dev_private; 1235 uint32_t priority_max = priv->config.flow_prio - 1; 1236 1237 if (attributes->group) 1238 return rte_flow_error_set(error, ENOTSUP, 1239 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, 1240 NULL, "groups is not supported"); 1241 if (attributes->priority != MLX5_FLOW_PRIO_RSVD && 1242 attributes->priority >= priority_max) 1243 return rte_flow_error_set(error, ENOTSUP, 1244 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, 1245 NULL, "priority out of range"); 1246 if (attributes->egress) 1247 return rte_flow_error_set(error, ENOTSUP, 1248 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1249 "egress is not supported"); 1250 if (attributes->transfer && !priv->config.dv_esw_en) 1251 return rte_flow_error_set(error, ENOTSUP, 1252 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, 1253 NULL, "transfer is not supported"); 1254 if (!attributes->ingress) 1255 return rte_flow_error_set(error, EINVAL, 1256 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, 1257 NULL, 1258 "ingress attribute is mandatory"); 1259 return 0; 1260 } 1261 1262 /** 1263 * Validate ICMP6 item. 1264 * 1265 * @param[in] item 1266 * Item specification. 1267 * @param[in] item_flags 1268 * Bit-fields that holds the items detected until now. 1269 * @param[out] error 1270 * Pointer to error structure. 1271 * 1272 * @return 1273 * 0 on success, a negative errno value otherwise and rte_errno is set. 1274 */ 1275 int 1276 mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item, 1277 uint64_t item_flags, 1278 uint8_t target_protocol, 1279 struct rte_flow_error *error) 1280 { 1281 const struct rte_flow_item_icmp6 *mask = item->mask; 1282 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1283 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : 1284 MLX5_FLOW_LAYER_OUTER_L3_IPV6; 1285 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1286 MLX5_FLOW_LAYER_OUTER_L4; 1287 int ret; 1288 1289 if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6) 1290 return rte_flow_error_set(error, EINVAL, 1291 RTE_FLOW_ERROR_TYPE_ITEM, item, 1292 "protocol filtering not compatible" 1293 " with ICMP6 layer"); 1294 if (!(item_flags & l3m)) 1295 return rte_flow_error_set(error, EINVAL, 1296 RTE_FLOW_ERROR_TYPE_ITEM, item, 1297 "IPv6 is mandatory to filter on" 1298 " ICMP6"); 1299 if (item_flags & l4m) 1300 return rte_flow_error_set(error, EINVAL, 1301 RTE_FLOW_ERROR_TYPE_ITEM, item, 1302 "multiple L4 layers not supported"); 1303 if (!mask) 1304 mask = &rte_flow_item_icmp6_mask; 1305 ret = mlx5_flow_item_acceptable 1306 (item, (const uint8_t *)mask, 1307 (const uint8_t *)&rte_flow_item_icmp6_mask, 1308 sizeof(struct rte_flow_item_icmp6), error); 1309 if (ret < 0) 1310 return ret; 1311 return 0; 1312 } 1313 1314 /** 1315 * Validate ICMP item. 1316 * 1317 * @param[in] item 1318 * Item specification. 1319 * @param[in] item_flags 1320 * Bit-fields that holds the items detected until now. 1321 * @param[out] error 1322 * Pointer to error structure. 1323 * 1324 * @return 1325 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1326 */ 1327 int 1328 mlx5_flow_validate_item_icmp(const struct rte_flow_item *item, 1329 uint64_t item_flags, 1330 uint8_t target_protocol, 1331 struct rte_flow_error *error) 1332 { 1333 const struct rte_flow_item_icmp *mask = item->mask; 1334 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1335 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : 1336 MLX5_FLOW_LAYER_OUTER_L3_IPV4; 1337 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1338 MLX5_FLOW_LAYER_OUTER_L4; 1339 int ret; 1340 1341 if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP) 1342 return rte_flow_error_set(error, EINVAL, 1343 RTE_FLOW_ERROR_TYPE_ITEM, item, 1344 "protocol filtering not compatible" 1345 " with ICMP layer"); 1346 if (!(item_flags & l3m)) 1347 return rte_flow_error_set(error, EINVAL, 1348 RTE_FLOW_ERROR_TYPE_ITEM, item, 1349 "IPv4 is mandatory to filter" 1350 " on ICMP"); 1351 if (item_flags & l4m) 1352 return rte_flow_error_set(error, EINVAL, 1353 RTE_FLOW_ERROR_TYPE_ITEM, item, 1354 "multiple L4 layers not supported"); 1355 if (!mask) 1356 mask = &rte_flow_item_icmp_mask; 1357 ret = mlx5_flow_item_acceptable 1358 (item, (const uint8_t *)mask, 1359 (const uint8_t *)&rte_flow_item_icmp_mask, 1360 sizeof(struct rte_flow_item_icmp), error); 1361 if (ret < 0) 1362 return ret; 1363 return 0; 1364 } 1365 1366 /** 1367 * Validate Ethernet item. 1368 * 1369 * @param[in] item 1370 * Item specification. 1371 * @param[in] item_flags 1372 * Bit-fields that holds the items detected until now. 1373 * @param[out] error 1374 * Pointer to error structure. 1375 * 1376 * @return 1377 * 0 on success, a negative errno value otherwise and rte_errno is set. 1378 */ 1379 int 1380 mlx5_flow_validate_item_eth(const struct rte_flow_item *item, 1381 uint64_t item_flags, 1382 struct rte_flow_error *error) 1383 { 1384 const struct rte_flow_item_eth *mask = item->mask; 1385 const struct rte_flow_item_eth nic_mask = { 1386 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", 1387 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", 1388 .type = RTE_BE16(0xffff), 1389 }; 1390 int ret; 1391 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1392 const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : 1393 MLX5_FLOW_LAYER_OUTER_L2; 1394 1395 if (item_flags & ethm) 1396 return rte_flow_error_set(error, ENOTSUP, 1397 RTE_FLOW_ERROR_TYPE_ITEM, item, 1398 "multiple L2 layers not supported"); 1399 if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) || 1400 (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3))) 1401 return rte_flow_error_set(error, EINVAL, 1402 RTE_FLOW_ERROR_TYPE_ITEM, item, 1403 "L2 layer should not follow " 1404 "L3 layers"); 1405 if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) || 1406 (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN))) 1407 return rte_flow_error_set(error, EINVAL, 1408 RTE_FLOW_ERROR_TYPE_ITEM, item, 1409 "L2 layer should not follow VLAN"); 1410 if (!mask) 1411 mask = &rte_flow_item_eth_mask; 1412 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1413 (const uint8_t *)&nic_mask, 1414 sizeof(struct rte_flow_item_eth), 1415 error); 1416 return ret; 1417 } 1418 1419 /** 1420 * Validate VLAN item. 1421 * 1422 * @param[in] item 1423 * Item specification. 1424 * @param[in] item_flags 1425 * Bit-fields that holds the items detected until now. 1426 * @param[in] dev 1427 * Ethernet device flow is being created on. 1428 * @param[out] error 1429 * Pointer to error structure. 
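 *
 * Matching on a TCI that masks to zero is rejected: from the Verbs
 * perspective an empty VLAN is equivalent to a packet without a VLAN layer.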
1430 * 1431 * @return 1432 * 0 on success, a negative errno value otherwise and rte_errno is set. 1433 */ 1434 int 1435 mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, 1436 uint64_t item_flags, 1437 struct rte_eth_dev *dev, 1438 struct rte_flow_error *error) 1439 { 1440 const struct rte_flow_item_vlan *spec = item->spec; 1441 const struct rte_flow_item_vlan *mask = item->mask; 1442 const struct rte_flow_item_vlan nic_mask = { 1443 .tci = RTE_BE16(UINT16_MAX), 1444 .inner_type = RTE_BE16(UINT16_MAX), 1445 }; 1446 uint16_t vlan_tag = 0; 1447 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1448 int ret; 1449 const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 | 1450 MLX5_FLOW_LAYER_INNER_L4) : 1451 (MLX5_FLOW_LAYER_OUTER_L3 | 1452 MLX5_FLOW_LAYER_OUTER_L4); 1453 const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN : 1454 MLX5_FLOW_LAYER_OUTER_VLAN; 1455 1456 if (item_flags & vlanm) 1457 return rte_flow_error_set(error, EINVAL, 1458 RTE_FLOW_ERROR_TYPE_ITEM, item, 1459 "multiple VLAN layers not supported"); 1460 else if ((item_flags & l34m) != 0) 1461 return rte_flow_error_set(error, EINVAL, 1462 RTE_FLOW_ERROR_TYPE_ITEM, item, 1463 "VLAN cannot follow L3/L4 layer"); 1464 if (!mask) 1465 mask = &rte_flow_item_vlan_mask; 1466 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1467 (const uint8_t *)&nic_mask, 1468 sizeof(struct rte_flow_item_vlan), 1469 error); 1470 if (ret) 1471 return ret; 1472 if (!tunnel && mask->tci != RTE_BE16(0x0fff)) { 1473 struct mlx5_priv *priv = dev->data->dev_private; 1474 1475 if (priv->vmwa_context) { 1476 /* 1477 * Non-NULL context means we have a virtual machine 1478 * and SR-IOV enabled, we have to create VLAN interface 1479 * to make hypervisor to setup E-Switch vport 1480 * context correctly. We avoid creating the multiple 1481 * VLAN interfaces, so we cannot support VLAN tag mask. 1482 */ 1483 return rte_flow_error_set(error, EINVAL, 1484 RTE_FLOW_ERROR_TYPE_ITEM, 1485 item, 1486 "VLAN tag mask is not" 1487 " supported in virtual" 1488 " environment"); 1489 } 1490 } 1491 if (spec) { 1492 vlan_tag = spec->tci; 1493 vlan_tag &= mask->tci; 1494 } 1495 /* 1496 * From verbs perspective an empty VLAN is equivalent 1497 * to a packet without VLAN layer. 1498 */ 1499 if (!vlan_tag) 1500 return rte_flow_error_set(error, EINVAL, 1501 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, 1502 item->spec, 1503 "VLAN cannot be empty"); 1504 return 0; 1505 } 1506 1507 /** 1508 * Validate IPV4 item. 1509 * 1510 * @param[in] item 1511 * Item specification. 1512 * @param[in] item_flags 1513 * Bit-fields that holds the items detected until now. 1514 * @param[in] acc_mask 1515 * Acceptable mask, if NULL default internal default mask 1516 * will be used to check whether item fields are supported. 1517 * @param[out] error 1518 * Pointer to error structure. 1519 * 1520 * @return 1521 * 0 on success, a negative errno value otherwise and rte_errno is set. 
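 *
 * Illustrative caller sketch ("ipv4_acc_mask" is a hypothetical name, only
 * meant to show that @p acc_mask, when non-NULL, replaces the built-in
 * nic_mask as the set of supported header fields):
 * @code
 * static const struct rte_flow_item_ipv4 ipv4_acc_mask = {
 *         .hdr = {
 *                 .src_addr = RTE_BE32(0xffffffff),
 *                 .dst_addr = RTE_BE32(0xffffffff),
 *         },
 * };
 *
 * ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
 *                                    ether_type, &ipv4_acc_mask, error);
 * @endcode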
1522 */ 1523 int 1524 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, 1525 uint64_t item_flags, 1526 uint64_t last_item, 1527 uint16_t ether_type, 1528 const struct rte_flow_item_ipv4 *acc_mask, 1529 struct rte_flow_error *error) 1530 { 1531 const struct rte_flow_item_ipv4 *mask = item->mask; 1532 const struct rte_flow_item_ipv4 *spec = item->spec; 1533 const struct rte_flow_item_ipv4 nic_mask = { 1534 .hdr = { 1535 .src_addr = RTE_BE32(0xffffffff), 1536 .dst_addr = RTE_BE32(0xffffffff), 1537 .type_of_service = 0xff, 1538 .next_proto_id = 0xff, 1539 }, 1540 }; 1541 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1542 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1543 MLX5_FLOW_LAYER_OUTER_L3; 1544 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1545 MLX5_FLOW_LAYER_OUTER_L4; 1546 int ret; 1547 uint8_t next_proto = 0xFF; 1548 const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 | 1549 MLX5_FLOW_LAYER_OUTER_VLAN | 1550 MLX5_FLOW_LAYER_INNER_VLAN); 1551 1552 if ((last_item & l2_vlan) && ether_type && 1553 ether_type != RTE_ETHER_TYPE_IPV4) 1554 return rte_flow_error_set(error, EINVAL, 1555 RTE_FLOW_ERROR_TYPE_ITEM, item, 1556 "IPv4 cannot follow L2/VLAN layer " 1557 "which ether type is not IPv4"); 1558 if (item_flags & MLX5_FLOW_LAYER_IPIP) { 1559 if (mask && spec) 1560 next_proto = mask->hdr.next_proto_id & 1561 spec->hdr.next_proto_id; 1562 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) 1563 return rte_flow_error_set(error, EINVAL, 1564 RTE_FLOW_ERROR_TYPE_ITEM, 1565 item, 1566 "multiple tunnel " 1567 "not supported"); 1568 } 1569 if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) 1570 return rte_flow_error_set(error, EINVAL, 1571 RTE_FLOW_ERROR_TYPE_ITEM, item, 1572 "wrong tunnel type - IPv6 specified " 1573 "but IPv4 item provided"); 1574 if (item_flags & l3m) 1575 return rte_flow_error_set(error, ENOTSUP, 1576 RTE_FLOW_ERROR_TYPE_ITEM, item, 1577 "multiple L3 layers not supported"); 1578 else if (item_flags & l4m) 1579 return rte_flow_error_set(error, EINVAL, 1580 RTE_FLOW_ERROR_TYPE_ITEM, item, 1581 "L3 cannot follow an L4 layer."); 1582 else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && 1583 !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) 1584 return rte_flow_error_set(error, EINVAL, 1585 RTE_FLOW_ERROR_TYPE_ITEM, item, 1586 "L3 cannot follow an NVGRE layer."); 1587 if (!mask) 1588 mask = &rte_flow_item_ipv4_mask; 1589 else if (mask->hdr.next_proto_id != 0 && 1590 mask->hdr.next_proto_id != 0xff) 1591 return rte_flow_error_set(error, EINVAL, 1592 RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask, 1593 "partial mask is not supported" 1594 " for protocol"); 1595 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1596 acc_mask ? (const uint8_t *)acc_mask 1597 : (const uint8_t *)&nic_mask, 1598 sizeof(struct rte_flow_item_ipv4), 1599 error); 1600 if (ret < 0) 1601 return ret; 1602 return 0; 1603 } 1604 1605 /** 1606 * Validate IPV6 item. 1607 * 1608 * @param[in] item 1609 * Item specification. 1610 * @param[in] item_flags 1611 * Bit-fields that holds the items detected until now. 1612 * @param[in] acc_mask 1613 * Acceptable mask, if NULL default internal default mask 1614 * will be used to check whether item fields are supported. 1615 * @param[out] error 1616 * Pointer to error structure. 1617 * 1618 * @return 1619 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1620 */ 1621 int 1622 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, 1623 uint64_t item_flags, 1624 uint64_t last_item, 1625 uint16_t ether_type, 1626 const struct rte_flow_item_ipv6 *acc_mask, 1627 struct rte_flow_error *error) 1628 { 1629 const struct rte_flow_item_ipv6 *mask = item->mask; 1630 const struct rte_flow_item_ipv6 *spec = item->spec; 1631 const struct rte_flow_item_ipv6 nic_mask = { 1632 .hdr = { 1633 .src_addr = 1634 "\xff\xff\xff\xff\xff\xff\xff\xff" 1635 "\xff\xff\xff\xff\xff\xff\xff\xff", 1636 .dst_addr = 1637 "\xff\xff\xff\xff\xff\xff\xff\xff" 1638 "\xff\xff\xff\xff\xff\xff\xff\xff", 1639 .vtc_flow = RTE_BE32(0xffffffff), 1640 .proto = 0xff, 1641 .hop_limits = 0xff, 1642 }, 1643 }; 1644 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1645 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1646 MLX5_FLOW_LAYER_OUTER_L3; 1647 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1648 MLX5_FLOW_LAYER_OUTER_L4; 1649 int ret; 1650 uint8_t next_proto = 0xFF; 1651 const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 | 1652 MLX5_FLOW_LAYER_OUTER_VLAN | 1653 MLX5_FLOW_LAYER_INNER_VLAN); 1654 1655 if ((last_item & l2_vlan) && ether_type && 1656 ether_type != RTE_ETHER_TYPE_IPV6) 1657 return rte_flow_error_set(error, EINVAL, 1658 RTE_FLOW_ERROR_TYPE_ITEM, item, 1659 "IPv6 cannot follow L2/VLAN layer " 1660 "which ether type is not IPv6"); 1661 if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) { 1662 if (mask && spec) 1663 next_proto = mask->hdr.proto & spec->hdr.proto; 1664 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) 1665 return rte_flow_error_set(error, EINVAL, 1666 RTE_FLOW_ERROR_TYPE_ITEM, 1667 item, 1668 "multiple tunnel " 1669 "not supported"); 1670 } 1671 if (item_flags & MLX5_FLOW_LAYER_IPIP) 1672 return rte_flow_error_set(error, EINVAL, 1673 RTE_FLOW_ERROR_TYPE_ITEM, item, 1674 "wrong tunnel type - IPv4 specified " 1675 "but IPv6 item provided"); 1676 if (item_flags & l3m) 1677 return rte_flow_error_set(error, ENOTSUP, 1678 RTE_FLOW_ERROR_TYPE_ITEM, item, 1679 "multiple L3 layers not supported"); 1680 else if (item_flags & l4m) 1681 return rte_flow_error_set(error, EINVAL, 1682 RTE_FLOW_ERROR_TYPE_ITEM, item, 1683 "L3 cannot follow an L4 layer."); 1684 else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && 1685 !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) 1686 return rte_flow_error_set(error, EINVAL, 1687 RTE_FLOW_ERROR_TYPE_ITEM, item, 1688 "L3 cannot follow an NVGRE layer."); 1689 if (!mask) 1690 mask = &rte_flow_item_ipv6_mask; 1691 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1692 acc_mask ? (const uint8_t *)acc_mask 1693 : (const uint8_t *)&nic_mask, 1694 sizeof(struct rte_flow_item_ipv6), 1695 error); 1696 if (ret < 0) 1697 return ret; 1698 return 0; 1699 } 1700 1701 /** 1702 * Validate UDP item. 1703 * 1704 * @param[in] item 1705 * Item specification. 1706 * @param[in] item_flags 1707 * Bit-fields that holds the items detected until now. 1708 * @param[in] target_protocol 1709 * The next protocol in the previous item. 1710 * @param[in] flow_mask 1711 * mlx5 flow-specific (DV, verbs, etc.) supported header fields mask. 1712 * @param[out] error 1713 * Pointer to error structure. 1714 * 1715 * @return 1716 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1717 */ 1718 int 1719 mlx5_flow_validate_item_udp(const struct rte_flow_item *item, 1720 uint64_t item_flags, 1721 uint8_t target_protocol, 1722 struct rte_flow_error *error) 1723 { 1724 const struct rte_flow_item_udp *mask = item->mask; 1725 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1726 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1727 MLX5_FLOW_LAYER_OUTER_L3; 1728 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1729 MLX5_FLOW_LAYER_OUTER_L4; 1730 int ret; 1731 1732 if (target_protocol != 0xff && target_protocol != IPPROTO_UDP) 1733 return rte_flow_error_set(error, EINVAL, 1734 RTE_FLOW_ERROR_TYPE_ITEM, item, 1735 "protocol filtering not compatible" 1736 " with UDP layer"); 1737 if (!(item_flags & l3m)) 1738 return rte_flow_error_set(error, EINVAL, 1739 RTE_FLOW_ERROR_TYPE_ITEM, item, 1740 "L3 is mandatory to filter on L4"); 1741 if (item_flags & l4m) 1742 return rte_flow_error_set(error, EINVAL, 1743 RTE_FLOW_ERROR_TYPE_ITEM, item, 1744 "multiple L4 layers not supported"); 1745 if (!mask) 1746 mask = &rte_flow_item_udp_mask; 1747 ret = mlx5_flow_item_acceptable 1748 (item, (const uint8_t *)mask, 1749 (const uint8_t *)&rte_flow_item_udp_mask, 1750 sizeof(struct rte_flow_item_udp), error); 1751 if (ret < 0) 1752 return ret; 1753 return 0; 1754 } 1755 1756 /** 1757 * Validate TCP item. 1758 * 1759 * @param[in] item 1760 * Item specification. 1761 * @param[in] item_flags 1762 * Bit-fields that holds the items detected until now. 1763 * @param[in] target_protocol 1764 * The next protocol in the previous item. 1765 * @param[out] error 1766 * Pointer to error structure. 1767 * 1768 * @return 1769 * 0 on success, a negative errno value otherwise and rte_errno is set. 1770 */ 1771 int 1772 mlx5_flow_validate_item_tcp(const struct rte_flow_item *item, 1773 uint64_t item_flags, 1774 uint8_t target_protocol, 1775 const struct rte_flow_item_tcp *flow_mask, 1776 struct rte_flow_error *error) 1777 { 1778 const struct rte_flow_item_tcp *mask = item->mask; 1779 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1780 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1781 MLX5_FLOW_LAYER_OUTER_L3; 1782 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1783 MLX5_FLOW_LAYER_OUTER_L4; 1784 int ret; 1785 1786 MLX5_ASSERT(flow_mask); 1787 if (target_protocol != 0xff && target_protocol != IPPROTO_TCP) 1788 return rte_flow_error_set(error, EINVAL, 1789 RTE_FLOW_ERROR_TYPE_ITEM, item, 1790 "protocol filtering not compatible" 1791 " with TCP layer"); 1792 if (!(item_flags & l3m)) 1793 return rte_flow_error_set(error, EINVAL, 1794 RTE_FLOW_ERROR_TYPE_ITEM, item, 1795 "L3 is mandatory to filter on L4"); 1796 if (item_flags & l4m) 1797 return rte_flow_error_set(error, EINVAL, 1798 RTE_FLOW_ERROR_TYPE_ITEM, item, 1799 "multiple L4 layers not supported"); 1800 if (!mask) 1801 mask = &rte_flow_item_tcp_mask; 1802 ret = mlx5_flow_item_acceptable 1803 (item, (const uint8_t *)mask, 1804 (const uint8_t *)flow_mask, 1805 sizeof(struct rte_flow_item_tcp), error); 1806 if (ret < 0) 1807 return ret; 1808 return 0; 1809 } 1810 1811 /** 1812 * Validate VXLAN item. 1813 * 1814 * @param[in] item 1815 * Item specification. 1816 * @param[in] item_flags 1817 * Bit-fields that holds the items detected until now. 1818 * @param[in] target_protocol 1819 * The next protocol in the previous item. 1820 * @param[out] error 1821 * Pointer to error structure. 1822 * 1823 * @return 1824 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1825 */ 1826 int 1827 mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, 1828 uint64_t item_flags, 1829 struct rte_flow_error *error) 1830 { 1831 const struct rte_flow_item_vxlan *spec = item->spec; 1832 const struct rte_flow_item_vxlan *mask = item->mask; 1833 int ret; 1834 union vni { 1835 uint32_t vlan_id; 1836 uint8_t vni[4]; 1837 } id = { .vlan_id = 0, }; 1838 uint32_t vlan_id = 0; 1839 1840 1841 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 1842 return rte_flow_error_set(error, ENOTSUP, 1843 RTE_FLOW_ERROR_TYPE_ITEM, item, 1844 "multiple tunnel layers not" 1845 " supported"); 1846 /* 1847 * Verify only UDPv4 is present as defined in 1848 * https://tools.ietf.org/html/rfc7348 1849 */ 1850 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 1851 return rte_flow_error_set(error, EINVAL, 1852 RTE_FLOW_ERROR_TYPE_ITEM, item, 1853 "no outer UDP layer found"); 1854 if (!mask) 1855 mask = &rte_flow_item_vxlan_mask; 1856 ret = mlx5_flow_item_acceptable 1857 (item, (const uint8_t *)mask, 1858 (const uint8_t *)&rte_flow_item_vxlan_mask, 1859 sizeof(struct rte_flow_item_vxlan), 1860 error); 1861 if (ret < 0) 1862 return ret; 1863 if (spec) { 1864 memcpy(&id.vni[1], spec->vni, 3); 1865 vlan_id = id.vlan_id; 1866 memcpy(&id.vni[1], mask->vni, 3); 1867 vlan_id &= id.vlan_id; 1868 } 1869 /* 1870 * Tunnel id 0 is equivalent as not adding a VXLAN layer, if 1871 * only this layer is defined in the Verbs specification it is 1872 * interpreted as wildcard and all packets will match this 1873 * rule, if it follows a full stack layer (ex: eth / ipv4 / 1874 * udp), all packets matching the layers before will also 1875 * match this rule. To avoid such situation, VNI 0 is 1876 * currently refused. 1877 */ 1878 if (!vlan_id) 1879 return rte_flow_error_set(error, ENOTSUP, 1880 RTE_FLOW_ERROR_TYPE_ITEM, item, 1881 "VXLAN vni cannot be 0"); 1882 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 1883 return rte_flow_error_set(error, ENOTSUP, 1884 RTE_FLOW_ERROR_TYPE_ITEM, item, 1885 "VXLAN tunnel must be fully defined"); 1886 return 0; 1887 } 1888 1889 /** 1890 * Validate VXLAN_GPE item. 1891 * 1892 * @param[in] item 1893 * Item specification. 1894 * @param[in] item_flags 1895 * Bit-fields that holds the items detected until now. 1896 * @param[in] priv 1897 * Pointer to the private data structure. 1898 * @param[in] target_protocol 1899 * The next protocol in the previous item. 1900 * @param[out] error 1901 * Pointer to error structure. 1902 * 1903 * @return 1904 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1905 */ 1906 int 1907 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, 1908 uint64_t item_flags, 1909 struct rte_eth_dev *dev, 1910 struct rte_flow_error *error) 1911 { 1912 struct mlx5_priv *priv = dev->data->dev_private; 1913 const struct rte_flow_item_vxlan_gpe *spec = item->spec; 1914 const struct rte_flow_item_vxlan_gpe *mask = item->mask; 1915 int ret; 1916 union vni { 1917 uint32_t vlan_id; 1918 uint8_t vni[4]; 1919 } id = { .vlan_id = 0, }; 1920 uint32_t vlan_id = 0; 1921 1922 if (!priv->config.l3_vxlan_en) 1923 return rte_flow_error_set(error, ENOTSUP, 1924 RTE_FLOW_ERROR_TYPE_ITEM, item, 1925 "L3 VXLAN is not enabled by device" 1926 " parameter and/or not configured in" 1927 " firmware"); 1928 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 1929 return rte_flow_error_set(error, ENOTSUP, 1930 RTE_FLOW_ERROR_TYPE_ITEM, item, 1931 "multiple tunnel layers not" 1932 " supported"); 1933 /* 1934 * Verify only UDPv4 is present as defined in 1935 * https://tools.ietf.org/html/rfc7348 1936 */ 1937 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 1938 return rte_flow_error_set(error, EINVAL, 1939 RTE_FLOW_ERROR_TYPE_ITEM, item, 1940 "no outer UDP layer found"); 1941 if (!mask) 1942 mask = &rte_flow_item_vxlan_gpe_mask; 1943 ret = mlx5_flow_item_acceptable 1944 (item, (const uint8_t *)mask, 1945 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask, 1946 sizeof(struct rte_flow_item_vxlan_gpe), 1947 error); 1948 if (ret < 0) 1949 return ret; 1950 if (spec) { 1951 if (spec->protocol) 1952 return rte_flow_error_set(error, ENOTSUP, 1953 RTE_FLOW_ERROR_TYPE_ITEM, 1954 item, 1955 "VxLAN-GPE protocol" 1956 " not supported"); 1957 memcpy(&id.vni[1], spec->vni, 3); 1958 vlan_id = id.vlan_id; 1959 memcpy(&id.vni[1], mask->vni, 3); 1960 vlan_id &= id.vlan_id; 1961 } 1962 /* 1963 * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this 1964 * layer is defined in the Verbs specification it is interpreted as 1965 * wildcard and all packets will match this rule, if it follows a full 1966 * stack layer (ex: eth / ipv4 / udp), all packets matching the layers 1967 * before will also match this rule. To avoid such situation, VNI 0 1968 * is currently refused. 1969 */ 1970 if (!vlan_id) 1971 return rte_flow_error_set(error, ENOTSUP, 1972 RTE_FLOW_ERROR_TYPE_ITEM, item, 1973 "VXLAN-GPE vni cannot be 0"); 1974 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 1975 return rte_flow_error_set(error, ENOTSUP, 1976 RTE_FLOW_ERROR_TYPE_ITEM, item, 1977 "VXLAN-GPE tunnel must be fully" 1978 " defined"); 1979 return 0; 1980 } 1981 /** 1982 * Validate GRE Key item. 1983 * 1984 * @param[in] item 1985 * Item specification. 1986 * @param[in] item_flags 1987 * Bit flags to mark detected items. 1988 * @param[in] gre_item 1989 * Pointer to gre_item 1990 * @param[out] error 1991 * Pointer to error structure. 1992 * 1993 * @return 1994 * 0 on success, a negative errno value otherwise and rte_errno is set. 
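 *
 *   A GRE key item has to follow a GRE item; if the GRE mask covers the key
 *   present bit (RTE_BE16(0x2000) in c_rsvd0_ver), the spec must have it
 *   set. Illustrative sketch only (the key value is hypothetical):
 * @code
 *   struct rte_flow_item_gre gre_spec = {
 *           .c_rsvd0_ver = RTE_BE16(0x2000),
 *   };
 *   rte_be32_t gre_key = RTE_BE32(0x1234);
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_GRE, .spec = &gre_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_GRE_KEY, .spec = &gre_key },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 * @endcode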
 */
int
mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
                                uint64_t item_flags,
                                const struct rte_flow_item *gre_item,
                                struct rte_flow_error *error)
{
        const rte_be32_t *mask = item->mask;
        int ret = 0;
        rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
        const struct rte_flow_item_gre *gre_spec;
        const struct rte_flow_item_gre *gre_mask;

        if (item_flags & MLX5_FLOW_LAYER_GRE_KEY)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Multiple GRE key items not supported");
        if (!(item_flags & MLX5_FLOW_LAYER_GRE))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "No preceding GRE header");
        if (item_flags & MLX5_FLOW_LAYER_INNER)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "GRE key following a wrong item");
        gre_mask = gre_item->mask;
        if (!gre_mask)
                gre_mask = &rte_flow_item_gre_mask;
        gre_spec = gre_item->spec;
        if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
                         !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Key bit must be on");

        if (!mask)
                mask = &gre_key_default_mask;
        ret = mlx5_flow_item_acceptable
                (item, (const uint8_t *)mask,
                 (const uint8_t *)&gre_key_default_mask,
                 sizeof(rte_be32_t), error);
        return ret;
}

/**
 * Validate GRE item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit flags to mark detected items.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
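 *
 *   Matching on the GRE protocol field is limited by the nic_mask below;
 *   when the build has no MPLS support, a non-zero protocol specification
 *   is refused. Illustrative sketch only:
 * @code
 *   struct rte_flow_item_gre gre_spec = {
 *           .protocol = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *   };
 *   struct rte_flow_item_gre gre_mask = {
 *           .protocol = RTE_BE16(UINT16_MAX),
 *   };
 * @endcode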
 */
int
mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
                            uint64_t item_flags,
                            uint8_t target_protocol,
                            struct rte_flow_error *error)
{
        const struct rte_flow_item_gre *spec __rte_unused = item->spec;
        const struct rte_flow_item_gre *mask = item->mask;
        int ret;
        const struct rte_flow_item_gre nic_mask = {
                .c_rsvd0_ver = RTE_BE16(0xB000),
                .protocol = RTE_BE16(UINT16_MAX),
        };

        if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "protocol filtering not compatible"
                                          " with this GRE layer");
        if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple tunnel layers not"
                                          " supported");
        if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L3 Layer is missing");
        if (!mask)
                mask = &rte_flow_item_gre_mask;
        ret = mlx5_flow_item_acceptable
                (item, (const uint8_t *)mask,
                 (const uint8_t *)&nic_mask,
                 sizeof(struct rte_flow_item_gre), error);
        if (ret < 0)
                return ret;
#ifndef HAVE_MLX5DV_DR
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
        if (spec && (spec->protocol & mask->protocol))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "without MPLS support the"
                                          " specification cannot be used for"
                                          " filtering");
#endif
#endif
        return 0;
}

/**
 * Validate Geneve item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
                               uint64_t item_flags,
                               struct rte_eth_dev *dev,
                               struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_item_geneve *spec = item->spec;
        const struct rte_flow_item_geneve *mask = item->mask;
        int ret;
        uint16_t gbhdr;
        uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
2131 MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0; 2132 const struct rte_flow_item_geneve nic_mask = { 2133 .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80), 2134 .vni = "\xff\xff\xff", 2135 .protocol = RTE_BE16(UINT16_MAX), 2136 }; 2137 2138 if (!(priv->config.hca_attr.flex_parser_protocols & 2139 MLX5_HCA_FLEX_GENEVE_ENABLED) || 2140 !priv->config.hca_attr.tunnel_stateless_geneve_rx) 2141 return rte_flow_error_set(error, ENOTSUP, 2142 RTE_FLOW_ERROR_TYPE_ITEM, item, 2143 "L3 Geneve is not enabled by device" 2144 " parameter and/or not configured in" 2145 " firmware"); 2146 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 2147 return rte_flow_error_set(error, ENOTSUP, 2148 RTE_FLOW_ERROR_TYPE_ITEM, item, 2149 "multiple tunnel layers not" 2150 " supported"); 2151 /* 2152 * Verify only UDPv4 is present as defined in 2153 * https://tools.ietf.org/html/rfc7348 2154 */ 2155 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 2156 return rte_flow_error_set(error, EINVAL, 2157 RTE_FLOW_ERROR_TYPE_ITEM, item, 2158 "no outer UDP layer found"); 2159 if (!mask) 2160 mask = &rte_flow_item_geneve_mask; 2161 ret = mlx5_flow_item_acceptable 2162 (item, (const uint8_t *)mask, 2163 (const uint8_t *)&nic_mask, 2164 sizeof(struct rte_flow_item_geneve), error); 2165 if (ret) 2166 return ret; 2167 if (spec) { 2168 gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0); 2169 if (MLX5_GENEVE_VER_VAL(gbhdr) || 2170 MLX5_GENEVE_CRITO_VAL(gbhdr) || 2171 MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1) 2172 return rte_flow_error_set(error, ENOTSUP, 2173 RTE_FLOW_ERROR_TYPE_ITEM, 2174 item, 2175 "Geneve protocol unsupported" 2176 " fields are being used"); 2177 if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len) 2178 return rte_flow_error_set 2179 (error, ENOTSUP, 2180 RTE_FLOW_ERROR_TYPE_ITEM, 2181 item, 2182 "Unsupported Geneve options length"); 2183 } 2184 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 2185 return rte_flow_error_set 2186 (error, ENOTSUP, 2187 RTE_FLOW_ERROR_TYPE_ITEM, item, 2188 "Geneve tunnel must be fully defined"); 2189 return 0; 2190 } 2191 2192 /** 2193 * Validate MPLS item. 2194 * 2195 * @param[in] dev 2196 * Pointer to the rte_eth_dev structure. 2197 * @param[in] item 2198 * Item specification. 2199 * @param[in] item_flags 2200 * Bit-fields that holds the items detected until now. 2201 * @param[in] prev_layer 2202 * The protocol layer indicated in previous item. 2203 * @param[out] error 2204 * Pointer to error structure. 2205 * 2206 * @return 2207 * 0 on success, a negative errno value otherwise and rte_errno is set. 2208 */ 2209 int 2210 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, 2211 const struct rte_flow_item *item __rte_unused, 2212 uint64_t item_flags __rte_unused, 2213 uint64_t prev_layer __rte_unused, 2214 struct rte_flow_error *error) 2215 { 2216 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT 2217 const struct rte_flow_item_mpls *mask = item->mask; 2218 struct mlx5_priv *priv = dev->data->dev_private; 2219 int ret; 2220 2221 if (!priv->config.mpls_en) 2222 return rte_flow_error_set(error, ENOTSUP, 2223 RTE_FLOW_ERROR_TYPE_ITEM, item, 2224 "MPLS not supported or" 2225 " disabled in firmware" 2226 " configuration."); 2227 /* MPLS over IP, UDP, GRE is allowed */ 2228 if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 | 2229 MLX5_FLOW_LAYER_OUTER_L4_UDP | 2230 MLX5_FLOW_LAYER_GRE))) 2231 return rte_flow_error_set(error, EINVAL, 2232 RTE_FLOW_ERROR_TYPE_ITEM, item, 2233 "protocol filtering not compatible" 2234 " with MPLS layer"); 2235 /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. 
*/ 2236 if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) && 2237 !(item_flags & MLX5_FLOW_LAYER_GRE)) 2238 return rte_flow_error_set(error, ENOTSUP, 2239 RTE_FLOW_ERROR_TYPE_ITEM, item, 2240 "multiple tunnel layers not" 2241 " supported"); 2242 if (!mask) 2243 mask = &rte_flow_item_mpls_mask; 2244 ret = mlx5_flow_item_acceptable 2245 (item, (const uint8_t *)mask, 2246 (const uint8_t *)&rte_flow_item_mpls_mask, 2247 sizeof(struct rte_flow_item_mpls), error); 2248 if (ret < 0) 2249 return ret; 2250 return 0; 2251 #endif 2252 return rte_flow_error_set(error, ENOTSUP, 2253 RTE_FLOW_ERROR_TYPE_ITEM, item, 2254 "MPLS is not supported by Verbs, please" 2255 " update."); 2256 } 2257 2258 /** 2259 * Validate NVGRE item. 2260 * 2261 * @param[in] item 2262 * Item specification. 2263 * @param[in] item_flags 2264 * Bit flags to mark detected items. 2265 * @param[in] target_protocol 2266 * The next protocol in the previous item. 2267 * @param[out] error 2268 * Pointer to error structure. 2269 * 2270 * @return 2271 * 0 on success, a negative errno value otherwise and rte_errno is set. 2272 */ 2273 int 2274 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item, 2275 uint64_t item_flags, 2276 uint8_t target_protocol, 2277 struct rte_flow_error *error) 2278 { 2279 const struct rte_flow_item_nvgre *mask = item->mask; 2280 int ret; 2281 2282 if (target_protocol != 0xff && target_protocol != IPPROTO_GRE) 2283 return rte_flow_error_set(error, EINVAL, 2284 RTE_FLOW_ERROR_TYPE_ITEM, item, 2285 "protocol filtering not compatible" 2286 " with this GRE layer"); 2287 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 2288 return rte_flow_error_set(error, ENOTSUP, 2289 RTE_FLOW_ERROR_TYPE_ITEM, item, 2290 "multiple tunnel layers not" 2291 " supported"); 2292 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3)) 2293 return rte_flow_error_set(error, ENOTSUP, 2294 RTE_FLOW_ERROR_TYPE_ITEM, item, 2295 "L3 Layer is missing"); 2296 if (!mask) 2297 mask = &rte_flow_item_nvgre_mask; 2298 ret = mlx5_flow_item_acceptable 2299 (item, (const uint8_t *)mask, 2300 (const uint8_t *)&rte_flow_item_nvgre_mask, 2301 sizeof(struct rte_flow_item_nvgre), error); 2302 if (ret < 0) 2303 return ret; 2304 return 0; 2305 } 2306 2307 /* Allocate unique ID for the split Q/RSS subflows. */ 2308 static uint32_t 2309 flow_qrss_get_id(struct rte_eth_dev *dev) 2310 { 2311 struct mlx5_priv *priv = dev->data->dev_private; 2312 uint32_t qrss_id, ret; 2313 2314 ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id); 2315 if (ret) 2316 return 0; 2317 MLX5_ASSERT(qrss_id); 2318 return qrss_id; 2319 } 2320 2321 /* Free unique ID for the split Q/RSS subflows. */ 2322 static void 2323 flow_qrss_free_id(struct rte_eth_dev *dev, uint32_t qrss_id) 2324 { 2325 struct mlx5_priv *priv = dev->data->dev_private; 2326 2327 if (qrss_id) 2328 mlx5_flow_id_release(priv->qrss_id_pool, qrss_id); 2329 } 2330 2331 /** 2332 * Release resource related QUEUE/RSS action split. 2333 * 2334 * @param dev 2335 * Pointer to Ethernet device. 2336 * @param flow 2337 * Flow to release id's from. 
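 *
 *   Each device subflow (struct mlx5_flow) keeps the qrss_id it obtained
 *   from flow_qrss_get_id(); this helper walks flow->dev_flows and returns
 *   every non-zero ID to the per-port qrss_id_pool.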
2338 */ 2339 static void 2340 flow_mreg_split_qrss_release(struct rte_eth_dev *dev, 2341 struct rte_flow *flow) 2342 { 2343 struct mlx5_flow *dev_flow; 2344 2345 LIST_FOREACH(dev_flow, &flow->dev_flows, next) 2346 if (dev_flow->qrss_id) 2347 flow_qrss_free_id(dev, dev_flow->qrss_id); 2348 } 2349 2350 static int 2351 flow_null_validate(struct rte_eth_dev *dev __rte_unused, 2352 const struct rte_flow_attr *attr __rte_unused, 2353 const struct rte_flow_item items[] __rte_unused, 2354 const struct rte_flow_action actions[] __rte_unused, 2355 bool external __rte_unused, 2356 struct rte_flow_error *error) 2357 { 2358 return rte_flow_error_set(error, ENOTSUP, 2359 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2360 } 2361 2362 static struct mlx5_flow * 2363 flow_null_prepare(const struct rte_flow_attr *attr __rte_unused, 2364 const struct rte_flow_item items[] __rte_unused, 2365 const struct rte_flow_action actions[] __rte_unused, 2366 struct rte_flow_error *error) 2367 { 2368 rte_flow_error_set(error, ENOTSUP, 2369 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2370 return NULL; 2371 } 2372 2373 static int 2374 flow_null_translate(struct rte_eth_dev *dev __rte_unused, 2375 struct mlx5_flow *dev_flow __rte_unused, 2376 const struct rte_flow_attr *attr __rte_unused, 2377 const struct rte_flow_item items[] __rte_unused, 2378 const struct rte_flow_action actions[] __rte_unused, 2379 struct rte_flow_error *error) 2380 { 2381 return rte_flow_error_set(error, ENOTSUP, 2382 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2383 } 2384 2385 static int 2386 flow_null_apply(struct rte_eth_dev *dev __rte_unused, 2387 struct rte_flow *flow __rte_unused, 2388 struct rte_flow_error *error) 2389 { 2390 return rte_flow_error_set(error, ENOTSUP, 2391 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2392 } 2393 2394 static void 2395 flow_null_remove(struct rte_eth_dev *dev __rte_unused, 2396 struct rte_flow *flow __rte_unused) 2397 { 2398 } 2399 2400 static void 2401 flow_null_destroy(struct rte_eth_dev *dev __rte_unused, 2402 struct rte_flow *flow __rte_unused) 2403 { 2404 } 2405 2406 static int 2407 flow_null_query(struct rte_eth_dev *dev __rte_unused, 2408 struct rte_flow *flow __rte_unused, 2409 const struct rte_flow_action *actions __rte_unused, 2410 void *data __rte_unused, 2411 struct rte_flow_error *error) 2412 { 2413 return rte_flow_error_set(error, ENOTSUP, 2414 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2415 } 2416 2417 /* Void driver to protect from null pointer reference. */ 2418 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = { 2419 .validate = flow_null_validate, 2420 .prepare = flow_null_prepare, 2421 .translate = flow_null_translate, 2422 .apply = flow_null_apply, 2423 .remove = flow_null_remove, 2424 .destroy = flow_null_destroy, 2425 .query = flow_null_query, 2426 }; 2427 2428 /** 2429 * Select flow driver type according to flow attributes and device 2430 * configuration. 2431 * 2432 * @param[in] dev 2433 * Pointer to the dev structure. 2434 * @param[in] attr 2435 * Pointer to the flow attributes. 2436 * 2437 * @return 2438 * flow driver type, MLX5_FLOW_TYPE_MAX otherwise. 2439 */ 2440 static enum mlx5_flow_drv_type 2441 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) 2442 { 2443 struct mlx5_priv *priv = dev->data->dev_private; 2444 enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX; 2445 2446 if (attr->transfer && priv->config.dv_esw_en) 2447 type = MLX5_FLOW_TYPE_DV; 2448 if (!attr->transfer) 2449 type = priv->config.dv_flow_en ? 
MLX5_FLOW_TYPE_DV : 2450 MLX5_FLOW_TYPE_VERBS; 2451 return type; 2452 } 2453 2454 #define flow_get_drv_ops(type) flow_drv_ops[type] 2455 2456 /** 2457 * Flow driver validation API. This abstracts calling driver specific functions. 2458 * The type of flow driver is determined according to flow attributes. 2459 * 2460 * @param[in] dev 2461 * Pointer to the dev structure. 2462 * @param[in] attr 2463 * Pointer to the flow attributes. 2464 * @param[in] items 2465 * Pointer to the list of items. 2466 * @param[in] actions 2467 * Pointer to the list of actions. 2468 * @param[in] external 2469 * This flow rule is created by request external to PMD. 2470 * @param[out] error 2471 * Pointer to the error structure. 2472 * 2473 * @return 2474 * 0 on success, a negative errno value otherwise and rte_errno is set. 2475 */ 2476 static inline int 2477 flow_drv_validate(struct rte_eth_dev *dev, 2478 const struct rte_flow_attr *attr, 2479 const struct rte_flow_item items[], 2480 const struct rte_flow_action actions[], 2481 bool external, struct rte_flow_error *error) 2482 { 2483 const struct mlx5_flow_driver_ops *fops; 2484 enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr); 2485 2486 fops = flow_get_drv_ops(type); 2487 return fops->validate(dev, attr, items, actions, external, error); 2488 } 2489 2490 /** 2491 * Flow driver preparation API. This abstracts calling driver specific 2492 * functions. Parent flow (rte_flow) should have driver type (drv_type). It 2493 * calculates the size of memory required for device flow, allocates the memory, 2494 * initializes the device flow and returns the pointer. 2495 * 2496 * @note 2497 * This function initializes device flow structure such as dv or verbs in 2498 * struct mlx5_flow. However, it is caller's responsibility to initialize the 2499 * rest. For example, adding returning device flow to flow->dev_flow list and 2500 * setting backward reference to the flow should be done out of this function. 2501 * layers field is not filled either. 2502 * 2503 * @param[in] attr 2504 * Pointer to the flow attributes. 2505 * @param[in] items 2506 * Pointer to the list of items. 2507 * @param[in] actions 2508 * Pointer to the list of actions. 2509 * @param[out] error 2510 * Pointer to the error structure. 2511 * 2512 * @return 2513 * Pointer to device flow on success, otherwise NULL and rte_errno is set. 2514 */ 2515 static inline struct mlx5_flow * 2516 flow_drv_prepare(const struct rte_flow *flow, 2517 const struct rte_flow_attr *attr, 2518 const struct rte_flow_item items[], 2519 const struct rte_flow_action actions[], 2520 struct rte_flow_error *error) 2521 { 2522 const struct mlx5_flow_driver_ops *fops; 2523 enum mlx5_flow_drv_type type = flow->drv_type; 2524 2525 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2526 fops = flow_get_drv_ops(type); 2527 return fops->prepare(attr, items, actions, error); 2528 } 2529 2530 /** 2531 * Flow driver translation API. This abstracts calling driver specific 2532 * functions. Parent flow (rte_flow) should have driver type (drv_type). It 2533 * translates a generic flow into a driver flow. flow_drv_prepare() must 2534 * precede. 2535 * 2536 * @note 2537 * dev_flow->layers could be filled as a result of parsing during translation 2538 * if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled 2539 * if necessary. As a flow can have multiple dev_flows by RSS flow expansion, 2540 * flow->actions could be overwritten even though all the expanded dev_flows 2541 * have the same actions. 
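 *
 *   The expected driver call sequence around this function is, roughly
 *   (illustrative only, error handling omitted):
 * @code
 *   flow_drv_validate(dev, attr, items, actions, true, error);
 *   dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
 *   flow_drv_translate(dev, dev_flow, attr, items, actions, error);
 *   flow_drv_apply(dev, flow, error);
 * @endcode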
2542 * 2543 * @param[in] dev 2544 * Pointer to the rte dev structure. 2545 * @param[in, out] dev_flow 2546 * Pointer to the mlx5 flow. 2547 * @param[in] attr 2548 * Pointer to the flow attributes. 2549 * @param[in] items 2550 * Pointer to the list of items. 2551 * @param[in] actions 2552 * Pointer to the list of actions. 2553 * @param[out] error 2554 * Pointer to the error structure. 2555 * 2556 * @return 2557 * 0 on success, a negative errno value otherwise and rte_errno is set. 2558 */ 2559 static inline int 2560 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow, 2561 const struct rte_flow_attr *attr, 2562 const struct rte_flow_item items[], 2563 const struct rte_flow_action actions[], 2564 struct rte_flow_error *error) 2565 { 2566 const struct mlx5_flow_driver_ops *fops; 2567 enum mlx5_flow_drv_type type = dev_flow->flow->drv_type; 2568 2569 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2570 fops = flow_get_drv_ops(type); 2571 return fops->translate(dev, dev_flow, attr, items, actions, error); 2572 } 2573 2574 /** 2575 * Flow driver apply API. This abstracts calling driver specific functions. 2576 * Parent flow (rte_flow) should have driver type (drv_type). It applies 2577 * translated driver flows on to device. flow_drv_translate() must precede. 2578 * 2579 * @param[in] dev 2580 * Pointer to Ethernet device structure. 2581 * @param[in, out] flow 2582 * Pointer to flow structure. 2583 * @param[out] error 2584 * Pointer to error structure. 2585 * 2586 * @return 2587 * 0 on success, a negative errno value otherwise and rte_errno is set. 2588 */ 2589 static inline int 2590 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, 2591 struct rte_flow_error *error) 2592 { 2593 const struct mlx5_flow_driver_ops *fops; 2594 enum mlx5_flow_drv_type type = flow->drv_type; 2595 2596 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2597 fops = flow_get_drv_ops(type); 2598 return fops->apply(dev, flow, error); 2599 } 2600 2601 /** 2602 * Flow driver remove API. This abstracts calling driver specific functions. 2603 * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow 2604 * on device. All the resources of the flow should be freed by calling 2605 * flow_drv_destroy(). 2606 * 2607 * @param[in] dev 2608 * Pointer to Ethernet device. 2609 * @param[in, out] flow 2610 * Pointer to flow structure. 2611 */ 2612 static inline void 2613 flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) 2614 { 2615 const struct mlx5_flow_driver_ops *fops; 2616 enum mlx5_flow_drv_type type = flow->drv_type; 2617 2618 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2619 fops = flow_get_drv_ops(type); 2620 fops->remove(dev, flow); 2621 } 2622 2623 /** 2624 * Flow driver destroy API. This abstracts calling driver specific functions. 2625 * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow 2626 * on device and releases resources of the flow. 2627 * 2628 * @param[in] dev 2629 * Pointer to Ethernet device. 2630 * @param[in, out] flow 2631 * Pointer to flow structure. 
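 *
 *   Besides invoking the driver destroy callback, this also releases the
 *   unique IDs held by split Q/RSS subflows through
 *   flow_mreg_split_qrss_release().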
2632 */ 2633 static inline void 2634 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) 2635 { 2636 const struct mlx5_flow_driver_ops *fops; 2637 enum mlx5_flow_drv_type type = flow->drv_type; 2638 2639 flow_mreg_split_qrss_release(dev, flow); 2640 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2641 fops = flow_get_drv_ops(type); 2642 fops->destroy(dev, flow); 2643 } 2644 2645 /** 2646 * Validate a flow supported by the NIC. 2647 * 2648 * @see rte_flow_validate() 2649 * @see rte_flow_ops 2650 */ 2651 int 2652 mlx5_flow_validate(struct rte_eth_dev *dev, 2653 const struct rte_flow_attr *attr, 2654 const struct rte_flow_item items[], 2655 const struct rte_flow_action actions[], 2656 struct rte_flow_error *error) 2657 { 2658 int ret; 2659 2660 ret = flow_drv_validate(dev, attr, items, actions, true, error); 2661 if (ret < 0) 2662 return ret; 2663 return 0; 2664 } 2665 2666 /** 2667 * Get port id item from the item list. 2668 * 2669 * @param[in] item 2670 * Pointer to the list of items. 2671 * 2672 * @return 2673 * Pointer to the port id item if exist, else return NULL. 2674 */ 2675 static const struct rte_flow_item * 2676 find_port_id_item(const struct rte_flow_item *item) 2677 { 2678 MLX5_ASSERT(item); 2679 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { 2680 if (item->type == RTE_FLOW_ITEM_TYPE_PORT_ID) 2681 return item; 2682 } 2683 return NULL; 2684 } 2685 2686 /** 2687 * Get RSS action from the action list. 2688 * 2689 * @param[in] actions 2690 * Pointer to the list of actions. 2691 * 2692 * @return 2693 * Pointer to the RSS action if exist, else return NULL. 2694 */ 2695 static const struct rte_flow_action_rss* 2696 flow_get_rss_action(const struct rte_flow_action actions[]) 2697 { 2698 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2699 switch (actions->type) { 2700 case RTE_FLOW_ACTION_TYPE_RSS: 2701 return (const struct rte_flow_action_rss *) 2702 actions->conf; 2703 default: 2704 break; 2705 } 2706 } 2707 return NULL; 2708 } 2709 2710 static unsigned int 2711 find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) 2712 { 2713 const struct rte_flow_item *item; 2714 unsigned int has_vlan = 0; 2715 2716 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { 2717 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { 2718 has_vlan = 1; 2719 break; 2720 } 2721 } 2722 if (has_vlan) 2723 return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN : 2724 MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN; 2725 return rss_level < 2 ? MLX5_EXPANSION_ROOT : 2726 MLX5_EXPANSION_ROOT_OUTER; 2727 } 2728 2729 /** 2730 * Get QUEUE/RSS action from the action list. 2731 * 2732 * @param[in] actions 2733 * Pointer to the list of actions. 2734 * @param[out] qrss 2735 * Pointer to the return pointer. 2736 * @param[out] qrss_type 2737 * Pointer to the action type to return. RTE_FLOW_ACTION_TYPE_END is returned 2738 * if no QUEUE/RSS is found. 2739 * 2740 * @return 2741 * Total number of actions. 2742 */ 2743 static int 2744 flow_parse_qrss_action(const struct rte_flow_action actions[], 2745 const struct rte_flow_action **qrss) 2746 { 2747 int actions_n = 0; 2748 2749 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2750 switch (actions->type) { 2751 case RTE_FLOW_ACTION_TYPE_QUEUE: 2752 case RTE_FLOW_ACTION_TYPE_RSS: 2753 *qrss = actions; 2754 break; 2755 default: 2756 break; 2757 } 2758 actions_n++; 2759 } 2760 /* Count RTE_FLOW_ACTION_TYPE_END. 
*/ 2761 return actions_n + 1; 2762 } 2763 2764 /** 2765 * Check meter action from the action list. 2766 * 2767 * @param[in] actions 2768 * Pointer to the list of actions. 2769 * @param[out] mtr 2770 * Pointer to the meter exist flag. 2771 * 2772 * @return 2773 * Total number of actions. 2774 */ 2775 static int 2776 flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr) 2777 { 2778 int actions_n = 0; 2779 2780 MLX5_ASSERT(mtr); 2781 *mtr = 0; 2782 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2783 switch (actions->type) { 2784 case RTE_FLOW_ACTION_TYPE_METER: 2785 *mtr = 1; 2786 break; 2787 default: 2788 break; 2789 } 2790 actions_n++; 2791 } 2792 /* Count RTE_FLOW_ACTION_TYPE_END. */ 2793 return actions_n + 1; 2794 } 2795 2796 /** 2797 * Check if the flow should be splited due to hairpin. 2798 * The reason for the split is that in current HW we can't 2799 * support encap on Rx, so if a flow have encap we move it 2800 * to Tx. 2801 * 2802 * @param dev 2803 * Pointer to Ethernet device. 2804 * @param[in] attr 2805 * Flow rule attributes. 2806 * @param[in] actions 2807 * Associated actions (list terminated by the END action). 2808 * 2809 * @return 2810 * > 0 the number of actions and the flow should be split, 2811 * 0 when no split required. 2812 */ 2813 static int 2814 flow_check_hairpin_split(struct rte_eth_dev *dev, 2815 const struct rte_flow_attr *attr, 2816 const struct rte_flow_action actions[]) 2817 { 2818 int queue_action = 0; 2819 int action_n = 0; 2820 int encap = 0; 2821 const struct rte_flow_action_queue *queue; 2822 const struct rte_flow_action_rss *rss; 2823 const struct rte_flow_action_raw_encap *raw_encap; 2824 2825 if (!attr->ingress) 2826 return 0; 2827 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2828 switch (actions->type) { 2829 case RTE_FLOW_ACTION_TYPE_QUEUE: 2830 queue = actions->conf; 2831 if (queue == NULL) 2832 return 0; 2833 if (mlx5_rxq_get_type(dev, queue->index) != 2834 MLX5_RXQ_TYPE_HAIRPIN) 2835 return 0; 2836 queue_action = 1; 2837 action_n++; 2838 break; 2839 case RTE_FLOW_ACTION_TYPE_RSS: 2840 rss = actions->conf; 2841 if (rss == NULL || rss->queue_num == 0) 2842 return 0; 2843 if (mlx5_rxq_get_type(dev, rss->queue[0]) != 2844 MLX5_RXQ_TYPE_HAIRPIN) 2845 return 0; 2846 queue_action = 1; 2847 action_n++; 2848 break; 2849 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 2850 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 2851 encap = 1; 2852 action_n++; 2853 break; 2854 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 2855 raw_encap = actions->conf; 2856 if (raw_encap->size > 2857 (sizeof(struct rte_flow_item_eth) + 2858 sizeof(struct rte_flow_item_ipv4))) 2859 encap = 1; 2860 action_n++; 2861 break; 2862 default: 2863 action_n++; 2864 break; 2865 } 2866 } 2867 if (encap == 1 && queue_action) 2868 return action_n; 2869 return 0; 2870 } 2871 2872 /* Declare flow create/destroy prototype in advance. */ 2873 static struct rte_flow * 2874 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, 2875 const struct rte_flow_attr *attr, 2876 const struct rte_flow_item items[], 2877 const struct rte_flow_action actions[], 2878 bool external, struct rte_flow_error *error); 2879 2880 static void 2881 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, 2882 struct rte_flow *flow); 2883 2884 /** 2885 * Add a flow of copying flow metadata registers in RX_CP_TBL. 2886 * 2887 * As mark_id is unique, if there's already a registered flow for the mark_id, 2888 * return by increasing the reference counter of the resource. 
Otherwise, create 2889 * the resource (mcp_res) and flow. 2890 * 2891 * Flow looks like, 2892 * - If ingress port is ANY and reg_c[1] is mark_id, 2893 * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL. 2894 * 2895 * For default flow (zero mark_id), flow is like, 2896 * - If ingress port is ANY, 2897 * reg_b := reg_c[0] and jump to RX_ACT_TBL. 2898 * 2899 * @param dev 2900 * Pointer to Ethernet device. 2901 * @param mark_id 2902 * ID of MARK action, zero means default flow for META. 2903 * @param[out] error 2904 * Perform verbose error reporting if not NULL. 2905 * 2906 * @return 2907 * Associated resource on success, NULL otherwise and rte_errno is set. 2908 */ 2909 static struct mlx5_flow_mreg_copy_resource * 2910 flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, 2911 struct rte_flow_error *error) 2912 { 2913 struct mlx5_priv *priv = dev->data->dev_private; 2914 struct rte_flow_attr attr = { 2915 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, 2916 .ingress = 1, 2917 }; 2918 struct mlx5_rte_flow_item_tag tag_spec = { 2919 .data = mark_id, 2920 }; 2921 struct rte_flow_item items[] = { 2922 [1] = { .type = RTE_FLOW_ITEM_TYPE_END, }, 2923 }; 2924 struct rte_flow_action_mark ftag = { 2925 .id = mark_id, 2926 }; 2927 struct mlx5_flow_action_copy_mreg cp_mreg = { 2928 .dst = REG_B, 2929 .src = 0, 2930 }; 2931 struct rte_flow_action_jump jump = { 2932 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, 2933 }; 2934 struct rte_flow_action actions[] = { 2935 [3] = { .type = RTE_FLOW_ACTION_TYPE_END, }, 2936 }; 2937 struct mlx5_flow_mreg_copy_resource *mcp_res; 2938 int ret; 2939 2940 /* Fill the register fileds in the flow. */ 2941 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); 2942 if (ret < 0) 2943 return NULL; 2944 tag_spec.id = ret; 2945 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error); 2946 if (ret < 0) 2947 return NULL; 2948 cp_mreg.src = ret; 2949 /* Check if already registered. */ 2950 MLX5_ASSERT(priv->mreg_cp_tbl); 2951 mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id); 2952 if (mcp_res) { 2953 /* For non-default rule. */ 2954 if (mark_id != MLX5_DEFAULT_COPY_ID) 2955 mcp_res->refcnt++; 2956 MLX5_ASSERT(mark_id != MLX5_DEFAULT_COPY_ID || 2957 mcp_res->refcnt == 1); 2958 return mcp_res; 2959 } 2960 /* Provide the full width of FLAG specific value. */ 2961 if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT)) 2962 tag_spec.data = MLX5_FLOW_MARK_DEFAULT; 2963 /* Build a new flow. */ 2964 if (mark_id != MLX5_DEFAULT_COPY_ID) { 2965 items[0] = (struct rte_flow_item){ 2966 .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG, 2967 .spec = &tag_spec, 2968 }; 2969 items[1] = (struct rte_flow_item){ 2970 .type = RTE_FLOW_ITEM_TYPE_END, 2971 }; 2972 actions[0] = (struct rte_flow_action){ 2973 .type = MLX5_RTE_FLOW_ACTION_TYPE_MARK, 2974 .conf = &ftag, 2975 }; 2976 actions[1] = (struct rte_flow_action){ 2977 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 2978 .conf = &cp_mreg, 2979 }; 2980 actions[2] = (struct rte_flow_action){ 2981 .type = RTE_FLOW_ACTION_TYPE_JUMP, 2982 .conf = &jump, 2983 }; 2984 actions[3] = (struct rte_flow_action){ 2985 .type = RTE_FLOW_ACTION_TYPE_END, 2986 }; 2987 } else { 2988 /* Default rule, wildcard match. 
 */
                attr.priority = MLX5_FLOW_PRIO_RSVD;
                items[0] = (struct rte_flow_item){
                        .type = RTE_FLOW_ITEM_TYPE_END,
                };
                actions[0] = (struct rte_flow_action){
                        .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
                        .conf = &cp_mreg,
                };
                actions[1] = (struct rte_flow_action){
                        .type = RTE_FLOW_ACTION_TYPE_JUMP,
                        .conf = &jump,
                };
                actions[2] = (struct rte_flow_action){
                        .type = RTE_FLOW_ACTION_TYPE_END,
                };
        }
        /* Build a new entry. */
        mcp_res = rte_zmalloc(__func__, sizeof(*mcp_res), 0);
        if (!mcp_res) {
                rte_errno = ENOMEM;
                return NULL;
        }
        /*
         * The copy Flows are not included in any list. These ones are
         * referenced from other Flows and cannot be applied, removed or
         * deleted in arbitrary order by list traversing.
         */
        mcp_res->flow = flow_list_create(dev, NULL, &attr, items,
                                         actions, false, error);
        if (!mcp_res->flow)
                goto error;
        mcp_res->refcnt++;
        mcp_res->hlist_ent.key = mark_id;
        ret = mlx5_hlist_insert(priv->mreg_cp_tbl,
                                &mcp_res->hlist_ent);
        MLX5_ASSERT(!ret);
        if (ret)
                goto error;
        return mcp_res;
error:
        if (mcp_res->flow)
                flow_list_destroy(dev, NULL, mcp_res->flow);
        rte_free(mcp_res);
        return NULL;
}

/**
 * Release flow in RX_CP_TBL.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param flow
 *   Parent flow for which copying is provided.
 */
static void
flow_mreg_del_copy_action(struct rte_eth_dev *dev,
                          struct rte_flow *flow)
{
        struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy;
        struct mlx5_priv *priv = dev->data->dev_private;

        if (!mcp_res || !priv->mreg_cp_tbl)
                return;
        if (flow->copy_applied) {
                MLX5_ASSERT(mcp_res->appcnt);
                flow->copy_applied = 0;
                --mcp_res->appcnt;
                if (!mcp_res->appcnt)
                        flow_drv_remove(dev, mcp_res->flow);
        }
        /*
         * We do not check availability of metadata registers here,
         * because copy resources are not allocated in this case.
         */
        if (--mcp_res->refcnt)
                return;
        MLX5_ASSERT(mcp_res->flow);
        flow_list_destroy(dev, NULL, mcp_res->flow);
        mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
        rte_free(mcp_res);
        flow->mreg_copy = NULL;
}

/**
 * Start flow in RX_CP_TBL.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param flow
 *   Parent flow for which copying is provided.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_mreg_start_copy_action(struct rte_eth_dev *dev,
                            struct rte_flow *flow)
{
        struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy;
        int ret;

        if (!mcp_res || flow->copy_applied)
                return 0;
        if (!mcp_res->appcnt) {
                ret = flow_drv_apply(dev, mcp_res->flow, NULL);
                if (ret)
                        return ret;
        }
        ++mcp_res->appcnt;
        flow->copy_applied = 1;
        return 0;
}

/**
 * Stop flow in RX_CP_TBL.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param flow
 *   Parent flow for which copying is provided.
3110 */ 3111 static void 3112 flow_mreg_stop_copy_action(struct rte_eth_dev *dev, 3113 struct rte_flow *flow) 3114 { 3115 struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; 3116 3117 if (!mcp_res || !flow->copy_applied) 3118 return; 3119 MLX5_ASSERT(mcp_res->appcnt); 3120 --mcp_res->appcnt; 3121 flow->copy_applied = 0; 3122 if (!mcp_res->appcnt) 3123 flow_drv_remove(dev, mcp_res->flow); 3124 } 3125 3126 /** 3127 * Remove the default copy action from RX_CP_TBL. 3128 * 3129 * @param dev 3130 * Pointer to Ethernet device. 3131 */ 3132 static void 3133 flow_mreg_del_default_copy_action(struct rte_eth_dev *dev) 3134 { 3135 struct mlx5_flow_mreg_copy_resource *mcp_res; 3136 struct mlx5_priv *priv = dev->data->dev_private; 3137 3138 /* Check if default flow is registered. */ 3139 if (!priv->mreg_cp_tbl) 3140 return; 3141 mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, 3142 MLX5_DEFAULT_COPY_ID); 3143 if (!mcp_res) 3144 return; 3145 MLX5_ASSERT(mcp_res->flow); 3146 flow_list_destroy(dev, NULL, mcp_res->flow); 3147 mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent); 3148 rte_free(mcp_res); 3149 } 3150 3151 /** 3152 * Add the default copy action in in RX_CP_TBL. 3153 * 3154 * @param dev 3155 * Pointer to Ethernet device. 3156 * @param[out] error 3157 * Perform verbose error reporting if not NULL. 3158 * 3159 * @return 3160 * 0 for success, negative value otherwise and rte_errno is set. 3161 */ 3162 static int 3163 flow_mreg_add_default_copy_action(struct rte_eth_dev *dev, 3164 struct rte_flow_error *error) 3165 { 3166 struct mlx5_priv *priv = dev->data->dev_private; 3167 struct mlx5_flow_mreg_copy_resource *mcp_res; 3168 3169 /* Check whether extensive metadata feature is engaged. */ 3170 if (!priv->config.dv_flow_en || 3171 priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 3172 !mlx5_flow_ext_mreg_supported(dev) || 3173 !priv->sh->dv_regc0_mask) 3174 return 0; 3175 mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error); 3176 if (!mcp_res) 3177 return -rte_errno; 3178 return 0; 3179 } 3180 3181 /** 3182 * Add a flow of copying flow metadata registers in RX_CP_TBL. 3183 * 3184 * All the flow having Q/RSS action should be split by 3185 * flow_mreg_split_qrss_prep() to pass by RX_CP_TBL. A flow in the RX_CP_TBL 3186 * performs the following, 3187 * - CQE->flow_tag := reg_c[1] (MARK) 3188 * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META) 3189 * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1] 3190 * but there should be a flow per each MARK ID set by MARK action. 3191 * 3192 * For the aforementioned reason, if there's a MARK action in flow's action 3193 * list, a corresponding flow should be added to the RX_CP_TBL in order to copy 3194 * the MARK ID to CQE's flow_tag like, 3195 * - If reg_c[1] is mark_id, 3196 * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL. 3197 * 3198 * For SET_META action which stores value in reg_c[0], as the destination is 3199 * also a flow metadata register (reg_b), adding a default flow is enough. Zero 3200 * MARK ID means the default flow. The default flow looks like, 3201 * - For all flow, reg_b := reg_c[0] and jump to RX_ACT_TBL. 3202 * 3203 * @param dev 3204 * Pointer to Ethernet device. 3205 * @param flow 3206 * Pointer to flow structure. 3207 * @param[in] actions 3208 * Pointer to the list of actions. 3209 * @param[out] error 3210 * Perform verbose error reporting if not NULL. 3211 * 3212 * @return 3213 * 0 on success, negative value otherwise and rte_errno is set. 
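 *
 *   For instance, an action list such as the following (illustrative
 *   sketch, the mark ID and queue index are hypothetical) makes this
 *   function add a copy flow keyed by mark ID 0x2a to RX_CP_TBL:
 * @code
 *   struct rte_flow_action_mark mark = { .id = 0x2a };
 *   struct rte_flow_action_queue queue = { .index = 0 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 * @endcode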
3214 */ 3215 static int 3216 flow_mreg_update_copy_table(struct rte_eth_dev *dev, 3217 struct rte_flow *flow, 3218 const struct rte_flow_action *actions, 3219 struct rte_flow_error *error) 3220 { 3221 struct mlx5_priv *priv = dev->data->dev_private; 3222 struct mlx5_dev_config *config = &priv->config; 3223 struct mlx5_flow_mreg_copy_resource *mcp_res; 3224 const struct rte_flow_action_mark *mark; 3225 3226 /* Check whether extensive metadata feature is engaged. */ 3227 if (!config->dv_flow_en || 3228 config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 3229 !mlx5_flow_ext_mreg_supported(dev) || 3230 !priv->sh->dv_regc0_mask) 3231 return 0; 3232 /* Find MARK action. */ 3233 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 3234 switch (actions->type) { 3235 case RTE_FLOW_ACTION_TYPE_FLAG: 3236 mcp_res = flow_mreg_add_copy_action 3237 (dev, MLX5_FLOW_MARK_DEFAULT, error); 3238 if (!mcp_res) 3239 return -rte_errno; 3240 flow->mreg_copy = mcp_res; 3241 if (dev->data->dev_started) { 3242 mcp_res->appcnt++; 3243 flow->copy_applied = 1; 3244 } 3245 return 0; 3246 case RTE_FLOW_ACTION_TYPE_MARK: 3247 mark = (const struct rte_flow_action_mark *) 3248 actions->conf; 3249 mcp_res = 3250 flow_mreg_add_copy_action(dev, mark->id, error); 3251 if (!mcp_res) 3252 return -rte_errno; 3253 flow->mreg_copy = mcp_res; 3254 if (dev->data->dev_started) { 3255 mcp_res->appcnt++; 3256 flow->copy_applied = 1; 3257 } 3258 return 0; 3259 default: 3260 break; 3261 } 3262 } 3263 return 0; 3264 } 3265 3266 #define MLX5_MAX_SPLIT_ACTIONS 24 3267 #define MLX5_MAX_SPLIT_ITEMS 24 3268 3269 /** 3270 * Split the hairpin flow. 3271 * Since HW can't support encap on Rx we move the encap to Tx. 3272 * If the count action is after the encap then we also 3273 * move the count action. in this case the count will also measure 3274 * the outer bytes. 3275 * 3276 * @param dev 3277 * Pointer to Ethernet device. 3278 * @param[in] actions 3279 * Associated actions (list terminated by the END action). 3280 * @param[out] actions_rx 3281 * Rx flow actions. 3282 * @param[out] actions_tx 3283 * Tx flow actions.. 3284 * @param[out] pattern_tx 3285 * The pattern items for the Tx flow. 3286 * @param[out] flow_id 3287 * The flow ID connected to this flow. 3288 * 3289 * @return 3290 * 0 on success. 
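 *
 *   The Rx half of the split is terminated with an internal SET_TAG action
 *   carrying the allocated flow_id, while the generated Tx pattern matches
 *   on the same tag value; this is how the two halves are tied together.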
3291 */ 3292 static int 3293 flow_hairpin_split(struct rte_eth_dev *dev, 3294 const struct rte_flow_action actions[], 3295 struct rte_flow_action actions_rx[], 3296 struct rte_flow_action actions_tx[], 3297 struct rte_flow_item pattern_tx[], 3298 uint32_t *flow_id) 3299 { 3300 struct mlx5_priv *priv = dev->data->dev_private; 3301 const struct rte_flow_action_raw_encap *raw_encap; 3302 const struct rte_flow_action_raw_decap *raw_decap; 3303 struct mlx5_rte_flow_action_set_tag *set_tag; 3304 struct rte_flow_action *tag_action; 3305 struct mlx5_rte_flow_item_tag *tag_item; 3306 struct rte_flow_item *item; 3307 char *addr; 3308 int encap = 0; 3309 3310 mlx5_flow_id_get(priv->sh->flow_id_pool, flow_id); 3311 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 3312 switch (actions->type) { 3313 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 3314 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 3315 rte_memcpy(actions_tx, actions, 3316 sizeof(struct rte_flow_action)); 3317 actions_tx++; 3318 break; 3319 case RTE_FLOW_ACTION_TYPE_COUNT: 3320 if (encap) { 3321 rte_memcpy(actions_tx, actions, 3322 sizeof(struct rte_flow_action)); 3323 actions_tx++; 3324 } else { 3325 rte_memcpy(actions_rx, actions, 3326 sizeof(struct rte_flow_action)); 3327 actions_rx++; 3328 } 3329 break; 3330 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 3331 raw_encap = actions->conf; 3332 if (raw_encap->size > 3333 (sizeof(struct rte_flow_item_eth) + 3334 sizeof(struct rte_flow_item_ipv4))) { 3335 memcpy(actions_tx, actions, 3336 sizeof(struct rte_flow_action)); 3337 actions_tx++; 3338 encap = 1; 3339 } else { 3340 rte_memcpy(actions_rx, actions, 3341 sizeof(struct rte_flow_action)); 3342 actions_rx++; 3343 } 3344 break; 3345 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 3346 raw_decap = actions->conf; 3347 if (raw_decap->size < 3348 (sizeof(struct rte_flow_item_eth) + 3349 sizeof(struct rte_flow_item_ipv4))) { 3350 memcpy(actions_tx, actions, 3351 sizeof(struct rte_flow_action)); 3352 actions_tx++; 3353 } else { 3354 rte_memcpy(actions_rx, actions, 3355 sizeof(struct rte_flow_action)); 3356 actions_rx++; 3357 } 3358 break; 3359 default: 3360 rte_memcpy(actions_rx, actions, 3361 sizeof(struct rte_flow_action)); 3362 actions_rx++; 3363 break; 3364 } 3365 } 3366 /* Add set meta action and end action for the Rx flow. */ 3367 tag_action = actions_rx; 3368 tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG; 3369 actions_rx++; 3370 rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action)); 3371 actions_rx++; 3372 set_tag = (void *)actions_rx; 3373 set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL); 3374 MLX5_ASSERT(set_tag->id > REG_NONE); 3375 set_tag->data = *flow_id; 3376 tag_action->conf = set_tag; 3377 /* Create Tx item list. 
 */
        rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
        addr = (void *)&pattern_tx[2];
        item = pattern_tx;
        item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG;
        tag_item = (void *)addr;
        tag_item->data = *flow_id;
        tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
        MLX5_ASSERT(set_tag->id > REG_NONE);
        item->spec = tag_item;
        addr += sizeof(struct mlx5_rte_flow_item_tag);
        tag_item = (void *)addr;
        tag_item->data = UINT32_MAX;
        tag_item->id = UINT16_MAX;
        item->mask = tag_item;
        addr += sizeof(struct mlx5_rte_flow_item_tag);
        item->last = NULL;
        item++;
        item->type = RTE_FLOW_ITEM_TYPE_END;
        return 0;
}

/**
 * The last stage of the splitting chain, just creates the subflow
 * without any modification.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Parent flow structure pointer.
 * @param[in, out] sub_flow
 *   Pointer to return the created subflow, may be NULL.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[in] external
 *   This flow rule is created by request external to PMD.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @return
 *   0 on success, negative value otherwise.
 */
static int
flow_create_split_inner(struct rte_eth_dev *dev,
                        struct rte_flow *flow,
                        struct mlx5_flow **sub_flow,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item items[],
                        const struct rte_flow_action actions[],
                        bool external, struct rte_flow_error *error)
{
        struct mlx5_flow *dev_flow;

        dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
        if (!dev_flow)
                return -rte_errno;
        dev_flow->flow = flow;
        dev_flow->external = external;
        /* Subflow object was created, we must include one in the list. */
        LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
        if (sub_flow)
                *sub_flow = dev_flow;
        return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
}

/**
 * Split the meter flow.
 *
 * As the meter flow will be split into three subflows, the actions other
 * than the meter action itself only make sense once the meter has accepted
 * the packet. If the packet is to be dropped, no additional actions should
 * be taken.
 *
 * One kind of special action which decapsulates the L3 tunnel
 * header will be in the prefix subflow, so as not to take the
 * L3 tunnel header into account.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] actions_sfx
 *   Suffix flow actions.
 * @param[out] actions_pre
 *   Prefix flow actions.
 *
 * @return
 *   The allocated tag ID shared by the prefix and suffix subflows, 0 otherwise.
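 *
 * @note
 *   The tag value written to the prefix SET_TAG action is the allocated ID
 *   shifted left by MLX5_MTR_COLOR_BITS, leaving the low bits available for
 *   the meter color.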
3472 */ 3473 static int 3474 flow_meter_split_prep(struct rte_eth_dev *dev, 3475 const struct rte_flow_action actions[], 3476 struct rte_flow_action actions_sfx[], 3477 struct rte_flow_action actions_pre[]) 3478 { 3479 struct rte_flow_action *tag_action = NULL; 3480 struct mlx5_rte_flow_action_set_tag *set_tag; 3481 struct rte_flow_error error; 3482 const struct rte_flow_action_raw_encap *raw_encap; 3483 const struct rte_flow_action_raw_decap *raw_decap; 3484 uint32_t tag_id; 3485 3486 /* Prepare the actions for prefix and suffix flow. */ 3487 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 3488 switch (actions->type) { 3489 case RTE_FLOW_ACTION_TYPE_METER: 3490 /* Add the extra tag action first. */ 3491 tag_action = actions_pre; 3492 tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG; 3493 actions_pre++; 3494 memcpy(actions_pre, actions, 3495 sizeof(struct rte_flow_action)); 3496 actions_pre++; 3497 break; 3498 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: 3499 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: 3500 memcpy(actions_pre, actions, 3501 sizeof(struct rte_flow_action)); 3502 actions_pre++; 3503 break; 3504 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 3505 raw_encap = actions->conf; 3506 if (raw_encap->size > 3507 (sizeof(struct rte_flow_item_eth) + 3508 sizeof(struct rte_flow_item_ipv4))) { 3509 memcpy(actions_sfx, actions, 3510 sizeof(struct rte_flow_action)); 3511 actions_sfx++; 3512 } else { 3513 rte_memcpy(actions_pre, actions, 3514 sizeof(struct rte_flow_action)); 3515 actions_pre++; 3516 } 3517 break; 3518 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 3519 raw_decap = actions->conf; 3520 /* Size 0 decap means 50 bytes as vxlan decap. */ 3521 if (raw_decap->size && (raw_decap->size < 3522 (sizeof(struct rte_flow_item_eth) + 3523 sizeof(struct rte_flow_item_ipv4)))) { 3524 memcpy(actions_sfx, actions, 3525 sizeof(struct rte_flow_action)); 3526 actions_sfx++; 3527 } else { 3528 rte_memcpy(actions_pre, actions, 3529 sizeof(struct rte_flow_action)); 3530 actions_pre++; 3531 } 3532 break; 3533 default: 3534 memcpy(actions_sfx, actions, 3535 sizeof(struct rte_flow_action)); 3536 actions_sfx++; 3537 break; 3538 } 3539 } 3540 /* Add end action to the actions. */ 3541 actions_sfx->type = RTE_FLOW_ACTION_TYPE_END; 3542 actions_pre->type = RTE_FLOW_ACTION_TYPE_END; 3543 actions_pre++; 3544 /* Set the tag. */ 3545 set_tag = (void *)actions_pre; 3546 set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error); 3547 /* 3548 * Get the id from the qrss_pool to make qrss share the id with meter. 3549 */ 3550 tag_id = flow_qrss_get_id(dev); 3551 set_tag->data = tag_id << MLX5_MTR_COLOR_BITS; 3552 assert(tag_action); 3553 tag_action->conf = set_tag; 3554 return tag_id; 3555 } 3556 3557 /** 3558 * Split action list having QUEUE/RSS for metadata register copy. 3559 * 3560 * Once Q/RSS action is detected in user's action list, the flow action 3561 * should be split in order to copy metadata registers, which will happen in 3562 * RX_CP_TBL like, 3563 * - CQE->flow_tag := reg_c[1] (MARK) 3564 * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META) 3565 * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL. 3566 * This is because the last action of each flow must be a terminal action 3567 * (QUEUE, RSS or DROP). 3568 * 3569 * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is 3570 * stored and kept in the mlx5_flow structure per each sub_flow. 3571 * 3572 * The Q/RSS action is replaced with, 3573 * - SET_TAG, setting the allocated flow ID to reg_c[2]. 
3574 * And the following JUMP action is added at the end, 3575 * - JUMP, to RX_CP_TBL. 3576 * 3577 * A flow to perform remained Q/RSS action will be created in RX_ACT_TBL by 3578 * flow_create_split_metadata() routine. The flow will look like, 3579 * - If flow ID matches (reg_c[2]), perform Q/RSS. 3580 * 3581 * @param dev 3582 * Pointer to Ethernet device. 3583 * @param[out] split_actions 3584 * Pointer to store split actions to jump to CP_TBL. 3585 * @param[in] actions 3586 * Pointer to the list of original flow actions. 3587 * @param[in] qrss 3588 * Pointer to the Q/RSS action. 3589 * @param[in] actions_n 3590 * Number of original actions. 3591 * @param[out] error 3592 * Perform verbose error reporting if not NULL. 3593 * 3594 * @return 3595 * non-zero unique flow_id on success, otherwise 0 and 3596 * error/rte_error are set. 3597 */ 3598 static uint32_t 3599 flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, 3600 struct rte_flow_action *split_actions, 3601 const struct rte_flow_action *actions, 3602 const struct rte_flow_action *qrss, 3603 int actions_n, struct rte_flow_error *error) 3604 { 3605 struct mlx5_rte_flow_action_set_tag *set_tag; 3606 struct rte_flow_action_jump *jump; 3607 const int qrss_idx = qrss - actions; 3608 uint32_t flow_id = 0; 3609 int ret = 0; 3610 3611 /* 3612 * Given actions will be split 3613 * - Replace QUEUE/RSS action with SET_TAG to set flow ID. 3614 * - Add jump to mreg CP_TBL. 3615 * As a result, there will be one more action. 3616 */ 3617 ++actions_n; 3618 memcpy(split_actions, actions, sizeof(*split_actions) * actions_n); 3619 set_tag = (void *)(split_actions + actions_n); 3620 /* 3621 * If tag action is not set to void(it means we are not the meter 3622 * suffix flow), add the tag action. Since meter suffix flow already 3623 * has the tag added. 3624 */ 3625 if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) { 3626 /* 3627 * Allocate the new subflow ID. This one is unique within 3628 * device and not shared with representors. Otherwise, 3629 * we would have to resolve multi-thread access synch 3630 * issue. Each flow on the shared device is appended 3631 * with source vport identifier, so the resulting 3632 * flows will be unique in the shared (by master and 3633 * representors) domain even if they have coinciding 3634 * IDs. 3635 */ 3636 flow_id = flow_qrss_get_id(dev); 3637 if (!flow_id) 3638 return rte_flow_error_set(error, ENOMEM, 3639 RTE_FLOW_ERROR_TYPE_ACTION, 3640 NULL, "can't allocate id " 3641 "for split Q/RSS subflow"); 3642 /* Internal SET_TAG action to set flow ID. */ 3643 *set_tag = (struct mlx5_rte_flow_action_set_tag){ 3644 .data = flow_id, 3645 }; 3646 ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error); 3647 if (ret < 0) 3648 return ret; 3649 set_tag->id = ret; 3650 /* Construct new actions array. */ 3651 /* Replace QUEUE/RSS action. */ 3652 split_actions[qrss_idx] = (struct rte_flow_action){ 3653 .type = MLX5_RTE_FLOW_ACTION_TYPE_TAG, 3654 .conf = set_tag, 3655 }; 3656 } 3657 /* JUMP action to jump to mreg copy table (CP_TBL). */ 3658 jump = (void *)(set_tag + 1); 3659 *jump = (struct rte_flow_action_jump){ 3660 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, 3661 }; 3662 split_actions[actions_n - 2] = (struct rte_flow_action){ 3663 .type = RTE_FLOW_ACTION_TYPE_JUMP, 3664 .conf = jump, 3665 }; 3666 split_actions[actions_n - 1] = (struct rte_flow_action){ 3667 .type = RTE_FLOW_ACTION_TYPE_END, 3668 }; 3669 return flow_id; 3670 } 3671 3672 /** 3673 * Extend the given action list for Tx metadata copy. 
3674 * 3675 * Copy the given action list to the ext_actions and add flow metadata register 3676 * copy action in order to copy reg_a set by WQE to reg_c[0]. 3677 * 3678 * @param[out] ext_actions 3679 * Pointer to the extended action list. 3680 * @param[in] actions 3681 * Pointer to the list of actions. 3682 * @param[in] actions_n 3683 * Number of actions in the list. 3684 * @param[out] error 3685 * Perform verbose error reporting if not NULL. 3686 * 3687 * @return 3688 * 0 on success, negative value otherwise 3689 */ 3690 static int 3691 flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, 3692 struct rte_flow_action *ext_actions, 3693 const struct rte_flow_action *actions, 3694 int actions_n, struct rte_flow_error *error) 3695 { 3696 struct mlx5_flow_action_copy_mreg *cp_mreg = 3697 (struct mlx5_flow_action_copy_mreg *) 3698 (ext_actions + actions_n + 1); 3699 int ret; 3700 3701 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error); 3702 if (ret < 0) 3703 return ret; 3704 cp_mreg->dst = ret; 3705 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error); 3706 if (ret < 0) 3707 return ret; 3708 cp_mreg->src = ret; 3709 memcpy(ext_actions, actions, 3710 sizeof(*ext_actions) * actions_n); 3711 ext_actions[actions_n - 1] = (struct rte_flow_action){ 3712 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 3713 .conf = cp_mreg, 3714 }; 3715 ext_actions[actions_n] = (struct rte_flow_action){ 3716 .type = RTE_FLOW_ACTION_TYPE_END, 3717 }; 3718 return 0; 3719 } 3720 3721 /** 3722 * The splitting for metadata feature. 3723 * 3724 * - Q/RSS action on NIC Rx should be split in order to pass by 3725 * the mreg copy table (RX_CP_TBL) and then it jumps to the 3726 * action table (RX_ACT_TBL) which has the split Q/RSS action. 3727 * 3728 * - All the actions on NIC Tx should have a mreg copy action to 3729 * copy reg_a from WQE to reg_c[0]. 3730 * 3731 * @param dev 3732 * Pointer to Ethernet device. 3733 * @param[in] flow 3734 * Parent flow structure pointer. 3735 * @param[in] attr 3736 * Flow rule attributes. 3737 * @param[in] items 3738 * Pattern specification (list terminated by the END pattern item). 3739 * @param[in] actions 3740 * Associated actions (list terminated by the END action). 3741 * @param[in] external 3742 * This flow rule is created by request external to PMD. 3743 * @param[out] error 3744 * Perform verbose error reporting if not NULL. 3745 * @return 3746 * 0 on success, negative value otherwise 3747 */ 3748 static int 3749 flow_create_split_metadata(struct rte_eth_dev *dev, 3750 struct rte_flow *flow, 3751 const struct rte_flow_attr *attr, 3752 const struct rte_flow_item items[], 3753 const struct rte_flow_action actions[], 3754 bool external, struct rte_flow_error *error) 3755 { 3756 struct mlx5_priv *priv = dev->data->dev_private; 3757 struct mlx5_dev_config *config = &priv->config; 3758 const struct rte_flow_action *qrss = NULL; 3759 struct rte_flow_action *ext_actions = NULL; 3760 struct mlx5_flow *dev_flow = NULL; 3761 uint32_t qrss_id = 0; 3762 int mtr_sfx = 0; 3763 size_t act_size; 3764 int actions_n; 3765 int ret; 3766 3767 /* Check whether extensive metadata feature is engaged. */ 3768 if (!config->dv_flow_en || 3769 config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 3770 !mlx5_flow_ext_mreg_supported(dev)) 3771 return flow_create_split_inner(dev, flow, NULL, attr, items, 3772 actions, external, error); 3773 actions_n = flow_parse_qrss_action(actions, &qrss); 3774 if (qrss) { 3775 /* Exclude hairpin flows from splitting. 
*/ 3776 if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) { 3777 const struct rte_flow_action_queue *queue; 3778 3779 queue = qrss->conf; 3780 if (mlx5_rxq_get_type(dev, queue->index) == 3781 MLX5_RXQ_TYPE_HAIRPIN) 3782 qrss = NULL; 3783 } else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) { 3784 const struct rte_flow_action_rss *rss; 3785 3786 rss = qrss->conf; 3787 if (mlx5_rxq_get_type(dev, rss->queue[0]) == 3788 MLX5_RXQ_TYPE_HAIRPIN) 3789 qrss = NULL; 3790 } 3791 } 3792 if (qrss) { 3793 /* Check if it is in meter suffix table. */ 3794 mtr_sfx = attr->group == (attr->transfer ? 3795 (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) : 3796 MLX5_FLOW_TABLE_LEVEL_SUFFIX); 3797 /* 3798 * Q/RSS action on NIC Rx should be split in order to pass by 3799 * the mreg copy table (RX_CP_TBL) and then it jumps to the 3800 * action table (RX_ACT_TBL) which has the split Q/RSS action. 3801 */ 3802 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) + 3803 sizeof(struct rte_flow_action_set_tag) + 3804 sizeof(struct rte_flow_action_jump); 3805 ext_actions = rte_zmalloc(__func__, act_size, 0); 3806 if (!ext_actions) 3807 return rte_flow_error_set(error, ENOMEM, 3808 RTE_FLOW_ERROR_TYPE_ACTION, 3809 NULL, "no memory to split " 3810 "metadata flow"); 3811 /* 3812 * If we are the suffix flow of meter, tag already exist. 3813 * Set the tag action to void. 3814 */ 3815 if (mtr_sfx) 3816 ext_actions[qrss - actions].type = 3817 RTE_FLOW_ACTION_TYPE_VOID; 3818 else 3819 ext_actions[qrss - actions].type = 3820 MLX5_RTE_FLOW_ACTION_TYPE_TAG; 3821 /* 3822 * Create the new actions list with removed Q/RSS action 3823 * and appended set tag and jump to register copy table 3824 * (RX_CP_TBL). We should preallocate unique tag ID here 3825 * in advance, because it is needed for set tag action. 3826 */ 3827 qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions, 3828 qrss, actions_n, error); 3829 if (!mtr_sfx && !qrss_id) { 3830 ret = -rte_errno; 3831 goto exit; 3832 } 3833 } else if (attr->egress && !attr->transfer) { 3834 /* 3835 * All the actions on NIC Tx should have a metadata register 3836 * copy action to copy reg_a from WQE to reg_c[meta] 3837 */ 3838 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) + 3839 sizeof(struct mlx5_flow_action_copy_mreg); 3840 ext_actions = rte_zmalloc(__func__, act_size, 0); 3841 if (!ext_actions) 3842 return rte_flow_error_set(error, ENOMEM, 3843 RTE_FLOW_ERROR_TYPE_ACTION, 3844 NULL, "no memory to split " 3845 "metadata flow"); 3846 /* Create the action list appended with copy register. */ 3847 ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions, 3848 actions_n, error); 3849 if (ret < 0) 3850 goto exit; 3851 } 3852 /* Add the unmodified original or prefix subflow. */ 3853 ret = flow_create_split_inner(dev, flow, &dev_flow, attr, items, 3854 ext_actions ? ext_actions : actions, 3855 external, error); 3856 if (ret < 0) 3857 goto exit; 3858 MLX5_ASSERT(dev_flow); 3859 if (qrss) { 3860 const struct rte_flow_attr q_attr = { 3861 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, 3862 .ingress = 1, 3863 }; 3864 /* Internal PMD action to set register. 
*/ 3865 struct mlx5_rte_flow_item_tag q_tag_spec = {
3866 .data = qrss_id,
3867 .id = 0,
3868 };
3869 struct rte_flow_item q_items[] = {
3870 {
3871 .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG,
3872 .spec = &q_tag_spec,
3873 .last = NULL,
3874 .mask = NULL,
3875 },
3876 {
3877 .type = RTE_FLOW_ITEM_TYPE_END,
3878 },
3879 };
3880 struct rte_flow_action q_actions[] = {
3881 {
3882 .type = qrss->type,
3883 .conf = qrss->conf,
3884 },
3885 {
3886 .type = RTE_FLOW_ACTION_TYPE_END,
3887 },
3888 };
3889 uint64_t hash_fields = dev_flow->hash_fields;
3890
3891 /*
3892 * Configure the tag item only if there is no meter subflow.
3893 * Since the tag is already set in the meter suffix subflow,
3894 * we can just use the meter suffix items as they are.
3895 */
3896 if (qrss_id) {
3897 /* Not meter subflow. */
3898 MLX5_ASSERT(!mtr_sfx);
3899 /*
3900 * Store the unique ID in the prefix subflow since it is
3901 * destroyed after the suffix subflow; the ID is freed once
3902 * no actual flow uses it anymore and its reallocation
3903 * becomes possible (for example, for
3904 * other flows in other threads).
3905 */
3906 dev_flow->qrss_id = qrss_id;
3907 qrss_id = 0;
3908 ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
3909 error);
3910 if (ret < 0)
3911 goto exit;
3912 q_tag_spec.id = ret;
3913 }
3914 dev_flow = NULL;
3915 /* Add suffix subflow to execute Q/RSS. */
3916 ret = flow_create_split_inner(dev, flow, &dev_flow,
3917 &q_attr, mtr_sfx ? items :
3918 q_items, q_actions,
3919 external, error);
3920 if (ret < 0)
3921 goto exit;
3922 MLX5_ASSERT(dev_flow);
3923 dev_flow->hash_fields = hash_fields;
3924 }
3925
3926 exit:
3927 /*
3928 * We do not destroy the partially created sub_flows in case of error.
3929 * They are included in the parent flow list and will be destroyed
3930 * by flow_drv_destroy.
3931 */
3932 flow_qrss_free_id(dev, qrss_id);
3933 rte_free(ext_actions);
3934 return ret;
3935 }
3936
3937 /**
3938 * The splitting for the meter feature.
3939 *
3940 * - The meter flow will be split into two flows: a prefix flow and a
3941 * suffix flow. Packets are relevant only if they pass the prefix
3942 * meter action.
3943 *
3944 * - Reg_C_5 is used to match the packet between the prefix and
3945 * suffix flows.
3946 *
3947 * @param dev
3948 * Pointer to Ethernet device.
3949 * @param[in] flow
3950 * Parent flow structure pointer.
3951 * @param[in] attr
3952 * Flow rule attributes.
3953 * @param[in] items
3954 * Pattern specification (list terminated by the END pattern item).
3955 * @param[in] actions
3956 * Associated actions (list terminated by the END action).
3957 * @param[in] external
3958 * This flow rule is created by request external to PMD.
3959 * @param[out] error
3960 * Perform verbose error reporting if not NULL.
3961 * @return
3962 * 0 on success, negative value otherwise
3963 */
3964 static int
3965 flow_create_split_meter(struct rte_eth_dev *dev,
3966 struct rte_flow *flow,
3967 const struct rte_flow_attr *attr,
3968 const struct rte_flow_item items[],
3969 const struct rte_flow_action actions[],
3970 bool external, struct rte_flow_error *error)
3971 {
3972 struct mlx5_priv *priv = dev->data->dev_private;
3973 struct rte_flow_action *sfx_actions = NULL;
3974 struct rte_flow_action *pre_actions = NULL;
3975 struct rte_flow_item *sfx_items = NULL;
3976 const struct rte_flow_item *sfx_port_id_item;
3977 struct mlx5_flow *dev_flow = NULL;
3978 struct rte_flow_attr sfx_attr = *attr;
3979 uint32_t mtr = 0;
3980 uint32_t mtr_tag_id = 0;
3981 size_t act_size;
3982 size_t item_size;
3983 int actions_n = 0;
3984 int ret;
3985
3986 if (priv->mtr_en)
3987 actions_n = flow_check_meter_action(actions, &mtr);
3988 if (mtr) {
3989 struct mlx5_rte_flow_item_tag *tag_spec;
3990 struct mlx5_rte_flow_item_tag *tag_mask;
3991 /* The five prefix actions: meter, decap, encap, tag, end. */
3992 act_size = sizeof(struct rte_flow_action) * (actions_n + 5) +
3993 sizeof(struct rte_flow_action_set_tag);
3994 /* The suffix items: tag, (optional) port id, end. */
3995 #define METER_SUFFIX_ITEM 3
3996 item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
3997 sizeof(struct mlx5_rte_flow_item_tag) * 2;
3998 sfx_actions = rte_zmalloc(__func__, (act_size + item_size), 0);
3999 if (!sfx_actions)
4000 return rte_flow_error_set(error, ENOMEM,
4001 RTE_FLOW_ERROR_TYPE_ACTION,
4002 NULL, "no memory to split "
4003 "meter flow");
4004 pre_actions = sfx_actions + actions_n;
4005 mtr_tag_id = flow_meter_split_prep(dev, actions, sfx_actions,
4006 pre_actions);
4007 if (!mtr_tag_id) {
4008 ret = -rte_errno;
4009 goto exit;
4010 }
4011 /* Add the prefix subflow. */
4012 ret = flow_create_split_inner(dev, flow, &dev_flow, attr, items,
4013 pre_actions, external, error);
4014 if (ret) {
4015 ret = -rte_errno;
4016 goto exit;
4017 }
4018 dev_flow->mtr_flow_id = mtr_tag_id;
4019 /* Prepare the suffix flow match pattern. */
4020 sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
4021 act_size);
4022 tag_spec = (struct mlx5_rte_flow_item_tag *)(sfx_items +
4023 METER_SUFFIX_ITEM);
4024 tag_spec->data = dev_flow->mtr_flow_id << MLX5_MTR_COLOR_BITS;
4025 tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0,
4026 error);
4027 tag_mask = tag_spec + 1;
4028 tag_mask->data = 0xffffff00;
4029 sfx_items->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG;
4030 sfx_items->spec = tag_spec;
4031 sfx_items->last = NULL;
4032 sfx_items->mask = tag_mask;
4033 sfx_items++;
4034 sfx_port_id_item = find_port_id_item(items);
4035 if (sfx_port_id_item) {
4036 memcpy(sfx_items, sfx_port_id_item,
4037 sizeof(*sfx_items));
4038 sfx_items++;
4039 }
4040 sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
4041 sfx_items -= sfx_port_id_item ? 2 : 1;
4042 /* Setting the sfx group attr. */
4043 sfx_attr.group = sfx_attr.transfer ?
4044 (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
4045 MLX5_FLOW_TABLE_LEVEL_SUFFIX;
4046 }
4047 /* Add the suffix subflow (or the original flow if there is no meter). */
4048 ret = flow_create_split_metadata(dev, flow, &sfx_attr,
4049 sfx_items ? sfx_items : items,
4050 sfx_actions ? sfx_actions : actions,
4051 external, error);
4052 exit:
4053 if (sfx_actions)
4054 rte_free(sfx_actions);
4055 return ret;
4056 }
4057
4058 /**
4059 * Split the flow into a set of subflows.
The splitters may be linked
4060 * in a chain, like this:
4061 * flow_create_split_outer() calls:
4062 * flow_create_split_meter() calls:
4063 * flow_create_split_metadata(meter_subflow_0) calls:
4064 * flow_create_split_inner(metadata_subflow_0)
4065 * flow_create_split_inner(metadata_subflow_1)
4066 * flow_create_split_inner(metadata_subflow_2)
4067 * flow_create_split_metadata(meter_subflow_1) calls:
4068 * flow_create_split_inner(metadata_subflow_0)
4069 * flow_create_split_inner(metadata_subflow_1)
4070 * flow_create_split_inner(metadata_subflow_2)
4071 *
4072 * This provides a flexible way to add new levels of flow splitting.
4073 * All successfully created subflows are included in the
4074 * parent flow dev_flow list.
4075 *
4076 * @param dev
4077 * Pointer to Ethernet device.
4078 * @param[in] flow
4079 * Parent flow structure pointer.
4080 * @param[in] attr
4081 * Flow rule attributes.
4082 * @param[in] items
4083 * Pattern specification (list terminated by the END pattern item).
4084 * @param[in] actions
4085 * Associated actions (list terminated by the END action).
4086 * @param[in] external
4087 * This flow rule is created by request external to PMD.
4088 * @param[out] error
4089 * Perform verbose error reporting if not NULL.
4090 * @return
4091 * 0 on success, negative value otherwise
4092 */
4093 static int
4094 flow_create_split_outer(struct rte_eth_dev *dev,
4095 struct rte_flow *flow,
4096 const struct rte_flow_attr *attr,
4097 const struct rte_flow_item items[],
4098 const struct rte_flow_action actions[],
4099 bool external, struct rte_flow_error *error)
4100 {
4101 int ret;
4102
4103 ret = flow_create_split_meter(dev, flow, attr, items,
4104 actions, external, error);
4105 MLX5_ASSERT(ret <= 0);
4106 return ret;
4107 }
4108
4109 /**
4110 * Create a flow and add it to @p list.
4111 *
4112 * @param dev
4113 * Pointer to Ethernet device.
4114 * @param list
4115 * Pointer to a TAILQ flow list. If this parameter is NULL,
4116 * no list insertion occurs; the flow is just created and it is
4117 * the caller's responsibility to track the
4118 * created flow.
4119 * @param[in] attr
4120 * Flow rule attributes.
4121 * @param[in] items
4122 * Pattern specification (list terminated by the END pattern item).
4123 * @param[in] actions
4124 * Associated actions (list terminated by the END action).
4125 * @param[in] external
4126 * This flow rule is created by request external to PMD.
4127 * @param[out] error
4128 * Perform verbose error reporting if not NULL.
4129 *
4130 * @return
4131 * A flow on success, NULL otherwise and rte_errno is set.
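 *
 * A minimal internal-usage sketch (illustrative only, mirroring the call made
 * from mlx5_flow_create() below; the variable names are placeholders):
 * @code
 * struct rte_flow_error err;
 * struct rte_flow *f = flow_list_create(dev, &priv->flows, &attr,
 *                                        items, actions, true, &err);
 *
 * if (!f)
 *         DRV_LOG(ERR, "flow creation failed: %s",
 *                 err.message ? err.message : "(no stated reason)");
 * @endcode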
4132 */ 4133 static struct rte_flow * 4134 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, 4135 const struct rte_flow_attr *attr, 4136 const struct rte_flow_item items[], 4137 const struct rte_flow_action actions[], 4138 bool external, struct rte_flow_error *error) 4139 { 4140 struct mlx5_priv *priv = dev->data->dev_private; 4141 struct rte_flow *flow = NULL; 4142 struct mlx5_flow *dev_flow; 4143 const struct rte_flow_action_rss *rss; 4144 union { 4145 struct rte_flow_expand_rss buf; 4146 uint8_t buffer[2048]; 4147 } expand_buffer; 4148 union { 4149 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; 4150 uint8_t buffer[2048]; 4151 } actions_rx; 4152 union { 4153 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; 4154 uint8_t buffer[2048]; 4155 } actions_hairpin_tx; 4156 union { 4157 struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS]; 4158 uint8_t buffer[2048]; 4159 } items_tx; 4160 struct rte_flow_expand_rss *buf = &expand_buffer.buf; 4161 const struct rte_flow_action *p_actions_rx = actions; 4162 uint32_t i; 4163 uint32_t flow_size; 4164 int hairpin_flow = 0; 4165 uint32_t hairpin_id = 0; 4166 struct rte_flow_attr attr_tx = { .priority = 0 }; 4167 int ret = flow_drv_validate(dev, attr, items, p_actions_rx, external, 4168 error); 4169 4170 if (ret < 0) 4171 return NULL; 4172 hairpin_flow = flow_check_hairpin_split(dev, attr, actions); 4173 if (hairpin_flow > 0) { 4174 if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) { 4175 rte_errno = EINVAL; 4176 return NULL; 4177 } 4178 flow_hairpin_split(dev, actions, actions_rx.actions, 4179 actions_hairpin_tx.actions, items_tx.items, 4180 &hairpin_id); 4181 p_actions_rx = actions_rx.actions; 4182 } 4183 flow_size = sizeof(struct rte_flow); 4184 rss = flow_get_rss_action(p_actions_rx); 4185 if (rss) 4186 flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t), 4187 sizeof(void *)); 4188 else 4189 flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *)); 4190 flow = rte_calloc(__func__, 1, flow_size, 0); 4191 if (!flow) { 4192 rte_errno = ENOMEM; 4193 goto error_before_flow; 4194 } 4195 flow->drv_type = flow_get_drv_type(dev, attr); 4196 if (hairpin_id != 0) 4197 flow->hairpin_flow_id = hairpin_id; 4198 MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN && 4199 flow->drv_type < MLX5_FLOW_TYPE_MAX); 4200 flow->rss.queue = (void *)(flow + 1); 4201 if (rss) { 4202 /* 4203 * The following information is required by 4204 * mlx5_flow_hashfields_adjust() in advance. 4205 */ 4206 flow->rss.level = rss->level; 4207 /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */ 4208 flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types; 4209 } 4210 LIST_INIT(&flow->dev_flows); 4211 if (rss && rss->types) { 4212 unsigned int graph_root; 4213 4214 graph_root = find_graph_root(items, rss->level); 4215 ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer), 4216 items, rss->types, 4217 mlx5_support_expansion, 4218 graph_root); 4219 MLX5_ASSERT(ret > 0 && 4220 (unsigned int)ret < sizeof(expand_buffer.buffer)); 4221 } else { 4222 buf->entries = 1; 4223 buf->entry[0].pattern = (void *)(uintptr_t)items; 4224 } 4225 for (i = 0; i < buf->entries; ++i) { 4226 /* 4227 * The splitter may create multiple dev_flows, 4228 * depending on configuration. In the simplest 4229 * case it just creates unmodified original flow. 4230 */ 4231 ret = flow_create_split_outer(dev, flow, attr, 4232 buf->entry[i].pattern, 4233 p_actions_rx, external, 4234 error); 4235 if (ret < 0) 4236 goto error; 4237 } 4238 /* Create the tx flow. 
*/ 4239 if (hairpin_flow) { 4240 attr_tx.group = MLX5_HAIRPIN_TX_TABLE; 4241 attr_tx.ingress = 0; 4242 attr_tx.egress = 1; 4243 dev_flow = flow_drv_prepare(flow, &attr_tx, items_tx.items, 4244 actions_hairpin_tx.actions, error); 4245 if (!dev_flow) 4246 goto error; 4247 dev_flow->flow = flow; 4248 dev_flow->external = 0; 4249 LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); 4250 ret = flow_drv_translate(dev, dev_flow, &attr_tx, 4251 items_tx.items, 4252 actions_hairpin_tx.actions, error); 4253 if (ret < 0) 4254 goto error; 4255 } 4256 /* 4257 * Update the metadata register copy table. If extensive 4258 * metadata feature is enabled and registers are supported 4259 * we might create the extra rte_flow for each unique 4260 * MARK/FLAG action ID. 4261 * 4262 * The table is updated for ingress Flows only, because 4263 * the egress Flows belong to the different device and 4264 * copy table should be updated in peer NIC Rx domain. 4265 */ 4266 if (attr->ingress && 4267 (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) { 4268 ret = flow_mreg_update_copy_table(dev, flow, actions, error); 4269 if (ret) 4270 goto error; 4271 } 4272 if (dev->data->dev_started) { 4273 ret = flow_drv_apply(dev, flow, error); 4274 if (ret < 0) 4275 goto error; 4276 } 4277 if (list) 4278 TAILQ_INSERT_TAIL(list, flow, next); 4279 flow_rxq_flags_set(dev, flow); 4280 return flow; 4281 error_before_flow: 4282 if (hairpin_id) 4283 mlx5_flow_id_release(priv->sh->flow_id_pool, 4284 hairpin_id); 4285 return NULL; 4286 error: 4287 MLX5_ASSERT(flow); 4288 flow_mreg_del_copy_action(dev, flow); 4289 ret = rte_errno; /* Save rte_errno before cleanup. */ 4290 if (flow->hairpin_flow_id) 4291 mlx5_flow_id_release(priv->sh->flow_id_pool, 4292 flow->hairpin_flow_id); 4293 MLX5_ASSERT(flow); 4294 flow_drv_destroy(dev, flow); 4295 rte_free(flow); 4296 rte_errno = ret; /* Restore rte_errno. */ 4297 return NULL; 4298 } 4299 4300 /** 4301 * Create a dedicated flow rule on e-switch table 0 (root table), to direct all 4302 * incoming packets to table 1. 4303 * 4304 * Other flow rules, requested for group n, will be created in 4305 * e-switch table n+1. 4306 * Jump action to e-switch group n will be created to group n+1. 4307 * 4308 * Used when working in switchdev mode, to utilise advantages of table 1 4309 * and above. 4310 * 4311 * @param dev 4312 * Pointer to Ethernet device. 4313 * 4314 * @return 4315 * Pointer to flow on success, NULL otherwise and rte_errno is set. 4316 */ 4317 struct rte_flow * 4318 mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev) 4319 { 4320 const struct rte_flow_attr attr = { 4321 .group = 0, 4322 .priority = 0, 4323 .ingress = 1, 4324 .egress = 0, 4325 .transfer = 1, 4326 }; 4327 const struct rte_flow_item pattern = { 4328 .type = RTE_FLOW_ITEM_TYPE_END, 4329 }; 4330 struct rte_flow_action_jump jump = { 4331 .group = 1, 4332 }; 4333 const struct rte_flow_action actions[] = { 4334 { 4335 .type = RTE_FLOW_ACTION_TYPE_JUMP, 4336 .conf = &jump, 4337 }, 4338 { 4339 .type = RTE_FLOW_ACTION_TYPE_END, 4340 }, 4341 }; 4342 struct mlx5_priv *priv = dev->data->dev_private; 4343 struct rte_flow_error error; 4344 4345 return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern, 4346 actions, false, &error); 4347 } 4348 4349 /** 4350 * Create a flow. 
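 *
 * A minimal application-side sketch (illustrative only; port_id and the queue
 * index are placeholders, and error handling is omitted):
 * @code
 * struct rte_flow_attr attr = { .ingress = 1 };
 * struct rte_flow_item pattern[] = {
 *         { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *         { .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * struct rte_flow_action_queue queue = { .index = 0 };
 * struct rte_flow_action actions[] = {
 *         { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *         { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_flow_error err;
 * struct rte_flow *f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 * @endcode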
4351 * 4352 * @see rte_flow_create() 4353 * @see rte_flow_ops 4354 */ 4355 struct rte_flow * 4356 mlx5_flow_create(struct rte_eth_dev *dev, 4357 const struct rte_flow_attr *attr, 4358 const struct rte_flow_item items[], 4359 const struct rte_flow_action actions[], 4360 struct rte_flow_error *error) 4361 { 4362 struct mlx5_priv *priv = dev->data->dev_private; 4363 4364 return flow_list_create(dev, &priv->flows, 4365 attr, items, actions, true, error); 4366 } 4367 4368 /** 4369 * Destroy a flow in a list. 4370 * 4371 * @param dev 4372 * Pointer to Ethernet device. 4373 * @param list 4374 * Pointer to a TAILQ flow list. If this parameter NULL, 4375 * there is no flow removal from the list. 4376 * @param[in] flow 4377 * Flow to destroy. 4378 */ 4379 static void 4380 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, 4381 struct rte_flow *flow) 4382 { 4383 struct mlx5_priv *priv = dev->data->dev_private; 4384 4385 /* 4386 * Update RX queue flags only if port is started, otherwise it is 4387 * already clean. 4388 */ 4389 if (dev->data->dev_started) 4390 flow_rxq_flags_trim(dev, flow); 4391 if (flow->hairpin_flow_id) 4392 mlx5_flow_id_release(priv->sh->flow_id_pool, 4393 flow->hairpin_flow_id); 4394 flow_drv_destroy(dev, flow); 4395 if (list) 4396 TAILQ_REMOVE(list, flow, next); 4397 flow_mreg_del_copy_action(dev, flow); 4398 rte_free(flow->fdir); 4399 rte_free(flow); 4400 } 4401 4402 /** 4403 * Destroy all flows. 4404 * 4405 * @param dev 4406 * Pointer to Ethernet device. 4407 * @param list 4408 * Pointer to a TAILQ flow list. 4409 */ 4410 void 4411 mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list) 4412 { 4413 while (!TAILQ_EMPTY(list)) { 4414 struct rte_flow *flow; 4415 4416 flow = TAILQ_FIRST(list); 4417 flow_list_destroy(dev, list, flow); 4418 } 4419 } 4420 4421 /** 4422 * Remove all flows. 4423 * 4424 * @param dev 4425 * Pointer to Ethernet device. 4426 * @param list 4427 * Pointer to a TAILQ flow list. 4428 */ 4429 void 4430 mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) 4431 { 4432 struct rte_flow *flow; 4433 4434 TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) { 4435 flow_drv_remove(dev, flow); 4436 flow_mreg_stop_copy_action(dev, flow); 4437 } 4438 flow_mreg_del_default_copy_action(dev); 4439 flow_rxq_flags_clear(dev); 4440 } 4441 4442 /** 4443 * Add all flows. 4444 * 4445 * @param dev 4446 * Pointer to Ethernet device. 4447 * @param list 4448 * Pointer to a TAILQ flow list. 4449 * 4450 * @return 4451 * 0 on success, a negative errno value otherwise and rte_errno is set. 4452 */ 4453 int 4454 mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) 4455 { 4456 struct rte_flow *flow; 4457 struct rte_flow_error error; 4458 int ret = 0; 4459 4460 /* Make sure default copy action (reg_c[0] -> reg_b) is created. */ 4461 ret = flow_mreg_add_default_copy_action(dev, &error); 4462 if (ret < 0) 4463 return -rte_errno; 4464 /* Apply Flows created by application. */ 4465 TAILQ_FOREACH(flow, list, next) { 4466 ret = flow_mreg_start_copy_action(dev, flow); 4467 if (ret < 0) 4468 goto error; 4469 ret = flow_drv_apply(dev, flow, &error); 4470 if (ret < 0) 4471 goto error; 4472 flow_rxq_flags_set(dev, flow); 4473 } 4474 return 0; 4475 error: 4476 ret = rte_errno; /* Save rte_errno before cleanup. */ 4477 mlx5_flow_stop(dev, list); 4478 rte_errno = ret; /* Restore rte_errno. */ 4479 return -rte_errno; 4480 } 4481 4482 /** 4483 * Verify the flow list is empty 4484 * 4485 * @param dev 4486 * Pointer to Ethernet device. 
4487 * 4488 * @return the number of flows not released. 4489 */ 4490 int 4491 mlx5_flow_verify(struct rte_eth_dev *dev) 4492 { 4493 struct mlx5_priv *priv = dev->data->dev_private; 4494 struct rte_flow *flow; 4495 int ret = 0; 4496 4497 TAILQ_FOREACH(flow, &priv->flows, next) { 4498 DRV_LOG(DEBUG, "port %u flow %p still referenced", 4499 dev->data->port_id, (void *)flow); 4500 ++ret; 4501 } 4502 return ret; 4503 } 4504 4505 /** 4506 * Enable default hairpin egress flow. 4507 * 4508 * @param dev 4509 * Pointer to Ethernet device. 4510 * @param queue 4511 * The queue index. 4512 * 4513 * @return 4514 * 0 on success, a negative errno value otherwise and rte_errno is set. 4515 */ 4516 int 4517 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, 4518 uint32_t queue) 4519 { 4520 struct mlx5_priv *priv = dev->data->dev_private; 4521 const struct rte_flow_attr attr = { 4522 .egress = 1, 4523 .priority = 0, 4524 }; 4525 struct mlx5_rte_flow_item_tx_queue queue_spec = { 4526 .queue = queue, 4527 }; 4528 struct mlx5_rte_flow_item_tx_queue queue_mask = { 4529 .queue = UINT32_MAX, 4530 }; 4531 struct rte_flow_item items[] = { 4532 { 4533 .type = MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE, 4534 .spec = &queue_spec, 4535 .last = NULL, 4536 .mask = &queue_mask, 4537 }, 4538 { 4539 .type = RTE_FLOW_ITEM_TYPE_END, 4540 }, 4541 }; 4542 struct rte_flow_action_jump jump = { 4543 .group = MLX5_HAIRPIN_TX_TABLE, 4544 }; 4545 struct rte_flow_action actions[2]; 4546 struct rte_flow *flow; 4547 struct rte_flow_error error; 4548 4549 actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP; 4550 actions[0].conf = &jump; 4551 actions[1].type = RTE_FLOW_ACTION_TYPE_END; 4552 flow = flow_list_create(dev, &priv->ctrl_flows, 4553 &attr, items, actions, false, &error); 4554 if (!flow) { 4555 DRV_LOG(DEBUG, 4556 "Failed to create ctrl flow: rte_errno(%d)," 4557 " type(%d), message(%s)", 4558 rte_errno, error.type, 4559 error.message ? error.message : " (no stated reason)"); 4560 return -rte_errno; 4561 } 4562 return 0; 4563 } 4564 4565 /** 4566 * Enable a control flow configured from the control plane. 4567 * 4568 * @param dev 4569 * Pointer to Ethernet device. 4570 * @param eth_spec 4571 * An Ethernet flow spec to apply. 4572 * @param eth_mask 4573 * An Ethernet flow mask to apply. 4574 * @param vlan_spec 4575 * A VLAN flow spec to apply. 4576 * @param vlan_mask 4577 * A VLAN flow mask to apply. 4578 * 4579 * @return 4580 * 0 on success, a negative errno value otherwise and rte_errno is set. 4581 */ 4582 int 4583 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, 4584 struct rte_flow_item_eth *eth_spec, 4585 struct rte_flow_item_eth *eth_mask, 4586 struct rte_flow_item_vlan *vlan_spec, 4587 struct rte_flow_item_vlan *vlan_mask) 4588 { 4589 struct mlx5_priv *priv = dev->data->dev_private; 4590 const struct rte_flow_attr attr = { 4591 .ingress = 1, 4592 .priority = MLX5_FLOW_PRIO_RSVD, 4593 }; 4594 struct rte_flow_item items[] = { 4595 { 4596 .type = RTE_FLOW_ITEM_TYPE_ETH, 4597 .spec = eth_spec, 4598 .last = NULL, 4599 .mask = eth_mask, 4600 }, 4601 { 4602 .type = (vlan_spec) ? 
RTE_FLOW_ITEM_TYPE_VLAN : 4603 RTE_FLOW_ITEM_TYPE_END, 4604 .spec = vlan_spec, 4605 .last = NULL, 4606 .mask = vlan_mask, 4607 }, 4608 { 4609 .type = RTE_FLOW_ITEM_TYPE_END, 4610 }, 4611 }; 4612 uint16_t queue[priv->reta_idx_n]; 4613 struct rte_flow_action_rss action_rss = { 4614 .func = RTE_ETH_HASH_FUNCTION_DEFAULT, 4615 .level = 0, 4616 .types = priv->rss_conf.rss_hf, 4617 .key_len = priv->rss_conf.rss_key_len, 4618 .queue_num = priv->reta_idx_n, 4619 .key = priv->rss_conf.rss_key, 4620 .queue = queue, 4621 }; 4622 struct rte_flow_action actions[] = { 4623 { 4624 .type = RTE_FLOW_ACTION_TYPE_RSS, 4625 .conf = &action_rss, 4626 }, 4627 { 4628 .type = RTE_FLOW_ACTION_TYPE_END, 4629 }, 4630 }; 4631 struct rte_flow *flow; 4632 struct rte_flow_error error; 4633 unsigned int i; 4634 4635 if (!priv->reta_idx_n || !priv->rxqs_n) { 4636 return 0; 4637 } 4638 for (i = 0; i != priv->reta_idx_n; ++i) 4639 queue[i] = (*priv->reta_idx)[i]; 4640 flow = flow_list_create(dev, &priv->ctrl_flows, 4641 &attr, items, actions, false, &error); 4642 if (!flow) 4643 return -rte_errno; 4644 return 0; 4645 } 4646 4647 /** 4648 * Enable a flow control configured from the control plane. 4649 * 4650 * @param dev 4651 * Pointer to Ethernet device. 4652 * @param eth_spec 4653 * An Ethernet flow spec to apply. 4654 * @param eth_mask 4655 * An Ethernet flow mask to apply. 4656 * 4657 * @return 4658 * 0 on success, a negative errno value otherwise and rte_errno is set. 4659 */ 4660 int 4661 mlx5_ctrl_flow(struct rte_eth_dev *dev, 4662 struct rte_flow_item_eth *eth_spec, 4663 struct rte_flow_item_eth *eth_mask) 4664 { 4665 return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL); 4666 } 4667 4668 /** 4669 * Destroy a flow. 4670 * 4671 * @see rte_flow_destroy() 4672 * @see rte_flow_ops 4673 */ 4674 int 4675 mlx5_flow_destroy(struct rte_eth_dev *dev, 4676 struct rte_flow *flow, 4677 struct rte_flow_error *error __rte_unused) 4678 { 4679 struct mlx5_priv *priv = dev->data->dev_private; 4680 4681 flow_list_destroy(dev, &priv->flows, flow); 4682 return 0; 4683 } 4684 4685 /** 4686 * Destroy all flows. 4687 * 4688 * @see rte_flow_flush() 4689 * @see rte_flow_ops 4690 */ 4691 int 4692 mlx5_flow_flush(struct rte_eth_dev *dev, 4693 struct rte_flow_error *error __rte_unused) 4694 { 4695 struct mlx5_priv *priv = dev->data->dev_private; 4696 4697 mlx5_flow_list_flush(dev, &priv->flows); 4698 return 0; 4699 } 4700 4701 /** 4702 * Isolated mode. 4703 * 4704 * @see rte_flow_isolate() 4705 * @see rte_flow_ops 4706 */ 4707 int 4708 mlx5_flow_isolate(struct rte_eth_dev *dev, 4709 int enable, 4710 struct rte_flow_error *error) 4711 { 4712 struct mlx5_priv *priv = dev->data->dev_private; 4713 4714 if (dev->data->dev_started) { 4715 rte_flow_error_set(error, EBUSY, 4716 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 4717 NULL, 4718 "port must be stopped first"); 4719 return -rte_errno; 4720 } 4721 priv->isolated = !!enable; 4722 if (enable) 4723 dev->dev_ops = &mlx5_dev_ops_isolate; 4724 else 4725 dev->dev_ops = &mlx5_dev_ops; 4726 return 0; 4727 } 4728 4729 /** 4730 * Query a flow. 
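 *
 * A minimal application-side sketch of the query path served here through
 * rte_flow_query() (illustrative only; port_id and flow are placeholders and
 * the rule is assumed to carry a COUNT action):
 * @code
 * struct rte_flow_query_count count = { .reset = 0 };
 * const struct rte_flow_action count_action = {
 *         .type = RTE_FLOW_ACTION_TYPE_COUNT,
 * };
 * struct rte_flow_error err;
 *
 * if (!rte_flow_query(port_id, flow, &count_action, &count, &err))
 *         printf("hits: %" PRIu64 "\n", count.hits);
 * @endcode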
4731 * 4732 * @see rte_flow_query() 4733 * @see rte_flow_ops 4734 */ 4735 static int 4736 flow_drv_query(struct rte_eth_dev *dev, 4737 struct rte_flow *flow, 4738 const struct rte_flow_action *actions, 4739 void *data, 4740 struct rte_flow_error *error) 4741 { 4742 const struct mlx5_flow_driver_ops *fops; 4743 enum mlx5_flow_drv_type ftype = flow->drv_type; 4744 4745 MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX); 4746 fops = flow_get_drv_ops(ftype); 4747 4748 return fops->query(dev, flow, actions, data, error); 4749 } 4750 4751 /** 4752 * Query a flow. 4753 * 4754 * @see rte_flow_query() 4755 * @see rte_flow_ops 4756 */ 4757 int 4758 mlx5_flow_query(struct rte_eth_dev *dev, 4759 struct rte_flow *flow, 4760 const struct rte_flow_action *actions, 4761 void *data, 4762 struct rte_flow_error *error) 4763 { 4764 int ret; 4765 4766 ret = flow_drv_query(dev, flow, actions, data, error); 4767 if (ret < 0) 4768 return ret; 4769 return 0; 4770 } 4771 4772 /** 4773 * Convert a flow director filter to a generic flow. 4774 * 4775 * @param dev 4776 * Pointer to Ethernet device. 4777 * @param fdir_filter 4778 * Flow director filter to add. 4779 * @param attributes 4780 * Generic flow parameters structure. 4781 * 4782 * @return 4783 * 0 on success, a negative errno value otherwise and rte_errno is set. 4784 */ 4785 static int 4786 flow_fdir_filter_convert(struct rte_eth_dev *dev, 4787 const struct rte_eth_fdir_filter *fdir_filter, 4788 struct mlx5_fdir *attributes) 4789 { 4790 struct mlx5_priv *priv = dev->data->dev_private; 4791 const struct rte_eth_fdir_input *input = &fdir_filter->input; 4792 const struct rte_eth_fdir_masks *mask = 4793 &dev->data->dev_conf.fdir_conf.mask; 4794 4795 /* Validate queue number. */ 4796 if (fdir_filter->action.rx_queue >= priv->rxqs_n) { 4797 DRV_LOG(ERR, "port %u invalid queue number %d", 4798 dev->data->port_id, fdir_filter->action.rx_queue); 4799 rte_errno = EINVAL; 4800 return -rte_errno; 4801 } 4802 attributes->attr.ingress = 1; 4803 attributes->items[0] = (struct rte_flow_item) { 4804 .type = RTE_FLOW_ITEM_TYPE_ETH, 4805 .spec = &attributes->l2, 4806 .mask = &attributes->l2_mask, 4807 }; 4808 switch (fdir_filter->action.behavior) { 4809 case RTE_ETH_FDIR_ACCEPT: 4810 attributes->actions[0] = (struct rte_flow_action){ 4811 .type = RTE_FLOW_ACTION_TYPE_QUEUE, 4812 .conf = &attributes->queue, 4813 }; 4814 break; 4815 case RTE_ETH_FDIR_REJECT: 4816 attributes->actions[0] = (struct rte_flow_action){ 4817 .type = RTE_FLOW_ACTION_TYPE_DROP, 4818 }; 4819 break; 4820 default: 4821 DRV_LOG(ERR, "port %u invalid behavior %d", 4822 dev->data->port_id, 4823 fdir_filter->action.behavior); 4824 rte_errno = ENOTSUP; 4825 return -rte_errno; 4826 } 4827 attributes->queue.index = fdir_filter->action.rx_queue; 4828 /* Handle L3. 
*/ 4829 switch (fdir_filter->input.flow_type) { 4830 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 4831 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 4832 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 4833 attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){ 4834 .src_addr = input->flow.ip4_flow.src_ip, 4835 .dst_addr = input->flow.ip4_flow.dst_ip, 4836 .time_to_live = input->flow.ip4_flow.ttl, 4837 .type_of_service = input->flow.ip4_flow.tos, 4838 }; 4839 attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){ 4840 .src_addr = mask->ipv4_mask.src_ip, 4841 .dst_addr = mask->ipv4_mask.dst_ip, 4842 .time_to_live = mask->ipv4_mask.ttl, 4843 .type_of_service = mask->ipv4_mask.tos, 4844 .next_proto_id = mask->ipv4_mask.proto, 4845 }; 4846 attributes->items[1] = (struct rte_flow_item){ 4847 .type = RTE_FLOW_ITEM_TYPE_IPV4, 4848 .spec = &attributes->l3, 4849 .mask = &attributes->l3_mask, 4850 }; 4851 break; 4852 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 4853 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 4854 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 4855 attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){ 4856 .hop_limits = input->flow.ipv6_flow.hop_limits, 4857 .proto = input->flow.ipv6_flow.proto, 4858 }; 4859 4860 memcpy(attributes->l3.ipv6.hdr.src_addr, 4861 input->flow.ipv6_flow.src_ip, 4862 RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); 4863 memcpy(attributes->l3.ipv6.hdr.dst_addr, 4864 input->flow.ipv6_flow.dst_ip, 4865 RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); 4866 memcpy(attributes->l3_mask.ipv6.hdr.src_addr, 4867 mask->ipv6_mask.src_ip, 4868 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); 4869 memcpy(attributes->l3_mask.ipv6.hdr.dst_addr, 4870 mask->ipv6_mask.dst_ip, 4871 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); 4872 attributes->items[1] = (struct rte_flow_item){ 4873 .type = RTE_FLOW_ITEM_TYPE_IPV6, 4874 .spec = &attributes->l3, 4875 .mask = &attributes->l3_mask, 4876 }; 4877 break; 4878 default: 4879 DRV_LOG(ERR, "port %u invalid flow type%d", 4880 dev->data->port_id, fdir_filter->input.flow_type); 4881 rte_errno = ENOTSUP; 4882 return -rte_errno; 4883 } 4884 /* Handle L4. 
*/ 4885 switch (fdir_filter->input.flow_type) { 4886 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 4887 attributes->l4.udp.hdr = (struct rte_udp_hdr){ 4888 .src_port = input->flow.udp4_flow.src_port, 4889 .dst_port = input->flow.udp4_flow.dst_port, 4890 }; 4891 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ 4892 .src_port = mask->src_port_mask, 4893 .dst_port = mask->dst_port_mask, 4894 }; 4895 attributes->items[2] = (struct rte_flow_item){ 4896 .type = RTE_FLOW_ITEM_TYPE_UDP, 4897 .spec = &attributes->l4, 4898 .mask = &attributes->l4_mask, 4899 }; 4900 break; 4901 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 4902 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ 4903 .src_port = input->flow.tcp4_flow.src_port, 4904 .dst_port = input->flow.tcp4_flow.dst_port, 4905 }; 4906 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ 4907 .src_port = mask->src_port_mask, 4908 .dst_port = mask->dst_port_mask, 4909 }; 4910 attributes->items[2] = (struct rte_flow_item){ 4911 .type = RTE_FLOW_ITEM_TYPE_TCP, 4912 .spec = &attributes->l4, 4913 .mask = &attributes->l4_mask, 4914 }; 4915 break; 4916 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 4917 attributes->l4.udp.hdr = (struct rte_udp_hdr){ 4918 .src_port = input->flow.udp6_flow.src_port, 4919 .dst_port = input->flow.udp6_flow.dst_port, 4920 }; 4921 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ 4922 .src_port = mask->src_port_mask, 4923 .dst_port = mask->dst_port_mask, 4924 }; 4925 attributes->items[2] = (struct rte_flow_item){ 4926 .type = RTE_FLOW_ITEM_TYPE_UDP, 4927 .spec = &attributes->l4, 4928 .mask = &attributes->l4_mask, 4929 }; 4930 break; 4931 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 4932 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ 4933 .src_port = input->flow.tcp6_flow.src_port, 4934 .dst_port = input->flow.tcp6_flow.dst_port, 4935 }; 4936 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ 4937 .src_port = mask->src_port_mask, 4938 .dst_port = mask->dst_port_mask, 4939 }; 4940 attributes->items[2] = (struct rte_flow_item){ 4941 .type = RTE_FLOW_ITEM_TYPE_TCP, 4942 .spec = &attributes->l4, 4943 .mask = &attributes->l4_mask, 4944 }; 4945 break; 4946 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 4947 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 4948 break; 4949 default: 4950 DRV_LOG(ERR, "port %u invalid flow type%d", 4951 dev->data->port_id, fdir_filter->input.flow_type); 4952 rte_errno = ENOTSUP; 4953 return -rte_errno; 4954 } 4955 return 0; 4956 } 4957 4958 #define FLOW_FDIR_CMP(f1, f2, fld) \ 4959 memcmp(&(f1)->fld, &(f2)->fld, sizeof(f1->fld)) 4960 4961 /** 4962 * Compare two FDIR flows. If items and actions are identical, the two flows are 4963 * regarded as same. 4964 * 4965 * @param dev 4966 * Pointer to Ethernet device. 4967 * @param f1 4968 * FDIR flow to compare. 4969 * @param f2 4970 * FDIR flow to compare. 4971 * 4972 * @return 4973 * Zero on match, 1 otherwise. 4974 */ 4975 static int 4976 flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2) 4977 { 4978 if (FLOW_FDIR_CMP(f1, f2, attr) || 4979 FLOW_FDIR_CMP(f1, f2, l2) || 4980 FLOW_FDIR_CMP(f1, f2, l2_mask) || 4981 FLOW_FDIR_CMP(f1, f2, l3) || 4982 FLOW_FDIR_CMP(f1, f2, l3_mask) || 4983 FLOW_FDIR_CMP(f1, f2, l4) || 4984 FLOW_FDIR_CMP(f1, f2, l4_mask) || 4985 FLOW_FDIR_CMP(f1, f2, actions[0].type)) 4986 return 1; 4987 if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE && 4988 FLOW_FDIR_CMP(f1, f2, queue)) 4989 return 1; 4990 return 0; 4991 } 4992 4993 /** 4994 * Search device flow list to find out a matched FDIR flow. 4995 * 4996 * @param dev 4997 * Pointer to Ethernet device. 
4998 * @param fdir_flow 4999 * FDIR flow to lookup. 5000 * 5001 * @return 5002 * Pointer of flow if found, NULL otherwise. 5003 */ 5004 static struct rte_flow * 5005 flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow) 5006 { 5007 struct mlx5_priv *priv = dev->data->dev_private; 5008 struct rte_flow *flow = NULL; 5009 5010 MLX5_ASSERT(fdir_flow); 5011 TAILQ_FOREACH(flow, &priv->flows, next) { 5012 if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) { 5013 DRV_LOG(DEBUG, "port %u found FDIR flow %p", 5014 dev->data->port_id, (void *)flow); 5015 break; 5016 } 5017 } 5018 return flow; 5019 } 5020 5021 /** 5022 * Add new flow director filter and store it in list. 5023 * 5024 * @param dev 5025 * Pointer to Ethernet device. 5026 * @param fdir_filter 5027 * Flow director filter to add. 5028 * 5029 * @return 5030 * 0 on success, a negative errno value otherwise and rte_errno is set. 5031 */ 5032 static int 5033 flow_fdir_filter_add(struct rte_eth_dev *dev, 5034 const struct rte_eth_fdir_filter *fdir_filter) 5035 { 5036 struct mlx5_priv *priv = dev->data->dev_private; 5037 struct mlx5_fdir *fdir_flow; 5038 struct rte_flow *flow; 5039 int ret; 5040 5041 fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0); 5042 if (!fdir_flow) { 5043 rte_errno = ENOMEM; 5044 return -rte_errno; 5045 } 5046 ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow); 5047 if (ret) 5048 goto error; 5049 flow = flow_fdir_filter_lookup(dev, fdir_flow); 5050 if (flow) { 5051 rte_errno = EEXIST; 5052 goto error; 5053 } 5054 flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr, 5055 fdir_flow->items, fdir_flow->actions, true, 5056 NULL); 5057 if (!flow) 5058 goto error; 5059 MLX5_ASSERT(!flow->fdir); 5060 flow->fdir = fdir_flow; 5061 DRV_LOG(DEBUG, "port %u created FDIR flow %p", 5062 dev->data->port_id, (void *)flow); 5063 return 0; 5064 error: 5065 rte_free(fdir_flow); 5066 return -rte_errno; 5067 } 5068 5069 /** 5070 * Delete specific filter. 5071 * 5072 * @param dev 5073 * Pointer to Ethernet device. 5074 * @param fdir_filter 5075 * Filter to be deleted. 5076 * 5077 * @return 5078 * 0 on success, a negative errno value otherwise and rte_errno is set. 5079 */ 5080 static int 5081 flow_fdir_filter_delete(struct rte_eth_dev *dev, 5082 const struct rte_eth_fdir_filter *fdir_filter) 5083 { 5084 struct mlx5_priv *priv = dev->data->dev_private; 5085 struct rte_flow *flow; 5086 struct mlx5_fdir fdir_flow = { 5087 .attr.group = 0, 5088 }; 5089 int ret; 5090 5091 ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow); 5092 if (ret) 5093 return -rte_errno; 5094 flow = flow_fdir_filter_lookup(dev, &fdir_flow); 5095 if (!flow) { 5096 rte_errno = ENOENT; 5097 return -rte_errno; 5098 } 5099 flow_list_destroy(dev, &priv->flows, flow); 5100 DRV_LOG(DEBUG, "port %u deleted FDIR flow %p", 5101 dev->data->port_id, (void *)flow); 5102 return 0; 5103 } 5104 5105 /** 5106 * Update queue for specific filter. 5107 * 5108 * @param dev 5109 * Pointer to Ethernet device. 5110 * @param fdir_filter 5111 * Filter to be updated. 5112 * 5113 * @return 5114 * 0 on success, a negative errno value otherwise and rte_errno is set. 5115 */ 5116 static int 5117 flow_fdir_filter_update(struct rte_eth_dev *dev, 5118 const struct rte_eth_fdir_filter *fdir_filter) 5119 { 5120 int ret; 5121 5122 ret = flow_fdir_filter_delete(dev, fdir_filter); 5123 if (ret) 5124 return ret; 5125 return flow_fdir_filter_add(dev, fdir_filter); 5126 } 5127 5128 /** 5129 * Flush all filters. 
5130 * 5131 * @param dev 5132 * Pointer to Ethernet device. 5133 */ 5134 static void 5135 flow_fdir_filter_flush(struct rte_eth_dev *dev) 5136 { 5137 struct mlx5_priv *priv = dev->data->dev_private; 5138 5139 mlx5_flow_list_flush(dev, &priv->flows); 5140 } 5141 5142 /** 5143 * Get flow director information. 5144 * 5145 * @param dev 5146 * Pointer to Ethernet device. 5147 * @param[out] fdir_info 5148 * Resulting flow director information. 5149 */ 5150 static void 5151 flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) 5152 { 5153 struct rte_eth_fdir_masks *mask = 5154 &dev->data->dev_conf.fdir_conf.mask; 5155 5156 fdir_info->mode = dev->data->dev_conf.fdir_conf.mode; 5157 fdir_info->guarant_spc = 0; 5158 rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask)); 5159 fdir_info->max_flexpayload = 0; 5160 fdir_info->flow_types_mask[0] = 0; 5161 fdir_info->flex_payload_unit = 0; 5162 fdir_info->max_flex_payload_segment_num = 0; 5163 fdir_info->flex_payload_limit = 0; 5164 memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf)); 5165 } 5166 5167 /** 5168 * Deal with flow director operations. 5169 * 5170 * @param dev 5171 * Pointer to Ethernet device. 5172 * @param filter_op 5173 * Operation to perform. 5174 * @param arg 5175 * Pointer to operation-specific structure. 5176 * 5177 * @return 5178 * 0 on success, a negative errno value otherwise and rte_errno is set. 5179 */ 5180 static int 5181 flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, 5182 void *arg) 5183 { 5184 enum rte_fdir_mode fdir_mode = 5185 dev->data->dev_conf.fdir_conf.mode; 5186 5187 if (filter_op == RTE_ETH_FILTER_NOP) 5188 return 0; 5189 if (fdir_mode != RTE_FDIR_MODE_PERFECT && 5190 fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 5191 DRV_LOG(ERR, "port %u flow director mode %d not supported", 5192 dev->data->port_id, fdir_mode); 5193 rte_errno = EINVAL; 5194 return -rte_errno; 5195 } 5196 switch (filter_op) { 5197 case RTE_ETH_FILTER_ADD: 5198 return flow_fdir_filter_add(dev, arg); 5199 case RTE_ETH_FILTER_UPDATE: 5200 return flow_fdir_filter_update(dev, arg); 5201 case RTE_ETH_FILTER_DELETE: 5202 return flow_fdir_filter_delete(dev, arg); 5203 case RTE_ETH_FILTER_FLUSH: 5204 flow_fdir_filter_flush(dev); 5205 break; 5206 case RTE_ETH_FILTER_INFO: 5207 flow_fdir_info_get(dev, arg); 5208 break; 5209 default: 5210 DRV_LOG(DEBUG, "port %u unknown operation %u", 5211 dev->data->port_id, filter_op); 5212 rte_errno = EINVAL; 5213 return -rte_errno; 5214 } 5215 return 0; 5216 } 5217 5218 /** 5219 * Manage filter operations. 5220 * 5221 * @param dev 5222 * Pointer to Ethernet device structure. 5223 * @param filter_type 5224 * Filter type. 5225 * @param filter_op 5226 * Operation to perform. 5227 * @param arg 5228 * Pointer to operation-specific structure. 5229 * 5230 * @return 5231 * 0 on success, a negative errno value otherwise and rte_errno is set. 
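 *
 * For example, the generic filter type only supports RTE_ETH_FILTER_GET and
 * returns the rte_flow ops (a minimal sketch, assuming a valid dev pointer):
 * @code
 * const struct rte_flow_ops *ops = NULL;
 *
 * if (!mlx5_dev_filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
 *                           RTE_ETH_FILTER_GET, &ops))
 *         MLX5_ASSERT(ops != NULL);
 * @endcode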
5232 */ 5233 int 5234 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, 5235 enum rte_filter_type filter_type, 5236 enum rte_filter_op filter_op, 5237 void *arg) 5238 { 5239 switch (filter_type) { 5240 case RTE_ETH_FILTER_GENERIC: 5241 if (filter_op != RTE_ETH_FILTER_GET) { 5242 rte_errno = EINVAL; 5243 return -rte_errno; 5244 } 5245 *(const void **)arg = &mlx5_flow_ops; 5246 return 0; 5247 case RTE_ETH_FILTER_FDIR: 5248 return flow_fdir_ctrl_func(dev, filter_op, arg); 5249 default: 5250 DRV_LOG(ERR, "port %u filter type (%d) not supported", 5251 dev->data->port_id, filter_type); 5252 rte_errno = ENOTSUP; 5253 return -rte_errno; 5254 } 5255 return 0; 5256 } 5257 5258 /** 5259 * Create the needed meter and suffix tables. 5260 * 5261 * @param[in] dev 5262 * Pointer to Ethernet device. 5263 * @param[in] fm 5264 * Pointer to the flow meter. 5265 * 5266 * @return 5267 * Pointer to table set on success, NULL otherwise. 5268 */ 5269 struct mlx5_meter_domains_infos * 5270 mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev, 5271 const struct mlx5_flow_meter *fm) 5272 { 5273 const struct mlx5_flow_driver_ops *fops; 5274 5275 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5276 return fops->create_mtr_tbls(dev, fm); 5277 } 5278 5279 /** 5280 * Destroy the meter table set. 5281 * 5282 * @param[in] dev 5283 * Pointer to Ethernet device. 5284 * @param[in] tbl 5285 * Pointer to the meter table set. 5286 * 5287 * @return 5288 * 0 on success. 5289 */ 5290 int 5291 mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev, 5292 struct mlx5_meter_domains_infos *tbls) 5293 { 5294 const struct mlx5_flow_driver_ops *fops; 5295 5296 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5297 return fops->destroy_mtr_tbls(dev, tbls); 5298 } 5299 5300 /** 5301 * Create policer rules. 5302 * 5303 * @param[in] dev 5304 * Pointer to Ethernet device. 5305 * @param[in] fm 5306 * Pointer to flow meter structure. 5307 * @param[in] attr 5308 * Pointer to flow attributes. 5309 * 5310 * @return 5311 * 0 on success, -1 otherwise. 5312 */ 5313 int 5314 mlx5_flow_create_policer_rules(struct rte_eth_dev *dev, 5315 struct mlx5_flow_meter *fm, 5316 const struct rte_flow_attr *attr) 5317 { 5318 const struct mlx5_flow_driver_ops *fops; 5319 5320 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5321 return fops->create_policer_rules(dev, fm, attr); 5322 } 5323 5324 /** 5325 * Destroy policer rules. 5326 * 5327 * @param[in] fm 5328 * Pointer to flow meter structure. 5329 * @param[in] attr 5330 * Pointer to flow attributes. 5331 * 5332 * @return 5333 * 0 on success, -1 otherwise. 5334 */ 5335 int 5336 mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev, 5337 struct mlx5_flow_meter *fm, 5338 const struct rte_flow_attr *attr) 5339 { 5340 const struct mlx5_flow_driver_ops *fops; 5341 5342 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5343 return fops->destroy_policer_rules(dev, fm, attr); 5344 } 5345 5346 /** 5347 * Allocate a counter. 5348 * 5349 * @param[in] dev 5350 * Pointer to Ethernet device structure. 5351 * 5352 * @return 5353 * Pointer to allocated counter on success, NULL otherwise. 
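 *
 * A minimal usage sketch of the counter helpers below (illustrative only;
 * only supported when the DV flow engine is in use):
 * @code
 * uint64_t pkts = 0, bytes = 0;
 * struct mlx5_flow_counter *cnt = mlx5_counter_alloc(dev);
 *
 * if (cnt && !mlx5_counter_query(dev, cnt, false, &pkts, &bytes))
 *         DRV_LOG(DEBUG, "counter: %" PRIu64 " packets, %" PRIu64 " bytes",
 *                 pkts, bytes);
 * if (cnt)
 *         mlx5_counter_free(dev, cnt);
 * @endcode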
5354 */
5355 struct mlx5_flow_counter *
5356 mlx5_counter_alloc(struct rte_eth_dev *dev)
5357 {
5358 const struct mlx5_flow_driver_ops *fops;
5359 struct rte_flow_attr attr = { .transfer = 0 };
5360
5361 if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
5362 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5363 return fops->counter_alloc(dev);
5364 }
5365 DRV_LOG(ERR,
5366 "port %u counter allocation is not supported.",
5367 dev->data->port_id);
5368 return NULL;
5369 }
5370
5371 /**
5372 * Free a counter.
5373 *
5374 * @param[in] dev
5375 * Pointer to Ethernet device structure.
5376 * @param[in] cnt
5377 * Pointer to the counter to be freed.
5378 */
5379 void
5380 mlx5_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt)
5381 {
5382 const struct mlx5_flow_driver_ops *fops;
5383 struct rte_flow_attr attr = { .transfer = 0 };
5384
5385 if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
5386 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5387 fops->counter_free(dev, cnt);
5388 return;
5389 }
5390 DRV_LOG(ERR,
5391 "port %u counter free is not supported.",
5392 dev->data->port_id);
5393 }
5394
5395 /**
5396 * Query counter statistics.
5397 *
5398 * @param[in] dev
5399 * Pointer to Ethernet device structure.
5400 * @param[in] cnt
5401 * Pointer to the counter to query.
5402 * @param[in] clear
5403 * Set to clear counter statistics.
5404 * @param[out] pkts
5405 * Where to store the number of packets matched by the counter.
5406 * @param[out] bytes
5407 * Where to store the number of bytes matched by the counter.
5408 *
5409 * @return
5410 * 0 on success, a negative errno value otherwise.
5411 */
5412 int
5413 mlx5_counter_query(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt,
5414 bool clear, uint64_t *pkts, uint64_t *bytes)
5415 {
5416 const struct mlx5_flow_driver_ops *fops;
5417 struct rte_flow_attr attr = { .transfer = 0 };
5418
5419 if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
5420 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5421 return fops->counter_query(dev, cnt, clear, pkts, bytes);
5422 }
5423 DRV_LOG(ERR,
5424 "port %u counter query is not supported.",
5425 dev->data->port_id);
5426 return -ENOTSUP;
5427 }
5428
5429 #define MLX5_POOL_QUERY_FREQ_US 1000000
5430
5431 /**
5432 * Set the periodic procedure for triggering asynchronous batch queries for all
5433 * the counter pools.
5434 *
5435 * @param[in] sh
5436 * Pointer to mlx5_ibv_shared object.
5437 */
5438 void
5439 mlx5_set_query_alarm(struct mlx5_ibv_shared *sh)
5440 {
5441 struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(sh, 0, 0);
5442 uint32_t pools_n = rte_atomic16_read(&cont->n_valid);
5443 uint32_t us;
5444
5445 cont = MLX5_CNT_CONTAINER(sh, 1, 0);
5446 pools_n += rte_atomic16_read(&cont->n_valid);
5447 us = MLX5_POOL_QUERY_FREQ_US / pools_n;
5448 DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
5449 if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
5450 sh->cmng.query_thread_on = 0;
5451 DRV_LOG(ERR, "Cannot reinitialize query alarm");
5452 } else {
5453 sh->cmng.query_thread_on = 1;
5454 }
5455 }
5456
5457 /**
5458 * The periodic procedure for triggering asynchronous batch queries for all the
5459 * counter pools. This function is expected to be called by the host thread.
5460 *
5461 * @param[in] arg
5462 * The parameter for the alarm process.
5463 */
5464 void
5465 mlx5_flow_query_alarm(void *arg)
5466 {
5467 struct mlx5_ibv_shared *sh = arg;
5468 struct mlx5_devx_obj *dcs;
5469 uint16_t offset;
5470 int ret;
5471 uint8_t batch = sh->cmng.batch;
5472 uint16_t pool_index = sh->cmng.pool_index;
5473 struct mlx5_pools_container *cont;
5474 struct mlx5_pools_container *mcont;
5475 struct mlx5_flow_counter_pool *pool;
5476
5477 if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
5478 goto set_alarm;
5479 next_container:
5480 cont = MLX5_CNT_CONTAINER(sh, batch, 1);
5481 mcont = MLX5_CNT_CONTAINER(sh, batch, 0);
5482 /* Check if a resize was done and the container needs to be flipped. */
5483 if (cont != mcont) {
5484 if (cont->pools) {
5485 /* Clean the old container. */
5486 rte_free(cont->pools);
5487 memset(cont, 0, sizeof(*cont));
5488 }
5489 rte_cio_wmb();
5490 /* Flip the host container. */
5491 sh->cmng.mhi[batch] ^= (uint8_t)2;
5492 cont = mcont;
5493 }
5494 if (!cont->pools) {
5495 /* The case of two empty containers is unexpected. */
5496 if (unlikely(batch != sh->cmng.batch))
5497 goto set_alarm;
5498 batch ^= 0x1;
5499 pool_index = 0;
5500 goto next_container;
5501 }
5502 pool = cont->pools[pool_index];
5503 if (pool->raw_hw)
5504 /* There is a pool query in progress. */
5505 goto set_alarm;
5506 pool->raw_hw =
5507 LIST_FIRST(&sh->cmng.free_stat_raws);
5508 if (!pool->raw_hw)
5509 /* No free counter statistics raw memory. */
5510 goto set_alarm;
5511 dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read
5512 (&pool->a64_dcs);
5513 offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL;
5514 ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL -
5515 offset, NULL, NULL,
5516 pool->raw_hw->mem_mng->dm->id,
5517 (void *)(uintptr_t)
5518 (pool->raw_hw->data + offset),
5519 sh->devx_comp,
5520 (uint64_t)(uintptr_t)pool);
5521 if (ret) {
5522 DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
5523 " %d", pool->min_dcs->id);
5524 pool->raw_hw = NULL;
5525 goto set_alarm;
5526 }
5527 pool->raw_hw->min_dcs_id = dcs->id;
5528 LIST_REMOVE(pool->raw_hw, next);
5529 sh->cmng.pending_queries++;
5530 pool_index++;
5531 if (pool_index >= rte_atomic16_read(&cont->n_valid)) {
5532 batch ^= 0x1;
5533 pool_index = 0;
5534 }
5535 set_alarm:
5536 sh->cmng.batch = batch;
5537 sh->cmng.pool_index = pool_index;
5538 mlx5_set_query_alarm(sh);
5539 }
5540
5541 /**
5542 * Handler for the HW response with the ready values from an asynchronous batch
5543 * query. This function is expected to be called by the host thread.
5544 *
5545 * @param[in] sh
5546 * The pointer to the shared IB device context.
5547 * @param[in] async_id
5548 * The Devx async ID.
5549 * @param[in] status
5550 * The status of the completion.
5551 */
5552 void
5553 mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh,
5554 uint64_t async_id, int status)
5555 {
5556 struct mlx5_flow_counter_pool *pool =
5557 (struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
5558 struct mlx5_counter_stats_raw *raw_to_free;
5559
5560 if (unlikely(status)) {
5561 raw_to_free = pool->raw_hw;
5562 } else {
5563 raw_to_free = pool->raw;
5564 rte_spinlock_lock(&pool->sl);
5565 pool->raw = pool->raw_hw;
5566 rte_spinlock_unlock(&pool->sl);
5567 rte_atomic64_add(&pool->query_gen, 1);
5568 /* Make sure the new raw counter data is updated in memory. */
5569 rte_cio_wmb();
5570 }
5571 LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
5572 pool->raw_hw = NULL;
5573 sh->cmng.pending_queries--;
5574 }
5575
5576 /**
5577 * Translate the rte_flow group index to HW table value.
5578 *
5579 * @param[in] attributes
5580 * Pointer to flow attributes.
5581 * @param[in] external
5582 * Value is part of flow rule created by request external to PMD.
5583 * @param[in] group
5584 * rte_flow group index value.
5585 * @param[in] fdb_def_rule
5586 * Whether the FDB jump to table 1 is configured.
5587 * @param[out] table
5588 * HW table value.
5589 * @param[out] error
5590 * Pointer to error structure.
5591 *
5592 * @return
5593 * 0 on success, a negative errno value otherwise and rte_errno is set.
5594 */
5595 int
5596 mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external,
5597 uint32_t group, bool fdb_def_rule, uint32_t *table,
5598 struct rte_flow_error *error)
5599 {
5600 if (attributes->transfer && external && fdb_def_rule) {
5601 if (group == UINT32_MAX)
5602 return rte_flow_error_set
5603 (error, EINVAL,
5604 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5605 NULL,
5606 "group index not supported");
5607 *table = group + 1;
5608 } else {
5609 *table = group;
5610 }
5611 return 0;
5612 }
5613
5614 /**
5615 * Discover availability of metadata reg_c's.
5616 *
5617 * Iteratively use test flows to check availability.
5618 *
5619 * @param[in] dev
5620 * Pointer to the Ethernet device structure.
5621 *
5622 * @return
5623 * 0 on success, a negative errno value otherwise and rte_errno is set.
5624 */
5625 int
5626 mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
5627 {
5628 struct mlx5_priv *priv = dev->data->dev_private;
5629 struct mlx5_dev_config *config = &priv->config;
5630 enum modify_reg idx;
5631 int n = 0;
5632
5633 /* reg_c[0] and reg_c[1] are reserved. */
5634 config->flow_mreg_c[n++] = REG_C_0;
5635 config->flow_mreg_c[n++] = REG_C_1;
5636 /* Discover availability of other reg_c's. */
5637 for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
5638 struct rte_flow_attr attr = {
5639 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
5640 .priority = MLX5_FLOW_PRIO_RSVD,
5641 .ingress = 1,
5642 };
5643 struct rte_flow_item items[] = {
5644 [0] = {
5645 .type = RTE_FLOW_ITEM_TYPE_END,
5646 },
5647 };
5648 struct rte_flow_action actions[] = {
5649 [0] = {
5650 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
5651 .conf = &(struct mlx5_flow_action_copy_mreg){
5652 .src = REG_C_1,
5653 .dst = idx,
5654 },
5655 },
5656 [1] = {
5657 .type = RTE_FLOW_ACTION_TYPE_JUMP,
5658 .conf = &(struct rte_flow_action_jump){
5659 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
5660 },
5661 },
5662 [2] = {
5663 .type = RTE_FLOW_ACTION_TYPE_END,
5664 },
5665 };
5666 struct rte_flow *flow;
5667 struct rte_flow_error error;
5668
5669 if (!config->dv_flow_en)
5670 break;
5671 /* Create internal flow, validation skips copy action. */
5672 flow = flow_list_create(dev, NULL, &attr, items,
5673 actions, false, &error);
5674 if (!flow)
5675 continue;
5676 if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL))
5677 config->flow_mreg_c[n++] = idx;
5678 flow_list_destroy(dev, NULL, flow);
5679 }
5680 for (; n < MLX5_MREG_C_NUM; ++n)
5681 config->flow_mreg_c[n] = REG_NONE;
5682 return 0;
5683 }
5684
5685 /**
5686 * Dump flow raw HW data to a file.
5687 *
5688 * @param[in] dev
5689 * The pointer to Ethernet device.
5690 * @param[in] file
5691 * A pointer to a file for output.
5692 * @param[out] error
5693 * Perform verbose error reporting if not NULL. PMDs initialize this
5694 * structure in case of error only.
5695 * @return
5696 * 0 on success, a negative value otherwise.
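 *
 * A minimal usage sketch (illustrative only; the dump file path is a
 * placeholder):
 * @code
 * FILE *f = fopen("/tmp/mlx5_flow.dump", "w");
 *
 * if (f) {
 *         mlx5_flow_dev_dump(dev, f, NULL);
 *         fclose(f);
 * }
 * @endcode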
5697 */ 5698 int 5699 mlx5_flow_dev_dump(struct rte_eth_dev *dev, 5700 FILE *file, 5701 struct rte_flow_error *error __rte_unused) 5702 { 5703 struct mlx5_priv *priv = dev->data->dev_private; 5704 struct mlx5_ibv_shared *sh = priv->sh; 5705 5706 return mlx5_devx_cmd_flow_dump(sh->fdb_domain, sh->rx_domain, 5707 sh->tx_domain, file); 5708 } 5709