/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_flow.h"
#include "mlx5_glue.h"
#include "mlx5_prm.h"
#include "mlx5_rxtx.h"

/* Dev ops structure defined in mlx5.c */
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;

/** Device flow drivers. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;
#endif
extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;

const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;

const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
	[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
#endif
	[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
	[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
};

enum mlx5_expansion {
	MLX5_EXPANSION_ROOT,
	MLX5_EXPANSION_ROOT_OUTER,
	MLX5_EXPANSION_ROOT_ETH_VLAN,
	MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_ETH,
	MLX5_EXPANSION_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_VLAN,
	MLX5_EXPANSION_OUTER_IPV4,
	MLX5_EXPANSION_OUTER_IPV4_UDP,
	MLX5_EXPANSION_OUTER_IPV4_TCP,
	MLX5_EXPANSION_OUTER_IPV6,
	MLX5_EXPANSION_OUTER_IPV6_UDP,
	MLX5_EXPANSION_OUTER_IPV6_TCP,
	MLX5_EXPANSION_VXLAN,
	MLX5_EXPANSION_VXLAN_GPE,
	MLX5_EXPANSION_GRE,
	MLX5_EXPANSION_MPLS,
	MLX5_EXPANSION_ETH,
	MLX5_EXPANSION_ETH_VLAN,
	MLX5_EXPANSION_VLAN,
	MLX5_EXPANSION_IPV4,
	MLX5_EXPANSION_IPV4_UDP,
	MLX5_EXPANSION_IPV4_TCP,
	MLX5_EXPANSION_IPV6,
	MLX5_EXPANSION_IPV6_UDP,
	MLX5_EXPANSION_IPV6_TCP,
};
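
/*
 * Illustrative note, not part of the upstream driver: the expansion graph
 * defined below is what turns a single RSS rule into the more specific
 * patterns the hardware needs.  Roughly, with the graph rooted at
 * MLX5_EXPANSION_ROOT, a request such as
 *
 *   pattern: eth / ipv4 / end
 *   rss types: ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV4_TCP
 *
 * is expanded into
 *
 *   eth / ipv4 / end
 *   eth / ipv4 / udp / end
 *   eth / ipv4 / tcp / end
 *
 * because MLX5_EXPANSION_IPV4 lists IPV4_UDP and IPV4_TCP as next nodes and
 * their .rss_types intersect the requested hash types.
 */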

/** Supported expansion of items. */
static const struct rte_flow_expand_node mlx5_support_expansion[] = {
	[MLX5_EXPANSION_ROOT] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						 MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
						 MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_OUTER_ETH] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6,
						 MLX5_EXPANSION_MPLS),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_OUTER_IPV4] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV4_UDP,
			 MLX5_EXPANSION_OUTER_IPV4_TCP,
			 MLX5_EXPANSION_GRE,
			 MLX5_EXPANSION_IPV4,
			 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						 MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_OUTER_IPV6] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV6_UDP,
			 MLX5_EXPANSION_OUTER_IPV6_TCP,
			 MLX5_EXPANSION_IPV4,
			 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						 MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
	[MLX5_EXPANSION_VXLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
	[MLX5_EXPANSION_VXLAN_GPE] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						 MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
	},
	[MLX5_EXPANSION_GRE] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
		.type = RTE_FLOW_ITEM_TYPE_GRE,
	},
	[MLX5_EXPANSION_MPLS] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_MPLS,
	},
	[MLX5_EXPANSION_ETH] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_IPV4] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
						 MLX5_EXPANSION_IPV4_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_IPV4_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_IPV6] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
						 MLX5_EXPANSION_IPV6_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_IPV6_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
};

static const struct rte_flow_ops mlx5_flow_ops = {
	.validate = mlx5_flow_validate,
	.create = mlx5_flow_create,
	.destroy = mlx5_flow_destroy,
	.flush = mlx5_flow_flush,
	.isolate = mlx5_flow_isolate,
	.query = mlx5_flow_query,
	.dev_dump = mlx5_flow_dev_dump,
};

/* Convert FDIR request to Generic flow. */
struct mlx5_fdir {
	struct rte_flow_attr attr;
	struct rte_flow_item items[4];
	struct rte_flow_item_eth l2;
	struct rte_flow_item_eth l2_mask;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3_mask;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4_mask;
	struct rte_flow_action actions[2];
	struct rte_flow_action_queue queue;
};

/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};

/* Map of Verbs to Flow priority with 16 Verbs priorities. */
static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
	{ 9, 10, 11 }, { 12, 13, 14 },
};
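
/*
 * Illustrative sketch, not part of the upstream file: the mlx5_flow_ops
 * table above is what the generic rte_flow API dispatches to for this PMD.
 * A minimal application-side rule that ends up in mlx5_flow_create() via
 * rte_flow_create() could look like this (port_id is assumed to be a
 * started mlx5 port):
 *
 * @code
 * struct rte_flow_attr attr = { .ingress = 1 };
 * struct rte_flow_item pattern[] = {
 *	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *	{ .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * struct rte_flow_action_queue queue = { .index = 0 };
 * struct rte_flow_action actions[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_flow_error error;
 * struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *					   actions, &error);
 * @endcode
 */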

/* Tunnel information. */
struct mlx5_flow_tunnel_info {
	uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
};

static struct mlx5_flow_tunnel_info tunnels_info[] = {
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GENEVE,
		.ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GRE,
		.ptype = RTE_PTYPE_TUNNEL_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_NVGRE,
		.ptype = RTE_PTYPE_TUNNEL_NVGRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPIP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
};

/**
 * Translate tag ID to register.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] feature
 *   The feature that requests the register.
 * @param[in] id
 *   The requested register ID.
 * @param[out] error
 *   Error description in case of any.
 *
 * @return
 *   The requested register on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
enum modify_reg
mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
		     enum mlx5_feature_name feature,
		     uint32_t id,
		     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	enum modify_reg start_reg;

	switch (feature) {
	case MLX5_HAIRPIN_RX:
		return REG_B;
	case MLX5_HAIRPIN_TX:
		return REG_A;
	case MLX5_METADATA_RX:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_B;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_METADATA_TX:
		return REG_A;
	case MLX5_METADATA_FDB:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NONE;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_FLOW_MARK:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NONE;
		case MLX5_XMETA_MODE_META16:
			return REG_C_1;
		case MLX5_XMETA_MODE_META32:
			return REG_C_0;
		}
		break;
	case MLX5_COPY_MARK:
	case MLX5_MTR_SFX:
		/*
		 * The metadata COPY_MARK register is used only in the meter
		 * suffix sub-flow when a meter is present, so it is safe to
		 * share the same register.
		 */
		return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
	case MLX5_MTR_COLOR:
		RTE_ASSERT(priv->mtr_color_reg != REG_NONE);
		return priv->mtr_color_reg;
	case MLX5_APP_TAG:
		/*
		 * If a meter is enabled, it engages two registers: one for
		 * the color match and one for the flow match. If the meter
		 * color match is not using REG_C_2, the REG_C_x used by the
		 * meter color match must be skipped.
		 * If no meter is enabled, all available registers can be
		 * used.
		 */
		if (priv->mtr_color_reg != REG_NONE)
			start_reg = priv->mtr_color_reg != REG_C_2 ?
				    REG_C_3 : REG_C_4;
		else
			start_reg = REG_C_2;
		if (id > (REG_C_7 - start_reg))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "invalid tag id");
		if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NONE)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		/*
		 * This case means the meter is using a REG_C_x greater than
		 * 2. Take care not to conflict with the meter color REG_C_x.
		 * If the available index REG_C_y >= REG_C_x, skip the color
		 * register.
		 */
		if (start_reg == REG_C_3 && config->flow_mreg_c
		    [id + REG_C_3 - REG_C_0] >= priv->mtr_color_reg) {
			if (config->flow_mreg_c[id + 1 + REG_C_3 - REG_C_0] !=
			    REG_NONE)
				return config->flow_mreg_c
					[id + 1 + REG_C_3 - REG_C_0];
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		}
		return config->flow_mreg_c[id + start_reg - REG_C_0];
	}
	assert(false);
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, "invalid feature name");
}

/**
 * Check extensive flow metadata register support.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 * @return
 *   True if device supports extensive flow metadata register, otherwise false.
 */
bool
mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;

	/*
	 * Having an available reg_c can be regarded as supporting extensive
	 * flow metadata registers, which means:
	 * - metadata register copy action by modify header.
	 * - 16 modify header actions are supported.
	 * - reg_c's are preserved across different domains (FDB and NIC) on
	 *   packet loopback by flow lookup miss.
	 */
	return config->flow_mreg_c[2] != REG_NONE;
}
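
/*
 * Illustrative sketch, not part of the upstream file: how a caller would
 * typically consume mlx5_flow_get_reg_id() and mlx5_flow_ext_mreg_supported().
 * The local variables are hypothetical.
 *
 * @code
 * struct rte_flow_error error;
 * int reg;
 *
 * if (!mlx5_flow_ext_mreg_supported(dev))
 *	return -ENOTSUP;
 * reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, &error);
 * if (reg < 0)
 *	return reg; // rte_errno and error are already set by the helper
 * // "reg" now holds the REG_C_x to use for TAG index 0.
 * @endcode
 */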

/**
 * Discover the maximum number of priority available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   number of supported flow priority on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
			.port = (uint8_t)priv->ibv_port,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
	uint16_t vprio[] = { 8, 16 };
	int i;
	int priority = 0;

	if (!drop) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	for (i = 0; i != RTE_DIM(vprio); i++) {
		flow_attr.attr.priority = vprio[i] - 1;
		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
		priority = vprio[i];
	}
	mlx5_hrxq_drop_release(dev);
	switch (priority) {
	case 8:
		priority = RTE_DIM(priority_map_3);
		break;
	case 16:
		priority = RTE_DIM(priority_map_5);
		break;
	default:
		rte_errno = ENOTSUP;
		DRV_LOG(ERR,
			"port %u verbs maximum priority: %d expected 8/16",
			dev->data->port_id, priority);
		return -rte_errno;
	}
	DRV_LOG(INFO, "port %u flow maximum priority: %d",
		dev->data->port_id, priority);
	return priority;
}

/**
 * Adjust flow priority based on the highest layer and the requested priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] priority
 *   The rule base priority.
 * @param[in] subpriority
 *   The priority based on the items.
 *
 * @return
 *   The new priority.
 */
uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
				   uint32_t subpriority)
{
	uint32_t res = 0;
	struct mlx5_priv *priv = dev->data->dev_private;

	switch (priv->config.flow_prio) {
	case RTE_DIM(priority_map_3):
		res = priority_map_3[priority][subpriority];
		break;
	case RTE_DIM(priority_map_5):
		res = priority_map_5[priority][subpriority];
		break;
	}
	return res;
}
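
/*
 * Illustrative sketch, not part of the upstream file: the result of
 * mlx5_flow_discover_priorities() is meant to be stored in
 * priv->config.flow_prio, which then selects the map used by
 * mlx5_flow_adjust_priority().  With 16 Verbs priorities (flow_prio == 5),
 * a rule with base priority 1 and sub-priority 2 lands on Verbs priority
 * priority_map_5[1][2] == 5.
 *
 * @code
 * int flow_prio = mlx5_flow_discover_priorities(dev);
 *
 * if (flow_prio < 0)
 *	return flow_prio;
 * priv->config.flow_prio = flow_prio;
 * ...
 * uint32_t verbs_prio = mlx5_flow_adjust_priority(dev, 1, 2);
 * @endcode
 */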

/**
 * Verify the @p item specifications (spec, last, mask) are compatible with the
 * NIC capabilities.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] mask
 *   @p item->mask or flow default bit-masks.
 * @param[in] nic_mask
 *   Bit-masks covering supported fields by the NIC to compare with user mask.
 * @param[in] size
 *   Bit-masks size in bytes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
			  const uint8_t *mask,
			  const uint8_t *nic_mask,
			  unsigned int size,
			  struct rte_flow_error *error)
{
	unsigned int i;

	assert(nic_mask);
	for (i = 0; i < size; ++i)
		if ((nic_mask[i] | mask[i]) != nic_mask[i])
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "mask enables non supported"
						  " bits");
	if (!item->spec && (item->mask || item->last))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "mask/last without a spec is not"
					  " supported");
	if (item->spec && item->last) {
		uint8_t spec[size];
		uint8_t last[size];
		unsigned int i;
		int ret;

		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
			last[i] = ((const uint8_t *)item->last)[i] & mask[i];
		}
		ret = memcmp(spec, last, size);
		if (ret != 0)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "range is not valid");
	}
	return 0;
}

/**
 * Adjust the hash fields according to the @p flow information.
 *
 * @param[in] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] tunnel
 *   1 when the hash field is for a tunnel item.
 * @param[in] layer_types
 *   ETH_RSS_* types.
 * @param[in] hash_fields
 *   Item hash fields.
 *
 * @return
 *   The hash fields that should be used.
 */
uint64_t
mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
			    int tunnel __rte_unused, uint64_t layer_types,
			    uint64_t hash_fields)
{
	struct rte_flow *flow = dev_flow->flow;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	int rss_request_inner = flow->rss.level >= 2;

	/* Check RSS hash level for tunnel. */
	if (tunnel && rss_request_inner)
		hash_fields |= IBV_RX_HASH_INNER;
	else if (tunnel || rss_request_inner)
		return 0;
#endif
	/* Check if requested layer matches RSS hash fields. */
	if (!(flow->rss.types & layer_types))
		return 0;
	return hash_fields;
}

/**
 * Look up and set the ptype in the data Rx part. Only a single ptype can be
 * used; if several tunnel rules are used on this queue, the tunnel ptype will
 * be cleared.
 *
 * @param rxq_ctrl
 *   Rx queue to update.
 */
static void
flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	unsigned int i;
	uint32_t tunnel_ptype = 0;

	/* Look up for the ptype to use. */
	for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
		if (!rxq_ctrl->flow_tunnels_n[i])
			continue;
		if (!tunnel_ptype) {
			tunnel_ptype = tunnels_info[i].ptype;
		} else {
			tunnel_ptype = 0;
			break;
		}
	}
	rxq_ctrl->rxq.tunnel = tunnel_ptype;
}
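
/*
 * Worked example, not part of the upstream file: if a queue carries only
 * VXLAN tunnel flows, flow_rxq_tunnel_ptype_update() sets rxq.tunnel to
 * RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP so the datapath can report the
 * tunnel ptype in received mbufs.  As soon as a flow with a different tunnel
 * type (say GRE) is added on the same queue, the per-tunnel counters
 * disagree and rxq.tunnel is cleared to 0: a queue can only advertise one
 * tunnel ptype at a time.
 */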

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
 * flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] dev_flow
 *   Pointer to device flow structure.
 */
static void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow *flow = dev_flow->flow;
	const int mark = !!(dev_flow->actions &
			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	for (i = 0; i != flow->rss.queue_num; ++i) {
		int idx = (*flow->rss.queue)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		/*
		 * To support metadata register copy on Tx loopback,
		 * this must always be enabled (metadata may arrive
		 * from another port, not only from local flows).
		 */
		if (priv->config.dv_flow_en &&
		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    mlx5_flow_ext_mreg_supported(dev)) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n = 1;
		} else if (mark) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n++;
		}
		if (tunnel) {
			unsigned int j;

			/* Increase the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_flow->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]++;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Pointer to flow structure.
 */
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow *dev_flow;

	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
		flow_drv_rxq_flags_set(dev, dev_flow);
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * device flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] dev_flow
 *   Pointer to the device flow.
 */
static void
flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow *flow = dev_flow->flow;
	const int mark = !!(dev_flow->actions &
			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	assert(dev->data->dev_started);
	for (i = 0; i != flow->rss.queue_num; ++i) {
		int idx = (*flow->rss.queue)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		if (priv->config.dv_flow_en &&
		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    mlx5_flow_ext_mreg_supported(dev)) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n = 1;
		} else if (mark) {
			rxq_ctrl->flow_mark_n--;
			rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
		}
		if (tunnel) {
			unsigned int j;

			/* Decrease the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_flow->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]--;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * @p flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the flow.
 */
static void
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow *dev_flow;

	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
		flow_drv_rxq_flags_trim(dev, dev_flow);
}

/**
 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl;
		unsigned int j;

		if (!(*priv->rxqs)[i])
			continue;
		rxq_ctrl = container_of((*priv->rxqs)[i],
					struct mlx5_rxq_ctrl, rxq);
		rxq_ctrl->flow_mark_n = 0;
		rxq_ctrl->rxq.mark = 0;
		for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
			rxq_ctrl->flow_tunnels_n[j] = 0;
		rxq_ctrl->rxq.tunnel = 0;
	}
}

/*
 * Return a pointer to the desired action in the list of actions.
 *
 * @param[in] actions
 *   The list of actions to search the action in.
 * @param[in] action
 *   The action to find.
 *
 * @return
 *   Pointer to the action in the list, if found. NULL otherwise.
 */
const struct rte_flow_action *
mlx5_flow_find_action(const struct rte_flow_action *actions,
		      enum rte_flow_action_type action)
{
	if (actions == NULL)
		return NULL;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
		if (actions->type == action)
			return actions;
	return NULL;
}

/*
 * Validate the flag action.
 *
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_flag(uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{

	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't mark and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 flag"
					  " actions in same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "flag action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the mark action.
 *
 * @param[in] action
 *   Pointer to the mark action.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
			       uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	const struct rte_flow_action_mark *mark = action->conf;

	if (!mark)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "configuration cannot be null");
	if (mark->id >= MLX5_FLOW_MARK_MAX)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &mark->id,
					  "mark id must be in 0 <= id < "
					  RTE_STR(MLX5_FLOW_MARK_MAX));
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and mark in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't flag and mark in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 mark actions in same"
					  " flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "mark action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the drop action.
 *
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_drop(uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and mark in same flow");
	if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
			    MLX5_FLOW_FATE_ESWITCH_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "drop action not supported for "
					  "egress");
	return 0;
}
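
/*
 * Illustrative sketch, not part of the upstream file: the
 * mlx5_flow_validate_action_*() helpers are meant to be called while walking
 * the action list, accumulating the MLX5_FLOW_ACTION_* bits detected so far
 * so that conflicting combinations (two fate actions, drop + mark, ...) are
 * rejected.  Roughly:
 *
 * @code
 * uint64_t action_flags = 0;
 *
 * for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
 *	switch (actions->type) {
 *	case RTE_FLOW_ACTION_TYPE_DROP:
 *		ret = mlx5_flow_validate_action_drop(action_flags,
 *						     attr, error);
 *		if (ret < 0)
 *			return ret;
 *		action_flags |= MLX5_FLOW_ACTION_DROP;
 *		break;
 *	case RTE_FLOW_ACTION_TYPE_QUEUE:
 *		ret = mlx5_flow_validate_action_queue(actions, action_flags,
 *						      dev, attr, error);
 *		if (ret < 0)
 *			return ret;
 *		action_flags |= MLX5_FLOW_ACTION_QUEUE;
 *		break;
 *	...
 *	}
 * }
 * @endcode
 */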

/*
 * Validate the queue action.
 *
 * @param[in] action
 *   Pointer to the queue action.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
				uint64_t action_flags,
				struct rte_eth_dev *dev,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_queue *queue = action->conf;

	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (!priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No Rx queues configured");
	if (queue->index >= priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue index out of range");
	if (!(*priv->rxqs)[queue->index])
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue is not configured");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "queue action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the rss action.
 *
 * @param[in] action
 *   Pointer to the rss action.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[in] item_flags
 *   Items that were detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
			      uint64_t action_flags,
			      struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attr,
			      uint64_t item_flags,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_rss *rss = action->conf;
	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions"
					  " in same flow");
	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
	    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->func,
					  "RSS hash function not supported");
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (rss->level > 2)
#else
	if (rss->level > 1)
#endif
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->level,
					  "tunnel RSS is not supported");
	/* allow RSS key_len 0 in case of NULL (default) RSS key. */
	if (rss->key_len == 0 && rss->key != NULL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key length 0");
	if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too small");
	if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too large");
	if (rss->queue_num > priv->config.ind_table_max_size)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->queue_num,
					  "number of queues too large");
	if (rss->types & MLX5_RSS_HF_MASK)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->types,
					  "some RSS protocols are not"
					  " supported");
	if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
	    !(rss->types & ETH_RSS_IP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "L3 partial RSS requested but L3 RSS"
					  " type not specified");
	if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
	    !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "L4 partial RSS requested but L4 RSS"
					  " type not specified");
	if (!priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No Rx queues configured");
	if (!rss->queue_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No queues configured");
	for (i = 0; i != rss->queue_num; ++i) {
		if (rss->queue[i] >= priv->rxqs_n)
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 &rss->queue[i], "queue index out of range");
		if (!(*priv->rxqs)[rss->queue[i]])
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 &rss->queue[i], "queue is not configured");
	}
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "rss action not supported for "
					  "egress");
	if (rss->level > 1 && !tunnel)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "inner RSS is not supported for "
					  "non-tunnel flows");
	return 0;
}

/*
 * Validate the count action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "count action not supported for "
					  "egress");
	return 0;
}

/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attributes
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attributes,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t priority_max = priv->config.flow_prio - 1;

	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL, "groups is not supported");
	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
	    attributes->priority >= priority_max)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL, "priority out of range");
	if (attributes->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "egress is not supported");
	if (attributes->transfer && !priv->config.dv_esw_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  NULL, "transfer is not supported");
	if (!attributes->ingress)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "ingress attribute is mandatory");
	return 0;
}

/**
 * Validate ICMP6 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
			      uint64_t item_flags,
			      uint8_t target_protocol,
			      struct rte_flow_error *error)
{
	const struct rte_flow_item_icmp6 *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
				      MLX5_FLOW_LAYER_OUTER_L3_IPV6;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ICMP6 layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv6 is mandatory to filter on"
					  " ICMP6");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_icmp6_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_icmp6_mask,
		 sizeof(struct rte_flow_item_icmp6), error);
	if (ret < 0)
		return ret;
	return 0;
}
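
/*
 * Illustrative sketch, not part of the upstream file: every
 * mlx5_flow_validate_item_*() helper ends with the same
 * mlx5_flow_item_acceptable() call - take the user mask (or the rte_flow
 * default mask) and check it against the fields the driver can really match,
 * as done for ICMP6 above.  The caller side accumulates item_flags while
 * walking the pattern, e.g.:
 *
 * @code
 * uint64_t item_flags = 0;
 *
 * for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
 *	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 *
 *	switch (items->type) {
 *	case RTE_FLOW_ITEM_TYPE_ETH:
 *		ret = mlx5_flow_validate_item_eth(items, item_flags, error);
 *		if (ret < 0)
 *			return ret;
 *		item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
 *				       MLX5_FLOW_LAYER_OUTER_L2;
 *		break;
 *	...
 *	}
 * }
 * @endcode
 */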

/**
 * Validate ICMP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     uint8_t target_protocol,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_icmp *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
				      MLX5_FLOW_LAYER_OUTER_L3_IPV4;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ICMP layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv4 is mandatory to filter"
					  " on ICMP");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_icmp_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_icmp_mask,
		 sizeof(struct rte_flow_item_icmp), error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate Ethernet item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_eth *mask = item->mask;
	const struct rte_flow_item_eth nic_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.type = RTE_BE16(0xffff),
	};
	int ret;
	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
				       MLX5_FLOW_LAYER_OUTER_L2;

	if (item_flags & ethm)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L2 layers not supported");
	if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) ||
	    (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L2 layer should not follow "
					  "L3 layers");
	if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) ||
	    (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L2 layer should not follow VLAN");
	if (!mask)
		mask = &rte_flow_item_eth_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_eth),
					error);
	return ret;
}

/**
 * Validate VLAN item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] dev
 *   Ethernet device flow is being created on.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     struct rte_eth_dev *dev,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	const struct rte_flow_item_vlan nic_mask = {
		.tci = RTE_BE16(UINT16_MAX),
		.inner_type = RTE_BE16(UINT16_MAX),
	};
	uint16_t vlan_tag = 0;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	int ret;
	const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
					MLX5_FLOW_LAYER_INNER_L4) :
				       (MLX5_FLOW_LAYER_OUTER_L3 |
					MLX5_FLOW_LAYER_OUTER_L4);
	const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
					MLX5_FLOW_LAYER_OUTER_VLAN;

	if (item_flags & vlanm)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple VLAN layers not supported");
	else if ((item_flags & l34m) != 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VLAN cannot follow L3/L4 layer");
	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_vlan),
					error);
	if (ret)
		return ret;
	if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
		struct mlx5_priv *priv = dev->data->dev_private;

		if (priv->vmwa_context) {
			/*
			 * A non-NULL context means we have a virtual machine
			 * and SR-IOV enabled. We have to create a VLAN
			 * interface to make the hypervisor set up the
			 * E-Switch vport context correctly. We avoid creating
			 * multiple VLAN interfaces, so we cannot support a
			 * VLAN tag mask.
			 */
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "VLAN tag mask is not"
						  " supported in virtual"
						  " environment");
		}
	}
	if (spec) {
		vlan_tag = spec->tci;
		vlan_tag &= mask->tci;
	}
	/*
	 * From verbs perspective an empty VLAN is equivalent
	 * to a packet without VLAN layer.
	 */
	if (!vlan_tag)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "VLAN cannot be empty");
	return 0;
}

/**
 * Validate IPV4 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] acc_mask
 *   Acceptable mask, if NULL the default internal mask
 *   will be used to check whether item fields are supported.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     uint64_t last_item,
			     uint16_t ether_type,
			     const struct rte_flow_item_ipv4 *acc_mask,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *mask = item->mask;
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 nic_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.next_proto_id = 0xff,
		},
	};
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;
	uint8_t next_proto = 0xFF;
	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
				  MLX5_FLOW_LAYER_OUTER_VLAN |
				  MLX5_FLOW_LAYER_INNER_VLAN);

	if ((last_item & l2_vlan) && ether_type &&
	    ether_type != RTE_ETHER_TYPE_IPV4)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv4 cannot follow L2/VLAN layer "
					  "which ether type is not IPv4");
	if (item_flags & MLX5_FLOW_LAYER_IPIP) {
		if (mask && spec)
			next_proto = mask->hdr.next_proto_id &
				     spec->hdr.next_proto_id;
		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "multiple tunnel "
						  "not supported");
	}
	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "wrong tunnel type - IPv6 specified "
					  "but IPv4 item provided");
	if (item_flags & l3m)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L3 layers not supported");
	else if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an L4 layer.");
	else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
		 !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an NVGRE layer.");
	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	else if (mask->hdr.next_proto_id != 0 &&
		 mask->hdr.next_proto_id != 0xff)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
					  "partial mask is not supported"
					  " for protocol");
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					acc_mask ? (const uint8_t *)acc_mask
						 : (const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_ipv4),
					error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate IPV6 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] acc_mask
 *   Acceptable mask, if NULL the default internal mask
 *   will be used to check whether item fields are supported.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
1629 */ 1630 int 1631 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, 1632 uint64_t item_flags, 1633 uint64_t last_item, 1634 uint16_t ether_type, 1635 const struct rte_flow_item_ipv6 *acc_mask, 1636 struct rte_flow_error *error) 1637 { 1638 const struct rte_flow_item_ipv6 *mask = item->mask; 1639 const struct rte_flow_item_ipv6 *spec = item->spec; 1640 const struct rte_flow_item_ipv6 nic_mask = { 1641 .hdr = { 1642 .src_addr = 1643 "\xff\xff\xff\xff\xff\xff\xff\xff" 1644 "\xff\xff\xff\xff\xff\xff\xff\xff", 1645 .dst_addr = 1646 "\xff\xff\xff\xff\xff\xff\xff\xff" 1647 "\xff\xff\xff\xff\xff\xff\xff\xff", 1648 .vtc_flow = RTE_BE32(0xffffffff), 1649 .proto = 0xff, 1650 .hop_limits = 0xff, 1651 }, 1652 }; 1653 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1654 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1655 MLX5_FLOW_LAYER_OUTER_L3; 1656 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1657 MLX5_FLOW_LAYER_OUTER_L4; 1658 int ret; 1659 uint8_t next_proto = 0xFF; 1660 const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 | 1661 MLX5_FLOW_LAYER_OUTER_VLAN | 1662 MLX5_FLOW_LAYER_INNER_VLAN); 1663 1664 if ((last_item & l2_vlan) && ether_type && 1665 ether_type != RTE_ETHER_TYPE_IPV6) 1666 return rte_flow_error_set(error, EINVAL, 1667 RTE_FLOW_ERROR_TYPE_ITEM, item, 1668 "IPv6 cannot follow L2/VLAN layer " 1669 "which ether type is not IPv6"); 1670 if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) { 1671 if (mask && spec) 1672 next_proto = mask->hdr.proto & spec->hdr.proto; 1673 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) 1674 return rte_flow_error_set(error, EINVAL, 1675 RTE_FLOW_ERROR_TYPE_ITEM, 1676 item, 1677 "multiple tunnel " 1678 "not supported"); 1679 } 1680 if (item_flags & MLX5_FLOW_LAYER_IPIP) 1681 return rte_flow_error_set(error, EINVAL, 1682 RTE_FLOW_ERROR_TYPE_ITEM, item, 1683 "wrong tunnel type - IPv4 specified " 1684 "but IPv6 item provided"); 1685 if (item_flags & l3m) 1686 return rte_flow_error_set(error, ENOTSUP, 1687 RTE_FLOW_ERROR_TYPE_ITEM, item, 1688 "multiple L3 layers not supported"); 1689 else if (item_flags & l4m) 1690 return rte_flow_error_set(error, EINVAL, 1691 RTE_FLOW_ERROR_TYPE_ITEM, item, 1692 "L3 cannot follow an L4 layer."); 1693 else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && 1694 !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) 1695 return rte_flow_error_set(error, EINVAL, 1696 RTE_FLOW_ERROR_TYPE_ITEM, item, 1697 "L3 cannot follow an NVGRE layer."); 1698 if (!mask) 1699 mask = &rte_flow_item_ipv6_mask; 1700 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1701 acc_mask ? (const uint8_t *)acc_mask 1702 : (const uint8_t *)&nic_mask, 1703 sizeof(struct rte_flow_item_ipv6), 1704 error); 1705 if (ret < 0) 1706 return ret; 1707 return 0; 1708 } 1709 1710 /** 1711 * Validate UDP item. 1712 * 1713 * @param[in] item 1714 * Item specification. 1715 * @param[in] item_flags 1716 * Bit-fields that holds the items detected until now. 1717 * @param[in] target_protocol 1718 * The next protocol in the previous item. 1719 * @param[in] flow_mask 1720 * mlx5 flow-specific (DV, verbs, etc.) supported header fields mask. 1721 * @param[out] error 1722 * Pointer to error structure. 1723 * 1724 * @return 1725 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1726 */ 1727 int 1728 mlx5_flow_validate_item_udp(const struct rte_flow_item *item, 1729 uint64_t item_flags, 1730 uint8_t target_protocol, 1731 struct rte_flow_error *error) 1732 { 1733 const struct rte_flow_item_udp *mask = item->mask; 1734 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1735 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1736 MLX5_FLOW_LAYER_OUTER_L3; 1737 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1738 MLX5_FLOW_LAYER_OUTER_L4; 1739 int ret; 1740 1741 if (target_protocol != 0xff && target_protocol != IPPROTO_UDP) 1742 return rte_flow_error_set(error, EINVAL, 1743 RTE_FLOW_ERROR_TYPE_ITEM, item, 1744 "protocol filtering not compatible" 1745 " with UDP layer"); 1746 if (!(item_flags & l3m)) 1747 return rte_flow_error_set(error, EINVAL, 1748 RTE_FLOW_ERROR_TYPE_ITEM, item, 1749 "L3 is mandatory to filter on L4"); 1750 if (item_flags & l4m) 1751 return rte_flow_error_set(error, EINVAL, 1752 RTE_FLOW_ERROR_TYPE_ITEM, item, 1753 "multiple L4 layers not supported"); 1754 if (!mask) 1755 mask = &rte_flow_item_udp_mask; 1756 ret = mlx5_flow_item_acceptable 1757 (item, (const uint8_t *)mask, 1758 (const uint8_t *)&rte_flow_item_udp_mask, 1759 sizeof(struct rte_flow_item_udp), error); 1760 if (ret < 0) 1761 return ret; 1762 return 0; 1763 } 1764 1765 /** 1766 * Validate TCP item. 1767 * 1768 * @param[in] item 1769 * Item specification. 1770 * @param[in] item_flags 1771 * Bit-fields that holds the items detected until now. 1772 * @param[in] target_protocol 1773 * The next protocol in the previous item. 1774 * @param[out] error 1775 * Pointer to error structure. 1776 * 1777 * @return 1778 * 0 on success, a negative errno value otherwise and rte_errno is set. 1779 */ 1780 int 1781 mlx5_flow_validate_item_tcp(const struct rte_flow_item *item, 1782 uint64_t item_flags, 1783 uint8_t target_protocol, 1784 const struct rte_flow_item_tcp *flow_mask, 1785 struct rte_flow_error *error) 1786 { 1787 const struct rte_flow_item_tcp *mask = item->mask; 1788 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1789 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1790 MLX5_FLOW_LAYER_OUTER_L3; 1791 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1792 MLX5_FLOW_LAYER_OUTER_L4; 1793 int ret; 1794 1795 assert(flow_mask); 1796 if (target_protocol != 0xff && target_protocol != IPPROTO_TCP) 1797 return rte_flow_error_set(error, EINVAL, 1798 RTE_FLOW_ERROR_TYPE_ITEM, item, 1799 "protocol filtering not compatible" 1800 " with TCP layer"); 1801 if (!(item_flags & l3m)) 1802 return rte_flow_error_set(error, EINVAL, 1803 RTE_FLOW_ERROR_TYPE_ITEM, item, 1804 "L3 is mandatory to filter on L4"); 1805 if (item_flags & l4m) 1806 return rte_flow_error_set(error, EINVAL, 1807 RTE_FLOW_ERROR_TYPE_ITEM, item, 1808 "multiple L4 layers not supported"); 1809 if (!mask) 1810 mask = &rte_flow_item_tcp_mask; 1811 ret = mlx5_flow_item_acceptable 1812 (item, (const uint8_t *)mask, 1813 (const uint8_t *)flow_mask, 1814 sizeof(struct rte_flow_item_tcp), error); 1815 if (ret < 0) 1816 return ret; 1817 return 0; 1818 } 1819 1820 /** 1821 * Validate VXLAN item. 1822 * 1823 * @param[in] item 1824 * Item specification. 1825 * @param[in] item_flags 1826 * Bit-fields that holds the items detected until now. 1827 * @param[in] target_protocol 1828 * The next protocol in the previous item. 1829 * @param[out] error 1830 * Pointer to error structure. 1831 * 1832 * @return 1833 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1834 */ 1835 int 1836 mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, 1837 uint64_t item_flags, 1838 struct rte_flow_error *error) 1839 { 1840 const struct rte_flow_item_vxlan *spec = item->spec; 1841 const struct rte_flow_item_vxlan *mask = item->mask; 1842 int ret; 1843 union vni { 1844 uint32_t vlan_id; 1845 uint8_t vni[4]; 1846 } id = { .vlan_id = 0, }; 1847 uint32_t vlan_id = 0; 1848 1849 1850 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 1851 return rte_flow_error_set(error, ENOTSUP, 1852 RTE_FLOW_ERROR_TYPE_ITEM, item, 1853 "multiple tunnel layers not" 1854 " supported"); 1855 /* 1856 * Verify only UDPv4 is present as defined in 1857 * https://tools.ietf.org/html/rfc7348 1858 */ 1859 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 1860 return rte_flow_error_set(error, EINVAL, 1861 RTE_FLOW_ERROR_TYPE_ITEM, item, 1862 "no outer UDP layer found"); 1863 if (!mask) 1864 mask = &rte_flow_item_vxlan_mask; 1865 ret = mlx5_flow_item_acceptable 1866 (item, (const uint8_t *)mask, 1867 (const uint8_t *)&rte_flow_item_vxlan_mask, 1868 sizeof(struct rte_flow_item_vxlan), 1869 error); 1870 if (ret < 0) 1871 return ret; 1872 if (spec) { 1873 memcpy(&id.vni[1], spec->vni, 3); 1874 vlan_id = id.vlan_id; 1875 memcpy(&id.vni[1], mask->vni, 3); 1876 vlan_id &= id.vlan_id; 1877 } 1878 /* 1879 * Tunnel id 0 is equivalent as not adding a VXLAN layer, if 1880 * only this layer is defined in the Verbs specification it is 1881 * interpreted as wildcard and all packets will match this 1882 * rule, if it follows a full stack layer (ex: eth / ipv4 / 1883 * udp), all packets matching the layers before will also 1884 * match this rule. To avoid such situation, VNI 0 is 1885 * currently refused. 1886 */ 1887 if (!vlan_id) 1888 return rte_flow_error_set(error, ENOTSUP, 1889 RTE_FLOW_ERROR_TYPE_ITEM, item, 1890 "VXLAN vni cannot be 0"); 1891 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 1892 return rte_flow_error_set(error, ENOTSUP, 1893 RTE_FLOW_ERROR_TYPE_ITEM, item, 1894 "VXLAN tunnel must be fully defined"); 1895 return 0; 1896 } 1897 1898 /** 1899 * Validate VXLAN_GPE item. 1900 * 1901 * @param[in] item 1902 * Item specification. 1903 * @param[in] item_flags 1904 * Bit-fields that holds the items detected until now. 1905 * @param[in] priv 1906 * Pointer to the private data structure. 1907 * @param[in] target_protocol 1908 * The next protocol in the previous item. 1909 * @param[out] error 1910 * Pointer to error structure. 1911 * 1912 * @return 1913 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1914 */ 1915 int 1916 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, 1917 uint64_t item_flags, 1918 struct rte_eth_dev *dev, 1919 struct rte_flow_error *error) 1920 { 1921 struct mlx5_priv *priv = dev->data->dev_private; 1922 const struct rte_flow_item_vxlan_gpe *spec = item->spec; 1923 const struct rte_flow_item_vxlan_gpe *mask = item->mask; 1924 int ret; 1925 union vni { 1926 uint32_t vlan_id; 1927 uint8_t vni[4]; 1928 } id = { .vlan_id = 0, }; 1929 uint32_t vlan_id = 0; 1930 1931 if (!priv->config.l3_vxlan_en) 1932 return rte_flow_error_set(error, ENOTSUP, 1933 RTE_FLOW_ERROR_TYPE_ITEM, item, 1934 "L3 VXLAN is not enabled by device" 1935 " parameter and/or not configured in" 1936 " firmware"); 1937 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 1938 return rte_flow_error_set(error, ENOTSUP, 1939 RTE_FLOW_ERROR_TYPE_ITEM, item, 1940 "multiple tunnel layers not" 1941 " supported"); 1942 /* 1943 * Verify only UDPv4 is present as defined in 1944 * https://tools.ietf.org/html/rfc7348 1945 */ 1946 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 1947 return rte_flow_error_set(error, EINVAL, 1948 RTE_FLOW_ERROR_TYPE_ITEM, item, 1949 "no outer UDP layer found"); 1950 if (!mask) 1951 mask = &rte_flow_item_vxlan_gpe_mask; 1952 ret = mlx5_flow_item_acceptable 1953 (item, (const uint8_t *)mask, 1954 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask, 1955 sizeof(struct rte_flow_item_vxlan_gpe), 1956 error); 1957 if (ret < 0) 1958 return ret; 1959 if (spec) { 1960 if (spec->protocol) 1961 return rte_flow_error_set(error, ENOTSUP, 1962 RTE_FLOW_ERROR_TYPE_ITEM, 1963 item, 1964 "VxLAN-GPE protocol" 1965 " not supported"); 1966 memcpy(&id.vni[1], spec->vni, 3); 1967 vlan_id = id.vlan_id; 1968 memcpy(&id.vni[1], mask->vni, 3); 1969 vlan_id &= id.vlan_id; 1970 } 1971 /* 1972 * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this 1973 * layer is defined in the Verbs specification it is interpreted as 1974 * wildcard and all packets will match this rule, if it follows a full 1975 * stack layer (ex: eth / ipv4 / udp), all packets matching the layers 1976 * before will also match this rule. To avoid such situation, VNI 0 1977 * is currently refused. 1978 */ 1979 if (!vlan_id) 1980 return rte_flow_error_set(error, ENOTSUP, 1981 RTE_FLOW_ERROR_TYPE_ITEM, item, 1982 "VXLAN-GPE vni cannot be 0"); 1983 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 1984 return rte_flow_error_set(error, ENOTSUP, 1985 RTE_FLOW_ERROR_TYPE_ITEM, item, 1986 "VXLAN-GPE tunnel must be fully" 1987 " defined"); 1988 return 0; 1989 } 1990 /** 1991 * Validate GRE Key item. 1992 * 1993 * @param[in] item 1994 * Item specification. 1995 * @param[in] item_flags 1996 * Bit flags to mark detected items. 1997 * @param[in] gre_item 1998 * Pointer to gre_item 1999 * @param[out] error 2000 * Pointer to error structure. 2001 * 2002 * @return 2003 * 0 on success, a negative errno value otherwise and rte_errno is set. 
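 *
 * Illustrative sketch only (hypothetical names; the outer eth / ipv4 items
 * are omitted). The preceding GRE item has to advertise the K (key present)
 * bit for the key item to be accepted:
 *
 * @code
 *	struct rte_flow_item_gre gre_spec = { .c_rsvd0_ver = RTE_BE16(0x2000) };
 *	struct rte_flow_item_gre gre_mask = { .c_rsvd0_ver = RTE_BE16(0x2000) };
 *	rte_be32_t key_spec = RTE_BE32(0x12345678);
 *	struct rte_flow_item gre_items[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_GRE,
 *		  .spec = &gre_spec, .mask = &gre_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_GRE_KEY, .spec = &key_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 * @endcode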
2004 */ 2005 int 2006 mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item, 2007 uint64_t item_flags, 2008 const struct rte_flow_item *gre_item, 2009 struct rte_flow_error *error) 2010 { 2011 const rte_be32_t *mask = item->mask; 2012 int ret = 0; 2013 rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX); 2014 const struct rte_flow_item_gre *gre_spec; 2015 const struct rte_flow_item_gre *gre_mask; 2016 2017 if (item_flags & MLX5_FLOW_LAYER_GRE_KEY) 2018 return rte_flow_error_set(error, ENOTSUP, 2019 RTE_FLOW_ERROR_TYPE_ITEM, item, 2020 "Multiple GRE key not support"); 2021 if (!(item_flags & MLX5_FLOW_LAYER_GRE)) 2022 return rte_flow_error_set(error, ENOTSUP, 2023 RTE_FLOW_ERROR_TYPE_ITEM, item, 2024 "No preceding GRE header"); 2025 if (item_flags & MLX5_FLOW_LAYER_INNER) 2026 return rte_flow_error_set(error, ENOTSUP, 2027 RTE_FLOW_ERROR_TYPE_ITEM, item, 2028 "GRE key following a wrong item"); 2029 gre_mask = gre_item->mask; 2030 if (!gre_mask) 2031 gre_mask = &rte_flow_item_gre_mask; 2032 gre_spec = gre_item->spec; 2033 if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) && 2034 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000))) 2035 return rte_flow_error_set(error, EINVAL, 2036 RTE_FLOW_ERROR_TYPE_ITEM, item, 2037 "Key bit must be on"); 2038 2039 if (!mask) 2040 mask = &gre_key_default_mask; 2041 ret = mlx5_flow_item_acceptable 2042 (item, (const uint8_t *)mask, 2043 (const uint8_t *)&gre_key_default_mask, 2044 sizeof(rte_be32_t), error); 2045 return ret; 2046 } 2047 2048 /** 2049 * Validate GRE item. 2050 * 2051 * @param[in] item 2052 * Item specification. 2053 * @param[in] item_flags 2054 * Bit flags to mark detected items. 2055 * @param[in] target_protocol 2056 * The next protocol in the previous item. 2057 * @param[out] error 2058 * Pointer to error structure. 2059 * 2060 * @return 2061 * 0 on success, a negative errno value otherwise and rte_errno is set. 
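 *
 * Note: the device-level mask used below only allows matching on the GRE
 * C/K/S flag bits (0x8000 | 0x2000 | 0x1000 = 0xB000 in the c_rsvd0_ver
 * word) and on the protocol field; masks enabling any other bit are
 * expected to be rejected by mlx5_flow_item_acceptable().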
2062 */ 2063 int 2064 mlx5_flow_validate_item_gre(const struct rte_flow_item *item, 2065 uint64_t item_flags, 2066 uint8_t target_protocol, 2067 struct rte_flow_error *error) 2068 { 2069 const struct rte_flow_item_gre *spec __rte_unused = item->spec; 2070 const struct rte_flow_item_gre *mask = item->mask; 2071 int ret; 2072 const struct rte_flow_item_gre nic_mask = { 2073 .c_rsvd0_ver = RTE_BE16(0xB000), 2074 .protocol = RTE_BE16(UINT16_MAX), 2075 }; 2076 2077 if (target_protocol != 0xff && target_protocol != IPPROTO_GRE) 2078 return rte_flow_error_set(error, EINVAL, 2079 RTE_FLOW_ERROR_TYPE_ITEM, item, 2080 "protocol filtering not compatible" 2081 " with this GRE layer"); 2082 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 2083 return rte_flow_error_set(error, ENOTSUP, 2084 RTE_FLOW_ERROR_TYPE_ITEM, item, 2085 "multiple tunnel layers not" 2086 " supported"); 2087 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3)) 2088 return rte_flow_error_set(error, ENOTSUP, 2089 RTE_FLOW_ERROR_TYPE_ITEM, item, 2090 "L3 Layer is missing"); 2091 if (!mask) 2092 mask = &rte_flow_item_gre_mask; 2093 ret = mlx5_flow_item_acceptable 2094 (item, (const uint8_t *)mask, 2095 (const uint8_t *)&nic_mask, 2096 sizeof(struct rte_flow_item_gre), error); 2097 if (ret < 0) 2098 return ret; 2099 #ifndef HAVE_MLX5DV_DR 2100 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT 2101 if (spec && (spec->protocol & mask->protocol)) 2102 return rte_flow_error_set(error, ENOTSUP, 2103 RTE_FLOW_ERROR_TYPE_ITEM, item, 2104 "without MPLS support the" 2105 " specification cannot be used for" 2106 " filtering"); 2107 #endif 2108 #endif 2109 return 0; 2110 } 2111 2112 /** 2113 * Validate Geneve item. 2114 * 2115 * @param[in] item 2116 * Item specification. 2117 * @param[in] item_flags 2118 * Bit-fields that holds the items detected until now. 2119 * @param[in] dev 2120 * Pointer to the rte_eth_dev structure. 2121 * @param[out] error 2122 * Pointer to error structure. 2123 * 2124 * @return 2125 * 0 on success, a negative errno value otherwise and rte_errno is set. 2126 */ 2127 2128 int 2129 mlx5_flow_validate_item_geneve(const struct rte_flow_item *item, 2130 uint64_t item_flags, 2131 struct rte_eth_dev *dev, 2132 struct rte_flow_error *error) 2133 { 2134 struct mlx5_priv *priv = dev->data->dev_private; 2135 const struct rte_flow_item_geneve *spec = item->spec; 2136 const struct rte_flow_item_geneve *mask = item->mask; 2137 int ret; 2138 uint16_t gbhdr; 2139 uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
2140 MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0; 2141 const struct rte_flow_item_geneve nic_mask = { 2142 .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80), 2143 .vni = "\xff\xff\xff", 2144 .protocol = RTE_BE16(UINT16_MAX), 2145 }; 2146 2147 if (!(priv->config.hca_attr.flex_parser_protocols & 2148 MLX5_HCA_FLEX_GENEVE_ENABLED) || 2149 !priv->config.hca_attr.tunnel_stateless_geneve_rx) 2150 return rte_flow_error_set(error, ENOTSUP, 2151 RTE_FLOW_ERROR_TYPE_ITEM, item, 2152 "L3 Geneve is not enabled by device" 2153 " parameter and/or not configured in" 2154 " firmware"); 2155 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 2156 return rte_flow_error_set(error, ENOTSUP, 2157 RTE_FLOW_ERROR_TYPE_ITEM, item, 2158 "multiple tunnel layers not" 2159 " supported"); 2160 /* 2161 * Verify only UDPv4 is present as defined in 2162 * https://tools.ietf.org/html/rfc7348 2163 */ 2164 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 2165 return rte_flow_error_set(error, EINVAL, 2166 RTE_FLOW_ERROR_TYPE_ITEM, item, 2167 "no outer UDP layer found"); 2168 if (!mask) 2169 mask = &rte_flow_item_geneve_mask; 2170 ret = mlx5_flow_item_acceptable 2171 (item, (const uint8_t *)mask, 2172 (const uint8_t *)&nic_mask, 2173 sizeof(struct rte_flow_item_geneve), error); 2174 if (ret) 2175 return ret; 2176 if (spec) { 2177 gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0); 2178 if (MLX5_GENEVE_VER_VAL(gbhdr) || 2179 MLX5_GENEVE_CRITO_VAL(gbhdr) || 2180 MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1) 2181 return rte_flow_error_set(error, ENOTSUP, 2182 RTE_FLOW_ERROR_TYPE_ITEM, 2183 item, 2184 "Geneve protocol unsupported" 2185 " fields are being used"); 2186 if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len) 2187 return rte_flow_error_set 2188 (error, ENOTSUP, 2189 RTE_FLOW_ERROR_TYPE_ITEM, 2190 item, 2191 "Unsupported Geneve options length"); 2192 } 2193 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 2194 return rte_flow_error_set 2195 (error, ENOTSUP, 2196 RTE_FLOW_ERROR_TYPE_ITEM, item, 2197 "Geneve tunnel must be fully defined"); 2198 return 0; 2199 } 2200 2201 /** 2202 * Validate MPLS item. 2203 * 2204 * @param[in] dev 2205 * Pointer to the rte_eth_dev structure. 2206 * @param[in] item 2207 * Item specification. 2208 * @param[in] item_flags 2209 * Bit-fields that holds the items detected until now. 2210 * @param[in] prev_layer 2211 * The protocol layer indicated in previous item. 2212 * @param[out] error 2213 * Pointer to error structure. 2214 * 2215 * @return 2216 * 0 on success, a negative errno value otherwise and rte_errno is set. 2217 */ 2218 int 2219 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, 2220 const struct rte_flow_item *item __rte_unused, 2221 uint64_t item_flags __rte_unused, 2222 uint64_t prev_layer __rte_unused, 2223 struct rte_flow_error *error) 2224 { 2225 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT 2226 const struct rte_flow_item_mpls *mask = item->mask; 2227 struct mlx5_priv *priv = dev->data->dev_private; 2228 int ret; 2229 2230 if (!priv->config.mpls_en) 2231 return rte_flow_error_set(error, ENOTSUP, 2232 RTE_FLOW_ERROR_TYPE_ITEM, item, 2233 "MPLS not supported or" 2234 " disabled in firmware" 2235 " configuration."); 2236 /* MPLS over IP, UDP, GRE is allowed */ 2237 if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 | 2238 MLX5_FLOW_LAYER_OUTER_L4_UDP | 2239 MLX5_FLOW_LAYER_GRE))) 2240 return rte_flow_error_set(error, EINVAL, 2241 RTE_FLOW_ERROR_TYPE_ITEM, item, 2242 "protocol filtering not compatible" 2243 " with MPLS layer"); 2244 /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. 
*/ 2245 if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) && 2246 !(item_flags & MLX5_FLOW_LAYER_GRE)) 2247 return rte_flow_error_set(error, ENOTSUP, 2248 RTE_FLOW_ERROR_TYPE_ITEM, item, 2249 "multiple tunnel layers not" 2250 " supported"); 2251 if (!mask) 2252 mask = &rte_flow_item_mpls_mask; 2253 ret = mlx5_flow_item_acceptable 2254 (item, (const uint8_t *)mask, 2255 (const uint8_t *)&rte_flow_item_mpls_mask, 2256 sizeof(struct rte_flow_item_mpls), error); 2257 if (ret < 0) 2258 return ret; 2259 return 0; 2260 #endif 2261 return rte_flow_error_set(error, ENOTSUP, 2262 RTE_FLOW_ERROR_TYPE_ITEM, item, 2263 "MPLS is not supported by Verbs, please" 2264 " update."); 2265 } 2266 2267 /** 2268 * Validate NVGRE item. 2269 * 2270 * @param[in] item 2271 * Item specification. 2272 * @param[in] item_flags 2273 * Bit flags to mark detected items. 2274 * @param[in] target_protocol 2275 * The next protocol in the previous item. 2276 * @param[out] error 2277 * Pointer to error structure. 2278 * 2279 * @return 2280 * 0 on success, a negative errno value otherwise and rte_errno is set. 2281 */ 2282 int 2283 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item, 2284 uint64_t item_flags, 2285 uint8_t target_protocol, 2286 struct rte_flow_error *error) 2287 { 2288 const struct rte_flow_item_nvgre *mask = item->mask; 2289 int ret; 2290 2291 if (target_protocol != 0xff && target_protocol != IPPROTO_GRE) 2292 return rte_flow_error_set(error, EINVAL, 2293 RTE_FLOW_ERROR_TYPE_ITEM, item, 2294 "protocol filtering not compatible" 2295 " with this GRE layer"); 2296 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 2297 return rte_flow_error_set(error, ENOTSUP, 2298 RTE_FLOW_ERROR_TYPE_ITEM, item, 2299 "multiple tunnel layers not" 2300 " supported"); 2301 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3)) 2302 return rte_flow_error_set(error, ENOTSUP, 2303 RTE_FLOW_ERROR_TYPE_ITEM, item, 2304 "L3 Layer is missing"); 2305 if (!mask) 2306 mask = &rte_flow_item_nvgre_mask; 2307 ret = mlx5_flow_item_acceptable 2308 (item, (const uint8_t *)mask, 2309 (const uint8_t *)&rte_flow_item_nvgre_mask, 2310 sizeof(struct rte_flow_item_nvgre), error); 2311 if (ret < 0) 2312 return ret; 2313 return 0; 2314 } 2315 2316 /* Allocate unique ID for the split Q/RSS subflows. */ 2317 static uint32_t 2318 flow_qrss_get_id(struct rte_eth_dev *dev) 2319 { 2320 struct mlx5_priv *priv = dev->data->dev_private; 2321 uint32_t qrss_id, ret; 2322 2323 ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id); 2324 if (ret) 2325 return 0; 2326 assert(qrss_id); 2327 return qrss_id; 2328 } 2329 2330 /* Free unique ID for the split Q/RSS subflows. */ 2331 static void 2332 flow_qrss_free_id(struct rte_eth_dev *dev, uint32_t qrss_id) 2333 { 2334 struct mlx5_priv *priv = dev->data->dev_private; 2335 2336 if (qrss_id) 2337 mlx5_flow_id_release(priv->qrss_id_pool, qrss_id); 2338 } 2339 2340 /** 2341 * Release resource related QUEUE/RSS action split. 2342 * 2343 * @param dev 2344 * Pointer to Ethernet device. 2345 * @param flow 2346 * Flow to release id's from. 
2347 */ 2348 static void 2349 flow_mreg_split_qrss_release(struct rte_eth_dev *dev, 2350 struct rte_flow *flow) 2351 { 2352 struct mlx5_flow *dev_flow; 2353 2354 LIST_FOREACH(dev_flow, &flow->dev_flows, next) 2355 if (dev_flow->qrss_id) 2356 flow_qrss_free_id(dev, dev_flow->qrss_id); 2357 } 2358 2359 static int 2360 flow_null_validate(struct rte_eth_dev *dev __rte_unused, 2361 const struct rte_flow_attr *attr __rte_unused, 2362 const struct rte_flow_item items[] __rte_unused, 2363 const struct rte_flow_action actions[] __rte_unused, 2364 bool external __rte_unused, 2365 struct rte_flow_error *error) 2366 { 2367 return rte_flow_error_set(error, ENOTSUP, 2368 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2369 } 2370 2371 static struct mlx5_flow * 2372 flow_null_prepare(const struct rte_flow_attr *attr __rte_unused, 2373 const struct rte_flow_item items[] __rte_unused, 2374 const struct rte_flow_action actions[] __rte_unused, 2375 struct rte_flow_error *error) 2376 { 2377 rte_flow_error_set(error, ENOTSUP, 2378 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2379 return NULL; 2380 } 2381 2382 static int 2383 flow_null_translate(struct rte_eth_dev *dev __rte_unused, 2384 struct mlx5_flow *dev_flow __rte_unused, 2385 const struct rte_flow_attr *attr __rte_unused, 2386 const struct rte_flow_item items[] __rte_unused, 2387 const struct rte_flow_action actions[] __rte_unused, 2388 struct rte_flow_error *error) 2389 { 2390 return rte_flow_error_set(error, ENOTSUP, 2391 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2392 } 2393 2394 static int 2395 flow_null_apply(struct rte_eth_dev *dev __rte_unused, 2396 struct rte_flow *flow __rte_unused, 2397 struct rte_flow_error *error) 2398 { 2399 return rte_flow_error_set(error, ENOTSUP, 2400 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2401 } 2402 2403 static void 2404 flow_null_remove(struct rte_eth_dev *dev __rte_unused, 2405 struct rte_flow *flow __rte_unused) 2406 { 2407 } 2408 2409 static void 2410 flow_null_destroy(struct rte_eth_dev *dev __rte_unused, 2411 struct rte_flow *flow __rte_unused) 2412 { 2413 } 2414 2415 static int 2416 flow_null_query(struct rte_eth_dev *dev __rte_unused, 2417 struct rte_flow *flow __rte_unused, 2418 const struct rte_flow_action *actions __rte_unused, 2419 void *data __rte_unused, 2420 struct rte_flow_error *error) 2421 { 2422 return rte_flow_error_set(error, ENOTSUP, 2423 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2424 } 2425 2426 /* Void driver to protect from null pointer reference. */ 2427 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = { 2428 .validate = flow_null_validate, 2429 .prepare = flow_null_prepare, 2430 .translate = flow_null_translate, 2431 .apply = flow_null_apply, 2432 .remove = flow_null_remove, 2433 .destroy = flow_null_destroy, 2434 .query = flow_null_query, 2435 }; 2436 2437 /** 2438 * Select flow driver type according to flow attributes and device 2439 * configuration. 2440 * 2441 * @param[in] dev 2442 * Pointer to the dev structure. 2443 * @param[in] attr 2444 * Pointer to the flow attributes. 2445 * 2446 * @return 2447 * flow driver type, MLX5_FLOW_TYPE_MAX otherwise. 2448 */ 2449 static enum mlx5_flow_drv_type 2450 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) 2451 { 2452 struct mlx5_priv *priv = dev->data->dev_private; 2453 enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX; 2454 2455 if (attr->transfer && priv->config.dv_esw_en) 2456 type = MLX5_FLOW_TYPE_DV; 2457 if (!attr->transfer) 2458 type = priv->config.dv_flow_en ? 
MLX5_FLOW_TYPE_DV : 2459 MLX5_FLOW_TYPE_VERBS; 2460 return type; 2461 } 2462 2463 #define flow_get_drv_ops(type) flow_drv_ops[type] 2464 2465 /** 2466 * Flow driver validation API. This abstracts calling driver specific functions. 2467 * The type of flow driver is determined according to flow attributes. 2468 * 2469 * @param[in] dev 2470 * Pointer to the dev structure. 2471 * @param[in] attr 2472 * Pointer to the flow attributes. 2473 * @param[in] items 2474 * Pointer to the list of items. 2475 * @param[in] actions 2476 * Pointer to the list of actions. 2477 * @param[in] external 2478 * This flow rule is created by request external to PMD. 2479 * @param[out] error 2480 * Pointer to the error structure. 2481 * 2482 * @return 2483 * 0 on success, a negative errno value otherwise and rte_errno is set. 2484 */ 2485 static inline int 2486 flow_drv_validate(struct rte_eth_dev *dev, 2487 const struct rte_flow_attr *attr, 2488 const struct rte_flow_item items[], 2489 const struct rte_flow_action actions[], 2490 bool external, struct rte_flow_error *error) 2491 { 2492 const struct mlx5_flow_driver_ops *fops; 2493 enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr); 2494 2495 fops = flow_get_drv_ops(type); 2496 return fops->validate(dev, attr, items, actions, external, error); 2497 } 2498 2499 /** 2500 * Flow driver preparation API. This abstracts calling driver specific 2501 * functions. Parent flow (rte_flow) should have driver type (drv_type). It 2502 * calculates the size of memory required for device flow, allocates the memory, 2503 * initializes the device flow and returns the pointer. 2504 * 2505 * @note 2506 * This function initializes device flow structure such as dv or verbs in 2507 * struct mlx5_flow. However, it is caller's responsibility to initialize the 2508 * rest. For example, adding returning device flow to flow->dev_flow list and 2509 * setting backward reference to the flow should be done out of this function. 2510 * layers field is not filled either. 2511 * 2512 * @param[in] attr 2513 * Pointer to the flow attributes. 2514 * @param[in] items 2515 * Pointer to the list of items. 2516 * @param[in] actions 2517 * Pointer to the list of actions. 2518 * @param[out] error 2519 * Pointer to the error structure. 2520 * 2521 * @return 2522 * Pointer to device flow on success, otherwise NULL and rte_errno is set. 2523 */ 2524 static inline struct mlx5_flow * 2525 flow_drv_prepare(const struct rte_flow *flow, 2526 const struct rte_flow_attr *attr, 2527 const struct rte_flow_item items[], 2528 const struct rte_flow_action actions[], 2529 struct rte_flow_error *error) 2530 { 2531 const struct mlx5_flow_driver_ops *fops; 2532 enum mlx5_flow_drv_type type = flow->drv_type; 2533 2534 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2535 fops = flow_get_drv_ops(type); 2536 return fops->prepare(attr, items, actions, error); 2537 } 2538 2539 /** 2540 * Flow driver translation API. This abstracts calling driver specific 2541 * functions. Parent flow (rte_flow) should have driver type (drv_type). It 2542 * translates a generic flow into a driver flow. flow_drv_prepare() must 2543 * precede. 2544 * 2545 * @note 2546 * dev_flow->layers could be filled as a result of parsing during translation 2547 * if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled 2548 * if necessary. As a flow can have multiple dev_flows by RSS flow expansion, 2549 * flow->actions could be overwritten even though all the expanded dev_flows 2550 * have the same actions. 
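 *
 * For reference, a simplified sketch (error handling and RSS expansion
 * omitted) of how the driver callbacks are chained during flow creation in
 * this file:
 *
 * @code
 *	ret = flow_drv_validate(dev, attr, items, actions, external, error);
 *	dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
 *	dev_flow->flow = flow;
 *	LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
 *	ret = flow_drv_translate(dev, dev_flow, attr, items, actions, error);
 *	ret = flow_drv_apply(dev, flow, error);
 * @endcode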
2551 * 2552 * @param[in] dev 2553 * Pointer to the rte dev structure. 2554 * @param[in, out] dev_flow 2555 * Pointer to the mlx5 flow. 2556 * @param[in] attr 2557 * Pointer to the flow attributes. 2558 * @param[in] items 2559 * Pointer to the list of items. 2560 * @param[in] actions 2561 * Pointer to the list of actions. 2562 * @param[out] error 2563 * Pointer to the error structure. 2564 * 2565 * @return 2566 * 0 on success, a negative errno value otherwise and rte_errno is set. 2567 */ 2568 static inline int 2569 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow, 2570 const struct rte_flow_attr *attr, 2571 const struct rte_flow_item items[], 2572 const struct rte_flow_action actions[], 2573 struct rte_flow_error *error) 2574 { 2575 const struct mlx5_flow_driver_ops *fops; 2576 enum mlx5_flow_drv_type type = dev_flow->flow->drv_type; 2577 2578 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2579 fops = flow_get_drv_ops(type); 2580 return fops->translate(dev, dev_flow, attr, items, actions, error); 2581 } 2582 2583 /** 2584 * Flow driver apply API. This abstracts calling driver specific functions. 2585 * Parent flow (rte_flow) should have driver type (drv_type). It applies 2586 * translated driver flows on to device. flow_drv_translate() must precede. 2587 * 2588 * @param[in] dev 2589 * Pointer to Ethernet device structure. 2590 * @param[in, out] flow 2591 * Pointer to flow structure. 2592 * @param[out] error 2593 * Pointer to error structure. 2594 * 2595 * @return 2596 * 0 on success, a negative errno value otherwise and rte_errno is set. 2597 */ 2598 static inline int 2599 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, 2600 struct rte_flow_error *error) 2601 { 2602 const struct mlx5_flow_driver_ops *fops; 2603 enum mlx5_flow_drv_type type = flow->drv_type; 2604 2605 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2606 fops = flow_get_drv_ops(type); 2607 return fops->apply(dev, flow, error); 2608 } 2609 2610 /** 2611 * Flow driver remove API. This abstracts calling driver specific functions. 2612 * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow 2613 * on device. All the resources of the flow should be freed by calling 2614 * flow_drv_destroy(). 2615 * 2616 * @param[in] dev 2617 * Pointer to Ethernet device. 2618 * @param[in, out] flow 2619 * Pointer to flow structure. 2620 */ 2621 static inline void 2622 flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) 2623 { 2624 const struct mlx5_flow_driver_ops *fops; 2625 enum mlx5_flow_drv_type type = flow->drv_type; 2626 2627 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2628 fops = flow_get_drv_ops(type); 2629 fops->remove(dev, flow); 2630 } 2631 2632 /** 2633 * Flow driver destroy API. This abstracts calling driver specific functions. 2634 * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow 2635 * on device and releases resources of the flow. 2636 * 2637 * @param[in] dev 2638 * Pointer to Ethernet device. 2639 * @param[in, out] flow 2640 * Pointer to flow structure. 
2641 */ 2642 static inline void 2643 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) 2644 { 2645 const struct mlx5_flow_driver_ops *fops; 2646 enum mlx5_flow_drv_type type = flow->drv_type; 2647 2648 flow_mreg_split_qrss_release(dev, flow); 2649 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2650 fops = flow_get_drv_ops(type); 2651 fops->destroy(dev, flow); 2652 } 2653 2654 /** 2655 * Validate a flow supported by the NIC. 2656 * 2657 * @see rte_flow_validate() 2658 * @see rte_flow_ops 2659 */ 2660 int 2661 mlx5_flow_validate(struct rte_eth_dev *dev, 2662 const struct rte_flow_attr *attr, 2663 const struct rte_flow_item items[], 2664 const struct rte_flow_action actions[], 2665 struct rte_flow_error *error) 2666 { 2667 int ret; 2668 2669 ret = flow_drv_validate(dev, attr, items, actions, true, error); 2670 if (ret < 0) 2671 return ret; 2672 return 0; 2673 } 2674 2675 /** 2676 * Get port id item from the item list. 2677 * 2678 * @param[in] item 2679 * Pointer to the list of items. 2680 * 2681 * @return 2682 * Pointer to the port id item if exist, else return NULL. 2683 */ 2684 static const struct rte_flow_item * 2685 find_port_id_item(const struct rte_flow_item *item) 2686 { 2687 assert(item); 2688 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { 2689 if (item->type == RTE_FLOW_ITEM_TYPE_PORT_ID) 2690 return item; 2691 } 2692 return NULL; 2693 } 2694 2695 /** 2696 * Get RSS action from the action list. 2697 * 2698 * @param[in] actions 2699 * Pointer to the list of actions. 2700 * 2701 * @return 2702 * Pointer to the RSS action if exist, else return NULL. 2703 */ 2704 static const struct rte_flow_action_rss* 2705 flow_get_rss_action(const struct rte_flow_action actions[]) 2706 { 2707 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2708 switch (actions->type) { 2709 case RTE_FLOW_ACTION_TYPE_RSS: 2710 return (const struct rte_flow_action_rss *) 2711 actions->conf; 2712 default: 2713 break; 2714 } 2715 } 2716 return NULL; 2717 } 2718 2719 static unsigned int 2720 find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) 2721 { 2722 const struct rte_flow_item *item; 2723 unsigned int has_vlan = 0; 2724 2725 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { 2726 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { 2727 has_vlan = 1; 2728 break; 2729 } 2730 } 2731 if (has_vlan) 2732 return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN : 2733 MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN; 2734 return rss_level < 2 ? MLX5_EXPANSION_ROOT : 2735 MLX5_EXPANSION_ROOT_OUTER; 2736 } 2737 2738 /** 2739 * Get QUEUE/RSS action from the action list. 2740 * 2741 * @param[in] actions 2742 * Pointer to the list of actions. 2743 * @param[out] qrss 2744 * Pointer to the return pointer. 2745 * @param[out] qrss_type 2746 * Pointer to the action type to return. RTE_FLOW_ACTION_TYPE_END is returned 2747 * if no QUEUE/RSS is found. 2748 * 2749 * @return 2750 * Total number of actions. 2751 */ 2752 static int 2753 flow_parse_qrss_action(const struct rte_flow_action actions[], 2754 const struct rte_flow_action **qrss) 2755 { 2756 int actions_n = 0; 2757 2758 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2759 switch (actions->type) { 2760 case RTE_FLOW_ACTION_TYPE_QUEUE: 2761 case RTE_FLOW_ACTION_TYPE_RSS: 2762 *qrss = actions; 2763 break; 2764 default: 2765 break; 2766 } 2767 actions_n++; 2768 } 2769 /* Count RTE_FLOW_ACTION_TYPE_END. */ 2770 return actions_n + 1; 2771 } 2772 2773 /** 2774 * Check meter action from the action list. 
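 * For example, an action list of { METER, QUEUE, END } sets *mtr to 1 and
 * returns 3 (the terminating END action is counted as well).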
2775 * 2776 * @param[in] actions 2777 * Pointer to the list of actions. 2778 * @param[out] mtr 2779 * Pointer to the meter exist flag. 2780 * 2781 * @return 2782 * Total number of actions. 2783 */ 2784 static int 2785 flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr) 2786 { 2787 int actions_n = 0; 2788 2789 assert(mtr); 2790 *mtr = 0; 2791 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2792 switch (actions->type) { 2793 case RTE_FLOW_ACTION_TYPE_METER: 2794 *mtr = 1; 2795 break; 2796 default: 2797 break; 2798 } 2799 actions_n++; 2800 } 2801 /* Count RTE_FLOW_ACTION_TYPE_END. */ 2802 return actions_n + 1; 2803 } 2804 2805 /** 2806 * Check if the flow should be splited due to hairpin. 2807 * The reason for the split is that in current HW we can't 2808 * support encap on Rx, so if a flow have encap we move it 2809 * to Tx. 2810 * 2811 * @param dev 2812 * Pointer to Ethernet device. 2813 * @param[in] attr 2814 * Flow rule attributes. 2815 * @param[in] actions 2816 * Associated actions (list terminated by the END action). 2817 * 2818 * @return 2819 * > 0 the number of actions and the flow should be split, 2820 * 0 when no split required. 2821 */ 2822 static int 2823 flow_check_hairpin_split(struct rte_eth_dev *dev, 2824 const struct rte_flow_attr *attr, 2825 const struct rte_flow_action actions[]) 2826 { 2827 int queue_action = 0; 2828 int action_n = 0; 2829 int encap = 0; 2830 const struct rte_flow_action_queue *queue; 2831 const struct rte_flow_action_rss *rss; 2832 const struct rte_flow_action_raw_encap *raw_encap; 2833 2834 if (!attr->ingress) 2835 return 0; 2836 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2837 switch (actions->type) { 2838 case RTE_FLOW_ACTION_TYPE_QUEUE: 2839 queue = actions->conf; 2840 if (queue == NULL) 2841 return 0; 2842 if (mlx5_rxq_get_type(dev, queue->index) != 2843 MLX5_RXQ_TYPE_HAIRPIN) 2844 return 0; 2845 queue_action = 1; 2846 action_n++; 2847 break; 2848 case RTE_FLOW_ACTION_TYPE_RSS: 2849 rss = actions->conf; 2850 if (rss == NULL || rss->queue_num == 0) 2851 return 0; 2852 if (mlx5_rxq_get_type(dev, rss->queue[0]) != 2853 MLX5_RXQ_TYPE_HAIRPIN) 2854 return 0; 2855 queue_action = 1; 2856 action_n++; 2857 break; 2858 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 2859 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 2860 encap = 1; 2861 action_n++; 2862 break; 2863 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 2864 raw_encap = actions->conf; 2865 if (raw_encap->size > 2866 (sizeof(struct rte_flow_item_eth) + 2867 sizeof(struct rte_flow_item_ipv4))) 2868 encap = 1; 2869 action_n++; 2870 break; 2871 default: 2872 action_n++; 2873 break; 2874 } 2875 } 2876 if (encap == 1 && queue_action) 2877 return action_n; 2878 return 0; 2879 } 2880 2881 /* Declare flow create/destroy prototype in advance. */ 2882 static struct rte_flow * 2883 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, 2884 const struct rte_flow_attr *attr, 2885 const struct rte_flow_item items[], 2886 const struct rte_flow_action actions[], 2887 bool external, struct rte_flow_error *error); 2888 2889 static void 2890 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, 2891 struct rte_flow *flow); 2892 2893 /** 2894 * Add a flow of copying flow metadata registers in RX_CP_TBL. 2895 * 2896 * As mark_id is unique, if there's already a registered flow for the mark_id, 2897 * return by increasing the reference counter of the resource. Otherwise, create 2898 * the resource (mcp_res) and flow. 
2899 * 2900 * Flow looks like, 2901 * - If ingress port is ANY and reg_c[1] is mark_id, 2902 * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL. 2903 * 2904 * For default flow (zero mark_id), flow is like, 2905 * - If ingress port is ANY, 2906 * reg_b := reg_c[0] and jump to RX_ACT_TBL. 2907 * 2908 * @param dev 2909 * Pointer to Ethernet device. 2910 * @param mark_id 2911 * ID of MARK action, zero means default flow for META. 2912 * @param[out] error 2913 * Perform verbose error reporting if not NULL. 2914 * 2915 * @return 2916 * Associated resource on success, NULL otherwise and rte_errno is set. 2917 */ 2918 static struct mlx5_flow_mreg_copy_resource * 2919 flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, 2920 struct rte_flow_error *error) 2921 { 2922 struct mlx5_priv *priv = dev->data->dev_private; 2923 struct rte_flow_attr attr = { 2924 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, 2925 .ingress = 1, 2926 }; 2927 struct mlx5_rte_flow_item_tag tag_spec = { 2928 .data = mark_id, 2929 }; 2930 struct rte_flow_item items[] = { 2931 [1] = { .type = RTE_FLOW_ITEM_TYPE_END, }, 2932 }; 2933 struct rte_flow_action_mark ftag = { 2934 .id = mark_id, 2935 }; 2936 struct mlx5_flow_action_copy_mreg cp_mreg = { 2937 .dst = REG_B, 2938 .src = 0, 2939 }; 2940 struct rte_flow_action_jump jump = { 2941 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, 2942 }; 2943 struct rte_flow_action actions[] = { 2944 [3] = { .type = RTE_FLOW_ACTION_TYPE_END, }, 2945 }; 2946 struct mlx5_flow_mreg_copy_resource *mcp_res; 2947 int ret; 2948 2949 /* Fill the register fileds in the flow. */ 2950 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); 2951 if (ret < 0) 2952 return NULL; 2953 tag_spec.id = ret; 2954 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error); 2955 if (ret < 0) 2956 return NULL; 2957 cp_mreg.src = ret; 2958 /* Check if already registered. */ 2959 assert(priv->mreg_cp_tbl); 2960 mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id); 2961 if (mcp_res) { 2962 /* For non-default rule. */ 2963 if (mark_id != MLX5_DEFAULT_COPY_ID) 2964 mcp_res->refcnt++; 2965 assert(mark_id != MLX5_DEFAULT_COPY_ID || mcp_res->refcnt == 1); 2966 return mcp_res; 2967 } 2968 /* Provide the full width of FLAG specific value. */ 2969 if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT)) 2970 tag_spec.data = MLX5_FLOW_MARK_DEFAULT; 2971 /* Build a new flow. */ 2972 if (mark_id != MLX5_DEFAULT_COPY_ID) { 2973 items[0] = (struct rte_flow_item){ 2974 .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG, 2975 .spec = &tag_spec, 2976 }; 2977 items[1] = (struct rte_flow_item){ 2978 .type = RTE_FLOW_ITEM_TYPE_END, 2979 }; 2980 actions[0] = (struct rte_flow_action){ 2981 .type = MLX5_RTE_FLOW_ACTION_TYPE_MARK, 2982 .conf = &ftag, 2983 }; 2984 actions[1] = (struct rte_flow_action){ 2985 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 2986 .conf = &cp_mreg, 2987 }; 2988 actions[2] = (struct rte_flow_action){ 2989 .type = RTE_FLOW_ACTION_TYPE_JUMP, 2990 .conf = &jump, 2991 }; 2992 actions[3] = (struct rte_flow_action){ 2993 .type = RTE_FLOW_ACTION_TYPE_END, 2994 }; 2995 } else { 2996 /* Default rule, wildcard match. 
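		 * No tag item is used: every ingress packet has reg_c[0]
		 * copied to reg_b and then jumps to RX_ACT_TBL.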
*/ 2997 attr.priority = MLX5_FLOW_PRIO_RSVD; 2998 items[0] = (struct rte_flow_item){ 2999 .type = RTE_FLOW_ITEM_TYPE_END, 3000 }; 3001 actions[0] = (struct rte_flow_action){ 3002 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 3003 .conf = &cp_mreg, 3004 }; 3005 actions[1] = (struct rte_flow_action){ 3006 .type = RTE_FLOW_ACTION_TYPE_JUMP, 3007 .conf = &jump, 3008 }; 3009 actions[2] = (struct rte_flow_action){ 3010 .type = RTE_FLOW_ACTION_TYPE_END, 3011 }; 3012 } 3013 /* Build a new entry. */ 3014 mcp_res = rte_zmalloc(__func__, sizeof(*mcp_res), 0); 3015 if (!mcp_res) { 3016 rte_errno = ENOMEM; 3017 return NULL; 3018 } 3019 /* 3020 * The copy flows are not included in any list. They are 3021 * referenced from other flows and cannot be applied, 3022 * removed or deleted in arbitrary order by list 3023 * traversing. 3024 */ 3025 mcp_res->flow = flow_list_create(dev, NULL, &attr, items, 3026 actions, false, error); 3027 if (!mcp_res->flow) 3028 goto error; 3029 mcp_res->refcnt++; 3030 mcp_res->hlist_ent.key = mark_id; 3031 ret = mlx5_hlist_insert(priv->mreg_cp_tbl, 3032 &mcp_res->hlist_ent); 3033 assert(!ret); 3034 if (ret) 3035 goto error; 3036 return mcp_res; 3037 error: 3038 if (mcp_res->flow) 3039 flow_list_destroy(dev, NULL, mcp_res->flow); 3040 rte_free(mcp_res); 3041 return NULL; 3042 } 3043 3044 /** 3045 * Release flow in RX_CP_TBL. 3046 * 3047 * @param dev 3048 * Pointer to Ethernet device. 3049 * @param flow 3050 * Parent flow for which copying is provided. 3051 */ 3052 static void 3053 flow_mreg_del_copy_action(struct rte_eth_dev *dev, 3054 struct rte_flow *flow) 3055 { 3056 struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; 3057 struct mlx5_priv *priv = dev->data->dev_private; 3058 3059 if (!mcp_res || !priv->mreg_cp_tbl) 3060 return; 3061 if (flow->copy_applied) { 3062 assert(mcp_res->appcnt); 3063 flow->copy_applied = 0; 3064 --mcp_res->appcnt; 3065 if (!mcp_res->appcnt) 3066 flow_drv_remove(dev, mcp_res->flow); 3067 } 3068 /* 3069 * We do not check availability of metadata registers here, 3070 * because copy resources are not allocated in this case. 3071 */ 3072 if (--mcp_res->refcnt) 3073 return; 3074 assert(mcp_res->flow); 3075 flow_list_destroy(dev, NULL, mcp_res->flow); 3076 mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent); 3077 rte_free(mcp_res); 3078 flow->mreg_copy = NULL; 3079 } 3080 3081 /** 3082 * Start flow in RX_CP_TBL. 3083 * 3084 * @param dev 3085 * Pointer to Ethernet device. 3086 * @param flow 3087 * Parent flow for which copying is provided. 3088 * 3089 * @return 3090 * 0 on success, a negative errno value otherwise and rte_errno is set. 3091 */ 3092 static int 3093 flow_mreg_start_copy_action(struct rte_eth_dev *dev, 3094 struct rte_flow *flow) 3095 { 3096 struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; 3097 int ret; 3098 3099 if (!mcp_res || flow->copy_applied) 3100 return 0; 3101 if (!mcp_res->appcnt) { 3102 ret = flow_drv_apply(dev, mcp_res->flow, NULL); 3103 if (ret) 3104 return ret; 3105 } 3106 ++mcp_res->appcnt; 3107 flow->copy_applied = 1; 3108 return 0; 3109 } 3110 3111 /** 3112 * Stop flow in RX_CP_TBL. 3113 * 3114 * @param dev 3115 * Pointer to Ethernet device. 3116 * @param flow 3117 * Parent flow for which copying is provided.
3118 */ 3119 static void 3120 flow_mreg_stop_copy_action(struct rte_eth_dev *dev, 3121 struct rte_flow *flow) 3122 { 3123 struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; 3124 3125 if (!mcp_res || !flow->copy_applied) 3126 return; 3127 assert(mcp_res->appcnt); 3128 --mcp_res->appcnt; 3129 flow->copy_applied = 0; 3130 if (!mcp_res->appcnt) 3131 flow_drv_remove(dev, mcp_res->flow); 3132 } 3133 3134 /** 3135 * Remove the default copy action from RX_CP_TBL. 3136 * 3137 * @param dev 3138 * Pointer to Ethernet device. 3139 */ 3140 static void 3141 flow_mreg_del_default_copy_action(struct rte_eth_dev *dev) 3142 { 3143 struct mlx5_flow_mreg_copy_resource *mcp_res; 3144 struct mlx5_priv *priv = dev->data->dev_private; 3145 3146 /* Check if default flow is registered. */ 3147 if (!priv->mreg_cp_tbl) 3148 return; 3149 mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, 3150 MLX5_DEFAULT_COPY_ID); 3151 if (!mcp_res) 3152 return; 3153 assert(mcp_res->flow); 3154 flow_list_destroy(dev, NULL, mcp_res->flow); 3155 mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent); 3156 rte_free(mcp_res); 3157 } 3158 3159 /** 3160 * Add the default copy action in in RX_CP_TBL. 3161 * 3162 * @param dev 3163 * Pointer to Ethernet device. 3164 * @param[out] error 3165 * Perform verbose error reporting if not NULL. 3166 * 3167 * @return 3168 * 0 for success, negative value otherwise and rte_errno is set. 3169 */ 3170 static int 3171 flow_mreg_add_default_copy_action(struct rte_eth_dev *dev, 3172 struct rte_flow_error *error) 3173 { 3174 struct mlx5_priv *priv = dev->data->dev_private; 3175 struct mlx5_flow_mreg_copy_resource *mcp_res; 3176 3177 /* Check whether extensive metadata feature is engaged. */ 3178 if (!priv->config.dv_flow_en || 3179 priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 3180 !mlx5_flow_ext_mreg_supported(dev) || 3181 !priv->sh->dv_regc0_mask) 3182 return 0; 3183 mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error); 3184 if (!mcp_res) 3185 return -rte_errno; 3186 return 0; 3187 } 3188 3189 /** 3190 * Add a flow of copying flow metadata registers in RX_CP_TBL. 3191 * 3192 * All the flow having Q/RSS action should be split by 3193 * flow_mreg_split_qrss_prep() to pass by RX_CP_TBL. A flow in the RX_CP_TBL 3194 * performs the following, 3195 * - CQE->flow_tag := reg_c[1] (MARK) 3196 * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META) 3197 * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1] 3198 * but there should be a flow per each MARK ID set by MARK action. 3199 * 3200 * For the aforementioned reason, if there's a MARK action in flow's action 3201 * list, a corresponding flow should be added to the RX_CP_TBL in order to copy 3202 * the MARK ID to CQE's flow_tag like, 3203 * - If reg_c[1] is mark_id, 3204 * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL. 3205 * 3206 * For SET_META action which stores value in reg_c[0], as the destination is 3207 * also a flow metadata register (reg_b), adding a default flow is enough. Zero 3208 * MARK ID means the default flow. The default flow looks like, 3209 * - For all flow, reg_b := reg_c[0] and jump to RX_ACT_TBL. 3210 * 3211 * @param dev 3212 * Pointer to Ethernet device. 3213 * @param flow 3214 * Pointer to flow structure. 3215 * @param[in] actions 3216 * Pointer to the list of actions. 3217 * @param[out] error 3218 * Perform verbose error reporting if not NULL. 3219 * 3220 * @return 3221 * 0 on success, negative value otherwise and rte_errno is set. 
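 *
 * For example (illustrative only), a flow created with
 * actions = { MARK(id = 0x1234), QUEUE, END } gets a companion entry in
 * RX_CP_TBL that matches reg_c[1] == 0x1234 and sets flow_tag := 0x1234,
 * reg_b := reg_c[0] before jumping to RX_ACT_TBL.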
3222 */ 3223 static int 3224 flow_mreg_update_copy_table(struct rte_eth_dev *dev, 3225 struct rte_flow *flow, 3226 const struct rte_flow_action *actions, 3227 struct rte_flow_error *error) 3228 { 3229 struct mlx5_priv *priv = dev->data->dev_private; 3230 struct mlx5_dev_config *config = &priv->config; 3231 struct mlx5_flow_mreg_copy_resource *mcp_res; 3232 const struct rte_flow_action_mark *mark; 3233 3234 /* Check whether extensive metadata feature is engaged. */ 3235 if (!config->dv_flow_en || 3236 config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 3237 !mlx5_flow_ext_mreg_supported(dev) || 3238 !priv->sh->dv_regc0_mask) 3239 return 0; 3240 /* Find MARK action. */ 3241 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 3242 switch (actions->type) { 3243 case RTE_FLOW_ACTION_TYPE_FLAG: 3244 mcp_res = flow_mreg_add_copy_action 3245 (dev, MLX5_FLOW_MARK_DEFAULT, error); 3246 if (!mcp_res) 3247 return -rte_errno; 3248 flow->mreg_copy = mcp_res; 3249 if (dev->data->dev_started) { 3250 mcp_res->appcnt++; 3251 flow->copy_applied = 1; 3252 } 3253 return 0; 3254 case RTE_FLOW_ACTION_TYPE_MARK: 3255 mark = (const struct rte_flow_action_mark *) 3256 actions->conf; 3257 mcp_res = 3258 flow_mreg_add_copy_action(dev, mark->id, error); 3259 if (!mcp_res) 3260 return -rte_errno; 3261 flow->mreg_copy = mcp_res; 3262 if (dev->data->dev_started) { 3263 mcp_res->appcnt++; 3264 flow->copy_applied = 1; 3265 } 3266 return 0; 3267 default: 3268 break; 3269 } 3270 } 3271 return 0; 3272 } 3273 3274 #define MLX5_MAX_SPLIT_ACTIONS 24 3275 #define MLX5_MAX_SPLIT_ITEMS 24 3276 3277 /** 3278 * Split the hairpin flow. 3279 * Since HW can't support encap on Rx we move the encap to Tx. 3280 * If the count action is after the encap then we also 3281 * move the count action. in this case the count will also measure 3282 * the outer bytes. 3283 * 3284 * @param dev 3285 * Pointer to Ethernet device. 3286 * @param[in] actions 3287 * Associated actions (list terminated by the END action). 3288 * @param[out] actions_rx 3289 * Rx flow actions. 3290 * @param[out] actions_tx 3291 * Tx flow actions.. 3292 * @param[out] pattern_tx 3293 * The pattern items for the Tx flow. 3294 * @param[out] flow_id 3295 * The flow ID connected to this flow. 3296 * 3297 * @return 3298 * 0 on success. 
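 *
 * Illustrative example (assuming the queue action targets a hairpin Rx
 * queue): original actions { VXLAN_ENCAP, COUNT, QUEUE, END } are split into
 * Rx actions { QUEUE, TAG(flow_id), END } and Tx actions
 * { VXLAN_ENCAP, COUNT, END }, while the generated Tx pattern matches the
 * same internal tag.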
3299 */ 3300 static int 3301 flow_hairpin_split(struct rte_eth_dev *dev, 3302 const struct rte_flow_action actions[], 3303 struct rte_flow_action actions_rx[], 3304 struct rte_flow_action actions_tx[], 3305 struct rte_flow_item pattern_tx[], 3306 uint32_t *flow_id) 3307 { 3308 struct mlx5_priv *priv = dev->data->dev_private; 3309 const struct rte_flow_action_raw_encap *raw_encap; 3310 const struct rte_flow_action_raw_decap *raw_decap; 3311 struct mlx5_rte_flow_action_set_tag *set_tag; 3312 struct rte_flow_action *tag_action; 3313 struct mlx5_rte_flow_item_tag *tag_item; 3314 struct rte_flow_item *item; 3315 char *addr; 3316 int encap = 0; 3317 3318 mlx5_flow_id_get(priv->sh->flow_id_pool, flow_id); 3319 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 3320 switch (actions->type) { 3321 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 3322 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 3323 rte_memcpy(actions_tx, actions, 3324 sizeof(struct rte_flow_action)); 3325 actions_tx++; 3326 break; 3327 case RTE_FLOW_ACTION_TYPE_COUNT: 3328 if (encap) { 3329 rte_memcpy(actions_tx, actions, 3330 sizeof(struct rte_flow_action)); 3331 actions_tx++; 3332 } else { 3333 rte_memcpy(actions_rx, actions, 3334 sizeof(struct rte_flow_action)); 3335 actions_rx++; 3336 } 3337 break; 3338 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 3339 raw_encap = actions->conf; 3340 if (raw_encap->size > 3341 (sizeof(struct rte_flow_item_eth) + 3342 sizeof(struct rte_flow_item_ipv4))) { 3343 memcpy(actions_tx, actions, 3344 sizeof(struct rte_flow_action)); 3345 actions_tx++; 3346 encap = 1; 3347 } else { 3348 rte_memcpy(actions_rx, actions, 3349 sizeof(struct rte_flow_action)); 3350 actions_rx++; 3351 } 3352 break; 3353 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 3354 raw_decap = actions->conf; 3355 if (raw_decap->size < 3356 (sizeof(struct rte_flow_item_eth) + 3357 sizeof(struct rte_flow_item_ipv4))) { 3358 memcpy(actions_tx, actions, 3359 sizeof(struct rte_flow_action)); 3360 actions_tx++; 3361 } else { 3362 rte_memcpy(actions_rx, actions, 3363 sizeof(struct rte_flow_action)); 3364 actions_rx++; 3365 } 3366 break; 3367 default: 3368 rte_memcpy(actions_rx, actions, 3369 sizeof(struct rte_flow_action)); 3370 actions_rx++; 3371 break; 3372 } 3373 } 3374 /* Add set meta action and end action for the Rx flow. */ 3375 tag_action = actions_rx; 3376 tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG; 3377 actions_rx++; 3378 rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action)); 3379 actions_rx++; 3380 set_tag = (void *)actions_rx; 3381 set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL); 3382 assert(set_tag->id > REG_NONE); 3383 set_tag->data = *flow_id; 3384 tag_action->conf = set_tag; 3385 /* Create Tx item list. */ 3386 rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action)); 3387 addr = (void *)&pattern_tx[2]; 3388 item = pattern_tx; 3389 item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG; 3390 tag_item = (void *)addr; 3391 tag_item->data = *flow_id; 3392 tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL); 3393 assert(set_tag->id > REG_NONE); 3394 item->spec = tag_item; 3395 addr += sizeof(struct mlx5_rte_flow_item_tag); 3396 tag_item = (void *)addr; 3397 tag_item->data = UINT32_MAX; 3398 tag_item->id = UINT16_MAX; 3399 item->mask = tag_item; 3400 addr += sizeof(struct mlx5_rte_flow_item_tag); 3401 item->last = NULL; 3402 item++; 3403 item->type = RTE_FLOW_ITEM_TYPE_END; 3404 return 0; 3405 } 3406 3407 /** 3408 * The last stage of splitting chain, just creates the subflow 3409 * without any modification. 
3410 * 3411 * @param dev 3412 * Pointer to Ethernet device. 3413 * @param[in] flow 3414 * Parent flow structure pointer. 3415 * @param[in, out] sub_flow 3416 * Pointer to return the created subflow, may be NULL. 3417 * @param[in] attr 3418 * Flow rule attributes. 3419 * @param[in] items 3420 * Pattern specification (list terminated by the END pattern item). 3421 * @param[in] actions 3422 * Associated actions (list terminated by the END action). 3423 * @param[in] external 3424 * This flow rule is created by request external to PMD. 3425 * @param[out] error 3426 * Perform verbose error reporting if not NULL. 3427 * @return 3428 * 0 on success, negative value otherwise 3429 */ 3430 static int 3431 flow_create_split_inner(struct rte_eth_dev *dev, 3432 struct rte_flow *flow, 3433 struct mlx5_flow **sub_flow, 3434 const struct rte_flow_attr *attr, 3435 const struct rte_flow_item items[], 3436 const struct rte_flow_action actions[], 3437 bool external, struct rte_flow_error *error) 3438 { 3439 struct mlx5_flow *dev_flow; 3440 3441 dev_flow = flow_drv_prepare(flow, attr, items, actions, error); 3442 if (!dev_flow) 3443 return -rte_errno; 3444 dev_flow->flow = flow; 3445 dev_flow->external = external; 3446 /* Subflow object was created, we must include one in the list. */ 3447 LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); 3448 if (sub_flow) 3449 *sub_flow = dev_flow; 3450 return flow_drv_translate(dev, dev_flow, attr, items, actions, error); 3451 } 3452 3453 /** 3454 * Split the meter flow. 3455 * 3456 * As the meter flow is split into three sub flows, apart from the 3457 * meter action itself the remaining actions only make sense when 3458 * the meter accepts the packet. If the packet is to be dropped, 3459 * no additional actions should be taken. 3460 * 3461 * One special case is an action which decapsulates the L3 tunnel 3462 * header: it is placed in the prefix sub flow so that the L3 3463 * tunnel header is not taken into account. 3464 * 3465 * @param dev 3466 * Pointer to Ethernet device. 3467 * @param[in] actions 3468 * Associated actions (list terminated by the END action). 3469 * @param[out] actions_sfx 3470 * Suffix flow actions. 3471 * @param[out] actions_pre 3472 * Prefix flow actions. 3473 * 3474 * @return 3475 * The tag ID that the suffix flow will match on. The ID is taken 3476 * from the Q/RSS ID pool so that it can be shared with the Q/RSS 3477 * split of the same flow. Zero is returned when no ID could be 3478 * allocated. 3479 * 3480 */ 3481 static int 3482 flow_meter_split_prep(struct rte_eth_dev *dev, 3483 const struct rte_flow_action actions[], 3484 struct rte_flow_action actions_sfx[], 3485 struct rte_flow_action actions_pre[]) 3486 { 3487 struct rte_flow_action *tag_action; 3488 struct mlx5_rte_flow_action_set_tag *set_tag; 3489 struct rte_flow_error error; 3490 const struct rte_flow_action_raw_encap *raw_encap; 3491 const struct rte_flow_action_raw_decap *raw_decap; 3492 uint32_t tag_id; 3493 3494 /* Add the extra tag action first. */ 3495 tag_action = actions_pre; 3496 tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG; 3497 actions_pre++; 3498 /* Prepare the actions for prefix and suffix flow.
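	 * Roughly: the meter and tunnel decap actions stay in the prefix
	 * flow together with the tag set above, while the remaining fate
	 * actions go to the suffix flow, which matches on that tag.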
*/ 3499 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 3500 switch (actions->type) { 3501 case RTE_FLOW_ACTION_TYPE_METER: 3502 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: 3503 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: 3504 memcpy(actions_pre, actions, 3505 sizeof(struct rte_flow_action)); 3506 actions_pre++; 3507 break; 3508 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 3509 raw_encap = actions->conf; 3510 if (raw_encap->size > 3511 (sizeof(struct rte_flow_item_eth) + 3512 sizeof(struct rte_flow_item_ipv4))) { 3513 memcpy(actions_sfx, actions, 3514 sizeof(struct rte_flow_action)); 3515 actions_sfx++; 3516 } else { 3517 rte_memcpy(actions_pre, actions, 3518 sizeof(struct rte_flow_action)); 3519 actions_pre++; 3520 } 3521 break; 3522 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 3523 raw_decap = actions->conf; 3524 /* Size 0 decap means 50 bytes as vxlan decap. */ 3525 if (raw_decap->size && (raw_decap->size < 3526 (sizeof(struct rte_flow_item_eth) + 3527 sizeof(struct rte_flow_item_ipv4)))) { 3528 memcpy(actions_sfx, actions, 3529 sizeof(struct rte_flow_action)); 3530 actions_sfx++; 3531 } else { 3532 rte_memcpy(actions_pre, actions, 3533 sizeof(struct rte_flow_action)); 3534 actions_pre++; 3535 } 3536 break; 3537 default: 3538 memcpy(actions_sfx, actions, 3539 sizeof(struct rte_flow_action)); 3540 actions_sfx++; 3541 break; 3542 } 3543 } 3544 /* Add end action to the actions. */ 3545 actions_sfx->type = RTE_FLOW_ACTION_TYPE_END; 3546 actions_pre->type = RTE_FLOW_ACTION_TYPE_END; 3547 actions_pre++; 3548 /* Set the tag. */ 3549 set_tag = (void *)actions_pre; 3550 set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error); 3551 /* 3552 * Get the id from the qrss_pool to make qrss share the id with meter. 3553 */ 3554 tag_id = flow_qrss_get_id(dev); 3555 set_tag->data = rte_cpu_to_be_32(tag_id); 3556 tag_action->conf = set_tag; 3557 return tag_id; 3558 } 3559 3560 /** 3561 * Split action list having QUEUE/RSS for metadata register copy. 3562 * 3563 * Once Q/RSS action is detected in user's action list, the flow action 3564 * should be split in order to copy metadata registers, which will happen in 3565 * RX_CP_TBL like, 3566 * - CQE->flow_tag := reg_c[1] (MARK) 3567 * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META) 3568 * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL. 3569 * This is because the last action of each flow must be a terminal action 3570 * (QUEUE, RSS or DROP). 3571 * 3572 * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is 3573 * stored and kept in the mlx5_flow structure per each sub_flow. 3574 * 3575 * The Q/RSS action is replaced with, 3576 * - SET_TAG, setting the allocated flow ID to reg_c[2]. 3577 * And the following JUMP action is added at the end, 3578 * - JUMP, to RX_CP_TBL. 3579 * 3580 * A flow to perform remained Q/RSS action will be created in RX_ACT_TBL by 3581 * flow_create_split_metadata() routine. The flow will look like, 3582 * - If flow ID matches (reg_c[2]), perform Q/RSS. 3583 * 3584 * @param dev 3585 * Pointer to Ethernet device. 3586 * @param[out] split_actions 3587 * Pointer to store split actions to jump to CP_TBL. 3588 * @param[in] actions 3589 * Pointer to the list of original flow actions. 3590 * @param[in] qrss 3591 * Pointer to the Q/RSS action. 3592 * @param[in] actions_n 3593 * Number of original actions. 3594 * @param[out] error 3595 * Perform verbose error reporting if not NULL. 3596 * 3597 * @return 3598 * non-zero unique flow_id on success, otherwise 0 and 3599 * error/rte_error are set. 
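 *
 * Illustrative example: with actions = { MARK, RSS, END } the resulting
 * split_actions become { MARK, SET_TAG(flow ID to reg_c[2]),
 * JUMP(RX_CP_TBL), END }.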
3600 */ 3601 static uint32_t 3602 flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, 3603 struct rte_flow_action *split_actions, 3604 const struct rte_flow_action *actions, 3605 const struct rte_flow_action *qrss, 3606 int actions_n, struct rte_flow_error *error) 3607 { 3608 struct mlx5_rte_flow_action_set_tag *set_tag; 3609 struct rte_flow_action_jump *jump; 3610 const int qrss_idx = qrss - actions; 3611 uint32_t flow_id = 0; 3612 int ret = 0; 3613 3614 /* 3615 * Given actions will be split 3616 * - Replace QUEUE/RSS action with SET_TAG to set flow ID. 3617 * - Add jump to mreg CP_TBL. 3618 * As a result, there will be one more action. 3619 */ 3620 ++actions_n; 3621 memcpy(split_actions, actions, sizeof(*split_actions) * actions_n); 3622 set_tag = (void *)(split_actions + actions_n); 3623 /* 3624 * If tag action is not set to void(it means we are not the meter 3625 * suffix flow), add the tag action. Since meter suffix flow already 3626 * has the tag added. 3627 */ 3628 if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) { 3629 /* 3630 * Allocate the new subflow ID. This one is unique within 3631 * device and not shared with representors. Otherwise, 3632 * we would have to resolve multi-thread access synch 3633 * issue. Each flow on the shared device is appended 3634 * with source vport identifier, so the resulting 3635 * flows will be unique in the shared (by master and 3636 * representors) domain even if they have coinciding 3637 * IDs. 3638 */ 3639 flow_id = flow_qrss_get_id(dev); 3640 if (!flow_id) 3641 return rte_flow_error_set(error, ENOMEM, 3642 RTE_FLOW_ERROR_TYPE_ACTION, 3643 NULL, "can't allocate id " 3644 "for split Q/RSS subflow"); 3645 /* Internal SET_TAG action to set flow ID. */ 3646 *set_tag = (struct mlx5_rte_flow_action_set_tag){ 3647 .data = flow_id, 3648 }; 3649 ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error); 3650 if (ret < 0) 3651 return ret; 3652 set_tag->id = ret; 3653 /* Construct new actions array. */ 3654 /* Replace QUEUE/RSS action. */ 3655 split_actions[qrss_idx] = (struct rte_flow_action){ 3656 .type = MLX5_RTE_FLOW_ACTION_TYPE_TAG, 3657 .conf = set_tag, 3658 }; 3659 } 3660 /* JUMP action to jump to mreg copy table (CP_TBL). */ 3661 jump = (void *)(set_tag + 1); 3662 *jump = (struct rte_flow_action_jump){ 3663 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, 3664 }; 3665 split_actions[actions_n - 2] = (struct rte_flow_action){ 3666 .type = RTE_FLOW_ACTION_TYPE_JUMP, 3667 .conf = jump, 3668 }; 3669 split_actions[actions_n - 1] = (struct rte_flow_action){ 3670 .type = RTE_FLOW_ACTION_TYPE_END, 3671 }; 3672 return flow_id; 3673 } 3674 3675 /** 3676 * Extend the given action list for Tx metadata copy. 3677 * 3678 * Copy the given action list to the ext_actions and add flow metadata register 3679 * copy action in order to copy reg_a set by WQE to reg_c[0]. 3680 * 3681 * @param[out] ext_actions 3682 * Pointer to the extended action list. 3683 * @param[in] actions 3684 * Pointer to the list of actions. 3685 * @param[in] actions_n 3686 * Number of actions in the list. 3687 * @param[out] error 3688 * Perform verbose error reporting if not NULL. 
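 *
 * Illustrative example: with actions = { COUNT, END } and actions_n = 2,
 * ext_actions become { COUNT, COPY_MREG(reg_a to reg_c[0]), END }.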
3689 * 3690 * @return 3691 * 0 on success, negative value otherwise 3692 */ 3693 static int 3694 flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, 3695 struct rte_flow_action *ext_actions, 3696 const struct rte_flow_action *actions, 3697 int actions_n, struct rte_flow_error *error) 3698 { 3699 struct mlx5_flow_action_copy_mreg *cp_mreg = 3700 (struct mlx5_flow_action_copy_mreg *) 3701 (ext_actions + actions_n + 1); 3702 int ret; 3703 3704 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error); 3705 if (ret < 0) 3706 return ret; 3707 cp_mreg->dst = ret; 3708 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error); 3709 if (ret < 0) 3710 return ret; 3711 cp_mreg->src = ret; 3712 memcpy(ext_actions, actions, 3713 sizeof(*ext_actions) * actions_n); 3714 ext_actions[actions_n - 1] = (struct rte_flow_action){ 3715 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 3716 .conf = cp_mreg, 3717 }; 3718 ext_actions[actions_n] = (struct rte_flow_action){ 3719 .type = RTE_FLOW_ACTION_TYPE_END, 3720 }; 3721 return 0; 3722 } 3723 3724 /** 3725 * The splitting for metadata feature. 3726 * 3727 * - Q/RSS action on NIC Rx should be split in order to pass by 3728 * the mreg copy table (RX_CP_TBL) and then it jumps to the 3729 * action table (RX_ACT_TBL) which has the split Q/RSS action. 3730 * 3731 * - All the actions on NIC Tx should have a mreg copy action to 3732 * copy reg_a from WQE to reg_c[0]. 3733 * 3734 * @param dev 3735 * Pointer to Ethernet device. 3736 * @param[in] flow 3737 * Parent flow structure pointer. 3738 * @param[in] attr 3739 * Flow rule attributes. 3740 * @param[in] items 3741 * Pattern specification (list terminated by the END pattern item). 3742 * @param[in] actions 3743 * Associated actions (list terminated by the END action). 3744 * @param[in] external 3745 * This flow rule is created by request external to PMD. 3746 * @param[out] error 3747 * Perform verbose error reporting if not NULL. 3748 * @return 3749 * 0 on success, negative value otherwise 3750 */ 3751 static int 3752 flow_create_split_metadata(struct rte_eth_dev *dev, 3753 struct rte_flow *flow, 3754 const struct rte_flow_attr *attr, 3755 const struct rte_flow_item items[], 3756 const struct rte_flow_action actions[], 3757 bool external, struct rte_flow_error *error) 3758 { 3759 struct mlx5_priv *priv = dev->data->dev_private; 3760 struct mlx5_dev_config *config = &priv->config; 3761 const struct rte_flow_action *qrss = NULL; 3762 struct rte_flow_action *ext_actions = NULL; 3763 struct mlx5_flow *dev_flow = NULL; 3764 uint32_t qrss_id = 0; 3765 int mtr_sfx = 0; 3766 size_t act_size; 3767 int actions_n; 3768 int ret; 3769 3770 /* Check whether extensive metadata feature is engaged. */ 3771 if (!config->dv_flow_en || 3772 config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 3773 !mlx5_flow_ext_mreg_supported(dev)) 3774 return flow_create_split_inner(dev, flow, NULL, attr, items, 3775 actions, external, error); 3776 actions_n = flow_parse_qrss_action(actions, &qrss); 3777 if (qrss) { 3778 /* Exclude hairpin flows from splitting. 
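* A QUEUE/RSS action that targets a hairpin Rx queue is left as-is
* (qrss is reset to NULL just below), so no metadata copy split is
* performed for it; the split applies only to regular Rx queues.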
*/ 3779 if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) { 3780 const struct rte_flow_action_queue *queue; 3781 3782 queue = qrss->conf; 3783 if (mlx5_rxq_get_type(dev, queue->index) == 3784 MLX5_RXQ_TYPE_HAIRPIN) 3785 qrss = NULL; 3786 } else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) { 3787 const struct rte_flow_action_rss *rss; 3788 3789 rss = qrss->conf; 3790 if (mlx5_rxq_get_type(dev, rss->queue[0]) == 3791 MLX5_RXQ_TYPE_HAIRPIN) 3792 qrss = NULL; 3793 } 3794 } 3795 if (qrss) { 3796 /* Check if it is in meter suffix table. */ 3797 mtr_sfx = attr->group == (attr->transfer ? 3798 (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) : 3799 MLX5_FLOW_TABLE_LEVEL_SUFFIX); 3800 /* 3801 * Q/RSS action on NIC Rx should be split in order to pass by 3802 * the mreg copy table (RX_CP_TBL) and then it jumps to the 3803 * action table (RX_ACT_TBL) which has the split Q/RSS action. 3804 */ 3805 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) + 3806 sizeof(struct rte_flow_action_set_tag) + 3807 sizeof(struct rte_flow_action_jump); 3808 ext_actions = rte_zmalloc(__func__, act_size, 0); 3809 if (!ext_actions) 3810 return rte_flow_error_set(error, ENOMEM, 3811 RTE_FLOW_ERROR_TYPE_ACTION, 3812 NULL, "no memory to split " 3813 "metadata flow"); 3814 /* 3815 * If we are the suffix flow of meter, tag already exist. 3816 * Set the tag action to void. 3817 */ 3818 if (mtr_sfx) 3819 ext_actions[qrss - actions].type = 3820 RTE_FLOW_ACTION_TYPE_VOID; 3821 else 3822 ext_actions[qrss - actions].type = 3823 MLX5_RTE_FLOW_ACTION_TYPE_TAG; 3824 /* 3825 * Create the new actions list with removed Q/RSS action 3826 * and appended set tag and jump to register copy table 3827 * (RX_CP_TBL). We should preallocate unique tag ID here 3828 * in advance, because it is needed for set tag action. 3829 */ 3830 qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions, 3831 qrss, actions_n, error); 3832 if (!mtr_sfx && !qrss_id) { 3833 ret = -rte_errno; 3834 goto exit; 3835 } 3836 } else if (attr->egress && !attr->transfer) { 3837 /* 3838 * All the actions on NIC Tx should have a metadata register 3839 * copy action to copy reg_a from WQE to reg_c[meta] 3840 */ 3841 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) + 3842 sizeof(struct mlx5_flow_action_copy_mreg); 3843 ext_actions = rte_zmalloc(__func__, act_size, 0); 3844 if (!ext_actions) 3845 return rte_flow_error_set(error, ENOMEM, 3846 RTE_FLOW_ERROR_TYPE_ACTION, 3847 NULL, "no memory to split " 3848 "metadata flow"); 3849 /* Create the action list appended with copy register. */ 3850 ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions, 3851 actions_n, error); 3852 if (ret < 0) 3853 goto exit; 3854 } 3855 /* Add the unmodified original or prefix subflow. */ 3856 ret = flow_create_split_inner(dev, flow, &dev_flow, attr, items, 3857 ext_actions ? ext_actions : actions, 3858 external, error); 3859 if (ret < 0) 3860 goto exit; 3861 assert(dev_flow); 3862 if (qrss) { 3863 const struct rte_flow_attr q_attr = { 3864 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, 3865 .ingress = 1, 3866 }; 3867 /* Internal PMD action to set register. 
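* The q_tag_spec below is used as a match item: it matches the flow ID
* written by the prefix subflow's internal SET_TAG action. The register
* index is filled in further down from mlx5_flow_get_reg_id(MLX5_COPY_MARK),
* and only when this is not a meter suffix subflow.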
*/
3868 struct mlx5_rte_flow_item_tag q_tag_spec = {
3869 .data = qrss_id,
3870 .id = 0,
3871 };
3872 struct rte_flow_item q_items[] = {
3873 {
3874 .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG,
3875 .spec = &q_tag_spec,
3876 .last = NULL,
3877 .mask = NULL,
3878 },
3879 {
3880 .type = RTE_FLOW_ITEM_TYPE_END,
3881 },
3882 };
3883 struct rte_flow_action q_actions[] = {
3884 {
3885 .type = qrss->type,
3886 .conf = qrss->conf,
3887 },
3888 {
3889 .type = RTE_FLOW_ACTION_TYPE_END,
3890 },
3891 };
3892 uint64_t hash_fields = dev_flow->hash_fields;
3893
3894 /*
3895 * Configure the tag item only if there is no meter subflow.
3896 * Since the tag is already set in the meter suffix subflow,
3897 * we can just use the meter suffix items as they are.
3898 */
3899 if (qrss_id) {
3900 /* Not meter subflow. */
3901 assert(!mtr_sfx);
3902 /*
3903 * Put the unique id into the prefix flow because it is
3904 * destroyed after the suffix flow. The id is freed once
3905 * there are no actual flows using it anymore, at which
3906 * point identifier reallocation becomes possible (for
3907 * example, for other flows in other threads).
3908 */
3909 dev_flow->qrss_id = qrss_id;
3910 qrss_id = 0;
3911 ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
3912 error);
3913 if (ret < 0)
3914 goto exit;
3915 q_tag_spec.id = ret;
3916 }
3917 dev_flow = NULL;
3918 /* Add suffix subflow to execute Q/RSS. */
3919 ret = flow_create_split_inner(dev, flow, &dev_flow,
3920 &q_attr, mtr_sfx ? items :
3921 q_items, q_actions,
3922 external, error);
3923 if (ret < 0)
3924 goto exit;
3925 assert(dev_flow);
3926 dev_flow->hash_fields = hash_fields;
3927 }
3928
3929 exit:
3930 /*
3931 * We do not destroy the partially created sub_flows in case of error.
3932 * These ones are included into parent flow list and will be destroyed
3933 * by flow_drv_destroy.
3934 */
3935 flow_qrss_free_id(dev, qrss_id);
3936 rte_free(ext_actions);
3937 return ret;
3938 }
3939
3940 /**
3941 * The splitting for meter feature.
3942 *
3943 * - The meter flow is split into two flows: a prefix flow and a
3944 * suffix flow. Packets are processed further only if they pass
3945 * the prefix meter action.
3946 *
3947 * - Reg_C_5 is used for the packet to match between the prefix
3948 * and suffix flows.
3949 *
3950 * @param dev
3951 * Pointer to Ethernet device.
3952 * @param[in] flow
3953 * Parent flow structure pointer.
3954 * @param[in] attr
3955 * Flow rule attributes.
3956 * @param[in] items
3957 * Pattern specification (list terminated by the END pattern item).
3958 * @param[in] actions
3959 * Associated actions (list terminated by the END action).
3960 * @param[in] external
3961 * This flow rule is created by request external to PMD.
3962 * @param[out] error
3963 * Perform verbose error reporting if not NULL.
3964 * @return
3965 * 0 on success, negative value otherwise
3966 */
3967 static int
3968 flow_create_split_meter(struct rte_eth_dev *dev,
3969 struct rte_flow *flow,
3970 const struct rte_flow_attr *attr,
3971 const struct rte_flow_item items[],
3972 const struct rte_flow_action actions[],
3973 bool external, struct rte_flow_error *error)
3974 {
3975 struct mlx5_priv *priv = dev->data->dev_private;
3976 struct rte_flow_action *sfx_actions = NULL;
3977 struct rte_flow_action *pre_actions = NULL;
3978 struct rte_flow_item *sfx_items = NULL;
3979 const struct rte_flow_item *sfx_port_id_item;
3980 struct mlx5_flow *dev_flow = NULL;
3981 struct rte_flow_attr sfx_attr = *attr;
3982 uint32_t mtr = 0;
3983 uint32_t mtr_tag_id = 0;
3984 size_t act_size;
3985 size_t item_size;
3986 int actions_n = 0;
3987 int ret;
3988
3989 if (priv->mtr_en)
3990 actions_n = flow_check_meter_action(actions, &mtr);
3991 if (mtr) {
3992 struct mlx5_rte_flow_item_tag *tag_spec;
3993 /* The five prefix actions: meter, decap, encap, tag, end. */
3994 act_size = sizeof(struct rte_flow_action) * (actions_n + 5) +
3995 sizeof(struct rte_flow_action_set_tag);
3996 /* Suffix items: tag, port id (optional), end. */
3997 #define METER_SUFFIX_ITEM 3
3998 item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
3999 sizeof(struct mlx5_rte_flow_item_tag);
4000 sfx_actions = rte_zmalloc(__func__, (act_size + item_size), 0);
4001 if (!sfx_actions)
4002 return rte_flow_error_set(error, ENOMEM,
4003 RTE_FLOW_ERROR_TYPE_ACTION,
4004 NULL, "no memory to split "
4005 "meter flow");
4006 pre_actions = sfx_actions + actions_n;
4007 mtr_tag_id = flow_meter_split_prep(dev, actions, sfx_actions,
4008 pre_actions);
4009 if (!mtr_tag_id) {
4010 ret = -rte_errno;
4011 goto exit;
4012 }
4013 /* Add the prefix subflow. */
4014 ret = flow_create_split_inner(dev, flow, &dev_flow, attr, items,
4015 pre_actions, external, error);
4016 if (ret) {
4017 ret = -rte_errno;
4018 goto exit;
4019 }
4020 dev_flow->mtr_flow_id = mtr_tag_id;
4021 /* Prepare the suffix flow match pattern. */
4022 sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
4023 act_size);
4024 tag_spec = (struct mlx5_rte_flow_item_tag *)(sfx_items +
4025 METER_SUFFIX_ITEM);
4026 tag_spec->data = rte_cpu_to_be_32(dev_flow->mtr_flow_id);
4027 tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0,
4028 error);
4029 sfx_items->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG;
4030 sfx_items->spec = tag_spec;
4031 sfx_items->last = NULL;
4032 sfx_items->mask = NULL;
4033 sfx_items++;
4034 sfx_port_id_item = find_port_id_item(items);
4035 if (sfx_port_id_item) {
4036 memcpy(sfx_items, sfx_port_id_item,
4037 sizeof(*sfx_items));
4038 sfx_items++;
4039 }
4040 sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
4041 sfx_items -= METER_SUFFIX_ITEM;
4042 /* Set the sfx group attribute. */
4043 sfx_attr.group = sfx_attr.transfer ?
4044 (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
4045 MLX5_FLOW_TABLE_LEVEL_SUFFIX;
4046 }
4047 /* Add the suffix subflow, or the original flow when no meter is used. */
4048 ret = flow_create_split_metadata(dev, flow, &sfx_attr,
4049 sfx_items ? sfx_items : items,
4050 sfx_actions ? sfx_actions : actions,
4051 external, error);
4052 exit:
4053 if (sfx_actions)
4054 rte_free(sfx_actions);
4055 return ret;
4056 }
4057
4058 /**
4059 * Split the flow into a set of subflows.
The splitters may be linked
4060 * in a chain, like this:
4061 * flow_create_split_outer() calls:
4062 * flow_create_split_meter() calls:
4063 * flow_create_split_metadata(meter_subflow_0) calls:
4064 * flow_create_split_inner(metadata_subflow_0)
4065 * flow_create_split_inner(metadata_subflow_1)
4066 * flow_create_split_inner(metadata_subflow_2)
4067 * flow_create_split_metadata(meter_subflow_1) calls:
4068 * flow_create_split_inner(metadata_subflow_0)
4069 * flow_create_split_inner(metadata_subflow_1)
4070 * flow_create_split_inner(metadata_subflow_2)
4071 *
4072 * This provides a flexible way to add new levels of flow splitting.
4073 * All successfully created subflows are included in the
4074 * parent flow dev_flow list.
4075 *
4076 * @param dev
4077 * Pointer to Ethernet device.
4078 * @param[in] flow
4079 * Parent flow structure pointer.
4080 * @param[in] attr
4081 * Flow rule attributes.
4082 * @param[in] items
4083 * Pattern specification (list terminated by the END pattern item).
4084 * @param[in] actions
4085 * Associated actions (list terminated by the END action).
4086 * @param[in] external
4087 * This flow rule is created by request external to PMD.
4088 * @param[out] error
4089 * Perform verbose error reporting if not NULL.
4090 * @return
4091 * 0 on success, negative value otherwise
4092 */
4093 static int
4094 flow_create_split_outer(struct rte_eth_dev *dev,
4095 struct rte_flow *flow,
4096 const struct rte_flow_attr *attr,
4097 const struct rte_flow_item items[],
4098 const struct rte_flow_action actions[],
4099 bool external, struct rte_flow_error *error)
4100 {
4101 int ret;
4102
4103 ret = flow_create_split_meter(dev, flow, attr, items,
4104 actions, external, error);
4105 assert(ret <= 0);
4106 return ret;
4107 }
4108
4109 /**
4110 * Create a flow and add it to @p list.
4111 *
4112 * @param dev
4113 * Pointer to Ethernet device.
4114 * @param list
4115 * Pointer to a TAILQ flow list. If this parameter is NULL,
4116 * no list insertion occurs; the flow is only created and
4117 * it is the caller's responsibility to track the
4118 * created flow.
4119 * @param[in] attr
4120 * Flow rule attributes.
4121 * @param[in] items
4122 * Pattern specification (list terminated by the END pattern item).
4123 * @param[in] actions
4124 * Associated actions (list terminated by the END action).
4125 * @param[in] external
4126 * This flow rule is created by request external to PMD.
4127 * @param[out] error
4128 * Perform verbose error reporting if not NULL.
4129 *
4130 * @return
4131 * A flow on success, NULL otherwise and rte_errno is set.
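*
* Typical callers in this file: mlx5_flow_create() passes &priv->flows
* with external set to true, while internal control flows (for example
* mlx5_ctrl_flow_vlan() and mlx5_flow_create_esw_table_zero_flow())
* pass &priv->ctrl_flows with external set to false.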
4132 */ 4133 static struct rte_flow * 4134 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, 4135 const struct rte_flow_attr *attr, 4136 const struct rte_flow_item items[], 4137 const struct rte_flow_action actions[], 4138 bool external, struct rte_flow_error *error) 4139 { 4140 struct mlx5_priv *priv = dev->data->dev_private; 4141 struct rte_flow *flow = NULL; 4142 struct mlx5_flow *dev_flow; 4143 const struct rte_flow_action_rss *rss; 4144 union { 4145 struct rte_flow_expand_rss buf; 4146 uint8_t buffer[2048]; 4147 } expand_buffer; 4148 union { 4149 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; 4150 uint8_t buffer[2048]; 4151 } actions_rx; 4152 union { 4153 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; 4154 uint8_t buffer[2048]; 4155 } actions_hairpin_tx; 4156 union { 4157 struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS]; 4158 uint8_t buffer[2048]; 4159 } items_tx; 4160 struct rte_flow_expand_rss *buf = &expand_buffer.buf; 4161 const struct rte_flow_action *p_actions_rx = actions; 4162 int ret; 4163 uint32_t i; 4164 uint32_t flow_size; 4165 int hairpin_flow = 0; 4166 uint32_t hairpin_id = 0; 4167 struct rte_flow_attr attr_tx = { .priority = 0 }; 4168 4169 hairpin_flow = flow_check_hairpin_split(dev, attr, actions); 4170 if (hairpin_flow > 0) { 4171 if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) { 4172 rte_errno = EINVAL; 4173 return NULL; 4174 } 4175 flow_hairpin_split(dev, actions, actions_rx.actions, 4176 actions_hairpin_tx.actions, items_tx.items, 4177 &hairpin_id); 4178 p_actions_rx = actions_rx.actions; 4179 } 4180 ret = flow_drv_validate(dev, attr, items, p_actions_rx, external, 4181 error); 4182 if (ret < 0) 4183 goto error_before_flow; 4184 flow_size = sizeof(struct rte_flow); 4185 rss = flow_get_rss_action(p_actions_rx); 4186 if (rss) 4187 flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t), 4188 sizeof(void *)); 4189 else 4190 flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *)); 4191 flow = rte_calloc(__func__, 1, flow_size, 0); 4192 if (!flow) { 4193 rte_errno = ENOMEM; 4194 goto error_before_flow; 4195 } 4196 flow->drv_type = flow_get_drv_type(dev, attr); 4197 if (hairpin_id != 0) 4198 flow->hairpin_flow_id = hairpin_id; 4199 assert(flow->drv_type > MLX5_FLOW_TYPE_MIN && 4200 flow->drv_type < MLX5_FLOW_TYPE_MAX); 4201 flow->rss.queue = (void *)(flow + 1); 4202 if (rss) { 4203 /* 4204 * The following information is required by 4205 * mlx5_flow_hashfields_adjust() in advance. 4206 */ 4207 flow->rss.level = rss->level; 4208 /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */ 4209 flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types; 4210 } 4211 LIST_INIT(&flow->dev_flows); 4212 if (rss && rss->types) { 4213 unsigned int graph_root; 4214 4215 graph_root = find_graph_root(items, rss->level); 4216 ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer), 4217 items, rss->types, 4218 mlx5_support_expansion, 4219 graph_root); 4220 assert(ret > 0 && 4221 (unsigned int)ret < sizeof(expand_buffer.buffer)); 4222 } else { 4223 buf->entries = 1; 4224 buf->entry[0].pattern = (void *)(uintptr_t)items; 4225 } 4226 for (i = 0; i < buf->entries; ++i) { 4227 /* 4228 * The splitter may create multiple dev_flows, 4229 * depending on configuration. In the simplest 4230 * case it just creates unmodified original flow. 4231 */ 4232 ret = flow_create_split_outer(dev, flow, attr, 4233 buf->entry[i].pattern, 4234 p_actions_rx, external, 4235 error); 4236 if (ret < 0) 4237 goto error; 4238 } 4239 /* Create the tx flow. 
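* This is the Tx side of a hairpin split: build an egress subflow in the
* MLX5_HAIRPIN_TX_TABLE group from the items/actions prepared by
* flow_hairpin_split() above.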
*/ 4240 if (hairpin_flow) { 4241 attr_tx.group = MLX5_HAIRPIN_TX_TABLE; 4242 attr_tx.ingress = 0; 4243 attr_tx.egress = 1; 4244 dev_flow = flow_drv_prepare(flow, &attr_tx, items_tx.items, 4245 actions_hairpin_tx.actions, error); 4246 if (!dev_flow) 4247 goto error; 4248 dev_flow->flow = flow; 4249 dev_flow->external = 0; 4250 LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); 4251 ret = flow_drv_translate(dev, dev_flow, &attr_tx, 4252 items_tx.items, 4253 actions_hairpin_tx.actions, error); 4254 if (ret < 0) 4255 goto error; 4256 } 4257 /* 4258 * Update the metadata register copy table. If extensive 4259 * metadata feature is enabled and registers are supported 4260 * we might create the extra rte_flow for each unique 4261 * MARK/FLAG action ID. 4262 * 4263 * The table is updated for ingress Flows only, because 4264 * the egress Flows belong to the different device and 4265 * copy table should be updated in peer NIC Rx domain. 4266 */ 4267 if (attr->ingress && 4268 (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) { 4269 ret = flow_mreg_update_copy_table(dev, flow, actions, error); 4270 if (ret) 4271 goto error; 4272 } 4273 if (dev->data->dev_started) { 4274 ret = flow_drv_apply(dev, flow, error); 4275 if (ret < 0) 4276 goto error; 4277 } 4278 if (list) 4279 TAILQ_INSERT_TAIL(list, flow, next); 4280 flow_rxq_flags_set(dev, flow); 4281 return flow; 4282 error_before_flow: 4283 if (hairpin_id) 4284 mlx5_flow_id_release(priv->sh->flow_id_pool, 4285 hairpin_id); 4286 return NULL; 4287 error: 4288 assert(flow); 4289 flow_mreg_del_copy_action(dev, flow); 4290 ret = rte_errno; /* Save rte_errno before cleanup. */ 4291 if (flow->hairpin_flow_id) 4292 mlx5_flow_id_release(priv->sh->flow_id_pool, 4293 flow->hairpin_flow_id); 4294 assert(flow); 4295 flow_drv_destroy(dev, flow); 4296 rte_free(flow); 4297 rte_errno = ret; /* Restore rte_errno. */ 4298 return NULL; 4299 } 4300 4301 /** 4302 * Create a dedicated flow rule on e-switch table 0 (root table), to direct all 4303 * incoming packets to table 1. 4304 * 4305 * Other flow rules, requested for group n, will be created in 4306 * e-switch table n+1. 4307 * Jump action to e-switch group n will be created to group n+1. 4308 * 4309 * Used when working in switchdev mode, to utilise advantages of table 1 4310 * and above. 4311 * 4312 * @param dev 4313 * Pointer to Ethernet device. 4314 * 4315 * @return 4316 * Pointer to flow on success, NULL otherwise and rte_errno is set. 4317 */ 4318 struct rte_flow * 4319 mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev) 4320 { 4321 const struct rte_flow_attr attr = { 4322 .group = 0, 4323 .priority = 0, 4324 .ingress = 1, 4325 .egress = 0, 4326 .transfer = 1, 4327 }; 4328 const struct rte_flow_item pattern = { 4329 .type = RTE_FLOW_ITEM_TYPE_END, 4330 }; 4331 struct rte_flow_action_jump jump = { 4332 .group = 1, 4333 }; 4334 const struct rte_flow_action actions[] = { 4335 { 4336 .type = RTE_FLOW_ACTION_TYPE_JUMP, 4337 .conf = &jump, 4338 }, 4339 { 4340 .type = RTE_FLOW_ACTION_TYPE_END, 4341 }, 4342 }; 4343 struct mlx5_priv *priv = dev->data->dev_private; 4344 struct rte_flow_error error; 4345 4346 return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern, 4347 actions, false, &error); 4348 } 4349 4350 /** 4351 * Create a flow. 
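*
* A minimal application-side sketch of how this entry point is reached
* through the generic API (illustrative only; the port id, queue index
* and pattern are placeholders, not values used by the PMD):
*
* @code
* struct rte_flow_attr attr = { .ingress = 1 };
* struct rte_flow_action_queue queue = { .index = 0 };
* struct rte_flow_item pattern[] = {
*	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
*	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
*	{ .type = RTE_FLOW_ITEM_TYPE_END },
* };
* struct rte_flow_action actions[] = {
*	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
*	{ .type = RTE_FLOW_ACTION_TYPE_END },
* };
* struct rte_flow_error err;
* struct rte_flow *f = rte_flow_create(port_id, &attr, pattern, actions, &err);
* @endcode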
4352 * 4353 * @see rte_flow_create() 4354 * @see rte_flow_ops 4355 */ 4356 struct rte_flow * 4357 mlx5_flow_create(struct rte_eth_dev *dev, 4358 const struct rte_flow_attr *attr, 4359 const struct rte_flow_item items[], 4360 const struct rte_flow_action actions[], 4361 struct rte_flow_error *error) 4362 { 4363 struct mlx5_priv *priv = dev->data->dev_private; 4364 4365 return flow_list_create(dev, &priv->flows, 4366 attr, items, actions, true, error); 4367 } 4368 4369 /** 4370 * Destroy a flow in a list. 4371 * 4372 * @param dev 4373 * Pointer to Ethernet device. 4374 * @param list 4375 * Pointer to a TAILQ flow list. If this parameter NULL, 4376 * there is no flow removal from the list. 4377 * @param[in] flow 4378 * Flow to destroy. 4379 */ 4380 static void 4381 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, 4382 struct rte_flow *flow) 4383 { 4384 struct mlx5_priv *priv = dev->data->dev_private; 4385 4386 /* 4387 * Update RX queue flags only if port is started, otherwise it is 4388 * already clean. 4389 */ 4390 if (dev->data->dev_started) 4391 flow_rxq_flags_trim(dev, flow); 4392 if (flow->hairpin_flow_id) 4393 mlx5_flow_id_release(priv->sh->flow_id_pool, 4394 flow->hairpin_flow_id); 4395 flow_drv_destroy(dev, flow); 4396 if (list) 4397 TAILQ_REMOVE(list, flow, next); 4398 flow_mreg_del_copy_action(dev, flow); 4399 rte_free(flow->fdir); 4400 rte_free(flow); 4401 } 4402 4403 /** 4404 * Destroy all flows. 4405 * 4406 * @param dev 4407 * Pointer to Ethernet device. 4408 * @param list 4409 * Pointer to a TAILQ flow list. 4410 */ 4411 void 4412 mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list) 4413 { 4414 while (!TAILQ_EMPTY(list)) { 4415 struct rte_flow *flow; 4416 4417 flow = TAILQ_FIRST(list); 4418 flow_list_destroy(dev, list, flow); 4419 } 4420 } 4421 4422 /** 4423 * Remove all flows. 4424 * 4425 * @param dev 4426 * Pointer to Ethernet device. 4427 * @param list 4428 * Pointer to a TAILQ flow list. 4429 */ 4430 void 4431 mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) 4432 { 4433 struct rte_flow *flow; 4434 4435 TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) { 4436 flow_drv_remove(dev, flow); 4437 flow_mreg_stop_copy_action(dev, flow); 4438 } 4439 flow_mreg_del_default_copy_action(dev); 4440 flow_rxq_flags_clear(dev); 4441 } 4442 4443 /** 4444 * Add all flows. 4445 * 4446 * @param dev 4447 * Pointer to Ethernet device. 4448 * @param list 4449 * Pointer to a TAILQ flow list. 4450 * 4451 * @return 4452 * 0 on success, a negative errno value otherwise and rte_errno is set. 4453 */ 4454 int 4455 mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) 4456 { 4457 struct rte_flow *flow; 4458 struct rte_flow_error error; 4459 int ret = 0; 4460 4461 /* Make sure default copy action (reg_c[0] -> reg_b) is created. */ 4462 ret = flow_mreg_add_default_copy_action(dev, &error); 4463 if (ret < 0) 4464 return -rte_errno; 4465 /* Apply Flows created by application. */ 4466 TAILQ_FOREACH(flow, list, next) { 4467 ret = flow_mreg_start_copy_action(dev, flow); 4468 if (ret < 0) 4469 goto error; 4470 ret = flow_drv_apply(dev, flow, &error); 4471 if (ret < 0) 4472 goto error; 4473 flow_rxq_flags_set(dev, flow); 4474 } 4475 return 0; 4476 error: 4477 ret = rte_errno; /* Save rte_errno before cleanup. */ 4478 mlx5_flow_stop(dev, list); 4479 rte_errno = ret; /* Restore rte_errno. */ 4480 return -rte_errno; 4481 } 4482 4483 /** 4484 * Verify the flow list is empty 4485 * 4486 * @param dev 4487 * Pointer to Ethernet device. 
4488 * 4489 * @return the number of flows not released. 4490 */ 4491 int 4492 mlx5_flow_verify(struct rte_eth_dev *dev) 4493 { 4494 struct mlx5_priv *priv = dev->data->dev_private; 4495 struct rte_flow *flow; 4496 int ret = 0; 4497 4498 TAILQ_FOREACH(flow, &priv->flows, next) { 4499 DRV_LOG(DEBUG, "port %u flow %p still referenced", 4500 dev->data->port_id, (void *)flow); 4501 ++ret; 4502 } 4503 return ret; 4504 } 4505 4506 /** 4507 * Enable default hairpin egress flow. 4508 * 4509 * @param dev 4510 * Pointer to Ethernet device. 4511 * @param queue 4512 * The queue index. 4513 * 4514 * @return 4515 * 0 on success, a negative errno value otherwise and rte_errno is set. 4516 */ 4517 int 4518 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, 4519 uint32_t queue) 4520 { 4521 struct mlx5_priv *priv = dev->data->dev_private; 4522 const struct rte_flow_attr attr = { 4523 .egress = 1, 4524 .priority = 0, 4525 }; 4526 struct mlx5_rte_flow_item_tx_queue queue_spec = { 4527 .queue = queue, 4528 }; 4529 struct mlx5_rte_flow_item_tx_queue queue_mask = { 4530 .queue = UINT32_MAX, 4531 }; 4532 struct rte_flow_item items[] = { 4533 { 4534 .type = MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE, 4535 .spec = &queue_spec, 4536 .last = NULL, 4537 .mask = &queue_mask, 4538 }, 4539 { 4540 .type = RTE_FLOW_ITEM_TYPE_END, 4541 }, 4542 }; 4543 struct rte_flow_action_jump jump = { 4544 .group = MLX5_HAIRPIN_TX_TABLE, 4545 }; 4546 struct rte_flow_action actions[2]; 4547 struct rte_flow *flow; 4548 struct rte_flow_error error; 4549 4550 actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP; 4551 actions[0].conf = &jump; 4552 actions[1].type = RTE_FLOW_ACTION_TYPE_END; 4553 flow = flow_list_create(dev, &priv->ctrl_flows, 4554 &attr, items, actions, false, &error); 4555 if (!flow) { 4556 DRV_LOG(DEBUG, 4557 "Failed to create ctrl flow: rte_errno(%d)," 4558 " type(%d), message(%s)", 4559 rte_errno, error.type, 4560 error.message ? error.message : " (no stated reason)"); 4561 return -rte_errno; 4562 } 4563 return 0; 4564 } 4565 4566 /** 4567 * Enable a control flow configured from the control plane. 4568 * 4569 * @param dev 4570 * Pointer to Ethernet device. 4571 * @param eth_spec 4572 * An Ethernet flow spec to apply. 4573 * @param eth_mask 4574 * An Ethernet flow mask to apply. 4575 * @param vlan_spec 4576 * A VLAN flow spec to apply. 4577 * @param vlan_mask 4578 * A VLAN flow mask to apply. 4579 * 4580 * @return 4581 * 0 on success, a negative errno value otherwise and rte_errno is set. 4582 */ 4583 int 4584 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, 4585 struct rte_flow_item_eth *eth_spec, 4586 struct rte_flow_item_eth *eth_mask, 4587 struct rte_flow_item_vlan *vlan_spec, 4588 struct rte_flow_item_vlan *vlan_mask) 4589 { 4590 struct mlx5_priv *priv = dev->data->dev_private; 4591 const struct rte_flow_attr attr = { 4592 .ingress = 1, 4593 .priority = MLX5_FLOW_PRIO_RSVD, 4594 }; 4595 struct rte_flow_item items[] = { 4596 { 4597 .type = RTE_FLOW_ITEM_TYPE_ETH, 4598 .spec = eth_spec, 4599 .last = NULL, 4600 .mask = eth_mask, 4601 }, 4602 { 4603 .type = (vlan_spec) ? 
RTE_FLOW_ITEM_TYPE_VLAN : 4604 RTE_FLOW_ITEM_TYPE_END, 4605 .spec = vlan_spec, 4606 .last = NULL, 4607 .mask = vlan_mask, 4608 }, 4609 { 4610 .type = RTE_FLOW_ITEM_TYPE_END, 4611 }, 4612 }; 4613 uint16_t queue[priv->reta_idx_n]; 4614 struct rte_flow_action_rss action_rss = { 4615 .func = RTE_ETH_HASH_FUNCTION_DEFAULT, 4616 .level = 0, 4617 .types = priv->rss_conf.rss_hf, 4618 .key_len = priv->rss_conf.rss_key_len, 4619 .queue_num = priv->reta_idx_n, 4620 .key = priv->rss_conf.rss_key, 4621 .queue = queue, 4622 }; 4623 struct rte_flow_action actions[] = { 4624 { 4625 .type = RTE_FLOW_ACTION_TYPE_RSS, 4626 .conf = &action_rss, 4627 }, 4628 { 4629 .type = RTE_FLOW_ACTION_TYPE_END, 4630 }, 4631 }; 4632 struct rte_flow *flow; 4633 struct rte_flow_error error; 4634 unsigned int i; 4635 4636 if (!priv->reta_idx_n || !priv->rxqs_n) { 4637 return 0; 4638 } 4639 for (i = 0; i != priv->reta_idx_n; ++i) 4640 queue[i] = (*priv->reta_idx)[i]; 4641 flow = flow_list_create(dev, &priv->ctrl_flows, 4642 &attr, items, actions, false, &error); 4643 if (!flow) 4644 return -rte_errno; 4645 return 0; 4646 } 4647 4648 /** 4649 * Enable a flow control configured from the control plane. 4650 * 4651 * @param dev 4652 * Pointer to Ethernet device. 4653 * @param eth_spec 4654 * An Ethernet flow spec to apply. 4655 * @param eth_mask 4656 * An Ethernet flow mask to apply. 4657 * 4658 * @return 4659 * 0 on success, a negative errno value otherwise and rte_errno is set. 4660 */ 4661 int 4662 mlx5_ctrl_flow(struct rte_eth_dev *dev, 4663 struct rte_flow_item_eth *eth_spec, 4664 struct rte_flow_item_eth *eth_mask) 4665 { 4666 return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL); 4667 } 4668 4669 /** 4670 * Destroy a flow. 4671 * 4672 * @see rte_flow_destroy() 4673 * @see rte_flow_ops 4674 */ 4675 int 4676 mlx5_flow_destroy(struct rte_eth_dev *dev, 4677 struct rte_flow *flow, 4678 struct rte_flow_error *error __rte_unused) 4679 { 4680 struct mlx5_priv *priv = dev->data->dev_private; 4681 4682 flow_list_destroy(dev, &priv->flows, flow); 4683 return 0; 4684 } 4685 4686 /** 4687 * Destroy all flows. 4688 * 4689 * @see rte_flow_flush() 4690 * @see rte_flow_ops 4691 */ 4692 int 4693 mlx5_flow_flush(struct rte_eth_dev *dev, 4694 struct rte_flow_error *error __rte_unused) 4695 { 4696 struct mlx5_priv *priv = dev->data->dev_private; 4697 4698 mlx5_flow_list_flush(dev, &priv->flows); 4699 return 0; 4700 } 4701 4702 /** 4703 * Isolated mode. 4704 * 4705 * @see rte_flow_isolate() 4706 * @see rte_flow_ops 4707 */ 4708 int 4709 mlx5_flow_isolate(struct rte_eth_dev *dev, 4710 int enable, 4711 struct rte_flow_error *error) 4712 { 4713 struct mlx5_priv *priv = dev->data->dev_private; 4714 4715 if (dev->data->dev_started) { 4716 rte_flow_error_set(error, EBUSY, 4717 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 4718 NULL, 4719 "port must be stopped first"); 4720 return -rte_errno; 4721 } 4722 priv->isolated = !!enable; 4723 if (enable) 4724 dev->dev_ops = &mlx5_dev_ops_isolate; 4725 else 4726 dev->dev_ops = &mlx5_dev_ops; 4727 return 0; 4728 } 4729 4730 /** 4731 * Query a flow. 
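*
* A hedged application-side sketch for querying a COUNT action via the
* generic rte_flow_query() entry point (port_id and flow are placeholders):
*
* @code
* struct rte_flow_query_count count = { .reset = 0 };
* struct rte_flow_action count_action = {
*	.type = RTE_FLOW_ACTION_TYPE_COUNT,
* };
* struct rte_flow_error err;
*
* if (!rte_flow_query(port_id, flow, &count_action, &count, &err))
*	printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n", count.hits, count.bytes);
* @endcode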
4732 * 4733 * @see rte_flow_query() 4734 * @see rte_flow_ops 4735 */ 4736 static int 4737 flow_drv_query(struct rte_eth_dev *dev, 4738 struct rte_flow *flow, 4739 const struct rte_flow_action *actions, 4740 void *data, 4741 struct rte_flow_error *error) 4742 { 4743 const struct mlx5_flow_driver_ops *fops; 4744 enum mlx5_flow_drv_type ftype = flow->drv_type; 4745 4746 assert(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX); 4747 fops = flow_get_drv_ops(ftype); 4748 4749 return fops->query(dev, flow, actions, data, error); 4750 } 4751 4752 /** 4753 * Query a flow. 4754 * 4755 * @see rte_flow_query() 4756 * @see rte_flow_ops 4757 */ 4758 int 4759 mlx5_flow_query(struct rte_eth_dev *dev, 4760 struct rte_flow *flow, 4761 const struct rte_flow_action *actions, 4762 void *data, 4763 struct rte_flow_error *error) 4764 { 4765 int ret; 4766 4767 ret = flow_drv_query(dev, flow, actions, data, error); 4768 if (ret < 0) 4769 return ret; 4770 return 0; 4771 } 4772 4773 /** 4774 * Convert a flow director filter to a generic flow. 4775 * 4776 * @param dev 4777 * Pointer to Ethernet device. 4778 * @param fdir_filter 4779 * Flow director filter to add. 4780 * @param attributes 4781 * Generic flow parameters structure. 4782 * 4783 * @return 4784 * 0 on success, a negative errno value otherwise and rte_errno is set. 4785 */ 4786 static int 4787 flow_fdir_filter_convert(struct rte_eth_dev *dev, 4788 const struct rte_eth_fdir_filter *fdir_filter, 4789 struct mlx5_fdir *attributes) 4790 { 4791 struct mlx5_priv *priv = dev->data->dev_private; 4792 const struct rte_eth_fdir_input *input = &fdir_filter->input; 4793 const struct rte_eth_fdir_masks *mask = 4794 &dev->data->dev_conf.fdir_conf.mask; 4795 4796 /* Validate queue number. */ 4797 if (fdir_filter->action.rx_queue >= priv->rxqs_n) { 4798 DRV_LOG(ERR, "port %u invalid queue number %d", 4799 dev->data->port_id, fdir_filter->action.rx_queue); 4800 rte_errno = EINVAL; 4801 return -rte_errno; 4802 } 4803 attributes->attr.ingress = 1; 4804 attributes->items[0] = (struct rte_flow_item) { 4805 .type = RTE_FLOW_ITEM_TYPE_ETH, 4806 .spec = &attributes->l2, 4807 .mask = &attributes->l2_mask, 4808 }; 4809 switch (fdir_filter->action.behavior) { 4810 case RTE_ETH_FDIR_ACCEPT: 4811 attributes->actions[0] = (struct rte_flow_action){ 4812 .type = RTE_FLOW_ACTION_TYPE_QUEUE, 4813 .conf = &attributes->queue, 4814 }; 4815 break; 4816 case RTE_ETH_FDIR_REJECT: 4817 attributes->actions[0] = (struct rte_flow_action){ 4818 .type = RTE_FLOW_ACTION_TYPE_DROP, 4819 }; 4820 break; 4821 default: 4822 DRV_LOG(ERR, "port %u invalid behavior %d", 4823 dev->data->port_id, 4824 fdir_filter->action.behavior); 4825 rte_errno = ENOTSUP; 4826 return -rte_errno; 4827 } 4828 attributes->queue.index = fdir_filter->action.rx_queue; 4829 /* Handle L3. 
*/ 4830 switch (fdir_filter->input.flow_type) { 4831 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 4832 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 4833 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 4834 attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){ 4835 .src_addr = input->flow.ip4_flow.src_ip, 4836 .dst_addr = input->flow.ip4_flow.dst_ip, 4837 .time_to_live = input->flow.ip4_flow.ttl, 4838 .type_of_service = input->flow.ip4_flow.tos, 4839 }; 4840 attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){ 4841 .src_addr = mask->ipv4_mask.src_ip, 4842 .dst_addr = mask->ipv4_mask.dst_ip, 4843 .time_to_live = mask->ipv4_mask.ttl, 4844 .type_of_service = mask->ipv4_mask.tos, 4845 .next_proto_id = mask->ipv4_mask.proto, 4846 }; 4847 attributes->items[1] = (struct rte_flow_item){ 4848 .type = RTE_FLOW_ITEM_TYPE_IPV4, 4849 .spec = &attributes->l3, 4850 .mask = &attributes->l3_mask, 4851 }; 4852 break; 4853 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 4854 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 4855 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 4856 attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){ 4857 .hop_limits = input->flow.ipv6_flow.hop_limits, 4858 .proto = input->flow.ipv6_flow.proto, 4859 }; 4860 4861 memcpy(attributes->l3.ipv6.hdr.src_addr, 4862 input->flow.ipv6_flow.src_ip, 4863 RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); 4864 memcpy(attributes->l3.ipv6.hdr.dst_addr, 4865 input->flow.ipv6_flow.dst_ip, 4866 RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); 4867 memcpy(attributes->l3_mask.ipv6.hdr.src_addr, 4868 mask->ipv6_mask.src_ip, 4869 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); 4870 memcpy(attributes->l3_mask.ipv6.hdr.dst_addr, 4871 mask->ipv6_mask.dst_ip, 4872 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); 4873 attributes->items[1] = (struct rte_flow_item){ 4874 .type = RTE_FLOW_ITEM_TYPE_IPV6, 4875 .spec = &attributes->l3, 4876 .mask = &attributes->l3_mask, 4877 }; 4878 break; 4879 default: 4880 DRV_LOG(ERR, "port %u invalid flow type%d", 4881 dev->data->port_id, fdir_filter->input.flow_type); 4882 rte_errno = ENOTSUP; 4883 return -rte_errno; 4884 } 4885 /* Handle L4. 
*/ 4886 switch (fdir_filter->input.flow_type) { 4887 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 4888 attributes->l4.udp.hdr = (struct rte_udp_hdr){ 4889 .src_port = input->flow.udp4_flow.src_port, 4890 .dst_port = input->flow.udp4_flow.dst_port, 4891 }; 4892 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ 4893 .src_port = mask->src_port_mask, 4894 .dst_port = mask->dst_port_mask, 4895 }; 4896 attributes->items[2] = (struct rte_flow_item){ 4897 .type = RTE_FLOW_ITEM_TYPE_UDP, 4898 .spec = &attributes->l4, 4899 .mask = &attributes->l4_mask, 4900 }; 4901 break; 4902 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 4903 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ 4904 .src_port = input->flow.tcp4_flow.src_port, 4905 .dst_port = input->flow.tcp4_flow.dst_port, 4906 }; 4907 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ 4908 .src_port = mask->src_port_mask, 4909 .dst_port = mask->dst_port_mask, 4910 }; 4911 attributes->items[2] = (struct rte_flow_item){ 4912 .type = RTE_FLOW_ITEM_TYPE_TCP, 4913 .spec = &attributes->l4, 4914 .mask = &attributes->l4_mask, 4915 }; 4916 break; 4917 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 4918 attributes->l4.udp.hdr = (struct rte_udp_hdr){ 4919 .src_port = input->flow.udp6_flow.src_port, 4920 .dst_port = input->flow.udp6_flow.dst_port, 4921 }; 4922 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ 4923 .src_port = mask->src_port_mask, 4924 .dst_port = mask->dst_port_mask, 4925 }; 4926 attributes->items[2] = (struct rte_flow_item){ 4927 .type = RTE_FLOW_ITEM_TYPE_UDP, 4928 .spec = &attributes->l4, 4929 .mask = &attributes->l4_mask, 4930 }; 4931 break; 4932 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 4933 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ 4934 .src_port = input->flow.tcp6_flow.src_port, 4935 .dst_port = input->flow.tcp6_flow.dst_port, 4936 }; 4937 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ 4938 .src_port = mask->src_port_mask, 4939 .dst_port = mask->dst_port_mask, 4940 }; 4941 attributes->items[2] = (struct rte_flow_item){ 4942 .type = RTE_FLOW_ITEM_TYPE_TCP, 4943 .spec = &attributes->l4, 4944 .mask = &attributes->l4_mask, 4945 }; 4946 break; 4947 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 4948 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 4949 break; 4950 default: 4951 DRV_LOG(ERR, "port %u invalid flow type%d", 4952 dev->data->port_id, fdir_filter->input.flow_type); 4953 rte_errno = ENOTSUP; 4954 return -rte_errno; 4955 } 4956 return 0; 4957 } 4958 4959 #define FLOW_FDIR_CMP(f1, f2, fld) \ 4960 memcmp(&(f1)->fld, &(f2)->fld, sizeof(f1->fld)) 4961 4962 /** 4963 * Compare two FDIR flows. If items and actions are identical, the two flows are 4964 * regarded as same. 4965 * 4966 * @param dev 4967 * Pointer to Ethernet device. 4968 * @param f1 4969 * FDIR flow to compare. 4970 * @param f2 4971 * FDIR flow to compare. 4972 * 4973 * @return 4974 * Zero on match, 1 otherwise. 4975 */ 4976 static int 4977 flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2) 4978 { 4979 if (FLOW_FDIR_CMP(f1, f2, attr) || 4980 FLOW_FDIR_CMP(f1, f2, l2) || 4981 FLOW_FDIR_CMP(f1, f2, l2_mask) || 4982 FLOW_FDIR_CMP(f1, f2, l3) || 4983 FLOW_FDIR_CMP(f1, f2, l3_mask) || 4984 FLOW_FDIR_CMP(f1, f2, l4) || 4985 FLOW_FDIR_CMP(f1, f2, l4_mask) || 4986 FLOW_FDIR_CMP(f1, f2, actions[0].type)) 4987 return 1; 4988 if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE && 4989 FLOW_FDIR_CMP(f1, f2, queue)) 4990 return 1; 4991 return 0; 4992 } 4993 4994 /** 4995 * Search device flow list to find out a matched FDIR flow. 4996 * 4997 * @param dev 4998 * Pointer to Ethernet device. 
4999 * @param fdir_flow 5000 * FDIR flow to lookup. 5001 * 5002 * @return 5003 * Pointer of flow if found, NULL otherwise. 5004 */ 5005 static struct rte_flow * 5006 flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow) 5007 { 5008 struct mlx5_priv *priv = dev->data->dev_private; 5009 struct rte_flow *flow = NULL; 5010 5011 assert(fdir_flow); 5012 TAILQ_FOREACH(flow, &priv->flows, next) { 5013 if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) { 5014 DRV_LOG(DEBUG, "port %u found FDIR flow %p", 5015 dev->data->port_id, (void *)flow); 5016 break; 5017 } 5018 } 5019 return flow; 5020 } 5021 5022 /** 5023 * Add new flow director filter and store it in list. 5024 * 5025 * @param dev 5026 * Pointer to Ethernet device. 5027 * @param fdir_filter 5028 * Flow director filter to add. 5029 * 5030 * @return 5031 * 0 on success, a negative errno value otherwise and rte_errno is set. 5032 */ 5033 static int 5034 flow_fdir_filter_add(struct rte_eth_dev *dev, 5035 const struct rte_eth_fdir_filter *fdir_filter) 5036 { 5037 struct mlx5_priv *priv = dev->data->dev_private; 5038 struct mlx5_fdir *fdir_flow; 5039 struct rte_flow *flow; 5040 int ret; 5041 5042 fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0); 5043 if (!fdir_flow) { 5044 rte_errno = ENOMEM; 5045 return -rte_errno; 5046 } 5047 ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow); 5048 if (ret) 5049 goto error; 5050 flow = flow_fdir_filter_lookup(dev, fdir_flow); 5051 if (flow) { 5052 rte_errno = EEXIST; 5053 goto error; 5054 } 5055 flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr, 5056 fdir_flow->items, fdir_flow->actions, true, 5057 NULL); 5058 if (!flow) 5059 goto error; 5060 assert(!flow->fdir); 5061 flow->fdir = fdir_flow; 5062 DRV_LOG(DEBUG, "port %u created FDIR flow %p", 5063 dev->data->port_id, (void *)flow); 5064 return 0; 5065 error: 5066 rte_free(fdir_flow); 5067 return -rte_errno; 5068 } 5069 5070 /** 5071 * Delete specific filter. 5072 * 5073 * @param dev 5074 * Pointer to Ethernet device. 5075 * @param fdir_filter 5076 * Filter to be deleted. 5077 * 5078 * @return 5079 * 0 on success, a negative errno value otherwise and rte_errno is set. 5080 */ 5081 static int 5082 flow_fdir_filter_delete(struct rte_eth_dev *dev, 5083 const struct rte_eth_fdir_filter *fdir_filter) 5084 { 5085 struct mlx5_priv *priv = dev->data->dev_private; 5086 struct rte_flow *flow; 5087 struct mlx5_fdir fdir_flow = { 5088 .attr.group = 0, 5089 }; 5090 int ret; 5091 5092 ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow); 5093 if (ret) 5094 return -rte_errno; 5095 flow = flow_fdir_filter_lookup(dev, &fdir_flow); 5096 if (!flow) { 5097 rte_errno = ENOENT; 5098 return -rte_errno; 5099 } 5100 flow_list_destroy(dev, &priv->flows, flow); 5101 DRV_LOG(DEBUG, "port %u deleted FDIR flow %p", 5102 dev->data->port_id, (void *)flow); 5103 return 0; 5104 } 5105 5106 /** 5107 * Update queue for specific filter. 5108 * 5109 * @param dev 5110 * Pointer to Ethernet device. 5111 * @param fdir_filter 5112 * Filter to be updated. 5113 * 5114 * @return 5115 * 0 on success, a negative errno value otherwise and rte_errno is set. 5116 */ 5117 static int 5118 flow_fdir_filter_update(struct rte_eth_dev *dev, 5119 const struct rte_eth_fdir_filter *fdir_filter) 5120 { 5121 int ret; 5122 5123 ret = flow_fdir_filter_delete(dev, fdir_filter); 5124 if (ret) 5125 return ret; 5126 return flow_fdir_filter_add(dev, fdir_filter); 5127 } 5128 5129 /** 5130 * Flush all filters. 
5131 * 5132 * @param dev 5133 * Pointer to Ethernet device. 5134 */ 5135 static void 5136 flow_fdir_filter_flush(struct rte_eth_dev *dev) 5137 { 5138 struct mlx5_priv *priv = dev->data->dev_private; 5139 5140 mlx5_flow_list_flush(dev, &priv->flows); 5141 } 5142 5143 /** 5144 * Get flow director information. 5145 * 5146 * @param dev 5147 * Pointer to Ethernet device. 5148 * @param[out] fdir_info 5149 * Resulting flow director information. 5150 */ 5151 static void 5152 flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) 5153 { 5154 struct rte_eth_fdir_masks *mask = 5155 &dev->data->dev_conf.fdir_conf.mask; 5156 5157 fdir_info->mode = dev->data->dev_conf.fdir_conf.mode; 5158 fdir_info->guarant_spc = 0; 5159 rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask)); 5160 fdir_info->max_flexpayload = 0; 5161 fdir_info->flow_types_mask[0] = 0; 5162 fdir_info->flex_payload_unit = 0; 5163 fdir_info->max_flex_payload_segment_num = 0; 5164 fdir_info->flex_payload_limit = 0; 5165 memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf)); 5166 } 5167 5168 /** 5169 * Deal with flow director operations. 5170 * 5171 * @param dev 5172 * Pointer to Ethernet device. 5173 * @param filter_op 5174 * Operation to perform. 5175 * @param arg 5176 * Pointer to operation-specific structure. 5177 * 5178 * @return 5179 * 0 on success, a negative errno value otherwise and rte_errno is set. 5180 */ 5181 static int 5182 flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, 5183 void *arg) 5184 { 5185 enum rte_fdir_mode fdir_mode = 5186 dev->data->dev_conf.fdir_conf.mode; 5187 5188 if (filter_op == RTE_ETH_FILTER_NOP) 5189 return 0; 5190 if (fdir_mode != RTE_FDIR_MODE_PERFECT && 5191 fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 5192 DRV_LOG(ERR, "port %u flow director mode %d not supported", 5193 dev->data->port_id, fdir_mode); 5194 rte_errno = EINVAL; 5195 return -rte_errno; 5196 } 5197 switch (filter_op) { 5198 case RTE_ETH_FILTER_ADD: 5199 return flow_fdir_filter_add(dev, arg); 5200 case RTE_ETH_FILTER_UPDATE: 5201 return flow_fdir_filter_update(dev, arg); 5202 case RTE_ETH_FILTER_DELETE: 5203 return flow_fdir_filter_delete(dev, arg); 5204 case RTE_ETH_FILTER_FLUSH: 5205 flow_fdir_filter_flush(dev); 5206 break; 5207 case RTE_ETH_FILTER_INFO: 5208 flow_fdir_info_get(dev, arg); 5209 break; 5210 default: 5211 DRV_LOG(DEBUG, "port %u unknown operation %u", 5212 dev->data->port_id, filter_op); 5213 rte_errno = EINVAL; 5214 return -rte_errno; 5215 } 5216 return 0; 5217 } 5218 5219 /** 5220 * Manage filter operations. 5221 * 5222 * @param dev 5223 * Pointer to Ethernet device structure. 5224 * @param filter_type 5225 * Filter type. 5226 * @param filter_op 5227 * Operation to perform. 5228 * @param arg 5229 * Pointer to operation-specific structure. 5230 * 5231 * @return 5232 * 0 on success, a negative errno value otherwise and rte_errno is set. 
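*
* For RTE_ETH_FILTER_GENERIC with RTE_ETH_FILTER_GET the function only
* returns a pointer to mlx5_flow_ops through @p arg; this is how the
* ethdev layer resolves the rte_flow callbacks for this port. FDIR
* requests are translated into generic flows by flow_fdir_ctrl_func().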
5233 */ 5234 int 5235 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, 5236 enum rte_filter_type filter_type, 5237 enum rte_filter_op filter_op, 5238 void *arg) 5239 { 5240 switch (filter_type) { 5241 case RTE_ETH_FILTER_GENERIC: 5242 if (filter_op != RTE_ETH_FILTER_GET) { 5243 rte_errno = EINVAL; 5244 return -rte_errno; 5245 } 5246 *(const void **)arg = &mlx5_flow_ops; 5247 return 0; 5248 case RTE_ETH_FILTER_FDIR: 5249 return flow_fdir_ctrl_func(dev, filter_op, arg); 5250 default: 5251 DRV_LOG(ERR, "port %u filter type (%d) not supported", 5252 dev->data->port_id, filter_type); 5253 rte_errno = ENOTSUP; 5254 return -rte_errno; 5255 } 5256 return 0; 5257 } 5258 5259 /** 5260 * Create the needed meter and suffix tables. 5261 * 5262 * @param[in] dev 5263 * Pointer to Ethernet device. 5264 * @param[in] fm 5265 * Pointer to the flow meter. 5266 * 5267 * @return 5268 * Pointer to table set on success, NULL otherwise. 5269 */ 5270 struct mlx5_meter_domains_infos * 5271 mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev, 5272 const struct mlx5_flow_meter *fm) 5273 { 5274 const struct mlx5_flow_driver_ops *fops; 5275 5276 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5277 return fops->create_mtr_tbls(dev, fm); 5278 } 5279 5280 /** 5281 * Destroy the meter table set. 5282 * 5283 * @param[in] dev 5284 * Pointer to Ethernet device. 5285 * @param[in] tbl 5286 * Pointer to the meter table set. 5287 * 5288 * @return 5289 * 0 on success. 5290 */ 5291 int 5292 mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev, 5293 struct mlx5_meter_domains_infos *tbls) 5294 { 5295 const struct mlx5_flow_driver_ops *fops; 5296 5297 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5298 return fops->destroy_mtr_tbls(dev, tbls); 5299 } 5300 5301 /** 5302 * Create policer rules. 5303 * 5304 * @param[in] dev 5305 * Pointer to Ethernet device. 5306 * @param[in] fm 5307 * Pointer to flow meter structure. 5308 * @param[in] attr 5309 * Pointer to flow attributes. 5310 * 5311 * @return 5312 * 0 on success, -1 otherwise. 5313 */ 5314 int 5315 mlx5_flow_create_policer_rules(struct rte_eth_dev *dev, 5316 struct mlx5_flow_meter *fm, 5317 const struct rte_flow_attr *attr) 5318 { 5319 const struct mlx5_flow_driver_ops *fops; 5320 5321 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5322 return fops->create_policer_rules(dev, fm, attr); 5323 } 5324 5325 /** 5326 * Destroy policer rules. 5327 * 5328 * @param[in] fm 5329 * Pointer to flow meter structure. 5330 * @param[in] attr 5331 * Pointer to flow attributes. 5332 * 5333 * @return 5334 * 0 on success, -1 otherwise. 5335 */ 5336 int 5337 mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev, 5338 struct mlx5_flow_meter *fm, 5339 const struct rte_flow_attr *attr) 5340 { 5341 const struct mlx5_flow_driver_ops *fops; 5342 5343 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5344 return fops->destroy_policer_rules(dev, fm, attr); 5345 } 5346 5347 /** 5348 * Allocate a counter. 5349 * 5350 * @param[in] dev 5351 * Pointer to Ethernet device structure. 5352 * 5353 * @return 5354 * Pointer to allocated counter on success, NULL otherwise. 
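*
* Note: counter manipulation through this API is only available with the
* DV flow engine (flow_get_drv_type() returning MLX5_FLOW_TYPE_DV);
* otherwise an error is logged and NULL is returned.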
5355 */ 5356 struct mlx5_flow_counter * 5357 mlx5_counter_alloc(struct rte_eth_dev *dev) 5358 { 5359 const struct mlx5_flow_driver_ops *fops; 5360 struct rte_flow_attr attr = { .transfer = 0 }; 5361 5362 if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) { 5363 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5364 return fops->counter_alloc(dev); 5365 } 5366 DRV_LOG(ERR, 5367 "port %u counter allocate is not supported.", 5368 dev->data->port_id); 5369 return NULL; 5370 } 5371 5372 /** 5373 * Free a counter. 5374 * 5375 * @param[in] dev 5376 * Pointer to Ethernet device structure. 5377 * @param[in] cnt 5378 * Pointer to counter to be free. 5379 */ 5380 void 5381 mlx5_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt) 5382 { 5383 const struct mlx5_flow_driver_ops *fops; 5384 struct rte_flow_attr attr = { .transfer = 0 }; 5385 5386 if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) { 5387 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5388 fops->counter_free(dev, cnt); 5389 return; 5390 } 5391 DRV_LOG(ERR, 5392 "port %u counter free is not supported.", 5393 dev->data->port_id); 5394 } 5395 5396 /** 5397 * Query counter statistics. 5398 * 5399 * @param[in] dev 5400 * Pointer to Ethernet device structure. 5401 * @param[in] cnt 5402 * Pointer to counter to query. 5403 * @param[in] clear 5404 * Set to clear counter statistics. 5405 * @param[out] pkts 5406 * The counter hits packets number to save. 5407 * @param[out] bytes 5408 * The counter hits bytes number to save. 5409 * 5410 * @return 5411 * 0 on success, a negative errno value otherwise. 5412 */ 5413 int 5414 mlx5_counter_query(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt, 5415 bool clear, uint64_t *pkts, uint64_t *bytes) 5416 { 5417 const struct mlx5_flow_driver_ops *fops; 5418 struct rte_flow_attr attr = { .transfer = 0 }; 5419 5420 if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) { 5421 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5422 return fops->counter_query(dev, cnt, clear, pkts, bytes); 5423 } 5424 DRV_LOG(ERR, 5425 "port %u counter query is not supported.", 5426 dev->data->port_id); 5427 return -ENOTSUP; 5428 } 5429 5430 #define MLX5_POOL_QUERY_FREQ_US 1000000 5431 5432 /** 5433 * Set the periodic procedure for triggering asynchronous batch queries for all 5434 * the counter pools. 5435 * 5436 * @param[in] sh 5437 * Pointer to mlx5_ibv_shared object. 5438 */ 5439 void 5440 mlx5_set_query_alarm(struct mlx5_ibv_shared *sh) 5441 { 5442 struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(sh, 0, 0); 5443 uint32_t pools_n = rte_atomic16_read(&cont->n_valid); 5444 uint32_t us; 5445 5446 cont = MLX5_CNT_CONTAINER(sh, 1, 0); 5447 pools_n += rte_atomic16_read(&cont->n_valid); 5448 us = MLX5_POOL_QUERY_FREQ_US / pools_n; 5449 DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us); 5450 if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) { 5451 sh->cmng.query_thread_on = 0; 5452 DRV_LOG(ERR, "Cannot reinitialize query alarm"); 5453 } else { 5454 sh->cmng.query_thread_on = 1; 5455 } 5456 } 5457 5458 /** 5459 * The periodic procedure for triggering asynchronous batch queries for all the 5460 * counter pools. This function is probably called by the host thread. 5461 * 5462 * @param[in] arg 5463 * The parameter for the alarm process. 
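*
* Pacing example (numbers are illustrative): with MLX5_POOL_QUERY_FREQ_US
* of 1000000 us and, say, 4 valid pools, mlx5_set_query_alarm() re-arms
* the alarm every 1000000 / 4 = 250000 us and this handler advances to
* the next pool on each invocation, so every pool is queried roughly once
* per second.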
5464 */ 5465 void 5466 mlx5_flow_query_alarm(void *arg) 5467 { 5468 struct mlx5_ibv_shared *sh = arg; 5469 struct mlx5_devx_obj *dcs; 5470 uint16_t offset; 5471 int ret; 5472 uint8_t batch = sh->cmng.batch; 5473 uint16_t pool_index = sh->cmng.pool_index; 5474 struct mlx5_pools_container *cont; 5475 struct mlx5_pools_container *mcont; 5476 struct mlx5_flow_counter_pool *pool; 5477 5478 if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES) 5479 goto set_alarm; 5480 next_container: 5481 cont = MLX5_CNT_CONTAINER(sh, batch, 1); 5482 mcont = MLX5_CNT_CONTAINER(sh, batch, 0); 5483 /* Check if resize was done and need to flip a container. */ 5484 if (cont != mcont) { 5485 if (cont->pools) { 5486 /* Clean the old container. */ 5487 rte_free(cont->pools); 5488 memset(cont, 0, sizeof(*cont)); 5489 } 5490 rte_cio_wmb(); 5491 /* Flip the host container. */ 5492 sh->cmng.mhi[batch] ^= (uint8_t)2; 5493 cont = mcont; 5494 } 5495 if (!cont->pools) { 5496 /* 2 empty containers case is unexpected. */ 5497 if (unlikely(batch != sh->cmng.batch)) 5498 goto set_alarm; 5499 batch ^= 0x1; 5500 pool_index = 0; 5501 goto next_container; 5502 } 5503 pool = cont->pools[pool_index]; 5504 if (pool->raw_hw) 5505 /* There is a pool query in progress. */ 5506 goto set_alarm; 5507 pool->raw_hw = 5508 LIST_FIRST(&sh->cmng.free_stat_raws); 5509 if (!pool->raw_hw) 5510 /* No free counter statistics raw memory. */ 5511 goto set_alarm; 5512 dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read 5513 (&pool->a64_dcs); 5514 offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL; 5515 ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL - 5516 offset, NULL, NULL, 5517 pool->raw_hw->mem_mng->dm->id, 5518 (void *)(uintptr_t) 5519 (pool->raw_hw->data + offset), 5520 sh->devx_comp, 5521 (uint64_t)(uintptr_t)pool); 5522 if (ret) { 5523 DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID" 5524 " %d", pool->min_dcs->id); 5525 pool->raw_hw = NULL; 5526 goto set_alarm; 5527 } 5528 pool->raw_hw->min_dcs_id = dcs->id; 5529 LIST_REMOVE(pool->raw_hw, next); 5530 sh->cmng.pending_queries++; 5531 pool_index++; 5532 if (pool_index >= rte_atomic16_read(&cont->n_valid)) { 5533 batch ^= 0x1; 5534 pool_index = 0; 5535 } 5536 set_alarm: 5537 sh->cmng.batch = batch; 5538 sh->cmng.pool_index = pool_index; 5539 mlx5_set_query_alarm(sh); 5540 } 5541 5542 /** 5543 * Handler for the HW respond about ready values from an asynchronous batch 5544 * query. This function is probably called by the host thread. 5545 * 5546 * @param[in] sh 5547 * The pointer to the shared IB device context. 5548 * @param[in] async_id 5549 * The Devx async ID. 5550 * @param[in] status 5551 * The status of the completion. 5552 */ 5553 void 5554 mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh, 5555 uint64_t async_id, int status) 5556 { 5557 struct mlx5_flow_counter_pool *pool = 5558 (struct mlx5_flow_counter_pool *)(uintptr_t)async_id; 5559 struct mlx5_counter_stats_raw *raw_to_free; 5560 5561 if (unlikely(status)) { 5562 raw_to_free = pool->raw_hw; 5563 } else { 5564 raw_to_free = pool->raw; 5565 rte_spinlock_lock(&pool->sl); 5566 pool->raw = pool->raw_hw; 5567 rte_spinlock_unlock(&pool->sl); 5568 rte_atomic64_add(&pool->query_gen, 1); 5569 /* Be sure the new raw counters data is updated in memory. */ 5570 rte_cio_wmb(); 5571 } 5572 LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next); 5573 pool->raw_hw = NULL; 5574 sh->cmng.pending_queries--; 5575 } 5576 5577 /** 5578 * Translate the rte_flow group index to HW table value. 
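*
* For example (values are illustrative): an external rule with the
* transfer attribute and group 3 is placed in HW table 4, while internal
* or non-transfer rules keep table equal to group; group UINT32_MAX
* cannot be shifted and is rejected with EINVAL.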
5579 *
5580 * @param[in] attributes
5581 * Pointer to flow attributes.
5582 * @param[in] external
5583 * Value is part of flow rule created by request external to PMD.
5584 * @param[in] group
5585 * rte_flow group index value.
5586 * @param[out] table
5587 * HW table value.
5588 * @param[out] error
5589 * Pointer to error structure.
5590 *
5591 * @return
5592 * 0 on success, a negative errno value otherwise and rte_errno is set.
5593 */
5594 int
5595 mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external,
5596 uint32_t group, uint32_t *table,
5597 struct rte_flow_error *error)
5598 {
5599 if (attributes->transfer && external) {
5600 if (group == UINT32_MAX)
5601 return rte_flow_error_set
5602 (error, EINVAL,
5603 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5604 NULL,
5605 "group index not supported");
5606 *table = group + 1;
5607 } else {
5608 *table = group;
5609 }
5610 return 0;
5611 }
5612
5613 /**
5614 * Discover availability of metadata reg_c's.
5615 *
5616 * Iteratively use test flows to check availability.
5617 *
5618 * @param[in] dev
5619 * Pointer to the Ethernet device structure.
5620 *
5621 * @return
5622 * 0 on success, a negative errno value otherwise and rte_errno is set.
5623 */
5624 int
5625 mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
5626 {
5627 struct mlx5_priv *priv = dev->data->dev_private;
5628 struct mlx5_dev_config *config = &priv->config;
5629 enum modify_reg idx;
5630 int n = 0;
5631
5632 /* reg_c[0] and reg_c[1] are reserved. */
5633 config->flow_mreg_c[n++] = REG_C_0;
5634 config->flow_mreg_c[n++] = REG_C_1;
5635 /* Discover availability of other reg_c's. */
5636 for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
5637 struct rte_flow_attr attr = {
5638 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
5639 .priority = MLX5_FLOW_PRIO_RSVD,
5640 .ingress = 1,
5641 };
5642 struct rte_flow_item items[] = {
5643 [0] = {
5644 .type = RTE_FLOW_ITEM_TYPE_END,
5645 },
5646 };
5647 struct rte_flow_action actions[] = {
5648 [0] = {
5649 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
5650 .conf = &(struct mlx5_flow_action_copy_mreg){
5651 .src = REG_C_1,
5652 .dst = idx,
5653 },
5654 },
5655 [1] = {
5656 .type = RTE_FLOW_ACTION_TYPE_JUMP,
5657 .conf = &(struct rte_flow_action_jump){
5658 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
5659 },
5660 },
5661 [2] = {
5662 .type = RTE_FLOW_ACTION_TYPE_END,
5663 },
5664 };
5665 struct rte_flow *flow;
5666 struct rte_flow_error error;
5667
5668 if (!config->dv_flow_en)
5669 break;
5670 /* Create internal flow, validation skips copy action. */
5671 flow = flow_list_create(dev, NULL, &attr, items,
5672 actions, false, &error);
5673 if (!flow)
5674 continue;
5675 if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL))
5676 config->flow_mreg_c[n++] = idx;
5677 flow_list_destroy(dev, NULL, flow);
5678 }
5679 for (; n < MLX5_MREG_C_NUM; ++n)
5680 config->flow_mreg_c[n] = REG_NONE;
5681 return 0;
5682 }
5683
5684 /**
5685 * Dump flow raw HW data to a file.
5686 *
5687 * @param[in] dev
5688 * The pointer to Ethernet device.
5689 * @param[in] file
5690 * A pointer to a file for output.
5691 * @param[out] error
5692 * Perform verbose error reporting if not NULL. PMDs initialize this
5693 * structure in case of error only.
5694 * @return
5695 * 0 on success, a negative value otherwise.
5696 */
5697 int
5698 mlx5_flow_dev_dump(struct rte_eth_dev *dev,
5699 FILE *file,
5700 struct rte_flow_error *error __rte_unused)
5701 {
5702 struct mlx5_priv *priv = dev->data->dev_private;
5703
5704 return mlx5_devx_cmd_flow_dump(priv->sh, file);
5705 }
5706