1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2016 6WIND S.A. 3 * Copyright 2016 Mellanox Technologies, Ltd 4 */ 5 6 #include <netinet/in.h> 7 #include <sys/queue.h> 8 #include <stdalign.h> 9 #include <stdint.h> 10 #include <string.h> 11 12 /* Verbs header. */ 13 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ 14 #ifdef PEDANTIC 15 #pragma GCC diagnostic ignored "-Wpedantic" 16 #endif 17 #include <infiniband/verbs.h> 18 #ifdef PEDANTIC 19 #pragma GCC diagnostic error "-Wpedantic" 20 #endif 21 22 #include <rte_common.h> 23 #include <rte_ether.h> 24 #include <rte_ethdev_driver.h> 25 #include <rte_flow.h> 26 #include <rte_flow_driver.h> 27 #include <rte_malloc.h> 28 #include <rte_ip.h> 29 30 #include "mlx5.h" 31 #include "mlx5_defs.h" 32 #include "mlx5_flow.h" 33 #include "mlx5_glue.h" 34 #include "mlx5_prm.h" 35 #include "mlx5_rxtx.h" 36 37 /* Dev ops structure defined in mlx5.c */ 38 extern const struct eth_dev_ops mlx5_dev_ops; 39 extern const struct eth_dev_ops mlx5_dev_ops_isolate; 40 41 /** Device flow drivers. */ 42 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 43 extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops; 44 #endif 45 extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops; 46 47 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops; 48 49 const struct mlx5_flow_driver_ops *flow_drv_ops[] = { 50 [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops, 51 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 52 [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops, 53 #endif 54 [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops, 55 [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops 56 }; 57 58 enum mlx5_expansion { 59 MLX5_EXPANSION_ROOT, 60 MLX5_EXPANSION_ROOT_OUTER, 61 MLX5_EXPANSION_ROOT_ETH_VLAN, 62 MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN, 63 MLX5_EXPANSION_OUTER_ETH, 64 MLX5_EXPANSION_OUTER_ETH_VLAN, 65 MLX5_EXPANSION_OUTER_VLAN, 66 MLX5_EXPANSION_OUTER_IPV4, 67 MLX5_EXPANSION_OUTER_IPV4_UDP, 68 MLX5_EXPANSION_OUTER_IPV4_TCP, 69 MLX5_EXPANSION_OUTER_IPV6, 70 MLX5_EXPANSION_OUTER_IPV6_UDP, 71 MLX5_EXPANSION_OUTER_IPV6_TCP, 72 MLX5_EXPANSION_VXLAN, 73 MLX5_EXPANSION_VXLAN_GPE, 74 MLX5_EXPANSION_GRE, 75 MLX5_EXPANSION_MPLS, 76 MLX5_EXPANSION_ETH, 77 MLX5_EXPANSION_ETH_VLAN, 78 MLX5_EXPANSION_VLAN, 79 MLX5_EXPANSION_IPV4, 80 MLX5_EXPANSION_IPV4_UDP, 81 MLX5_EXPANSION_IPV4_TCP, 82 MLX5_EXPANSION_IPV6, 83 MLX5_EXPANSION_IPV6_UDP, 84 MLX5_EXPANSION_IPV6_TCP, 85 }; 86 87 /** Supported expansion of items. 
*/ 88 static const struct rte_flow_expand_node mlx5_support_expansion[] = { 89 [MLX5_EXPANSION_ROOT] = { 90 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, 91 MLX5_EXPANSION_IPV4, 92 MLX5_EXPANSION_IPV6), 93 .type = RTE_FLOW_ITEM_TYPE_END, 94 }, 95 [MLX5_EXPANSION_ROOT_OUTER] = { 96 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH, 97 MLX5_EXPANSION_OUTER_IPV4, 98 MLX5_EXPANSION_OUTER_IPV6), 99 .type = RTE_FLOW_ITEM_TYPE_END, 100 }, 101 [MLX5_EXPANSION_ROOT_ETH_VLAN] = { 102 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN), 103 .type = RTE_FLOW_ITEM_TYPE_END, 104 }, 105 [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = { 106 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN), 107 .type = RTE_FLOW_ITEM_TYPE_END, 108 }, 109 [MLX5_EXPANSION_OUTER_ETH] = { 110 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, 111 MLX5_EXPANSION_OUTER_IPV6, 112 MLX5_EXPANSION_MPLS), 113 .type = RTE_FLOW_ITEM_TYPE_ETH, 114 .rss_types = 0, 115 }, 116 [MLX5_EXPANSION_OUTER_ETH_VLAN] = { 117 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN), 118 .type = RTE_FLOW_ITEM_TYPE_ETH, 119 .rss_types = 0, 120 }, 121 [MLX5_EXPANSION_OUTER_VLAN] = { 122 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, 123 MLX5_EXPANSION_OUTER_IPV6), 124 .type = RTE_FLOW_ITEM_TYPE_VLAN, 125 }, 126 [MLX5_EXPANSION_OUTER_IPV4] = { 127 .next = RTE_FLOW_EXPAND_RSS_NEXT 128 (MLX5_EXPANSION_OUTER_IPV4_UDP, 129 MLX5_EXPANSION_OUTER_IPV4_TCP, 130 MLX5_EXPANSION_GRE, 131 MLX5_EXPANSION_IPV4, 132 MLX5_EXPANSION_IPV6), 133 .type = RTE_FLOW_ITEM_TYPE_IPV4, 134 .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | 135 ETH_RSS_NONFRAG_IPV4_OTHER, 136 }, 137 [MLX5_EXPANSION_OUTER_IPV4_UDP] = { 138 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, 139 MLX5_EXPANSION_VXLAN_GPE), 140 .type = RTE_FLOW_ITEM_TYPE_UDP, 141 .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, 142 }, 143 [MLX5_EXPANSION_OUTER_IPV4_TCP] = { 144 .type = RTE_FLOW_ITEM_TYPE_TCP, 145 .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, 146 }, 147 [MLX5_EXPANSION_OUTER_IPV6] = { 148 .next = RTE_FLOW_EXPAND_RSS_NEXT 149 (MLX5_EXPANSION_OUTER_IPV6_UDP, 150 MLX5_EXPANSION_OUTER_IPV6_TCP, 151 MLX5_EXPANSION_IPV4, 152 MLX5_EXPANSION_IPV6), 153 .type = RTE_FLOW_ITEM_TYPE_IPV6, 154 .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | 155 ETH_RSS_NONFRAG_IPV6_OTHER, 156 }, 157 [MLX5_EXPANSION_OUTER_IPV6_UDP] = { 158 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, 159 MLX5_EXPANSION_VXLAN_GPE), 160 .type = RTE_FLOW_ITEM_TYPE_UDP, 161 .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, 162 }, 163 [MLX5_EXPANSION_OUTER_IPV6_TCP] = { 164 .type = RTE_FLOW_ITEM_TYPE_TCP, 165 .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, 166 }, 167 [MLX5_EXPANSION_VXLAN] = { 168 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH), 169 .type = RTE_FLOW_ITEM_TYPE_VXLAN, 170 }, 171 [MLX5_EXPANSION_VXLAN_GPE] = { 172 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, 173 MLX5_EXPANSION_IPV4, 174 MLX5_EXPANSION_IPV6), 175 .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE, 176 }, 177 [MLX5_EXPANSION_GRE] = { 178 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4), 179 .type = RTE_FLOW_ITEM_TYPE_GRE, 180 }, 181 [MLX5_EXPANSION_MPLS] = { 182 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 183 MLX5_EXPANSION_IPV6), 184 .type = RTE_FLOW_ITEM_TYPE_MPLS, 185 }, 186 [MLX5_EXPANSION_ETH] = { 187 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 188 MLX5_EXPANSION_IPV6), 189 .type = RTE_FLOW_ITEM_TYPE_ETH, 190 }, 191 [MLX5_EXPANSION_ETH_VLAN] = { 192 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN), 193 .type = 
RTE_FLOW_ITEM_TYPE_ETH, 194 }, 195 [MLX5_EXPANSION_VLAN] = { 196 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 197 MLX5_EXPANSION_IPV6), 198 .type = RTE_FLOW_ITEM_TYPE_VLAN, 199 }, 200 [MLX5_EXPANSION_IPV4] = { 201 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP, 202 MLX5_EXPANSION_IPV4_TCP), 203 .type = RTE_FLOW_ITEM_TYPE_IPV4, 204 .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | 205 ETH_RSS_NONFRAG_IPV4_OTHER, 206 }, 207 [MLX5_EXPANSION_IPV4_UDP] = { 208 .type = RTE_FLOW_ITEM_TYPE_UDP, 209 .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, 210 }, 211 [MLX5_EXPANSION_IPV4_TCP] = { 212 .type = RTE_FLOW_ITEM_TYPE_TCP, 213 .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, 214 }, 215 [MLX5_EXPANSION_IPV6] = { 216 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP, 217 MLX5_EXPANSION_IPV6_TCP), 218 .type = RTE_FLOW_ITEM_TYPE_IPV6, 219 .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | 220 ETH_RSS_NONFRAG_IPV6_OTHER, 221 }, 222 [MLX5_EXPANSION_IPV6_UDP] = { 223 .type = RTE_FLOW_ITEM_TYPE_UDP, 224 .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, 225 }, 226 [MLX5_EXPANSION_IPV6_TCP] = { 227 .type = RTE_FLOW_ITEM_TYPE_TCP, 228 .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, 229 }, 230 }; 231 232 static const struct rte_flow_ops mlx5_flow_ops = { 233 .validate = mlx5_flow_validate, 234 .create = mlx5_flow_create, 235 .destroy = mlx5_flow_destroy, 236 .flush = mlx5_flow_flush, 237 .isolate = mlx5_flow_isolate, 238 .query = mlx5_flow_query, 239 .dev_dump = mlx5_flow_dev_dump, 240 }; 241 242 /* Convert FDIR request to Generic flow. */ 243 struct mlx5_fdir { 244 struct rte_flow_attr attr; 245 struct rte_flow_item items[4]; 246 struct rte_flow_item_eth l2; 247 struct rte_flow_item_eth l2_mask; 248 union { 249 struct rte_flow_item_ipv4 ipv4; 250 struct rte_flow_item_ipv6 ipv6; 251 } l3; 252 union { 253 struct rte_flow_item_ipv4 ipv4; 254 struct rte_flow_item_ipv6 ipv6; 255 } l3_mask; 256 union { 257 struct rte_flow_item_udp udp; 258 struct rte_flow_item_tcp tcp; 259 } l4; 260 union { 261 struct rte_flow_item_udp udp; 262 struct rte_flow_item_tcp tcp; 263 } l4_mask; 264 struct rte_flow_action actions[2]; 265 struct rte_flow_action_queue queue; 266 }; 267 268 /* Map of Verbs to Flow priority with 8 Verbs priorities. */ 269 static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = { 270 { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 }, 271 }; 272 273 /* Map of Verbs to Flow priority with 16 Verbs priorities. */ 274 static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = { 275 { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 }, 276 { 9, 10, 11 }, { 12, 13, 14 }, 277 }; 278 279 /* Tunnel information. */ 280 struct mlx5_flow_tunnel_info { 281 uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */ 282 uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). 
 */
};

static struct mlx5_flow_tunnel_info tunnels_info[] = {
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GENEVE,
		.ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GRE,
		.ptype = RTE_PTYPE_TUNNEL_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_NVGRE,
		.ptype = RTE_PTYPE_TUNNEL_NVGRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPIP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GTP,
		.ptype = RTE_PTYPE_TUNNEL_GTPU,
	},
};

/**
 * Translate tag ID to register.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] feature
 *   The feature that requests the register.
 * @param[in] id
 *   The requested register ID.
 * @param[out] error
 *   Error description in case of failure.
 *
 * @return
 *   The requested register on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
		     enum mlx5_feature_name feature,
		     uint32_t id,
		     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	enum modify_reg start_reg;
	bool skip_mtr_reg = false;

	switch (feature) {
	case MLX5_HAIRPIN_RX:
		return REG_B;
	case MLX5_HAIRPIN_TX:
		return REG_A;
	case MLX5_METADATA_RX:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_B;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_METADATA_TX:
		return REG_A;
	case MLX5_METADATA_FDB:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NONE;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_FLOW_MARK:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NONE;
		case MLX5_XMETA_MODE_META16:
			return REG_C_1;
		case MLX5_XMETA_MODE_META32:
			return REG_C_0;
		}
		break;
	case MLX5_MTR_SFX:
		/*
		 * If meter color and flow match share one register, flow match
		 * should use the meter color register for match.
		 */
		if (priv->mtr_reg_share)
			return priv->mtr_color_reg;
		else
			return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
			       REG_C_3;
	case MLX5_MTR_COLOR:
		RTE_ASSERT(priv->mtr_color_reg != REG_NONE);
		return priv->mtr_color_reg;
	case MLX5_COPY_MARK:
		/*
		 * The metadata COPY_MARK register is only used in the meter
		 * suffix sub-flow when a meter is present, so it is safe to
		 * share the same register with the meter color register.
		 */
		return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
	case MLX5_APP_TAG:
		/*
		 * If a meter is enabled, it engages registers for color
		 * match and flow match. If the meter color match does not
		 * use REG_C_2, the REG_C_x taken by the meter color match
		 * must be skipped.
		 * If no meter is enabled, all available registers can be
		 * used freely.
		 */
		start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
			    (priv->mtr_reg_share ? REG_C_3 : REG_C_4);
		skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
		if (id > (REG_C_7 - start_reg))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "invalid tag id");
		if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NONE)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		/*
		 * This case means the meter is using a REG_C_x greater than 2.
		 * Take care not to conflict with the meter color REG_C_x.
		 * If the available index REG_C_y >= REG_C_x, skip the
		 * color register.
		 */
		if (skip_mtr_reg && config->flow_mreg_c
		    [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
			if (config->flow_mreg_c
			    [id + 1 + start_reg - REG_C_0] != REG_NONE)
				return config->flow_mreg_c
					[id + 1 + start_reg - REG_C_0];
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		}
		return config->flow_mreg_c[id + start_reg - REG_C_0];
	}
	assert(false);
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, "invalid feature name");
}
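
/*
 * Illustrative sketch only (not part of the driver API contract): with
 * dv_xmeta_en == MLX5_XMETA_MODE_META16 the MARK feature above is expected
 * to resolve to REG_C_1, e.g.:
 *
 *	struct rte_flow_error err;
 *	int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, &err);
 *	// reg == REG_C_1 in META16 mode, REG_C_0 in META32 mode,
 *	// REG_NONE in legacy mode, or a negative errno on failure.
 */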

/**
 * Check extensive flow metadata register support.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 * @return
 *   True if device supports extensive flow metadata register, otherwise false.
 */
bool
mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;

	/*
	 * Having an available reg_c can be regarded inclusively as supporting
	 * extensive flow metadata registers, which could mean:
	 * - metadata register copy action by modify header.
	 * - 16 modify header actions are supported.
	 * - reg_c's are preserved across different domains (FDB and NIC) on
	 *   packet loopback by flow lookup miss.
	 */
	return config->flow_mreg_c[2] != REG_NONE;
}

/**
 * Discover the maximum number of priorities available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   The number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
489 */ 490 int 491 mlx5_flow_discover_priorities(struct rte_eth_dev *dev) 492 { 493 struct mlx5_priv *priv = dev->data->dev_private; 494 struct { 495 struct ibv_flow_attr attr; 496 struct ibv_flow_spec_eth eth; 497 struct ibv_flow_spec_action_drop drop; 498 } flow_attr = { 499 .attr = { 500 .num_of_specs = 2, 501 .port = (uint8_t)priv->ibv_port, 502 }, 503 .eth = { 504 .type = IBV_FLOW_SPEC_ETH, 505 .size = sizeof(struct ibv_flow_spec_eth), 506 }, 507 .drop = { 508 .size = sizeof(struct ibv_flow_spec_action_drop), 509 .type = IBV_FLOW_SPEC_ACTION_DROP, 510 }, 511 }; 512 struct ibv_flow *flow; 513 struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev); 514 uint16_t vprio[] = { 8, 16 }; 515 int i; 516 int priority = 0; 517 518 if (!drop) { 519 rte_errno = ENOTSUP; 520 return -rte_errno; 521 } 522 for (i = 0; i != RTE_DIM(vprio); i++) { 523 flow_attr.attr.priority = vprio[i] - 1; 524 flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr); 525 if (!flow) 526 break; 527 claim_zero(mlx5_glue->destroy_flow(flow)); 528 priority = vprio[i]; 529 } 530 mlx5_hrxq_drop_release(dev); 531 switch (priority) { 532 case 8: 533 priority = RTE_DIM(priority_map_3); 534 break; 535 case 16: 536 priority = RTE_DIM(priority_map_5); 537 break; 538 default: 539 rte_errno = ENOTSUP; 540 DRV_LOG(ERR, 541 "port %u verbs maximum priority: %d expected 8/16", 542 dev->data->port_id, priority); 543 return -rte_errno; 544 } 545 DRV_LOG(INFO, "port %u flow maximum priority: %d", 546 dev->data->port_id, priority); 547 return priority; 548 } 549 550 /** 551 * Adjust flow priority based on the highest layer and the request priority. 552 * 553 * @param[in] dev 554 * Pointer to the Ethernet device structure. 555 * @param[in] priority 556 * The rule base priority. 557 * @param[in] subpriority 558 * The priority based on the items. 559 * 560 * @return 561 * The new priority. 562 */ 563 uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, 564 uint32_t subpriority) 565 { 566 uint32_t res = 0; 567 struct mlx5_priv *priv = dev->data->dev_private; 568 569 switch (priv->config.flow_prio) { 570 case RTE_DIM(priority_map_3): 571 res = priority_map_3[priority][subpriority]; 572 break; 573 case RTE_DIM(priority_map_5): 574 res = priority_map_5[priority][subpriority]; 575 break; 576 } 577 return res; 578 } 579 580 /** 581 * Verify the @p item specifications (spec, last, mask) are compatible with the 582 * NIC capabilities. 583 * 584 * @param[in] item 585 * Item specification. 586 * @param[in] mask 587 * @p item->mask or flow default bit-masks. 588 * @param[in] nic_mask 589 * Bit-masks covering supported fields by the NIC to compare with user mask. 590 * @param[in] size 591 * Bit-masks size in bytes. 592 * @param[out] error 593 * Pointer to error structure. 594 * 595 * @return 596 * 0 on success, a negative errno value otherwise and rte_errno is set. 
 */
int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
			  const uint8_t *mask,
			  const uint8_t *nic_mask,
			  unsigned int size,
			  struct rte_flow_error *error)
{
	unsigned int i;

	assert(nic_mask);
	for (i = 0; i < size; ++i)
		if ((nic_mask[i] | mask[i]) != nic_mask[i])
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "mask enables non supported"
						  " bits");
	if (!item->spec && (item->mask || item->last))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "mask/last without a spec is not"
					  " supported");
	if (item->spec && item->last) {
		uint8_t spec[size];
		uint8_t last[size];
		unsigned int i;
		int ret;

		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
			last[i] = ((const uint8_t *)item->last)[i] & mask[i];
		}
		ret = memcmp(spec, last, size);
		if (ret != 0)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "range is not valid");
	}
	return 0;
}

/**
 * Adjust the hash fields according to the @p flow information.
 *
 * @param[in] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] tunnel
 *   1 when the hash field is for a tunnel item.
 * @param[in] layer_types
 *   ETH_RSS_* types.
 * @param[in] hash_fields
 *   Item hash fields.
 *
 * @return
 *   The hash fields that should be used.
 */
uint64_t
mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
			    int tunnel __rte_unused, uint64_t layer_types,
			    uint64_t hash_fields)
{
	struct rte_flow *flow = dev_flow->flow;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	int rss_request_inner = flow->rss.level >= 2;

	/* Check RSS hash level for tunnel. */
	if (tunnel && rss_request_inner)
		hash_fields |= IBV_RX_HASH_INNER;
	else if (tunnel || rss_request_inner)
		return 0;
#endif
	/* Check if requested layer matches RSS hash fields. */
	if (!(flow->rss.types & layer_types))
		return 0;
	return hash_fields;
}

/**
 * Look up and set the tunnel ptype in the Rx queue data. A single ptype can
 * be used; if several tunnel rules are used on this queue, the tunnel ptype
 * will be cleared.
 *
 * @param rxq_ctrl
 *   Rx queue to update.
 */
static void
flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	unsigned int i;
	uint32_t tunnel_ptype = 0;

	/* Look up for the ptype to use. */
	for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
		if (!rxq_ctrl->flow_tunnels_n[i])
			continue;
		if (!tunnel_ptype) {
			tunnel_ptype = tunnels_info[i].ptype;
		} else {
			tunnel_ptype = 0;
			break;
		}
	}
	rxq_ctrl->rxq.tunnel = tunnel_ptype;
}
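
/*
 * Sketch of the resulting behaviour (illustration only): a queue whose
 * tunnel flows are all VXLAN ends up with rxq.tunnel ==
 * (RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP) taken from tunnels_info[],
 * while a queue carrying both VXLAN and GRE flows gets rxq.tunnel == 0
 * because a single ptype cannot describe both tunnels.
 */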

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
 * flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] dev_flow
 *   Pointer to device flow structure.
 */
static void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow *flow = dev_flow->flow;
	const int mark = !!(dev_flow->actions &
			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	for (i = 0; i != flow->rss.queue_num; ++i) {
		int idx = (*flow->rss.queue)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		/*
		 * To support metadata register copy on Tx loopback,
		 * this must be always enabled (metadata may arrive
		 * from another port, not only from local flows).
		 */
		if (priv->config.dv_flow_en &&
		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    mlx5_flow_ext_mreg_supported(dev)) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n = 1;
		} else if (mark) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n++;
		}
		if (tunnel) {
			unsigned int j;

			/* Increase the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_flow->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]++;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Pointer to flow structure.
 */
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow *dev_flow;

	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
		flow_drv_rxq_flags_set(dev, dev_flow);
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * device flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] dev_flow
 *   Pointer to the device flow.
 */
static void
flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow *flow = dev_flow->flow;
	const int mark = !!(dev_flow->actions &
			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	assert(dev->data->dev_started);
	for (i = 0; i != flow->rss.queue_num; ++i) {
		int idx = (*flow->rss.queue)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		if (priv->config.dv_flow_en &&
		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    mlx5_flow_ext_mreg_supported(dev)) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n = 1;
		} else if (mark) {
			rxq_ctrl->flow_mark_n--;
			rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
		}
		if (tunnel) {
			unsigned int j;

			/* Decrease the counter matching the flow.
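			 * This mirrors the increment done in
			 * flow_drv_rxq_flags_set().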
*/ 816 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { 817 if ((tunnels_info[j].tunnel & 818 dev_flow->layers) == 819 tunnels_info[j].tunnel) { 820 rxq_ctrl->flow_tunnels_n[j]--; 821 break; 822 } 823 } 824 flow_rxq_tunnel_ptype_update(rxq_ctrl); 825 } 826 } 827 } 828 829 /** 830 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the 831 * @p flow if no other flow uses it with the same kind of request. 832 * 833 * @param dev 834 * Pointer to Ethernet device. 835 * @param[in] flow 836 * Pointer to the flow. 837 */ 838 static void 839 flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) 840 { 841 struct mlx5_flow *dev_flow; 842 843 LIST_FOREACH(dev_flow, &flow->dev_flows, next) 844 flow_drv_rxq_flags_trim(dev, dev_flow); 845 } 846 847 /** 848 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues. 849 * 850 * @param dev 851 * Pointer to Ethernet device. 852 */ 853 static void 854 flow_rxq_flags_clear(struct rte_eth_dev *dev) 855 { 856 struct mlx5_priv *priv = dev->data->dev_private; 857 unsigned int i; 858 859 for (i = 0; i != priv->rxqs_n; ++i) { 860 struct mlx5_rxq_ctrl *rxq_ctrl; 861 unsigned int j; 862 863 if (!(*priv->rxqs)[i]) 864 continue; 865 rxq_ctrl = container_of((*priv->rxqs)[i], 866 struct mlx5_rxq_ctrl, rxq); 867 rxq_ctrl->flow_mark_n = 0; 868 rxq_ctrl->rxq.mark = 0; 869 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) 870 rxq_ctrl->flow_tunnels_n[j] = 0; 871 rxq_ctrl->rxq.tunnel = 0; 872 } 873 } 874 875 /* 876 * return a pointer to the desired action in the list of actions. 877 * 878 * @param[in] actions 879 * The list of actions to search the action in. 880 * @param[in] action 881 * The action to find. 882 * 883 * @return 884 * Pointer to the action in the list, if found. NULL otherwise. 885 */ 886 const struct rte_flow_action * 887 mlx5_flow_find_action(const struct rte_flow_action *actions, 888 enum rte_flow_action_type action) 889 { 890 if (actions == NULL) 891 return NULL; 892 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) 893 if (actions->type == action) 894 return actions; 895 return NULL; 896 } 897 898 /* 899 * Validate the flag action. 900 * 901 * @param[in] action_flags 902 * Bit-fields that holds the actions detected until now. 903 * @param[in] attr 904 * Attributes of flow that includes this action. 905 * @param[out] error 906 * Pointer to error structure. 907 * 908 * @return 909 * 0 on success, a negative errno value otherwise and rte_errno is set. 910 */ 911 int 912 mlx5_flow_validate_action_flag(uint64_t action_flags, 913 const struct rte_flow_attr *attr, 914 struct rte_flow_error *error) 915 { 916 if (action_flags & MLX5_FLOW_ACTION_MARK) 917 return rte_flow_error_set(error, EINVAL, 918 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 919 "can't mark and flag in same flow"); 920 if (action_flags & MLX5_FLOW_ACTION_FLAG) 921 return rte_flow_error_set(error, EINVAL, 922 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 923 "can't have 2 flag" 924 " actions in same flow"); 925 if (attr->egress) 926 return rte_flow_error_set(error, ENOTSUP, 927 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 928 "flag action not supported for " 929 "egress"); 930 return 0; 931 } 932 933 /* 934 * Validate the mark action. 935 * 936 * @param[in] action 937 * Pointer to the queue action. 938 * @param[in] action_flags 939 * Bit-fields that holds the actions detected until now. 940 * @param[in] attr 941 * Attributes of flow that includes this action. 942 * @param[out] error 943 * Pointer to error structure. 
944 * 945 * @return 946 * 0 on success, a negative errno value otherwise and rte_errno is set. 947 */ 948 int 949 mlx5_flow_validate_action_mark(const struct rte_flow_action *action, 950 uint64_t action_flags, 951 const struct rte_flow_attr *attr, 952 struct rte_flow_error *error) 953 { 954 const struct rte_flow_action_mark *mark = action->conf; 955 956 if (!mark) 957 return rte_flow_error_set(error, EINVAL, 958 RTE_FLOW_ERROR_TYPE_ACTION, 959 action, 960 "configuration cannot be null"); 961 if (mark->id >= MLX5_FLOW_MARK_MAX) 962 return rte_flow_error_set(error, EINVAL, 963 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 964 &mark->id, 965 "mark id must in 0 <= id < " 966 RTE_STR(MLX5_FLOW_MARK_MAX)); 967 if (action_flags & MLX5_FLOW_ACTION_FLAG) 968 return rte_flow_error_set(error, EINVAL, 969 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 970 "can't flag and mark in same flow"); 971 if (action_flags & MLX5_FLOW_ACTION_MARK) 972 return rte_flow_error_set(error, EINVAL, 973 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 974 "can't have 2 mark actions in same" 975 " flow"); 976 if (attr->egress) 977 return rte_flow_error_set(error, ENOTSUP, 978 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 979 "mark action not supported for " 980 "egress"); 981 return 0; 982 } 983 984 /* 985 * Validate the drop action. 986 * 987 * @param[in] action_flags 988 * Bit-fields that holds the actions detected until now. 989 * @param[in] attr 990 * Attributes of flow that includes this action. 991 * @param[out] error 992 * Pointer to error structure. 993 * 994 * @return 995 * 0 on success, a negative errno value otherwise and rte_errno is set. 996 */ 997 int 998 mlx5_flow_validate_action_drop(uint64_t action_flags __rte_unused, 999 const struct rte_flow_attr *attr, 1000 struct rte_flow_error *error) 1001 { 1002 if (attr->egress) 1003 return rte_flow_error_set(error, ENOTSUP, 1004 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1005 "drop action not supported for " 1006 "egress"); 1007 return 0; 1008 } 1009 1010 /* 1011 * Validate the queue action. 1012 * 1013 * @param[in] action 1014 * Pointer to the queue action. 1015 * @param[in] action_flags 1016 * Bit-fields that holds the actions detected until now. 1017 * @param[in] dev 1018 * Pointer to the Ethernet device structure. 1019 * @param[in] attr 1020 * Attributes of flow that includes this action. 1021 * @param[out] error 1022 * Pointer to error structure. 1023 * 1024 * @return 1025 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1026 */ 1027 int 1028 mlx5_flow_validate_action_queue(const struct rte_flow_action *action, 1029 uint64_t action_flags, 1030 struct rte_eth_dev *dev, 1031 const struct rte_flow_attr *attr, 1032 struct rte_flow_error *error) 1033 { 1034 struct mlx5_priv *priv = dev->data->dev_private; 1035 const struct rte_flow_action_queue *queue = action->conf; 1036 1037 if (action_flags & MLX5_FLOW_FATE_ACTIONS) 1038 return rte_flow_error_set(error, EINVAL, 1039 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1040 "can't have 2 fate actions in" 1041 " same flow"); 1042 if (!priv->rxqs_n) 1043 return rte_flow_error_set(error, EINVAL, 1044 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1045 NULL, "No Rx queues configured"); 1046 if (queue->index >= priv->rxqs_n) 1047 return rte_flow_error_set(error, EINVAL, 1048 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1049 &queue->index, 1050 "queue index out of range"); 1051 if (!(*priv->rxqs)[queue->index]) 1052 return rte_flow_error_set(error, EINVAL, 1053 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1054 &queue->index, 1055 "queue is not configured"); 1056 if (attr->egress) 1057 return rte_flow_error_set(error, ENOTSUP, 1058 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1059 "queue action not supported for " 1060 "egress"); 1061 return 0; 1062 } 1063 1064 /* 1065 * Validate the rss action. 1066 * 1067 * @param[in] action 1068 * Pointer to the queue action. 1069 * @param[in] action_flags 1070 * Bit-fields that holds the actions detected until now. 1071 * @param[in] dev 1072 * Pointer to the Ethernet device structure. 1073 * @param[in] attr 1074 * Attributes of flow that includes this action. 1075 * @param[in] item_flags 1076 * Items that were detected. 1077 * @param[out] error 1078 * Pointer to error structure. 1079 * 1080 * @return 1081 * 0 on success, a negative errno value otherwise and rte_errno is set. 1082 */ 1083 int 1084 mlx5_flow_validate_action_rss(const struct rte_flow_action *action, 1085 uint64_t action_flags, 1086 struct rte_eth_dev *dev, 1087 const struct rte_flow_attr *attr, 1088 uint64_t item_flags, 1089 struct rte_flow_error *error) 1090 { 1091 struct mlx5_priv *priv = dev->data->dev_private; 1092 const struct rte_flow_action_rss *rss = action->conf; 1093 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1094 unsigned int i; 1095 1096 if (action_flags & MLX5_FLOW_FATE_ACTIONS) 1097 return rte_flow_error_set(error, EINVAL, 1098 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1099 "can't have 2 fate actions" 1100 " in same flow"); 1101 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT && 1102 rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) 1103 return rte_flow_error_set(error, ENOTSUP, 1104 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1105 &rss->func, 1106 "RSS hash function not supported"); 1107 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 1108 if (rss->level > 2) 1109 #else 1110 if (rss->level > 1) 1111 #endif 1112 return rte_flow_error_set(error, ENOTSUP, 1113 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1114 &rss->level, 1115 "tunnel RSS is not supported"); 1116 /* allow RSS key_len 0 in case of NULL (default) RSS key. 
*/ 1117 if (rss->key_len == 0 && rss->key != NULL) 1118 return rte_flow_error_set(error, ENOTSUP, 1119 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1120 &rss->key_len, 1121 "RSS hash key length 0"); 1122 if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN) 1123 return rte_flow_error_set(error, ENOTSUP, 1124 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1125 &rss->key_len, 1126 "RSS hash key too small"); 1127 if (rss->key_len > MLX5_RSS_HASH_KEY_LEN) 1128 return rte_flow_error_set(error, ENOTSUP, 1129 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1130 &rss->key_len, 1131 "RSS hash key too large"); 1132 if (rss->queue_num > priv->config.ind_table_max_size) 1133 return rte_flow_error_set(error, ENOTSUP, 1134 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1135 &rss->queue_num, 1136 "number of queues too large"); 1137 if (rss->types & MLX5_RSS_HF_MASK) 1138 return rte_flow_error_set(error, ENOTSUP, 1139 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1140 &rss->types, 1141 "some RSS protocols are not" 1142 " supported"); 1143 if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) && 1144 !(rss->types & ETH_RSS_IP)) 1145 return rte_flow_error_set(error, EINVAL, 1146 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 1147 "L3 partial RSS requested but L3 RSS" 1148 " type not specified"); 1149 if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) && 1150 !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP))) 1151 return rte_flow_error_set(error, EINVAL, 1152 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 1153 "L4 partial RSS requested but L4 RSS" 1154 " type not specified"); 1155 if (!priv->rxqs_n) 1156 return rte_flow_error_set(error, EINVAL, 1157 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1158 NULL, "No Rx queues configured"); 1159 if (!rss->queue_num) 1160 return rte_flow_error_set(error, EINVAL, 1161 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1162 NULL, "No queues configured"); 1163 for (i = 0; i != rss->queue_num; ++i) { 1164 if (rss->queue[i] >= priv->rxqs_n) 1165 return rte_flow_error_set 1166 (error, EINVAL, 1167 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1168 &rss->queue[i], "queue index out of range"); 1169 if (!(*priv->rxqs)[rss->queue[i]]) 1170 return rte_flow_error_set 1171 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1172 &rss->queue[i], "queue is not configured"); 1173 } 1174 if (attr->egress) 1175 return rte_flow_error_set(error, ENOTSUP, 1176 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1177 "rss action not supported for " 1178 "egress"); 1179 if (rss->level > 1 && !tunnel) 1180 return rte_flow_error_set(error, EINVAL, 1181 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 1182 "inner RSS is not supported for " 1183 "non-tunnel flows"); 1184 return 0; 1185 } 1186 1187 /* 1188 * Validate the count action. 1189 * 1190 * @param[in] dev 1191 * Pointer to the Ethernet device structure. 1192 * @param[in] attr 1193 * Attributes of flow that includes this action. 1194 * @param[out] error 1195 * Pointer to error structure. 1196 * 1197 * @return 1198 * 0 on success, a negative errno value otherwise and rte_errno is set. 1199 */ 1200 int 1201 mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused, 1202 const struct rte_flow_attr *attr, 1203 struct rte_flow_error *error) 1204 { 1205 if (attr->egress) 1206 return rte_flow_error_set(error, ENOTSUP, 1207 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1208 "count action not supported for " 1209 "egress"); 1210 return 0; 1211 } 1212 1213 /** 1214 * Verify the @p attributes will be correctly understood by the NIC and store 1215 * them in the @p flow if everything is correct. 1216 * 1217 * @param[in] dev 1218 * Pointer to the Ethernet device structure. 
1219 * @param[in] attributes 1220 * Pointer to flow attributes 1221 * @param[out] error 1222 * Pointer to error structure. 1223 * 1224 * @return 1225 * 0 on success, a negative errno value otherwise and rte_errno is set. 1226 */ 1227 int 1228 mlx5_flow_validate_attributes(struct rte_eth_dev *dev, 1229 const struct rte_flow_attr *attributes, 1230 struct rte_flow_error *error) 1231 { 1232 struct mlx5_priv *priv = dev->data->dev_private; 1233 uint32_t priority_max = priv->config.flow_prio - 1; 1234 1235 if (attributes->group) 1236 return rte_flow_error_set(error, ENOTSUP, 1237 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, 1238 NULL, "groups is not supported"); 1239 if (attributes->priority != MLX5_FLOW_PRIO_RSVD && 1240 attributes->priority >= priority_max) 1241 return rte_flow_error_set(error, ENOTSUP, 1242 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, 1243 NULL, "priority out of range"); 1244 if (attributes->egress) 1245 return rte_flow_error_set(error, ENOTSUP, 1246 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1247 "egress is not supported"); 1248 if (attributes->transfer && !priv->config.dv_esw_en) 1249 return rte_flow_error_set(error, ENOTSUP, 1250 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, 1251 NULL, "transfer is not supported"); 1252 if (!attributes->ingress) 1253 return rte_flow_error_set(error, EINVAL, 1254 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, 1255 NULL, 1256 "ingress attribute is mandatory"); 1257 return 0; 1258 } 1259 1260 /** 1261 * Validate ICMP6 item. 1262 * 1263 * @param[in] item 1264 * Item specification. 1265 * @param[in] item_flags 1266 * Bit-fields that holds the items detected until now. 1267 * @param[out] error 1268 * Pointer to error structure. 1269 * 1270 * @return 1271 * 0 on success, a negative errno value otherwise and rte_errno is set. 1272 */ 1273 int 1274 mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item, 1275 uint64_t item_flags, 1276 uint8_t target_protocol, 1277 struct rte_flow_error *error) 1278 { 1279 const struct rte_flow_item_icmp6 *mask = item->mask; 1280 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1281 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : 1282 MLX5_FLOW_LAYER_OUTER_L3_IPV6; 1283 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1284 MLX5_FLOW_LAYER_OUTER_L4; 1285 int ret; 1286 1287 if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6) 1288 return rte_flow_error_set(error, EINVAL, 1289 RTE_FLOW_ERROR_TYPE_ITEM, item, 1290 "protocol filtering not compatible" 1291 " with ICMP6 layer"); 1292 if (!(item_flags & l3m)) 1293 return rte_flow_error_set(error, EINVAL, 1294 RTE_FLOW_ERROR_TYPE_ITEM, item, 1295 "IPv6 is mandatory to filter on" 1296 " ICMP6"); 1297 if (item_flags & l4m) 1298 return rte_flow_error_set(error, EINVAL, 1299 RTE_FLOW_ERROR_TYPE_ITEM, item, 1300 "multiple L4 layers not supported"); 1301 if (!mask) 1302 mask = &rte_flow_item_icmp6_mask; 1303 ret = mlx5_flow_item_acceptable 1304 (item, (const uint8_t *)mask, 1305 (const uint8_t *)&rte_flow_item_icmp6_mask, 1306 sizeof(struct rte_flow_item_icmp6), error); 1307 if (ret < 0) 1308 return ret; 1309 return 0; 1310 } 1311 1312 /** 1313 * Validate ICMP item. 1314 * 1315 * @param[in] item 1316 * Item specification. 1317 * @param[in] item_flags 1318 * Bit-fields that holds the items detected until now. 1319 * @param[out] error 1320 * Pointer to error structure. 1321 * 1322 * @return 1323 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1324 */ 1325 int 1326 mlx5_flow_validate_item_icmp(const struct rte_flow_item *item, 1327 uint64_t item_flags, 1328 uint8_t target_protocol, 1329 struct rte_flow_error *error) 1330 { 1331 const struct rte_flow_item_icmp *mask = item->mask; 1332 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1333 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : 1334 MLX5_FLOW_LAYER_OUTER_L3_IPV4; 1335 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1336 MLX5_FLOW_LAYER_OUTER_L4; 1337 int ret; 1338 1339 if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP) 1340 return rte_flow_error_set(error, EINVAL, 1341 RTE_FLOW_ERROR_TYPE_ITEM, item, 1342 "protocol filtering not compatible" 1343 " with ICMP layer"); 1344 if (!(item_flags & l3m)) 1345 return rte_flow_error_set(error, EINVAL, 1346 RTE_FLOW_ERROR_TYPE_ITEM, item, 1347 "IPv4 is mandatory to filter" 1348 " on ICMP"); 1349 if (item_flags & l4m) 1350 return rte_flow_error_set(error, EINVAL, 1351 RTE_FLOW_ERROR_TYPE_ITEM, item, 1352 "multiple L4 layers not supported"); 1353 if (!mask) 1354 mask = &rte_flow_item_icmp_mask; 1355 ret = mlx5_flow_item_acceptable 1356 (item, (const uint8_t *)mask, 1357 (const uint8_t *)&rte_flow_item_icmp_mask, 1358 sizeof(struct rte_flow_item_icmp), error); 1359 if (ret < 0) 1360 return ret; 1361 return 0; 1362 } 1363 1364 /** 1365 * Validate Ethernet item. 1366 * 1367 * @param[in] item 1368 * Item specification. 1369 * @param[in] item_flags 1370 * Bit-fields that holds the items detected until now. 1371 * @param[out] error 1372 * Pointer to error structure. 1373 * 1374 * @return 1375 * 0 on success, a negative errno value otherwise and rte_errno is set. 1376 */ 1377 int 1378 mlx5_flow_validate_item_eth(const struct rte_flow_item *item, 1379 uint64_t item_flags, 1380 struct rte_flow_error *error) 1381 { 1382 const struct rte_flow_item_eth *mask = item->mask; 1383 const struct rte_flow_item_eth nic_mask = { 1384 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", 1385 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", 1386 .type = RTE_BE16(0xffff), 1387 }; 1388 int ret; 1389 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1390 const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : 1391 MLX5_FLOW_LAYER_OUTER_L2; 1392 1393 if (item_flags & ethm) 1394 return rte_flow_error_set(error, ENOTSUP, 1395 RTE_FLOW_ERROR_TYPE_ITEM, item, 1396 "multiple L2 layers not supported"); 1397 if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) || 1398 (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3))) 1399 return rte_flow_error_set(error, EINVAL, 1400 RTE_FLOW_ERROR_TYPE_ITEM, item, 1401 "L2 layer should not follow " 1402 "L3 layers"); 1403 if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) || 1404 (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN))) 1405 return rte_flow_error_set(error, EINVAL, 1406 RTE_FLOW_ERROR_TYPE_ITEM, item, 1407 "L2 layer should not follow VLAN"); 1408 if (!mask) 1409 mask = &rte_flow_item_eth_mask; 1410 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1411 (const uint8_t *)&nic_mask, 1412 sizeof(struct rte_flow_item_eth), 1413 error); 1414 return ret; 1415 } 1416 1417 /** 1418 * Validate VLAN item. 1419 * 1420 * @param[in] item 1421 * Item specification. 1422 * @param[in] item_flags 1423 * Bit-fields that holds the items detected until now. 1424 * @param[in] dev 1425 * Ethernet device flow is being created on. 1426 * @param[out] error 1427 * Pointer to error structure. 
1428 * 1429 * @return 1430 * 0 on success, a negative errno value otherwise and rte_errno is set. 1431 */ 1432 int 1433 mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, 1434 uint64_t item_flags, 1435 struct rte_eth_dev *dev, 1436 struct rte_flow_error *error) 1437 { 1438 const struct rte_flow_item_vlan *spec = item->spec; 1439 const struct rte_flow_item_vlan *mask = item->mask; 1440 const struct rte_flow_item_vlan nic_mask = { 1441 .tci = RTE_BE16(UINT16_MAX), 1442 .inner_type = RTE_BE16(UINT16_MAX), 1443 }; 1444 uint16_t vlan_tag = 0; 1445 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1446 int ret; 1447 const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 | 1448 MLX5_FLOW_LAYER_INNER_L4) : 1449 (MLX5_FLOW_LAYER_OUTER_L3 | 1450 MLX5_FLOW_LAYER_OUTER_L4); 1451 const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN : 1452 MLX5_FLOW_LAYER_OUTER_VLAN; 1453 1454 if (item_flags & vlanm) 1455 return rte_flow_error_set(error, EINVAL, 1456 RTE_FLOW_ERROR_TYPE_ITEM, item, 1457 "multiple VLAN layers not supported"); 1458 else if ((item_flags & l34m) != 0) 1459 return rte_flow_error_set(error, EINVAL, 1460 RTE_FLOW_ERROR_TYPE_ITEM, item, 1461 "VLAN cannot follow L3/L4 layer"); 1462 if (!mask) 1463 mask = &rte_flow_item_vlan_mask; 1464 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1465 (const uint8_t *)&nic_mask, 1466 sizeof(struct rte_flow_item_vlan), 1467 error); 1468 if (ret) 1469 return ret; 1470 if (!tunnel && mask->tci != RTE_BE16(0x0fff)) { 1471 struct mlx5_priv *priv = dev->data->dev_private; 1472 1473 if (priv->vmwa_context) { 1474 /* 1475 * Non-NULL context means we have a virtual machine 1476 * and SR-IOV enabled, we have to create VLAN interface 1477 * to make hypervisor to setup E-Switch vport 1478 * context correctly. We avoid creating the multiple 1479 * VLAN interfaces, so we cannot support VLAN tag mask. 1480 */ 1481 return rte_flow_error_set(error, EINVAL, 1482 RTE_FLOW_ERROR_TYPE_ITEM, 1483 item, 1484 "VLAN tag mask is not" 1485 " supported in virtual" 1486 " environment"); 1487 } 1488 } 1489 if (spec) { 1490 vlan_tag = spec->tci; 1491 vlan_tag &= mask->tci; 1492 } 1493 /* 1494 * From verbs perspective an empty VLAN is equivalent 1495 * to a packet without VLAN layer. 1496 */ 1497 if (!vlan_tag) 1498 return rte_flow_error_set(error, EINVAL, 1499 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, 1500 item->spec, 1501 "VLAN cannot be empty"); 1502 return 0; 1503 } 1504 1505 /** 1506 * Validate IPV4 item. 1507 * 1508 * @param[in] item 1509 * Item specification. 1510 * @param[in] item_flags 1511 * Bit-fields that holds the items detected until now. 1512 * @param[in] acc_mask 1513 * Acceptable mask, if NULL default internal default mask 1514 * will be used to check whether item fields are supported. 1515 * @param[out] error 1516 * Pointer to error structure. 1517 * 1518 * @return 1519 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1520 */ 1521 int 1522 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, 1523 uint64_t item_flags, 1524 uint64_t last_item, 1525 uint16_t ether_type, 1526 const struct rte_flow_item_ipv4 *acc_mask, 1527 struct rte_flow_error *error) 1528 { 1529 const struct rte_flow_item_ipv4 *mask = item->mask; 1530 const struct rte_flow_item_ipv4 *spec = item->spec; 1531 const struct rte_flow_item_ipv4 nic_mask = { 1532 .hdr = { 1533 .src_addr = RTE_BE32(0xffffffff), 1534 .dst_addr = RTE_BE32(0xffffffff), 1535 .type_of_service = 0xff, 1536 .next_proto_id = 0xff, 1537 }, 1538 }; 1539 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1540 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1541 MLX5_FLOW_LAYER_OUTER_L3; 1542 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1543 MLX5_FLOW_LAYER_OUTER_L4; 1544 int ret; 1545 uint8_t next_proto = 0xFF; 1546 const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 | 1547 MLX5_FLOW_LAYER_OUTER_VLAN | 1548 MLX5_FLOW_LAYER_INNER_VLAN); 1549 1550 if ((last_item & l2_vlan) && ether_type && 1551 ether_type != RTE_ETHER_TYPE_IPV4) 1552 return rte_flow_error_set(error, EINVAL, 1553 RTE_FLOW_ERROR_TYPE_ITEM, item, 1554 "IPv4 cannot follow L2/VLAN layer " 1555 "which ether type is not IPv4"); 1556 if (item_flags & MLX5_FLOW_LAYER_IPIP) { 1557 if (mask && spec) 1558 next_proto = mask->hdr.next_proto_id & 1559 spec->hdr.next_proto_id; 1560 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) 1561 return rte_flow_error_set(error, EINVAL, 1562 RTE_FLOW_ERROR_TYPE_ITEM, 1563 item, 1564 "multiple tunnel " 1565 "not supported"); 1566 } 1567 if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) 1568 return rte_flow_error_set(error, EINVAL, 1569 RTE_FLOW_ERROR_TYPE_ITEM, item, 1570 "wrong tunnel type - IPv6 specified " 1571 "but IPv4 item provided"); 1572 if (item_flags & l3m) 1573 return rte_flow_error_set(error, ENOTSUP, 1574 RTE_FLOW_ERROR_TYPE_ITEM, item, 1575 "multiple L3 layers not supported"); 1576 else if (item_flags & l4m) 1577 return rte_flow_error_set(error, EINVAL, 1578 RTE_FLOW_ERROR_TYPE_ITEM, item, 1579 "L3 cannot follow an L4 layer."); 1580 else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && 1581 !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) 1582 return rte_flow_error_set(error, EINVAL, 1583 RTE_FLOW_ERROR_TYPE_ITEM, item, 1584 "L3 cannot follow an NVGRE layer."); 1585 if (!mask) 1586 mask = &rte_flow_item_ipv4_mask; 1587 else if (mask->hdr.next_proto_id != 0 && 1588 mask->hdr.next_proto_id != 0xff) 1589 return rte_flow_error_set(error, EINVAL, 1590 RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask, 1591 "partial mask is not supported" 1592 " for protocol"); 1593 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1594 acc_mask ? (const uint8_t *)acc_mask 1595 : (const uint8_t *)&nic_mask, 1596 sizeof(struct rte_flow_item_ipv4), 1597 error); 1598 if (ret < 0) 1599 return ret; 1600 return 0; 1601 } 1602 1603 /** 1604 * Validate IPV6 item. 1605 * 1606 * @param[in] item 1607 * Item specification. 1608 * @param[in] item_flags 1609 * Bit-fields that holds the items detected until now. 1610 * @param[in] acc_mask 1611 * Acceptable mask, if NULL default internal default mask 1612 * will be used to check whether item fields are supported. 1613 * @param[out] error 1614 * Pointer to error structure. 1615 * 1616 * @return 1617 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1618 */ 1619 int 1620 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, 1621 uint64_t item_flags, 1622 uint64_t last_item, 1623 uint16_t ether_type, 1624 const struct rte_flow_item_ipv6 *acc_mask, 1625 struct rte_flow_error *error) 1626 { 1627 const struct rte_flow_item_ipv6 *mask = item->mask; 1628 const struct rte_flow_item_ipv6 *spec = item->spec; 1629 const struct rte_flow_item_ipv6 nic_mask = { 1630 .hdr = { 1631 .src_addr = 1632 "\xff\xff\xff\xff\xff\xff\xff\xff" 1633 "\xff\xff\xff\xff\xff\xff\xff\xff", 1634 .dst_addr = 1635 "\xff\xff\xff\xff\xff\xff\xff\xff" 1636 "\xff\xff\xff\xff\xff\xff\xff\xff", 1637 .vtc_flow = RTE_BE32(0xffffffff), 1638 .proto = 0xff, 1639 .hop_limits = 0xff, 1640 }, 1641 }; 1642 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1643 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1644 MLX5_FLOW_LAYER_OUTER_L3; 1645 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1646 MLX5_FLOW_LAYER_OUTER_L4; 1647 int ret; 1648 uint8_t next_proto = 0xFF; 1649 const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 | 1650 MLX5_FLOW_LAYER_OUTER_VLAN | 1651 MLX5_FLOW_LAYER_INNER_VLAN); 1652 1653 if ((last_item & l2_vlan) && ether_type && 1654 ether_type != RTE_ETHER_TYPE_IPV6) 1655 return rte_flow_error_set(error, EINVAL, 1656 RTE_FLOW_ERROR_TYPE_ITEM, item, 1657 "IPv6 cannot follow L2/VLAN layer " 1658 "which ether type is not IPv6"); 1659 if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) { 1660 if (mask && spec) 1661 next_proto = mask->hdr.proto & spec->hdr.proto; 1662 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) 1663 return rte_flow_error_set(error, EINVAL, 1664 RTE_FLOW_ERROR_TYPE_ITEM, 1665 item, 1666 "multiple tunnel " 1667 "not supported"); 1668 } 1669 if (item_flags & MLX5_FLOW_LAYER_IPIP) 1670 return rte_flow_error_set(error, EINVAL, 1671 RTE_FLOW_ERROR_TYPE_ITEM, item, 1672 "wrong tunnel type - IPv4 specified " 1673 "but IPv6 item provided"); 1674 if (item_flags & l3m) 1675 return rte_flow_error_set(error, ENOTSUP, 1676 RTE_FLOW_ERROR_TYPE_ITEM, item, 1677 "multiple L3 layers not supported"); 1678 else if (item_flags & l4m) 1679 return rte_flow_error_set(error, EINVAL, 1680 RTE_FLOW_ERROR_TYPE_ITEM, item, 1681 "L3 cannot follow an L4 layer."); 1682 else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && 1683 !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) 1684 return rte_flow_error_set(error, EINVAL, 1685 RTE_FLOW_ERROR_TYPE_ITEM, item, 1686 "L3 cannot follow an NVGRE layer."); 1687 if (!mask) 1688 mask = &rte_flow_item_ipv6_mask; 1689 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1690 acc_mask ? (const uint8_t *)acc_mask 1691 : (const uint8_t *)&nic_mask, 1692 sizeof(struct rte_flow_item_ipv6), 1693 error); 1694 if (ret < 0) 1695 return ret; 1696 return 0; 1697 } 1698 1699 /** 1700 * Validate UDP item. 1701 * 1702 * @param[in] item 1703 * Item specification. 1704 * @param[in] item_flags 1705 * Bit-fields that holds the items detected until now. 1706 * @param[in] target_protocol 1707 * The next protocol in the previous item. 1708 * @param[in] flow_mask 1709 * mlx5 flow-specific (DV, verbs, etc.) supported header fields mask. 1710 * @param[out] error 1711 * Pointer to error structure. 1712 * 1713 * @return 1714 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1715 */ 1716 int 1717 mlx5_flow_validate_item_udp(const struct rte_flow_item *item, 1718 uint64_t item_flags, 1719 uint8_t target_protocol, 1720 struct rte_flow_error *error) 1721 { 1722 const struct rte_flow_item_udp *mask = item->mask; 1723 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1724 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1725 MLX5_FLOW_LAYER_OUTER_L3; 1726 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1727 MLX5_FLOW_LAYER_OUTER_L4; 1728 int ret; 1729 1730 if (target_protocol != 0xff && target_protocol != IPPROTO_UDP) 1731 return rte_flow_error_set(error, EINVAL, 1732 RTE_FLOW_ERROR_TYPE_ITEM, item, 1733 "protocol filtering not compatible" 1734 " with UDP layer"); 1735 if (!(item_flags & l3m)) 1736 return rte_flow_error_set(error, EINVAL, 1737 RTE_FLOW_ERROR_TYPE_ITEM, item, 1738 "L3 is mandatory to filter on L4"); 1739 if (item_flags & l4m) 1740 return rte_flow_error_set(error, EINVAL, 1741 RTE_FLOW_ERROR_TYPE_ITEM, item, 1742 "multiple L4 layers not supported"); 1743 if (!mask) 1744 mask = &rte_flow_item_udp_mask; 1745 ret = mlx5_flow_item_acceptable 1746 (item, (const uint8_t *)mask, 1747 (const uint8_t *)&rte_flow_item_udp_mask, 1748 sizeof(struct rte_flow_item_udp), error); 1749 if (ret < 0) 1750 return ret; 1751 return 0; 1752 } 1753 1754 /** 1755 * Validate TCP item. 1756 * 1757 * @param[in] item 1758 * Item specification. 1759 * @param[in] item_flags 1760 * Bit-fields that holds the items detected until now. 1761 * @param[in] target_protocol 1762 * The next protocol in the previous item. 1763 * @param[out] error 1764 * Pointer to error structure. 1765 * 1766 * @return 1767 * 0 on success, a negative errno value otherwise and rte_errno is set. 1768 */ 1769 int 1770 mlx5_flow_validate_item_tcp(const struct rte_flow_item *item, 1771 uint64_t item_flags, 1772 uint8_t target_protocol, 1773 const struct rte_flow_item_tcp *flow_mask, 1774 struct rte_flow_error *error) 1775 { 1776 const struct rte_flow_item_tcp *mask = item->mask; 1777 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1778 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1779 MLX5_FLOW_LAYER_OUTER_L3; 1780 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1781 MLX5_FLOW_LAYER_OUTER_L4; 1782 int ret; 1783 1784 assert(flow_mask); 1785 if (target_protocol != 0xff && target_protocol != IPPROTO_TCP) 1786 return rte_flow_error_set(error, EINVAL, 1787 RTE_FLOW_ERROR_TYPE_ITEM, item, 1788 "protocol filtering not compatible" 1789 " with TCP layer"); 1790 if (!(item_flags & l3m)) 1791 return rte_flow_error_set(error, EINVAL, 1792 RTE_FLOW_ERROR_TYPE_ITEM, item, 1793 "L3 is mandatory to filter on L4"); 1794 if (item_flags & l4m) 1795 return rte_flow_error_set(error, EINVAL, 1796 RTE_FLOW_ERROR_TYPE_ITEM, item, 1797 "multiple L4 layers not supported"); 1798 if (!mask) 1799 mask = &rte_flow_item_tcp_mask; 1800 ret = mlx5_flow_item_acceptable 1801 (item, (const uint8_t *)mask, 1802 (const uint8_t *)flow_mask, 1803 sizeof(struct rte_flow_item_tcp), error); 1804 if (ret < 0) 1805 return ret; 1806 return 0; 1807 } 1808 1809 /** 1810 * Validate VXLAN item. 1811 * 1812 * @param[in] item 1813 * Item specification. 1814 * @param[in] item_flags 1815 * Bit-fields that holds the items detected until now. 1816 * @param[in] target_protocol 1817 * The next protocol in the previous item. 1818 * @param[out] error 1819 * Pointer to error structure. 1820 * 1821 * @return 1822 * 0 on success, a negative errno value otherwise and rte_errno is set. 
 */
int
mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
			      uint64_t item_flags,
			      struct rte_flow_error *error)
{
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	int ret;
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };
	uint32_t vlan_id = 0;

	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple tunnel layers not"
					  " supported");
	/*
	 * Verify only UDPv4 is present as defined in
	 * https://tools.ietf.org/html/rfc7348
	 */
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "no outer UDP layer found");
	if (!mask)
		mask = &rte_flow_item_vxlan_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_vxlan_mask,
		 sizeof(struct rte_flow_item_vxlan),
		 error);
	if (ret < 0)
		return ret;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vlan_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vlan_id &= id.vlan_id;
	}
	/*
	 * Tunnel ID 0 is equivalent to not adding a VXLAN layer: if only this
	 * layer is defined in the Verbs specification it is interpreted as a
	 * wildcard and all packets will match this rule; if it follows a full
	 * stack layer (ex: eth / ipv4 / udp), all packets matching the layers
	 * before will also match this rule. To avoid such a situation, VNI 0
	 * is currently refused.
	 */
	if (!vlan_id)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VXLAN vni cannot be 0");
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VXLAN tunnel must be fully defined");
	return 0;
}
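
/*
 * Illustration only (not an exhaustive list): the checks above expect the
 * outer stack to be fully specified before the VXLAN item, e.g. a pattern
 * such as
 *
 *	eth / ipv4 / udp / vxlan (vni != 0) / end
 *
 * is accepted, while a pattern missing the outer eth/ipv4/udp items or
 * carrying a zero VNI is rejected by one of the errors above.
 */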
1903 */ 1904 int 1905 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, 1906 uint64_t item_flags, 1907 struct rte_eth_dev *dev, 1908 struct rte_flow_error *error) 1909 { 1910 struct mlx5_priv *priv = dev->data->dev_private; 1911 const struct rte_flow_item_vxlan_gpe *spec = item->spec; 1912 const struct rte_flow_item_vxlan_gpe *mask = item->mask; 1913 int ret; 1914 union vni { 1915 uint32_t vlan_id; 1916 uint8_t vni[4]; 1917 } id = { .vlan_id = 0, }; 1918 uint32_t vlan_id = 0; 1919 1920 if (!priv->config.l3_vxlan_en) 1921 return rte_flow_error_set(error, ENOTSUP, 1922 RTE_FLOW_ERROR_TYPE_ITEM, item, 1923 "L3 VXLAN is not enabled by device" 1924 " parameter and/or not configured in" 1925 " firmware"); 1926 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 1927 return rte_flow_error_set(error, ENOTSUP, 1928 RTE_FLOW_ERROR_TYPE_ITEM, item, 1929 "multiple tunnel layers not" 1930 " supported"); 1931 /* 1932 * Verify only UDPv4 is present as defined in 1933 * https://tools.ietf.org/html/rfc7348 1934 */ 1935 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 1936 return rte_flow_error_set(error, EINVAL, 1937 RTE_FLOW_ERROR_TYPE_ITEM, item, 1938 "no outer UDP layer found"); 1939 if (!mask) 1940 mask = &rte_flow_item_vxlan_gpe_mask; 1941 ret = mlx5_flow_item_acceptable 1942 (item, (const uint8_t *)mask, 1943 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask, 1944 sizeof(struct rte_flow_item_vxlan_gpe), 1945 error); 1946 if (ret < 0) 1947 return ret; 1948 if (spec) { 1949 if (spec->protocol) 1950 return rte_flow_error_set(error, ENOTSUP, 1951 RTE_FLOW_ERROR_TYPE_ITEM, 1952 item, 1953 "VxLAN-GPE protocol" 1954 " not supported"); 1955 memcpy(&id.vni[1], spec->vni, 3); 1956 vlan_id = id.vlan_id; 1957 memcpy(&id.vni[1], mask->vni, 3); 1958 vlan_id &= id.vlan_id; 1959 } 1960 /* 1961 * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this 1962 * layer is defined in the Verbs specification it is interpreted as 1963 * wildcard and all packets will match this rule, if it follows a full 1964 * stack layer (ex: eth / ipv4 / udp), all packets matching the layers 1965 * before will also match this rule. To avoid such situation, VNI 0 1966 * is currently refused. 1967 */ 1968 if (!vlan_id) 1969 return rte_flow_error_set(error, ENOTSUP, 1970 RTE_FLOW_ERROR_TYPE_ITEM, item, 1971 "VXLAN-GPE vni cannot be 0"); 1972 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 1973 return rte_flow_error_set(error, ENOTSUP, 1974 RTE_FLOW_ERROR_TYPE_ITEM, item, 1975 "VXLAN-GPE tunnel must be fully" 1976 " defined"); 1977 return 0; 1978 } 1979 /** 1980 * Validate GRE Key item. 1981 * 1982 * @param[in] item 1983 * Item specification. 1984 * @param[in] item_flags 1985 * Bit flags to mark detected items. 1986 * @param[in] gre_item 1987 * Pointer to gre_item 1988 * @param[out] error 1989 * Pointer to error structure. 1990 * 1991 * @return 1992 * 0 on success, a negative errno value otherwise and rte_errno is set. 
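 *
 * @note Illustrative sketch (gre_item, key_item, item_flags, ret and error
 * are assumed caller state, not a real call site). The GRE_KEY item must
 * follow an outer GRE item, and when the GRE spec is given with the
 * key-present (K) bit covered by its mask, that bit has to be set:
 * @code
 * struct rte_flow_item_gre gre_spec = {
 *     .c_rsvd0_ver = RTE_BE16(0x2000), // key-present (K) bit
 * };
 * rte_be32_t key_spec = RTE_BE32(0x1234);
 * // gre_item->spec points at gre_spec, key_item->spec at key_spec.
 * ret = mlx5_flow_validate_item_gre_key(key_item, item_flags,
 *                                       gre_item, &error);
 * @endcode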
1993 */ 1994 int 1995 mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item, 1996 uint64_t item_flags, 1997 const struct rte_flow_item *gre_item, 1998 struct rte_flow_error *error) 1999 { 2000 const rte_be32_t *mask = item->mask; 2001 int ret = 0; 2002 rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX); 2003 const struct rte_flow_item_gre *gre_spec; 2004 const struct rte_flow_item_gre *gre_mask; 2005 2006 if (item_flags & MLX5_FLOW_LAYER_GRE_KEY) 2007 return rte_flow_error_set(error, ENOTSUP, 2008 RTE_FLOW_ERROR_TYPE_ITEM, item, 2009 "Multiple GRE keys not supported"); 2010 if (!(item_flags & MLX5_FLOW_LAYER_GRE)) 2011 return rte_flow_error_set(error, ENOTSUP, 2012 RTE_FLOW_ERROR_TYPE_ITEM, item, 2013 "No preceding GRE header"); 2014 if (item_flags & MLX5_FLOW_LAYER_INNER) 2015 return rte_flow_error_set(error, ENOTSUP, 2016 RTE_FLOW_ERROR_TYPE_ITEM, item, 2017 "GRE key following a wrong item"); 2018 gre_mask = gre_item->mask; 2019 if (!gre_mask) 2020 gre_mask = &rte_flow_item_gre_mask; 2021 gre_spec = gre_item->spec; 2022 if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) && 2023 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000))) 2024 return rte_flow_error_set(error, EINVAL, 2025 RTE_FLOW_ERROR_TYPE_ITEM, item, 2026 "Key bit must be on"); 2027 2028 if (!mask) 2029 mask = &gre_key_default_mask; 2030 ret = mlx5_flow_item_acceptable 2031 (item, (const uint8_t *)mask, 2032 (const uint8_t *)&gre_key_default_mask, 2033 sizeof(rte_be32_t), error); 2034 return ret; 2035 } 2036 2037 /** 2038 * Validate GRE item. 2039 * 2040 * @param[in] item 2041 * Item specification. 2042 * @param[in] item_flags 2043 * Bit flags to mark detected items. 2044 * @param[in] target_protocol 2045 * The next protocol in the previous item. 2046 * @param[out] error 2047 * Pointer to error structure. 2048 * 2049 * @return 2050 * 0 on success, a negative errno value otherwise and rte_errno is set.
2051 */ 2052 int 2053 mlx5_flow_validate_item_gre(const struct rte_flow_item *item, 2054 uint64_t item_flags, 2055 uint8_t target_protocol, 2056 struct rte_flow_error *error) 2057 { 2058 const struct rte_flow_item_gre *spec __rte_unused = item->spec; 2059 const struct rte_flow_item_gre *mask = item->mask; 2060 int ret; 2061 const struct rte_flow_item_gre nic_mask = { 2062 .c_rsvd0_ver = RTE_BE16(0xB000), 2063 .protocol = RTE_BE16(UINT16_MAX), 2064 }; 2065 2066 if (target_protocol != 0xff && target_protocol != IPPROTO_GRE) 2067 return rte_flow_error_set(error, EINVAL, 2068 RTE_FLOW_ERROR_TYPE_ITEM, item, 2069 "protocol filtering not compatible" 2070 " with this GRE layer"); 2071 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 2072 return rte_flow_error_set(error, ENOTSUP, 2073 RTE_FLOW_ERROR_TYPE_ITEM, item, 2074 "multiple tunnel layers not" 2075 " supported"); 2076 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3)) 2077 return rte_flow_error_set(error, ENOTSUP, 2078 RTE_FLOW_ERROR_TYPE_ITEM, item, 2079 "L3 Layer is missing"); 2080 if (!mask) 2081 mask = &rte_flow_item_gre_mask; 2082 ret = mlx5_flow_item_acceptable 2083 (item, (const uint8_t *)mask, 2084 (const uint8_t *)&nic_mask, 2085 sizeof(struct rte_flow_item_gre), error); 2086 if (ret < 0) 2087 return ret; 2088 #ifndef HAVE_MLX5DV_DR 2089 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT 2090 if (spec && (spec->protocol & mask->protocol)) 2091 return rte_flow_error_set(error, ENOTSUP, 2092 RTE_FLOW_ERROR_TYPE_ITEM, item, 2093 "without MPLS support the" 2094 " specification cannot be used for" 2095 " filtering"); 2096 #endif 2097 #endif 2098 return 0; 2099 } 2100 2101 /** 2102 * Validate Geneve item. 2103 * 2104 * @param[in] item 2105 * Item specification. 2106 * @param[in] item_flags 2107 * Bit-fields that holds the items detected until now. 2108 * @param[in] dev 2109 * Pointer to the rte_eth_dev structure. 2110 * @param[out] error 2111 * Pointer to error structure. 2112 * 2113 * @return 2114 * 0 on success, a negative errno value otherwise and rte_errno is set. 2115 */ 2116 2117 int 2118 mlx5_flow_validate_item_geneve(const struct rte_flow_item *item, 2119 uint64_t item_flags, 2120 struct rte_eth_dev *dev, 2121 struct rte_flow_error *error) 2122 { 2123 struct mlx5_priv *priv = dev->data->dev_private; 2124 const struct rte_flow_item_geneve *spec = item->spec; 2125 const struct rte_flow_item_geneve *mask = item->mask; 2126 int ret; 2127 uint16_t gbhdr; 2128 uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
2129 MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0; 2130 const struct rte_flow_item_geneve nic_mask = { 2131 .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80), 2132 .vni = "\xff\xff\xff", 2133 .protocol = RTE_BE16(UINT16_MAX), 2134 }; 2135 2136 if (!(priv->config.hca_attr.flex_parser_protocols & 2137 MLX5_HCA_FLEX_GENEVE_ENABLED) || 2138 !priv->config.hca_attr.tunnel_stateless_geneve_rx) 2139 return rte_flow_error_set(error, ENOTSUP, 2140 RTE_FLOW_ERROR_TYPE_ITEM, item, 2141 "L3 Geneve is not enabled by device" 2142 " parameter and/or not configured in" 2143 " firmware"); 2144 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 2145 return rte_flow_error_set(error, ENOTSUP, 2146 RTE_FLOW_ERROR_TYPE_ITEM, item, 2147 "multiple tunnel layers not" 2148 " supported"); 2149 /* 2150 * Verify only UDPv4 is present as defined in 2151 * https://tools.ietf.org/html/rfc7348 2152 */ 2153 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 2154 return rte_flow_error_set(error, EINVAL, 2155 RTE_FLOW_ERROR_TYPE_ITEM, item, 2156 "no outer UDP layer found"); 2157 if (!mask) 2158 mask = &rte_flow_item_geneve_mask; 2159 ret = mlx5_flow_item_acceptable 2160 (item, (const uint8_t *)mask, 2161 (const uint8_t *)&nic_mask, 2162 sizeof(struct rte_flow_item_geneve), error); 2163 if (ret) 2164 return ret; 2165 if (spec) { 2166 gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0); 2167 if (MLX5_GENEVE_VER_VAL(gbhdr) || 2168 MLX5_GENEVE_CRITO_VAL(gbhdr) || 2169 MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1) 2170 return rte_flow_error_set(error, ENOTSUP, 2171 RTE_FLOW_ERROR_TYPE_ITEM, 2172 item, 2173 "Geneve protocol unsupported" 2174 " fields are being used"); 2175 if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len) 2176 return rte_flow_error_set 2177 (error, ENOTSUP, 2178 RTE_FLOW_ERROR_TYPE_ITEM, 2179 item, 2180 "Unsupported Geneve options length"); 2181 } 2182 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 2183 return rte_flow_error_set 2184 (error, ENOTSUP, 2185 RTE_FLOW_ERROR_TYPE_ITEM, item, 2186 "Geneve tunnel must be fully defined"); 2187 return 0; 2188 } 2189 2190 /** 2191 * Validate MPLS item. 2192 * 2193 * @param[in] dev 2194 * Pointer to the rte_eth_dev structure. 2195 * @param[in] item 2196 * Item specification. 2197 * @param[in] item_flags 2198 * Bit-fields that holds the items detected until now. 2199 * @param[in] prev_layer 2200 * The protocol layer indicated in previous item. 2201 * @param[out] error 2202 * Pointer to error structure. 2203 * 2204 * @return 2205 * 0 on success, a negative errno value otherwise and rte_errno is set. 2206 */ 2207 int 2208 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, 2209 const struct rte_flow_item *item __rte_unused, 2210 uint64_t item_flags __rte_unused, 2211 uint64_t prev_layer __rte_unused, 2212 struct rte_flow_error *error) 2213 { 2214 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT 2215 const struct rte_flow_item_mpls *mask = item->mask; 2216 struct mlx5_priv *priv = dev->data->dev_private; 2217 int ret; 2218 2219 if (!priv->config.mpls_en) 2220 return rte_flow_error_set(error, ENOTSUP, 2221 RTE_FLOW_ERROR_TYPE_ITEM, item, 2222 "MPLS not supported or" 2223 " disabled in firmware" 2224 " configuration."); 2225 /* MPLS over IP, UDP, GRE is allowed */ 2226 if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 | 2227 MLX5_FLOW_LAYER_OUTER_L4_UDP | 2228 MLX5_FLOW_LAYER_GRE))) 2229 return rte_flow_error_set(error, EINVAL, 2230 RTE_FLOW_ERROR_TYPE_ITEM, item, 2231 "protocol filtering not compatible" 2232 " with MPLS layer"); 2233 /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. 
*/ 2234 if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) && 2235 !(item_flags & MLX5_FLOW_LAYER_GRE)) 2236 return rte_flow_error_set(error, ENOTSUP, 2237 RTE_FLOW_ERROR_TYPE_ITEM, item, 2238 "multiple tunnel layers not" 2239 " supported"); 2240 if (!mask) 2241 mask = &rte_flow_item_mpls_mask; 2242 ret = mlx5_flow_item_acceptable 2243 (item, (const uint8_t *)mask, 2244 (const uint8_t *)&rte_flow_item_mpls_mask, 2245 sizeof(struct rte_flow_item_mpls), error); 2246 if (ret < 0) 2247 return ret; 2248 return 0; 2249 #endif 2250 return rte_flow_error_set(error, ENOTSUP, 2251 RTE_FLOW_ERROR_TYPE_ITEM, item, 2252 "MPLS is not supported by Verbs, please" 2253 " update."); 2254 } 2255 2256 /** 2257 * Validate NVGRE item. 2258 * 2259 * @param[in] item 2260 * Item specification. 2261 * @param[in] item_flags 2262 * Bit flags to mark detected items. 2263 * @param[in] target_protocol 2264 * The next protocol in the previous item. 2265 * @param[out] error 2266 * Pointer to error structure. 2267 * 2268 * @return 2269 * 0 on success, a negative errno value otherwise and rte_errno is set. 2270 */ 2271 int 2272 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item, 2273 uint64_t item_flags, 2274 uint8_t target_protocol, 2275 struct rte_flow_error *error) 2276 { 2277 const struct rte_flow_item_nvgre *mask = item->mask; 2278 int ret; 2279 2280 if (target_protocol != 0xff && target_protocol != IPPROTO_GRE) 2281 return rte_flow_error_set(error, EINVAL, 2282 RTE_FLOW_ERROR_TYPE_ITEM, item, 2283 "protocol filtering not compatible" 2284 " with this GRE layer"); 2285 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 2286 return rte_flow_error_set(error, ENOTSUP, 2287 RTE_FLOW_ERROR_TYPE_ITEM, item, 2288 "multiple tunnel layers not" 2289 " supported"); 2290 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3)) 2291 return rte_flow_error_set(error, ENOTSUP, 2292 RTE_FLOW_ERROR_TYPE_ITEM, item, 2293 "L3 Layer is missing"); 2294 if (!mask) 2295 mask = &rte_flow_item_nvgre_mask; 2296 ret = mlx5_flow_item_acceptable 2297 (item, (const uint8_t *)mask, 2298 (const uint8_t *)&rte_flow_item_nvgre_mask, 2299 sizeof(struct rte_flow_item_nvgre), error); 2300 if (ret < 0) 2301 return ret; 2302 return 0; 2303 } 2304 2305 /* Allocate unique ID for the split Q/RSS subflows. */ 2306 static uint32_t 2307 flow_qrss_get_id(struct rte_eth_dev *dev) 2308 { 2309 struct mlx5_priv *priv = dev->data->dev_private; 2310 uint32_t qrss_id, ret; 2311 2312 ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id); 2313 if (ret) 2314 return 0; 2315 assert(qrss_id); 2316 return qrss_id; 2317 } 2318 2319 /* Free unique ID for the split Q/RSS subflows. */ 2320 static void 2321 flow_qrss_free_id(struct rte_eth_dev *dev, uint32_t qrss_id) 2322 { 2323 struct mlx5_priv *priv = dev->data->dev_private; 2324 2325 if (qrss_id) 2326 mlx5_flow_id_release(priv->qrss_id_pool, qrss_id); 2327 } 2328 2329 /** 2330 * Release resource related QUEUE/RSS action split. 2331 * 2332 * @param dev 2333 * Pointer to Ethernet device. 2334 * @param flow 2335 * Flow to release id's from. 
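 *
 * @note The IDs released here are the ones obtained from flow_qrss_get_id()
 * and stored in dev_flow->qrss_id by flow_create_split_metadata() when a
 * Q/RSS action is split out.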
2336 */ 2337 static void 2338 flow_mreg_split_qrss_release(struct rte_eth_dev *dev, 2339 struct rte_flow *flow) 2340 { 2341 struct mlx5_flow *dev_flow; 2342 2343 LIST_FOREACH(dev_flow, &flow->dev_flows, next) 2344 if (dev_flow->qrss_id) 2345 flow_qrss_free_id(dev, dev_flow->qrss_id); 2346 } 2347 2348 static int 2349 flow_null_validate(struct rte_eth_dev *dev __rte_unused, 2350 const struct rte_flow_attr *attr __rte_unused, 2351 const struct rte_flow_item items[] __rte_unused, 2352 const struct rte_flow_action actions[] __rte_unused, 2353 bool external __rte_unused, 2354 struct rte_flow_error *error) 2355 { 2356 return rte_flow_error_set(error, ENOTSUP, 2357 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2358 } 2359 2360 static struct mlx5_flow * 2361 flow_null_prepare(const struct rte_flow_attr *attr __rte_unused, 2362 const struct rte_flow_item items[] __rte_unused, 2363 const struct rte_flow_action actions[] __rte_unused, 2364 struct rte_flow_error *error) 2365 { 2366 rte_flow_error_set(error, ENOTSUP, 2367 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2368 return NULL; 2369 } 2370 2371 static int 2372 flow_null_translate(struct rte_eth_dev *dev __rte_unused, 2373 struct mlx5_flow *dev_flow __rte_unused, 2374 const struct rte_flow_attr *attr __rte_unused, 2375 const struct rte_flow_item items[] __rte_unused, 2376 const struct rte_flow_action actions[] __rte_unused, 2377 struct rte_flow_error *error) 2378 { 2379 return rte_flow_error_set(error, ENOTSUP, 2380 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2381 } 2382 2383 static int 2384 flow_null_apply(struct rte_eth_dev *dev __rte_unused, 2385 struct rte_flow *flow __rte_unused, 2386 struct rte_flow_error *error) 2387 { 2388 return rte_flow_error_set(error, ENOTSUP, 2389 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2390 } 2391 2392 static void 2393 flow_null_remove(struct rte_eth_dev *dev __rte_unused, 2394 struct rte_flow *flow __rte_unused) 2395 { 2396 } 2397 2398 static void 2399 flow_null_destroy(struct rte_eth_dev *dev __rte_unused, 2400 struct rte_flow *flow __rte_unused) 2401 { 2402 } 2403 2404 static int 2405 flow_null_query(struct rte_eth_dev *dev __rte_unused, 2406 struct rte_flow *flow __rte_unused, 2407 const struct rte_flow_action *actions __rte_unused, 2408 void *data __rte_unused, 2409 struct rte_flow_error *error) 2410 { 2411 return rte_flow_error_set(error, ENOTSUP, 2412 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2413 } 2414 2415 /* Void driver to protect from null pointer reference. */ 2416 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = { 2417 .validate = flow_null_validate, 2418 .prepare = flow_null_prepare, 2419 .translate = flow_null_translate, 2420 .apply = flow_null_apply, 2421 .remove = flow_null_remove, 2422 .destroy = flow_null_destroy, 2423 .query = flow_null_query, 2424 }; 2425 2426 /** 2427 * Select flow driver type according to flow attributes and device 2428 * configuration. 2429 * 2430 * @param[in] dev 2431 * Pointer to the dev structure. 2432 * @param[in] attr 2433 * Pointer to the flow attributes. 2434 * 2435 * @return 2436 * flow driver type, MLX5_FLOW_TYPE_MAX otherwise. 2437 */ 2438 static enum mlx5_flow_drv_type 2439 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) 2440 { 2441 struct mlx5_priv *priv = dev->data->dev_private; 2442 enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX; 2443 2444 if (attr->transfer && priv->config.dv_esw_en) 2445 type = MLX5_FLOW_TYPE_DV; 2446 if (!attr->transfer) 2447 type = priv->config.dv_flow_en ? 
MLX5_FLOW_TYPE_DV : 2448 MLX5_FLOW_TYPE_VERBS; 2449 return type; 2450 } 2451 2452 #define flow_get_drv_ops(type) flow_drv_ops[type] 2453 2454 /** 2455 * Flow driver validation API. This abstracts calling driver specific functions. 2456 * The type of flow driver is determined according to flow attributes. 2457 * 2458 * @param[in] dev 2459 * Pointer to the dev structure. 2460 * @param[in] attr 2461 * Pointer to the flow attributes. 2462 * @param[in] items 2463 * Pointer to the list of items. 2464 * @param[in] actions 2465 * Pointer to the list of actions. 2466 * @param[in] external 2467 * This flow rule is created by request external to PMD. 2468 * @param[out] error 2469 * Pointer to the error structure. 2470 * 2471 * @return 2472 * 0 on success, a negative errno value otherwise and rte_errno is set. 2473 */ 2474 static inline int 2475 flow_drv_validate(struct rte_eth_dev *dev, 2476 const struct rte_flow_attr *attr, 2477 const struct rte_flow_item items[], 2478 const struct rte_flow_action actions[], 2479 bool external, struct rte_flow_error *error) 2480 { 2481 const struct mlx5_flow_driver_ops *fops; 2482 enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr); 2483 2484 fops = flow_get_drv_ops(type); 2485 return fops->validate(dev, attr, items, actions, external, error); 2486 } 2487 2488 /** 2489 * Flow driver preparation API. This abstracts calling driver specific 2490 * functions. Parent flow (rte_flow) should have driver type (drv_type). It 2491 * calculates the size of memory required for device flow, allocates the memory, 2492 * initializes the device flow and returns the pointer. 2493 * 2494 * @note 2495 * This function initializes device flow structure such as dv or verbs in 2496 * struct mlx5_flow. However, it is caller's responsibility to initialize the 2497 * rest. For example, adding returning device flow to flow->dev_flow list and 2498 * setting backward reference to the flow should be done out of this function. 2499 * layers field is not filled either. 2500 * 2501 * @param[in] attr 2502 * Pointer to the flow attributes. 2503 * @param[in] items 2504 * Pointer to the list of items. 2505 * @param[in] actions 2506 * Pointer to the list of actions. 2507 * @param[out] error 2508 * Pointer to the error structure. 2509 * 2510 * @return 2511 * Pointer to device flow on success, otherwise NULL and rte_errno is set. 2512 */ 2513 static inline struct mlx5_flow * 2514 flow_drv_prepare(const struct rte_flow *flow, 2515 const struct rte_flow_attr *attr, 2516 const struct rte_flow_item items[], 2517 const struct rte_flow_action actions[], 2518 struct rte_flow_error *error) 2519 { 2520 const struct mlx5_flow_driver_ops *fops; 2521 enum mlx5_flow_drv_type type = flow->drv_type; 2522 2523 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2524 fops = flow_get_drv_ops(type); 2525 return fops->prepare(attr, items, actions, error); 2526 } 2527 2528 /** 2529 * Flow driver translation API. This abstracts calling driver specific 2530 * functions. Parent flow (rte_flow) should have driver type (drv_type). It 2531 * translates a generic flow into a driver flow. flow_drv_prepare() must 2532 * precede. 2533 * 2534 * @note 2535 * dev_flow->layers could be filled as a result of parsing during translation 2536 * if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled 2537 * if necessary. As a flow can have multiple dev_flows by RSS flow expansion, 2538 * flow->actions could be overwritten even though all the expanded dev_flows 2539 * have the same actions. 
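 *
 * A condensed sketch of the expected calling order (it mirrors what
 * flow_create_split_inner() does further down in this file; flow, attr,
 * items, actions, dev, error and ret are the caller's variables):
 * @code
 * struct mlx5_flow *dev_flow;
 *
 * dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
 * if (!dev_flow)
 *     return -rte_errno;
 * dev_flow->flow = flow;
 * LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
 * ret = flow_drv_translate(dev, dev_flow, attr, items, actions, error);
 * @endcode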
2540 * 2541 * @param[in] dev 2542 * Pointer to the rte dev structure. 2543 * @param[in, out] dev_flow 2544 * Pointer to the mlx5 flow. 2545 * @param[in] attr 2546 * Pointer to the flow attributes. 2547 * @param[in] items 2548 * Pointer to the list of items. 2549 * @param[in] actions 2550 * Pointer to the list of actions. 2551 * @param[out] error 2552 * Pointer to the error structure. 2553 * 2554 * @return 2555 * 0 on success, a negative errno value otherwise and rte_errno is set. 2556 */ 2557 static inline int 2558 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow, 2559 const struct rte_flow_attr *attr, 2560 const struct rte_flow_item items[], 2561 const struct rte_flow_action actions[], 2562 struct rte_flow_error *error) 2563 { 2564 const struct mlx5_flow_driver_ops *fops; 2565 enum mlx5_flow_drv_type type = dev_flow->flow->drv_type; 2566 2567 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2568 fops = flow_get_drv_ops(type); 2569 return fops->translate(dev, dev_flow, attr, items, actions, error); 2570 } 2571 2572 /** 2573 * Flow driver apply API. This abstracts calling driver specific functions. 2574 * Parent flow (rte_flow) should have driver type (drv_type). It applies 2575 * translated driver flows on to device. flow_drv_translate() must precede. 2576 * 2577 * @param[in] dev 2578 * Pointer to Ethernet device structure. 2579 * @param[in, out] flow 2580 * Pointer to flow structure. 2581 * @param[out] error 2582 * Pointer to error structure. 2583 * 2584 * @return 2585 * 0 on success, a negative errno value otherwise and rte_errno is set. 2586 */ 2587 static inline int 2588 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, 2589 struct rte_flow_error *error) 2590 { 2591 const struct mlx5_flow_driver_ops *fops; 2592 enum mlx5_flow_drv_type type = flow->drv_type; 2593 2594 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2595 fops = flow_get_drv_ops(type); 2596 return fops->apply(dev, flow, error); 2597 } 2598 2599 /** 2600 * Flow driver remove API. This abstracts calling driver specific functions. 2601 * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow 2602 * on device. All the resources of the flow should be freed by calling 2603 * flow_drv_destroy(). 2604 * 2605 * @param[in] dev 2606 * Pointer to Ethernet device. 2607 * @param[in, out] flow 2608 * Pointer to flow structure. 2609 */ 2610 static inline void 2611 flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) 2612 { 2613 const struct mlx5_flow_driver_ops *fops; 2614 enum mlx5_flow_drv_type type = flow->drv_type; 2615 2616 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2617 fops = flow_get_drv_ops(type); 2618 fops->remove(dev, flow); 2619 } 2620 2621 /** 2622 * Flow driver destroy API. This abstracts calling driver specific functions. 2623 * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow 2624 * on device and releases resources of the flow. 2625 * 2626 * @param[in] dev 2627 * Pointer to Ethernet device. 2628 * @param[in, out] flow 2629 * Pointer to flow structure. 
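 *
 * @note Unlike flow_drv_remove(), this also releases the flow resources,
 * including the split Q/RSS flow IDs freed through
 * flow_mreg_split_qrss_release().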
2630 */ 2631 static inline void 2632 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) 2633 { 2634 const struct mlx5_flow_driver_ops *fops; 2635 enum mlx5_flow_drv_type type = flow->drv_type; 2636 2637 flow_mreg_split_qrss_release(dev, flow); 2638 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2639 fops = flow_get_drv_ops(type); 2640 fops->destroy(dev, flow); 2641 } 2642 2643 /** 2644 * Validate a flow supported by the NIC. 2645 * 2646 * @see rte_flow_validate() 2647 * @see rte_flow_ops 2648 */ 2649 int 2650 mlx5_flow_validate(struct rte_eth_dev *dev, 2651 const struct rte_flow_attr *attr, 2652 const struct rte_flow_item items[], 2653 const struct rte_flow_action actions[], 2654 struct rte_flow_error *error) 2655 { 2656 int ret; 2657 2658 ret = flow_drv_validate(dev, attr, items, actions, true, error); 2659 if (ret < 0) 2660 return ret; 2661 return 0; 2662 } 2663 2664 /** 2665 * Get port id item from the item list. 2666 * 2667 * @param[in] item 2668 * Pointer to the list of items. 2669 * 2670 * @return 2671 * Pointer to the port id item if exist, else return NULL. 2672 */ 2673 static const struct rte_flow_item * 2674 find_port_id_item(const struct rte_flow_item *item) 2675 { 2676 assert(item); 2677 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { 2678 if (item->type == RTE_FLOW_ITEM_TYPE_PORT_ID) 2679 return item; 2680 } 2681 return NULL; 2682 } 2683 2684 /** 2685 * Get RSS action from the action list. 2686 * 2687 * @param[in] actions 2688 * Pointer to the list of actions. 2689 * 2690 * @return 2691 * Pointer to the RSS action if exist, else return NULL. 2692 */ 2693 static const struct rte_flow_action_rss* 2694 flow_get_rss_action(const struct rte_flow_action actions[]) 2695 { 2696 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2697 switch (actions->type) { 2698 case RTE_FLOW_ACTION_TYPE_RSS: 2699 return (const struct rte_flow_action_rss *) 2700 actions->conf; 2701 default: 2702 break; 2703 } 2704 } 2705 return NULL; 2706 } 2707 2708 static unsigned int 2709 find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) 2710 { 2711 const struct rte_flow_item *item; 2712 unsigned int has_vlan = 0; 2713 2714 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { 2715 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { 2716 has_vlan = 1; 2717 break; 2718 } 2719 } 2720 if (has_vlan) 2721 return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN : 2722 MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN; 2723 return rss_level < 2 ? MLX5_EXPANSION_ROOT : 2724 MLX5_EXPANSION_ROOT_OUTER; 2725 } 2726 2727 /** 2728 * Get QUEUE/RSS action from the action list. 2729 * 2730 * @param[in] actions 2731 * Pointer to the list of actions. 2732 * @param[out] qrss 2733 * Pointer to the return pointer. 2734 * @param[out] qrss_type 2735 * Pointer to the action type to return. RTE_FLOW_ACTION_TYPE_END is returned 2736 * if no QUEUE/RSS is found. 2737 * 2738 * @return 2739 * Total number of actions. 2740 */ 2741 static int 2742 flow_parse_qrss_action(const struct rte_flow_action actions[], 2743 const struct rte_flow_action **qrss) 2744 { 2745 int actions_n = 0; 2746 2747 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2748 switch (actions->type) { 2749 case RTE_FLOW_ACTION_TYPE_QUEUE: 2750 case RTE_FLOW_ACTION_TYPE_RSS: 2751 *qrss = actions; 2752 break; 2753 default: 2754 break; 2755 } 2756 actions_n++; 2757 } 2758 /* Count RTE_FLOW_ACTION_TYPE_END. */ 2759 return actions_n + 1; 2760 } 2761 2762 /** 2763 * Check meter action from the action list. 
2764 * 2765 * @param[in] actions 2766 * Pointer to the list of actions. 2767 * @param[out] mtr 2768 * Pointer to the meter exist flag. 2769 * 2770 * @return 2771 * Total number of actions. 2772 */ 2773 static int 2774 flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr) 2775 { 2776 int actions_n = 0; 2777 2778 assert(mtr); 2779 *mtr = 0; 2780 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2781 switch (actions->type) { 2782 case RTE_FLOW_ACTION_TYPE_METER: 2783 *mtr = 1; 2784 break; 2785 default: 2786 break; 2787 } 2788 actions_n++; 2789 } 2790 /* Count RTE_FLOW_ACTION_TYPE_END. */ 2791 return actions_n + 1; 2792 } 2793 2794 /** 2795 * Check if the flow should be splited due to hairpin. 2796 * The reason for the split is that in current HW we can't 2797 * support encap on Rx, so if a flow have encap we move it 2798 * to Tx. 2799 * 2800 * @param dev 2801 * Pointer to Ethernet device. 2802 * @param[in] attr 2803 * Flow rule attributes. 2804 * @param[in] actions 2805 * Associated actions (list terminated by the END action). 2806 * 2807 * @return 2808 * > 0 the number of actions and the flow should be split, 2809 * 0 when no split required. 2810 */ 2811 static int 2812 flow_check_hairpin_split(struct rte_eth_dev *dev, 2813 const struct rte_flow_attr *attr, 2814 const struct rte_flow_action actions[]) 2815 { 2816 int queue_action = 0; 2817 int action_n = 0; 2818 int encap = 0; 2819 const struct rte_flow_action_queue *queue; 2820 const struct rte_flow_action_rss *rss; 2821 const struct rte_flow_action_raw_encap *raw_encap; 2822 2823 if (!attr->ingress) 2824 return 0; 2825 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2826 switch (actions->type) { 2827 case RTE_FLOW_ACTION_TYPE_QUEUE: 2828 queue = actions->conf; 2829 if (queue == NULL) 2830 return 0; 2831 if (mlx5_rxq_get_type(dev, queue->index) != 2832 MLX5_RXQ_TYPE_HAIRPIN) 2833 return 0; 2834 queue_action = 1; 2835 action_n++; 2836 break; 2837 case RTE_FLOW_ACTION_TYPE_RSS: 2838 rss = actions->conf; 2839 if (rss == NULL || rss->queue_num == 0) 2840 return 0; 2841 if (mlx5_rxq_get_type(dev, rss->queue[0]) != 2842 MLX5_RXQ_TYPE_HAIRPIN) 2843 return 0; 2844 queue_action = 1; 2845 action_n++; 2846 break; 2847 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 2848 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 2849 encap = 1; 2850 action_n++; 2851 break; 2852 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 2853 raw_encap = actions->conf; 2854 if (raw_encap->size > 2855 (sizeof(struct rte_flow_item_eth) + 2856 sizeof(struct rte_flow_item_ipv4))) 2857 encap = 1; 2858 action_n++; 2859 break; 2860 default: 2861 action_n++; 2862 break; 2863 } 2864 } 2865 if (encap == 1 && queue_action) 2866 return action_n; 2867 return 0; 2868 } 2869 2870 /* Declare flow create/destroy prototype in advance. */ 2871 static struct rte_flow * 2872 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, 2873 const struct rte_flow_attr *attr, 2874 const struct rte_flow_item items[], 2875 const struct rte_flow_action actions[], 2876 bool external, struct rte_flow_error *error); 2877 2878 static void 2879 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, 2880 struct rte_flow *flow); 2881 2882 /** 2883 * Add a flow of copying flow metadata registers in RX_CP_TBL. 2884 * 2885 * As mark_id is unique, if there's already a registered flow for the mark_id, 2886 * return by increasing the reference counter of the resource. Otherwise, create 2887 * the resource (mcp_res) and flow. 
2888 * 2889 * Flow looks like, 2890 * - If ingress port is ANY and reg_c[1] is mark_id, 2891 * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL. 2892 * 2893 * For default flow (zero mark_id), flow is like, 2894 * - If ingress port is ANY, 2895 * reg_b := reg_c[0] and jump to RX_ACT_TBL. 2896 * 2897 * @param dev 2898 * Pointer to Ethernet device. 2899 * @param mark_id 2900 * ID of MARK action, zero means default flow for META. 2901 * @param[out] error 2902 * Perform verbose error reporting if not NULL. 2903 * 2904 * @return 2905 * Associated resource on success, NULL otherwise and rte_errno is set. 2906 */ 2907 static struct mlx5_flow_mreg_copy_resource * 2908 flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, 2909 struct rte_flow_error *error) 2910 { 2911 struct mlx5_priv *priv = dev->data->dev_private; 2912 struct rte_flow_attr attr = { 2913 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, 2914 .ingress = 1, 2915 }; 2916 struct mlx5_rte_flow_item_tag tag_spec = { 2917 .data = mark_id, 2918 }; 2919 struct rte_flow_item items[] = { 2920 [1] = { .type = RTE_FLOW_ITEM_TYPE_END, }, 2921 }; 2922 struct rte_flow_action_mark ftag = { 2923 .id = mark_id, 2924 }; 2925 struct mlx5_flow_action_copy_mreg cp_mreg = { 2926 .dst = REG_B, 2927 .src = 0, 2928 }; 2929 struct rte_flow_action_jump jump = { 2930 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, 2931 }; 2932 struct rte_flow_action actions[] = { 2933 [3] = { .type = RTE_FLOW_ACTION_TYPE_END, }, 2934 }; 2935 struct mlx5_flow_mreg_copy_resource *mcp_res; 2936 int ret; 2937 2938 /* Fill the register fileds in the flow. */ 2939 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); 2940 if (ret < 0) 2941 return NULL; 2942 tag_spec.id = ret; 2943 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error); 2944 if (ret < 0) 2945 return NULL; 2946 cp_mreg.src = ret; 2947 /* Check if already registered. */ 2948 assert(priv->mreg_cp_tbl); 2949 mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id); 2950 if (mcp_res) { 2951 /* For non-default rule. */ 2952 if (mark_id != MLX5_DEFAULT_COPY_ID) 2953 mcp_res->refcnt++; 2954 assert(mark_id != MLX5_DEFAULT_COPY_ID || mcp_res->refcnt == 1); 2955 return mcp_res; 2956 } 2957 /* Provide the full width of FLAG specific value. */ 2958 if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT)) 2959 tag_spec.data = MLX5_FLOW_MARK_DEFAULT; 2960 /* Build a new flow. */ 2961 if (mark_id != MLX5_DEFAULT_COPY_ID) { 2962 items[0] = (struct rte_flow_item){ 2963 .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG, 2964 .spec = &tag_spec, 2965 }; 2966 items[1] = (struct rte_flow_item){ 2967 .type = RTE_FLOW_ITEM_TYPE_END, 2968 }; 2969 actions[0] = (struct rte_flow_action){ 2970 .type = MLX5_RTE_FLOW_ACTION_TYPE_MARK, 2971 .conf = &ftag, 2972 }; 2973 actions[1] = (struct rte_flow_action){ 2974 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 2975 .conf = &cp_mreg, 2976 }; 2977 actions[2] = (struct rte_flow_action){ 2978 .type = RTE_FLOW_ACTION_TYPE_JUMP, 2979 .conf = &jump, 2980 }; 2981 actions[3] = (struct rte_flow_action){ 2982 .type = RTE_FLOW_ACTION_TYPE_END, 2983 }; 2984 } else { 2985 /* Default rule, wildcard match. 
*/ 2986 attr.priority = MLX5_FLOW_PRIO_RSVD; 2987 items[0] = (struct rte_flow_item){ 2988 .type = RTE_FLOW_ITEM_TYPE_END, 2989 }; 2990 actions[0] = (struct rte_flow_action){ 2991 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 2992 .conf = &cp_mreg, 2993 }; 2994 actions[1] = (struct rte_flow_action){ 2995 .type = RTE_FLOW_ACTION_TYPE_JUMP, 2996 .conf = &jump, 2997 }; 2998 actions[2] = (struct rte_flow_action){ 2999 .type = RTE_FLOW_ACTION_TYPE_END, 3000 }; 3001 } 3002 /* Build a new entry. */ 3003 mcp_res = rte_zmalloc(__func__, sizeof(*mcp_res), 0); 3004 if (!mcp_res) { 3005 rte_errno = ENOMEM; 3006 return NULL; 3007 } 3008 /* 3009 * The copy Flows are not included in any list. There 3010 * ones are referenced from other Flows and can not 3011 * be applied, removed, deleted in ardbitrary order 3012 * by list traversing. 3013 */ 3014 mcp_res->flow = flow_list_create(dev, NULL, &attr, items, 3015 actions, false, error); 3016 if (!mcp_res->flow) 3017 goto error; 3018 mcp_res->refcnt++; 3019 mcp_res->hlist_ent.key = mark_id; 3020 ret = mlx5_hlist_insert(priv->mreg_cp_tbl, 3021 &mcp_res->hlist_ent); 3022 assert(!ret); 3023 if (ret) 3024 goto error; 3025 return mcp_res; 3026 error: 3027 if (mcp_res->flow) 3028 flow_list_destroy(dev, NULL, mcp_res->flow); 3029 rte_free(mcp_res); 3030 return NULL; 3031 } 3032 3033 /** 3034 * Release flow in RX_CP_TBL. 3035 * 3036 * @param dev 3037 * Pointer to Ethernet device. 3038 * @flow 3039 * Parent flow for wich copying is provided. 3040 */ 3041 static void 3042 flow_mreg_del_copy_action(struct rte_eth_dev *dev, 3043 struct rte_flow *flow) 3044 { 3045 struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; 3046 struct mlx5_priv *priv = dev->data->dev_private; 3047 3048 if (!mcp_res || !priv->mreg_cp_tbl) 3049 return; 3050 if (flow->copy_applied) { 3051 assert(mcp_res->appcnt); 3052 flow->copy_applied = 0; 3053 --mcp_res->appcnt; 3054 if (!mcp_res->appcnt) 3055 flow_drv_remove(dev, mcp_res->flow); 3056 } 3057 /* 3058 * We do not check availability of metadata registers here, 3059 * because copy resources are not allocated in this case. 3060 */ 3061 if (--mcp_res->refcnt) 3062 return; 3063 assert(mcp_res->flow); 3064 flow_list_destroy(dev, NULL, mcp_res->flow); 3065 mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent); 3066 rte_free(mcp_res); 3067 flow->mreg_copy = NULL; 3068 } 3069 3070 /** 3071 * Start flow in RX_CP_TBL. 3072 * 3073 * @param dev 3074 * Pointer to Ethernet device. 3075 * @flow 3076 * Parent flow for wich copying is provided. 3077 * 3078 * @return 3079 * 0 on success, a negative errno value otherwise and rte_errno is set. 3080 */ 3081 static int 3082 flow_mreg_start_copy_action(struct rte_eth_dev *dev, 3083 struct rte_flow *flow) 3084 { 3085 struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; 3086 int ret; 3087 3088 if (!mcp_res || flow->copy_applied) 3089 return 0; 3090 if (!mcp_res->appcnt) { 3091 ret = flow_drv_apply(dev, mcp_res->flow, NULL); 3092 if (ret) 3093 return ret; 3094 } 3095 ++mcp_res->appcnt; 3096 flow->copy_applied = 1; 3097 return 0; 3098 } 3099 3100 /** 3101 * Stop flow in RX_CP_TBL. 3102 * 3103 * @param dev 3104 * Pointer to Ethernet device. 3105 * @flow 3106 * Parent flow for wich copying is provided. 
3107 */ 3108 static void 3109 flow_mreg_stop_copy_action(struct rte_eth_dev *dev, 3110 struct rte_flow *flow) 3111 { 3112 struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; 3113 3114 if (!mcp_res || !flow->copy_applied) 3115 return; 3116 assert(mcp_res->appcnt); 3117 --mcp_res->appcnt; 3118 flow->copy_applied = 0; 3119 if (!mcp_res->appcnt) 3120 flow_drv_remove(dev, mcp_res->flow); 3121 } 3122 3123 /** 3124 * Remove the default copy action from RX_CP_TBL. 3125 * 3126 * @param dev 3127 * Pointer to Ethernet device. 3128 */ 3129 static void 3130 flow_mreg_del_default_copy_action(struct rte_eth_dev *dev) 3131 { 3132 struct mlx5_flow_mreg_copy_resource *mcp_res; 3133 struct mlx5_priv *priv = dev->data->dev_private; 3134 3135 /* Check if default flow is registered. */ 3136 if (!priv->mreg_cp_tbl) 3137 return; 3138 mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, 3139 MLX5_DEFAULT_COPY_ID); 3140 if (!mcp_res) 3141 return; 3142 assert(mcp_res->flow); 3143 flow_list_destroy(dev, NULL, mcp_res->flow); 3144 mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent); 3145 rte_free(mcp_res); 3146 } 3147 3148 /** 3149 * Add the default copy action in in RX_CP_TBL. 3150 * 3151 * @param dev 3152 * Pointer to Ethernet device. 3153 * @param[out] error 3154 * Perform verbose error reporting if not NULL. 3155 * 3156 * @return 3157 * 0 for success, negative value otherwise and rte_errno is set. 3158 */ 3159 static int 3160 flow_mreg_add_default_copy_action(struct rte_eth_dev *dev, 3161 struct rte_flow_error *error) 3162 { 3163 struct mlx5_priv *priv = dev->data->dev_private; 3164 struct mlx5_flow_mreg_copy_resource *mcp_res; 3165 3166 /* Check whether extensive metadata feature is engaged. */ 3167 if (!priv->config.dv_flow_en || 3168 priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 3169 !mlx5_flow_ext_mreg_supported(dev) || 3170 !priv->sh->dv_regc0_mask) 3171 return 0; 3172 mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error); 3173 if (!mcp_res) 3174 return -rte_errno; 3175 return 0; 3176 } 3177 3178 /** 3179 * Add a flow of copying flow metadata registers in RX_CP_TBL. 3180 * 3181 * All the flow having Q/RSS action should be split by 3182 * flow_mreg_split_qrss_prep() to pass by RX_CP_TBL. A flow in the RX_CP_TBL 3183 * performs the following, 3184 * - CQE->flow_tag := reg_c[1] (MARK) 3185 * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META) 3186 * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1] 3187 * but there should be a flow per each MARK ID set by MARK action. 3188 * 3189 * For the aforementioned reason, if there's a MARK action in flow's action 3190 * list, a corresponding flow should be added to the RX_CP_TBL in order to copy 3191 * the MARK ID to CQE's flow_tag like, 3192 * - If reg_c[1] is mark_id, 3193 * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL. 3194 * 3195 * For SET_META action which stores value in reg_c[0], as the destination is 3196 * also a flow metadata register (reg_b), adding a default flow is enough. Zero 3197 * MARK ID means the default flow. The default flow looks like, 3198 * - For all flow, reg_b := reg_c[0] and jump to RX_ACT_TBL. 3199 * 3200 * @param dev 3201 * Pointer to Ethernet device. 3202 * @param flow 3203 * Pointer to flow structure. 3204 * @param[in] actions 3205 * Pointer to the list of actions. 3206 * @param[out] error 3207 * Perform verbose error reporting if not NULL. 3208 * 3209 * @return 3210 * 0 on success, negative value otherwise and rte_errno is set. 
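 *
 * @note Trimmed illustrative example (dev, flow, ret and error are assumed
 * caller state; only the actions relevant to this function are shown):
 * @code
 * const struct rte_flow_action_mark mark = { .id = 0xcafe };
 * const struct rte_flow_action actions[] = {
 *     { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *     { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * ret = flow_mreg_update_copy_table(dev, flow, actions, error);
 * // On success flow->mreg_copy points at the shared RX_CP_TBL resource.
 * @endcode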
3211 */ 3212 static int 3213 flow_mreg_update_copy_table(struct rte_eth_dev *dev, 3214 struct rte_flow *flow, 3215 const struct rte_flow_action *actions, 3216 struct rte_flow_error *error) 3217 { 3218 struct mlx5_priv *priv = dev->data->dev_private; 3219 struct mlx5_dev_config *config = &priv->config; 3220 struct mlx5_flow_mreg_copy_resource *mcp_res; 3221 const struct rte_flow_action_mark *mark; 3222 3223 /* Check whether extensive metadata feature is engaged. */ 3224 if (!config->dv_flow_en || 3225 config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 3226 !mlx5_flow_ext_mreg_supported(dev) || 3227 !priv->sh->dv_regc0_mask) 3228 return 0; 3229 /* Find MARK action. */ 3230 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 3231 switch (actions->type) { 3232 case RTE_FLOW_ACTION_TYPE_FLAG: 3233 mcp_res = flow_mreg_add_copy_action 3234 (dev, MLX5_FLOW_MARK_DEFAULT, error); 3235 if (!mcp_res) 3236 return -rte_errno; 3237 flow->mreg_copy = mcp_res; 3238 if (dev->data->dev_started) { 3239 mcp_res->appcnt++; 3240 flow->copy_applied = 1; 3241 } 3242 return 0; 3243 case RTE_FLOW_ACTION_TYPE_MARK: 3244 mark = (const struct rte_flow_action_mark *) 3245 actions->conf; 3246 mcp_res = 3247 flow_mreg_add_copy_action(dev, mark->id, error); 3248 if (!mcp_res) 3249 return -rte_errno; 3250 flow->mreg_copy = mcp_res; 3251 if (dev->data->dev_started) { 3252 mcp_res->appcnt++; 3253 flow->copy_applied = 1; 3254 } 3255 return 0; 3256 default: 3257 break; 3258 } 3259 } 3260 return 0; 3261 } 3262 3263 #define MLX5_MAX_SPLIT_ACTIONS 24 3264 #define MLX5_MAX_SPLIT_ITEMS 24 3265 3266 /** 3267 * Split the hairpin flow. 3268 * Since HW can't support encap on Rx we move the encap to Tx. 3269 * If the count action is after the encap then we also 3270 * move the count action. in this case the count will also measure 3271 * the outer bytes. 3272 * 3273 * @param dev 3274 * Pointer to Ethernet device. 3275 * @param[in] actions 3276 * Associated actions (list terminated by the END action). 3277 * @param[out] actions_rx 3278 * Rx flow actions. 3279 * @param[out] actions_tx 3280 * Tx flow actions.. 3281 * @param[out] pattern_tx 3282 * The pattern items for the Tx flow. 3283 * @param[out] flow_id 3284 * The flow ID connected to this flow. 3285 * 3286 * @return 3287 * 0 on success. 
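 *
 * @note Illustrative example (assumed action list, not from a real caller):
 * with ingress actions RAW_ENCAP(full tunnel header) / COUNT /
 * QUEUE(hairpin) / END, the encap and the count following it land in
 * actions_tx, actions_rx keeps the queue action plus the internally added
 * SET_TAG carrying *flow_id, and pattern_tx matches that same tag.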
3288 */ 3289 static int 3290 flow_hairpin_split(struct rte_eth_dev *dev, 3291 const struct rte_flow_action actions[], 3292 struct rte_flow_action actions_rx[], 3293 struct rte_flow_action actions_tx[], 3294 struct rte_flow_item pattern_tx[], 3295 uint32_t *flow_id) 3296 { 3297 struct mlx5_priv *priv = dev->data->dev_private; 3298 const struct rte_flow_action_raw_encap *raw_encap; 3299 const struct rte_flow_action_raw_decap *raw_decap; 3300 struct mlx5_rte_flow_action_set_tag *set_tag; 3301 struct rte_flow_action *tag_action; 3302 struct mlx5_rte_flow_item_tag *tag_item; 3303 struct rte_flow_item *item; 3304 char *addr; 3305 int encap = 0; 3306 3307 mlx5_flow_id_get(priv->sh->flow_id_pool, flow_id); 3308 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 3309 switch (actions->type) { 3310 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 3311 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 3312 rte_memcpy(actions_tx, actions, 3313 sizeof(struct rte_flow_action)); 3314 actions_tx++; 3315 break; 3316 case RTE_FLOW_ACTION_TYPE_COUNT: 3317 if (encap) { 3318 rte_memcpy(actions_tx, actions, 3319 sizeof(struct rte_flow_action)); 3320 actions_tx++; 3321 } else { 3322 rte_memcpy(actions_rx, actions, 3323 sizeof(struct rte_flow_action)); 3324 actions_rx++; 3325 } 3326 break; 3327 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 3328 raw_encap = actions->conf; 3329 if (raw_encap->size > 3330 (sizeof(struct rte_flow_item_eth) + 3331 sizeof(struct rte_flow_item_ipv4))) { 3332 memcpy(actions_tx, actions, 3333 sizeof(struct rte_flow_action)); 3334 actions_tx++; 3335 encap = 1; 3336 } else { 3337 rte_memcpy(actions_rx, actions, 3338 sizeof(struct rte_flow_action)); 3339 actions_rx++; 3340 } 3341 break; 3342 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 3343 raw_decap = actions->conf; 3344 if (raw_decap->size < 3345 (sizeof(struct rte_flow_item_eth) + 3346 sizeof(struct rte_flow_item_ipv4))) { 3347 memcpy(actions_tx, actions, 3348 sizeof(struct rte_flow_action)); 3349 actions_tx++; 3350 } else { 3351 rte_memcpy(actions_rx, actions, 3352 sizeof(struct rte_flow_action)); 3353 actions_rx++; 3354 } 3355 break; 3356 default: 3357 rte_memcpy(actions_rx, actions, 3358 sizeof(struct rte_flow_action)); 3359 actions_rx++; 3360 break; 3361 } 3362 } 3363 /* Add set meta action and end action for the Rx flow. */ 3364 tag_action = actions_rx; 3365 tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG; 3366 actions_rx++; 3367 rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action)); 3368 actions_rx++; 3369 set_tag = (void *)actions_rx; 3370 set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL); 3371 assert(set_tag->id > REG_NONE); 3372 set_tag->data = *flow_id; 3373 tag_action->conf = set_tag; 3374 /* Create Tx item list. */ 3375 rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action)); 3376 addr = (void *)&pattern_tx[2]; 3377 item = pattern_tx; 3378 item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG; 3379 tag_item = (void *)addr; 3380 tag_item->data = *flow_id; 3381 tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL); 3382 assert(set_tag->id > REG_NONE); 3383 item->spec = tag_item; 3384 addr += sizeof(struct mlx5_rte_flow_item_tag); 3385 tag_item = (void *)addr; 3386 tag_item->data = UINT32_MAX; 3387 tag_item->id = UINT16_MAX; 3388 item->mask = tag_item; 3389 addr += sizeof(struct mlx5_rte_flow_item_tag); 3390 item->last = NULL; 3391 item++; 3392 item->type = RTE_FLOW_ITEM_TYPE_END; 3393 return 0; 3394 } 3395 3396 /** 3397 * The last stage of splitting chain, just creates the subflow 3398 * without any modification. 
3399 * 3400 * @param dev 3401 * Pointer to Ethernet device. 3402 * @param[in] flow 3403 * Parent flow structure pointer. 3404 * @param[in, out] sub_flow 3405 * Pointer to return the created subflow, may be NULL. 3406 * @param[in] attr 3407 * Flow rule attributes. 3408 * @param[in] items 3409 * Pattern specification (list terminated by the END pattern item). 3410 * @param[in] actions 3411 * Associated actions (list terminated by the END action). 3412 * @param[in] external 3413 * This flow rule is created by request external to PMD. 3414 * @param[out] error 3415 * Perform verbose error reporting if not NULL. 3416 * @return 3417 * 0 on success, negative value otherwise 3418 */ 3419 static int 3420 flow_create_split_inner(struct rte_eth_dev *dev, 3421 struct rte_flow *flow, 3422 struct mlx5_flow **sub_flow, 3423 const struct rte_flow_attr *attr, 3424 const struct rte_flow_item items[], 3425 const struct rte_flow_action actions[], 3426 bool external, struct rte_flow_error *error) 3427 { 3428 struct mlx5_flow *dev_flow; 3429 3430 dev_flow = flow_drv_prepare(flow, attr, items, actions, error); 3431 if (!dev_flow) 3432 return -rte_errno; 3433 dev_flow->flow = flow; 3434 dev_flow->external = external; 3435 /* Subflow object was created, we must include one in the list. */ 3436 LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); 3437 if (sub_flow) 3438 *sub_flow = dev_flow; 3439 return flow_drv_translate(dev, dev_flow, attr, items, actions, error); 3440 } 3441 3442 /** 3443 * Split the meter flow. 3444 * 3445 * As meter flow will split to three sub flow, other than meter 3446 * action, the other actions make sense to only meter accepts 3447 * the packet. If it need to be dropped, no other additional 3448 * actions should be take. 3449 * 3450 * One kind of special action which decapsulates the L3 tunnel 3451 * header will be in the prefix sub flow, as not to take the 3452 * L3 tunnel header into account. 3453 * 3454 * @param dev 3455 * Pointer to Ethernet device. 3456 * @param[in] actions 3457 * Associated actions (list terminated by the END action). 3458 * @param[out] actions_sfx 3459 * Suffix flow actions. 3460 * @param[out] actions_pre 3461 * Prefix flow actions. 3462 * @param[out] pattern_sfx 3463 * The pattern items for the suffix flow. 3464 * @param[out] tag_sfx 3465 * Pointer to suffix flow tag. 3466 * 3467 * @return 3468 * 0 on success. 3469 */ 3470 static int 3471 flow_meter_split_prep(struct rte_eth_dev *dev, 3472 const struct rte_flow_action actions[], 3473 struct rte_flow_action actions_sfx[], 3474 struct rte_flow_action actions_pre[]) 3475 { 3476 struct rte_flow_action *tag_action; 3477 struct mlx5_rte_flow_action_set_tag *set_tag; 3478 struct rte_flow_error error; 3479 const struct rte_flow_action_raw_encap *raw_encap; 3480 const struct rte_flow_action_raw_decap *raw_decap; 3481 uint32_t tag_id; 3482 3483 /* Add the extra tag action first. */ 3484 tag_action = actions_pre; 3485 tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG; 3486 actions_pre++; 3487 /* Prepare the actions for prefix and suffix flow. 
*/ 3488 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 3489 switch (actions->type) { 3490 case RTE_FLOW_ACTION_TYPE_METER: 3491 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: 3492 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: 3493 memcpy(actions_pre, actions, 3494 sizeof(struct rte_flow_action)); 3495 actions_pre++; 3496 break; 3497 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 3498 raw_encap = actions->conf; 3499 if (raw_encap->size > 3500 (sizeof(struct rte_flow_item_eth) + 3501 sizeof(struct rte_flow_item_ipv4))) { 3502 memcpy(actions_sfx, actions, 3503 sizeof(struct rte_flow_action)); 3504 actions_sfx++; 3505 } else { 3506 rte_memcpy(actions_pre, actions, 3507 sizeof(struct rte_flow_action)); 3508 actions_pre++; 3509 } 3510 break; 3511 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 3512 raw_decap = actions->conf; 3513 /* Size 0 decap means 50 bytes as vxlan decap. */ 3514 if (raw_decap->size && (raw_decap->size < 3515 (sizeof(struct rte_flow_item_eth) + 3516 sizeof(struct rte_flow_item_ipv4)))) { 3517 memcpy(actions_sfx, actions, 3518 sizeof(struct rte_flow_action)); 3519 actions_sfx++; 3520 } else { 3521 rte_memcpy(actions_pre, actions, 3522 sizeof(struct rte_flow_action)); 3523 actions_pre++; 3524 } 3525 break; 3526 default: 3527 memcpy(actions_sfx, actions, 3528 sizeof(struct rte_flow_action)); 3529 actions_sfx++; 3530 break; 3531 } 3532 } 3533 /* Add end action to the actions. */ 3534 actions_sfx->type = RTE_FLOW_ACTION_TYPE_END; 3535 actions_pre->type = RTE_FLOW_ACTION_TYPE_END; 3536 actions_pre++; 3537 /* Set the tag. */ 3538 set_tag = (void *)actions_pre; 3539 set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error); 3540 /* 3541 * Get the id from the qrss_pool to make qrss share the id with meter. 3542 */ 3543 tag_id = flow_qrss_get_id(dev); 3544 set_tag->data = tag_id << MLX5_MTR_COLOR_BITS; 3545 tag_action->conf = set_tag; 3546 return tag_id; 3547 } 3548 3549 /** 3550 * Split action list having QUEUE/RSS for metadata register copy. 3551 * 3552 * Once Q/RSS action is detected in user's action list, the flow action 3553 * should be split in order to copy metadata registers, which will happen in 3554 * RX_CP_TBL like, 3555 * - CQE->flow_tag := reg_c[1] (MARK) 3556 * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META) 3557 * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL. 3558 * This is because the last action of each flow must be a terminal action 3559 * (QUEUE, RSS or DROP). 3560 * 3561 * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is 3562 * stored and kept in the mlx5_flow structure per each sub_flow. 3563 * 3564 * The Q/RSS action is replaced with, 3565 * - SET_TAG, setting the allocated flow ID to reg_c[2]. 3566 * And the following JUMP action is added at the end, 3567 * - JUMP, to RX_CP_TBL. 3568 * 3569 * A flow to perform remained Q/RSS action will be created in RX_ACT_TBL by 3570 * flow_create_split_metadata() routine. The flow will look like, 3571 * - If flow ID matches (reg_c[2]), perform Q/RSS. 3572 * 3573 * @param dev 3574 * Pointer to Ethernet device. 3575 * @param[out] split_actions 3576 * Pointer to store split actions to jump to CP_TBL. 3577 * @param[in] actions 3578 * Pointer to the list of original flow actions. 3579 * @param[in] qrss 3580 * Pointer to the Q/RSS action. 3581 * @param[in] actions_n 3582 * Number of original actions. 3583 * @param[out] error 3584 * Perform verbose error reporting if not NULL. 
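 *
 * @note For example (illustrative input): an original list MARK / RSS / END
 * becomes MARK / SET_TAG(flow ID to reg_c[2]) / JUMP(RX_CP_TBL) / END, and
 * the Q/RSS itself is re-created afterwards in RX_ACT_TBL by
 * flow_create_split_metadata().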
3585 * 3586 * @return 3587 * non-zero unique flow_id on success, otherwise 0 and 3588 * error/rte_error are set. 3589 */ 3590 static uint32_t 3591 flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, 3592 struct rte_flow_action *split_actions, 3593 const struct rte_flow_action *actions, 3594 const struct rte_flow_action *qrss, 3595 int actions_n, struct rte_flow_error *error) 3596 { 3597 struct mlx5_rte_flow_action_set_tag *set_tag; 3598 struct rte_flow_action_jump *jump; 3599 const int qrss_idx = qrss - actions; 3600 uint32_t flow_id = 0; 3601 int ret = 0; 3602 3603 /* 3604 * Given actions will be split 3605 * - Replace QUEUE/RSS action with SET_TAG to set flow ID. 3606 * - Add jump to mreg CP_TBL. 3607 * As a result, there will be one more action. 3608 */ 3609 ++actions_n; 3610 memcpy(split_actions, actions, sizeof(*split_actions) * actions_n); 3611 set_tag = (void *)(split_actions + actions_n); 3612 /* 3613 * If tag action is not set to void(it means we are not the meter 3614 * suffix flow), add the tag action. Since meter suffix flow already 3615 * has the tag added. 3616 */ 3617 if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) { 3618 /* 3619 * Allocate the new subflow ID. This one is unique within 3620 * device and not shared with representors. Otherwise, 3621 * we would have to resolve multi-thread access synch 3622 * issue. Each flow on the shared device is appended 3623 * with source vport identifier, so the resulting 3624 * flows will be unique in the shared (by master and 3625 * representors) domain even if they have coinciding 3626 * IDs. 3627 */ 3628 flow_id = flow_qrss_get_id(dev); 3629 if (!flow_id) 3630 return rte_flow_error_set(error, ENOMEM, 3631 RTE_FLOW_ERROR_TYPE_ACTION, 3632 NULL, "can't allocate id " 3633 "for split Q/RSS subflow"); 3634 /* Internal SET_TAG action to set flow ID. */ 3635 *set_tag = (struct mlx5_rte_flow_action_set_tag){ 3636 .data = flow_id, 3637 }; 3638 ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error); 3639 if (ret < 0) 3640 return ret; 3641 set_tag->id = ret; 3642 /* Construct new actions array. */ 3643 /* Replace QUEUE/RSS action. */ 3644 split_actions[qrss_idx] = (struct rte_flow_action){ 3645 .type = MLX5_RTE_FLOW_ACTION_TYPE_TAG, 3646 .conf = set_tag, 3647 }; 3648 } 3649 /* JUMP action to jump to mreg copy table (CP_TBL). */ 3650 jump = (void *)(set_tag + 1); 3651 *jump = (struct rte_flow_action_jump){ 3652 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, 3653 }; 3654 split_actions[actions_n - 2] = (struct rte_flow_action){ 3655 .type = RTE_FLOW_ACTION_TYPE_JUMP, 3656 .conf = jump, 3657 }; 3658 split_actions[actions_n - 1] = (struct rte_flow_action){ 3659 .type = RTE_FLOW_ACTION_TYPE_END, 3660 }; 3661 return flow_id; 3662 } 3663 3664 /** 3665 * Extend the given action list for Tx metadata copy. 3666 * 3667 * Copy the given action list to the ext_actions and add flow metadata register 3668 * copy action in order to copy reg_a set by WQE to reg_c[0]. 3669 * 3670 * @param[out] ext_actions 3671 * Pointer to the extended action list. 3672 * @param[in] actions 3673 * Pointer to the list of actions. 3674 * @param[in] actions_n 3675 * Number of actions in the list. 3676 * @param[out] error 3677 * Perform verbose error reporting if not NULL. 
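 *
 * @note For example (illustrative input): an egress list RAW_ENCAP / END
 * becomes RAW_ENCAP / COPY_MREG(reg_a to reg_c[0]) / END in ext_actions.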
3678 * 3679 * @return 3680 * 0 on success, negative value otherwise 3681 */ 3682 static int 3683 flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, 3684 struct rte_flow_action *ext_actions, 3685 const struct rte_flow_action *actions, 3686 int actions_n, struct rte_flow_error *error) 3687 { 3688 struct mlx5_flow_action_copy_mreg *cp_mreg = 3689 (struct mlx5_flow_action_copy_mreg *) 3690 (ext_actions + actions_n + 1); 3691 int ret; 3692 3693 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error); 3694 if (ret < 0) 3695 return ret; 3696 cp_mreg->dst = ret; 3697 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error); 3698 if (ret < 0) 3699 return ret; 3700 cp_mreg->src = ret; 3701 memcpy(ext_actions, actions, 3702 sizeof(*ext_actions) * actions_n); 3703 ext_actions[actions_n - 1] = (struct rte_flow_action){ 3704 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 3705 .conf = cp_mreg, 3706 }; 3707 ext_actions[actions_n] = (struct rte_flow_action){ 3708 .type = RTE_FLOW_ACTION_TYPE_END, 3709 }; 3710 return 0; 3711 } 3712 3713 /** 3714 * The splitting for metadata feature. 3715 * 3716 * - Q/RSS action on NIC Rx should be split in order to pass by 3717 * the mreg copy table (RX_CP_TBL) and then it jumps to the 3718 * action table (RX_ACT_TBL) which has the split Q/RSS action. 3719 * 3720 * - All the actions on NIC Tx should have a mreg copy action to 3721 * copy reg_a from WQE to reg_c[0]. 3722 * 3723 * @param dev 3724 * Pointer to Ethernet device. 3725 * @param[in] flow 3726 * Parent flow structure pointer. 3727 * @param[in] attr 3728 * Flow rule attributes. 3729 * @param[in] items 3730 * Pattern specification (list terminated by the END pattern item). 3731 * @param[in] actions 3732 * Associated actions (list terminated by the END action). 3733 * @param[in] external 3734 * This flow rule is created by request external to PMD. 3735 * @param[out] error 3736 * Perform verbose error reporting if not NULL. 3737 * @return 3738 * 0 on success, negative value otherwise 3739 */ 3740 static int 3741 flow_create_split_metadata(struct rte_eth_dev *dev, 3742 struct rte_flow *flow, 3743 const struct rte_flow_attr *attr, 3744 const struct rte_flow_item items[], 3745 const struct rte_flow_action actions[], 3746 bool external, struct rte_flow_error *error) 3747 { 3748 struct mlx5_priv *priv = dev->data->dev_private; 3749 struct mlx5_dev_config *config = &priv->config; 3750 const struct rte_flow_action *qrss = NULL; 3751 struct rte_flow_action *ext_actions = NULL; 3752 struct mlx5_flow *dev_flow = NULL; 3753 uint32_t qrss_id = 0; 3754 int mtr_sfx = 0; 3755 size_t act_size; 3756 int actions_n; 3757 int ret; 3758 3759 /* Check whether extensive metadata feature is engaged. */ 3760 if (!config->dv_flow_en || 3761 config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || 3762 !mlx5_flow_ext_mreg_supported(dev)) 3763 return flow_create_split_inner(dev, flow, NULL, attr, items, 3764 actions, external, error); 3765 actions_n = flow_parse_qrss_action(actions, &qrss); 3766 if (qrss) { 3767 /* Exclude hairpin flows from splitting. 
*/ 3768 if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) { 3769 const struct rte_flow_action_queue *queue; 3770 3771 queue = qrss->conf; 3772 if (mlx5_rxq_get_type(dev, queue->index) == 3773 MLX5_RXQ_TYPE_HAIRPIN) 3774 qrss = NULL; 3775 } else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) { 3776 const struct rte_flow_action_rss *rss; 3777 3778 rss = qrss->conf; 3779 if (mlx5_rxq_get_type(dev, rss->queue[0]) == 3780 MLX5_RXQ_TYPE_HAIRPIN) 3781 qrss = NULL; 3782 } 3783 } 3784 if (qrss) { 3785 /* Check if it is in meter suffix table. */ 3786 mtr_sfx = attr->group == (attr->transfer ? 3787 (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) : 3788 MLX5_FLOW_TABLE_LEVEL_SUFFIX); 3789 /* 3790 * Q/RSS action on NIC Rx should be split in order to pass by 3791 * the mreg copy table (RX_CP_TBL) and then it jumps to the 3792 * action table (RX_ACT_TBL) which has the split Q/RSS action. 3793 */ 3794 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) + 3795 sizeof(struct rte_flow_action_set_tag) + 3796 sizeof(struct rte_flow_action_jump); 3797 ext_actions = rte_zmalloc(__func__, act_size, 0); 3798 if (!ext_actions) 3799 return rte_flow_error_set(error, ENOMEM, 3800 RTE_FLOW_ERROR_TYPE_ACTION, 3801 NULL, "no memory to split " 3802 "metadata flow"); 3803 /* 3804 * If we are the suffix flow of meter, tag already exist. 3805 * Set the tag action to void. 3806 */ 3807 if (mtr_sfx) 3808 ext_actions[qrss - actions].type = 3809 RTE_FLOW_ACTION_TYPE_VOID; 3810 else 3811 ext_actions[qrss - actions].type = 3812 MLX5_RTE_FLOW_ACTION_TYPE_TAG; 3813 /* 3814 * Create the new actions list with removed Q/RSS action 3815 * and appended set tag and jump to register copy table 3816 * (RX_CP_TBL). We should preallocate unique tag ID here 3817 * in advance, because it is needed for set tag action. 3818 */ 3819 qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions, 3820 qrss, actions_n, error); 3821 if (!mtr_sfx && !qrss_id) { 3822 ret = -rte_errno; 3823 goto exit; 3824 } 3825 } else if (attr->egress && !attr->transfer) { 3826 /* 3827 * All the actions on NIC Tx should have a metadata register 3828 * copy action to copy reg_a from WQE to reg_c[meta] 3829 */ 3830 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) + 3831 sizeof(struct mlx5_flow_action_copy_mreg); 3832 ext_actions = rte_zmalloc(__func__, act_size, 0); 3833 if (!ext_actions) 3834 return rte_flow_error_set(error, ENOMEM, 3835 RTE_FLOW_ERROR_TYPE_ACTION, 3836 NULL, "no memory to split " 3837 "metadata flow"); 3838 /* Create the action list appended with copy register. */ 3839 ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions, 3840 actions_n, error); 3841 if (ret < 0) 3842 goto exit; 3843 } 3844 /* Add the unmodified original or prefix subflow. */ 3845 ret = flow_create_split_inner(dev, flow, &dev_flow, attr, items, 3846 ext_actions ? ext_actions : actions, 3847 external, error); 3848 if (ret < 0) 3849 goto exit; 3850 assert(dev_flow); 3851 if (qrss) { 3852 const struct rte_flow_attr q_attr = { 3853 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, 3854 .ingress = 1, 3855 }; 3856 /* Internal PMD action to set register. 
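                 * The tag item built below matches this register value, i.e.
                 * the unique qrss_id written by the SET_TAG action that
                 * flow_mreg_split_qrss_prep() placed into the prefix subflow.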
                 */
                struct mlx5_rte_flow_item_tag q_tag_spec = {
                        .data = qrss_id,
                        .id = 0,
                };
                struct rte_flow_item q_items[] = {
                        {
                                .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG,
                                .spec = &q_tag_spec,
                                .last = NULL,
                                .mask = NULL,
                        },
                        {
                                .type = RTE_FLOW_ITEM_TYPE_END,
                        },
                };
                struct rte_flow_action q_actions[] = {
                        {
                                .type = qrss->type,
                                .conf = qrss->conf,
                        },
                        {
                                .type = RTE_FLOW_ACTION_TYPE_END,
                        },
                };
                uint64_t hash_fields = dev_flow->hash_fields;

                /*
                 * Configure the tag item only if there is no meter subflow.
                 * Since the tag is already set in the meter suffix subflow,
                 * we can use the meter suffix items as they are.
                 */
                if (qrss_id) {
                        /* Not meter subflow. */
                        assert(!mtr_sfx);
                        /*
                         * Keep the unique id in the prefix flow, since the
                         * prefix flow is destroyed after the suffix flow.
                         * The id is freed only once no actual flows use it,
                         * and only then does identifier reallocation become
                         * possible (for example, for other flows in other
                         * threads).
                         */
                        dev_flow->qrss_id = qrss_id;
                        qrss_id = 0;
                        ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
                                                   error);
                        if (ret < 0)
                                goto exit;
                        q_tag_spec.id = ret;
                }
                dev_flow = NULL;
                /* Add suffix subflow to execute Q/RSS. */
                ret = flow_create_split_inner(dev, flow, &dev_flow,
                                              &q_attr, mtr_sfx ? items :
                                              q_items, q_actions,
                                              external, error);
                if (ret < 0)
                        goto exit;
                assert(dev_flow);
                dev_flow->hash_fields = hash_fields;
        }

exit:
        /*
         * We do not destroy the partially created sub_flows in case of error.
         * They are included in the parent flow list and will be destroyed
         * by flow_drv_destroy.
         */
        flow_qrss_free_id(dev, qrss_id);
        rte_free(ext_actions);
        return ret;
}

/**
 * The splitting for meter feature.
 *
 * - The meter flow will be split into two flows, a prefix and a suffix
 *   flow. Packets are meaningful only if they pass the prefix meter
 *   action.
 *
 * - Reg_C_5 is used for the packet to match between the prefix and
 *   suffix flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Parent flow structure pointer.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[in] external
 *   This flow rule is created by request external to PMD.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
3953 * @return 3954 * 0 on success, negative value otherwise 3955 */ 3956 static int 3957 flow_create_split_meter(struct rte_eth_dev *dev, 3958 struct rte_flow *flow, 3959 const struct rte_flow_attr *attr, 3960 const struct rte_flow_item items[], 3961 const struct rte_flow_action actions[], 3962 bool external, struct rte_flow_error *error) 3963 { 3964 struct mlx5_priv *priv = dev->data->dev_private; 3965 struct rte_flow_action *sfx_actions = NULL; 3966 struct rte_flow_action *pre_actions = NULL; 3967 struct rte_flow_item *sfx_items = NULL; 3968 const struct rte_flow_item *sfx_port_id_item; 3969 struct mlx5_flow *dev_flow = NULL; 3970 struct rte_flow_attr sfx_attr = *attr; 3971 uint32_t mtr = 0; 3972 uint32_t mtr_tag_id = 0; 3973 size_t act_size; 3974 size_t item_size; 3975 int actions_n = 0; 3976 int ret; 3977 3978 if (priv->mtr_en) 3979 actions_n = flow_check_meter_action(actions, &mtr); 3980 if (mtr) { 3981 struct mlx5_rte_flow_item_tag *tag_spec; 3982 struct mlx5_rte_flow_item_tag *tag_mask; 3983 /* The five prefix actions: meter, decap, encap, tag, end. */ 3984 act_size = sizeof(struct rte_flow_action) * (actions_n + 5) + 3985 sizeof(struct rte_flow_action_set_tag); 3986 /* tag, end. */ 3987 #define METER_SUFFIX_ITEM 3 3988 item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM + 3989 sizeof(struct mlx5_rte_flow_item_tag) * 2; 3990 sfx_actions = rte_zmalloc(__func__, (act_size + item_size), 0); 3991 if (!sfx_actions) 3992 return rte_flow_error_set(error, ENOMEM, 3993 RTE_FLOW_ERROR_TYPE_ACTION, 3994 NULL, "no memory to split " 3995 "meter flow"); 3996 pre_actions = sfx_actions + actions_n; 3997 mtr_tag_id = flow_meter_split_prep(dev, actions, sfx_actions, 3998 pre_actions); 3999 if (!mtr_tag_id) { 4000 ret = -rte_errno; 4001 goto exit; 4002 } 4003 /* Add the prefix subflow. */ 4004 ret = flow_create_split_inner(dev, flow, &dev_flow, attr, items, 4005 pre_actions, external, error); 4006 if (ret) { 4007 ret = -rte_errno; 4008 goto exit; 4009 } 4010 dev_flow->mtr_flow_id = mtr_tag_id; 4011 /* Prepare the suffix flow match pattern. */ 4012 sfx_items = (struct rte_flow_item *)((char *)sfx_actions + 4013 act_size); 4014 tag_spec = (struct mlx5_rte_flow_item_tag *)(sfx_items + 4015 METER_SUFFIX_ITEM); 4016 tag_spec->data = dev_flow->mtr_flow_id << MLX5_MTR_COLOR_BITS; 4017 tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, 4018 error); 4019 tag_mask = tag_spec + 1; 4020 tag_mask->data = 0xffffff00; 4021 sfx_items->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG; 4022 sfx_items->spec = tag_spec; 4023 sfx_items->last = NULL; 4024 sfx_items->mask = tag_mask; 4025 sfx_items++; 4026 sfx_port_id_item = find_port_id_item(items); 4027 if (sfx_port_id_item) { 4028 memcpy(sfx_items, sfx_port_id_item, 4029 sizeof(*sfx_items)); 4030 sfx_items++; 4031 } 4032 sfx_items->type = RTE_FLOW_ITEM_TYPE_END; 4033 sfx_items -= sfx_port_id_item ? 2 : 1; 4034 /* Setting the sfx group atrr. */ 4035 sfx_attr.group = sfx_attr.transfer ? 4036 (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) : 4037 MLX5_FLOW_TABLE_LEVEL_SUFFIX; 4038 } 4039 /* Add the prefix subflow. */ 4040 ret = flow_create_split_metadata(dev, flow, &sfx_attr, 4041 sfx_items ? sfx_items : items, 4042 sfx_actions ? sfx_actions : actions, 4043 external, error); 4044 exit: 4045 if (sfx_actions) 4046 rte_free(sfx_actions); 4047 return ret; 4048 } 4049 4050 /** 4051 * Split the flow to subflow set. 
The splitters might be linked
 * in the chain, like this:
 *   flow_create_split_outer() calls:
 *     flow_create_split_meter() calls:
 *       flow_create_split_metadata(meter_subflow_0) calls:
 *         flow_create_split_inner(metadata_subflow_0)
 *         flow_create_split_inner(metadata_subflow_1)
 *         flow_create_split_inner(metadata_subflow_2)
 *       flow_create_split_metadata(meter_subflow_1) calls:
 *         flow_create_split_inner(metadata_subflow_0)
 *         flow_create_split_inner(metadata_subflow_1)
 *         flow_create_split_inner(metadata_subflow_2)
 *
 * This provides a flexible way to add new levels of flow splitting.
 * All successfully created subflows are included in the parent flow
 * dev_flow list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Parent flow structure pointer.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[in] external
 *   This flow rule is created by request external to PMD.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @return
 *   0 on success, negative value otherwise.
 */
static int
flow_create_split_outer(struct rte_eth_dev *dev,
                        struct rte_flow *flow,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item items[],
                        const struct rte_flow_action actions[],
                        bool external, struct rte_flow_error *error)
{
        int ret;

        ret = flow_create_split_meter(dev, flow, attr, items,
                                      actions, external, error);
        assert(ret <= 0);
        return ret;
}

/**
 * Create a flow and add it to @p list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list. If this parameter is NULL, no list
 *   insertion occurs; the flow is just created and it is the caller's
 *   responsibility to track the created flow.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[in] external
 *   This flow rule is created by request external to PMD.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow on success, NULL otherwise and rte_errno is set.
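 *
 * A minimal internal usage sketch (illustrative only; see
 * mlx5_flow_create() and mlx5_ctrl_flow_vlan() below for the real call
 * sites):
 *
 * @code
 * struct rte_flow_error error;
 * struct rte_flow *flow;
 *
 * flow = flow_list_create(dev, &priv->flows, &attr, items, actions,
 *                         true, &error);
 * if (!flow)
 *         return -rte_errno;
 * @endcode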
4124 */ 4125 static struct rte_flow * 4126 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, 4127 const struct rte_flow_attr *attr, 4128 const struct rte_flow_item items[], 4129 const struct rte_flow_action actions[], 4130 bool external, struct rte_flow_error *error) 4131 { 4132 struct mlx5_priv *priv = dev->data->dev_private; 4133 struct rte_flow *flow = NULL; 4134 struct mlx5_flow *dev_flow; 4135 const struct rte_flow_action_rss *rss; 4136 union { 4137 struct rte_flow_expand_rss buf; 4138 uint8_t buffer[2048]; 4139 } expand_buffer; 4140 union { 4141 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; 4142 uint8_t buffer[2048]; 4143 } actions_rx; 4144 union { 4145 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; 4146 uint8_t buffer[2048]; 4147 } actions_hairpin_tx; 4148 union { 4149 struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS]; 4150 uint8_t buffer[2048]; 4151 } items_tx; 4152 struct rte_flow_expand_rss *buf = &expand_buffer.buf; 4153 const struct rte_flow_action *p_actions_rx = actions; 4154 int ret; 4155 uint32_t i; 4156 uint32_t flow_size; 4157 int hairpin_flow = 0; 4158 uint32_t hairpin_id = 0; 4159 struct rte_flow_attr attr_tx = { .priority = 0 }; 4160 4161 hairpin_flow = flow_check_hairpin_split(dev, attr, actions); 4162 if (hairpin_flow > 0) { 4163 if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) { 4164 rte_errno = EINVAL; 4165 return NULL; 4166 } 4167 flow_hairpin_split(dev, actions, actions_rx.actions, 4168 actions_hairpin_tx.actions, items_tx.items, 4169 &hairpin_id); 4170 p_actions_rx = actions_rx.actions; 4171 } 4172 ret = flow_drv_validate(dev, attr, items, p_actions_rx, external, 4173 error); 4174 if (ret < 0) 4175 goto error_before_flow; 4176 flow_size = sizeof(struct rte_flow); 4177 rss = flow_get_rss_action(p_actions_rx); 4178 if (rss) 4179 flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t), 4180 sizeof(void *)); 4181 else 4182 flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *)); 4183 flow = rte_calloc(__func__, 1, flow_size, 0); 4184 if (!flow) { 4185 rte_errno = ENOMEM; 4186 goto error_before_flow; 4187 } 4188 flow->drv_type = flow_get_drv_type(dev, attr); 4189 if (hairpin_id != 0) 4190 flow->hairpin_flow_id = hairpin_id; 4191 assert(flow->drv_type > MLX5_FLOW_TYPE_MIN && 4192 flow->drv_type < MLX5_FLOW_TYPE_MAX); 4193 flow->rss.queue = (void *)(flow + 1); 4194 if (rss) { 4195 /* 4196 * The following information is required by 4197 * mlx5_flow_hashfields_adjust() in advance. 4198 */ 4199 flow->rss.level = rss->level; 4200 /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */ 4201 flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types; 4202 } 4203 LIST_INIT(&flow->dev_flows); 4204 if (rss && rss->types) { 4205 unsigned int graph_root; 4206 4207 graph_root = find_graph_root(items, rss->level); 4208 ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer), 4209 items, rss->types, 4210 mlx5_support_expansion, 4211 graph_root); 4212 assert(ret > 0 && 4213 (unsigned int)ret < sizeof(expand_buffer.buffer)); 4214 } else { 4215 buf->entries = 1; 4216 buf->entry[0].pattern = (void *)(uintptr_t)items; 4217 } 4218 for (i = 0; i < buf->entries; ++i) { 4219 /* 4220 * The splitter may create multiple dev_flows, 4221 * depending on configuration. In the simplest 4222 * case it just creates unmodified original flow. 4223 */ 4224 ret = flow_create_split_outer(dev, flow, attr, 4225 buf->entry[i].pattern, 4226 p_actions_rx, external, 4227 error); 4228 if (ret < 0) 4229 goto error; 4230 } 4231 /* Create the tx flow. 
*/ 4232 if (hairpin_flow) { 4233 attr_tx.group = MLX5_HAIRPIN_TX_TABLE; 4234 attr_tx.ingress = 0; 4235 attr_tx.egress = 1; 4236 dev_flow = flow_drv_prepare(flow, &attr_tx, items_tx.items, 4237 actions_hairpin_tx.actions, error); 4238 if (!dev_flow) 4239 goto error; 4240 dev_flow->flow = flow; 4241 dev_flow->external = 0; 4242 LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); 4243 ret = flow_drv_translate(dev, dev_flow, &attr_tx, 4244 items_tx.items, 4245 actions_hairpin_tx.actions, error); 4246 if (ret < 0) 4247 goto error; 4248 } 4249 /* 4250 * Update the metadata register copy table. If extensive 4251 * metadata feature is enabled and registers are supported 4252 * we might create the extra rte_flow for each unique 4253 * MARK/FLAG action ID. 4254 * 4255 * The table is updated for ingress Flows only, because 4256 * the egress Flows belong to the different device and 4257 * copy table should be updated in peer NIC Rx domain. 4258 */ 4259 if (attr->ingress && 4260 (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) { 4261 ret = flow_mreg_update_copy_table(dev, flow, actions, error); 4262 if (ret) 4263 goto error; 4264 } 4265 if (dev->data->dev_started) { 4266 ret = flow_drv_apply(dev, flow, error); 4267 if (ret < 0) 4268 goto error; 4269 } 4270 if (list) 4271 TAILQ_INSERT_TAIL(list, flow, next); 4272 flow_rxq_flags_set(dev, flow); 4273 return flow; 4274 error_before_flow: 4275 if (hairpin_id) 4276 mlx5_flow_id_release(priv->sh->flow_id_pool, 4277 hairpin_id); 4278 return NULL; 4279 error: 4280 assert(flow); 4281 flow_mreg_del_copy_action(dev, flow); 4282 ret = rte_errno; /* Save rte_errno before cleanup. */ 4283 if (flow->hairpin_flow_id) 4284 mlx5_flow_id_release(priv->sh->flow_id_pool, 4285 flow->hairpin_flow_id); 4286 assert(flow); 4287 flow_drv_destroy(dev, flow); 4288 rte_free(flow); 4289 rte_errno = ret; /* Restore rte_errno. */ 4290 return NULL; 4291 } 4292 4293 /** 4294 * Create a dedicated flow rule on e-switch table 0 (root table), to direct all 4295 * incoming packets to table 1. 4296 * 4297 * Other flow rules, requested for group n, will be created in 4298 * e-switch table n+1. 4299 * Jump action to e-switch group n will be created to group n+1. 4300 * 4301 * Used when working in switchdev mode, to utilise advantages of table 1 4302 * and above. 4303 * 4304 * @param dev 4305 * Pointer to Ethernet device. 4306 * 4307 * @return 4308 * Pointer to flow on success, NULL otherwise and rte_errno is set. 4309 */ 4310 struct rte_flow * 4311 mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev) 4312 { 4313 const struct rte_flow_attr attr = { 4314 .group = 0, 4315 .priority = 0, 4316 .ingress = 1, 4317 .egress = 0, 4318 .transfer = 1, 4319 }; 4320 const struct rte_flow_item pattern = { 4321 .type = RTE_FLOW_ITEM_TYPE_END, 4322 }; 4323 struct rte_flow_action_jump jump = { 4324 .group = 1, 4325 }; 4326 const struct rte_flow_action actions[] = { 4327 { 4328 .type = RTE_FLOW_ACTION_TYPE_JUMP, 4329 .conf = &jump, 4330 }, 4331 { 4332 .type = RTE_FLOW_ACTION_TYPE_END, 4333 }, 4334 }; 4335 struct mlx5_priv *priv = dev->data->dev_private; 4336 struct rte_flow_error error; 4337 4338 return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern, 4339 actions, false, &error); 4340 } 4341 4342 /** 4343 * Create a flow. 
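 *
 * Illustrative application-level usage through the generic rte_flow API
 * (names and values are examples only):
 *
 * @code
 * struct rte_flow_attr attr = { .ingress = 1 };
 * struct rte_flow_item pattern[] = {
 *         { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *         { .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * struct rte_flow_action_queue queue = { .index = 0 };
 * struct rte_flow_action actions[] = {
 *         { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *         { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_flow_error error;
 * struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *                                         actions, &error);
 * @endcode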
4344 * 4345 * @see rte_flow_create() 4346 * @see rte_flow_ops 4347 */ 4348 struct rte_flow * 4349 mlx5_flow_create(struct rte_eth_dev *dev, 4350 const struct rte_flow_attr *attr, 4351 const struct rte_flow_item items[], 4352 const struct rte_flow_action actions[], 4353 struct rte_flow_error *error) 4354 { 4355 struct mlx5_priv *priv = dev->data->dev_private; 4356 4357 return flow_list_create(dev, &priv->flows, 4358 attr, items, actions, true, error); 4359 } 4360 4361 /** 4362 * Destroy a flow in a list. 4363 * 4364 * @param dev 4365 * Pointer to Ethernet device. 4366 * @param list 4367 * Pointer to a TAILQ flow list. If this parameter NULL, 4368 * there is no flow removal from the list. 4369 * @param[in] flow 4370 * Flow to destroy. 4371 */ 4372 static void 4373 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, 4374 struct rte_flow *flow) 4375 { 4376 struct mlx5_priv *priv = dev->data->dev_private; 4377 4378 /* 4379 * Update RX queue flags only if port is started, otherwise it is 4380 * already clean. 4381 */ 4382 if (dev->data->dev_started) 4383 flow_rxq_flags_trim(dev, flow); 4384 if (flow->hairpin_flow_id) 4385 mlx5_flow_id_release(priv->sh->flow_id_pool, 4386 flow->hairpin_flow_id); 4387 flow_drv_destroy(dev, flow); 4388 if (list) 4389 TAILQ_REMOVE(list, flow, next); 4390 flow_mreg_del_copy_action(dev, flow); 4391 rte_free(flow->fdir); 4392 rte_free(flow); 4393 } 4394 4395 /** 4396 * Destroy all flows. 4397 * 4398 * @param dev 4399 * Pointer to Ethernet device. 4400 * @param list 4401 * Pointer to a TAILQ flow list. 4402 */ 4403 void 4404 mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list) 4405 { 4406 while (!TAILQ_EMPTY(list)) { 4407 struct rte_flow *flow; 4408 4409 flow = TAILQ_FIRST(list); 4410 flow_list_destroy(dev, list, flow); 4411 } 4412 } 4413 4414 /** 4415 * Remove all flows. 4416 * 4417 * @param dev 4418 * Pointer to Ethernet device. 4419 * @param list 4420 * Pointer to a TAILQ flow list. 4421 */ 4422 void 4423 mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) 4424 { 4425 struct rte_flow *flow; 4426 4427 TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) { 4428 flow_drv_remove(dev, flow); 4429 flow_mreg_stop_copy_action(dev, flow); 4430 } 4431 flow_mreg_del_default_copy_action(dev); 4432 flow_rxq_flags_clear(dev); 4433 } 4434 4435 /** 4436 * Add all flows. 4437 * 4438 * @param dev 4439 * Pointer to Ethernet device. 4440 * @param list 4441 * Pointer to a TAILQ flow list. 4442 * 4443 * @return 4444 * 0 on success, a negative errno value otherwise and rte_errno is set. 4445 */ 4446 int 4447 mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) 4448 { 4449 struct rte_flow *flow; 4450 struct rte_flow_error error; 4451 int ret = 0; 4452 4453 /* Make sure default copy action (reg_c[0] -> reg_b) is created. */ 4454 ret = flow_mreg_add_default_copy_action(dev, &error); 4455 if (ret < 0) 4456 return -rte_errno; 4457 /* Apply Flows created by application. */ 4458 TAILQ_FOREACH(flow, list, next) { 4459 ret = flow_mreg_start_copy_action(dev, flow); 4460 if (ret < 0) 4461 goto error; 4462 ret = flow_drv_apply(dev, flow, &error); 4463 if (ret < 0) 4464 goto error; 4465 flow_rxq_flags_set(dev, flow); 4466 } 4467 return 0; 4468 error: 4469 ret = rte_errno; /* Save rte_errno before cleanup. */ 4470 mlx5_flow_stop(dev, list); 4471 rte_errno = ret; /* Restore rte_errno. */ 4472 return -rte_errno; 4473 } 4474 4475 /** 4476 * Verify the flow list is empty 4477 * 4478 * @param dev 4479 * Pointer to Ethernet device. 
4480 * 4481 * @return the number of flows not released. 4482 */ 4483 int 4484 mlx5_flow_verify(struct rte_eth_dev *dev) 4485 { 4486 struct mlx5_priv *priv = dev->data->dev_private; 4487 struct rte_flow *flow; 4488 int ret = 0; 4489 4490 TAILQ_FOREACH(flow, &priv->flows, next) { 4491 DRV_LOG(DEBUG, "port %u flow %p still referenced", 4492 dev->data->port_id, (void *)flow); 4493 ++ret; 4494 } 4495 return ret; 4496 } 4497 4498 /** 4499 * Enable default hairpin egress flow. 4500 * 4501 * @param dev 4502 * Pointer to Ethernet device. 4503 * @param queue 4504 * The queue index. 4505 * 4506 * @return 4507 * 0 on success, a negative errno value otherwise and rte_errno is set. 4508 */ 4509 int 4510 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, 4511 uint32_t queue) 4512 { 4513 struct mlx5_priv *priv = dev->data->dev_private; 4514 const struct rte_flow_attr attr = { 4515 .egress = 1, 4516 .priority = 0, 4517 }; 4518 struct mlx5_rte_flow_item_tx_queue queue_spec = { 4519 .queue = queue, 4520 }; 4521 struct mlx5_rte_flow_item_tx_queue queue_mask = { 4522 .queue = UINT32_MAX, 4523 }; 4524 struct rte_flow_item items[] = { 4525 { 4526 .type = MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE, 4527 .spec = &queue_spec, 4528 .last = NULL, 4529 .mask = &queue_mask, 4530 }, 4531 { 4532 .type = RTE_FLOW_ITEM_TYPE_END, 4533 }, 4534 }; 4535 struct rte_flow_action_jump jump = { 4536 .group = MLX5_HAIRPIN_TX_TABLE, 4537 }; 4538 struct rte_flow_action actions[2]; 4539 struct rte_flow *flow; 4540 struct rte_flow_error error; 4541 4542 actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP; 4543 actions[0].conf = &jump; 4544 actions[1].type = RTE_FLOW_ACTION_TYPE_END; 4545 flow = flow_list_create(dev, &priv->ctrl_flows, 4546 &attr, items, actions, false, &error); 4547 if (!flow) { 4548 DRV_LOG(DEBUG, 4549 "Failed to create ctrl flow: rte_errno(%d)," 4550 " type(%d), message(%s)", 4551 rte_errno, error.type, 4552 error.message ? error.message : " (no stated reason)"); 4553 return -rte_errno; 4554 } 4555 return 0; 4556 } 4557 4558 /** 4559 * Enable a control flow configured from the control plane. 4560 * 4561 * @param dev 4562 * Pointer to Ethernet device. 4563 * @param eth_spec 4564 * An Ethernet flow spec to apply. 4565 * @param eth_mask 4566 * An Ethernet flow mask to apply. 4567 * @param vlan_spec 4568 * A VLAN flow spec to apply. 4569 * @param vlan_mask 4570 * A VLAN flow mask to apply. 4571 * 4572 * @return 4573 * 0 on success, a negative errno value otherwise and rte_errno is set. 4574 */ 4575 int 4576 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, 4577 struct rte_flow_item_eth *eth_spec, 4578 struct rte_flow_item_eth *eth_mask, 4579 struct rte_flow_item_vlan *vlan_spec, 4580 struct rte_flow_item_vlan *vlan_mask) 4581 { 4582 struct mlx5_priv *priv = dev->data->dev_private; 4583 const struct rte_flow_attr attr = { 4584 .ingress = 1, 4585 .priority = MLX5_FLOW_PRIO_RSVD, 4586 }; 4587 struct rte_flow_item items[] = { 4588 { 4589 .type = RTE_FLOW_ITEM_TYPE_ETH, 4590 .spec = eth_spec, 4591 .last = NULL, 4592 .mask = eth_mask, 4593 }, 4594 { 4595 .type = (vlan_spec) ? 
RTE_FLOW_ITEM_TYPE_VLAN : 4596 RTE_FLOW_ITEM_TYPE_END, 4597 .spec = vlan_spec, 4598 .last = NULL, 4599 .mask = vlan_mask, 4600 }, 4601 { 4602 .type = RTE_FLOW_ITEM_TYPE_END, 4603 }, 4604 }; 4605 uint16_t queue[priv->reta_idx_n]; 4606 struct rte_flow_action_rss action_rss = { 4607 .func = RTE_ETH_HASH_FUNCTION_DEFAULT, 4608 .level = 0, 4609 .types = priv->rss_conf.rss_hf, 4610 .key_len = priv->rss_conf.rss_key_len, 4611 .queue_num = priv->reta_idx_n, 4612 .key = priv->rss_conf.rss_key, 4613 .queue = queue, 4614 }; 4615 struct rte_flow_action actions[] = { 4616 { 4617 .type = RTE_FLOW_ACTION_TYPE_RSS, 4618 .conf = &action_rss, 4619 }, 4620 { 4621 .type = RTE_FLOW_ACTION_TYPE_END, 4622 }, 4623 }; 4624 struct rte_flow *flow; 4625 struct rte_flow_error error; 4626 unsigned int i; 4627 4628 if (!priv->reta_idx_n || !priv->rxqs_n) { 4629 return 0; 4630 } 4631 for (i = 0; i != priv->reta_idx_n; ++i) 4632 queue[i] = (*priv->reta_idx)[i]; 4633 flow = flow_list_create(dev, &priv->ctrl_flows, 4634 &attr, items, actions, false, &error); 4635 if (!flow) 4636 return -rte_errno; 4637 return 0; 4638 } 4639 4640 /** 4641 * Enable a flow control configured from the control plane. 4642 * 4643 * @param dev 4644 * Pointer to Ethernet device. 4645 * @param eth_spec 4646 * An Ethernet flow spec to apply. 4647 * @param eth_mask 4648 * An Ethernet flow mask to apply. 4649 * 4650 * @return 4651 * 0 on success, a negative errno value otherwise and rte_errno is set. 4652 */ 4653 int 4654 mlx5_ctrl_flow(struct rte_eth_dev *dev, 4655 struct rte_flow_item_eth *eth_spec, 4656 struct rte_flow_item_eth *eth_mask) 4657 { 4658 return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL); 4659 } 4660 4661 /** 4662 * Destroy a flow. 4663 * 4664 * @see rte_flow_destroy() 4665 * @see rte_flow_ops 4666 */ 4667 int 4668 mlx5_flow_destroy(struct rte_eth_dev *dev, 4669 struct rte_flow *flow, 4670 struct rte_flow_error *error __rte_unused) 4671 { 4672 struct mlx5_priv *priv = dev->data->dev_private; 4673 4674 flow_list_destroy(dev, &priv->flows, flow); 4675 return 0; 4676 } 4677 4678 /** 4679 * Destroy all flows. 4680 * 4681 * @see rte_flow_flush() 4682 * @see rte_flow_ops 4683 */ 4684 int 4685 mlx5_flow_flush(struct rte_eth_dev *dev, 4686 struct rte_flow_error *error __rte_unused) 4687 { 4688 struct mlx5_priv *priv = dev->data->dev_private; 4689 4690 mlx5_flow_list_flush(dev, &priv->flows); 4691 return 0; 4692 } 4693 4694 /** 4695 * Isolated mode. 4696 * 4697 * @see rte_flow_isolate() 4698 * @see rte_flow_ops 4699 */ 4700 int 4701 mlx5_flow_isolate(struct rte_eth_dev *dev, 4702 int enable, 4703 struct rte_flow_error *error) 4704 { 4705 struct mlx5_priv *priv = dev->data->dev_private; 4706 4707 if (dev->data->dev_started) { 4708 rte_flow_error_set(error, EBUSY, 4709 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 4710 NULL, 4711 "port must be stopped first"); 4712 return -rte_errno; 4713 } 4714 priv->isolated = !!enable; 4715 if (enable) 4716 dev->dev_ops = &mlx5_dev_ops_isolate; 4717 else 4718 dev->dev_ops = &mlx5_dev_ops; 4719 return 0; 4720 } 4721 4722 /** 4723 * Query a flow. 
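 *
 * An application typically reaches this dispatcher through
 * rte_flow_query(), for example to read a COUNT action. Sketch only;
 * port_id, flow and process() are placeholders:
 *
 * @code
 * struct rte_flow_query_count count = { .reset = 0 };
 * struct rte_flow_action action = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 * struct rte_flow_error error;
 *
 * if (!rte_flow_query(port_id, flow, &action, &count, &error))
 *         process(count.hits, count.bytes);
 * @endcode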
4724 * 4725 * @see rte_flow_query() 4726 * @see rte_flow_ops 4727 */ 4728 static int 4729 flow_drv_query(struct rte_eth_dev *dev, 4730 struct rte_flow *flow, 4731 const struct rte_flow_action *actions, 4732 void *data, 4733 struct rte_flow_error *error) 4734 { 4735 const struct mlx5_flow_driver_ops *fops; 4736 enum mlx5_flow_drv_type ftype = flow->drv_type; 4737 4738 assert(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX); 4739 fops = flow_get_drv_ops(ftype); 4740 4741 return fops->query(dev, flow, actions, data, error); 4742 } 4743 4744 /** 4745 * Query a flow. 4746 * 4747 * @see rte_flow_query() 4748 * @see rte_flow_ops 4749 */ 4750 int 4751 mlx5_flow_query(struct rte_eth_dev *dev, 4752 struct rte_flow *flow, 4753 const struct rte_flow_action *actions, 4754 void *data, 4755 struct rte_flow_error *error) 4756 { 4757 int ret; 4758 4759 ret = flow_drv_query(dev, flow, actions, data, error); 4760 if (ret < 0) 4761 return ret; 4762 return 0; 4763 } 4764 4765 /** 4766 * Convert a flow director filter to a generic flow. 4767 * 4768 * @param dev 4769 * Pointer to Ethernet device. 4770 * @param fdir_filter 4771 * Flow director filter to add. 4772 * @param attributes 4773 * Generic flow parameters structure. 4774 * 4775 * @return 4776 * 0 on success, a negative errno value otherwise and rte_errno is set. 4777 */ 4778 static int 4779 flow_fdir_filter_convert(struct rte_eth_dev *dev, 4780 const struct rte_eth_fdir_filter *fdir_filter, 4781 struct mlx5_fdir *attributes) 4782 { 4783 struct mlx5_priv *priv = dev->data->dev_private; 4784 const struct rte_eth_fdir_input *input = &fdir_filter->input; 4785 const struct rte_eth_fdir_masks *mask = 4786 &dev->data->dev_conf.fdir_conf.mask; 4787 4788 /* Validate queue number. */ 4789 if (fdir_filter->action.rx_queue >= priv->rxqs_n) { 4790 DRV_LOG(ERR, "port %u invalid queue number %d", 4791 dev->data->port_id, fdir_filter->action.rx_queue); 4792 rte_errno = EINVAL; 4793 return -rte_errno; 4794 } 4795 attributes->attr.ingress = 1; 4796 attributes->items[0] = (struct rte_flow_item) { 4797 .type = RTE_FLOW_ITEM_TYPE_ETH, 4798 .spec = &attributes->l2, 4799 .mask = &attributes->l2_mask, 4800 }; 4801 switch (fdir_filter->action.behavior) { 4802 case RTE_ETH_FDIR_ACCEPT: 4803 attributes->actions[0] = (struct rte_flow_action){ 4804 .type = RTE_FLOW_ACTION_TYPE_QUEUE, 4805 .conf = &attributes->queue, 4806 }; 4807 break; 4808 case RTE_ETH_FDIR_REJECT: 4809 attributes->actions[0] = (struct rte_flow_action){ 4810 .type = RTE_FLOW_ACTION_TYPE_DROP, 4811 }; 4812 break; 4813 default: 4814 DRV_LOG(ERR, "port %u invalid behavior %d", 4815 dev->data->port_id, 4816 fdir_filter->action.behavior); 4817 rte_errno = ENOTSUP; 4818 return -rte_errno; 4819 } 4820 attributes->queue.index = fdir_filter->action.rx_queue; 4821 /* Handle L3. 
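         * Fill attributes->items[1] with an IPv4 or IPv6 item built from the
         * flow director input and the device-level fdir masks.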
*/ 4822 switch (fdir_filter->input.flow_type) { 4823 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 4824 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 4825 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 4826 attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){ 4827 .src_addr = input->flow.ip4_flow.src_ip, 4828 .dst_addr = input->flow.ip4_flow.dst_ip, 4829 .time_to_live = input->flow.ip4_flow.ttl, 4830 .type_of_service = input->flow.ip4_flow.tos, 4831 }; 4832 attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){ 4833 .src_addr = mask->ipv4_mask.src_ip, 4834 .dst_addr = mask->ipv4_mask.dst_ip, 4835 .time_to_live = mask->ipv4_mask.ttl, 4836 .type_of_service = mask->ipv4_mask.tos, 4837 .next_proto_id = mask->ipv4_mask.proto, 4838 }; 4839 attributes->items[1] = (struct rte_flow_item){ 4840 .type = RTE_FLOW_ITEM_TYPE_IPV4, 4841 .spec = &attributes->l3, 4842 .mask = &attributes->l3_mask, 4843 }; 4844 break; 4845 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 4846 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 4847 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 4848 attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){ 4849 .hop_limits = input->flow.ipv6_flow.hop_limits, 4850 .proto = input->flow.ipv6_flow.proto, 4851 }; 4852 4853 memcpy(attributes->l3.ipv6.hdr.src_addr, 4854 input->flow.ipv6_flow.src_ip, 4855 RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); 4856 memcpy(attributes->l3.ipv6.hdr.dst_addr, 4857 input->flow.ipv6_flow.dst_ip, 4858 RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); 4859 memcpy(attributes->l3_mask.ipv6.hdr.src_addr, 4860 mask->ipv6_mask.src_ip, 4861 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); 4862 memcpy(attributes->l3_mask.ipv6.hdr.dst_addr, 4863 mask->ipv6_mask.dst_ip, 4864 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); 4865 attributes->items[1] = (struct rte_flow_item){ 4866 .type = RTE_FLOW_ITEM_TYPE_IPV6, 4867 .spec = &attributes->l3, 4868 .mask = &attributes->l3_mask, 4869 }; 4870 break; 4871 default: 4872 DRV_LOG(ERR, "port %u invalid flow type%d", 4873 dev->data->port_id, fdir_filter->input.flow_type); 4874 rte_errno = ENOTSUP; 4875 return -rte_errno; 4876 } 4877 /* Handle L4. 
*/ 4878 switch (fdir_filter->input.flow_type) { 4879 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 4880 attributes->l4.udp.hdr = (struct rte_udp_hdr){ 4881 .src_port = input->flow.udp4_flow.src_port, 4882 .dst_port = input->flow.udp4_flow.dst_port, 4883 }; 4884 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ 4885 .src_port = mask->src_port_mask, 4886 .dst_port = mask->dst_port_mask, 4887 }; 4888 attributes->items[2] = (struct rte_flow_item){ 4889 .type = RTE_FLOW_ITEM_TYPE_UDP, 4890 .spec = &attributes->l4, 4891 .mask = &attributes->l4_mask, 4892 }; 4893 break; 4894 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 4895 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ 4896 .src_port = input->flow.tcp4_flow.src_port, 4897 .dst_port = input->flow.tcp4_flow.dst_port, 4898 }; 4899 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ 4900 .src_port = mask->src_port_mask, 4901 .dst_port = mask->dst_port_mask, 4902 }; 4903 attributes->items[2] = (struct rte_flow_item){ 4904 .type = RTE_FLOW_ITEM_TYPE_TCP, 4905 .spec = &attributes->l4, 4906 .mask = &attributes->l4_mask, 4907 }; 4908 break; 4909 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 4910 attributes->l4.udp.hdr = (struct rte_udp_hdr){ 4911 .src_port = input->flow.udp6_flow.src_port, 4912 .dst_port = input->flow.udp6_flow.dst_port, 4913 }; 4914 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ 4915 .src_port = mask->src_port_mask, 4916 .dst_port = mask->dst_port_mask, 4917 }; 4918 attributes->items[2] = (struct rte_flow_item){ 4919 .type = RTE_FLOW_ITEM_TYPE_UDP, 4920 .spec = &attributes->l4, 4921 .mask = &attributes->l4_mask, 4922 }; 4923 break; 4924 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 4925 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ 4926 .src_port = input->flow.tcp6_flow.src_port, 4927 .dst_port = input->flow.tcp6_flow.dst_port, 4928 }; 4929 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ 4930 .src_port = mask->src_port_mask, 4931 .dst_port = mask->dst_port_mask, 4932 }; 4933 attributes->items[2] = (struct rte_flow_item){ 4934 .type = RTE_FLOW_ITEM_TYPE_TCP, 4935 .spec = &attributes->l4, 4936 .mask = &attributes->l4_mask, 4937 }; 4938 break; 4939 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 4940 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 4941 break; 4942 default: 4943 DRV_LOG(ERR, "port %u invalid flow type%d", 4944 dev->data->port_id, fdir_filter->input.flow_type); 4945 rte_errno = ENOTSUP; 4946 return -rte_errno; 4947 } 4948 return 0; 4949 } 4950 4951 #define FLOW_FDIR_CMP(f1, f2, fld) \ 4952 memcmp(&(f1)->fld, &(f2)->fld, sizeof(f1->fld)) 4953 4954 /** 4955 * Compare two FDIR flows. If items and actions are identical, the two flows are 4956 * regarded as same. 4957 * 4958 * @param dev 4959 * Pointer to Ethernet device. 4960 * @param f1 4961 * FDIR flow to compare. 4962 * @param f2 4963 * FDIR flow to compare. 4964 * 4965 * @return 4966 * Zero on match, 1 otherwise. 4967 */ 4968 static int 4969 flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2) 4970 { 4971 if (FLOW_FDIR_CMP(f1, f2, attr) || 4972 FLOW_FDIR_CMP(f1, f2, l2) || 4973 FLOW_FDIR_CMP(f1, f2, l2_mask) || 4974 FLOW_FDIR_CMP(f1, f2, l3) || 4975 FLOW_FDIR_CMP(f1, f2, l3_mask) || 4976 FLOW_FDIR_CMP(f1, f2, l4) || 4977 FLOW_FDIR_CMP(f1, f2, l4_mask) || 4978 FLOW_FDIR_CMP(f1, f2, actions[0].type)) 4979 return 1; 4980 if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE && 4981 FLOW_FDIR_CMP(f1, f2, queue)) 4982 return 1; 4983 return 0; 4984 } 4985 4986 /** 4987 * Search device flow list to find out a matched FDIR flow. 4988 * 4989 * @param dev 4990 * Pointer to Ethernet device. 
4991 * @param fdir_flow 4992 * FDIR flow to lookup. 4993 * 4994 * @return 4995 * Pointer of flow if found, NULL otherwise. 4996 */ 4997 static struct rte_flow * 4998 flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow) 4999 { 5000 struct mlx5_priv *priv = dev->data->dev_private; 5001 struct rte_flow *flow = NULL; 5002 5003 assert(fdir_flow); 5004 TAILQ_FOREACH(flow, &priv->flows, next) { 5005 if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) { 5006 DRV_LOG(DEBUG, "port %u found FDIR flow %p", 5007 dev->data->port_id, (void *)flow); 5008 break; 5009 } 5010 } 5011 return flow; 5012 } 5013 5014 /** 5015 * Add new flow director filter and store it in list. 5016 * 5017 * @param dev 5018 * Pointer to Ethernet device. 5019 * @param fdir_filter 5020 * Flow director filter to add. 5021 * 5022 * @return 5023 * 0 on success, a negative errno value otherwise and rte_errno is set. 5024 */ 5025 static int 5026 flow_fdir_filter_add(struct rte_eth_dev *dev, 5027 const struct rte_eth_fdir_filter *fdir_filter) 5028 { 5029 struct mlx5_priv *priv = dev->data->dev_private; 5030 struct mlx5_fdir *fdir_flow; 5031 struct rte_flow *flow; 5032 int ret; 5033 5034 fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0); 5035 if (!fdir_flow) { 5036 rte_errno = ENOMEM; 5037 return -rte_errno; 5038 } 5039 ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow); 5040 if (ret) 5041 goto error; 5042 flow = flow_fdir_filter_lookup(dev, fdir_flow); 5043 if (flow) { 5044 rte_errno = EEXIST; 5045 goto error; 5046 } 5047 flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr, 5048 fdir_flow->items, fdir_flow->actions, true, 5049 NULL); 5050 if (!flow) 5051 goto error; 5052 assert(!flow->fdir); 5053 flow->fdir = fdir_flow; 5054 DRV_LOG(DEBUG, "port %u created FDIR flow %p", 5055 dev->data->port_id, (void *)flow); 5056 return 0; 5057 error: 5058 rte_free(fdir_flow); 5059 return -rte_errno; 5060 } 5061 5062 /** 5063 * Delete specific filter. 5064 * 5065 * @param dev 5066 * Pointer to Ethernet device. 5067 * @param fdir_filter 5068 * Filter to be deleted. 5069 * 5070 * @return 5071 * 0 on success, a negative errno value otherwise and rte_errno is set. 5072 */ 5073 static int 5074 flow_fdir_filter_delete(struct rte_eth_dev *dev, 5075 const struct rte_eth_fdir_filter *fdir_filter) 5076 { 5077 struct mlx5_priv *priv = dev->data->dev_private; 5078 struct rte_flow *flow; 5079 struct mlx5_fdir fdir_flow = { 5080 .attr.group = 0, 5081 }; 5082 int ret; 5083 5084 ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow); 5085 if (ret) 5086 return -rte_errno; 5087 flow = flow_fdir_filter_lookup(dev, &fdir_flow); 5088 if (!flow) { 5089 rte_errno = ENOENT; 5090 return -rte_errno; 5091 } 5092 flow_list_destroy(dev, &priv->flows, flow); 5093 DRV_LOG(DEBUG, "port %u deleted FDIR flow %p", 5094 dev->data->port_id, (void *)flow); 5095 return 0; 5096 } 5097 5098 /** 5099 * Update queue for specific filter. 5100 * 5101 * @param dev 5102 * Pointer to Ethernet device. 5103 * @param fdir_filter 5104 * Filter to be updated. 5105 * 5106 * @return 5107 * 0 on success, a negative errno value otherwise and rte_errno is set. 5108 */ 5109 static int 5110 flow_fdir_filter_update(struct rte_eth_dev *dev, 5111 const struct rte_eth_fdir_filter *fdir_filter) 5112 { 5113 int ret; 5114 5115 ret = flow_fdir_filter_delete(dev, fdir_filter); 5116 if (ret) 5117 return ret; 5118 return flow_fdir_filter_add(dev, fdir_filter); 5119 } 5120 5121 /** 5122 * Flush all filters. 
5123 * 5124 * @param dev 5125 * Pointer to Ethernet device. 5126 */ 5127 static void 5128 flow_fdir_filter_flush(struct rte_eth_dev *dev) 5129 { 5130 struct mlx5_priv *priv = dev->data->dev_private; 5131 5132 mlx5_flow_list_flush(dev, &priv->flows); 5133 } 5134 5135 /** 5136 * Get flow director information. 5137 * 5138 * @param dev 5139 * Pointer to Ethernet device. 5140 * @param[out] fdir_info 5141 * Resulting flow director information. 5142 */ 5143 static void 5144 flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) 5145 { 5146 struct rte_eth_fdir_masks *mask = 5147 &dev->data->dev_conf.fdir_conf.mask; 5148 5149 fdir_info->mode = dev->data->dev_conf.fdir_conf.mode; 5150 fdir_info->guarant_spc = 0; 5151 rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask)); 5152 fdir_info->max_flexpayload = 0; 5153 fdir_info->flow_types_mask[0] = 0; 5154 fdir_info->flex_payload_unit = 0; 5155 fdir_info->max_flex_payload_segment_num = 0; 5156 fdir_info->flex_payload_limit = 0; 5157 memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf)); 5158 } 5159 5160 /** 5161 * Deal with flow director operations. 5162 * 5163 * @param dev 5164 * Pointer to Ethernet device. 5165 * @param filter_op 5166 * Operation to perform. 5167 * @param arg 5168 * Pointer to operation-specific structure. 5169 * 5170 * @return 5171 * 0 on success, a negative errno value otherwise and rte_errno is set. 5172 */ 5173 static int 5174 flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, 5175 void *arg) 5176 { 5177 enum rte_fdir_mode fdir_mode = 5178 dev->data->dev_conf.fdir_conf.mode; 5179 5180 if (filter_op == RTE_ETH_FILTER_NOP) 5181 return 0; 5182 if (fdir_mode != RTE_FDIR_MODE_PERFECT && 5183 fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 5184 DRV_LOG(ERR, "port %u flow director mode %d not supported", 5185 dev->data->port_id, fdir_mode); 5186 rte_errno = EINVAL; 5187 return -rte_errno; 5188 } 5189 switch (filter_op) { 5190 case RTE_ETH_FILTER_ADD: 5191 return flow_fdir_filter_add(dev, arg); 5192 case RTE_ETH_FILTER_UPDATE: 5193 return flow_fdir_filter_update(dev, arg); 5194 case RTE_ETH_FILTER_DELETE: 5195 return flow_fdir_filter_delete(dev, arg); 5196 case RTE_ETH_FILTER_FLUSH: 5197 flow_fdir_filter_flush(dev); 5198 break; 5199 case RTE_ETH_FILTER_INFO: 5200 flow_fdir_info_get(dev, arg); 5201 break; 5202 default: 5203 DRV_LOG(DEBUG, "port %u unknown operation %u", 5204 dev->data->port_id, filter_op); 5205 rte_errno = EINVAL; 5206 return -rte_errno; 5207 } 5208 return 0; 5209 } 5210 5211 /** 5212 * Manage filter operations. 5213 * 5214 * @param dev 5215 * Pointer to Ethernet device structure. 5216 * @param filter_type 5217 * Filter type. 5218 * @param filter_op 5219 * Operation to perform. 5220 * @param arg 5221 * Pointer to operation-specific structure. 5222 * 5223 * @return 5224 * 0 on success, a negative errno value otherwise and rte_errno is set. 
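 *
 * For instance, the generic flow API layer obtains the driver flow ops
 * through this callback roughly as follows (illustrative sketch):
 *
 * @code
 * const struct rte_flow_ops *ops = NULL;
 *
 * rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *                         RTE_ETH_FILTER_GET, &ops);
 * @endcode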
5225 */ 5226 int 5227 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, 5228 enum rte_filter_type filter_type, 5229 enum rte_filter_op filter_op, 5230 void *arg) 5231 { 5232 switch (filter_type) { 5233 case RTE_ETH_FILTER_GENERIC: 5234 if (filter_op != RTE_ETH_FILTER_GET) { 5235 rte_errno = EINVAL; 5236 return -rte_errno; 5237 } 5238 *(const void **)arg = &mlx5_flow_ops; 5239 return 0; 5240 case RTE_ETH_FILTER_FDIR: 5241 return flow_fdir_ctrl_func(dev, filter_op, arg); 5242 default: 5243 DRV_LOG(ERR, "port %u filter type (%d) not supported", 5244 dev->data->port_id, filter_type); 5245 rte_errno = ENOTSUP; 5246 return -rte_errno; 5247 } 5248 return 0; 5249 } 5250 5251 /** 5252 * Create the needed meter and suffix tables. 5253 * 5254 * @param[in] dev 5255 * Pointer to Ethernet device. 5256 * @param[in] fm 5257 * Pointer to the flow meter. 5258 * 5259 * @return 5260 * Pointer to table set on success, NULL otherwise. 5261 */ 5262 struct mlx5_meter_domains_infos * 5263 mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev, 5264 const struct mlx5_flow_meter *fm) 5265 { 5266 const struct mlx5_flow_driver_ops *fops; 5267 5268 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5269 return fops->create_mtr_tbls(dev, fm); 5270 } 5271 5272 /** 5273 * Destroy the meter table set. 5274 * 5275 * @param[in] dev 5276 * Pointer to Ethernet device. 5277 * @param[in] tbl 5278 * Pointer to the meter table set. 5279 * 5280 * @return 5281 * 0 on success. 5282 */ 5283 int 5284 mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev, 5285 struct mlx5_meter_domains_infos *tbls) 5286 { 5287 const struct mlx5_flow_driver_ops *fops; 5288 5289 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5290 return fops->destroy_mtr_tbls(dev, tbls); 5291 } 5292 5293 /** 5294 * Create policer rules. 5295 * 5296 * @param[in] dev 5297 * Pointer to Ethernet device. 5298 * @param[in] fm 5299 * Pointer to flow meter structure. 5300 * @param[in] attr 5301 * Pointer to flow attributes. 5302 * 5303 * @return 5304 * 0 on success, -1 otherwise. 5305 */ 5306 int 5307 mlx5_flow_create_policer_rules(struct rte_eth_dev *dev, 5308 struct mlx5_flow_meter *fm, 5309 const struct rte_flow_attr *attr) 5310 { 5311 const struct mlx5_flow_driver_ops *fops; 5312 5313 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5314 return fops->create_policer_rules(dev, fm, attr); 5315 } 5316 5317 /** 5318 * Destroy policer rules. 5319 * 5320 * @param[in] fm 5321 * Pointer to flow meter structure. 5322 * @param[in] attr 5323 * Pointer to flow attributes. 5324 * 5325 * @return 5326 * 0 on success, -1 otherwise. 5327 */ 5328 int 5329 mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev, 5330 struct mlx5_flow_meter *fm, 5331 const struct rte_flow_attr *attr) 5332 { 5333 const struct mlx5_flow_driver_ops *fops; 5334 5335 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5336 return fops->destroy_policer_rules(dev, fm, attr); 5337 } 5338 5339 /** 5340 * Allocate a counter. 5341 * 5342 * @param[in] dev 5343 * Pointer to Ethernet device structure. 5344 * 5345 * @return 5346 * Pointer to allocated counter on success, NULL otherwise. 
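 *
 * Illustrative usage together with mlx5_counter_query() and
 * mlx5_counter_free() defined below (sketch only):
 *
 * @code
 * uint64_t pkts = 0, bytes = 0;
 * struct mlx5_flow_counter *cnt = mlx5_counter_alloc(dev);
 *
 * if (cnt) {
 *         mlx5_counter_query(dev, cnt, false, &pkts, &bytes);
 *         mlx5_counter_free(dev, cnt);
 * }
 * @endcode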
5347 */ 5348 struct mlx5_flow_counter * 5349 mlx5_counter_alloc(struct rte_eth_dev *dev) 5350 { 5351 const struct mlx5_flow_driver_ops *fops; 5352 struct rte_flow_attr attr = { .transfer = 0 }; 5353 5354 if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) { 5355 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5356 return fops->counter_alloc(dev); 5357 } 5358 DRV_LOG(ERR, 5359 "port %u counter allocate is not supported.", 5360 dev->data->port_id); 5361 return NULL; 5362 } 5363 5364 /** 5365 * Free a counter. 5366 * 5367 * @param[in] dev 5368 * Pointer to Ethernet device structure. 5369 * @param[in] cnt 5370 * Pointer to counter to be free. 5371 */ 5372 void 5373 mlx5_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt) 5374 { 5375 const struct mlx5_flow_driver_ops *fops; 5376 struct rte_flow_attr attr = { .transfer = 0 }; 5377 5378 if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) { 5379 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5380 fops->counter_free(dev, cnt); 5381 return; 5382 } 5383 DRV_LOG(ERR, 5384 "port %u counter free is not supported.", 5385 dev->data->port_id); 5386 } 5387 5388 /** 5389 * Query counter statistics. 5390 * 5391 * @param[in] dev 5392 * Pointer to Ethernet device structure. 5393 * @param[in] cnt 5394 * Pointer to counter to query. 5395 * @param[in] clear 5396 * Set to clear counter statistics. 5397 * @param[out] pkts 5398 * The counter hits packets number to save. 5399 * @param[out] bytes 5400 * The counter hits bytes number to save. 5401 * 5402 * @return 5403 * 0 on success, a negative errno value otherwise. 5404 */ 5405 int 5406 mlx5_counter_query(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt, 5407 bool clear, uint64_t *pkts, uint64_t *bytes) 5408 { 5409 const struct mlx5_flow_driver_ops *fops; 5410 struct rte_flow_attr attr = { .transfer = 0 }; 5411 5412 if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) { 5413 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); 5414 return fops->counter_query(dev, cnt, clear, pkts, bytes); 5415 } 5416 DRV_LOG(ERR, 5417 "port %u counter query is not supported.", 5418 dev->data->port_id); 5419 return -ENOTSUP; 5420 } 5421 5422 #define MLX5_POOL_QUERY_FREQ_US 1000000 5423 5424 /** 5425 * Set the periodic procedure for triggering asynchronous batch queries for all 5426 * the counter pools. 5427 * 5428 * @param[in] sh 5429 * Pointer to mlx5_ibv_shared object. 5430 */ 5431 void 5432 mlx5_set_query_alarm(struct mlx5_ibv_shared *sh) 5433 { 5434 struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(sh, 0, 0); 5435 uint32_t pools_n = rte_atomic16_read(&cont->n_valid); 5436 uint32_t us; 5437 5438 cont = MLX5_CNT_CONTAINER(sh, 1, 0); 5439 pools_n += rte_atomic16_read(&cont->n_valid); 5440 us = MLX5_POOL_QUERY_FREQ_US / pools_n; 5441 DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us); 5442 if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) { 5443 sh->cmng.query_thread_on = 0; 5444 DRV_LOG(ERR, "Cannot reinitialize query alarm"); 5445 } else { 5446 sh->cmng.query_thread_on = 1; 5447 } 5448 } 5449 5450 /** 5451 * The periodic procedure for triggering asynchronous batch queries for all the 5452 * counter pools. This function is probably called by the host thread. 5453 * 5454 * @param[in] arg 5455 * The parameter for the alarm process. 
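 *
 * The handler re-arms itself through mlx5_set_query_alarm() on every
 * invocation and keeps at most MLX5_MAX_PENDING_QUERIES asynchronous
 * queries outstanding.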
5456 */ 5457 void 5458 mlx5_flow_query_alarm(void *arg) 5459 { 5460 struct mlx5_ibv_shared *sh = arg; 5461 struct mlx5_devx_obj *dcs; 5462 uint16_t offset; 5463 int ret; 5464 uint8_t batch = sh->cmng.batch; 5465 uint16_t pool_index = sh->cmng.pool_index; 5466 struct mlx5_pools_container *cont; 5467 struct mlx5_pools_container *mcont; 5468 struct mlx5_flow_counter_pool *pool; 5469 5470 if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES) 5471 goto set_alarm; 5472 next_container: 5473 cont = MLX5_CNT_CONTAINER(sh, batch, 1); 5474 mcont = MLX5_CNT_CONTAINER(sh, batch, 0); 5475 /* Check if resize was done and need to flip a container. */ 5476 if (cont != mcont) { 5477 if (cont->pools) { 5478 /* Clean the old container. */ 5479 rte_free(cont->pools); 5480 memset(cont, 0, sizeof(*cont)); 5481 } 5482 rte_cio_wmb(); 5483 /* Flip the host container. */ 5484 sh->cmng.mhi[batch] ^= (uint8_t)2; 5485 cont = mcont; 5486 } 5487 if (!cont->pools) { 5488 /* 2 empty containers case is unexpected. */ 5489 if (unlikely(batch != sh->cmng.batch)) 5490 goto set_alarm; 5491 batch ^= 0x1; 5492 pool_index = 0; 5493 goto next_container; 5494 } 5495 pool = cont->pools[pool_index]; 5496 if (pool->raw_hw) 5497 /* There is a pool query in progress. */ 5498 goto set_alarm; 5499 pool->raw_hw = 5500 LIST_FIRST(&sh->cmng.free_stat_raws); 5501 if (!pool->raw_hw) 5502 /* No free counter statistics raw memory. */ 5503 goto set_alarm; 5504 dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read 5505 (&pool->a64_dcs); 5506 offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL; 5507 ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL - 5508 offset, NULL, NULL, 5509 pool->raw_hw->mem_mng->dm->id, 5510 (void *)(uintptr_t) 5511 (pool->raw_hw->data + offset), 5512 sh->devx_comp, 5513 (uint64_t)(uintptr_t)pool); 5514 if (ret) { 5515 DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID" 5516 " %d", pool->min_dcs->id); 5517 pool->raw_hw = NULL; 5518 goto set_alarm; 5519 } 5520 pool->raw_hw->min_dcs_id = dcs->id; 5521 LIST_REMOVE(pool->raw_hw, next); 5522 sh->cmng.pending_queries++; 5523 pool_index++; 5524 if (pool_index >= rte_atomic16_read(&cont->n_valid)) { 5525 batch ^= 0x1; 5526 pool_index = 0; 5527 } 5528 set_alarm: 5529 sh->cmng.batch = batch; 5530 sh->cmng.pool_index = pool_index; 5531 mlx5_set_query_alarm(sh); 5532 } 5533 5534 /** 5535 * Handler for the HW respond about ready values from an asynchronous batch 5536 * query. This function is probably called by the host thread. 5537 * 5538 * @param[in] sh 5539 * The pointer to the shared IB device context. 5540 * @param[in] async_id 5541 * The Devx async ID. 5542 * @param[in] status 5543 * The status of the completion. 5544 */ 5545 void 5546 mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh, 5547 uint64_t async_id, int status) 5548 { 5549 struct mlx5_flow_counter_pool *pool = 5550 (struct mlx5_flow_counter_pool *)(uintptr_t)async_id; 5551 struct mlx5_counter_stats_raw *raw_to_free; 5552 5553 if (unlikely(status)) { 5554 raw_to_free = pool->raw_hw; 5555 } else { 5556 raw_to_free = pool->raw; 5557 rte_spinlock_lock(&pool->sl); 5558 pool->raw = pool->raw_hw; 5559 rte_spinlock_unlock(&pool->sl); 5560 rte_atomic64_add(&pool->query_gen, 1); 5561 /* Be sure the new raw counters data is updated in memory. */ 5562 rte_cio_wmb(); 5563 } 5564 LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next); 5565 pool->raw_hw = NULL; 5566 sh->cmng.pending_queries--; 5567 } 5568 5569 /** 5570 * Translate the rte_flow group index to HW table value. 
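 * For example, an external rule with attr->transfer set and group 3 is
 * placed into HW table 4, while PMD-internal or non-transfer rules use
 * the group index as the table value directly.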
5571 * 5572 * @param[in] attributes 5573 * Pointer to flow attributes 5574 * @param[in] external 5575 * Value is part of flow rule created by request external to PMD. 5576 * @param[in] group 5577 * rte_flow group index value. 5578 * @param[out] table 5579 * HW table value. 5580 * @param[out] error 5581 * Pointer to error structure. 5582 * 5583 * @return 5584 * 0 on success, a negative errno value otherwise and rte_errno is set. 5585 */ 5586 int 5587 mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external, 5588 uint32_t group, uint32_t *table, 5589 struct rte_flow_error *error) 5590 { 5591 if (attributes->transfer && external) { 5592 if (group == UINT32_MAX) 5593 return rte_flow_error_set 5594 (error, EINVAL, 5595 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, 5596 NULL, 5597 "group index not supported"); 5598 *table = group + 1; 5599 } else { 5600 *table = group; 5601 } 5602 return 0; 5603 } 5604 5605 /** 5606 * Discover availability of metadata reg_c's. 5607 * 5608 * Iteratively use test flows to check availability. 5609 * 5610 * @param[in] dev 5611 * Pointer to the Ethernet device structure. 5612 * 5613 * @return 5614 * 0 on success, a negative errno value otherwise and rte_errno is set. 5615 */ 5616 int 5617 mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev) 5618 { 5619 struct mlx5_priv *priv = dev->data->dev_private; 5620 struct mlx5_dev_config *config = &priv->config; 5621 enum modify_reg idx; 5622 int n = 0; 5623 5624 /* reg_c[0] and reg_c[1] are reserved. */ 5625 config->flow_mreg_c[n++] = REG_C_0; 5626 config->flow_mreg_c[n++] = REG_C_1; 5627 /* Discover availability of other reg_c's. */ 5628 for (idx = REG_C_2; idx <= REG_C_7; ++idx) { 5629 struct rte_flow_attr attr = { 5630 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, 5631 .priority = MLX5_FLOW_PRIO_RSVD, 5632 .ingress = 1, 5633 }; 5634 struct rte_flow_item items[] = { 5635 [0] = { 5636 .type = RTE_FLOW_ITEM_TYPE_END, 5637 }, 5638 }; 5639 struct rte_flow_action actions[] = { 5640 [0] = { 5641 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 5642 .conf = &(struct mlx5_flow_action_copy_mreg){ 5643 .src = REG_C_1, 5644 .dst = idx, 5645 }, 5646 }, 5647 [1] = { 5648 .type = RTE_FLOW_ACTION_TYPE_JUMP, 5649 .conf = &(struct rte_flow_action_jump){ 5650 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, 5651 }, 5652 }, 5653 [2] = { 5654 .type = RTE_FLOW_ACTION_TYPE_END, 5655 }, 5656 }; 5657 struct rte_flow *flow; 5658 struct rte_flow_error error; 5659 5660 if (!config->dv_flow_en) 5661 break; 5662 /* Create internal flow, validation skips copy action. */ 5663 flow = flow_list_create(dev, NULL, &attr, items, 5664 actions, false, &error); 5665 if (!flow) 5666 continue; 5667 if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL)) 5668 config->flow_mreg_c[n++] = idx; 5669 flow_list_destroy(dev, NULL, flow); 5670 } 5671 for (; n < MLX5_MREG_C_NUM; ++n) 5672 config->flow_mreg_c[n] = REG_NONE; 5673 return 0; 5674 } 5675 5676 /** 5677 * Dump flow raw hw data to file 5678 * 5679 * @param[in] dev 5680 * The pointer to Ethernet device. 5681 * @param[in] file 5682 * A pointer to a file for output. 5683 * @param[out] error 5684 * Perform verbose error reporting if not NULL. PMDs initialize this 5685 * structure in case of error only. 5686 * @return 5687 * 0 on success, a nagative value otherwise. 5688 */ 5689 int 5690 mlx5_flow_dev_dump(struct rte_eth_dev *dev, 5691 FILE *file, 5692 struct rte_flow_error *error __rte_unused) 5693 { 5694 struct mlx5_priv *priv = dev->data->dev_private; 5695 5696 return mlx5_devx_cmd_flow_dump(priv->sh, file); 5697 } 5698