1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2016 6WIND S.A. 3 * Copyright 2016 Mellanox Technologies, Ltd 4 */ 5 6 #include <netinet/in.h> 7 #include <sys/queue.h> 8 #include <stdalign.h> 9 #include <stdint.h> 10 #include <string.h> 11 12 /* Verbs header. */ 13 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ 14 #ifdef PEDANTIC 15 #pragma GCC diagnostic ignored "-Wpedantic" 16 #endif 17 #include <infiniband/verbs.h> 18 #ifdef PEDANTIC 19 #pragma GCC diagnostic error "-Wpedantic" 20 #endif 21 22 #include <rte_common.h> 23 #include <rte_ether.h> 24 #include <rte_ethdev_driver.h> 25 #include <rte_flow.h> 26 #include <rte_flow_driver.h> 27 #include <rte_malloc.h> 28 #include <rte_ip.h> 29 30 #include "mlx5.h" 31 #include "mlx5_defs.h" 32 #include "mlx5_flow.h" 33 #include "mlx5_glue.h" 34 #include "mlx5_prm.h" 35 #include "mlx5_rxtx.h" 36 37 /* Dev ops structure defined in mlx5.c */ 38 extern const struct eth_dev_ops mlx5_dev_ops; 39 extern const struct eth_dev_ops mlx5_dev_ops_isolate; 40 41 /** Device flow drivers. */ 42 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 43 extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops; 44 #endif 45 extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops; 46 47 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops; 48 49 const struct mlx5_flow_driver_ops *flow_drv_ops[] = { 50 [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops, 51 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 52 [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops, 53 #endif 54 [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops, 55 [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops 56 }; 57 58 enum mlx5_expansion { 59 MLX5_EXPANSION_ROOT, 60 MLX5_EXPANSION_ROOT_OUTER, 61 MLX5_EXPANSION_ROOT_ETH_VLAN, 62 MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN, 63 MLX5_EXPANSION_OUTER_ETH, 64 MLX5_EXPANSION_OUTER_ETH_VLAN, 65 MLX5_EXPANSION_OUTER_VLAN, 66 MLX5_EXPANSION_OUTER_IPV4, 67 MLX5_EXPANSION_OUTER_IPV4_UDP, 68 MLX5_EXPANSION_OUTER_IPV4_TCP, 69 MLX5_EXPANSION_OUTER_IPV6, 70 MLX5_EXPANSION_OUTER_IPV6_UDP, 71 MLX5_EXPANSION_OUTER_IPV6_TCP, 72 MLX5_EXPANSION_VXLAN, 73 MLX5_EXPANSION_VXLAN_GPE, 74 MLX5_EXPANSION_GRE, 75 MLX5_EXPANSION_MPLS, 76 MLX5_EXPANSION_ETH, 77 MLX5_EXPANSION_ETH_VLAN, 78 MLX5_EXPANSION_VLAN, 79 MLX5_EXPANSION_IPV4, 80 MLX5_EXPANSION_IPV4_UDP, 81 MLX5_EXPANSION_IPV4_TCP, 82 MLX5_EXPANSION_IPV6, 83 MLX5_EXPANSION_IPV6_UDP, 84 MLX5_EXPANSION_IPV6_TCP, 85 }; 86 87 /** Supported expansion of items. 
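 *
 * This graph drives the RSS expansion of user patterns: starting from one
 * of the MLX5_EXPANSION_ROOT* nodes, a pattern may be completed with the
 * missing L3/L4/tunnel items so that every protocol requested by the RSS
 * hash types gets a dedicated flow rule.
 *
 * Illustrative example (a sketch based on the node definitions below, not
 * an exhaustive expansion): with ETH_RSS_UDP requested, the pattern
 * @code
 * eth / ipv4 / end
 * @endcode
 * may be expanded, following MLX5_EXPANSION_IPV4 ->
 * MLX5_EXPANSION_IPV4_UDP, with the additional pattern
 * @code
 * eth / ipv4 / udp / end
 * @endcode
 * so that the UDP hash fields can be programmed on the resulting queues.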
*/ 88 static const struct rte_flow_expand_node mlx5_support_expansion[] = { 89 [MLX5_EXPANSION_ROOT] = { 90 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, 91 MLX5_EXPANSION_IPV4, 92 MLX5_EXPANSION_IPV6), 93 .type = RTE_FLOW_ITEM_TYPE_END, 94 }, 95 [MLX5_EXPANSION_ROOT_OUTER] = { 96 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH, 97 MLX5_EXPANSION_OUTER_IPV4, 98 MLX5_EXPANSION_OUTER_IPV6), 99 .type = RTE_FLOW_ITEM_TYPE_END, 100 }, 101 [MLX5_EXPANSION_ROOT_ETH_VLAN] = { 102 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN), 103 .type = RTE_FLOW_ITEM_TYPE_END, 104 }, 105 [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = { 106 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN), 107 .type = RTE_FLOW_ITEM_TYPE_END, 108 }, 109 [MLX5_EXPANSION_OUTER_ETH] = { 110 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, 111 MLX5_EXPANSION_OUTER_IPV6, 112 MLX5_EXPANSION_MPLS), 113 .type = RTE_FLOW_ITEM_TYPE_ETH, 114 .rss_types = 0, 115 }, 116 [MLX5_EXPANSION_OUTER_ETH_VLAN] = { 117 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN), 118 .type = RTE_FLOW_ITEM_TYPE_ETH, 119 .rss_types = 0, 120 }, 121 [MLX5_EXPANSION_OUTER_VLAN] = { 122 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, 123 MLX5_EXPANSION_OUTER_IPV6), 124 .type = RTE_FLOW_ITEM_TYPE_VLAN, 125 }, 126 [MLX5_EXPANSION_OUTER_IPV4] = { 127 .next = RTE_FLOW_EXPAND_RSS_NEXT 128 (MLX5_EXPANSION_OUTER_IPV4_UDP, 129 MLX5_EXPANSION_OUTER_IPV4_TCP, 130 MLX5_EXPANSION_GRE, 131 MLX5_EXPANSION_IPV4, 132 MLX5_EXPANSION_IPV6), 133 .type = RTE_FLOW_ITEM_TYPE_IPV4, 134 .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | 135 ETH_RSS_NONFRAG_IPV4_OTHER, 136 }, 137 [MLX5_EXPANSION_OUTER_IPV4_UDP] = { 138 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, 139 MLX5_EXPANSION_VXLAN_GPE), 140 .type = RTE_FLOW_ITEM_TYPE_UDP, 141 .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, 142 }, 143 [MLX5_EXPANSION_OUTER_IPV4_TCP] = { 144 .type = RTE_FLOW_ITEM_TYPE_TCP, 145 .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, 146 }, 147 [MLX5_EXPANSION_OUTER_IPV6] = { 148 .next = RTE_FLOW_EXPAND_RSS_NEXT 149 (MLX5_EXPANSION_OUTER_IPV6_UDP, 150 MLX5_EXPANSION_OUTER_IPV6_TCP, 151 MLX5_EXPANSION_IPV4, 152 MLX5_EXPANSION_IPV6), 153 .type = RTE_FLOW_ITEM_TYPE_IPV6, 154 .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | 155 ETH_RSS_NONFRAG_IPV6_OTHER, 156 }, 157 [MLX5_EXPANSION_OUTER_IPV6_UDP] = { 158 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, 159 MLX5_EXPANSION_VXLAN_GPE), 160 .type = RTE_FLOW_ITEM_TYPE_UDP, 161 .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, 162 }, 163 [MLX5_EXPANSION_OUTER_IPV6_TCP] = { 164 .type = RTE_FLOW_ITEM_TYPE_TCP, 165 .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, 166 }, 167 [MLX5_EXPANSION_VXLAN] = { 168 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH), 169 .type = RTE_FLOW_ITEM_TYPE_VXLAN, 170 }, 171 [MLX5_EXPANSION_VXLAN_GPE] = { 172 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, 173 MLX5_EXPANSION_IPV4, 174 MLX5_EXPANSION_IPV6), 175 .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE, 176 }, 177 [MLX5_EXPANSION_GRE] = { 178 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4), 179 .type = RTE_FLOW_ITEM_TYPE_GRE, 180 }, 181 [MLX5_EXPANSION_MPLS] = { 182 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 183 MLX5_EXPANSION_IPV6), 184 .type = RTE_FLOW_ITEM_TYPE_MPLS, 185 }, 186 [MLX5_EXPANSION_ETH] = { 187 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 188 MLX5_EXPANSION_IPV6), 189 .type = RTE_FLOW_ITEM_TYPE_ETH, 190 }, 191 [MLX5_EXPANSION_ETH_VLAN] = { 192 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN), 193 .type = 
RTE_FLOW_ITEM_TYPE_ETH, 194 }, 195 [MLX5_EXPANSION_VLAN] = { 196 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 197 MLX5_EXPANSION_IPV6), 198 .type = RTE_FLOW_ITEM_TYPE_VLAN, 199 }, 200 [MLX5_EXPANSION_IPV4] = { 201 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP, 202 MLX5_EXPANSION_IPV4_TCP), 203 .type = RTE_FLOW_ITEM_TYPE_IPV4, 204 .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | 205 ETH_RSS_NONFRAG_IPV4_OTHER, 206 }, 207 [MLX5_EXPANSION_IPV4_UDP] = { 208 .type = RTE_FLOW_ITEM_TYPE_UDP, 209 .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, 210 }, 211 [MLX5_EXPANSION_IPV4_TCP] = { 212 .type = RTE_FLOW_ITEM_TYPE_TCP, 213 .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, 214 }, 215 [MLX5_EXPANSION_IPV6] = { 216 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP, 217 MLX5_EXPANSION_IPV6_TCP), 218 .type = RTE_FLOW_ITEM_TYPE_IPV6, 219 .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | 220 ETH_RSS_NONFRAG_IPV6_OTHER, 221 }, 222 [MLX5_EXPANSION_IPV6_UDP] = { 223 .type = RTE_FLOW_ITEM_TYPE_UDP, 224 .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, 225 }, 226 [MLX5_EXPANSION_IPV6_TCP] = { 227 .type = RTE_FLOW_ITEM_TYPE_TCP, 228 .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, 229 }, 230 }; 231 232 static const struct rte_flow_ops mlx5_flow_ops = { 233 .validate = mlx5_flow_validate, 234 .create = mlx5_flow_create, 235 .destroy = mlx5_flow_destroy, 236 .flush = mlx5_flow_flush, 237 .isolate = mlx5_flow_isolate, 238 .query = mlx5_flow_query, 239 }; 240 241 /* Convert FDIR request to Generic flow. */ 242 struct mlx5_fdir { 243 struct rte_flow_attr attr; 244 struct rte_flow_item items[4]; 245 struct rte_flow_item_eth l2; 246 struct rte_flow_item_eth l2_mask; 247 union { 248 struct rte_flow_item_ipv4 ipv4; 249 struct rte_flow_item_ipv6 ipv6; 250 } l3; 251 union { 252 struct rte_flow_item_ipv4 ipv4; 253 struct rte_flow_item_ipv6 ipv6; 254 } l3_mask; 255 union { 256 struct rte_flow_item_udp udp; 257 struct rte_flow_item_tcp tcp; 258 } l4; 259 union { 260 struct rte_flow_item_udp udp; 261 struct rte_flow_item_tcp tcp; 262 } l4_mask; 263 struct rte_flow_action actions[2]; 264 struct rte_flow_action_queue queue; 265 }; 266 267 /* Map of Verbs to Flow priority with 8 Verbs priorities. */ 268 static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = { 269 { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 }, 270 }; 271 272 /* Map of Verbs to Flow priority with 16 Verbs priorities. */ 273 static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = { 274 { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 }, 275 { 9, 10, 11 }, { 12, 13, 14 }, 276 }; 277 278 /* Tunnel information. */ 279 struct mlx5_flow_tunnel_info { 280 uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */ 281 uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). 
 */
};

static struct mlx5_flow_tunnel_info tunnels_info[] = {
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GRE,
		.ptype = RTE_PTYPE_TUNNEL_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_NVGRE,
		.ptype = RTE_PTYPE_TUNNEL_NVGRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPIP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
};

enum mlx5_feature_name {
	MLX5_HAIRPIN_RX,
	MLX5_HAIRPIN_TX,
	MLX5_APPLICATION,
};

/**
 * Translate tag ID to register.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] feature
 *   The feature that requests the register.
 * @param[in] id
 *   The requested register ID.
 * @param[out] error
 *   Error description in case of failure.
 *
 * @return
 *   The requested register on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
__rte_unused
static enum modify_reg flow_get_reg_id(struct rte_eth_dev *dev,
				       enum mlx5_feature_name feature,
				       uint32_t id,
				       struct rte_flow_error *error)
{
	static enum modify_reg id2reg[] = {
		[0] = REG_A,
		[1] = REG_C_2,
		[2] = REG_C_3,
		[3] = REG_C_4,
		[4] = REG_B,
	};

	dev = (void *)dev;
	switch (feature) {
	case MLX5_HAIRPIN_RX:
		return REG_B;
	case MLX5_HAIRPIN_TX:
		return REG_A;
	case MLX5_APPLICATION:
		if (id > 4)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "invalid tag id");
		return id2reg[id];
	}
	return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				  NULL, "invalid feature name");
}

/**
 * Discover the maximum number of flow priorities available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
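 *
 * Usage sketch (illustrative; the actual call site is in the device probe
 * path, outside this file):
 * @code
 * int prio = mlx5_flow_discover_priorities(dev);
 *
 * if (prio < 0)
 *         return prio;
 * priv->config.flow_prio = prio;
 * @endcode
 * A negative value means rte_errno is already set; otherwise the result is
 * 3 or 5, matching priority_map_3/priority_map_5 above, and is expected to
 * end up in priv->config.flow_prio, which mlx5_flow_adjust_priority()
 * consumes.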
380 */ 381 int 382 mlx5_flow_discover_priorities(struct rte_eth_dev *dev) 383 { 384 struct mlx5_priv *priv = dev->data->dev_private; 385 struct { 386 struct ibv_flow_attr attr; 387 struct ibv_flow_spec_eth eth; 388 struct ibv_flow_spec_action_drop drop; 389 } flow_attr = { 390 .attr = { 391 .num_of_specs = 2, 392 .port = (uint8_t)priv->ibv_port, 393 }, 394 .eth = { 395 .type = IBV_FLOW_SPEC_ETH, 396 .size = sizeof(struct ibv_flow_spec_eth), 397 }, 398 .drop = { 399 .size = sizeof(struct ibv_flow_spec_action_drop), 400 .type = IBV_FLOW_SPEC_ACTION_DROP, 401 }, 402 }; 403 struct ibv_flow *flow; 404 struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev); 405 uint16_t vprio[] = { 8, 16 }; 406 int i; 407 int priority = 0; 408 409 if (!drop) { 410 rte_errno = ENOTSUP; 411 return -rte_errno; 412 } 413 for (i = 0; i != RTE_DIM(vprio); i++) { 414 flow_attr.attr.priority = vprio[i] - 1; 415 flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr); 416 if (!flow) 417 break; 418 claim_zero(mlx5_glue->destroy_flow(flow)); 419 priority = vprio[i]; 420 } 421 mlx5_hrxq_drop_release(dev); 422 switch (priority) { 423 case 8: 424 priority = RTE_DIM(priority_map_3); 425 break; 426 case 16: 427 priority = RTE_DIM(priority_map_5); 428 break; 429 default: 430 rte_errno = ENOTSUP; 431 DRV_LOG(ERR, 432 "port %u verbs maximum priority: %d expected 8/16", 433 dev->data->port_id, priority); 434 return -rte_errno; 435 } 436 DRV_LOG(INFO, "port %u flow maximum priority: %d", 437 dev->data->port_id, priority); 438 return priority; 439 } 440 441 /** 442 * Adjust flow priority based on the highest layer and the request priority. 443 * 444 * @param[in] dev 445 * Pointer to the Ethernet device structure. 446 * @param[in] priority 447 * The rule base priority. 448 * @param[in] subpriority 449 * The priority based on the items. 450 * 451 * @return 452 * The new priority. 453 */ 454 uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, 455 uint32_t subpriority) 456 { 457 uint32_t res = 0; 458 struct mlx5_priv *priv = dev->data->dev_private; 459 460 switch (priv->config.flow_prio) { 461 case RTE_DIM(priority_map_3): 462 res = priority_map_3[priority][subpriority]; 463 break; 464 case RTE_DIM(priority_map_5): 465 res = priority_map_5[priority][subpriority]; 466 break; 467 } 468 return res; 469 } 470 471 /** 472 * Verify the @p item specifications (spec, last, mask) are compatible with the 473 * NIC capabilities. 474 * 475 * @param[in] item 476 * Item specification. 477 * @param[in] mask 478 * @p item->mask or flow default bit-masks. 479 * @param[in] nic_mask 480 * Bit-masks covering supported fields by the NIC to compare with user mask. 481 * @param[in] size 482 * Bit-masks size in bytes. 483 * @param[out] error 484 * Pointer to error structure. 485 * 486 * @return 487 * 0 on success, a negative errno value otherwise and rte_errno is set. 
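 *
 * Typical usage, mirroring the item validators below (the UDP item is used
 * here only as an example):
 * @code
 * if (!mask)
 *         mask = &rte_flow_item_udp_mask;
 * ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
 *                                 (const uint8_t *)&rte_flow_item_udp_mask,
 *                                 sizeof(struct rte_flow_item_udp), error);
 * if (ret < 0)
 *         return ret;
 * @endcode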
488 */ 489 int 490 mlx5_flow_item_acceptable(const struct rte_flow_item *item, 491 const uint8_t *mask, 492 const uint8_t *nic_mask, 493 unsigned int size, 494 struct rte_flow_error *error) 495 { 496 unsigned int i; 497 498 assert(nic_mask); 499 for (i = 0; i < size; ++i) 500 if ((nic_mask[i] | mask[i]) != nic_mask[i]) 501 return rte_flow_error_set(error, ENOTSUP, 502 RTE_FLOW_ERROR_TYPE_ITEM, 503 item, 504 "mask enables non supported" 505 " bits"); 506 if (!item->spec && (item->mask || item->last)) 507 return rte_flow_error_set(error, EINVAL, 508 RTE_FLOW_ERROR_TYPE_ITEM, item, 509 "mask/last without a spec is not" 510 " supported"); 511 if (item->spec && item->last) { 512 uint8_t spec[size]; 513 uint8_t last[size]; 514 unsigned int i; 515 int ret; 516 517 for (i = 0; i < size; ++i) { 518 spec[i] = ((const uint8_t *)item->spec)[i] & mask[i]; 519 last[i] = ((const uint8_t *)item->last)[i] & mask[i]; 520 } 521 ret = memcmp(spec, last, size); 522 if (ret != 0) 523 return rte_flow_error_set(error, EINVAL, 524 RTE_FLOW_ERROR_TYPE_ITEM, 525 item, 526 "range is not valid"); 527 } 528 return 0; 529 } 530 531 /** 532 * Adjust the hash fields according to the @p flow information. 533 * 534 * @param[in] dev_flow. 535 * Pointer to the mlx5_flow. 536 * @param[in] tunnel 537 * 1 when the hash field is for a tunnel item. 538 * @param[in] layer_types 539 * ETH_RSS_* types. 540 * @param[in] hash_fields 541 * Item hash fields. 542 * 543 * @return 544 * The hash fields that should be used. 545 */ 546 uint64_t 547 mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow, 548 int tunnel __rte_unused, uint64_t layer_types, 549 uint64_t hash_fields) 550 { 551 struct rte_flow *flow = dev_flow->flow; 552 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 553 int rss_request_inner = flow->rss.level >= 2; 554 555 /* Check RSS hash level for tunnel. */ 556 if (tunnel && rss_request_inner) 557 hash_fields |= IBV_RX_HASH_INNER; 558 else if (tunnel || rss_request_inner) 559 return 0; 560 #endif 561 /* Check if requested layer matches RSS hash fields. */ 562 if (!(flow->rss.types & layer_types)) 563 return 0; 564 return hash_fields; 565 } 566 567 /** 568 * Lookup and set the ptype in the data Rx part. A single Ptype can be used, 569 * if several tunnel rules are used on this queue, the tunnel ptype will be 570 * cleared. 571 * 572 * @param rxq_ctrl 573 * Rx queue to update. 574 */ 575 static void 576 flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) 577 { 578 unsigned int i; 579 uint32_t tunnel_ptype = 0; 580 581 /* Look up for the ptype to use. */ 582 for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) { 583 if (!rxq_ctrl->flow_tunnels_n[i]) 584 continue; 585 if (!tunnel_ptype) { 586 tunnel_ptype = tunnels_info[i].ptype; 587 } else { 588 tunnel_ptype = 0; 589 break; 590 } 591 } 592 rxq_ctrl->rxq.tunnel = tunnel_ptype; 593 } 594 595 /** 596 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the devive 597 * flow. 598 * 599 * @param[in] dev 600 * Pointer to the Ethernet device structure. 601 * @param[in] dev_flow 602 * Pointer to device flow structure. 
603 */ 604 static void 605 flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) 606 { 607 struct mlx5_priv *priv = dev->data->dev_private; 608 struct rte_flow *flow = dev_flow->flow; 609 const int mark = !!(flow->actions & 610 (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); 611 const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL); 612 unsigned int i; 613 614 for (i = 0; i != flow->rss.queue_num; ++i) { 615 int idx = (*flow->queue)[i]; 616 struct mlx5_rxq_ctrl *rxq_ctrl = 617 container_of((*priv->rxqs)[idx], 618 struct mlx5_rxq_ctrl, rxq); 619 620 if (mark) { 621 rxq_ctrl->rxq.mark = 1; 622 rxq_ctrl->flow_mark_n++; 623 } 624 if (tunnel) { 625 unsigned int j; 626 627 /* Increase the counter matching the flow. */ 628 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { 629 if ((tunnels_info[j].tunnel & 630 dev_flow->layers) == 631 tunnels_info[j].tunnel) { 632 rxq_ctrl->flow_tunnels_n[j]++; 633 break; 634 } 635 } 636 flow_rxq_tunnel_ptype_update(rxq_ctrl); 637 } 638 } 639 } 640 641 /** 642 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow 643 * 644 * @param[in] dev 645 * Pointer to the Ethernet device structure. 646 * @param[in] flow 647 * Pointer to flow structure. 648 */ 649 static void 650 flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) 651 { 652 struct mlx5_flow *dev_flow; 653 654 LIST_FOREACH(dev_flow, &flow->dev_flows, next) 655 flow_drv_rxq_flags_set(dev, dev_flow); 656 } 657 658 /** 659 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the 660 * device flow if no other flow uses it with the same kind of request. 661 * 662 * @param dev 663 * Pointer to Ethernet device. 664 * @param[in] dev_flow 665 * Pointer to the device flow. 666 */ 667 static void 668 flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) 669 { 670 struct mlx5_priv *priv = dev->data->dev_private; 671 struct rte_flow *flow = dev_flow->flow; 672 const int mark = !!(flow->actions & 673 (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); 674 const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL); 675 unsigned int i; 676 677 assert(dev->data->dev_started); 678 for (i = 0; i != flow->rss.queue_num; ++i) { 679 int idx = (*flow->queue)[i]; 680 struct mlx5_rxq_ctrl *rxq_ctrl = 681 container_of((*priv->rxqs)[idx], 682 struct mlx5_rxq_ctrl, rxq); 683 684 if (mark) { 685 rxq_ctrl->flow_mark_n--; 686 rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n; 687 } 688 if (tunnel) { 689 unsigned int j; 690 691 /* Decrease the counter matching the flow. */ 692 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { 693 if ((tunnels_info[j].tunnel & 694 dev_flow->layers) == 695 tunnels_info[j].tunnel) { 696 rxq_ctrl->flow_tunnels_n[j]--; 697 break; 698 } 699 } 700 flow_rxq_tunnel_ptype_update(rxq_ctrl); 701 } 702 } 703 } 704 705 /** 706 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the 707 * @p flow if no other flow uses it with the same kind of request. 708 * 709 * @param dev 710 * Pointer to Ethernet device. 711 * @param[in] flow 712 * Pointer to the flow. 713 */ 714 static void 715 flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) 716 { 717 struct mlx5_flow *dev_flow; 718 719 LIST_FOREACH(dev_flow, &flow->dev_flows, next) 720 flow_drv_rxq_flags_trim(dev, dev_flow); 721 } 722 723 /** 724 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues. 725 * 726 * @param dev 727 * Pointer to Ethernet device. 
728 */ 729 static void 730 flow_rxq_flags_clear(struct rte_eth_dev *dev) 731 { 732 struct mlx5_priv *priv = dev->data->dev_private; 733 unsigned int i; 734 735 for (i = 0; i != priv->rxqs_n; ++i) { 736 struct mlx5_rxq_ctrl *rxq_ctrl; 737 unsigned int j; 738 739 if (!(*priv->rxqs)[i]) 740 continue; 741 rxq_ctrl = container_of((*priv->rxqs)[i], 742 struct mlx5_rxq_ctrl, rxq); 743 rxq_ctrl->flow_mark_n = 0; 744 rxq_ctrl->rxq.mark = 0; 745 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) 746 rxq_ctrl->flow_tunnels_n[j] = 0; 747 rxq_ctrl->rxq.tunnel = 0; 748 } 749 } 750 751 /* 752 * return a pointer to the desired action in the list of actions. 753 * 754 * @param[in] actions 755 * The list of actions to search the action in. 756 * @param[in] action 757 * The action to find. 758 * 759 * @return 760 * Pointer to the action in the list, if found. NULL otherwise. 761 */ 762 const struct rte_flow_action * 763 mlx5_flow_find_action(const struct rte_flow_action *actions, 764 enum rte_flow_action_type action) 765 { 766 if (actions == NULL) 767 return NULL; 768 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) 769 if (actions->type == action) 770 return actions; 771 return NULL; 772 } 773 774 /* 775 * Validate the flag action. 776 * 777 * @param[in] action_flags 778 * Bit-fields that holds the actions detected until now. 779 * @param[in] attr 780 * Attributes of flow that includes this action. 781 * @param[out] error 782 * Pointer to error structure. 783 * 784 * @return 785 * 0 on success, a negative errno value otherwise and rte_errno is set. 786 */ 787 int 788 mlx5_flow_validate_action_flag(uint64_t action_flags, 789 const struct rte_flow_attr *attr, 790 struct rte_flow_error *error) 791 { 792 793 if (action_flags & MLX5_FLOW_ACTION_DROP) 794 return rte_flow_error_set(error, EINVAL, 795 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 796 "can't drop and flag in same flow"); 797 if (action_flags & MLX5_FLOW_ACTION_MARK) 798 return rte_flow_error_set(error, EINVAL, 799 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 800 "can't mark and flag in same flow"); 801 if (action_flags & MLX5_FLOW_ACTION_FLAG) 802 return rte_flow_error_set(error, EINVAL, 803 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 804 "can't have 2 flag" 805 " actions in same flow"); 806 if (attr->egress) 807 return rte_flow_error_set(error, ENOTSUP, 808 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 809 "flag action not supported for " 810 "egress"); 811 return 0; 812 } 813 814 /* 815 * Validate the mark action. 816 * 817 * @param[in] action 818 * Pointer to the queue action. 819 * @param[in] action_flags 820 * Bit-fields that holds the actions detected until now. 821 * @param[in] attr 822 * Attributes of flow that includes this action. 823 * @param[out] error 824 * Pointer to error structure. 825 * 826 * @return 827 * 0 on success, a negative errno value otherwise and rte_errno is set. 
828 */ 829 int 830 mlx5_flow_validate_action_mark(const struct rte_flow_action *action, 831 uint64_t action_flags, 832 const struct rte_flow_attr *attr, 833 struct rte_flow_error *error) 834 { 835 const struct rte_flow_action_mark *mark = action->conf; 836 837 if (!mark) 838 return rte_flow_error_set(error, EINVAL, 839 RTE_FLOW_ERROR_TYPE_ACTION, 840 action, 841 "configuration cannot be null"); 842 if (mark->id >= MLX5_FLOW_MARK_MAX) 843 return rte_flow_error_set(error, EINVAL, 844 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 845 &mark->id, 846 "mark id must in 0 <= id < " 847 RTE_STR(MLX5_FLOW_MARK_MAX)); 848 if (action_flags & MLX5_FLOW_ACTION_DROP) 849 return rte_flow_error_set(error, EINVAL, 850 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 851 "can't drop and mark in same flow"); 852 if (action_flags & MLX5_FLOW_ACTION_FLAG) 853 return rte_flow_error_set(error, EINVAL, 854 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 855 "can't flag and mark in same flow"); 856 if (action_flags & MLX5_FLOW_ACTION_MARK) 857 return rte_flow_error_set(error, EINVAL, 858 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 859 "can't have 2 mark actions in same" 860 " flow"); 861 if (attr->egress) 862 return rte_flow_error_set(error, ENOTSUP, 863 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 864 "mark action not supported for " 865 "egress"); 866 return 0; 867 } 868 869 /* 870 * Validate the drop action. 871 * 872 * @param[in] action_flags 873 * Bit-fields that holds the actions detected until now. 874 * @param[in] attr 875 * Attributes of flow that includes this action. 876 * @param[out] error 877 * Pointer to error structure. 878 * 879 * @return 880 * 0 on success, a negative errno value otherwise and rte_errno is set. 881 */ 882 int 883 mlx5_flow_validate_action_drop(uint64_t action_flags, 884 const struct rte_flow_attr *attr, 885 struct rte_flow_error *error) 886 { 887 if (action_flags & MLX5_FLOW_ACTION_FLAG) 888 return rte_flow_error_set(error, EINVAL, 889 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 890 "can't drop and flag in same flow"); 891 if (action_flags & MLX5_FLOW_ACTION_MARK) 892 return rte_flow_error_set(error, EINVAL, 893 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 894 "can't drop and mark in same flow"); 895 if (action_flags & (MLX5_FLOW_FATE_ACTIONS | 896 MLX5_FLOW_FATE_ESWITCH_ACTIONS)) 897 return rte_flow_error_set(error, EINVAL, 898 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 899 "can't have 2 fate actions in" 900 " same flow"); 901 if (attr->egress) 902 return rte_flow_error_set(error, ENOTSUP, 903 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 904 "drop action not supported for " 905 "egress"); 906 return 0; 907 } 908 909 /* 910 * Validate the queue action. 911 * 912 * @param[in] action 913 * Pointer to the queue action. 914 * @param[in] action_flags 915 * Bit-fields that holds the actions detected until now. 916 * @param[in] dev 917 * Pointer to the Ethernet device structure. 918 * @param[in] attr 919 * Attributes of flow that includes this action. 920 * @param[out] error 921 * Pointer to error structure. 922 * 923 * @return 924 * 0 on success, a negative errno value otherwise and rte_errno is set. 
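 *
 * Sketch of the expected caller pattern (illustrative; it assumes the
 * MLX5_FLOW_ACTION_QUEUE bit from mlx5_flow.h and a per-flow action_flags
 * accumulator):
 * @code
 * case RTE_FLOW_ACTION_TYPE_QUEUE:
 *         ret = mlx5_flow_validate_action_queue(actions, action_flags,
 *                                               dev, attr, error);
 *         if (ret < 0)
 *                 return ret;
 *         action_flags |= MLX5_FLOW_ACTION_QUEUE;
 *         break;
 * @endcode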
925 */ 926 int 927 mlx5_flow_validate_action_queue(const struct rte_flow_action *action, 928 uint64_t action_flags, 929 struct rte_eth_dev *dev, 930 const struct rte_flow_attr *attr, 931 struct rte_flow_error *error) 932 { 933 struct mlx5_priv *priv = dev->data->dev_private; 934 const struct rte_flow_action_queue *queue = action->conf; 935 936 if (action_flags & MLX5_FLOW_FATE_ACTIONS) 937 return rte_flow_error_set(error, EINVAL, 938 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 939 "can't have 2 fate actions in" 940 " same flow"); 941 if (!priv->rxqs_n) 942 return rte_flow_error_set(error, EINVAL, 943 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 944 NULL, "No Rx queues configured"); 945 if (queue->index >= priv->rxqs_n) 946 return rte_flow_error_set(error, EINVAL, 947 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 948 &queue->index, 949 "queue index out of range"); 950 if (!(*priv->rxqs)[queue->index]) 951 return rte_flow_error_set(error, EINVAL, 952 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 953 &queue->index, 954 "queue is not configured"); 955 if (attr->egress) 956 return rte_flow_error_set(error, ENOTSUP, 957 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 958 "queue action not supported for " 959 "egress"); 960 return 0; 961 } 962 963 /* 964 * Validate the rss action. 965 * 966 * @param[in] action 967 * Pointer to the queue action. 968 * @param[in] action_flags 969 * Bit-fields that holds the actions detected until now. 970 * @param[in] dev 971 * Pointer to the Ethernet device structure. 972 * @param[in] attr 973 * Attributes of flow that includes this action. 974 * @param[in] item_flags 975 * Items that were detected. 976 * @param[out] error 977 * Pointer to error structure. 978 * 979 * @return 980 * 0 on success, a negative errno value otherwise and rte_errno is set. 981 */ 982 int 983 mlx5_flow_validate_action_rss(const struct rte_flow_action *action, 984 uint64_t action_flags, 985 struct rte_eth_dev *dev, 986 const struct rte_flow_attr *attr, 987 uint64_t item_flags, 988 struct rte_flow_error *error) 989 { 990 struct mlx5_priv *priv = dev->data->dev_private; 991 const struct rte_flow_action_rss *rss = action->conf; 992 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 993 unsigned int i; 994 995 if (action_flags & MLX5_FLOW_FATE_ACTIONS) 996 return rte_flow_error_set(error, EINVAL, 997 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 998 "can't have 2 fate actions" 999 " in same flow"); 1000 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT && 1001 rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) 1002 return rte_flow_error_set(error, ENOTSUP, 1003 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1004 &rss->func, 1005 "RSS hash function not supported"); 1006 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 1007 if (rss->level > 2) 1008 #else 1009 if (rss->level > 1) 1010 #endif 1011 return rte_flow_error_set(error, ENOTSUP, 1012 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1013 &rss->level, 1014 "tunnel RSS is not supported"); 1015 /* allow RSS key_len 0 in case of NULL (default) RSS key. 
*/ 1016 if (rss->key_len == 0 && rss->key != NULL) 1017 return rte_flow_error_set(error, ENOTSUP, 1018 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1019 &rss->key_len, 1020 "RSS hash key length 0"); 1021 if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN) 1022 return rte_flow_error_set(error, ENOTSUP, 1023 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1024 &rss->key_len, 1025 "RSS hash key too small"); 1026 if (rss->key_len > MLX5_RSS_HASH_KEY_LEN) 1027 return rte_flow_error_set(error, ENOTSUP, 1028 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1029 &rss->key_len, 1030 "RSS hash key too large"); 1031 if (rss->queue_num > priv->config.ind_table_max_size) 1032 return rte_flow_error_set(error, ENOTSUP, 1033 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1034 &rss->queue_num, 1035 "number of queues too large"); 1036 if (rss->types & MLX5_RSS_HF_MASK) 1037 return rte_flow_error_set(error, ENOTSUP, 1038 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1039 &rss->types, 1040 "some RSS protocols are not" 1041 " supported"); 1042 if (!priv->rxqs_n) 1043 return rte_flow_error_set(error, EINVAL, 1044 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1045 NULL, "No Rx queues configured"); 1046 if (!rss->queue_num) 1047 return rte_flow_error_set(error, EINVAL, 1048 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1049 NULL, "No queues configured"); 1050 for (i = 0; i != rss->queue_num; ++i) { 1051 if (!(*priv->rxqs)[rss->queue[i]]) 1052 return rte_flow_error_set 1053 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1054 &rss->queue[i], "queue is not configured"); 1055 } 1056 if (attr->egress) 1057 return rte_flow_error_set(error, ENOTSUP, 1058 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1059 "rss action not supported for " 1060 "egress"); 1061 if (rss->level > 1 && !tunnel) 1062 return rte_flow_error_set(error, EINVAL, 1063 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 1064 "inner RSS is not supported for " 1065 "non-tunnel flows"); 1066 return 0; 1067 } 1068 1069 /* 1070 * Validate the count action. 1071 * 1072 * @param[in] dev 1073 * Pointer to the Ethernet device structure. 1074 * @param[in] attr 1075 * Attributes of flow that includes this action. 1076 * @param[out] error 1077 * Pointer to error structure. 1078 * 1079 * @return 1080 * 0 on success, a negative errno value otherwise and rte_errno is set. 1081 */ 1082 int 1083 mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused, 1084 const struct rte_flow_attr *attr, 1085 struct rte_flow_error *error) 1086 { 1087 if (attr->egress) 1088 return rte_flow_error_set(error, ENOTSUP, 1089 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1090 "count action not supported for " 1091 "egress"); 1092 return 0; 1093 } 1094 1095 /** 1096 * Verify the @p attributes will be correctly understood by the NIC and store 1097 * them in the @p flow if everything is correct. 1098 * 1099 * @param[in] dev 1100 * Pointer to the Ethernet device structure. 1101 * @param[in] attributes 1102 * Pointer to flow attributes 1103 * @param[out] error 1104 * Pointer to error structure. 1105 * 1106 * @return 1107 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1108 */ 1109 int 1110 mlx5_flow_validate_attributes(struct rte_eth_dev *dev, 1111 const struct rte_flow_attr *attributes, 1112 struct rte_flow_error *error) 1113 { 1114 struct mlx5_priv *priv = dev->data->dev_private; 1115 uint32_t priority_max = priv->config.flow_prio - 1; 1116 1117 if (attributes->group) 1118 return rte_flow_error_set(error, ENOTSUP, 1119 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, 1120 NULL, "groups is not supported"); 1121 if (attributes->priority != MLX5_FLOW_PRIO_RSVD && 1122 attributes->priority >= priority_max) 1123 return rte_flow_error_set(error, ENOTSUP, 1124 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, 1125 NULL, "priority out of range"); 1126 if (attributes->egress) 1127 return rte_flow_error_set(error, ENOTSUP, 1128 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1129 "egress is not supported"); 1130 if (attributes->transfer && !priv->config.dv_esw_en) 1131 return rte_flow_error_set(error, ENOTSUP, 1132 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, 1133 NULL, "transfer is not supported"); 1134 if (!attributes->ingress) 1135 return rte_flow_error_set(error, EINVAL, 1136 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, 1137 NULL, 1138 "ingress attribute is mandatory"); 1139 return 0; 1140 } 1141 1142 /** 1143 * Validate ICMP6 item. 1144 * 1145 * @param[in] item 1146 * Item specification. 1147 * @param[in] item_flags 1148 * Bit-fields that holds the items detected until now. 1149 * @param[out] error 1150 * Pointer to error structure. 1151 * 1152 * @return 1153 * 0 on success, a negative errno value otherwise and rte_errno is set. 1154 */ 1155 int 1156 mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item, 1157 uint64_t item_flags, 1158 uint8_t target_protocol, 1159 struct rte_flow_error *error) 1160 { 1161 const struct rte_flow_item_icmp6 *mask = item->mask; 1162 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1163 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : 1164 MLX5_FLOW_LAYER_OUTER_L3_IPV6; 1165 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1166 MLX5_FLOW_LAYER_OUTER_L4; 1167 int ret; 1168 1169 if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6) 1170 return rte_flow_error_set(error, EINVAL, 1171 RTE_FLOW_ERROR_TYPE_ITEM, item, 1172 "protocol filtering not compatible" 1173 " with ICMP6 layer"); 1174 if (!(item_flags & l3m)) 1175 return rte_flow_error_set(error, EINVAL, 1176 RTE_FLOW_ERROR_TYPE_ITEM, item, 1177 "IPv6 is mandatory to filter on" 1178 " ICMP6"); 1179 if (item_flags & l4m) 1180 return rte_flow_error_set(error, EINVAL, 1181 RTE_FLOW_ERROR_TYPE_ITEM, item, 1182 "multiple L4 layers not supported"); 1183 if (!mask) 1184 mask = &rte_flow_item_icmp6_mask; 1185 ret = mlx5_flow_item_acceptable 1186 (item, (const uint8_t *)mask, 1187 (const uint8_t *)&rte_flow_item_icmp6_mask, 1188 sizeof(struct rte_flow_item_icmp6), error); 1189 if (ret < 0) 1190 return ret; 1191 return 0; 1192 } 1193 1194 /** 1195 * Validate ICMP item. 1196 * 1197 * @param[in] item 1198 * Item specification. 1199 * @param[in] item_flags 1200 * Bit-fields that holds the items detected until now. 1201 * @param[out] error 1202 * Pointer to error structure. 1203 * 1204 * @return 1205 * 0 on success, a negative errno value otherwise and rte_errno is set. 
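 *
 * For reference, a pattern accepted by this validator (testpmd flow
 * syntax, illustrative only):
 * @code
 * flow create 0 ingress pattern eth / ipv4 / icmp / end
 *      actions queue index 0 / end
 * @endcode
 * ICMP without a preceding IPv4 layer, or combined with another L4 item,
 * is rejected.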
1206 */ 1207 int 1208 mlx5_flow_validate_item_icmp(const struct rte_flow_item *item, 1209 uint64_t item_flags, 1210 uint8_t target_protocol, 1211 struct rte_flow_error *error) 1212 { 1213 const struct rte_flow_item_icmp *mask = item->mask; 1214 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1215 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : 1216 MLX5_FLOW_LAYER_OUTER_L3_IPV4; 1217 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1218 MLX5_FLOW_LAYER_OUTER_L4; 1219 int ret; 1220 1221 if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP) 1222 return rte_flow_error_set(error, EINVAL, 1223 RTE_FLOW_ERROR_TYPE_ITEM, item, 1224 "protocol filtering not compatible" 1225 " with ICMP layer"); 1226 if (!(item_flags & l3m)) 1227 return rte_flow_error_set(error, EINVAL, 1228 RTE_FLOW_ERROR_TYPE_ITEM, item, 1229 "IPv4 is mandatory to filter" 1230 " on ICMP"); 1231 if (item_flags & l4m) 1232 return rte_flow_error_set(error, EINVAL, 1233 RTE_FLOW_ERROR_TYPE_ITEM, item, 1234 "multiple L4 layers not supported"); 1235 if (!mask) 1236 mask = &rte_flow_item_icmp_mask; 1237 ret = mlx5_flow_item_acceptable 1238 (item, (const uint8_t *)mask, 1239 (const uint8_t *)&rte_flow_item_icmp_mask, 1240 sizeof(struct rte_flow_item_icmp), error); 1241 if (ret < 0) 1242 return ret; 1243 return 0; 1244 } 1245 1246 /** 1247 * Validate Ethernet item. 1248 * 1249 * @param[in] item 1250 * Item specification. 1251 * @param[in] item_flags 1252 * Bit-fields that holds the items detected until now. 1253 * @param[out] error 1254 * Pointer to error structure. 1255 * 1256 * @return 1257 * 0 on success, a negative errno value otherwise and rte_errno is set. 1258 */ 1259 int 1260 mlx5_flow_validate_item_eth(const struct rte_flow_item *item, 1261 uint64_t item_flags, 1262 struct rte_flow_error *error) 1263 { 1264 const struct rte_flow_item_eth *mask = item->mask; 1265 const struct rte_flow_item_eth nic_mask = { 1266 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", 1267 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", 1268 .type = RTE_BE16(0xffff), 1269 }; 1270 int ret; 1271 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1272 const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : 1273 MLX5_FLOW_LAYER_OUTER_L2; 1274 1275 if (item_flags & ethm) 1276 return rte_flow_error_set(error, ENOTSUP, 1277 RTE_FLOW_ERROR_TYPE_ITEM, item, 1278 "multiple L2 layers not supported"); 1279 if (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3)) 1280 return rte_flow_error_set(error, EINVAL, 1281 RTE_FLOW_ERROR_TYPE_ITEM, item, 1282 "inner L2 layer should not " 1283 "follow inner L3 layers"); 1284 if (!mask) 1285 mask = &rte_flow_item_eth_mask; 1286 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1287 (const uint8_t *)&nic_mask, 1288 sizeof(struct rte_flow_item_eth), 1289 error); 1290 return ret; 1291 } 1292 1293 /** 1294 * Validate VLAN item. 1295 * 1296 * @param[in] item 1297 * Item specification. 1298 * @param[in] item_flags 1299 * Bit-fields that holds the items detected until now. 1300 * @param[in] dev 1301 * Ethernet device flow is being created on. 1302 * @param[out] error 1303 * Pointer to error structure. 1304 * 1305 * @return 1306 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1307 */ 1308 int 1309 mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, 1310 uint64_t item_flags, 1311 struct rte_eth_dev *dev, 1312 struct rte_flow_error *error) 1313 { 1314 const struct rte_flow_item_vlan *spec = item->spec; 1315 const struct rte_flow_item_vlan *mask = item->mask; 1316 const struct rte_flow_item_vlan nic_mask = { 1317 .tci = RTE_BE16(UINT16_MAX), 1318 .inner_type = RTE_BE16(UINT16_MAX), 1319 }; 1320 uint16_t vlan_tag = 0; 1321 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1322 int ret; 1323 const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 | 1324 MLX5_FLOW_LAYER_INNER_L4) : 1325 (MLX5_FLOW_LAYER_OUTER_L3 | 1326 MLX5_FLOW_LAYER_OUTER_L4); 1327 const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN : 1328 MLX5_FLOW_LAYER_OUTER_VLAN; 1329 1330 const uint64_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : 1331 MLX5_FLOW_LAYER_OUTER_L2; 1332 if (item_flags & vlanm) 1333 return rte_flow_error_set(error, EINVAL, 1334 RTE_FLOW_ERROR_TYPE_ITEM, item, 1335 "multiple VLAN layers not supported"); 1336 else if ((item_flags & l34m) != 0) 1337 return rte_flow_error_set(error, EINVAL, 1338 RTE_FLOW_ERROR_TYPE_ITEM, item, 1339 "L2 layer cannot follow L3/L4 layer"); 1340 else if ((item_flags & l2m) == 0) 1341 return rte_flow_error_set(error, EINVAL, 1342 RTE_FLOW_ERROR_TYPE_ITEM, item, 1343 "no L2 layer before VLAN"); 1344 if (!mask) 1345 mask = &rte_flow_item_vlan_mask; 1346 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1347 (const uint8_t *)&nic_mask, 1348 sizeof(struct rte_flow_item_vlan), 1349 error); 1350 if (ret) 1351 return ret; 1352 if (!tunnel && mask->tci != RTE_BE16(0x0fff)) { 1353 struct mlx5_priv *priv = dev->data->dev_private; 1354 1355 if (priv->vmwa_context) { 1356 /* 1357 * Non-NULL context means we have a virtual machine 1358 * and SR-IOV enabled, we have to create VLAN interface 1359 * to make hypervisor to setup E-Switch vport 1360 * context correctly. We avoid creating the multiple 1361 * VLAN interfaces, so we cannot support VLAN tag mask. 1362 */ 1363 return rte_flow_error_set(error, EINVAL, 1364 RTE_FLOW_ERROR_TYPE_ITEM, 1365 item, 1366 "VLAN tag mask is not" 1367 " supported in virtual" 1368 " environment"); 1369 } 1370 } 1371 if (spec) { 1372 vlan_tag = spec->tci; 1373 vlan_tag &= mask->tci; 1374 } 1375 /* 1376 * From verbs perspective an empty VLAN is equivalent 1377 * to a packet without VLAN layer. 1378 */ 1379 if (!vlan_tag) 1380 return rte_flow_error_set(error, EINVAL, 1381 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, 1382 item->spec, 1383 "VLAN cannot be empty"); 1384 return 0; 1385 } 1386 1387 /** 1388 * Validate IPV4 item. 1389 * 1390 * @param[in] item 1391 * Item specification. 1392 * @param[in] item_flags 1393 * Bit-fields that holds the items detected until now. 1394 * @param[in] acc_mask 1395 * Acceptable mask, if NULL default internal default mask 1396 * will be used to check whether item fields are supported. 1397 * @param[out] error 1398 * Pointer to error structure. 1399 * 1400 * @return 1401 * 0 on success, a negative errno value otherwise and rte_errno is set. 
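 *
 * Hypothetical example of a caller narrowing the default mask through
 * @p acc_mask (only source/destination addresses may then be matched):
 * @code
 * static const struct rte_flow_item_ipv4 ipv4_acc_mask = {
 *         .hdr = {
 *                 .src_addr = RTE_BE32(0xffffffff),
 *                 .dst_addr = RTE_BE32(0xffffffff),
 *         },
 * };
 *
 * ret = mlx5_flow_validate_item_ipv4(item, item_flags, &ipv4_acc_mask,
 *                                    error);
 * @endcode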
1402 */ 1403 int 1404 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, 1405 uint64_t item_flags, 1406 const struct rte_flow_item_ipv4 *acc_mask, 1407 struct rte_flow_error *error) 1408 { 1409 const struct rte_flow_item_ipv4 *mask = item->mask; 1410 const struct rte_flow_item_ipv4 *spec = item->spec; 1411 const struct rte_flow_item_ipv4 nic_mask = { 1412 .hdr = { 1413 .src_addr = RTE_BE32(0xffffffff), 1414 .dst_addr = RTE_BE32(0xffffffff), 1415 .type_of_service = 0xff, 1416 .next_proto_id = 0xff, 1417 }, 1418 }; 1419 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1420 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1421 MLX5_FLOW_LAYER_OUTER_L3; 1422 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1423 MLX5_FLOW_LAYER_OUTER_L4; 1424 int ret; 1425 uint8_t next_proto = 0xFF; 1426 1427 if (item_flags & MLX5_FLOW_LAYER_IPIP) { 1428 if (mask && spec) 1429 next_proto = mask->hdr.next_proto_id & 1430 spec->hdr.next_proto_id; 1431 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) 1432 return rte_flow_error_set(error, EINVAL, 1433 RTE_FLOW_ERROR_TYPE_ITEM, 1434 item, 1435 "multiple tunnel " 1436 "not supported"); 1437 } 1438 if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) 1439 return rte_flow_error_set(error, EINVAL, 1440 RTE_FLOW_ERROR_TYPE_ITEM, item, 1441 "wrong tunnel type - IPv6 specified " 1442 "but IPv4 item provided"); 1443 if (item_flags & l3m) 1444 return rte_flow_error_set(error, ENOTSUP, 1445 RTE_FLOW_ERROR_TYPE_ITEM, item, 1446 "multiple L3 layers not supported"); 1447 else if (item_flags & l4m) 1448 return rte_flow_error_set(error, EINVAL, 1449 RTE_FLOW_ERROR_TYPE_ITEM, item, 1450 "L3 cannot follow an L4 layer."); 1451 else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && 1452 !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) 1453 return rte_flow_error_set(error, EINVAL, 1454 RTE_FLOW_ERROR_TYPE_ITEM, item, 1455 "L3 cannot follow an NVGRE layer."); 1456 else if (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L2)) 1457 return rte_flow_error_set(error, EINVAL, 1458 RTE_FLOW_ERROR_TYPE_ITEM, item, 1459 "no L2 layer before IPV4"); 1460 if (!mask) 1461 mask = &rte_flow_item_ipv4_mask; 1462 else if (mask->hdr.next_proto_id != 0 && 1463 mask->hdr.next_proto_id != 0xff) 1464 return rte_flow_error_set(error, EINVAL, 1465 RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask, 1466 "partial mask is not supported" 1467 " for protocol"); 1468 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1469 acc_mask ? (const uint8_t *)acc_mask 1470 : (const uint8_t *)&nic_mask, 1471 sizeof(struct rte_flow_item_ipv4), 1472 error); 1473 if (ret < 0) 1474 return ret; 1475 return 0; 1476 } 1477 1478 /** 1479 * Validate IPV6 item. 1480 * 1481 * @param[in] item 1482 * Item specification. 1483 * @param[in] item_flags 1484 * Bit-fields that holds the items detected until now. 1485 * @param[in] acc_mask 1486 * Acceptable mask, if NULL default internal default mask 1487 * will be used to check whether item fields are supported. 1488 * @param[out] error 1489 * Pointer to error structure. 1490 * 1491 * @return 1492 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1493 */ 1494 int 1495 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, 1496 uint64_t item_flags, 1497 const struct rte_flow_item_ipv6 *acc_mask, 1498 struct rte_flow_error *error) 1499 { 1500 const struct rte_flow_item_ipv6 *mask = item->mask; 1501 const struct rte_flow_item_ipv6 *spec = item->spec; 1502 const struct rte_flow_item_ipv6 nic_mask = { 1503 .hdr = { 1504 .src_addr = 1505 "\xff\xff\xff\xff\xff\xff\xff\xff" 1506 "\xff\xff\xff\xff\xff\xff\xff\xff", 1507 .dst_addr = 1508 "\xff\xff\xff\xff\xff\xff\xff\xff" 1509 "\xff\xff\xff\xff\xff\xff\xff\xff", 1510 .vtc_flow = RTE_BE32(0xffffffff), 1511 .proto = 0xff, 1512 .hop_limits = 0xff, 1513 }, 1514 }; 1515 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1516 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1517 MLX5_FLOW_LAYER_OUTER_L3; 1518 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1519 MLX5_FLOW_LAYER_OUTER_L4; 1520 int ret; 1521 uint8_t next_proto = 0xFF; 1522 1523 if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) { 1524 if (mask && spec) 1525 next_proto = mask->hdr.proto & spec->hdr.proto; 1526 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) 1527 return rte_flow_error_set(error, EINVAL, 1528 RTE_FLOW_ERROR_TYPE_ITEM, 1529 item, 1530 "multiple tunnel " 1531 "not supported"); 1532 } 1533 if (item_flags & MLX5_FLOW_LAYER_IPIP) 1534 return rte_flow_error_set(error, EINVAL, 1535 RTE_FLOW_ERROR_TYPE_ITEM, item, 1536 "wrong tunnel type - IPv4 specified " 1537 "but IPv6 item provided"); 1538 if (item_flags & l3m) 1539 return rte_flow_error_set(error, ENOTSUP, 1540 RTE_FLOW_ERROR_TYPE_ITEM, item, 1541 "multiple L3 layers not supported"); 1542 else if (item_flags & l4m) 1543 return rte_flow_error_set(error, EINVAL, 1544 RTE_FLOW_ERROR_TYPE_ITEM, item, 1545 "L3 cannot follow an L4 layer."); 1546 else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && 1547 !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) 1548 return rte_flow_error_set(error, EINVAL, 1549 RTE_FLOW_ERROR_TYPE_ITEM, item, 1550 "L3 cannot follow an NVGRE layer."); 1551 else if (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L2)) 1552 return rte_flow_error_set(error, EINVAL, 1553 RTE_FLOW_ERROR_TYPE_ITEM, item, 1554 "no L2 layer before IPV6"); 1555 if (!mask) 1556 mask = &rte_flow_item_ipv6_mask; 1557 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1558 acc_mask ? (const uint8_t *)acc_mask 1559 : (const uint8_t *)&nic_mask, 1560 sizeof(struct rte_flow_item_ipv6), 1561 error); 1562 if (ret < 0) 1563 return ret; 1564 return 0; 1565 } 1566 1567 /** 1568 * Validate UDP item. 1569 * 1570 * @param[in] item 1571 * Item specification. 1572 * @param[in] item_flags 1573 * Bit-fields that holds the items detected until now. 1574 * @param[in] target_protocol 1575 * The next protocol in the previous item. 1576 * @param[in] flow_mask 1577 * mlx5 flow-specific (DV, verbs, etc.) supported header fields mask. 1578 * @param[out] error 1579 * Pointer to error structure. 1580 * 1581 * @return 1582 * 0 on success, a negative errno value otherwise and rte_errno is set. 1583 */ 1584 int 1585 mlx5_flow_validate_item_udp(const struct rte_flow_item *item, 1586 uint64_t item_flags, 1587 uint8_t target_protocol, 1588 struct rte_flow_error *error) 1589 { 1590 const struct rte_flow_item_udp *mask = item->mask; 1591 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1592 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1593 MLX5_FLOW_LAYER_OUTER_L3; 1594 const uint64_t l4m = tunnel ? 
MLX5_FLOW_LAYER_INNER_L4 : 1595 MLX5_FLOW_LAYER_OUTER_L4; 1596 int ret; 1597 1598 if (target_protocol != 0xff && target_protocol != IPPROTO_UDP) 1599 return rte_flow_error_set(error, EINVAL, 1600 RTE_FLOW_ERROR_TYPE_ITEM, item, 1601 "protocol filtering not compatible" 1602 " with UDP layer"); 1603 if (!(item_flags & l3m)) 1604 return rte_flow_error_set(error, EINVAL, 1605 RTE_FLOW_ERROR_TYPE_ITEM, item, 1606 "L3 is mandatory to filter on L4"); 1607 if (item_flags & l4m) 1608 return rte_flow_error_set(error, EINVAL, 1609 RTE_FLOW_ERROR_TYPE_ITEM, item, 1610 "multiple L4 layers not supported"); 1611 if (!mask) 1612 mask = &rte_flow_item_udp_mask; 1613 ret = mlx5_flow_item_acceptable 1614 (item, (const uint8_t *)mask, 1615 (const uint8_t *)&rte_flow_item_udp_mask, 1616 sizeof(struct rte_flow_item_udp), error); 1617 if (ret < 0) 1618 return ret; 1619 return 0; 1620 } 1621 1622 /** 1623 * Validate TCP item. 1624 * 1625 * @param[in] item 1626 * Item specification. 1627 * @param[in] item_flags 1628 * Bit-fields that holds the items detected until now. 1629 * @param[in] target_protocol 1630 * The next protocol in the previous item. 1631 * @param[out] error 1632 * Pointer to error structure. 1633 * 1634 * @return 1635 * 0 on success, a negative errno value otherwise and rte_errno is set. 1636 */ 1637 int 1638 mlx5_flow_validate_item_tcp(const struct rte_flow_item *item, 1639 uint64_t item_flags, 1640 uint8_t target_protocol, 1641 const struct rte_flow_item_tcp *flow_mask, 1642 struct rte_flow_error *error) 1643 { 1644 const struct rte_flow_item_tcp *mask = item->mask; 1645 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1646 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1647 MLX5_FLOW_LAYER_OUTER_L3; 1648 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1649 MLX5_FLOW_LAYER_OUTER_L4; 1650 int ret; 1651 1652 assert(flow_mask); 1653 if (target_protocol != 0xff && target_protocol != IPPROTO_TCP) 1654 return rte_flow_error_set(error, EINVAL, 1655 RTE_FLOW_ERROR_TYPE_ITEM, item, 1656 "protocol filtering not compatible" 1657 " with TCP layer"); 1658 if (!(item_flags & l3m)) 1659 return rte_flow_error_set(error, EINVAL, 1660 RTE_FLOW_ERROR_TYPE_ITEM, item, 1661 "L3 is mandatory to filter on L4"); 1662 if (item_flags & l4m) 1663 return rte_flow_error_set(error, EINVAL, 1664 RTE_FLOW_ERROR_TYPE_ITEM, item, 1665 "multiple L4 layers not supported"); 1666 if (!mask) 1667 mask = &rte_flow_item_tcp_mask; 1668 ret = mlx5_flow_item_acceptable 1669 (item, (const uint8_t *)mask, 1670 (const uint8_t *)flow_mask, 1671 sizeof(struct rte_flow_item_tcp), error); 1672 if (ret < 0) 1673 return ret; 1674 return 0; 1675 } 1676 1677 /** 1678 * Validate VXLAN item. 1679 * 1680 * @param[in] item 1681 * Item specification. 1682 * @param[in] item_flags 1683 * Bit-fields that holds the items detected until now. 1684 * @param[in] target_protocol 1685 * The next protocol in the previous item. 1686 * @param[out] error 1687 * Pointer to error structure. 1688 * 1689 * @return 1690 * 0 on success, a negative errno value otherwise and rte_errno is set. 
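 *
 * Example of a pattern satisfying the checks below (testpmd flow syntax,
 * illustrative only): the outer UDP layer is mandatory and the VNI must
 * not be left as a full wildcard.
 * @code
 * flow create 0 ingress pattern eth / ipv4 / udp dst is 4789 /
 *      vxlan vni is 42 / end actions rss queues 0 1 end / end
 * @endcode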
1691 */ 1692 int 1693 mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, 1694 uint64_t item_flags, 1695 struct rte_flow_error *error) 1696 { 1697 const struct rte_flow_item_vxlan *spec = item->spec; 1698 const struct rte_flow_item_vxlan *mask = item->mask; 1699 int ret; 1700 union vni { 1701 uint32_t vlan_id; 1702 uint8_t vni[4]; 1703 } id = { .vlan_id = 0, }; 1704 uint32_t vlan_id = 0; 1705 1706 1707 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 1708 return rte_flow_error_set(error, ENOTSUP, 1709 RTE_FLOW_ERROR_TYPE_ITEM, item, 1710 "multiple tunnel layers not" 1711 " supported"); 1712 /* 1713 * Verify only UDPv4 is present as defined in 1714 * https://tools.ietf.org/html/rfc7348 1715 */ 1716 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 1717 return rte_flow_error_set(error, EINVAL, 1718 RTE_FLOW_ERROR_TYPE_ITEM, item, 1719 "no outer UDP layer found"); 1720 if (!mask) 1721 mask = &rte_flow_item_vxlan_mask; 1722 ret = mlx5_flow_item_acceptable 1723 (item, (const uint8_t *)mask, 1724 (const uint8_t *)&rte_flow_item_vxlan_mask, 1725 sizeof(struct rte_flow_item_vxlan), 1726 error); 1727 if (ret < 0) 1728 return ret; 1729 if (spec) { 1730 memcpy(&id.vni[1], spec->vni, 3); 1731 vlan_id = id.vlan_id; 1732 memcpy(&id.vni[1], mask->vni, 3); 1733 vlan_id &= id.vlan_id; 1734 } 1735 /* 1736 * Tunnel id 0 is equivalent as not adding a VXLAN layer, if 1737 * only this layer is defined in the Verbs specification it is 1738 * interpreted as wildcard and all packets will match this 1739 * rule, if it follows a full stack layer (ex: eth / ipv4 / 1740 * udp), all packets matching the layers before will also 1741 * match this rule. To avoid such situation, VNI 0 is 1742 * currently refused. 1743 */ 1744 if (!vlan_id) 1745 return rte_flow_error_set(error, ENOTSUP, 1746 RTE_FLOW_ERROR_TYPE_ITEM, item, 1747 "VXLAN vni cannot be 0"); 1748 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 1749 return rte_flow_error_set(error, ENOTSUP, 1750 RTE_FLOW_ERROR_TYPE_ITEM, item, 1751 "VXLAN tunnel must be fully defined"); 1752 return 0; 1753 } 1754 1755 /** 1756 * Validate VXLAN_GPE item. 1757 * 1758 * @param[in] item 1759 * Item specification. 1760 * @param[in] item_flags 1761 * Bit-fields that holds the items detected until now. 1762 * @param[in] priv 1763 * Pointer to the private data structure. 1764 * @param[in] target_protocol 1765 * The next protocol in the previous item. 1766 * @param[out] error 1767 * Pointer to error structure. 1768 * 1769 * @return 1770 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1771 */ 1772 int 1773 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, 1774 uint64_t item_flags, 1775 struct rte_eth_dev *dev, 1776 struct rte_flow_error *error) 1777 { 1778 struct mlx5_priv *priv = dev->data->dev_private; 1779 const struct rte_flow_item_vxlan_gpe *spec = item->spec; 1780 const struct rte_flow_item_vxlan_gpe *mask = item->mask; 1781 int ret; 1782 union vni { 1783 uint32_t vlan_id; 1784 uint8_t vni[4]; 1785 } id = { .vlan_id = 0, }; 1786 uint32_t vlan_id = 0; 1787 1788 if (!priv->config.l3_vxlan_en) 1789 return rte_flow_error_set(error, ENOTSUP, 1790 RTE_FLOW_ERROR_TYPE_ITEM, item, 1791 "L3 VXLAN is not enabled by device" 1792 " parameter and/or not configured in" 1793 " firmware"); 1794 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 1795 return rte_flow_error_set(error, ENOTSUP, 1796 RTE_FLOW_ERROR_TYPE_ITEM, item, 1797 "multiple tunnel layers not" 1798 " supported"); 1799 /* 1800 * Verify only UDPv4 is present as defined in 1801 * https://tools.ietf.org/html/rfc7348 1802 */ 1803 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 1804 return rte_flow_error_set(error, EINVAL, 1805 RTE_FLOW_ERROR_TYPE_ITEM, item, 1806 "no outer UDP layer found"); 1807 if (!mask) 1808 mask = &rte_flow_item_vxlan_gpe_mask; 1809 ret = mlx5_flow_item_acceptable 1810 (item, (const uint8_t *)mask, 1811 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask, 1812 sizeof(struct rte_flow_item_vxlan_gpe), 1813 error); 1814 if (ret < 0) 1815 return ret; 1816 if (spec) { 1817 if (spec->protocol) 1818 return rte_flow_error_set(error, ENOTSUP, 1819 RTE_FLOW_ERROR_TYPE_ITEM, 1820 item, 1821 "VxLAN-GPE protocol" 1822 " not supported"); 1823 memcpy(&id.vni[1], spec->vni, 3); 1824 vlan_id = id.vlan_id; 1825 memcpy(&id.vni[1], mask->vni, 3); 1826 vlan_id &= id.vlan_id; 1827 } 1828 /* 1829 * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this 1830 * layer is defined in the Verbs specification it is interpreted as 1831 * wildcard and all packets will match this rule, if it follows a full 1832 * stack layer (ex: eth / ipv4 / udp), all packets matching the layers 1833 * before will also match this rule. To avoid such situation, VNI 0 1834 * is currently refused. 1835 */ 1836 if (!vlan_id) 1837 return rte_flow_error_set(error, ENOTSUP, 1838 RTE_FLOW_ERROR_TYPE_ITEM, item, 1839 "VXLAN-GPE vni cannot be 0"); 1840 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 1841 return rte_flow_error_set(error, ENOTSUP, 1842 RTE_FLOW_ERROR_TYPE_ITEM, item, 1843 "VXLAN-GPE tunnel must be fully" 1844 " defined"); 1845 return 0; 1846 } 1847 /** 1848 * Validate GRE Key item. 1849 * 1850 * @param[in] item 1851 * Item specification. 1852 * @param[in] item_flags 1853 * Bit flags to mark detected items. 1854 * @param[in] gre_item 1855 * Pointer to gre_item 1856 * @param[out] error 1857 * Pointer to error structure. 1858 * 1859 * @return 1860 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1861 */ 1862 int 1863 mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item, 1864 uint64_t item_flags, 1865 const struct rte_flow_item *gre_item, 1866 struct rte_flow_error *error) 1867 { 1868 const rte_be32_t *mask = item->mask; 1869 int ret = 0; 1870 rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX); 1871 const struct rte_flow_item_gre *gre_spec = gre_item->spec; 1872 const struct rte_flow_item_gre *gre_mask = gre_item->mask; 1873 1874 if (item_flags & MLX5_FLOW_LAYER_GRE_KEY) 1875 return rte_flow_error_set(error, ENOTSUP, 1876 RTE_FLOW_ERROR_TYPE_ITEM, item, 1877 "Multiple GRE key not support"); 1878 if (!(item_flags & MLX5_FLOW_LAYER_GRE)) 1879 return rte_flow_error_set(error, ENOTSUP, 1880 RTE_FLOW_ERROR_TYPE_ITEM, item, 1881 "No preceding GRE header"); 1882 if (item_flags & MLX5_FLOW_LAYER_INNER) 1883 return rte_flow_error_set(error, ENOTSUP, 1884 RTE_FLOW_ERROR_TYPE_ITEM, item, 1885 "GRE key following a wrong item"); 1886 if (!gre_mask) 1887 gre_mask = &rte_flow_item_gre_mask; 1888 if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) && 1889 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000))) 1890 return rte_flow_error_set(error, EINVAL, 1891 RTE_FLOW_ERROR_TYPE_ITEM, item, 1892 "Key bit must be on"); 1893 1894 if (!mask) 1895 mask = &gre_key_default_mask; 1896 ret = mlx5_flow_item_acceptable 1897 (item, (const uint8_t *)mask, 1898 (const uint8_t *)&gre_key_default_mask, 1899 sizeof(rte_be32_t), error); 1900 return ret; 1901 } 1902 1903 /** 1904 * Validate GRE item. 1905 * 1906 * @param[in] item 1907 * Item specification. 1908 * @param[in] item_flags 1909 * Bit flags to mark detected items. 1910 * @param[in] target_protocol 1911 * The next protocol in the previous item. 1912 * @param[out] error 1913 * Pointer to error structure. 1914 * 1915 * @return 1916 * 0 on success, a negative errno value otherwise and rte_errno is set. 1917 */ 1918 int 1919 mlx5_flow_validate_item_gre(const struct rte_flow_item *item, 1920 uint64_t item_flags, 1921 uint8_t target_protocol, 1922 struct rte_flow_error *error) 1923 { 1924 const struct rte_flow_item_gre *spec __rte_unused = item->spec; 1925 const struct rte_flow_item_gre *mask = item->mask; 1926 int ret; 1927 const struct rte_flow_item_gre nic_mask = { 1928 .c_rsvd0_ver = RTE_BE16(0xB000), 1929 .protocol = RTE_BE16(UINT16_MAX), 1930 }; 1931 1932 if (target_protocol != 0xff && target_protocol != IPPROTO_GRE) 1933 return rte_flow_error_set(error, EINVAL, 1934 RTE_FLOW_ERROR_TYPE_ITEM, item, 1935 "protocol filtering not compatible" 1936 " with this GRE layer"); 1937 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 1938 return rte_flow_error_set(error, ENOTSUP, 1939 RTE_FLOW_ERROR_TYPE_ITEM, item, 1940 "multiple tunnel layers not" 1941 " supported"); 1942 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3)) 1943 return rte_flow_error_set(error, ENOTSUP, 1944 RTE_FLOW_ERROR_TYPE_ITEM, item, 1945 "L3 Layer is missing"); 1946 if (!mask) 1947 mask = &rte_flow_item_gre_mask; 1948 ret = mlx5_flow_item_acceptable 1949 (item, (const uint8_t *)mask, 1950 (const uint8_t *)&nic_mask, 1951 sizeof(struct rte_flow_item_gre), error); 1952 if (ret < 0) 1953 return ret; 1954 #ifndef HAVE_MLX5DV_DR 1955 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT 1956 if (spec && (spec->protocol & mask->protocol)) 1957 return rte_flow_error_set(error, ENOTSUP, 1958 RTE_FLOW_ERROR_TYPE_ITEM, item, 1959 "without MPLS support the" 1960 " specification cannot be used for" 1961 " filtering"); 1962 #endif 1963 #endif 1964 return 0; 1965 } 1966 1967 /** 1968 * Validate Geneve item. 
1969 * 1970 * @param[in] item 1971 * Item specification. 1972 * @param[in] item_flags 1973 * Bit-fields that hold the items detected until now. 1974 * @param[in] dev 1975 * Pointer to the rte_eth_dev structure. 1976 * @param[out] error 1977 * Pointer to error structure. 1978 * 1979 * @return 1980 * 0 on success, a negative errno value otherwise and rte_errno is set. 1981 */ 1982 1983 int 1984 mlx5_flow_validate_item_geneve(const struct rte_flow_item *item, 1985 uint64_t item_flags, 1986 struct rte_eth_dev *dev, 1987 struct rte_flow_error *error) 1988 { 1989 struct mlx5_priv *priv = dev->data->dev_private; 1990 const struct rte_flow_item_geneve *spec = item->spec; 1991 const struct rte_flow_item_geneve *mask = item->mask; 1992 int ret; 1993 uint16_t gbhdr; 1994 uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ? 1995 MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0; 1996 const struct rte_flow_item_geneve nic_mask = { 1997 .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80), 1998 .vni = "\xff\xff\xff", 1999 .protocol = RTE_BE16(UINT16_MAX), 2000 }; 2001 2002 if (!(priv->config.hca_attr.flex_parser_protocols & 2003 MLX5_HCA_FLEX_GENEVE_ENABLED) || 2004 !priv->config.hca_attr.tunnel_stateless_geneve_rx) 2005 return rte_flow_error_set(error, ENOTSUP, 2006 RTE_FLOW_ERROR_TYPE_ITEM, item, 2007 "L3 Geneve is not enabled by device" 2008 " parameter and/or not configured in" 2009 " firmware"); 2010 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 2011 return rte_flow_error_set(error, ENOTSUP, 2012 RTE_FLOW_ERROR_TYPE_ITEM, item, 2013 "multiple tunnel layers not" 2014 " supported"); 2015 /* 2016 * Verify an outer UDP layer is present since Geneve is 2017 * carried over UDP. 2018 */ 2019 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 2020 return rte_flow_error_set(error, EINVAL, 2021 RTE_FLOW_ERROR_TYPE_ITEM, item, 2022 "no outer UDP layer found"); 2023 if (!mask) 2024 mask = &rte_flow_item_geneve_mask; 2025 ret = mlx5_flow_item_acceptable 2026 (item, (const uint8_t *)mask, 2027 (const uint8_t *)&nic_mask, 2028 sizeof(struct rte_flow_item_geneve), error); 2029 if (ret) 2030 return ret; 2031 if (spec) { 2032 gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0); 2033 if (MLX5_GENEVE_VER_VAL(gbhdr) || 2034 MLX5_GENEVE_CRITO_VAL(gbhdr) || 2035 MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1) 2036 return rte_flow_error_set(error, ENOTSUP, 2037 RTE_FLOW_ERROR_TYPE_ITEM, 2038 item, 2039 "Geneve protocol unsupported" 2040 " fields are being used"); 2041 if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len) 2042 return rte_flow_error_set 2043 (error, ENOTSUP, 2044 RTE_FLOW_ERROR_TYPE_ITEM, 2045 item, 2046 "Unsupported Geneve options length"); 2047 } 2048 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 2049 return rte_flow_error_set 2050 (error, ENOTSUP, 2051 RTE_FLOW_ERROR_TYPE_ITEM, item, 2052 "Geneve tunnel must be fully defined"); 2053 return 0; 2054 } 2055 2056 /** 2057 * Validate MPLS item. 2058 * 2059 * @param[in] dev 2060 * Pointer to the rte_eth_dev structure. 2061 * @param[in] item 2062 * Item specification. 2063 * @param[in] item_flags 2064 * Bit-fields that hold the items detected until now. 2065 * @param[in] prev_layer 2066 * The protocol layer indicated by the previous item. 2067 * @param[out] error 2068 * Pointer to error structure. 2069 * 2070 * @return 2071 * 0 on success, a negative errno value otherwise and rte_errno is set.
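 *
 * For illustration only, an MPLS-over-GRE pattern accepted by this check
 * (MPLS may also follow an outer L3 or outer UDP item); item specs are
 * omitted for brevity:
 *
 * @code
 * const struct rte_flow_item pattern[] = {
 *	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *	{ .type = RTE_FLOW_ITEM_TYPE_GRE },
 *	{ .type = RTE_FLOW_ITEM_TYPE_MPLS },
 *	{ .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * @endcode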
2072 */ 2073 int 2074 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, 2075 const struct rte_flow_item *item __rte_unused, 2076 uint64_t item_flags __rte_unused, 2077 uint64_t prev_layer __rte_unused, 2078 struct rte_flow_error *error) 2079 { 2080 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT 2081 const struct rte_flow_item_mpls *mask = item->mask; 2082 struct mlx5_priv *priv = dev->data->dev_private; 2083 int ret; 2084 2085 if (!priv->config.mpls_en) 2086 return rte_flow_error_set(error, ENOTSUP, 2087 RTE_FLOW_ERROR_TYPE_ITEM, item, 2088 "MPLS not supported or" 2089 " disabled in firmware" 2090 " configuration."); 2091 /* MPLS over IP, UDP, GRE is allowed */ 2092 if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 | 2093 MLX5_FLOW_LAYER_OUTER_L4_UDP | 2094 MLX5_FLOW_LAYER_GRE))) 2095 return rte_flow_error_set(error, EINVAL, 2096 RTE_FLOW_ERROR_TYPE_ITEM, item, 2097 "protocol filtering not compatible" 2098 " with MPLS layer"); 2099 /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */ 2100 if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) && 2101 !(item_flags & MLX5_FLOW_LAYER_GRE)) 2102 return rte_flow_error_set(error, ENOTSUP, 2103 RTE_FLOW_ERROR_TYPE_ITEM, item, 2104 "multiple tunnel layers not" 2105 " supported"); 2106 if (!mask) 2107 mask = &rte_flow_item_mpls_mask; 2108 ret = mlx5_flow_item_acceptable 2109 (item, (const uint8_t *)mask, 2110 (const uint8_t *)&rte_flow_item_mpls_mask, 2111 sizeof(struct rte_flow_item_mpls), error); 2112 if (ret < 0) 2113 return ret; 2114 return 0; 2115 #endif 2116 return rte_flow_error_set(error, ENOTSUP, 2117 RTE_FLOW_ERROR_TYPE_ITEM, item, 2118 "MPLS is not supported by Verbs, please" 2119 " update."); 2120 } 2121 2122 /** 2123 * Validate NVGRE item. 2124 * 2125 * @param[in] item 2126 * Item specification. 2127 * @param[in] item_flags 2128 * Bit flags to mark detected items. 2129 * @param[in] target_protocol 2130 * The next protocol in the previous item. 2131 * @param[out] error 2132 * Pointer to error structure. 2133 * 2134 * @return 2135 * 0 on success, a negative errno value otherwise and rte_errno is set. 
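 *
 * An illustrative NVGRE pattern sketch (not from this driver); the item
 * follows an outer L3 layer and no other tunnel:
 *
 * @code
 * const struct rte_flow_item_nvgre nvgre_spec = {
 *	.tni = "\x00\x00\x01",
 * };
 * const struct rte_flow_item pattern[] = {
 *	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *	{ .type = RTE_FLOW_ITEM_TYPE_NVGRE, .spec = &nvgre_spec },
 *	{ .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * @endcode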
2136 */ 2137 int 2138 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item, 2139 uint64_t item_flags, 2140 uint8_t target_protocol, 2141 struct rte_flow_error *error) 2142 { 2143 const struct rte_flow_item_nvgre *mask = item->mask; 2144 int ret; 2145 2146 if (target_protocol != 0xff && target_protocol != IPPROTO_GRE) 2147 return rte_flow_error_set(error, EINVAL, 2148 RTE_FLOW_ERROR_TYPE_ITEM, item, 2149 "protocol filtering not compatible" 2150 " with this GRE layer"); 2151 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 2152 return rte_flow_error_set(error, ENOTSUP, 2153 RTE_FLOW_ERROR_TYPE_ITEM, item, 2154 "multiple tunnel layers not" 2155 " supported"); 2156 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3)) 2157 return rte_flow_error_set(error, ENOTSUP, 2158 RTE_FLOW_ERROR_TYPE_ITEM, item, 2159 "L3 Layer is missing"); 2160 if (!mask) 2161 mask = &rte_flow_item_nvgre_mask; 2162 ret = mlx5_flow_item_acceptable 2163 (item, (const uint8_t *)mask, 2164 (const uint8_t *)&rte_flow_item_nvgre_mask, 2165 sizeof(struct rte_flow_item_nvgre), error); 2166 if (ret < 0) 2167 return ret; 2168 return 0; 2169 } 2170 2171 static int 2172 flow_null_validate(struct rte_eth_dev *dev __rte_unused, 2173 const struct rte_flow_attr *attr __rte_unused, 2174 const struct rte_flow_item items[] __rte_unused, 2175 const struct rte_flow_action actions[] __rte_unused, 2176 bool external __rte_unused, 2177 struct rte_flow_error *error) 2178 { 2179 return rte_flow_error_set(error, ENOTSUP, 2180 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2181 } 2182 2183 static struct mlx5_flow * 2184 flow_null_prepare(const struct rte_flow_attr *attr __rte_unused, 2185 const struct rte_flow_item items[] __rte_unused, 2186 const struct rte_flow_action actions[] __rte_unused, 2187 struct rte_flow_error *error) 2188 { 2189 rte_flow_error_set(error, ENOTSUP, 2190 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2191 return NULL; 2192 } 2193 2194 static int 2195 flow_null_translate(struct rte_eth_dev *dev __rte_unused, 2196 struct mlx5_flow *dev_flow __rte_unused, 2197 const struct rte_flow_attr *attr __rte_unused, 2198 const struct rte_flow_item items[] __rte_unused, 2199 const struct rte_flow_action actions[] __rte_unused, 2200 struct rte_flow_error *error) 2201 { 2202 return rte_flow_error_set(error, ENOTSUP, 2203 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2204 } 2205 2206 static int 2207 flow_null_apply(struct rte_eth_dev *dev __rte_unused, 2208 struct rte_flow *flow __rte_unused, 2209 struct rte_flow_error *error) 2210 { 2211 return rte_flow_error_set(error, ENOTSUP, 2212 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2213 } 2214 2215 static void 2216 flow_null_remove(struct rte_eth_dev *dev __rte_unused, 2217 struct rte_flow *flow __rte_unused) 2218 { 2219 } 2220 2221 static void 2222 flow_null_destroy(struct rte_eth_dev *dev __rte_unused, 2223 struct rte_flow *flow __rte_unused) 2224 { 2225 } 2226 2227 static int 2228 flow_null_query(struct rte_eth_dev *dev __rte_unused, 2229 struct rte_flow *flow __rte_unused, 2230 const struct rte_flow_action *actions __rte_unused, 2231 void *data __rte_unused, 2232 struct rte_flow_error *error) 2233 { 2234 return rte_flow_error_set(error, ENOTSUP, 2235 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2236 } 2237 2238 /* Void driver to protect from null pointer reference. 
*/ 2239 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = { 2240 .validate = flow_null_validate, 2241 .prepare = flow_null_prepare, 2242 .translate = flow_null_translate, 2243 .apply = flow_null_apply, 2244 .remove = flow_null_remove, 2245 .destroy = flow_null_destroy, 2246 .query = flow_null_query, 2247 }; 2248 2249 /** 2250 * Select flow driver type according to flow attributes and device 2251 * configuration. 2252 * 2253 * @param[in] dev 2254 * Pointer to the dev structure. 2255 * @param[in] attr 2256 * Pointer to the flow attributes. 2257 * 2258 * @return 2259 * flow driver type, MLX5_FLOW_TYPE_MAX otherwise. 2260 */ 2261 static enum mlx5_flow_drv_type 2262 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) 2263 { 2264 struct mlx5_priv *priv = dev->data->dev_private; 2265 enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX; 2266 2267 if (attr->transfer && priv->config.dv_esw_en) 2268 type = MLX5_FLOW_TYPE_DV; 2269 if (!attr->transfer) 2270 type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV : 2271 MLX5_FLOW_TYPE_VERBS; 2272 return type; 2273 } 2274 2275 #define flow_get_drv_ops(type) flow_drv_ops[type] 2276 2277 /** 2278 * Flow driver validation API. This abstracts calling driver specific functions. 2279 * The type of flow driver is determined according to flow attributes. 2280 * 2281 * @param[in] dev 2282 * Pointer to the dev structure. 2283 * @param[in] attr 2284 * Pointer to the flow attributes. 2285 * @param[in] items 2286 * Pointer to the list of items. 2287 * @param[in] actions 2288 * Pointer to the list of actions. 2289 * @param[in] external 2290 * This flow rule is created by request external to PMD. 2291 * @param[out] error 2292 * Pointer to the error structure. 2293 * 2294 * @return 2295 * 0 on success, a negative errno value otherwise and rte_errno is set. 2296 */ 2297 static inline int 2298 flow_drv_validate(struct rte_eth_dev *dev, 2299 const struct rte_flow_attr *attr, 2300 const struct rte_flow_item items[], 2301 const struct rte_flow_action actions[], 2302 bool external, struct rte_flow_error *error) 2303 { 2304 const struct mlx5_flow_driver_ops *fops; 2305 enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr); 2306 2307 fops = flow_get_drv_ops(type); 2308 return fops->validate(dev, attr, items, actions, external, error); 2309 } 2310 2311 /** 2312 * Flow driver preparation API. This abstracts calling driver specific 2313 * functions. Parent flow (rte_flow) should have driver type (drv_type). It 2314 * calculates the size of memory required for device flow, allocates the memory, 2315 * initializes the device flow and returns the pointer. 2316 * 2317 * @note 2318 * This function initializes device flow structure such as dv or verbs in 2319 * struct mlx5_flow. However, it is caller's responsibility to initialize the 2320 * rest. For example, adding returning device flow to flow->dev_flow list and 2321 * setting backward reference to the flow should be done out of this function. 2322 * layers field is not filled either. 2323 * 2324 * @param[in] attr 2325 * Pointer to the flow attributes. 2326 * @param[in] items 2327 * Pointer to the list of items. 2328 * @param[in] actions 2329 * Pointer to the list of actions. 2330 * @param[out] error 2331 * Pointer to the error structure. 2332 * 2333 * @return 2334 * Pointer to device flow on success, otherwise NULL and rte_errno is set. 
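 *
 * A sketch of the caller-side bookkeeping described in the note above,
 * mirroring what flow_list_create() does further below:
 *
 * @code
 * dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
 * if (!dev_flow)
 *	return NULL;
 * dev_flow->flow = flow;
 * LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
 * @endcode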
2335 */ 2336 static inline struct mlx5_flow * 2337 flow_drv_prepare(const struct rte_flow *flow, 2338 const struct rte_flow_attr *attr, 2339 const struct rte_flow_item items[], 2340 const struct rte_flow_action actions[], 2341 struct rte_flow_error *error) 2342 { 2343 const struct mlx5_flow_driver_ops *fops; 2344 enum mlx5_flow_drv_type type = flow->drv_type; 2345 2346 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2347 fops = flow_get_drv_ops(type); 2348 return fops->prepare(attr, items, actions, error); 2349 } 2350 2351 /** 2352 * Flow driver translation API. This abstracts calling driver specific 2353 * functions. Parent flow (rte_flow) should have driver type (drv_type). It 2354 * translates a generic flow into a driver flow. flow_drv_prepare() must 2355 * precede. 2356 * 2357 * @note 2358 * dev_flow->layers could be filled as a result of parsing during translation 2359 * if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled 2360 * if necessary. As a flow can have multiple dev_flows by RSS flow expansion, 2361 * flow->actions could be overwritten even though all the expanded dev_flows 2362 * have the same actions. 2363 * 2364 * @param[in] dev 2365 * Pointer to the rte dev structure. 2366 * @param[in, out] dev_flow 2367 * Pointer to the mlx5 flow. 2368 * @param[in] attr 2369 * Pointer to the flow attributes. 2370 * @param[in] items 2371 * Pointer to the list of items. 2372 * @param[in] actions 2373 * Pointer to the list of actions. 2374 * @param[out] error 2375 * Pointer to the error structure. 2376 * 2377 * @return 2378 * 0 on success, a negative errno value otherwise and rte_errno is set. 2379 */ 2380 static inline int 2381 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow, 2382 const struct rte_flow_attr *attr, 2383 const struct rte_flow_item items[], 2384 const struct rte_flow_action actions[], 2385 struct rte_flow_error *error) 2386 { 2387 const struct mlx5_flow_driver_ops *fops; 2388 enum mlx5_flow_drv_type type = dev_flow->flow->drv_type; 2389 2390 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2391 fops = flow_get_drv_ops(type); 2392 return fops->translate(dev, dev_flow, attr, items, actions, error); 2393 } 2394 2395 /** 2396 * Flow driver apply API. This abstracts calling driver specific functions. 2397 * Parent flow (rte_flow) should have driver type (drv_type). It applies 2398 * translated driver flows on to device. flow_drv_translate() must precede. 2399 * 2400 * @param[in] dev 2401 * Pointer to Ethernet device structure. 2402 * @param[in, out] flow 2403 * Pointer to flow structure. 2404 * @param[out] error 2405 * Pointer to error structure. 2406 * 2407 * @return 2408 * 0 on success, a negative errno value otherwise and rte_errno is set. 2409 */ 2410 static inline int 2411 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, 2412 struct rte_flow_error *error) 2413 { 2414 const struct mlx5_flow_driver_ops *fops; 2415 enum mlx5_flow_drv_type type = flow->drv_type; 2416 2417 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2418 fops = flow_get_drv_ops(type); 2419 return fops->apply(dev, flow, error); 2420 } 2421 2422 /** 2423 * Flow driver remove API. This abstracts calling driver specific functions. 2424 * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow 2425 * on device. All the resources of the flow should be freed by calling 2426 * flow_drv_destroy(). 2427 * 2428 * @param[in] dev 2429 * Pointer to Ethernet device. 
2430 * @param[in, out] flow 2431 * Pointer to flow structure. 2432 */ 2433 static inline void 2434 flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) 2435 { 2436 const struct mlx5_flow_driver_ops *fops; 2437 enum mlx5_flow_drv_type type = flow->drv_type; 2438 2439 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2440 fops = flow_get_drv_ops(type); 2441 fops->remove(dev, flow); 2442 } 2443 2444 /** 2445 * Flow driver destroy API. This abstracts calling driver specific functions. 2446 * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow 2447 * on device and releases resources of the flow. 2448 * 2449 * @param[in] dev 2450 * Pointer to Ethernet device. 2451 * @param[in, out] flow 2452 * Pointer to flow structure. 2453 */ 2454 static inline void 2455 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) 2456 { 2457 const struct mlx5_flow_driver_ops *fops; 2458 enum mlx5_flow_drv_type type = flow->drv_type; 2459 2460 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2461 fops = flow_get_drv_ops(type); 2462 fops->destroy(dev, flow); 2463 } 2464 2465 /** 2466 * Validate a flow supported by the NIC. 2467 * 2468 * @see rte_flow_validate() 2469 * @see rte_flow_ops 2470 */ 2471 int 2472 mlx5_flow_validate(struct rte_eth_dev *dev, 2473 const struct rte_flow_attr *attr, 2474 const struct rte_flow_item items[], 2475 const struct rte_flow_action actions[], 2476 struct rte_flow_error *error) 2477 { 2478 int ret; 2479 2480 ret = flow_drv_validate(dev, attr, items, actions, true, error); 2481 if (ret < 0) 2482 return ret; 2483 return 0; 2484 } 2485 2486 /** 2487 * Get RSS action from the action list. 2488 * 2489 * @param[in] actions 2490 * Pointer to the list of actions. 2491 * 2492 * @return 2493 * Pointer to the RSS action if exist, else return NULL. 2494 */ 2495 static const struct rte_flow_action_rss* 2496 flow_get_rss_action(const struct rte_flow_action actions[]) 2497 { 2498 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2499 switch (actions->type) { 2500 case RTE_FLOW_ACTION_TYPE_RSS: 2501 return (const struct rte_flow_action_rss *) 2502 actions->conf; 2503 default: 2504 break; 2505 } 2506 } 2507 return NULL; 2508 } 2509 2510 static unsigned int 2511 find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) 2512 { 2513 const struct rte_flow_item *item; 2514 unsigned int has_vlan = 0; 2515 2516 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { 2517 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { 2518 has_vlan = 1; 2519 break; 2520 } 2521 } 2522 if (has_vlan) 2523 return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN : 2524 MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN; 2525 return rss_level < 2 ? MLX5_EXPANSION_ROOT : 2526 MLX5_EXPANSION_ROOT_OUTER; 2527 } 2528 2529 /** 2530 * Create a flow and add it to @p list. 2531 * 2532 * @param dev 2533 * Pointer to Ethernet device. 2534 * @param list 2535 * Pointer to a TAILQ flow list. 2536 * @param[in] attr 2537 * Flow rule attributes. 2538 * @param[in] items 2539 * Pattern specification (list terminated by the END pattern item). 2540 * @param[in] actions 2541 * Associated actions (list terminated by the END action). 2542 * @param[in] external 2543 * This flow rule is created by request external to PMD. 2544 * @param[out] error 2545 * Perform verbose error reporting if not NULL. 2546 * 2547 * @return 2548 * A flow on success, NULL otherwise and rte_errno is set. 
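 *
 * As an illustration of the RSS expansion performed below, a rule over the
 * pattern eth / ipv4 combined with an action such as the following is
 * expanded by rte_flow_expand_rss() into separate device flows for
 * eth / ipv4, eth / ipv4 / udp and eth / ipv4 / tcp, each prepared,
 * translated and applied in turn:
 *
 * @code
 * struct rte_flow_action_rss rss = {
 *	.types = ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV4_TCP,
 *	.queue_num = 4,
 *	.queue = queues, // uint16_t queues[4] filled by the caller
 * };
 * @endcode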
2549 */ 2550 static struct rte_flow * 2551 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, 2552 const struct rte_flow_attr *attr, 2553 const struct rte_flow_item items[], 2554 const struct rte_flow_action actions[], 2555 bool external, struct rte_flow_error *error) 2556 { 2557 struct rte_flow *flow = NULL; 2558 struct mlx5_flow *dev_flow; 2559 const struct rte_flow_action_rss *rss; 2560 union { 2561 struct rte_flow_expand_rss buf; 2562 uint8_t buffer[2048]; 2563 } expand_buffer; 2564 struct rte_flow_expand_rss *buf = &expand_buffer.buf; 2565 int ret; 2566 uint32_t i; 2567 uint32_t flow_size; 2568 2569 ret = flow_drv_validate(dev, attr, items, actions, external, error); 2570 if (ret < 0) 2571 return NULL; 2572 flow_size = sizeof(struct rte_flow); 2573 rss = flow_get_rss_action(actions); 2574 if (rss) 2575 flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t), 2576 sizeof(void *)); 2577 else 2578 flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *)); 2579 flow = rte_calloc(__func__, 1, flow_size, 0); 2580 if (!flow) { 2581 rte_errno = ENOMEM; 2582 return NULL; 2583 } 2584 flow->drv_type = flow_get_drv_type(dev, attr); 2585 flow->ingress = attr->ingress; 2586 flow->transfer = attr->transfer; 2587 assert(flow->drv_type > MLX5_FLOW_TYPE_MIN && 2588 flow->drv_type < MLX5_FLOW_TYPE_MAX); 2589 flow->queue = (void *)(flow + 1); 2590 LIST_INIT(&flow->dev_flows); 2591 if (rss && rss->types) { 2592 unsigned int graph_root; 2593 2594 graph_root = find_graph_root(items, rss->level); 2595 ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer), 2596 items, rss->types, 2597 mlx5_support_expansion, 2598 graph_root); 2599 assert(ret > 0 && 2600 (unsigned int)ret < sizeof(expand_buffer.buffer)); 2601 } else { 2602 buf->entries = 1; 2603 buf->entry[0].pattern = (void *)(uintptr_t)items; 2604 } 2605 for (i = 0; i < buf->entries; ++i) { 2606 dev_flow = flow_drv_prepare(flow, attr, buf->entry[i].pattern, 2607 actions, error); 2608 if (!dev_flow) 2609 goto error; 2610 dev_flow->flow = flow; 2611 dev_flow->external = external; 2612 LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); 2613 ret = flow_drv_translate(dev, dev_flow, attr, 2614 buf->entry[i].pattern, 2615 actions, error); 2616 if (ret < 0) 2617 goto error; 2618 } 2619 if (dev->data->dev_started) { 2620 ret = flow_drv_apply(dev, flow, error); 2621 if (ret < 0) 2622 goto error; 2623 } 2624 TAILQ_INSERT_TAIL(list, flow, next); 2625 flow_rxq_flags_set(dev, flow); 2626 return flow; 2627 error: 2628 ret = rte_errno; /* Save rte_errno before cleanup. */ 2629 assert(flow); 2630 flow_drv_destroy(dev, flow); 2631 rte_free(flow); 2632 rte_errno = ret; /* Restore rte_errno. */ 2633 return NULL; 2634 } 2635 2636 /** 2637 * Create a dedicated flow rule on e-switch table 0 (root table), to direct all 2638 * incoming packets to table 1. 2639 * 2640 * Other flow rules, requested for group n, will be created in 2641 * e-switch table n+1. 2642 * Jump action to e-switch group n will be created to group n+1. 2643 * 2644 * Used when working in switchdev mode, to utilise advantages of table 1 2645 * and above. 2646 * 2647 * @param dev 2648 * Pointer to Ethernet device. 2649 * 2650 * @return 2651 * Pointer to flow on success, NULL otherwise and rte_errno is set. 
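 *
 * For illustration, with this jump rule installed an application transfer
 * rule requested on group N lands in e-switch table N+1, as computed by
 * mlx5_flow_group_to_table() at the end of this file:
 *
 * @code
 * const struct rte_flow_attr attr = { .group = 2, .transfer = 1, .ingress = 1 };
 * struct rte_flow_error err;
 * uint32_t table;
 *
 * mlx5_flow_group_to_table(&attr, true, attr.group, &table, &err);
 * // table == 3 for this external transfer rule.
 * @endcode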
2652 */ 2653 struct rte_flow * 2654 mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev) 2655 { 2656 const struct rte_flow_attr attr = { 2657 .group = 0, 2658 .priority = 0, 2659 .ingress = 1, 2660 .egress = 0, 2661 .transfer = 1, 2662 }; 2663 const struct rte_flow_item pattern = { 2664 .type = RTE_FLOW_ITEM_TYPE_END, 2665 }; 2666 struct rte_flow_action_jump jump = { 2667 .group = 1, 2668 }; 2669 const struct rte_flow_action actions[] = { 2670 { 2671 .type = RTE_FLOW_ACTION_TYPE_JUMP, 2672 .conf = &jump, 2673 }, 2674 { 2675 .type = RTE_FLOW_ACTION_TYPE_END, 2676 }, 2677 }; 2678 struct mlx5_priv *priv = dev->data->dev_private; 2679 struct rte_flow_error error; 2680 2681 return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern, 2682 actions, false, &error); 2683 } 2684 2685 /** 2686 * Create a flow. 2687 * 2688 * @see rte_flow_create() 2689 * @see rte_flow_ops 2690 */ 2691 struct rte_flow * 2692 mlx5_flow_create(struct rte_eth_dev *dev, 2693 const struct rte_flow_attr *attr, 2694 const struct rte_flow_item items[], 2695 const struct rte_flow_action actions[], 2696 struct rte_flow_error *error) 2697 { 2698 struct mlx5_priv *priv = dev->data->dev_private; 2699 2700 return flow_list_create(dev, &priv->flows, 2701 attr, items, actions, true, error); 2702 } 2703 2704 /** 2705 * Destroy a flow in a list. 2706 * 2707 * @param dev 2708 * Pointer to Ethernet device. 2709 * @param list 2710 * Pointer to a TAILQ flow list. 2711 * @param[in] flow 2712 * Flow to destroy. 2713 */ 2714 static void 2715 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, 2716 struct rte_flow *flow) 2717 { 2718 /* 2719 * Update RX queue flags only if port is started, otherwise it is 2720 * already clean. 2721 */ 2722 if (dev->data->dev_started) 2723 flow_rxq_flags_trim(dev, flow); 2724 flow_drv_destroy(dev, flow); 2725 TAILQ_REMOVE(list, flow, next); 2726 rte_free(flow->fdir); 2727 rte_free(flow); 2728 } 2729 2730 /** 2731 * Destroy all flows. 2732 * 2733 * @param dev 2734 * Pointer to Ethernet device. 2735 * @param list 2736 * Pointer to a TAILQ flow list. 2737 */ 2738 void 2739 mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list) 2740 { 2741 while (!TAILQ_EMPTY(list)) { 2742 struct rte_flow *flow; 2743 2744 flow = TAILQ_FIRST(list); 2745 flow_list_destroy(dev, list, flow); 2746 } 2747 } 2748 2749 /** 2750 * Remove all flows. 2751 * 2752 * @param dev 2753 * Pointer to Ethernet device. 2754 * @param list 2755 * Pointer to a TAILQ flow list. 2756 */ 2757 void 2758 mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) 2759 { 2760 struct rte_flow *flow; 2761 2762 TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) 2763 flow_drv_remove(dev, flow); 2764 flow_rxq_flags_clear(dev); 2765 } 2766 2767 /** 2768 * Add all flows. 2769 * 2770 * @param dev 2771 * Pointer to Ethernet device. 2772 * @param list 2773 * Pointer to a TAILQ flow list. 2774 * 2775 * @return 2776 * 0 on success, a negative errno value otherwise and rte_errno is set. 2777 */ 2778 int 2779 mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) 2780 { 2781 struct rte_flow *flow; 2782 struct rte_flow_error error; 2783 int ret = 0; 2784 2785 TAILQ_FOREACH(flow, list, next) { 2786 ret = flow_drv_apply(dev, flow, &error); 2787 if (ret < 0) 2788 goto error; 2789 flow_rxq_flags_set(dev, flow); 2790 } 2791 return 0; 2792 error: 2793 ret = rte_errno; /* Save rte_errno before cleanup. */ 2794 mlx5_flow_stop(dev, list); 2795 rte_errno = ret; /* Restore rte_errno. 
*/ 2796 return -rte_errno; 2797 } 2798 2799 /** 2800 * Verify the flow list is empty 2801 * 2802 * @param dev 2803 * Pointer to Ethernet device. 2804 * 2805 * @return the number of flows not released. 2806 */ 2807 int 2808 mlx5_flow_verify(struct rte_eth_dev *dev) 2809 { 2810 struct mlx5_priv *priv = dev->data->dev_private; 2811 struct rte_flow *flow; 2812 int ret = 0; 2813 2814 TAILQ_FOREACH(flow, &priv->flows, next) { 2815 DRV_LOG(DEBUG, "port %u flow %p still referenced", 2816 dev->data->port_id, (void *)flow); 2817 ++ret; 2818 } 2819 return ret; 2820 } 2821 2822 /** 2823 * Enable default hairpin egress flow. 2824 * 2825 * @param dev 2826 * Pointer to Ethernet device. 2827 * @param queue 2828 * The queue index. 2829 * 2830 * @return 2831 * 0 on success, a negative errno value otherwise and rte_errno is set. 2832 */ 2833 int 2834 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, 2835 uint32_t queue) 2836 { 2837 struct mlx5_priv *priv = dev->data->dev_private; 2838 const struct rte_flow_attr attr = { 2839 .egress = 1, 2840 .priority = 0, 2841 }; 2842 struct mlx5_rte_flow_item_tx_queue queue_spec = { 2843 .queue = queue, 2844 }; 2845 struct mlx5_rte_flow_item_tx_queue queue_mask = { 2846 .queue = UINT32_MAX, 2847 }; 2848 struct rte_flow_item items[] = { 2849 { 2850 .type = MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE, 2851 .spec = &queue_spec, 2852 .last = NULL, 2853 .mask = &queue_mask, 2854 }, 2855 { 2856 .type = RTE_FLOW_ITEM_TYPE_END, 2857 }, 2858 }; 2859 struct rte_flow_action_jump jump = { 2860 .group = MLX5_HAIRPIN_TX_TABLE, 2861 }; 2862 struct rte_flow_action actions[2]; 2863 struct rte_flow *flow; 2864 struct rte_flow_error error; 2865 2866 actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP; 2867 actions[0].conf = &jump; 2868 actions[1].type = RTE_FLOW_ACTION_TYPE_END; 2869 flow = flow_list_create(dev, &priv->ctrl_flows, 2870 &attr, items, actions, false, &error); 2871 if (!flow) { 2872 DRV_LOG(DEBUG, 2873 "Failed to create ctrl flow: rte_errno(%d)," 2874 " type(%d), message(%s)\n", 2875 rte_errno, error.type, 2876 error.message ? error.message : " (no stated reason)"); 2877 return -rte_errno; 2878 } 2879 return 0; 2880 } 2881 2882 /** 2883 * Enable a control flow configured from the control plane. 2884 * 2885 * @param dev 2886 * Pointer to Ethernet device. 2887 * @param eth_spec 2888 * An Ethernet flow spec to apply. 2889 * @param eth_mask 2890 * An Ethernet flow mask to apply. 2891 * @param vlan_spec 2892 * A VLAN flow spec to apply. 2893 * @param vlan_mask 2894 * A VLAN flow mask to apply. 2895 * 2896 * @return 2897 * 0 on success, a negative errno value otherwise and rte_errno is set. 2898 */ 2899 int 2900 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, 2901 struct rte_flow_item_eth *eth_spec, 2902 struct rte_flow_item_eth *eth_mask, 2903 struct rte_flow_item_vlan *vlan_spec, 2904 struct rte_flow_item_vlan *vlan_mask) 2905 { 2906 struct mlx5_priv *priv = dev->data->dev_private; 2907 const struct rte_flow_attr attr = { 2908 .ingress = 1, 2909 .priority = MLX5_FLOW_PRIO_RSVD, 2910 }; 2911 struct rte_flow_item items[] = { 2912 { 2913 .type = RTE_FLOW_ITEM_TYPE_ETH, 2914 .spec = eth_spec, 2915 .last = NULL, 2916 .mask = eth_mask, 2917 }, 2918 { 2919 .type = (vlan_spec) ? 
RTE_FLOW_ITEM_TYPE_VLAN : 2920 RTE_FLOW_ITEM_TYPE_END, 2921 .spec = vlan_spec, 2922 .last = NULL, 2923 .mask = vlan_mask, 2924 }, 2925 { 2926 .type = RTE_FLOW_ITEM_TYPE_END, 2927 }, 2928 }; 2929 uint16_t queue[priv->reta_idx_n]; 2930 struct rte_flow_action_rss action_rss = { 2931 .func = RTE_ETH_HASH_FUNCTION_DEFAULT, 2932 .level = 0, 2933 .types = priv->rss_conf.rss_hf, 2934 .key_len = priv->rss_conf.rss_key_len, 2935 .queue_num = priv->reta_idx_n, 2936 .key = priv->rss_conf.rss_key, 2937 .queue = queue, 2938 }; 2939 struct rte_flow_action actions[] = { 2940 { 2941 .type = RTE_FLOW_ACTION_TYPE_RSS, 2942 .conf = &action_rss, 2943 }, 2944 { 2945 .type = RTE_FLOW_ACTION_TYPE_END, 2946 }, 2947 }; 2948 struct rte_flow *flow; 2949 struct rte_flow_error error; 2950 unsigned int i; 2951 2952 if (!priv->reta_idx_n || !priv->rxqs_n) { 2953 return 0; 2954 } 2955 for (i = 0; i != priv->reta_idx_n; ++i) 2956 queue[i] = (*priv->reta_idx)[i]; 2957 flow = flow_list_create(dev, &priv->ctrl_flows, 2958 &attr, items, actions, false, &error); 2959 if (!flow) 2960 return -rte_errno; 2961 return 0; 2962 } 2963 2964 /** 2965 * Enable a flow control configured from the control plane. 2966 * 2967 * @param dev 2968 * Pointer to Ethernet device. 2969 * @param eth_spec 2970 * An Ethernet flow spec to apply. 2971 * @param eth_mask 2972 * An Ethernet flow mask to apply. 2973 * 2974 * @return 2975 * 0 on success, a negative errno value otherwise and rte_errno is set. 2976 */ 2977 int 2978 mlx5_ctrl_flow(struct rte_eth_dev *dev, 2979 struct rte_flow_item_eth *eth_spec, 2980 struct rte_flow_item_eth *eth_mask) 2981 { 2982 return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL); 2983 } 2984 2985 /** 2986 * Destroy a flow. 2987 * 2988 * @see rte_flow_destroy() 2989 * @see rte_flow_ops 2990 */ 2991 int 2992 mlx5_flow_destroy(struct rte_eth_dev *dev, 2993 struct rte_flow *flow, 2994 struct rte_flow_error *error __rte_unused) 2995 { 2996 struct mlx5_priv *priv = dev->data->dev_private; 2997 2998 flow_list_destroy(dev, &priv->flows, flow); 2999 return 0; 3000 } 3001 3002 /** 3003 * Destroy all flows. 3004 * 3005 * @see rte_flow_flush() 3006 * @see rte_flow_ops 3007 */ 3008 int 3009 mlx5_flow_flush(struct rte_eth_dev *dev, 3010 struct rte_flow_error *error __rte_unused) 3011 { 3012 struct mlx5_priv *priv = dev->data->dev_private; 3013 3014 mlx5_flow_list_flush(dev, &priv->flows); 3015 return 0; 3016 } 3017 3018 /** 3019 * Isolated mode. 3020 * 3021 * @see rte_flow_isolate() 3022 * @see rte_flow_ops 3023 */ 3024 int 3025 mlx5_flow_isolate(struct rte_eth_dev *dev, 3026 int enable, 3027 struct rte_flow_error *error) 3028 { 3029 struct mlx5_priv *priv = dev->data->dev_private; 3030 3031 if (dev->data->dev_started) { 3032 rte_flow_error_set(error, EBUSY, 3033 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 3034 NULL, 3035 "port must be stopped first"); 3036 return -rte_errno; 3037 } 3038 priv->isolated = !!enable; 3039 if (enable) 3040 dev->dev_ops = &mlx5_dev_ops_isolate; 3041 else 3042 dev->dev_ops = &mlx5_dev_ops; 3043 return 0; 3044 } 3045 3046 /** 3047 * Query a flow. 
3048 * 3049 * @see rte_flow_query() 3050 * @see rte_flow_ops 3051 */ 3052 static int 3053 flow_drv_query(struct rte_eth_dev *dev, 3054 struct rte_flow *flow, 3055 const struct rte_flow_action *actions, 3056 void *data, 3057 struct rte_flow_error *error) 3058 { 3059 const struct mlx5_flow_driver_ops *fops; 3060 enum mlx5_flow_drv_type ftype = flow->drv_type; 3061 3062 assert(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX); 3063 fops = flow_get_drv_ops(ftype); 3064 3065 return fops->query(dev, flow, actions, data, error); 3066 } 3067 3068 /** 3069 * Query a flow. 3070 * 3071 * @see rte_flow_query() 3072 * @see rte_flow_ops 3073 */ 3074 int 3075 mlx5_flow_query(struct rte_eth_dev *dev, 3076 struct rte_flow *flow, 3077 const struct rte_flow_action *actions, 3078 void *data, 3079 struct rte_flow_error *error) 3080 { 3081 int ret; 3082 3083 ret = flow_drv_query(dev, flow, actions, data, error); 3084 if (ret < 0) 3085 return ret; 3086 return 0; 3087 } 3088 3089 /** 3090 * Convert a flow director filter to a generic flow. 3091 * 3092 * @param dev 3093 * Pointer to Ethernet device. 3094 * @param fdir_filter 3095 * Flow director filter to add. 3096 * @param attributes 3097 * Generic flow parameters structure. 3098 * 3099 * @return 3100 * 0 on success, a negative errno value otherwise and rte_errno is set. 3101 */ 3102 static int 3103 flow_fdir_filter_convert(struct rte_eth_dev *dev, 3104 const struct rte_eth_fdir_filter *fdir_filter, 3105 struct mlx5_fdir *attributes) 3106 { 3107 struct mlx5_priv *priv = dev->data->dev_private; 3108 const struct rte_eth_fdir_input *input = &fdir_filter->input; 3109 const struct rte_eth_fdir_masks *mask = 3110 &dev->data->dev_conf.fdir_conf.mask; 3111 3112 /* Validate queue number. */ 3113 if (fdir_filter->action.rx_queue >= priv->rxqs_n) { 3114 DRV_LOG(ERR, "port %u invalid queue number %d", 3115 dev->data->port_id, fdir_filter->action.rx_queue); 3116 rte_errno = EINVAL; 3117 return -rte_errno; 3118 } 3119 attributes->attr.ingress = 1; 3120 attributes->items[0] = (struct rte_flow_item) { 3121 .type = RTE_FLOW_ITEM_TYPE_ETH, 3122 .spec = &attributes->l2, 3123 .mask = &attributes->l2_mask, 3124 }; 3125 switch (fdir_filter->action.behavior) { 3126 case RTE_ETH_FDIR_ACCEPT: 3127 attributes->actions[0] = (struct rte_flow_action){ 3128 .type = RTE_FLOW_ACTION_TYPE_QUEUE, 3129 .conf = &attributes->queue, 3130 }; 3131 break; 3132 case RTE_ETH_FDIR_REJECT: 3133 attributes->actions[0] = (struct rte_flow_action){ 3134 .type = RTE_FLOW_ACTION_TYPE_DROP, 3135 }; 3136 break; 3137 default: 3138 DRV_LOG(ERR, "port %u invalid behavior %d", 3139 dev->data->port_id, 3140 fdir_filter->action.behavior); 3141 rte_errno = ENOTSUP; 3142 return -rte_errno; 3143 } 3144 attributes->queue.index = fdir_filter->action.rx_queue; 3145 /* Handle L3. 
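 * Build the IPv4/IPv6 pattern item (spec and mask) from the flow director
 * input fields and the globally configured flow director masks.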
*/ 3146 switch (fdir_filter->input.flow_type) { 3147 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 3148 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 3149 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 3150 attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){ 3151 .src_addr = input->flow.ip4_flow.src_ip, 3152 .dst_addr = input->flow.ip4_flow.dst_ip, 3153 .time_to_live = input->flow.ip4_flow.ttl, 3154 .type_of_service = input->flow.ip4_flow.tos, 3155 }; 3156 attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){ 3157 .src_addr = mask->ipv4_mask.src_ip, 3158 .dst_addr = mask->ipv4_mask.dst_ip, 3159 .time_to_live = mask->ipv4_mask.ttl, 3160 .type_of_service = mask->ipv4_mask.tos, 3161 .next_proto_id = mask->ipv4_mask.proto, 3162 }; 3163 attributes->items[1] = (struct rte_flow_item){ 3164 .type = RTE_FLOW_ITEM_TYPE_IPV4, 3165 .spec = &attributes->l3, 3166 .mask = &attributes->l3_mask, 3167 }; 3168 break; 3169 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 3170 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 3171 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 3172 attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){ 3173 .hop_limits = input->flow.ipv6_flow.hop_limits, 3174 .proto = input->flow.ipv6_flow.proto, 3175 }; 3176 3177 memcpy(attributes->l3.ipv6.hdr.src_addr, 3178 input->flow.ipv6_flow.src_ip, 3179 RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); 3180 memcpy(attributes->l3.ipv6.hdr.dst_addr, 3181 input->flow.ipv6_flow.dst_ip, 3182 RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); 3183 memcpy(attributes->l3_mask.ipv6.hdr.src_addr, 3184 mask->ipv6_mask.src_ip, 3185 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); 3186 memcpy(attributes->l3_mask.ipv6.hdr.dst_addr, 3187 mask->ipv6_mask.dst_ip, 3188 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); 3189 attributes->items[1] = (struct rte_flow_item){ 3190 .type = RTE_FLOW_ITEM_TYPE_IPV6, 3191 .spec = &attributes->l3, 3192 .mask = &attributes->l3_mask, 3193 }; 3194 break; 3195 default: 3196 DRV_LOG(ERR, "port %u invalid flow type%d", 3197 dev->data->port_id, fdir_filter->input.flow_type); 3198 rte_errno = ENOTSUP; 3199 return -rte_errno; 3200 } 3201 /* Handle L4. 
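 * Build the UDP/TCP pattern item from the filter's L4 ports and the global
 * port masks; the *_OTHER flow types carry no L4 item.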
*/ 3202 switch (fdir_filter->input.flow_type) { 3203 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 3204 attributes->l4.udp.hdr = (struct rte_udp_hdr){ 3205 .src_port = input->flow.udp4_flow.src_port, 3206 .dst_port = input->flow.udp4_flow.dst_port, 3207 }; 3208 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ 3209 .src_port = mask->src_port_mask, 3210 .dst_port = mask->dst_port_mask, 3211 }; 3212 attributes->items[2] = (struct rte_flow_item){ 3213 .type = RTE_FLOW_ITEM_TYPE_UDP, 3214 .spec = &attributes->l4, 3215 .mask = &attributes->l4_mask, 3216 }; 3217 break; 3218 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 3219 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ 3220 .src_port = input->flow.tcp4_flow.src_port, 3221 .dst_port = input->flow.tcp4_flow.dst_port, 3222 }; 3223 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ 3224 .src_port = mask->src_port_mask, 3225 .dst_port = mask->dst_port_mask, 3226 }; 3227 attributes->items[2] = (struct rte_flow_item){ 3228 .type = RTE_FLOW_ITEM_TYPE_TCP, 3229 .spec = &attributes->l4, 3230 .mask = &attributes->l4_mask, 3231 }; 3232 break; 3233 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 3234 attributes->l4.udp.hdr = (struct rte_udp_hdr){ 3235 .src_port = input->flow.udp6_flow.src_port, 3236 .dst_port = input->flow.udp6_flow.dst_port, 3237 }; 3238 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ 3239 .src_port = mask->src_port_mask, 3240 .dst_port = mask->dst_port_mask, 3241 }; 3242 attributes->items[2] = (struct rte_flow_item){ 3243 .type = RTE_FLOW_ITEM_TYPE_UDP, 3244 .spec = &attributes->l4, 3245 .mask = &attributes->l4_mask, 3246 }; 3247 break; 3248 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 3249 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ 3250 .src_port = input->flow.tcp6_flow.src_port, 3251 .dst_port = input->flow.tcp6_flow.dst_port, 3252 }; 3253 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ 3254 .src_port = mask->src_port_mask, 3255 .dst_port = mask->dst_port_mask, 3256 }; 3257 attributes->items[2] = (struct rte_flow_item){ 3258 .type = RTE_FLOW_ITEM_TYPE_TCP, 3259 .spec = &attributes->l4, 3260 .mask = &attributes->l4_mask, 3261 }; 3262 break; 3263 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 3264 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 3265 break; 3266 default: 3267 DRV_LOG(ERR, "port %u invalid flow type%d", 3268 dev->data->port_id, fdir_filter->input.flow_type); 3269 rte_errno = ENOTSUP; 3270 return -rte_errno; 3271 } 3272 return 0; 3273 } 3274 3275 #define FLOW_FDIR_CMP(f1, f2, fld) \ 3276 memcmp(&(f1)->fld, &(f2)->fld, sizeof(f1->fld)) 3277 3278 /** 3279 * Compare two FDIR flows. If items and actions are identical, the two flows are 3280 * regarded as same. 3281 * 3282 * @param dev 3283 * Pointer to Ethernet device. 3284 * @param f1 3285 * FDIR flow to compare. 3286 * @param f2 3287 * FDIR flow to compare. 3288 * 3289 * @return 3290 * Zero on match, 1 otherwise. 3291 */ 3292 static int 3293 flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2) 3294 { 3295 if (FLOW_FDIR_CMP(f1, f2, attr) || 3296 FLOW_FDIR_CMP(f1, f2, l2) || 3297 FLOW_FDIR_CMP(f1, f2, l2_mask) || 3298 FLOW_FDIR_CMP(f1, f2, l3) || 3299 FLOW_FDIR_CMP(f1, f2, l3_mask) || 3300 FLOW_FDIR_CMP(f1, f2, l4) || 3301 FLOW_FDIR_CMP(f1, f2, l4_mask) || 3302 FLOW_FDIR_CMP(f1, f2, actions[0].type)) 3303 return 1; 3304 if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE && 3305 FLOW_FDIR_CMP(f1, f2, queue)) 3306 return 1; 3307 return 0; 3308 } 3309 3310 /** 3311 * Search device flow list to find out a matched FDIR flow. 3312 * 3313 * @param dev 3314 * Pointer to Ethernet device. 
3315 * @param fdir_flow 3316 * FDIR flow to lookup. 3317 * 3318 * @return 3319 * Pointer of flow if found, NULL otherwise. 3320 */ 3321 static struct rte_flow * 3322 flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow) 3323 { 3324 struct mlx5_priv *priv = dev->data->dev_private; 3325 struct rte_flow *flow = NULL; 3326 3327 assert(fdir_flow); 3328 TAILQ_FOREACH(flow, &priv->flows, next) { 3329 if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) { 3330 DRV_LOG(DEBUG, "port %u found FDIR flow %p", 3331 dev->data->port_id, (void *)flow); 3332 break; 3333 } 3334 } 3335 return flow; 3336 } 3337 3338 /** 3339 * Add new flow director filter and store it in list. 3340 * 3341 * @param dev 3342 * Pointer to Ethernet device. 3343 * @param fdir_filter 3344 * Flow director filter to add. 3345 * 3346 * @return 3347 * 0 on success, a negative errno value otherwise and rte_errno is set. 3348 */ 3349 static int 3350 flow_fdir_filter_add(struct rte_eth_dev *dev, 3351 const struct rte_eth_fdir_filter *fdir_filter) 3352 { 3353 struct mlx5_priv *priv = dev->data->dev_private; 3354 struct mlx5_fdir *fdir_flow; 3355 struct rte_flow *flow; 3356 int ret; 3357 3358 fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0); 3359 if (!fdir_flow) { 3360 rte_errno = ENOMEM; 3361 return -rte_errno; 3362 } 3363 ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow); 3364 if (ret) 3365 goto error; 3366 flow = flow_fdir_filter_lookup(dev, fdir_flow); 3367 if (flow) { 3368 rte_errno = EEXIST; 3369 goto error; 3370 } 3371 flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr, 3372 fdir_flow->items, fdir_flow->actions, true, 3373 NULL); 3374 if (!flow) 3375 goto error; 3376 assert(!flow->fdir); 3377 flow->fdir = fdir_flow; 3378 DRV_LOG(DEBUG, "port %u created FDIR flow %p", 3379 dev->data->port_id, (void *)flow); 3380 return 0; 3381 error: 3382 rte_free(fdir_flow); 3383 return -rte_errno; 3384 } 3385 3386 /** 3387 * Delete specific filter. 3388 * 3389 * @param dev 3390 * Pointer to Ethernet device. 3391 * @param fdir_filter 3392 * Filter to be deleted. 3393 * 3394 * @return 3395 * 0 on success, a negative errno value otherwise and rte_errno is set. 3396 */ 3397 static int 3398 flow_fdir_filter_delete(struct rte_eth_dev *dev, 3399 const struct rte_eth_fdir_filter *fdir_filter) 3400 { 3401 struct mlx5_priv *priv = dev->data->dev_private; 3402 struct rte_flow *flow; 3403 struct mlx5_fdir fdir_flow = { 3404 .attr.group = 0, 3405 }; 3406 int ret; 3407 3408 ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow); 3409 if (ret) 3410 return -rte_errno; 3411 flow = flow_fdir_filter_lookup(dev, &fdir_flow); 3412 if (!flow) { 3413 rte_errno = ENOENT; 3414 return -rte_errno; 3415 } 3416 flow_list_destroy(dev, &priv->flows, flow); 3417 DRV_LOG(DEBUG, "port %u deleted FDIR flow %p", 3418 dev->data->port_id, (void *)flow); 3419 return 0; 3420 } 3421 3422 /** 3423 * Update queue for specific filter. 3424 * 3425 * @param dev 3426 * Pointer to Ethernet device. 3427 * @param fdir_filter 3428 * Filter to be updated. 3429 * 3430 * @return 3431 * 0 on success, a negative errno value otherwise and rte_errno is set. 3432 */ 3433 static int 3434 flow_fdir_filter_update(struct rte_eth_dev *dev, 3435 const struct rte_eth_fdir_filter *fdir_filter) 3436 { 3437 int ret; 3438 3439 ret = flow_fdir_filter_delete(dev, fdir_filter); 3440 if (ret) 3441 return ret; 3442 return flow_fdir_filter_add(dev, fdir_filter); 3443 } 3444 3445 /** 3446 * Flush all filters. 
3447 * 3448 * @param dev 3449 * Pointer to Ethernet device. 3450 */ 3451 static void 3452 flow_fdir_filter_flush(struct rte_eth_dev *dev) 3453 { 3454 struct mlx5_priv *priv = dev->data->dev_private; 3455 3456 mlx5_flow_list_flush(dev, &priv->flows); 3457 } 3458 3459 /** 3460 * Get flow director information. 3461 * 3462 * @param dev 3463 * Pointer to Ethernet device. 3464 * @param[out] fdir_info 3465 * Resulting flow director information. 3466 */ 3467 static void 3468 flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) 3469 { 3470 struct rte_eth_fdir_masks *mask = 3471 &dev->data->dev_conf.fdir_conf.mask; 3472 3473 fdir_info->mode = dev->data->dev_conf.fdir_conf.mode; 3474 fdir_info->guarant_spc = 0; 3475 rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask)); 3476 fdir_info->max_flexpayload = 0; 3477 fdir_info->flow_types_mask[0] = 0; 3478 fdir_info->flex_payload_unit = 0; 3479 fdir_info->max_flex_payload_segment_num = 0; 3480 fdir_info->flex_payload_limit = 0; 3481 memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf)); 3482 } 3483 3484 /** 3485 * Deal with flow director operations. 3486 * 3487 * @param dev 3488 * Pointer to Ethernet device. 3489 * @param filter_op 3490 * Operation to perform. 3491 * @param arg 3492 * Pointer to operation-specific structure. 3493 * 3494 * @return 3495 * 0 on success, a negative errno value otherwise and rte_errno is set. 3496 */ 3497 static int 3498 flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, 3499 void *arg) 3500 { 3501 enum rte_fdir_mode fdir_mode = 3502 dev->data->dev_conf.fdir_conf.mode; 3503 3504 if (filter_op == RTE_ETH_FILTER_NOP) 3505 return 0; 3506 if (fdir_mode != RTE_FDIR_MODE_PERFECT && 3507 fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 3508 DRV_LOG(ERR, "port %u flow director mode %d not supported", 3509 dev->data->port_id, fdir_mode); 3510 rte_errno = EINVAL; 3511 return -rte_errno; 3512 } 3513 switch (filter_op) { 3514 case RTE_ETH_FILTER_ADD: 3515 return flow_fdir_filter_add(dev, arg); 3516 case RTE_ETH_FILTER_UPDATE: 3517 return flow_fdir_filter_update(dev, arg); 3518 case RTE_ETH_FILTER_DELETE: 3519 return flow_fdir_filter_delete(dev, arg); 3520 case RTE_ETH_FILTER_FLUSH: 3521 flow_fdir_filter_flush(dev); 3522 break; 3523 case RTE_ETH_FILTER_INFO: 3524 flow_fdir_info_get(dev, arg); 3525 break; 3526 default: 3527 DRV_LOG(DEBUG, "port %u unknown operation %u", 3528 dev->data->port_id, filter_op); 3529 rte_errno = EINVAL; 3530 return -rte_errno; 3531 } 3532 return 0; 3533 } 3534 3535 /** 3536 * Manage filter operations. 3537 * 3538 * @param dev 3539 * Pointer to Ethernet device structure. 3540 * @param filter_type 3541 * Filter type. 3542 * @param filter_op 3543 * Operation to perform. 3544 * @param arg 3545 * Pointer to operation-specific structure. 3546 * 3547 * @return 3548 * 0 on success, a negative errno value otherwise and rte_errno is set. 
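 *
 * For illustration (port_id is assumed to be a valid mlx5 port), applications
 * reach this callback through the generic filter control API, e.g. to fetch
 * the rte_flow ops:
 *
 * @code
 * const struct rte_flow_ops *ops = NULL;
 *
 * rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *			   RTE_ETH_FILTER_GET, &ops);
 * @endcode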
3549 */ 3550 int 3551 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, 3552 enum rte_filter_type filter_type, 3553 enum rte_filter_op filter_op, 3554 void *arg) 3555 { 3556 switch (filter_type) { 3557 case RTE_ETH_FILTER_GENERIC: 3558 if (filter_op != RTE_ETH_FILTER_GET) { 3559 rte_errno = EINVAL; 3560 return -rte_errno; 3561 } 3562 *(const void **)arg = &mlx5_flow_ops; 3563 return 0; 3564 case RTE_ETH_FILTER_FDIR: 3565 return flow_fdir_ctrl_func(dev, filter_op, arg); 3566 default: 3567 DRV_LOG(ERR, "port %u filter type (%d) not supported", 3568 dev->data->port_id, filter_type); 3569 rte_errno = ENOTSUP; 3570 return -rte_errno; 3571 } 3572 return 0; 3573 } 3574 3575 #define MLX5_POOL_QUERY_FREQ_US 1000000 3576 3577 /** 3578 * Set the periodic procedure for triggering asynchronous batch queries for all 3579 * the counter pools. 3580 * 3581 * @param[in] sh 3582 * Pointer to mlx5_ibv_shared object. 3583 */ 3584 void 3585 mlx5_set_query_alarm(struct mlx5_ibv_shared *sh) 3586 { 3587 struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(sh, 0, 0); 3588 uint32_t pools_n = rte_atomic16_read(&cont->n_valid); 3589 uint32_t us; 3590 3591 cont = MLX5_CNT_CONTAINER(sh, 1, 0); 3592 pools_n += rte_atomic16_read(&cont->n_valid); 3593 us = MLX5_POOL_QUERY_FREQ_US / pools_n; 3594 DRV_LOG(DEBUG, "Set alarm for %u pools each %u us\n", pools_n, us); 3595 if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) { 3596 sh->cmng.query_thread_on = 0; 3597 DRV_LOG(ERR, "Cannot reinitialize query alarm\n"); 3598 } else { 3599 sh->cmng.query_thread_on = 1; 3600 } 3601 } 3602 3603 /** 3604 * The periodic procedure for triggering asynchronous batch queries for all the 3605 * counter pools. This function is probably called by the host thread. 3606 * 3607 * @param[in] arg 3608 * The parameter for the alarm process. 3609 */ 3610 void 3611 mlx5_flow_query_alarm(void *arg) 3612 { 3613 struct mlx5_ibv_shared *sh = arg; 3614 struct mlx5_devx_obj *dcs; 3615 uint16_t offset; 3616 int ret; 3617 uint8_t batch = sh->cmng.batch; 3618 uint16_t pool_index = sh->cmng.pool_index; 3619 struct mlx5_pools_container *cont; 3620 struct mlx5_pools_container *mcont; 3621 struct mlx5_flow_counter_pool *pool; 3622 3623 if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES) 3624 goto set_alarm; 3625 next_container: 3626 cont = MLX5_CNT_CONTAINER(sh, batch, 1); 3627 mcont = MLX5_CNT_CONTAINER(sh, batch, 0); 3628 /* Check if resize was done and need to flip a container. */ 3629 if (cont != mcont) { 3630 if (cont->pools) { 3631 /* Clean the old container. */ 3632 rte_free(cont->pools); 3633 memset(cont, 0, sizeof(*cont)); 3634 } 3635 rte_cio_wmb(); 3636 /* Flip the host container. */ 3637 sh->cmng.mhi[batch] ^= (uint8_t)2; 3638 cont = mcont; 3639 } 3640 if (!cont->pools) { 3641 /* 2 empty containers case is unexpected. */ 3642 if (unlikely(batch != sh->cmng.batch)) 3643 goto set_alarm; 3644 batch ^= 0x1; 3645 pool_index = 0; 3646 goto next_container; 3647 } 3648 pool = cont->pools[pool_index]; 3649 if (pool->raw_hw) 3650 /* There is a pool query in progress. */ 3651 goto set_alarm; 3652 pool->raw_hw = 3653 LIST_FIRST(&sh->cmng.free_stat_raws); 3654 if (!pool->raw_hw) 3655 /* No free counter statistics raw memory. */ 3656 goto set_alarm; 3657 dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read 3658 (&pool->a64_dcs); 3659 offset = batch ? 
0 : dcs->id % MLX5_COUNTERS_PER_POOL; 3660 ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL - 3661 offset, NULL, NULL, 3662 pool->raw_hw->mem_mng->dm->id, 3663 (void *)(uintptr_t) 3664 (pool->raw_hw->data + offset), 3665 sh->devx_comp, 3666 (uint64_t)(uintptr_t)pool); 3667 if (ret) { 3668 DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID" 3669 " %d\n", pool->min_dcs->id); 3670 pool->raw_hw = NULL; 3671 goto set_alarm; 3672 } 3673 pool->raw_hw->min_dcs_id = dcs->id; 3674 LIST_REMOVE(pool->raw_hw, next); 3675 sh->cmng.pending_queries++; 3676 pool_index++; 3677 if (pool_index >= rte_atomic16_read(&cont->n_valid)) { 3678 batch ^= 0x1; 3679 pool_index = 0; 3680 } 3681 set_alarm: 3682 sh->cmng.batch = batch; 3683 sh->cmng.pool_index = pool_index; 3684 mlx5_set_query_alarm(sh); 3685 } 3686 3687 /** 3688 * Handler for the HW respond about ready values from an asynchronous batch 3689 * query. This function is probably called by the host thread. 3690 * 3691 * @param[in] sh 3692 * The pointer to the shared IB device context. 3693 * @param[in] async_id 3694 * The Devx async ID. 3695 * @param[in] status 3696 * The status of the completion. 3697 */ 3698 void 3699 mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh, 3700 uint64_t async_id, int status) 3701 { 3702 struct mlx5_flow_counter_pool *pool = 3703 (struct mlx5_flow_counter_pool *)(uintptr_t)async_id; 3704 struct mlx5_counter_stats_raw *raw_to_free; 3705 3706 if (unlikely(status)) { 3707 raw_to_free = pool->raw_hw; 3708 } else { 3709 raw_to_free = pool->raw; 3710 rte_spinlock_lock(&pool->sl); 3711 pool->raw = pool->raw_hw; 3712 rte_spinlock_unlock(&pool->sl); 3713 rte_atomic64_add(&pool->query_gen, 1); 3714 /* Be sure the new raw counters data is updated in memory. */ 3715 rte_cio_wmb(); 3716 } 3717 LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next); 3718 pool->raw_hw = NULL; 3719 sh->cmng.pending_queries--; 3720 } 3721 3722 /** 3723 * Translate the rte_flow group index to HW table value. 3724 * 3725 * @param[in] attributes 3726 * Pointer to flow attributes 3727 * @param[in] external 3728 * Value is part of flow rule created by request external to PMD. 3729 * @param[in] group 3730 * rte_flow group index value. 3731 * @param[out] table 3732 * HW table value. 3733 * @param[out] error 3734 * Pointer to error structure. 3735 * 3736 * @return 3737 * 0 on success, a negative errno value otherwise and rte_errno is set. 3738 */ 3739 int 3740 mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external, 3741 uint32_t group, uint32_t *table, 3742 struct rte_flow_error *error) 3743 { 3744 if (attributes->transfer && external) { 3745 if (group == UINT32_MAX) 3746 return rte_flow_error_set 3747 (error, EINVAL, 3748 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, 3749 NULL, 3750 "group index not supported"); 3751 *table = group + 1; 3752 } else { 3753 *table = group; 3754 } 3755 return 0; 3756 } 3757