/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_flow.h"
#include "mlx5_glue.h"
#include "mlx5_prm.h"
#include "mlx5_rxtx.h"

/* Dev ops structure defined in mlx5.c */
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;

/** Device flow drivers. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;
#endif
extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;

const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;

const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
	[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
#endif
	[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
	[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
};

enum mlx5_expansion {
	MLX5_EXPANSION_ROOT,
	MLX5_EXPANSION_ROOT_OUTER,
	MLX5_EXPANSION_ROOT_ETH_VLAN,
	MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_ETH,
	MLX5_EXPANSION_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_VLAN,
	MLX5_EXPANSION_OUTER_IPV4,
	MLX5_EXPANSION_OUTER_IPV4_UDP,
	MLX5_EXPANSION_OUTER_IPV4_TCP,
	MLX5_EXPANSION_OUTER_IPV6,
	MLX5_EXPANSION_OUTER_IPV6_UDP,
	MLX5_EXPANSION_OUTER_IPV6_TCP,
	MLX5_EXPANSION_VXLAN,
	MLX5_EXPANSION_VXLAN_GPE,
	MLX5_EXPANSION_GRE,
	MLX5_EXPANSION_MPLS,
	MLX5_EXPANSION_ETH,
	MLX5_EXPANSION_ETH_VLAN,
	MLX5_EXPANSION_VLAN,
	MLX5_EXPANSION_IPV4,
	MLX5_EXPANSION_IPV4_UDP,
	MLX5_EXPANSION_IPV4_TCP,
	MLX5_EXPANSION_IPV6,
	MLX5_EXPANSION_IPV6_UDP,
	MLX5_EXPANSION_IPV6_TCP,
};

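/*
 * Illustrative note (not part of the driver logic): the expansion table below
 * describes, for each item type, which items an RSS flow may be expanded
 * into.  For example, an "eth / ipv4 / end" pattern whose RSS action requests
 * UDP hashing can be expanded, by walking the graph from MLX5_EXPANSION_ROOT,
 * into both an "eth / ipv4" and an "eth / ipv4 / udp" sub-flow.
 */
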
/** Supported expansion of items. */
static const struct rte_flow_expand_node mlx5_support_expansion[] = {
	[MLX5_EXPANSION_ROOT] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						 MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
						 MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_OUTER_ETH] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6,
						 MLX5_EXPANSION_MPLS),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_OUTER_IPV4] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV4_UDP,
			 MLX5_EXPANSION_OUTER_IPV4_TCP,
			 MLX5_EXPANSION_GRE),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						 MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_OUTER_IPV6] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV6_UDP,
			 MLX5_EXPANSION_OUTER_IPV6_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						 MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
	[MLX5_EXPANSION_VXLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
	[MLX5_EXPANSION_VXLAN_GPE] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						 MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
	},
	[MLX5_EXPANSION_GRE] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
		.type = RTE_FLOW_ITEM_TYPE_GRE,
	},
	[MLX5_EXPANSION_MPLS] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_MPLS,
	},
	[MLX5_EXPANSION_ETH] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_IPV4] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
						 MLX5_EXPANSION_IPV4_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_IPV4_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_IPV6] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
						 MLX5_EXPANSION_IPV6_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_IPV6_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
};

static const struct rte_flow_ops mlx5_flow_ops = {
	.validate = mlx5_flow_validate,
	.create = mlx5_flow_create,
	.destroy = mlx5_flow_destroy,
	.flush = mlx5_flow_flush,
	.isolate = mlx5_flow_isolate,
	.query = mlx5_flow_query,
};

/* Convert FDIR request to Generic flow. */
struct mlx5_fdir {
	struct rte_flow_attr attr;
	struct rte_flow_item items[4];
	struct rte_flow_item_eth l2;
	struct rte_flow_item_eth l2_mask;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3_mask;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4_mask;
	struct rte_flow_action actions[2];
	struct rte_flow_action_queue queue;
};

/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};

/* Map of Verbs to Flow priority with 16 Verbs priorities. */
static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
	{ 9, 10, 11 }, { 12, 13, 14 },
};

/* Tunnel information. */
struct mlx5_flow_tunnel_info {
	uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
};

static struct mlx5_flow_tunnel_info tunnels_info[] = {
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GRE,
		.ptype = RTE_PTYPE_TUNNEL_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_NVGRE,
		.ptype = RTE_PTYPE_TUNNEL_NVGRE,
	},
};

/**
 * Discover the maximum number of priorities available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
			.port = (uint8_t)priv->ibv_port,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
	uint16_t vprio[] = { 8, 16 };
	int i;
	int priority = 0;

	if (!drop) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	for (i = 0; i != RTE_DIM(vprio); i++) {
		flow_attr.attr.priority = vprio[i] - 1;
		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
		priority = vprio[i];
	}
	mlx5_hrxq_drop_release(dev);
	switch (priority) {
	case 8:
		priority = RTE_DIM(priority_map_3);
		break;
	case 16:
		priority = RTE_DIM(priority_map_5);
		break;
	default:
		rte_errno = ENOTSUP;
		DRV_LOG(ERR,
			"port %u verbs maximum priority: %d expected 8/16",
			dev->data->port_id, priority);
		return -rte_errno;
	}
	DRV_LOG(INFO, "port %u flow maximum priority: %d",
		dev->data->port_id, priority);
	return priority;
}

/**
 * Adjust flow priority based on the highest layer and the requested priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] priority
 *   The rule base priority.
 * @param[in] subpriority
 *   The priority based on the items.
 *
 * @return
 *   The new priority.
 */
uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
				   uint32_t subpriority)
{
	uint32_t res = 0;
	struct mlx5_priv *priv = dev->data->dev_private;

	switch (priv->config.flow_prio) {
	case RTE_DIM(priority_map_3):
		res = priority_map_3[priority][subpriority];
		break;
	case RTE_DIM(priority_map_5):
		res = priority_map_5[priority][subpriority];
		break;
	}
	return res;
}

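/*
 * Worked example (illustrative only): when the device exposes 8 Verbs
 * priorities, flow_prio is RTE_DIM(priority_map_3) == 3 and the 3-row map
 * above is selected.  A rule with base priority 1 and item subpriority 2 is
 * then placed at Verbs priority priority_map_3[1][2] == 4.
 */
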
/**
 * Verify the @p item specifications (spec, last, mask) are compatible with the
 * NIC capabilities.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] mask
 *   @p item->mask or flow default bit-masks.
 * @param[in] nic_mask
 *   Bit-masks covering supported fields by the NIC to compare with user mask.
 * @param[in] size
 *   Bit-masks size in bytes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
			  const uint8_t *mask,
			  const uint8_t *nic_mask,
			  unsigned int size,
			  struct rte_flow_error *error)
{
	unsigned int i;

	assert(nic_mask);
	for (i = 0; i < size; ++i)
		if ((nic_mask[i] | mask[i]) != nic_mask[i])
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "mask enables non supported"
						  " bits");
	if (!item->spec && (item->mask || item->last))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "mask/last without a spec is not"
					  " supported");
	if (item->spec && item->last) {
		uint8_t spec[size];
		uint8_t last[size];
		unsigned int i;
		int ret;

		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
			last[i] = ((const uint8_t *)item->last)[i] & mask[i];
		}
		ret = memcmp(spec, last, size);
		if (ret != 0)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "range is not valid");
	}
	return 0;
}

/**
 * Adjust the hash fields according to the @p flow information.
 *
 * @param[in] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] tunnel
 *   1 when the hash field is for a tunnel item.
 * @param[in] layer_types
 *   ETH_RSS_* types.
 * @param[in] hash_fields
 *   Item hash fields.
 *
 * @return
 *   The hash fields that should be used.
 */
uint64_t
mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
			    int tunnel __rte_unused, uint64_t layer_types,
			    uint64_t hash_fields)
{
	struct rte_flow *flow = dev_flow->flow;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	int rss_request_inner = flow->rss.level >= 2;

	/* Check RSS hash level for tunnel. */
	if (tunnel && rss_request_inner)
		hash_fields |= IBV_RX_HASH_INNER;
	else if (tunnel || rss_request_inner)
		return 0;
#endif
	/* Check if requested layer matches RSS hash fields. */
	if (!(flow->rss.types & layer_types))
		return 0;
	return hash_fields;
}

/**
 * Look up and set the ptype in the Rx data part.  A single ptype can be used:
 * if several tunnel rules are used on this queue, the tunnel ptype will be
 * cleared.
 *
 * @param rxq_ctrl
 *   Rx queue to update.
 */
static void
flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	unsigned int i;
	uint32_t tunnel_ptype = 0;

	/* Look up for the ptype to use. */
	for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
		if (!rxq_ctrl->flow_tunnels_n[i])
			continue;
		if (!tunnel_ptype) {
			tunnel_ptype = tunnels_info[i].ptype;
		} else {
			tunnel_ptype = 0;
			break;
		}
	}
	rxq_ctrl->rxq.tunnel = tunnel_ptype;
}

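/*
 * Illustrative note: if a queue only carries VXLAN tunnel rules, its
 * rxq.tunnel field is set to RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP and
 * the datapath can report that ptype.  As soon as a second tunnel kind
 * (e.g. GRE) is added on the same queue, the loop above resets the value to
 * 0 and no tunnel ptype is reported.
 */
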
/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
 * flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] dev_flow
 *   Pointer to device flow structure.
 */
static void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow *flow = dev_flow->flow;
	const int mark = !!(flow->actions &
			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	for (i = 0; i != flow->rss.queue_num; ++i) {
		int idx = (*flow->queue)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		if (mark) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n++;
		}
		if (tunnel) {
			unsigned int j;

			/* Increase the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_flow->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]++;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Pointer to flow structure.
 */
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow *dev_flow;

	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
		flow_drv_rxq_flags_set(dev, dev_flow);
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * device flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] dev_flow
 *   Pointer to the device flow.
 */
static void
flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow *flow = dev_flow->flow;
	const int mark = !!(flow->actions &
			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	assert(dev->data->dev_started);
	for (i = 0; i != flow->rss.queue_num; ++i) {
		int idx = (*flow->queue)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		if (mark) {
			rxq_ctrl->flow_mark_n--;
			rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
		}
		if (tunnel) {
			unsigned int j;

			/* Decrease the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_flow->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]--;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * @p flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the flow.
 */
static void
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow *dev_flow;

	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
		flow_drv_rxq_flags_trim(dev, dev_flow);
}

/**
 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl;
		unsigned int j;

		if (!(*priv->rxqs)[i])
			continue;
		rxq_ctrl = container_of((*priv->rxqs)[i],
					struct mlx5_rxq_ctrl, rxq);
		rxq_ctrl->flow_mark_n = 0;
		rxq_ctrl->rxq.mark = 0;
		for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
			rxq_ctrl->flow_tunnels_n[j] = 0;
		rxq_ctrl->rxq.tunnel = 0;
	}
}

/**
 * Validate the flag action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_flag(uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{

	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't mark and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 flag"
					  " actions in same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "flag action not supported for "
					  "egress");
	return 0;
}

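/*
 * Example of the intended use (hypothetical caller, for illustration only):
 * a validation loop typically accumulates MLX5_FLOW_ACTION_* bits as it
 * walks the action list, e.g.
 *
 *	ret = mlx5_flow_validate_action_flag(action_flags, attr, error);
 *	if (ret < 0)
 *		return ret;
 *	action_flags |= MLX5_FLOW_ACTION_FLAG;
 *
 * so a later MARK or DROP action in the same rule is rejected by the checks
 * above.
 */
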
/**
 * Validate the mark action.
 *
 * @param[in] action
 *   Pointer to the mark action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
			       uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	const struct rte_flow_action_mark *mark = action->conf;

	if (!mark)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "configuration cannot be null");
	if (mark->id >= MLX5_FLOW_MARK_MAX)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &mark->id,
					  "mark id must be in 0 <= id < "
					  RTE_STR(MLX5_FLOW_MARK_MAX));
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and mark in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't flag and mark in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 mark actions in same"
					  " flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "mark action not supported for "
					  "egress");
	return 0;
}

/**
 * Validate the drop action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_drop(uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and mark in same flow");
	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "drop action not supported for "
					  "egress");
	return 0;
}

/**
 * Validate the queue action.
 *
 * @param[in] action
 *   Pointer to the queue action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
				uint64_t action_flags,
				struct rte_eth_dev *dev,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_queue *queue = action->conf;

	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (!priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No Rx queues configured");
	if (queue->index >= priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue index out of range");
	if (!(*priv->rxqs)[queue->index])
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue is not configured");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "queue action not supported for "
					  "egress");
	return 0;
}

/**
 * Validate the rss action.
 *
 * @param[in] action
 *   Pointer to the rss action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[in] item_flags
 *   Items that were detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
			      uint64_t action_flags,
			      struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attr,
			      uint64_t item_flags,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_rss *rss = action->conf;
	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions"
					  " in same flow");
	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
	    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->func,
					  "RSS hash function not supported");
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (rss->level > 2)
#else
	if (rss->level > 1)
#endif
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->level,
					  "tunnel RSS is not supported");
	/* Allow RSS key_len 0 in case of NULL (default) RSS key. */
	if (rss->key_len == 0 && rss->key != NULL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key length 0");
	if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too small");
	if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too large");
	if (rss->queue_num > priv->config.ind_table_max_size)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->queue_num,
					  "number of queues too large");
	if (rss->types & MLX5_RSS_HF_MASK)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->types,
					  "some RSS protocols are not"
					  " supported");
	if (!priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No Rx queues configured");
	if (!rss->queue_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No queues configured");
	for (i = 0; i != rss->queue_num; ++i) {
		if (!(*priv->rxqs)[rss->queue[i]])
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 &rss->queue[i], "queue is not configured");
	}
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "rss action not supported for "
					  "egress");
	if (rss->level > 1 && !tunnel)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "inner RSS is not supported for "
					  "non-tunnel flows");
	return 0;
}

/**
 * Validate the count action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "count action not supported for "
					  "egress");
	return 0;
}

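/*
 * Usage sketch (hypothetical values, for illustration only): a configuration
 * accepted by the RSS checks above would be an rte_flow_action_rss with
 * func = RTE_ETH_HASH_FUNCTION_TOEPLITZ, level = 1, key = NULL and
 * key_len = 0 (the default key is used), types restricted to supported
 * protocols such as ETH_RSS_IP, and a queue array no longer than
 * priv->config.ind_table_max_size whose entries all reference configured
 * Rx queues.
 */
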
/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attributes,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t priority_max = priv->config.flow_prio - 1;

	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL, "groups is not supported");
	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
	    attributes->priority >= priority_max)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL, "priority out of range");
	if (attributes->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "egress is not supported");
	if (attributes->transfer && !priv->config.dv_esw_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  NULL, "transfer is not supported");
	if (!attributes->ingress)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "ingress attribute is mandatory");
	return 0;
}

/**
 * Validate ICMP6 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
			      uint64_t item_flags,
			      uint8_t target_protocol,
			      struct rte_flow_error *error)
{
	const struct rte_flow_item_icmp6 *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
				      MLX5_FLOW_LAYER_OUTER_L3_IPV6;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ICMP6 layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv6 is mandatory to filter on"
					  " ICMP6");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_icmp6_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_icmp6_mask,
		 sizeof(struct rte_flow_item_icmp6), error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate ICMP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     uint8_t target_protocol,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_icmp *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
				      MLX5_FLOW_LAYER_OUTER_L3_IPV4;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ICMP layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv4 is mandatory to filter"
					  " on ICMP");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_icmp_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_icmp_mask,
		 sizeof(struct rte_flow_item_icmp), error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate Ethernet item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_eth *mask = item->mask;
	const struct rte_flow_item_eth nic_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.type = RTE_BE16(0xffff),
	};
	int ret;
	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
				       MLX5_FLOW_LAYER_OUTER_L2;

	if (item_flags & ethm)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L2 layers not supported");
	if (!mask)
		mask = &rte_flow_item_eth_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_eth),
					error);
	return ret;
}

/**
 * Validate VLAN item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] dev
 *   Ethernet device flow is being created on.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     struct rte_eth_dev *dev,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	const struct rte_flow_item_vlan nic_mask = {
		.tci = RTE_BE16(0x0fff),
		.inner_type = RTE_BE16(0xffff),
	};
	uint16_t vlan_tag = 0;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	int ret;
	const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
					MLX5_FLOW_LAYER_INNER_L4) :
				       (MLX5_FLOW_LAYER_OUTER_L3 |
					MLX5_FLOW_LAYER_OUTER_L4);
	const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
					MLX5_FLOW_LAYER_OUTER_VLAN;

	if (item_flags & vlanm)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple VLAN layers not supported");
	else if ((item_flags & l34m) != 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L2 layer cannot follow L3/L4 layer");
	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_vlan),
					error);
	if (ret)
		return ret;
	if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
		struct mlx5_priv *priv = dev->data->dev_private;

		if (priv->vmwa_context) {
			/*
			 * A non-NULL context means we are running in a virtual
			 * machine with SR-IOV enabled and have to create a
			 * VLAN interface so that the hypervisor sets up the
			 * E-Switch vport context correctly.  Creating multiple
			 * VLAN interfaces is avoided, so a VLAN tag mask
			 * cannot be supported.
			 */
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "VLAN tag mask is not"
						  " supported in virtual"
						  " environment");
		}
	}
	if (spec) {
		vlan_tag = spec->tci;
		vlan_tag &= mask->tci;
	}
	/*
	 * From the Verbs perspective an empty VLAN is equivalent
	 * to a packet without a VLAN layer.
	 */
	if (!vlan_tag)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "VLAN cannot be empty");
	return 0;
}

/**
 * Validate IPV4 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] acc_mask
 *   Acceptable mask; if NULL, the default internal mask
 *   will be used to check whether item fields are supported.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     const struct rte_flow_item_ipv4 *acc_mask,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *mask = item->mask;
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 nic_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.next_proto_id = 0xff,
		},
	};
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;
	uint8_t next_proto = 0xFF;

	if (item_flags & MLX5_FLOW_LAYER_IPIP) {
		if (mask && spec)
			next_proto = mask->hdr.next_proto_id &
				     spec->hdr.next_proto_id;
		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "multiple tunnel "
						  "not supported");
	}
	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "wrong tunnel type - IPv6 specified "
					  "but IPv4 item provided");
	if (item_flags & l3m)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L3 layers not supported");
	else if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an L4 layer.");
	else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
		  !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an NVGRE layer.");
	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	else if (mask->hdr.next_proto_id != 0 &&
		 mask->hdr.next_proto_id != 0xff)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
					  "partial mask is not supported"
					  " for protocol");
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					acc_mask ? (const uint8_t *)acc_mask
						 : (const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_ipv4),
					error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate IPV6 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] acc_mask
 *   Acceptable mask; if NULL, the default internal mask
 *   will be used to check whether item fields are supported.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     const struct rte_flow_item_ipv6 *acc_mask,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv6 *mask = item->mask;
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
			.hop_limits = 0xff,
		},
	};
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;
	uint8_t next_proto = 0xFF;

	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
		if (mask && spec)
			next_proto = mask->hdr.proto & spec->hdr.proto;
		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "multiple tunnel "
						  "not supported");
	}
	if (item_flags & MLX5_FLOW_LAYER_IPIP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "wrong tunnel type - IPv4 specified "
					  "but IPv6 item provided");
	if (item_flags & l3m)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L3 layers not supported");
	else if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an L4 layer.");
	else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
		  !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an NVGRE layer.");
	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					acc_mask ? (const uint8_t *)acc_mask
						 : (const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_ipv6),
					error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate UDP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    uint8_t target_protocol,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_udp *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with UDP layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 is mandatory to filter on L4");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_udp_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_udp_mask,
		 sizeof(struct rte_flow_item_udp), error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate TCP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[in] flow_mask
 *   mlx5 flow-specific (DV, verbs, etc.) supported header fields mask.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    uint8_t target_protocol,
			    const struct rte_flow_item_tcp *flow_mask,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_tcp *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	assert(flow_mask);
	if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with TCP layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 is mandatory to filter on L4");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_tcp_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)flow_mask,
		 sizeof(struct rte_flow_item_tcp), error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate VXLAN item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
			      uint64_t item_flags,
			      struct rte_flow_error *error)
{
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	int ret;
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };
	uint32_t vlan_id = 0;

	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple tunnel layers not"
					  " supported");
	/*
	 * Verify only UDPv4 is present as defined in
	 * https://tools.ietf.org/html/rfc7348
	 */
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "no outer UDP layer found");
	if (!mask)
		mask = &rte_flow_item_vxlan_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_vxlan_mask,
		 sizeof(struct rte_flow_item_vxlan),
		 error);
	if (ret < 0)
		return ret;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vlan_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vlan_id &= id.vlan_id;
	}
	/*
	 * Tunnel id 0 is equivalent to not adding a VXLAN layer: if only this
	 * layer is defined in the Verbs specification it is interpreted as a
	 * wildcard and all packets will match this rule; if it follows a full
	 * stack layer (ex: eth / ipv4 / udp), all packets matching the layers
	 * before will also match this rule.  To avoid such a situation, VNI 0
	 * is currently refused.
	 */
	if (!vlan_id)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VXLAN vni cannot be 0");
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VXLAN tunnel must be fully defined");
	return 0;
}

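/*
 * Note on the VNI handling above (informational): the VXLAN VNI is a 24-bit
 * field, so the 3 VNI bytes are copied into bytes 1-3 of a host 32-bit value
 * (byte 0 stays zero) and the spec is ANDed with the mask.  A resulting value
 * of 0 therefore means the VNI is not really matched, which is why it is
 * rejected.
 */
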
/**
 * Validate VXLAN_GPE item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
				  uint64_t item_flags,
				  struct rte_eth_dev *dev,
				  struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
	int ret;
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };
	uint32_t vlan_id = 0;

	if (!priv->config.l3_vxlan_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 VXLAN is not enabled by device"
					  " parameter and/or not configured in"
					  " firmware");
	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple tunnel layers not"
					  " supported");
	/*
	 * Verify only UDPv4 is present as defined in
	 * https://tools.ietf.org/html/rfc7348
	 */
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "no outer UDP layer found");
	if (!mask)
		mask = &rte_flow_item_vxlan_gpe_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
		 sizeof(struct rte_flow_item_vxlan_gpe),
		 error);
	if (ret < 0)
		return ret;
	if (spec) {
		if (spec->protocol)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "VxLAN-GPE protocol"
						  " not supported");
		memcpy(&id.vni[1], spec->vni, 3);
		vlan_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vlan_id &= id.vlan_id;
	}
	/*
	 * Tunnel id 0 is equivalent to not adding a VXLAN layer: if only this
	 * layer is defined in the Verbs specification it is interpreted as a
	 * wildcard and all packets will match this rule; if it follows a full
	 * stack layer (ex: eth / ipv4 / udp), all packets matching the layers
	 * before will also match this rule.  To avoid such a situation, VNI 0
	 * is currently refused.
	 */
	if (!vlan_id)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VXLAN-GPE vni cannot be 0");
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VXLAN-GPE tunnel must be fully"
					  " defined");
	return 0;
}

/**
 * Validate GRE Key item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit flags to mark detected items.
 * @param[in] gre_item
 *   Pointer to the GRE item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
				uint64_t item_flags,
				const struct rte_flow_item *gre_item,
				struct rte_flow_error *error)
{
	const rte_be32_t *mask = item->mask;
	int ret = 0;
	rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
	const struct rte_flow_item_gre *gre_spec = gre_item->spec;
	const struct rte_flow_item_gre *gre_mask = gre_item->mask;

	if (item_flags & MLX5_FLOW_LAYER_GRE_KEY)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Multiple GRE key not supported");
	if (!(item_flags & MLX5_FLOW_LAYER_GRE))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "No preceding GRE header");
	if (item_flags & MLX5_FLOW_LAYER_INNER)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "GRE key following a wrong item");
	if (!gre_mask)
		gre_mask = &rte_flow_item_gre_mask;
	if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
	    !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Key bit must be on");

	if (!mask)
		mask = &gre_key_default_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&gre_key_default_mask,
		 sizeof(rte_be32_t), error);
	return ret;
}

/**
 * Validate GRE item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit flags to mark detected items.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    uint8_t target_protocol,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_gre *spec __rte_unused = item->spec;
	const struct rte_flow_item_gre *mask = item->mask;
	int ret;
	const struct rte_flow_item_gre nic_mask = {
		.c_rsvd0_ver = RTE_BE16(0xB000),
		.protocol = RTE_BE16(UINT16_MAX),
	};

	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with this GRE layer");
	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple tunnel layers not"
					  " supported");
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 Layer is missing");
	if (!mask)
		mask = &rte_flow_item_gre_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&nic_mask,
		 sizeof(struct rte_flow_item_gre), error);
	if (ret < 0)
		return ret;
#ifndef HAVE_MLX5DV_DR
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
	if (spec && (spec->protocol & mask->protocol))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "without MPLS support the"
					  " specification cannot be used for"
					  " filtering");
#endif
#endif
	return 0;
}

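/*
 * Informational note on the GRE masks used above: in the big-endian
 * c_rsvd0_ver field, bit 0x8000 is the Checksum (C) flag, 0x2000 the Key (K)
 * flag and 0x1000 the Sequence (S) flag, so the 0x2000 test in the GRE key
 * validation checks the K bit and the 0xB000 nic_mask covers C, K and S.
 */
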
1862 * 1863 * @param[in] dev 1864 * Pointer to the rte_eth_dev structure. 1865 * @param[in] item 1866 * Item specification. 1867 * @param[in] item_flags 1868 * Bit-fields that holds the items detected until now. 1869 * @param[in] prev_layer 1870 * The protocol layer indicated in previous item. 1871 * @param[out] error 1872 * Pointer to error structure. 1873 * 1874 * @return 1875 * 0 on success, a negative errno value otherwise and rte_errno is set. 1876 */ 1877 int 1878 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, 1879 const struct rte_flow_item *item __rte_unused, 1880 uint64_t item_flags __rte_unused, 1881 uint64_t prev_layer __rte_unused, 1882 struct rte_flow_error *error) 1883 { 1884 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT 1885 const struct rte_flow_item_mpls *mask = item->mask; 1886 struct mlx5_priv *priv = dev->data->dev_private; 1887 int ret; 1888 1889 if (!priv->config.mpls_en) 1890 return rte_flow_error_set(error, ENOTSUP, 1891 RTE_FLOW_ERROR_TYPE_ITEM, item, 1892 "MPLS not supported or" 1893 " disabled in firmware" 1894 " configuration."); 1895 /* MPLS over IP, UDP, GRE is allowed */ 1896 if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 | 1897 MLX5_FLOW_LAYER_OUTER_L4_UDP | 1898 MLX5_FLOW_LAYER_GRE))) 1899 return rte_flow_error_set(error, EINVAL, 1900 RTE_FLOW_ERROR_TYPE_ITEM, item, 1901 "protocol filtering not compatible" 1902 " with MPLS layer"); 1903 /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */ 1904 if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) && 1905 !(item_flags & MLX5_FLOW_LAYER_GRE)) 1906 return rte_flow_error_set(error, ENOTSUP, 1907 RTE_FLOW_ERROR_TYPE_ITEM, item, 1908 "multiple tunnel layers not" 1909 " supported"); 1910 if (!mask) 1911 mask = &rte_flow_item_mpls_mask; 1912 ret = mlx5_flow_item_acceptable 1913 (item, (const uint8_t *)mask, 1914 (const uint8_t *)&rte_flow_item_mpls_mask, 1915 sizeof(struct rte_flow_item_mpls), error); 1916 if (ret < 0) 1917 return ret; 1918 return 0; 1919 #endif 1920 return rte_flow_error_set(error, ENOTSUP, 1921 RTE_FLOW_ERROR_TYPE_ITEM, item, 1922 "MPLS is not supported by Verbs, please" 1923 " update."); 1924 } 1925 1926 /** 1927 * Validate NVGRE item. 1928 * 1929 * @param[in] item 1930 * Item specification. 1931 * @param[in] item_flags 1932 * Bit flags to mark detected items. 1933 * @param[in] target_protocol 1934 * The next protocol in the previous item. 1935 * @param[out] error 1936 * Pointer to error structure. 1937 * 1938 * @return 1939 * 0 on success, a negative errno value otherwise and rte_errno is set. 
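 *
 * @note
 *   Like GRE, NVGRE must be the only tunnel in the pattern and requires an
 *   outer L3 item whose next protocol is GRE. An illustrative match on the
 *   TNI (values below are examples only):
 *
 * @code
 * struct rte_flow_item_nvgre nvgre_spec = {
 *         .tni = { 0x00, 0x12, 0x34 },
 * };
 * struct rte_flow_item pattern[] = {
 *         { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *         { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *         { .type = RTE_FLOW_ITEM_TYPE_NVGRE, .spec = &nvgre_spec },
 *         { .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * @endcode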
1940 */ 1941 int 1942 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item, 1943 uint64_t item_flags, 1944 uint8_t target_protocol, 1945 struct rte_flow_error *error) 1946 { 1947 const struct rte_flow_item_nvgre *mask = item->mask; 1948 int ret; 1949 1950 if (target_protocol != 0xff && target_protocol != IPPROTO_GRE) 1951 return rte_flow_error_set(error, EINVAL, 1952 RTE_FLOW_ERROR_TYPE_ITEM, item, 1953 "protocol filtering not compatible" 1954 " with this GRE layer"); 1955 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 1956 return rte_flow_error_set(error, ENOTSUP, 1957 RTE_FLOW_ERROR_TYPE_ITEM, item, 1958 "multiple tunnel layers not" 1959 " supported"); 1960 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3)) 1961 return rte_flow_error_set(error, ENOTSUP, 1962 RTE_FLOW_ERROR_TYPE_ITEM, item, 1963 "L3 Layer is missing"); 1964 if (!mask) 1965 mask = &rte_flow_item_nvgre_mask; 1966 ret = mlx5_flow_item_acceptable 1967 (item, (const uint8_t *)mask, 1968 (const uint8_t *)&rte_flow_item_nvgre_mask, 1969 sizeof(struct rte_flow_item_nvgre), error); 1970 if (ret < 0) 1971 return ret; 1972 return 0; 1973 } 1974 1975 static int 1976 flow_null_validate(struct rte_eth_dev *dev __rte_unused, 1977 const struct rte_flow_attr *attr __rte_unused, 1978 const struct rte_flow_item items[] __rte_unused, 1979 const struct rte_flow_action actions[] __rte_unused, 1980 struct rte_flow_error *error) 1981 { 1982 return rte_flow_error_set(error, ENOTSUP, 1983 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 1984 } 1985 1986 static struct mlx5_flow * 1987 flow_null_prepare(const struct rte_flow_attr *attr __rte_unused, 1988 const struct rte_flow_item items[] __rte_unused, 1989 const struct rte_flow_action actions[] __rte_unused, 1990 struct rte_flow_error *error) 1991 { 1992 rte_flow_error_set(error, ENOTSUP, 1993 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 1994 return NULL; 1995 } 1996 1997 static int 1998 flow_null_translate(struct rte_eth_dev *dev __rte_unused, 1999 struct mlx5_flow *dev_flow __rte_unused, 2000 const struct rte_flow_attr *attr __rte_unused, 2001 const struct rte_flow_item items[] __rte_unused, 2002 const struct rte_flow_action actions[] __rte_unused, 2003 struct rte_flow_error *error) 2004 { 2005 return rte_flow_error_set(error, ENOTSUP, 2006 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2007 } 2008 2009 static int 2010 flow_null_apply(struct rte_eth_dev *dev __rte_unused, 2011 struct rte_flow *flow __rte_unused, 2012 struct rte_flow_error *error) 2013 { 2014 return rte_flow_error_set(error, ENOTSUP, 2015 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2016 } 2017 2018 static void 2019 flow_null_remove(struct rte_eth_dev *dev __rte_unused, 2020 struct rte_flow *flow __rte_unused) 2021 { 2022 } 2023 2024 static void 2025 flow_null_destroy(struct rte_eth_dev *dev __rte_unused, 2026 struct rte_flow *flow __rte_unused) 2027 { 2028 } 2029 2030 static int 2031 flow_null_query(struct rte_eth_dev *dev __rte_unused, 2032 struct rte_flow *flow __rte_unused, 2033 const struct rte_flow_action *actions __rte_unused, 2034 void *data __rte_unused, 2035 struct rte_flow_error *error) 2036 { 2037 return rte_flow_error_set(error, ENOTSUP, 2038 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2039 } 2040 2041 /* Void driver to protect from null pointer reference. 
*/ 2042 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = { 2043 .validate = flow_null_validate, 2044 .prepare = flow_null_prepare, 2045 .translate = flow_null_translate, 2046 .apply = flow_null_apply, 2047 .remove = flow_null_remove, 2048 .destroy = flow_null_destroy, 2049 .query = flow_null_query, 2050 }; 2051 2052 /** 2053 * Select flow driver type according to flow attributes and device 2054 * configuration. 2055 * 2056 * @param[in] dev 2057 * Pointer to the dev structure. 2058 * @param[in] attr 2059 * Pointer to the flow attributes. 2060 * 2061 * @return 2062 * flow driver type, MLX5_FLOW_TYPE_MAX otherwise. 2063 */ 2064 static enum mlx5_flow_drv_type 2065 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) 2066 { 2067 struct mlx5_priv *priv = dev->data->dev_private; 2068 enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX; 2069 2070 if (attr->transfer && priv->config.dv_esw_en) 2071 type = MLX5_FLOW_TYPE_DV; 2072 if (!attr->transfer) 2073 type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV : 2074 MLX5_FLOW_TYPE_VERBS; 2075 return type; 2076 } 2077 2078 #define flow_get_drv_ops(type) flow_drv_ops[type] 2079 2080 /** 2081 * Flow driver validation API. This abstracts calling driver specific functions. 2082 * The type of flow driver is determined according to flow attributes. 2083 * 2084 * @param[in] dev 2085 * Pointer to the dev structure. 2086 * @param[in] attr 2087 * Pointer to the flow attributes. 2088 * @param[in] items 2089 * Pointer to the list of items. 2090 * @param[in] actions 2091 * Pointer to the list of actions. 2092 * @param[out] error 2093 * Pointer to the error structure. 2094 * 2095 * @return 2096 * 0 on success, a negative errno value otherwise and rte_errno is set. 2097 */ 2098 static inline int 2099 flow_drv_validate(struct rte_eth_dev *dev, 2100 const struct rte_flow_attr *attr, 2101 const struct rte_flow_item items[], 2102 const struct rte_flow_action actions[], 2103 struct rte_flow_error *error) 2104 { 2105 const struct mlx5_flow_driver_ops *fops; 2106 enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr); 2107 2108 fops = flow_get_drv_ops(type); 2109 return fops->validate(dev, attr, items, actions, error); 2110 } 2111 2112 /** 2113 * Flow driver preparation API. This abstracts calling driver specific 2114 * functions. Parent flow (rte_flow) should have driver type (drv_type). It 2115 * calculates the size of memory required for device flow, allocates the memory, 2116 * initializes the device flow and returns the pointer. 2117 * 2118 * @note 2119 * This function initializes device flow structure such as dv or verbs in 2120 * struct mlx5_flow. However, it is caller's responsibility to initialize the 2121 * rest. For example, adding returning device flow to flow->dev_flow list and 2122 * setting backward reference to the flow should be done out of this function. 2123 * layers field is not filled either. 2124 * 2125 * @param[in] attr 2126 * Pointer to the flow attributes. 2127 * @param[in] items 2128 * Pointer to the list of items. 2129 * @param[in] actions 2130 * Pointer to the list of actions. 2131 * @param[out] error 2132 * Pointer to the error structure. 2133 * 2134 * @return 2135 * Pointer to device flow on success, otherwise NULL and rte_errno is set. 
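 *
 * @note
 *   A condensed, illustrative sketch of the caller's side (mirroring what
 *   flow_list_create() does further below; error handling omitted):
 *
 * @code
 * dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
 * dev_flow->flow = flow;                    // Backward reference.
 * LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
 * flow_drv_translate(dev, dev_flow, attr, items, actions, error);
 * flow_drv_apply(dev, flow, error);         // Only once the port is started.
 * @endcode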
2136 */ 2137 static inline struct mlx5_flow * 2138 flow_drv_prepare(const struct rte_flow *flow, 2139 const struct rte_flow_attr *attr, 2140 const struct rte_flow_item items[], 2141 const struct rte_flow_action actions[], 2142 struct rte_flow_error *error) 2143 { 2144 const struct mlx5_flow_driver_ops *fops; 2145 enum mlx5_flow_drv_type type = flow->drv_type; 2146 2147 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2148 fops = flow_get_drv_ops(type); 2149 return fops->prepare(attr, items, actions, error); 2150 } 2151 2152 /** 2153 * Flow driver translation API. This abstracts calling driver specific 2154 * functions. Parent flow (rte_flow) should have driver type (drv_type). It 2155 * translates a generic flow into a driver flow. flow_drv_prepare() must 2156 * precede. 2157 * 2158 * @note 2159 * dev_flow->layers could be filled as a result of parsing during translation 2160 * if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled 2161 * if necessary. As a flow can have multiple dev_flows by RSS flow expansion, 2162 * flow->actions could be overwritten even though all the expanded dev_flows 2163 * have the same actions. 2164 * 2165 * @param[in] dev 2166 * Pointer to the rte dev structure. 2167 * @param[in, out] dev_flow 2168 * Pointer to the mlx5 flow. 2169 * @param[in] attr 2170 * Pointer to the flow attributes. 2171 * @param[in] items 2172 * Pointer to the list of items. 2173 * @param[in] actions 2174 * Pointer to the list of actions. 2175 * @param[out] error 2176 * Pointer to the error structure. 2177 * 2178 * @return 2179 * 0 on success, a negative errno value otherwise and rte_errno is set. 2180 */ 2181 static inline int 2182 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow, 2183 const struct rte_flow_attr *attr, 2184 const struct rte_flow_item items[], 2185 const struct rte_flow_action actions[], 2186 struct rte_flow_error *error) 2187 { 2188 const struct mlx5_flow_driver_ops *fops; 2189 enum mlx5_flow_drv_type type = dev_flow->flow->drv_type; 2190 2191 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2192 fops = flow_get_drv_ops(type); 2193 return fops->translate(dev, dev_flow, attr, items, actions, error); 2194 } 2195 2196 /** 2197 * Flow driver apply API. This abstracts calling driver specific functions. 2198 * Parent flow (rte_flow) should have driver type (drv_type). It applies 2199 * translated driver flows on to device. flow_drv_translate() must precede. 2200 * 2201 * @param[in] dev 2202 * Pointer to Ethernet device structure. 2203 * @param[in, out] flow 2204 * Pointer to flow structure. 2205 * @param[out] error 2206 * Pointer to error structure. 2207 * 2208 * @return 2209 * 0 on success, a negative errno value otherwise and rte_errno is set. 2210 */ 2211 static inline int 2212 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, 2213 struct rte_flow_error *error) 2214 { 2215 const struct mlx5_flow_driver_ops *fops; 2216 enum mlx5_flow_drv_type type = flow->drv_type; 2217 2218 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2219 fops = flow_get_drv_ops(type); 2220 return fops->apply(dev, flow, error); 2221 } 2222 2223 /** 2224 * Flow driver remove API. This abstracts calling driver specific functions. 2225 * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow 2226 * on device. All the resources of the flow should be freed by calling 2227 * flow_drv_destroy(). 2228 * 2229 * @param[in] dev 2230 * Pointer to Ethernet device. 
2231 * @param[in, out] flow 2232 * Pointer to flow structure. 2233 */ 2234 static inline void 2235 flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) 2236 { 2237 const struct mlx5_flow_driver_ops *fops; 2238 enum mlx5_flow_drv_type type = flow->drv_type; 2239 2240 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2241 fops = flow_get_drv_ops(type); 2242 fops->remove(dev, flow); 2243 } 2244 2245 /** 2246 * Flow driver destroy API. This abstracts calling driver specific functions. 2247 * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow 2248 * on device and releases resources of the flow. 2249 * 2250 * @param[in] dev 2251 * Pointer to Ethernet device. 2252 * @param[in, out] flow 2253 * Pointer to flow structure. 2254 */ 2255 static inline void 2256 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) 2257 { 2258 const struct mlx5_flow_driver_ops *fops; 2259 enum mlx5_flow_drv_type type = flow->drv_type; 2260 2261 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2262 fops = flow_get_drv_ops(type); 2263 fops->destroy(dev, flow); 2264 } 2265 2266 /** 2267 * Validate a flow supported by the NIC. 2268 * 2269 * @see rte_flow_validate() 2270 * @see rte_flow_ops 2271 */ 2272 int 2273 mlx5_flow_validate(struct rte_eth_dev *dev, 2274 const struct rte_flow_attr *attr, 2275 const struct rte_flow_item items[], 2276 const struct rte_flow_action actions[], 2277 struct rte_flow_error *error) 2278 { 2279 int ret; 2280 2281 ret = flow_drv_validate(dev, attr, items, actions, error); 2282 if (ret < 0) 2283 return ret; 2284 return 0; 2285 } 2286 2287 /** 2288 * Get RSS action from the action list. 2289 * 2290 * @param[in] actions 2291 * Pointer to the list of actions. 2292 * 2293 * @return 2294 * Pointer to the RSS action if exist, else return NULL. 2295 */ 2296 static const struct rte_flow_action_rss* 2297 flow_get_rss_action(const struct rte_flow_action actions[]) 2298 { 2299 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2300 switch (actions->type) { 2301 case RTE_FLOW_ACTION_TYPE_RSS: 2302 return (const struct rte_flow_action_rss *) 2303 actions->conf; 2304 default: 2305 break; 2306 } 2307 } 2308 return NULL; 2309 } 2310 2311 static unsigned int 2312 find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) 2313 { 2314 const struct rte_flow_item *item; 2315 unsigned int has_vlan = 0; 2316 2317 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { 2318 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { 2319 has_vlan = 1; 2320 break; 2321 } 2322 } 2323 if (has_vlan) 2324 return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN : 2325 MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN; 2326 return rss_level < 2 ? MLX5_EXPANSION_ROOT : 2327 MLX5_EXPANSION_ROOT_OUTER; 2328 } 2329 2330 /** 2331 * Create a flow and add it to @p list. 2332 * 2333 * @param dev 2334 * Pointer to Ethernet device. 2335 * @param list 2336 * Pointer to a TAILQ flow list. 2337 * @param[in] attr 2338 * Flow rule attributes. 2339 * @param[in] items 2340 * Pattern specification (list terminated by the END pattern item). 2341 * @param[in] actions 2342 * Associated actions (list terminated by the END action). 2343 * @param[out] error 2344 * Perform verbose error reporting if not NULL. 2345 * 2346 * @return 2347 * A flow on success, NULL otherwise and rte_errno is set. 
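 *
 * @note
 *   When the flow carries an RSS action with a non-zero types field, the
 *   pattern is expanded against mlx5_support_expansion and one device flow
 *   is created per expanded pattern. For instance, a bare "eth / end"
 *   pattern combined with an RSS action whose types include IPv4/IPv6 TCP
 *   would, roughly, be expanded into "eth / ipv4 / tcp / end" and
 *   "eth / ipv6 / tcp / end" entries (illustration only; the exact set
 *   depends on the expansion graph and the requested types).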
2348 */ 2349 static struct rte_flow * 2350 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, 2351 const struct rte_flow_attr *attr, 2352 const struct rte_flow_item items[], 2353 const struct rte_flow_action actions[], 2354 struct rte_flow_error *error) 2355 { 2356 struct rte_flow *flow = NULL; 2357 struct mlx5_flow *dev_flow; 2358 const struct rte_flow_action_rss *rss; 2359 union { 2360 struct rte_flow_expand_rss buf; 2361 uint8_t buffer[2048]; 2362 } expand_buffer; 2363 struct rte_flow_expand_rss *buf = &expand_buffer.buf; 2364 int ret; 2365 uint32_t i; 2366 uint32_t flow_size; 2367 2368 ret = flow_drv_validate(dev, attr, items, actions, error); 2369 if (ret < 0) 2370 return NULL; 2371 flow_size = sizeof(struct rte_flow); 2372 rss = flow_get_rss_action(actions); 2373 if (rss) 2374 flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t), 2375 sizeof(void *)); 2376 else 2377 flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *)); 2378 flow = rte_calloc(__func__, 1, flow_size, 0); 2379 if (!flow) { 2380 rte_errno = ENOMEM; 2381 return NULL; 2382 } 2383 flow->drv_type = flow_get_drv_type(dev, attr); 2384 flow->ingress = attr->ingress; 2385 flow->transfer = attr->transfer; 2386 assert(flow->drv_type > MLX5_FLOW_TYPE_MIN && 2387 flow->drv_type < MLX5_FLOW_TYPE_MAX); 2388 flow->queue = (void *)(flow + 1); 2389 LIST_INIT(&flow->dev_flows); 2390 if (rss && rss->types) { 2391 unsigned int graph_root; 2392 2393 graph_root = find_graph_root(items, rss->level); 2394 ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer), 2395 items, rss->types, 2396 mlx5_support_expansion, 2397 graph_root); 2398 assert(ret > 0 && 2399 (unsigned int)ret < sizeof(expand_buffer.buffer)); 2400 } else { 2401 buf->entries = 1; 2402 buf->entry[0].pattern = (void *)(uintptr_t)items; 2403 } 2404 for (i = 0; i < buf->entries; ++i) { 2405 dev_flow = flow_drv_prepare(flow, attr, buf->entry[i].pattern, 2406 actions, error); 2407 if (!dev_flow) 2408 goto error; 2409 dev_flow->flow = flow; 2410 LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); 2411 ret = flow_drv_translate(dev, dev_flow, attr, 2412 buf->entry[i].pattern, 2413 actions, error); 2414 if (ret < 0) 2415 goto error; 2416 } 2417 if (dev->data->dev_started) { 2418 ret = flow_drv_apply(dev, flow, error); 2419 if (ret < 0) 2420 goto error; 2421 } 2422 TAILQ_INSERT_TAIL(list, flow, next); 2423 flow_rxq_flags_set(dev, flow); 2424 return flow; 2425 error: 2426 ret = rte_errno; /* Save rte_errno before cleanup. */ 2427 assert(flow); 2428 flow_drv_destroy(dev, flow); 2429 rte_free(flow); 2430 rte_errno = ret; /* Restore rte_errno. */ 2431 return NULL; 2432 } 2433 2434 /** 2435 * Create a flow. 2436 * 2437 * @see rte_flow_create() 2438 * @see rte_flow_ops 2439 */ 2440 struct rte_flow * 2441 mlx5_flow_create(struct rte_eth_dev *dev, 2442 const struct rte_flow_attr *attr, 2443 const struct rte_flow_item items[], 2444 const struct rte_flow_action actions[], 2445 struct rte_flow_error *error) 2446 { 2447 struct mlx5_priv *priv = dev->data->dev_private; 2448 2449 return flow_list_create(dev, &priv->flows, 2450 attr, items, actions, error); 2451 } 2452 2453 /** 2454 * Destroy a flow in a list. 2455 * 2456 * @param dev 2457 * Pointer to Ethernet device. 2458 * @param list 2459 * Pointer to a TAILQ flow list. 2460 * @param[in] flow 2461 * Flow to destroy. 
2462 */ 2463 static void 2464 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, 2465 struct rte_flow *flow) 2466 { 2467 /* 2468 * Update RX queue flags only if port is started, otherwise it is 2469 * already clean. 2470 */ 2471 if (dev->data->dev_started) 2472 flow_rxq_flags_trim(dev, flow); 2473 flow_drv_destroy(dev, flow); 2474 TAILQ_REMOVE(list, flow, next); 2475 rte_free(flow->fdir); 2476 rte_free(flow); 2477 } 2478 2479 /** 2480 * Destroy all flows. 2481 * 2482 * @param dev 2483 * Pointer to Ethernet device. 2484 * @param list 2485 * Pointer to a TAILQ flow list. 2486 */ 2487 void 2488 mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list) 2489 { 2490 while (!TAILQ_EMPTY(list)) { 2491 struct rte_flow *flow; 2492 2493 flow = TAILQ_FIRST(list); 2494 flow_list_destroy(dev, list, flow); 2495 } 2496 } 2497 2498 /** 2499 * Remove all flows. 2500 * 2501 * @param dev 2502 * Pointer to Ethernet device. 2503 * @param list 2504 * Pointer to a TAILQ flow list. 2505 */ 2506 void 2507 mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) 2508 { 2509 struct rte_flow *flow; 2510 2511 TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) 2512 flow_drv_remove(dev, flow); 2513 flow_rxq_flags_clear(dev); 2514 } 2515 2516 /** 2517 * Add all flows. 2518 * 2519 * @param dev 2520 * Pointer to Ethernet device. 2521 * @param list 2522 * Pointer to a TAILQ flow list. 2523 * 2524 * @return 2525 * 0 on success, a negative errno value otherwise and rte_errno is set. 2526 */ 2527 int 2528 mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) 2529 { 2530 struct rte_flow *flow; 2531 struct rte_flow_error error; 2532 int ret = 0; 2533 2534 TAILQ_FOREACH(flow, list, next) { 2535 ret = flow_drv_apply(dev, flow, &error); 2536 if (ret < 0) 2537 goto error; 2538 flow_rxq_flags_set(dev, flow); 2539 } 2540 return 0; 2541 error: 2542 ret = rte_errno; /* Save rte_errno before cleanup. */ 2543 mlx5_flow_stop(dev, list); 2544 rte_errno = ret; /* Restore rte_errno. */ 2545 return -rte_errno; 2546 } 2547 2548 /** 2549 * Verify the flow list is empty 2550 * 2551 * @param dev 2552 * Pointer to Ethernet device. 2553 * 2554 * @return the number of flows not released. 2555 */ 2556 int 2557 mlx5_flow_verify(struct rte_eth_dev *dev) 2558 { 2559 struct mlx5_priv *priv = dev->data->dev_private; 2560 struct rte_flow *flow; 2561 int ret = 0; 2562 2563 TAILQ_FOREACH(flow, &priv->flows, next) { 2564 DRV_LOG(DEBUG, "port %u flow %p still referenced", 2565 dev->data->port_id, (void *)flow); 2566 ++ret; 2567 } 2568 return ret; 2569 } 2570 2571 /** 2572 * Enable a control flow configured from the control plane. 2573 * 2574 * @param dev 2575 * Pointer to Ethernet device. 2576 * @param eth_spec 2577 * An Ethernet flow spec to apply. 2578 * @param eth_mask 2579 * An Ethernet flow mask to apply. 2580 * @param vlan_spec 2581 * A VLAN flow spec to apply. 2582 * @param vlan_mask 2583 * A VLAN flow mask to apply. 2584 * 2585 * @return 2586 * 0 on success, a negative errno value otherwise and rte_errno is set. 
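 *
 * @note
 *   Illustrative use only (not taken from the driver): a broadcast control
 *   flow without VLAN can be requested through the mlx5_ctrl_flow() wrapper
 *   below:
 *
 * @code
 * struct rte_flow_item_eth bcast = {
 *         .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 * };
 *
 * if (mlx5_ctrl_flow(dev, &bcast, &bcast))
 *         DRV_LOG(WARNING, "cannot enable broadcast control flow");
 * @endcode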
2587 */ 2588 int 2589 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, 2590 struct rte_flow_item_eth *eth_spec, 2591 struct rte_flow_item_eth *eth_mask, 2592 struct rte_flow_item_vlan *vlan_spec, 2593 struct rte_flow_item_vlan *vlan_mask) 2594 { 2595 struct mlx5_priv *priv = dev->data->dev_private; 2596 const struct rte_flow_attr attr = { 2597 .ingress = 1, 2598 .priority = MLX5_FLOW_PRIO_RSVD, 2599 }; 2600 struct rte_flow_item items[] = { 2601 { 2602 .type = RTE_FLOW_ITEM_TYPE_ETH, 2603 .spec = eth_spec, 2604 .last = NULL, 2605 .mask = eth_mask, 2606 }, 2607 { 2608 .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN : 2609 RTE_FLOW_ITEM_TYPE_END, 2610 .spec = vlan_spec, 2611 .last = NULL, 2612 .mask = vlan_mask, 2613 }, 2614 { 2615 .type = RTE_FLOW_ITEM_TYPE_END, 2616 }, 2617 }; 2618 uint16_t queue[priv->reta_idx_n]; 2619 struct rte_flow_action_rss action_rss = { 2620 .func = RTE_ETH_HASH_FUNCTION_DEFAULT, 2621 .level = 0, 2622 .types = priv->rss_conf.rss_hf, 2623 .key_len = priv->rss_conf.rss_key_len, 2624 .queue_num = priv->reta_idx_n, 2625 .key = priv->rss_conf.rss_key, 2626 .queue = queue, 2627 }; 2628 struct rte_flow_action actions[] = { 2629 { 2630 .type = RTE_FLOW_ACTION_TYPE_RSS, 2631 .conf = &action_rss, 2632 }, 2633 { 2634 .type = RTE_FLOW_ACTION_TYPE_END, 2635 }, 2636 }; 2637 struct rte_flow *flow; 2638 struct rte_flow_error error; 2639 unsigned int i; 2640 2641 if (!priv->reta_idx_n || !priv->rxqs_n) { 2642 return 0; 2643 } 2644 for (i = 0; i != priv->reta_idx_n; ++i) 2645 queue[i] = (*priv->reta_idx)[i]; 2646 flow = flow_list_create(dev, &priv->ctrl_flows, 2647 &attr, items, actions, &error); 2648 if (!flow) 2649 return -rte_errno; 2650 return 0; 2651 } 2652 2653 /** 2654 * Enable a flow control configured from the control plane. 2655 * 2656 * @param dev 2657 * Pointer to Ethernet device. 2658 * @param eth_spec 2659 * An Ethernet flow spec to apply. 2660 * @param eth_mask 2661 * An Ethernet flow mask to apply. 2662 * 2663 * @return 2664 * 0 on success, a negative errno value otherwise and rte_errno is set. 2665 */ 2666 int 2667 mlx5_ctrl_flow(struct rte_eth_dev *dev, 2668 struct rte_flow_item_eth *eth_spec, 2669 struct rte_flow_item_eth *eth_mask) 2670 { 2671 return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL); 2672 } 2673 2674 /** 2675 * Destroy a flow. 2676 * 2677 * @see rte_flow_destroy() 2678 * @see rte_flow_ops 2679 */ 2680 int 2681 mlx5_flow_destroy(struct rte_eth_dev *dev, 2682 struct rte_flow *flow, 2683 struct rte_flow_error *error __rte_unused) 2684 { 2685 struct mlx5_priv *priv = dev->data->dev_private; 2686 2687 flow_list_destroy(dev, &priv->flows, flow); 2688 return 0; 2689 } 2690 2691 /** 2692 * Destroy all flows. 2693 * 2694 * @see rte_flow_flush() 2695 * @see rte_flow_ops 2696 */ 2697 int 2698 mlx5_flow_flush(struct rte_eth_dev *dev, 2699 struct rte_flow_error *error __rte_unused) 2700 { 2701 struct mlx5_priv *priv = dev->data->dev_private; 2702 2703 mlx5_flow_list_flush(dev, &priv->flows); 2704 return 0; 2705 } 2706 2707 /** 2708 * Isolated mode. 
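 *
 * Isolated mode can only be toggled while the port is stopped; when enabled,
 * the device switches to the mlx5_dev_ops_isolate operation table so that
 * only flows explicitly created through rte_flow are applied. A minimal,
 * illustrative call sequence from an application (assuming port_id is
 * valid):
 *
 * @code
 * struct rte_flow_error err;
 *
 * rte_eth_dev_stop(port_id);
 * if (rte_flow_isolate(port_id, 1, &err))
 *         printf("cannot enter isolated mode: %s\n", err.message);
 * rte_eth_dev_start(port_id);
 * @endcode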
2709 * 2710 * @see rte_flow_isolate() 2711 * @see rte_flow_ops 2712 */ 2713 int 2714 mlx5_flow_isolate(struct rte_eth_dev *dev, 2715 int enable, 2716 struct rte_flow_error *error) 2717 { 2718 struct mlx5_priv *priv = dev->data->dev_private; 2719 2720 if (dev->data->dev_started) { 2721 rte_flow_error_set(error, EBUSY, 2722 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2723 NULL, 2724 "port must be stopped first"); 2725 return -rte_errno; 2726 } 2727 priv->isolated = !!enable; 2728 if (enable) 2729 dev->dev_ops = &mlx5_dev_ops_isolate; 2730 else 2731 dev->dev_ops = &mlx5_dev_ops; 2732 return 0; 2733 } 2734 2735 /** 2736 * Query a flow. 2737 * 2738 * @see rte_flow_query() 2739 * @see rte_flow_ops 2740 */ 2741 static int 2742 flow_drv_query(struct rte_eth_dev *dev, 2743 struct rte_flow *flow, 2744 const struct rte_flow_action *actions, 2745 void *data, 2746 struct rte_flow_error *error) 2747 { 2748 const struct mlx5_flow_driver_ops *fops; 2749 enum mlx5_flow_drv_type ftype = flow->drv_type; 2750 2751 assert(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX); 2752 fops = flow_get_drv_ops(ftype); 2753 2754 return fops->query(dev, flow, actions, data, error); 2755 } 2756 2757 /** 2758 * Query a flow. 2759 * 2760 * @see rte_flow_query() 2761 * @see rte_flow_ops 2762 */ 2763 int 2764 mlx5_flow_query(struct rte_eth_dev *dev, 2765 struct rte_flow *flow, 2766 const struct rte_flow_action *actions, 2767 void *data, 2768 struct rte_flow_error *error) 2769 { 2770 int ret; 2771 2772 ret = flow_drv_query(dev, flow, actions, data, error); 2773 if (ret < 0) 2774 return ret; 2775 return 0; 2776 } 2777 2778 /** 2779 * Convert a flow director filter to a generic flow. 2780 * 2781 * @param dev 2782 * Pointer to Ethernet device. 2783 * @param fdir_filter 2784 * Flow director filter to add. 2785 * @param attributes 2786 * Generic flow parameters structure. 2787 * 2788 * @return 2789 * 0 on success, a negative errno value otherwise and rte_errno is set. 2790 */ 2791 static int 2792 flow_fdir_filter_convert(struct rte_eth_dev *dev, 2793 const struct rte_eth_fdir_filter *fdir_filter, 2794 struct mlx5_fdir *attributes) 2795 { 2796 struct mlx5_priv *priv = dev->data->dev_private; 2797 const struct rte_eth_fdir_input *input = &fdir_filter->input; 2798 const struct rte_eth_fdir_masks *mask = 2799 &dev->data->dev_conf.fdir_conf.mask; 2800 2801 /* Validate queue number. */ 2802 if (fdir_filter->action.rx_queue >= priv->rxqs_n) { 2803 DRV_LOG(ERR, "port %u invalid queue number %d", 2804 dev->data->port_id, fdir_filter->action.rx_queue); 2805 rte_errno = EINVAL; 2806 return -rte_errno; 2807 } 2808 attributes->attr.ingress = 1; 2809 attributes->items[0] = (struct rte_flow_item) { 2810 .type = RTE_FLOW_ITEM_TYPE_ETH, 2811 .spec = &attributes->l2, 2812 .mask = &attributes->l2_mask, 2813 }; 2814 switch (fdir_filter->action.behavior) { 2815 case RTE_ETH_FDIR_ACCEPT: 2816 attributes->actions[0] = (struct rte_flow_action){ 2817 .type = RTE_FLOW_ACTION_TYPE_QUEUE, 2818 .conf = &attributes->queue, 2819 }; 2820 break; 2821 case RTE_ETH_FDIR_REJECT: 2822 attributes->actions[0] = (struct rte_flow_action){ 2823 .type = RTE_FLOW_ACTION_TYPE_DROP, 2824 }; 2825 break; 2826 default: 2827 DRV_LOG(ERR, "port %u invalid behavior %d", 2828 dev->data->port_id, 2829 fdir_filter->action.behavior); 2830 rte_errno = ENOTSUP; 2831 return -rte_errno; 2832 } 2833 attributes->queue.index = fdir_filter->action.rx_queue; 2834 /* Handle L3. 
*/ 2835 switch (fdir_filter->input.flow_type) { 2836 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 2837 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 2838 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 2839 attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){ 2840 .src_addr = input->flow.ip4_flow.src_ip, 2841 .dst_addr = input->flow.ip4_flow.dst_ip, 2842 .time_to_live = input->flow.ip4_flow.ttl, 2843 .type_of_service = input->flow.ip4_flow.tos, 2844 }; 2845 attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){ 2846 .src_addr = mask->ipv4_mask.src_ip, 2847 .dst_addr = mask->ipv4_mask.dst_ip, 2848 .time_to_live = mask->ipv4_mask.ttl, 2849 .type_of_service = mask->ipv4_mask.tos, 2850 .next_proto_id = mask->ipv4_mask.proto, 2851 }; 2852 attributes->items[1] = (struct rte_flow_item){ 2853 .type = RTE_FLOW_ITEM_TYPE_IPV4, 2854 .spec = &attributes->l3, 2855 .mask = &attributes->l3_mask, 2856 }; 2857 break; 2858 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 2859 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 2860 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 2861 attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){ 2862 .hop_limits = input->flow.ipv6_flow.hop_limits, 2863 .proto = input->flow.ipv6_flow.proto, 2864 }; 2865 2866 memcpy(attributes->l3.ipv6.hdr.src_addr, 2867 input->flow.ipv6_flow.src_ip, 2868 RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); 2869 memcpy(attributes->l3.ipv6.hdr.dst_addr, 2870 input->flow.ipv6_flow.dst_ip, 2871 RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); 2872 memcpy(attributes->l3_mask.ipv6.hdr.src_addr, 2873 mask->ipv6_mask.src_ip, 2874 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); 2875 memcpy(attributes->l3_mask.ipv6.hdr.dst_addr, 2876 mask->ipv6_mask.dst_ip, 2877 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); 2878 attributes->items[1] = (struct rte_flow_item){ 2879 .type = RTE_FLOW_ITEM_TYPE_IPV6, 2880 .spec = &attributes->l3, 2881 .mask = &attributes->l3_mask, 2882 }; 2883 break; 2884 default: 2885 DRV_LOG(ERR, "port %u invalid flow type%d", 2886 dev->data->port_id, fdir_filter->input.flow_type); 2887 rte_errno = ENOTSUP; 2888 return -rte_errno; 2889 } 2890 /* Handle L4. 
*/ 2891 switch (fdir_filter->input.flow_type) { 2892 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 2893 attributes->l4.udp.hdr = (struct rte_udp_hdr){ 2894 .src_port = input->flow.udp4_flow.src_port, 2895 .dst_port = input->flow.udp4_flow.dst_port, 2896 }; 2897 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ 2898 .src_port = mask->src_port_mask, 2899 .dst_port = mask->dst_port_mask, 2900 }; 2901 attributes->items[2] = (struct rte_flow_item){ 2902 .type = RTE_FLOW_ITEM_TYPE_UDP, 2903 .spec = &attributes->l4, 2904 .mask = &attributes->l4_mask, 2905 }; 2906 break; 2907 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 2908 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ 2909 .src_port = input->flow.tcp4_flow.src_port, 2910 .dst_port = input->flow.tcp4_flow.dst_port, 2911 }; 2912 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ 2913 .src_port = mask->src_port_mask, 2914 .dst_port = mask->dst_port_mask, 2915 }; 2916 attributes->items[2] = (struct rte_flow_item){ 2917 .type = RTE_FLOW_ITEM_TYPE_TCP, 2918 .spec = &attributes->l4, 2919 .mask = &attributes->l4_mask, 2920 }; 2921 break; 2922 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 2923 attributes->l4.udp.hdr = (struct rte_udp_hdr){ 2924 .src_port = input->flow.udp6_flow.src_port, 2925 .dst_port = input->flow.udp6_flow.dst_port, 2926 }; 2927 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ 2928 .src_port = mask->src_port_mask, 2929 .dst_port = mask->dst_port_mask, 2930 }; 2931 attributes->items[2] = (struct rte_flow_item){ 2932 .type = RTE_FLOW_ITEM_TYPE_UDP, 2933 .spec = &attributes->l4, 2934 .mask = &attributes->l4_mask, 2935 }; 2936 break; 2937 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 2938 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ 2939 .src_port = input->flow.tcp6_flow.src_port, 2940 .dst_port = input->flow.tcp6_flow.dst_port, 2941 }; 2942 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ 2943 .src_port = mask->src_port_mask, 2944 .dst_port = mask->dst_port_mask, 2945 }; 2946 attributes->items[2] = (struct rte_flow_item){ 2947 .type = RTE_FLOW_ITEM_TYPE_TCP, 2948 .spec = &attributes->l4, 2949 .mask = &attributes->l4_mask, 2950 }; 2951 break; 2952 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 2953 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 2954 break; 2955 default: 2956 DRV_LOG(ERR, "port %u invalid flow type%d", 2957 dev->data->port_id, fdir_filter->input.flow_type); 2958 rte_errno = ENOTSUP; 2959 return -rte_errno; 2960 } 2961 return 0; 2962 } 2963 2964 #define FLOW_FDIR_CMP(f1, f2, fld) \ 2965 memcmp(&(f1)->fld, &(f2)->fld, sizeof(f1->fld)) 2966 2967 /** 2968 * Compare two FDIR flows. If items and actions are identical, the two flows are 2969 * regarded as same. 2970 * 2971 * @param dev 2972 * Pointer to Ethernet device. 2973 * @param f1 2974 * FDIR flow to compare. 2975 * @param f2 2976 * FDIR flow to compare. 2977 * 2978 * @return 2979 * Zero on match, 1 otherwise. 2980 */ 2981 static int 2982 flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2) 2983 { 2984 if (FLOW_FDIR_CMP(f1, f2, attr) || 2985 FLOW_FDIR_CMP(f1, f2, l2) || 2986 FLOW_FDIR_CMP(f1, f2, l2_mask) || 2987 FLOW_FDIR_CMP(f1, f2, l3) || 2988 FLOW_FDIR_CMP(f1, f2, l3_mask) || 2989 FLOW_FDIR_CMP(f1, f2, l4) || 2990 FLOW_FDIR_CMP(f1, f2, l4_mask) || 2991 FLOW_FDIR_CMP(f1, f2, actions[0].type)) 2992 return 1; 2993 if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE && 2994 FLOW_FDIR_CMP(f1, f2, queue)) 2995 return 1; 2996 return 0; 2997 } 2998 2999 /** 3000 * Search device flow list to find out a matched FDIR flow. 3001 * 3002 * @param dev 3003 * Pointer to Ethernet device. 
3004 * @param fdir_flow 3005 * FDIR flow to lookup. 3006 * 3007 * @return 3008 * Pointer of flow if found, NULL otherwise. 3009 */ 3010 static struct rte_flow * 3011 flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow) 3012 { 3013 struct mlx5_priv *priv = dev->data->dev_private; 3014 struct rte_flow *flow = NULL; 3015 3016 assert(fdir_flow); 3017 TAILQ_FOREACH(flow, &priv->flows, next) { 3018 if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) { 3019 DRV_LOG(DEBUG, "port %u found FDIR flow %p", 3020 dev->data->port_id, (void *)flow); 3021 break; 3022 } 3023 } 3024 return flow; 3025 } 3026 3027 /** 3028 * Add new flow director filter and store it in list. 3029 * 3030 * @param dev 3031 * Pointer to Ethernet device. 3032 * @param fdir_filter 3033 * Flow director filter to add. 3034 * 3035 * @return 3036 * 0 on success, a negative errno value otherwise and rte_errno is set. 3037 */ 3038 static int 3039 flow_fdir_filter_add(struct rte_eth_dev *dev, 3040 const struct rte_eth_fdir_filter *fdir_filter) 3041 { 3042 struct mlx5_priv *priv = dev->data->dev_private; 3043 struct mlx5_fdir *fdir_flow; 3044 struct rte_flow *flow; 3045 int ret; 3046 3047 fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0); 3048 if (!fdir_flow) { 3049 rte_errno = ENOMEM; 3050 return -rte_errno; 3051 } 3052 ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow); 3053 if (ret) 3054 goto error; 3055 flow = flow_fdir_filter_lookup(dev, fdir_flow); 3056 if (flow) { 3057 rte_errno = EEXIST; 3058 goto error; 3059 } 3060 flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr, 3061 fdir_flow->items, fdir_flow->actions, NULL); 3062 if (!flow) 3063 goto error; 3064 assert(!flow->fdir); 3065 flow->fdir = fdir_flow; 3066 DRV_LOG(DEBUG, "port %u created FDIR flow %p", 3067 dev->data->port_id, (void *)flow); 3068 return 0; 3069 error: 3070 rte_free(fdir_flow); 3071 return -rte_errno; 3072 } 3073 3074 /** 3075 * Delete specific filter. 3076 * 3077 * @param dev 3078 * Pointer to Ethernet device. 3079 * @param fdir_filter 3080 * Filter to be deleted. 3081 * 3082 * @return 3083 * 0 on success, a negative errno value otherwise and rte_errno is set. 3084 */ 3085 static int 3086 flow_fdir_filter_delete(struct rte_eth_dev *dev, 3087 const struct rte_eth_fdir_filter *fdir_filter) 3088 { 3089 struct mlx5_priv *priv = dev->data->dev_private; 3090 struct rte_flow *flow; 3091 struct mlx5_fdir fdir_flow = { 3092 .attr.group = 0, 3093 }; 3094 int ret; 3095 3096 ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow); 3097 if (ret) 3098 return -rte_errno; 3099 flow = flow_fdir_filter_lookup(dev, &fdir_flow); 3100 if (!flow) { 3101 rte_errno = ENOENT; 3102 return -rte_errno; 3103 } 3104 flow_list_destroy(dev, &priv->flows, flow); 3105 DRV_LOG(DEBUG, "port %u deleted FDIR flow %p", 3106 dev->data->port_id, (void *)flow); 3107 return 0; 3108 } 3109 3110 /** 3111 * Update queue for specific filter. 3112 * 3113 * @param dev 3114 * Pointer to Ethernet device. 3115 * @param fdir_filter 3116 * Filter to be updated. 3117 * 3118 * @return 3119 * 0 on success, a negative errno value otherwise and rte_errno is set. 3120 */ 3121 static int 3122 flow_fdir_filter_update(struct rte_eth_dev *dev, 3123 const struct rte_eth_fdir_filter *fdir_filter) 3124 { 3125 int ret; 3126 3127 ret = flow_fdir_filter_delete(dev, fdir_filter); 3128 if (ret) 3129 return ret; 3130 return flow_fdir_filter_add(dev, fdir_filter); 3131 } 3132 3133 /** 3134 * Flush all filters. 3135 * 3136 * @param dev 3137 * Pointer to Ethernet device. 
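 *
 * @note
 *   This reuses mlx5_flow_list_flush() on the main priv->flows list, so every
 *   flow on that list is released, not only the ones that were inserted
 *   through the flow director API.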
3138 */ 3139 static void 3140 flow_fdir_filter_flush(struct rte_eth_dev *dev) 3141 { 3142 struct mlx5_priv *priv = dev->data->dev_private; 3143 3144 mlx5_flow_list_flush(dev, &priv->flows); 3145 } 3146 3147 /** 3148 * Get flow director information. 3149 * 3150 * @param dev 3151 * Pointer to Ethernet device. 3152 * @param[out] fdir_info 3153 * Resulting flow director information. 3154 */ 3155 static void 3156 flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) 3157 { 3158 struct rte_eth_fdir_masks *mask = 3159 &dev->data->dev_conf.fdir_conf.mask; 3160 3161 fdir_info->mode = dev->data->dev_conf.fdir_conf.mode; 3162 fdir_info->guarant_spc = 0; 3163 rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask)); 3164 fdir_info->max_flexpayload = 0; 3165 fdir_info->flow_types_mask[0] = 0; 3166 fdir_info->flex_payload_unit = 0; 3167 fdir_info->max_flex_payload_segment_num = 0; 3168 fdir_info->flex_payload_limit = 0; 3169 memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf)); 3170 } 3171 3172 /** 3173 * Deal with flow director operations. 3174 * 3175 * @param dev 3176 * Pointer to Ethernet device. 3177 * @param filter_op 3178 * Operation to perform. 3179 * @param arg 3180 * Pointer to operation-specific structure. 3181 * 3182 * @return 3183 * 0 on success, a negative errno value otherwise and rte_errno is set. 3184 */ 3185 static int 3186 flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, 3187 void *arg) 3188 { 3189 enum rte_fdir_mode fdir_mode = 3190 dev->data->dev_conf.fdir_conf.mode; 3191 3192 if (filter_op == RTE_ETH_FILTER_NOP) 3193 return 0; 3194 if (fdir_mode != RTE_FDIR_MODE_PERFECT && 3195 fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 3196 DRV_LOG(ERR, "port %u flow director mode %d not supported", 3197 dev->data->port_id, fdir_mode); 3198 rte_errno = EINVAL; 3199 return -rte_errno; 3200 } 3201 switch (filter_op) { 3202 case RTE_ETH_FILTER_ADD: 3203 return flow_fdir_filter_add(dev, arg); 3204 case RTE_ETH_FILTER_UPDATE: 3205 return flow_fdir_filter_update(dev, arg); 3206 case RTE_ETH_FILTER_DELETE: 3207 return flow_fdir_filter_delete(dev, arg); 3208 case RTE_ETH_FILTER_FLUSH: 3209 flow_fdir_filter_flush(dev); 3210 break; 3211 case RTE_ETH_FILTER_INFO: 3212 flow_fdir_info_get(dev, arg); 3213 break; 3214 default: 3215 DRV_LOG(DEBUG, "port %u unknown operation %u", 3216 dev->data->port_id, filter_op); 3217 rte_errno = EINVAL; 3218 return -rte_errno; 3219 } 3220 return 0; 3221 } 3222 3223 /** 3224 * Manage filter operations. 3225 * 3226 * @param dev 3227 * Pointer to Ethernet device structure. 3228 * @param filter_type 3229 * Filter type. 3230 * @param filter_op 3231 * Operation to perform. 3232 * @param arg 3233 * Pointer to operation-specific structure. 3234 * 3235 * @return 3236 * 0 on success, a negative errno value otherwise and rte_errno is set. 
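 *
 * @note
 *   Illustrative sketch of the legacy caller side (identifiers and values
 *   are examples, not taken from the driver): adding a flow director rule
 *   that steers IPv4/UDP packets with a given destination port to queue 1
 *   goes through rte_eth_dev_filter_ctrl():
 *
 * @code
 * struct rte_eth_fdir_filter filter = {
 *         .input = {
 *                 .flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
 *                 .flow.udp4_flow.dst_port = rte_cpu_to_be_16(4789),
 *         },
 *         .action = {
 *                 .rx_queue = 1,
 *                 .behavior = RTE_ETH_FDIR_ACCEPT,
 *         },
 * };
 *
 * rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *                         RTE_ETH_FILTER_ADD, &filter);
 * @endcode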
3237 */ 3238 int 3239 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, 3240 enum rte_filter_type filter_type, 3241 enum rte_filter_op filter_op, 3242 void *arg) 3243 { 3244 switch (filter_type) { 3245 case RTE_ETH_FILTER_GENERIC: 3246 if (filter_op != RTE_ETH_FILTER_GET) { 3247 rte_errno = EINVAL; 3248 return -rte_errno; 3249 } 3250 *(const void **)arg = &mlx5_flow_ops; 3251 return 0; 3252 case RTE_ETH_FILTER_FDIR: 3253 return flow_fdir_ctrl_func(dev, filter_op, arg); 3254 default: 3255 DRV_LOG(ERR, "port %u filter type (%d) not supported", 3256 dev->data->port_id, filter_type); 3257 rte_errno = ENOTSUP; 3258 return -rte_errno; 3259 } 3260 return 0; 3261 } 3262 3263 #define MLX5_POOL_QUERY_FREQ_US 1000000 3264 3265 /** 3266 * Set the periodic procedure for triggering asynchronous batch queries for all 3267 * the counter pools. 3268 * 3269 * @param[in] sh 3270 * Pointer to mlx5_ibv_shared object. 3271 */ 3272 void 3273 mlx5_set_query_alarm(struct mlx5_ibv_shared *sh) 3274 { 3275 struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(sh, 0, 0); 3276 uint32_t pools_n = rte_atomic16_read(&cont->n_valid); 3277 uint32_t us; 3278 3279 cont = MLX5_CNT_CONTAINER(sh, 1, 0); 3280 pools_n += rte_atomic16_read(&cont->n_valid); 3281 us = MLX5_POOL_QUERY_FREQ_US / pools_n; 3282 DRV_LOG(DEBUG, "Set alarm for %u pools each %u us\n", pools_n, us); 3283 if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) { 3284 sh->cmng.query_thread_on = 0; 3285 DRV_LOG(ERR, "Cannot reinitialize query alarm\n"); 3286 } else { 3287 sh->cmng.query_thread_on = 1; 3288 } 3289 } 3290 3291 /** 3292 * The periodic procedure for triggering asynchronous batch queries for all the 3293 * counter pools. This function is probably called by the host thread. 3294 * 3295 * @param[in] arg 3296 * The parameter for the alarm process. 3297 */ 3298 void 3299 mlx5_flow_query_alarm(void *arg) 3300 { 3301 struct mlx5_ibv_shared *sh = arg; 3302 struct mlx5_devx_obj *dcs; 3303 uint16_t offset; 3304 int ret; 3305 uint8_t batch = sh->cmng.batch; 3306 uint16_t pool_index = sh->cmng.pool_index; 3307 struct mlx5_pools_container *cont; 3308 struct mlx5_pools_container *mcont; 3309 struct mlx5_flow_counter_pool *pool; 3310 3311 if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES) 3312 goto set_alarm; 3313 next_container: 3314 cont = MLX5_CNT_CONTAINER(sh, batch, 1); 3315 mcont = MLX5_CNT_CONTAINER(sh, batch, 0); 3316 /* Check if resize was done and need to flip a container. */ 3317 if (cont != mcont) { 3318 if (cont->pools) { 3319 /* Clean the old container. */ 3320 rte_free(cont->pools); 3321 memset(cont, 0, sizeof(*cont)); 3322 } 3323 rte_cio_wmb(); 3324 /* Flip the host container. */ 3325 sh->cmng.mhi[batch] ^= (uint8_t)2; 3326 cont = mcont; 3327 } 3328 if (!cont->pools) { 3329 /* 2 empty containers case is unexpected. */ 3330 if (unlikely(batch != sh->cmng.batch)) 3331 goto set_alarm; 3332 batch ^= 0x1; 3333 pool_index = 0; 3334 goto next_container; 3335 } 3336 pool = cont->pools[pool_index]; 3337 if (pool->raw_hw) 3338 /* There is a pool query in progress. */ 3339 goto set_alarm; 3340 pool->raw_hw = 3341 LIST_FIRST(&sh->cmng.free_stat_raws); 3342 if (!pool->raw_hw) 3343 /* No free counter statistics raw memory. */ 3344 goto set_alarm; 3345 dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read 3346 (&pool->a64_dcs); 3347 offset = batch ? 
0 : dcs->id % MLX5_COUNTERS_PER_POOL; 3348 ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL - 3349 offset, NULL, NULL, 3350 pool->raw_hw->mem_mng->dm->id, 3351 (void *)(uintptr_t) 3352 (pool->raw_hw->data + offset), 3353 sh->devx_comp, 3354 (uint64_t)(uintptr_t)pool); 3355 if (ret) { 3356 DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID" 3357 " %d\n", pool->min_dcs->id); 3358 pool->raw_hw = NULL; 3359 goto set_alarm; 3360 } 3361 pool->raw_hw->min_dcs_id = dcs->id; 3362 LIST_REMOVE(pool->raw_hw, next); 3363 sh->cmng.pending_queries++; 3364 pool_index++; 3365 if (pool_index >= rte_atomic16_read(&cont->n_valid)) { 3366 batch ^= 0x1; 3367 pool_index = 0; 3368 } 3369 set_alarm: 3370 sh->cmng.batch = batch; 3371 sh->cmng.pool_index = pool_index; 3372 mlx5_set_query_alarm(sh); 3373 } 3374 3375 /** 3376 * Handler for the HW respond about ready values from an asynchronous batch 3377 * query. This function is probably called by the host thread. 3378 * 3379 * @param[in] sh 3380 * The pointer to the shared IB device context. 3381 * @param[in] async_id 3382 * The Devx async ID. 3383 * @param[in] status 3384 * The status of the completion. 3385 */ 3386 void 3387 mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh, 3388 uint64_t async_id, int status) 3389 { 3390 struct mlx5_flow_counter_pool *pool = 3391 (struct mlx5_flow_counter_pool *)(uintptr_t)async_id; 3392 struct mlx5_counter_stats_raw *raw_to_free; 3393 3394 if (unlikely(status)) { 3395 raw_to_free = pool->raw_hw; 3396 } else { 3397 raw_to_free = pool->raw; 3398 rte_spinlock_lock(&pool->sl); 3399 pool->raw = pool->raw_hw; 3400 rte_spinlock_unlock(&pool->sl); 3401 rte_atomic64_add(&pool->query_gen, 1); 3402 /* Be sure the new raw counters data is updated in memory. */ 3403 rte_cio_wmb(); 3404 } 3405 LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next); 3406 pool->raw_hw = NULL; 3407 sh->cmng.pending_queries--; 3408 } 3409
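/*
 * Summary of the asynchronous counter query cycle implemented above (added
 * as an explanatory note, not functional code):
 *
 * 1. mlx5_set_query_alarm() spreads MLX5_POOL_QUERY_FREQ_US over the number
 *    of valid pools and arms an EAL alarm calling mlx5_flow_query_alarm().
 * 2. mlx5_flow_query_alarm() picks the next pool of the current batch
 *    container, grabs a free statistics raw buffer, triggers a DevX
 *    asynchronous counter query and then re-arms the alarm.
 * 3. When the completion arrives, mlx5_flow_async_pool_query_handle() swaps
 *    the pool's raw data pointer under the pool spinlock (or drops the
 *    result on error) and returns the spare buffer to the free_stat_raws
 *    list.
 */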