1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2016 6WIND S.A. 3 * Copyright 2016 Mellanox Technologies, Ltd 4 */ 5 6 #include <netinet/in.h> 7 #include <sys/queue.h> 8 #include <stdalign.h> 9 #include <stdint.h> 10 #include <string.h> 11 12 /* Verbs header. */ 13 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ 14 #ifdef PEDANTIC 15 #pragma GCC diagnostic ignored "-Wpedantic" 16 #endif 17 #include <infiniband/verbs.h> 18 #ifdef PEDANTIC 19 #pragma GCC diagnostic error "-Wpedantic" 20 #endif 21 22 #include <rte_common.h> 23 #include <rte_ether.h> 24 #include <rte_ethdev_driver.h> 25 #include <rte_flow.h> 26 #include <rte_flow_driver.h> 27 #include <rte_malloc.h> 28 #include <rte_ip.h> 29 30 #include "mlx5.h" 31 #include "mlx5_defs.h" 32 #include "mlx5_flow.h" 33 #include "mlx5_glue.h" 34 #include "mlx5_prm.h" 35 #include "mlx5_rxtx.h" 36 37 /* Dev ops structure defined in mlx5.c */ 38 extern const struct eth_dev_ops mlx5_dev_ops; 39 extern const struct eth_dev_ops mlx5_dev_ops_isolate; 40 41 /** Device flow drivers. */ 42 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 43 extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops; 44 #endif 45 extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops; 46 47 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops; 48 49 const struct mlx5_flow_driver_ops *flow_drv_ops[] = { 50 [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops, 51 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 52 [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops, 53 #endif 54 [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops, 55 [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops 56 }; 57 58 enum mlx5_expansion { 59 MLX5_EXPANSION_ROOT, 60 MLX5_EXPANSION_ROOT_OUTER, 61 MLX5_EXPANSION_ROOT_ETH_VLAN, 62 MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN, 63 MLX5_EXPANSION_OUTER_ETH, 64 MLX5_EXPANSION_OUTER_ETH_VLAN, 65 MLX5_EXPANSION_OUTER_VLAN, 66 MLX5_EXPANSION_OUTER_IPV4, 67 MLX5_EXPANSION_OUTER_IPV4_UDP, 68 MLX5_EXPANSION_OUTER_IPV4_TCP, 69 MLX5_EXPANSION_OUTER_IPV6, 70 MLX5_EXPANSION_OUTER_IPV6_UDP, 71 MLX5_EXPANSION_OUTER_IPV6_TCP, 72 MLX5_EXPANSION_VXLAN, 73 MLX5_EXPANSION_VXLAN_GPE, 74 MLX5_EXPANSION_GRE, 75 MLX5_EXPANSION_MPLS, 76 MLX5_EXPANSION_ETH, 77 MLX5_EXPANSION_ETH_VLAN, 78 MLX5_EXPANSION_VLAN, 79 MLX5_EXPANSION_IPV4, 80 MLX5_EXPANSION_IPV4_UDP, 81 MLX5_EXPANSION_IPV4_TCP, 82 MLX5_EXPANSION_IPV6, 83 MLX5_EXPANSION_IPV6_UDP, 84 MLX5_EXPANSION_IPV6_TCP, 85 }; 86 87 /** Supported expansion of items. 
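 *
 * Editor's note: each entry below describes which items may follow the given
 * item when an RSS flow is expanded with rte_flow_expand_rss(). As a rough
 * illustration (not an exhaustive description), a pattern of
 * "eth / ipv4 / end" combined with an RSS action requesting UDP hashing can
 * be expanded into both "eth / ipv4" and "eth / ipv4 / udp" device flows,
 * following the MLX5_EXPANSION_IPV4 -> MLX5_EXPANSION_IPV4_UDP edge.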
*/ 88 static const struct rte_flow_expand_node mlx5_support_expansion[] = { 89 [MLX5_EXPANSION_ROOT] = { 90 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, 91 MLX5_EXPANSION_IPV4, 92 MLX5_EXPANSION_IPV6), 93 .type = RTE_FLOW_ITEM_TYPE_END, 94 }, 95 [MLX5_EXPANSION_ROOT_OUTER] = { 96 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH, 97 MLX5_EXPANSION_OUTER_IPV4, 98 MLX5_EXPANSION_OUTER_IPV6), 99 .type = RTE_FLOW_ITEM_TYPE_END, 100 }, 101 [MLX5_EXPANSION_ROOT_ETH_VLAN] = { 102 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN), 103 .type = RTE_FLOW_ITEM_TYPE_END, 104 }, 105 [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = { 106 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN), 107 .type = RTE_FLOW_ITEM_TYPE_END, 108 }, 109 [MLX5_EXPANSION_OUTER_ETH] = { 110 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, 111 MLX5_EXPANSION_OUTER_IPV6, 112 MLX5_EXPANSION_MPLS), 113 .type = RTE_FLOW_ITEM_TYPE_ETH, 114 .rss_types = 0, 115 }, 116 [MLX5_EXPANSION_OUTER_ETH_VLAN] = { 117 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN), 118 .type = RTE_FLOW_ITEM_TYPE_ETH, 119 .rss_types = 0, 120 }, 121 [MLX5_EXPANSION_OUTER_VLAN] = { 122 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, 123 MLX5_EXPANSION_OUTER_IPV6), 124 .type = RTE_FLOW_ITEM_TYPE_VLAN, 125 }, 126 [MLX5_EXPANSION_OUTER_IPV4] = { 127 .next = RTE_FLOW_EXPAND_RSS_NEXT 128 (MLX5_EXPANSION_OUTER_IPV4_UDP, 129 MLX5_EXPANSION_OUTER_IPV4_TCP, 130 MLX5_EXPANSION_GRE, 131 MLX5_EXPANSION_IPV4, 132 MLX5_EXPANSION_IPV6), 133 .type = RTE_FLOW_ITEM_TYPE_IPV4, 134 .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | 135 ETH_RSS_NONFRAG_IPV4_OTHER, 136 }, 137 [MLX5_EXPANSION_OUTER_IPV4_UDP] = { 138 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, 139 MLX5_EXPANSION_VXLAN_GPE), 140 .type = RTE_FLOW_ITEM_TYPE_UDP, 141 .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, 142 }, 143 [MLX5_EXPANSION_OUTER_IPV4_TCP] = { 144 .type = RTE_FLOW_ITEM_TYPE_TCP, 145 .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, 146 }, 147 [MLX5_EXPANSION_OUTER_IPV6] = { 148 .next = RTE_FLOW_EXPAND_RSS_NEXT 149 (MLX5_EXPANSION_OUTER_IPV6_UDP, 150 MLX5_EXPANSION_OUTER_IPV6_TCP, 151 MLX5_EXPANSION_IPV4, 152 MLX5_EXPANSION_IPV6), 153 .type = RTE_FLOW_ITEM_TYPE_IPV6, 154 .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | 155 ETH_RSS_NONFRAG_IPV6_OTHER, 156 }, 157 [MLX5_EXPANSION_OUTER_IPV6_UDP] = { 158 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, 159 MLX5_EXPANSION_VXLAN_GPE), 160 .type = RTE_FLOW_ITEM_TYPE_UDP, 161 .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, 162 }, 163 [MLX5_EXPANSION_OUTER_IPV6_TCP] = { 164 .type = RTE_FLOW_ITEM_TYPE_TCP, 165 .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, 166 }, 167 [MLX5_EXPANSION_VXLAN] = { 168 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH), 169 .type = RTE_FLOW_ITEM_TYPE_VXLAN, 170 }, 171 [MLX5_EXPANSION_VXLAN_GPE] = { 172 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, 173 MLX5_EXPANSION_IPV4, 174 MLX5_EXPANSION_IPV6), 175 .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE, 176 }, 177 [MLX5_EXPANSION_GRE] = { 178 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4), 179 .type = RTE_FLOW_ITEM_TYPE_GRE, 180 }, 181 [MLX5_EXPANSION_MPLS] = { 182 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 183 MLX5_EXPANSION_IPV6), 184 .type = RTE_FLOW_ITEM_TYPE_MPLS, 185 }, 186 [MLX5_EXPANSION_ETH] = { 187 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 188 MLX5_EXPANSION_IPV6), 189 .type = RTE_FLOW_ITEM_TYPE_ETH, 190 }, 191 [MLX5_EXPANSION_ETH_VLAN] = { 192 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN), 193 .type = 
RTE_FLOW_ITEM_TYPE_ETH, 194 }, 195 [MLX5_EXPANSION_VLAN] = { 196 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, 197 MLX5_EXPANSION_IPV6), 198 .type = RTE_FLOW_ITEM_TYPE_VLAN, 199 }, 200 [MLX5_EXPANSION_IPV4] = { 201 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP, 202 MLX5_EXPANSION_IPV4_TCP), 203 .type = RTE_FLOW_ITEM_TYPE_IPV4, 204 .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | 205 ETH_RSS_NONFRAG_IPV4_OTHER, 206 }, 207 [MLX5_EXPANSION_IPV4_UDP] = { 208 .type = RTE_FLOW_ITEM_TYPE_UDP, 209 .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, 210 }, 211 [MLX5_EXPANSION_IPV4_TCP] = { 212 .type = RTE_FLOW_ITEM_TYPE_TCP, 213 .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, 214 }, 215 [MLX5_EXPANSION_IPV6] = { 216 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP, 217 MLX5_EXPANSION_IPV6_TCP), 218 .type = RTE_FLOW_ITEM_TYPE_IPV6, 219 .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | 220 ETH_RSS_NONFRAG_IPV6_OTHER, 221 }, 222 [MLX5_EXPANSION_IPV6_UDP] = { 223 .type = RTE_FLOW_ITEM_TYPE_UDP, 224 .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, 225 }, 226 [MLX5_EXPANSION_IPV6_TCP] = { 227 .type = RTE_FLOW_ITEM_TYPE_TCP, 228 .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, 229 }, 230 }; 231 232 static const struct rte_flow_ops mlx5_flow_ops = { 233 .validate = mlx5_flow_validate, 234 .create = mlx5_flow_create, 235 .destroy = mlx5_flow_destroy, 236 .flush = mlx5_flow_flush, 237 .isolate = mlx5_flow_isolate, 238 .query = mlx5_flow_query, 239 }; 240 241 /* Convert FDIR request to Generic flow. */ 242 struct mlx5_fdir { 243 struct rte_flow_attr attr; 244 struct rte_flow_item items[4]; 245 struct rte_flow_item_eth l2; 246 struct rte_flow_item_eth l2_mask; 247 union { 248 struct rte_flow_item_ipv4 ipv4; 249 struct rte_flow_item_ipv6 ipv6; 250 } l3; 251 union { 252 struct rte_flow_item_ipv4 ipv4; 253 struct rte_flow_item_ipv6 ipv6; 254 } l3_mask; 255 union { 256 struct rte_flow_item_udp udp; 257 struct rte_flow_item_tcp tcp; 258 } l4; 259 union { 260 struct rte_flow_item_udp udp; 261 struct rte_flow_item_tcp tcp; 262 } l4_mask; 263 struct rte_flow_action actions[2]; 264 struct rte_flow_action_queue queue; 265 }; 266 267 /* Map of Verbs to Flow priority with 8 Verbs priorities. */ 268 static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = { 269 { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 }, 270 }; 271 272 /* Map of Verbs to Flow priority with 16 Verbs priorities. */ 273 static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = { 274 { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 }, 275 { 9, 10, 11 }, { 12, 13, 14 }, 276 }; 277 278 /* Tunnel information. */ 279 struct mlx5_flow_tunnel_info { 280 uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */ 281 uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */ 282 }; 283 284 static struct mlx5_flow_tunnel_info tunnels_info[] = { 285 { 286 .tunnel = MLX5_FLOW_LAYER_VXLAN, 287 .ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP, 288 }, 289 { 290 .tunnel = MLX5_FLOW_LAYER_VXLAN_GPE, 291 .ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP, 292 }, 293 { 294 .tunnel = MLX5_FLOW_LAYER_GRE, 295 .ptype = RTE_PTYPE_TUNNEL_GRE, 296 }, 297 { 298 .tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP, 299 .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP, 300 }, 301 { 302 .tunnel = MLX5_FLOW_LAYER_MPLS, 303 .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE, 304 }, 305 { 306 .tunnel = MLX5_FLOW_LAYER_NVGRE, 307 .ptype = RTE_PTYPE_TUNNEL_NVGRE, 308 }, 309 }; 310 311 /** 312 * Discover the maximum number of priority available. 
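 *
 * Editor's summary of the probe below: temporary drop flows are created on
 * the drop queue at Verbs priority 7 and 15; the highest level accepted by
 * the device (8 or 16 Verbs priorities) is then mapped to the 3-level or
 * 5-level flow priority tables defined above.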
313 * 314 * @param[in] dev 315 * Pointer to the Ethernet device structure. 316 * 317 * @return 318 * number of supported flow priority on success, a negative errno 319 * value otherwise and rte_errno is set. 320 */ 321 int 322 mlx5_flow_discover_priorities(struct rte_eth_dev *dev) 323 { 324 struct mlx5_priv *priv = dev->data->dev_private; 325 struct { 326 struct ibv_flow_attr attr; 327 struct ibv_flow_spec_eth eth; 328 struct ibv_flow_spec_action_drop drop; 329 } flow_attr = { 330 .attr = { 331 .num_of_specs = 2, 332 .port = (uint8_t)priv->ibv_port, 333 }, 334 .eth = { 335 .type = IBV_FLOW_SPEC_ETH, 336 .size = sizeof(struct ibv_flow_spec_eth), 337 }, 338 .drop = { 339 .size = sizeof(struct ibv_flow_spec_action_drop), 340 .type = IBV_FLOW_SPEC_ACTION_DROP, 341 }, 342 }; 343 struct ibv_flow *flow; 344 struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev); 345 uint16_t vprio[] = { 8, 16 }; 346 int i; 347 int priority = 0; 348 349 if (!drop) { 350 rte_errno = ENOTSUP; 351 return -rte_errno; 352 } 353 for (i = 0; i != RTE_DIM(vprio); i++) { 354 flow_attr.attr.priority = vprio[i] - 1; 355 flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr); 356 if (!flow) 357 break; 358 claim_zero(mlx5_glue->destroy_flow(flow)); 359 priority = vprio[i]; 360 } 361 mlx5_hrxq_drop_release(dev); 362 switch (priority) { 363 case 8: 364 priority = RTE_DIM(priority_map_3); 365 break; 366 case 16: 367 priority = RTE_DIM(priority_map_5); 368 break; 369 default: 370 rte_errno = ENOTSUP; 371 DRV_LOG(ERR, 372 "port %u verbs maximum priority: %d expected 8/16", 373 dev->data->port_id, priority); 374 return -rte_errno; 375 } 376 DRV_LOG(INFO, "port %u flow maximum priority: %d", 377 dev->data->port_id, priority); 378 return priority; 379 } 380 381 /** 382 * Adjust flow priority based on the highest layer and the request priority. 383 * 384 * @param[in] dev 385 * Pointer to the Ethernet device structure. 386 * @param[in] priority 387 * The rule base priority. 388 * @param[in] subpriority 389 * The priority based on the items. 390 * 391 * @return 392 * The new priority. 393 */ 394 uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, 395 uint32_t subpriority) 396 { 397 uint32_t res = 0; 398 struct mlx5_priv *priv = dev->data->dev_private; 399 400 switch (priv->config.flow_prio) { 401 case RTE_DIM(priority_map_3): 402 res = priority_map_3[priority][subpriority]; 403 break; 404 case RTE_DIM(priority_map_5): 405 res = priority_map_5[priority][subpriority]; 406 break; 407 } 408 return res; 409 } 410 411 /** 412 * Verify the @p item specifications (spec, last, mask) are compatible with the 413 * NIC capabilities. 414 * 415 * @param[in] item 416 * Item specification. 417 * @param[in] mask 418 * @p item->mask or flow default bit-masks. 419 * @param[in] nic_mask 420 * Bit-masks covering supported fields by the NIC to compare with user mask. 421 * @param[in] size 422 * Bit-masks size in bytes. 423 * @param[out] error 424 * Pointer to error structure. 425 * 426 * @return 427 * 0 on success, a negative errno value otherwise and rte_errno is set. 
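 *
 * Typical usage from an item validator (illustrative sketch, mirroring
 * mlx5_flow_validate_item_udp() later in this file):
 *
 *     if (!mask)
 *             mask = &rte_flow_item_udp_mask;
 *     ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
 *                                     (const uint8_t *)&rte_flow_item_udp_mask,
 *                                     sizeof(struct rte_flow_item_udp),
 *                                     error);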
 */
int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
			  const uint8_t *mask,
			  const uint8_t *nic_mask,
			  unsigned int size,
			  struct rte_flow_error *error)
{
	unsigned int i;

	assert(nic_mask);
	for (i = 0; i < size; ++i)
		if ((nic_mask[i] | mask[i]) != nic_mask[i])
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "mask enables non supported"
						  " bits");
	if (!item->spec && (item->mask || item->last))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "mask/last without a spec is not"
					  " supported");
	if (item->spec && item->last) {
		uint8_t spec[size];
		uint8_t last[size];
		unsigned int i;
		int ret;

		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
			last[i] = ((const uint8_t *)item->last)[i] & mask[i];
		}
		ret = memcmp(spec, last, size);
		if (ret != 0)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "range is not valid");
	}
	return 0;
}

/**
 * Adjust the hash fields according to the @p flow information.
 *
 * @param[in] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] tunnel
 *   1 when the hash field is for a tunnel item.
 * @param[in] layer_types
 *   ETH_RSS_* types.
 * @param[in] hash_fields
 *   Item hash fields.
 *
 * @return
 *   The hash fields that should be used.
 */
uint64_t
mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
			    int tunnel __rte_unused, uint64_t layer_types,
			    uint64_t hash_fields)
{
	struct rte_flow *flow = dev_flow->flow;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	int rss_request_inner = flow->rss.level >= 2;

	/* Check RSS hash level for tunnel. */
	if (tunnel && rss_request_inner)
		hash_fields |= IBV_RX_HASH_INNER;
	else if (tunnel || rss_request_inner)
		return 0;
#endif
	/* Check if requested layer matches RSS hash fields. */
	if (!(flow->rss.types & layer_types))
		return 0;
	return hash_fields;
}

/**
 * Look up and set the ptype in the Rx queue data. Only a single ptype can be
 * used; if several tunnel rules are used on this queue, the tunnel ptype is
 * cleared.
 *
 * @param rxq_ctrl
 *   Rx queue to update.
 */
static void
flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	unsigned int i;
	uint32_t tunnel_ptype = 0;

	/* Look up for the ptype to use. */
	for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
		if (!rxq_ctrl->flow_tunnels_n[i])
			continue;
		if (!tunnel_ptype) {
			tunnel_ptype = tunnels_info[i].ptype;
		} else {
			tunnel_ptype = 0;
			break;
		}
	}
	rxq_ctrl->rxq.tunnel = tunnel_ptype;
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
 * flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] dev_flow
 *   Pointer to device flow structure.
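 *
 * Editor's note: flow_mark_n and flow_tunnels_n[] act as per-queue reference
 * counters, so the mark flag and tunnel ptype remain set until the last
 * device flow using them is removed (see flow_drv_rxq_flags_trim() below).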
543 */ 544 static void 545 flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) 546 { 547 struct mlx5_priv *priv = dev->data->dev_private; 548 struct rte_flow *flow = dev_flow->flow; 549 const int mark = !!(flow->actions & 550 (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); 551 const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL); 552 unsigned int i; 553 554 for (i = 0; i != flow->rss.queue_num; ++i) { 555 int idx = (*flow->queue)[i]; 556 struct mlx5_rxq_ctrl *rxq_ctrl = 557 container_of((*priv->rxqs)[idx], 558 struct mlx5_rxq_ctrl, rxq); 559 560 if (mark) { 561 rxq_ctrl->rxq.mark = 1; 562 rxq_ctrl->flow_mark_n++; 563 } 564 if (tunnel) { 565 unsigned int j; 566 567 /* Increase the counter matching the flow. */ 568 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { 569 if ((tunnels_info[j].tunnel & 570 dev_flow->layers) == 571 tunnels_info[j].tunnel) { 572 rxq_ctrl->flow_tunnels_n[j]++; 573 break; 574 } 575 } 576 flow_rxq_tunnel_ptype_update(rxq_ctrl); 577 } 578 } 579 } 580 581 /** 582 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow 583 * 584 * @param[in] dev 585 * Pointer to the Ethernet device structure. 586 * @param[in] flow 587 * Pointer to flow structure. 588 */ 589 static void 590 flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) 591 { 592 struct mlx5_flow *dev_flow; 593 594 LIST_FOREACH(dev_flow, &flow->dev_flows, next) 595 flow_drv_rxq_flags_set(dev, dev_flow); 596 } 597 598 /** 599 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the 600 * device flow if no other flow uses it with the same kind of request. 601 * 602 * @param dev 603 * Pointer to Ethernet device. 604 * @param[in] dev_flow 605 * Pointer to the device flow. 606 */ 607 static void 608 flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) 609 { 610 struct mlx5_priv *priv = dev->data->dev_private; 611 struct rte_flow *flow = dev_flow->flow; 612 const int mark = !!(flow->actions & 613 (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); 614 const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL); 615 unsigned int i; 616 617 assert(dev->data->dev_started); 618 for (i = 0; i != flow->rss.queue_num; ++i) { 619 int idx = (*flow->queue)[i]; 620 struct mlx5_rxq_ctrl *rxq_ctrl = 621 container_of((*priv->rxqs)[idx], 622 struct mlx5_rxq_ctrl, rxq); 623 624 if (mark) { 625 rxq_ctrl->flow_mark_n--; 626 rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n; 627 } 628 if (tunnel) { 629 unsigned int j; 630 631 /* Decrease the counter matching the flow. */ 632 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { 633 if ((tunnels_info[j].tunnel & 634 dev_flow->layers) == 635 tunnels_info[j].tunnel) { 636 rxq_ctrl->flow_tunnels_n[j]--; 637 break; 638 } 639 } 640 flow_rxq_tunnel_ptype_update(rxq_ctrl); 641 } 642 } 643 } 644 645 /** 646 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the 647 * @p flow if no other flow uses it with the same kind of request. 648 * 649 * @param dev 650 * Pointer to Ethernet device. 651 * @param[in] flow 652 * Pointer to the flow. 653 */ 654 static void 655 flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) 656 { 657 struct mlx5_flow *dev_flow; 658 659 LIST_FOREACH(dev_flow, &flow->dev_flows, next) 660 flow_drv_rxq_flags_trim(dev, dev_flow); 661 } 662 663 /** 664 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues. 665 * 666 * @param dev 667 * Pointer to Ethernet device. 
 */
static void
flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl;
		unsigned int j;

		if (!(*priv->rxqs)[i])
			continue;
		rxq_ctrl = container_of((*priv->rxqs)[i],
					struct mlx5_rxq_ctrl, rxq);
		rxq_ctrl->flow_mark_n = 0;
		rxq_ctrl->rxq.mark = 0;
		for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
			rxq_ctrl->flow_tunnels_n[j] = 0;
		rxq_ctrl->rxq.tunnel = 0;
	}
}

/**
 * Validate the flag action.
 *
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_flag(uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't mark and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 flag"
					  " actions in same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "flag action not supported for "
					  "egress");
	return 0;
}

/**
 * Validate the mark action.
 *
 * @param[in] action
 *   Pointer to the mark action.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
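 *
 * Illustrative call (editor's sketch; the action_flags, attr and error
 * variables are hypothetical caller state):
 *
 *     struct rte_flow_action_mark conf = { .id = 42 };
 *     struct rte_flow_action act = {
 *             .type = RTE_FLOW_ACTION_TYPE_MARK,
 *             .conf = &conf,
 *     };
 *     ret = mlx5_flow_validate_action_mark(&act, action_flags, attr, error);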
 */
int
mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
			       uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	const struct rte_flow_action_mark *mark = action->conf;

	if (!mark)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "configuration cannot be null");
	if (mark->id >= MLX5_FLOW_MARK_MAX)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &mark->id,
					  "mark id must be in 0 <= id < "
					  RTE_STR(MLX5_FLOW_MARK_MAX));
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and mark in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't flag and mark in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 mark actions in same"
					  " flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "mark action not supported for "
					  "egress");
	return 0;
}

/**
 * Validate the drop action.
 *
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_drop(uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and mark in same flow");
	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "drop action not supported for "
					  "egress");
	return 0;
}

/**
 * Validate the queue action.
 *
 * @param[in] action
 *   Pointer to the queue action.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
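 *
 * Illustrative call (editor's sketch; action_flags, dev, attr and error are
 * hypothetical caller state):
 *
 *     struct rte_flow_action_queue conf = { .index = 0 };
 *     struct rte_flow_action act = {
 *             .type = RTE_FLOW_ACTION_TYPE_QUEUE,
 *             .conf = &conf,
 *     };
 *     ret = mlx5_flow_validate_action_queue(&act, action_flags, dev, attr,
 *                                           error);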
841 */ 842 int 843 mlx5_flow_validate_action_queue(const struct rte_flow_action *action, 844 uint64_t action_flags, 845 struct rte_eth_dev *dev, 846 const struct rte_flow_attr *attr, 847 struct rte_flow_error *error) 848 { 849 struct mlx5_priv *priv = dev->data->dev_private; 850 const struct rte_flow_action_queue *queue = action->conf; 851 852 if (action_flags & MLX5_FLOW_FATE_ACTIONS) 853 return rte_flow_error_set(error, EINVAL, 854 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 855 "can't have 2 fate actions in" 856 " same flow"); 857 if (!priv->rxqs_n) 858 return rte_flow_error_set(error, EINVAL, 859 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 860 NULL, "No Rx queues configured"); 861 if (queue->index >= priv->rxqs_n) 862 return rte_flow_error_set(error, EINVAL, 863 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 864 &queue->index, 865 "queue index out of range"); 866 if (!(*priv->rxqs)[queue->index]) 867 return rte_flow_error_set(error, EINVAL, 868 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 869 &queue->index, 870 "queue is not configured"); 871 if (attr->egress) 872 return rte_flow_error_set(error, ENOTSUP, 873 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 874 "queue action not supported for " 875 "egress"); 876 return 0; 877 } 878 879 /* 880 * Validate the rss action. 881 * 882 * @param[in] action 883 * Pointer to the queue action. 884 * @param[in] action_flags 885 * Bit-fields that holds the actions detected until now. 886 * @param[in] dev 887 * Pointer to the Ethernet device structure. 888 * @param[in] attr 889 * Attributes of flow that includes this action. 890 * @param[in] item_flags 891 * Items that were detected. 892 * @param[out] error 893 * Pointer to error structure. 894 * 895 * @return 896 * 0 on success, a negative errno value otherwise and rte_errno is set. 897 */ 898 int 899 mlx5_flow_validate_action_rss(const struct rte_flow_action *action, 900 uint64_t action_flags, 901 struct rte_eth_dev *dev, 902 const struct rte_flow_attr *attr, 903 uint64_t item_flags, 904 struct rte_flow_error *error) 905 { 906 struct mlx5_priv *priv = dev->data->dev_private; 907 const struct rte_flow_action_rss *rss = action->conf; 908 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 909 unsigned int i; 910 911 if (action_flags & MLX5_FLOW_FATE_ACTIONS) 912 return rte_flow_error_set(error, EINVAL, 913 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 914 "can't have 2 fate actions" 915 " in same flow"); 916 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT && 917 rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) 918 return rte_flow_error_set(error, ENOTSUP, 919 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 920 &rss->func, 921 "RSS hash function not supported"); 922 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 923 if (rss->level > 2) 924 #else 925 if (rss->level > 1) 926 #endif 927 return rte_flow_error_set(error, ENOTSUP, 928 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 929 &rss->level, 930 "tunnel RSS is not supported"); 931 /* allow RSS key_len 0 in case of NULL (default) RSS key. 
*/ 932 if (rss->key_len == 0 && rss->key != NULL) 933 return rte_flow_error_set(error, ENOTSUP, 934 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 935 &rss->key_len, 936 "RSS hash key length 0"); 937 if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN) 938 return rte_flow_error_set(error, ENOTSUP, 939 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 940 &rss->key_len, 941 "RSS hash key too small"); 942 if (rss->key_len > MLX5_RSS_HASH_KEY_LEN) 943 return rte_flow_error_set(error, ENOTSUP, 944 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 945 &rss->key_len, 946 "RSS hash key too large"); 947 if (rss->queue_num > priv->config.ind_table_max_size) 948 return rte_flow_error_set(error, ENOTSUP, 949 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 950 &rss->queue_num, 951 "number of queues too large"); 952 if (rss->types & MLX5_RSS_HF_MASK) 953 return rte_flow_error_set(error, ENOTSUP, 954 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 955 &rss->types, 956 "some RSS protocols are not" 957 " supported"); 958 if (!priv->rxqs_n) 959 return rte_flow_error_set(error, EINVAL, 960 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 961 NULL, "No Rx queues configured"); 962 if (!rss->queue_num) 963 return rte_flow_error_set(error, EINVAL, 964 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 965 NULL, "No queues configured"); 966 for (i = 0; i != rss->queue_num; ++i) { 967 if (!(*priv->rxqs)[rss->queue[i]]) 968 return rte_flow_error_set 969 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, 970 &rss->queue[i], "queue is not configured"); 971 } 972 if (attr->egress) 973 return rte_flow_error_set(error, ENOTSUP, 974 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 975 "rss action not supported for " 976 "egress"); 977 if (rss->level > 1 && !tunnel) 978 return rte_flow_error_set(error, EINVAL, 979 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, 980 "inner RSS is not supported for " 981 "non-tunnel flows"); 982 return 0; 983 } 984 985 /* 986 * Validate the count action. 987 * 988 * @param[in] dev 989 * Pointer to the Ethernet device structure. 990 * @param[in] attr 991 * Attributes of flow that includes this action. 992 * @param[out] error 993 * Pointer to error structure. 994 * 995 * @return 996 * 0 on success, a negative errno value otherwise and rte_errno is set. 997 */ 998 int 999 mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused, 1000 const struct rte_flow_attr *attr, 1001 struct rte_flow_error *error) 1002 { 1003 if (attr->egress) 1004 return rte_flow_error_set(error, ENOTSUP, 1005 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1006 "count action not supported for " 1007 "egress"); 1008 return 0; 1009 } 1010 1011 /** 1012 * Verify the @p attributes will be correctly understood by the NIC and store 1013 * them in the @p flow if everything is correct. 1014 * 1015 * @param[in] dev 1016 * Pointer to the Ethernet device structure. 1017 * @param[in] attributes 1018 * Pointer to flow attributes 1019 * @param[out] error 1020 * Pointer to error structure. 1021 * 1022 * @return 1023 * 0 on success, a negative errno value otherwise and rte_errno is set. 
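 *
 * Editor's sketch of what the checks below accept: ingress only, group 0,
 * a priority below the discovered maximum (or MLX5_FLOW_PRIO_RSVD), and
 * transfer only when E-Switch DV support is enabled, e.g.:
 *
 *     struct rte_flow_attr attr = { .group = 0, .priority = 0, .ingress = 1 };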
1024 */ 1025 int 1026 mlx5_flow_validate_attributes(struct rte_eth_dev *dev, 1027 const struct rte_flow_attr *attributes, 1028 struct rte_flow_error *error) 1029 { 1030 struct mlx5_priv *priv = dev->data->dev_private; 1031 uint32_t priority_max = priv->config.flow_prio - 1; 1032 1033 if (attributes->group) 1034 return rte_flow_error_set(error, ENOTSUP, 1035 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, 1036 NULL, "groups is not supported"); 1037 if (attributes->priority != MLX5_FLOW_PRIO_RSVD && 1038 attributes->priority >= priority_max) 1039 return rte_flow_error_set(error, ENOTSUP, 1040 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, 1041 NULL, "priority out of range"); 1042 if (attributes->egress) 1043 return rte_flow_error_set(error, ENOTSUP, 1044 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, 1045 "egress is not supported"); 1046 if (attributes->transfer && !priv->config.dv_esw_en) 1047 return rte_flow_error_set(error, ENOTSUP, 1048 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, 1049 NULL, "transfer is not supported"); 1050 if (!attributes->ingress) 1051 return rte_flow_error_set(error, EINVAL, 1052 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, 1053 NULL, 1054 "ingress attribute is mandatory"); 1055 return 0; 1056 } 1057 1058 /** 1059 * Validate ICMP6 item. 1060 * 1061 * @param[in] item 1062 * Item specification. 1063 * @param[in] item_flags 1064 * Bit-fields that holds the items detected until now. 1065 * @param[out] error 1066 * Pointer to error structure. 1067 * 1068 * @return 1069 * 0 on success, a negative errno value otherwise and rte_errno is set. 1070 */ 1071 int 1072 mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item, 1073 uint64_t item_flags, 1074 uint8_t target_protocol, 1075 struct rte_flow_error *error) 1076 { 1077 const struct rte_flow_item_icmp6 *mask = item->mask; 1078 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1079 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : 1080 MLX5_FLOW_LAYER_OUTER_L3_IPV6; 1081 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1082 MLX5_FLOW_LAYER_OUTER_L4; 1083 int ret; 1084 1085 if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6) 1086 return rte_flow_error_set(error, EINVAL, 1087 RTE_FLOW_ERROR_TYPE_ITEM, item, 1088 "protocol filtering not compatible" 1089 " with ICMP6 layer"); 1090 if (!(item_flags & l3m)) 1091 return rte_flow_error_set(error, EINVAL, 1092 RTE_FLOW_ERROR_TYPE_ITEM, item, 1093 "IPv6 is mandatory to filter on" 1094 " ICMP6"); 1095 if (item_flags & l4m) 1096 return rte_flow_error_set(error, EINVAL, 1097 RTE_FLOW_ERROR_TYPE_ITEM, item, 1098 "multiple L4 layers not supported"); 1099 if (!mask) 1100 mask = &rte_flow_item_icmp6_mask; 1101 ret = mlx5_flow_item_acceptable 1102 (item, (const uint8_t *)mask, 1103 (const uint8_t *)&rte_flow_item_icmp6_mask, 1104 sizeof(struct rte_flow_item_icmp6), error); 1105 if (ret < 0) 1106 return ret; 1107 return 0; 1108 } 1109 1110 /** 1111 * Validate ICMP item. 1112 * 1113 * @param[in] item 1114 * Item specification. 1115 * @param[in] item_flags 1116 * Bit-fields that holds the items detected until now. 1117 * @param[out] error 1118 * Pointer to error structure. 1119 * 1120 * @return 1121 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1122 */ 1123 int 1124 mlx5_flow_validate_item_icmp(const struct rte_flow_item *item, 1125 uint64_t item_flags, 1126 uint8_t target_protocol, 1127 struct rte_flow_error *error) 1128 { 1129 const struct rte_flow_item_icmp *mask = item->mask; 1130 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1131 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : 1132 MLX5_FLOW_LAYER_OUTER_L3_IPV4; 1133 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1134 MLX5_FLOW_LAYER_OUTER_L4; 1135 int ret; 1136 1137 if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP) 1138 return rte_flow_error_set(error, EINVAL, 1139 RTE_FLOW_ERROR_TYPE_ITEM, item, 1140 "protocol filtering not compatible" 1141 " with ICMP layer"); 1142 if (!(item_flags & l3m)) 1143 return rte_flow_error_set(error, EINVAL, 1144 RTE_FLOW_ERROR_TYPE_ITEM, item, 1145 "IPv4 is mandatory to filter" 1146 " on ICMP"); 1147 if (item_flags & l4m) 1148 return rte_flow_error_set(error, EINVAL, 1149 RTE_FLOW_ERROR_TYPE_ITEM, item, 1150 "multiple L4 layers not supported"); 1151 if (!mask) 1152 mask = &rte_flow_item_icmp_mask; 1153 ret = mlx5_flow_item_acceptable 1154 (item, (const uint8_t *)mask, 1155 (const uint8_t *)&rte_flow_item_icmp_mask, 1156 sizeof(struct rte_flow_item_icmp), error); 1157 if (ret < 0) 1158 return ret; 1159 return 0; 1160 } 1161 1162 /** 1163 * Validate Ethernet item. 1164 * 1165 * @param[in] item 1166 * Item specification. 1167 * @param[in] item_flags 1168 * Bit-fields that holds the items detected until now. 1169 * @param[out] error 1170 * Pointer to error structure. 1171 * 1172 * @return 1173 * 0 on success, a negative errno value otherwise and rte_errno is set. 1174 */ 1175 int 1176 mlx5_flow_validate_item_eth(const struct rte_flow_item *item, 1177 uint64_t item_flags, 1178 struct rte_flow_error *error) 1179 { 1180 const struct rte_flow_item_eth *mask = item->mask; 1181 const struct rte_flow_item_eth nic_mask = { 1182 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", 1183 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", 1184 .type = RTE_BE16(0xffff), 1185 }; 1186 int ret; 1187 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1188 const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : 1189 MLX5_FLOW_LAYER_OUTER_L2; 1190 1191 if (item_flags & ethm) 1192 return rte_flow_error_set(error, ENOTSUP, 1193 RTE_FLOW_ERROR_TYPE_ITEM, item, 1194 "multiple L2 layers not supported"); 1195 if (!mask) 1196 mask = &rte_flow_item_eth_mask; 1197 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1198 (const uint8_t *)&nic_mask, 1199 sizeof(struct rte_flow_item_eth), 1200 error); 1201 return ret; 1202 } 1203 1204 /** 1205 * Validate VLAN item. 1206 * 1207 * @param[in] item 1208 * Item specification. 1209 * @param[in] item_flags 1210 * Bit-fields that holds the items detected until now. 1211 * @param[in] dev 1212 * Ethernet device flow is being created on. 1213 * @param[out] error 1214 * Pointer to error structure. 1215 * 1216 * @return 1217 * 0 on success, a negative errno value otherwise and rte_errno is set. 
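 *
 * Editor's note: as explained in the code below, a TCI that is all zero
 * after masking is rejected because, from the Verbs perspective, an empty
 * VLAN spec would match packets without any VLAN layer. An accepted spec
 * could look like (illustrative):
 *
 *     struct rte_flow_item_vlan vlan_spec = { .tci = RTE_BE16(0x123) };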
1218 */ 1219 int 1220 mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, 1221 uint64_t item_flags, 1222 struct rte_eth_dev *dev, 1223 struct rte_flow_error *error) 1224 { 1225 const struct rte_flow_item_vlan *spec = item->spec; 1226 const struct rte_flow_item_vlan *mask = item->mask; 1227 const struct rte_flow_item_vlan nic_mask = { 1228 .tci = RTE_BE16(UINT16_MAX), 1229 .inner_type = RTE_BE16(UINT16_MAX), 1230 }; 1231 uint16_t vlan_tag = 0; 1232 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1233 int ret; 1234 const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 | 1235 MLX5_FLOW_LAYER_INNER_L4) : 1236 (MLX5_FLOW_LAYER_OUTER_L3 | 1237 MLX5_FLOW_LAYER_OUTER_L4); 1238 const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN : 1239 MLX5_FLOW_LAYER_OUTER_VLAN; 1240 1241 if (item_flags & vlanm) 1242 return rte_flow_error_set(error, EINVAL, 1243 RTE_FLOW_ERROR_TYPE_ITEM, item, 1244 "multiple VLAN layers not supported"); 1245 else if ((item_flags & l34m) != 0) 1246 return rte_flow_error_set(error, EINVAL, 1247 RTE_FLOW_ERROR_TYPE_ITEM, item, 1248 "L2 layer cannot follow L3/L4 layer"); 1249 if (!mask) 1250 mask = &rte_flow_item_vlan_mask; 1251 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1252 (const uint8_t *)&nic_mask, 1253 sizeof(struct rte_flow_item_vlan), 1254 error); 1255 if (ret) 1256 return ret; 1257 if (!tunnel && mask->tci != RTE_BE16(0x0fff)) { 1258 struct mlx5_priv *priv = dev->data->dev_private; 1259 1260 if (priv->vmwa_context) { 1261 /* 1262 * Non-NULL context means we have a virtual machine 1263 * and SR-IOV enabled, we have to create VLAN interface 1264 * to make hypervisor to setup E-Switch vport 1265 * context correctly. We avoid creating the multiple 1266 * VLAN interfaces, so we cannot support VLAN tag mask. 1267 */ 1268 return rte_flow_error_set(error, EINVAL, 1269 RTE_FLOW_ERROR_TYPE_ITEM, 1270 item, 1271 "VLAN tag mask is not" 1272 " supported in virtual" 1273 " environment"); 1274 } 1275 } 1276 if (spec) { 1277 vlan_tag = spec->tci; 1278 vlan_tag &= mask->tci; 1279 } 1280 /* 1281 * From verbs perspective an empty VLAN is equivalent 1282 * to a packet without VLAN layer. 1283 */ 1284 if (!vlan_tag) 1285 return rte_flow_error_set(error, EINVAL, 1286 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, 1287 item->spec, 1288 "VLAN cannot be empty"); 1289 return 0; 1290 } 1291 1292 /** 1293 * Validate IPV4 item. 1294 * 1295 * @param[in] item 1296 * Item specification. 1297 * @param[in] item_flags 1298 * Bit-fields that holds the items detected until now. 1299 * @param[in] acc_mask 1300 * Acceptable mask, if NULL default internal default mask 1301 * will be used to check whether item fields are supported. 1302 * @param[out] error 1303 * Pointer to error structure. 1304 * 1305 * @return 1306 * 0 on success, a negative errno value otherwise and rte_errno is set. 1307 */ 1308 int 1309 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, 1310 uint64_t item_flags, 1311 const struct rte_flow_item_ipv4 *acc_mask, 1312 struct rte_flow_error *error) 1313 { 1314 const struct rte_flow_item_ipv4 *mask = item->mask; 1315 const struct rte_flow_item_ipv4 *spec = item->spec; 1316 const struct rte_flow_item_ipv4 nic_mask = { 1317 .hdr = { 1318 .src_addr = RTE_BE32(0xffffffff), 1319 .dst_addr = RTE_BE32(0xffffffff), 1320 .type_of_service = 0xff, 1321 .next_proto_id = 0xff, 1322 }, 1323 }; 1324 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1325 const uint64_t l3m = tunnel ? 
MLX5_FLOW_LAYER_INNER_L3 : 1326 MLX5_FLOW_LAYER_OUTER_L3; 1327 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1328 MLX5_FLOW_LAYER_OUTER_L4; 1329 int ret; 1330 uint8_t next_proto = 0xFF; 1331 1332 if (item_flags & MLX5_FLOW_LAYER_IPIP) { 1333 if (mask && spec) 1334 next_proto = mask->hdr.next_proto_id & 1335 spec->hdr.next_proto_id; 1336 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) 1337 return rte_flow_error_set(error, EINVAL, 1338 RTE_FLOW_ERROR_TYPE_ITEM, 1339 item, 1340 "multiple tunnel " 1341 "not supported"); 1342 } 1343 if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) 1344 return rte_flow_error_set(error, EINVAL, 1345 RTE_FLOW_ERROR_TYPE_ITEM, item, 1346 "wrong tunnel type - IPv6 specified " 1347 "but IPv4 item provided"); 1348 if (item_flags & l3m) 1349 return rte_flow_error_set(error, ENOTSUP, 1350 RTE_FLOW_ERROR_TYPE_ITEM, item, 1351 "multiple L3 layers not supported"); 1352 else if (item_flags & l4m) 1353 return rte_flow_error_set(error, EINVAL, 1354 RTE_FLOW_ERROR_TYPE_ITEM, item, 1355 "L3 cannot follow an L4 layer."); 1356 else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && 1357 !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) 1358 return rte_flow_error_set(error, EINVAL, 1359 RTE_FLOW_ERROR_TYPE_ITEM, item, 1360 "L3 cannot follow an NVGRE layer."); 1361 if (!mask) 1362 mask = &rte_flow_item_ipv4_mask; 1363 else if (mask->hdr.next_proto_id != 0 && 1364 mask->hdr.next_proto_id != 0xff) 1365 return rte_flow_error_set(error, EINVAL, 1366 RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask, 1367 "partial mask is not supported" 1368 " for protocol"); 1369 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1370 acc_mask ? (const uint8_t *)acc_mask 1371 : (const uint8_t *)&nic_mask, 1372 sizeof(struct rte_flow_item_ipv4), 1373 error); 1374 if (ret < 0) 1375 return ret; 1376 return 0; 1377 } 1378 1379 /** 1380 * Validate IPV6 item. 1381 * 1382 * @param[in] item 1383 * Item specification. 1384 * @param[in] item_flags 1385 * Bit-fields that holds the items detected until now. 1386 * @param[in] acc_mask 1387 * Acceptable mask, if NULL default internal default mask 1388 * will be used to check whether item fields are supported. 1389 * @param[out] error 1390 * Pointer to error structure. 1391 * 1392 * @return 1393 * 0 on success, a negative errno value otherwise and rte_errno is set. 1394 */ 1395 int 1396 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, 1397 uint64_t item_flags, 1398 const struct rte_flow_item_ipv6 *acc_mask, 1399 struct rte_flow_error *error) 1400 { 1401 const struct rte_flow_item_ipv6 *mask = item->mask; 1402 const struct rte_flow_item_ipv6 *spec = item->spec; 1403 const struct rte_flow_item_ipv6 nic_mask = { 1404 .hdr = { 1405 .src_addr = 1406 "\xff\xff\xff\xff\xff\xff\xff\xff" 1407 "\xff\xff\xff\xff\xff\xff\xff\xff", 1408 .dst_addr = 1409 "\xff\xff\xff\xff\xff\xff\xff\xff" 1410 "\xff\xff\xff\xff\xff\xff\xff\xff", 1411 .vtc_flow = RTE_BE32(0xffffffff), 1412 .proto = 0xff, 1413 .hop_limits = 0xff, 1414 }, 1415 }; 1416 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1417 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1418 MLX5_FLOW_LAYER_OUTER_L3; 1419 const uint64_t l4m = tunnel ? 
MLX5_FLOW_LAYER_INNER_L4 : 1420 MLX5_FLOW_LAYER_OUTER_L4; 1421 int ret; 1422 uint8_t next_proto = 0xFF; 1423 1424 if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) { 1425 if (mask && spec) 1426 next_proto = mask->hdr.proto & spec->hdr.proto; 1427 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) 1428 return rte_flow_error_set(error, EINVAL, 1429 RTE_FLOW_ERROR_TYPE_ITEM, 1430 item, 1431 "multiple tunnel " 1432 "not supported"); 1433 } 1434 if (item_flags & MLX5_FLOW_LAYER_IPIP) 1435 return rte_flow_error_set(error, EINVAL, 1436 RTE_FLOW_ERROR_TYPE_ITEM, item, 1437 "wrong tunnel type - IPv4 specified " 1438 "but IPv6 item provided"); 1439 if (item_flags & l3m) 1440 return rte_flow_error_set(error, ENOTSUP, 1441 RTE_FLOW_ERROR_TYPE_ITEM, item, 1442 "multiple L3 layers not supported"); 1443 else if (item_flags & l4m) 1444 return rte_flow_error_set(error, EINVAL, 1445 RTE_FLOW_ERROR_TYPE_ITEM, item, 1446 "L3 cannot follow an L4 layer."); 1447 else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && 1448 !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) 1449 return rte_flow_error_set(error, EINVAL, 1450 RTE_FLOW_ERROR_TYPE_ITEM, item, 1451 "L3 cannot follow an NVGRE layer."); 1452 if (!mask) 1453 mask = &rte_flow_item_ipv6_mask; 1454 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1455 acc_mask ? (const uint8_t *)acc_mask 1456 : (const uint8_t *)&nic_mask, 1457 sizeof(struct rte_flow_item_ipv6), 1458 error); 1459 if (ret < 0) 1460 return ret; 1461 return 0; 1462 } 1463 1464 /** 1465 * Validate UDP item. 1466 * 1467 * @param[in] item 1468 * Item specification. 1469 * @param[in] item_flags 1470 * Bit-fields that holds the items detected until now. 1471 * @param[in] target_protocol 1472 * The next protocol in the previous item. 1473 * @param[in] flow_mask 1474 * mlx5 flow-specific (DV, verbs, etc.) supported header fields mask. 1475 * @param[out] error 1476 * Pointer to error structure. 1477 * 1478 * @return 1479 * 0 on success, a negative errno value otherwise and rte_errno is set. 1480 */ 1481 int 1482 mlx5_flow_validate_item_udp(const struct rte_flow_item *item, 1483 uint64_t item_flags, 1484 uint8_t target_protocol, 1485 struct rte_flow_error *error) 1486 { 1487 const struct rte_flow_item_udp *mask = item->mask; 1488 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1489 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : 1490 MLX5_FLOW_LAYER_OUTER_L3; 1491 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : 1492 MLX5_FLOW_LAYER_OUTER_L4; 1493 int ret; 1494 1495 if (target_protocol != 0xff && target_protocol != IPPROTO_UDP) 1496 return rte_flow_error_set(error, EINVAL, 1497 RTE_FLOW_ERROR_TYPE_ITEM, item, 1498 "protocol filtering not compatible" 1499 " with UDP layer"); 1500 if (!(item_flags & l3m)) 1501 return rte_flow_error_set(error, EINVAL, 1502 RTE_FLOW_ERROR_TYPE_ITEM, item, 1503 "L3 is mandatory to filter on L4"); 1504 if (item_flags & l4m) 1505 return rte_flow_error_set(error, EINVAL, 1506 RTE_FLOW_ERROR_TYPE_ITEM, item, 1507 "multiple L4 layers not supported"); 1508 if (!mask) 1509 mask = &rte_flow_item_udp_mask; 1510 ret = mlx5_flow_item_acceptable 1511 (item, (const uint8_t *)mask, 1512 (const uint8_t *)&rte_flow_item_udp_mask, 1513 sizeof(struct rte_flow_item_udp), error); 1514 if (ret < 0) 1515 return ret; 1516 return 0; 1517 } 1518 1519 /** 1520 * Validate TCP item. 1521 * 1522 * @param[in] item 1523 * Item specification. 1524 * @param[in] item_flags 1525 * Bit-fields that holds the items detected until now. 
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[in] flow_mask
 *   mlx5 flow-specific (DV, verbs, etc.) supported header fields mask.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    uint8_t target_protocol,
			    const struct rte_flow_item_tcp *flow_mask,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_tcp *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	assert(flow_mask);
	if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with TCP layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 is mandatory to filter on L4");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_tcp_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)flow_mask,
		 sizeof(struct rte_flow_item_tcp), error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate VXLAN item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
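 *
 * Editor's note: the checks below require a fully specified outer stack
 * ending in UDP and a non-zero VNI, e.g. a pattern of the form
 * (illustrative, testpmd-style):
 *
 *     eth / ipv4 / udp / vxlan vni is 100 / end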
1588 */ 1589 int 1590 mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, 1591 uint64_t item_flags, 1592 struct rte_flow_error *error) 1593 { 1594 const struct rte_flow_item_vxlan *spec = item->spec; 1595 const struct rte_flow_item_vxlan *mask = item->mask; 1596 int ret; 1597 union vni { 1598 uint32_t vlan_id; 1599 uint8_t vni[4]; 1600 } id = { .vlan_id = 0, }; 1601 uint32_t vlan_id = 0; 1602 1603 1604 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 1605 return rte_flow_error_set(error, ENOTSUP, 1606 RTE_FLOW_ERROR_TYPE_ITEM, item, 1607 "multiple tunnel layers not" 1608 " supported"); 1609 /* 1610 * Verify only UDPv4 is present as defined in 1611 * https://tools.ietf.org/html/rfc7348 1612 */ 1613 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 1614 return rte_flow_error_set(error, EINVAL, 1615 RTE_FLOW_ERROR_TYPE_ITEM, item, 1616 "no outer UDP layer found"); 1617 if (!mask) 1618 mask = &rte_flow_item_vxlan_mask; 1619 ret = mlx5_flow_item_acceptable 1620 (item, (const uint8_t *)mask, 1621 (const uint8_t *)&rte_flow_item_vxlan_mask, 1622 sizeof(struct rte_flow_item_vxlan), 1623 error); 1624 if (ret < 0) 1625 return ret; 1626 if (spec) { 1627 memcpy(&id.vni[1], spec->vni, 3); 1628 vlan_id = id.vlan_id; 1629 memcpy(&id.vni[1], mask->vni, 3); 1630 vlan_id &= id.vlan_id; 1631 } 1632 /* 1633 * Tunnel id 0 is equivalent as not adding a VXLAN layer, if 1634 * only this layer is defined in the Verbs specification it is 1635 * interpreted as wildcard and all packets will match this 1636 * rule, if it follows a full stack layer (ex: eth / ipv4 / 1637 * udp), all packets matching the layers before will also 1638 * match this rule. To avoid such situation, VNI 0 is 1639 * currently refused. 1640 */ 1641 if (!vlan_id) 1642 return rte_flow_error_set(error, ENOTSUP, 1643 RTE_FLOW_ERROR_TYPE_ITEM, item, 1644 "VXLAN vni cannot be 0"); 1645 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 1646 return rte_flow_error_set(error, ENOTSUP, 1647 RTE_FLOW_ERROR_TYPE_ITEM, item, 1648 "VXLAN tunnel must be fully defined"); 1649 return 0; 1650 } 1651 1652 /** 1653 * Validate VXLAN_GPE item. 1654 * 1655 * @param[in] item 1656 * Item specification. 1657 * @param[in] item_flags 1658 * Bit-fields that holds the items detected until now. 1659 * @param[in] priv 1660 * Pointer to the private data structure. 1661 * @param[in] target_protocol 1662 * The next protocol in the previous item. 1663 * @param[out] error 1664 * Pointer to error structure. 1665 * 1666 * @return 1667 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1668 */ 1669 int 1670 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, 1671 uint64_t item_flags, 1672 struct rte_eth_dev *dev, 1673 struct rte_flow_error *error) 1674 { 1675 struct mlx5_priv *priv = dev->data->dev_private; 1676 const struct rte_flow_item_vxlan_gpe *spec = item->spec; 1677 const struct rte_flow_item_vxlan_gpe *mask = item->mask; 1678 int ret; 1679 union vni { 1680 uint32_t vlan_id; 1681 uint8_t vni[4]; 1682 } id = { .vlan_id = 0, }; 1683 uint32_t vlan_id = 0; 1684 1685 if (!priv->config.l3_vxlan_en) 1686 return rte_flow_error_set(error, ENOTSUP, 1687 RTE_FLOW_ERROR_TYPE_ITEM, item, 1688 "L3 VXLAN is not enabled by device" 1689 " parameter and/or not configured in" 1690 " firmware"); 1691 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 1692 return rte_flow_error_set(error, ENOTSUP, 1693 RTE_FLOW_ERROR_TYPE_ITEM, item, 1694 "multiple tunnel layers not" 1695 " supported"); 1696 /* 1697 * Verify only UDPv4 is present as defined in 1698 * https://tools.ietf.org/html/rfc7348 1699 */ 1700 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 1701 return rte_flow_error_set(error, EINVAL, 1702 RTE_FLOW_ERROR_TYPE_ITEM, item, 1703 "no outer UDP layer found"); 1704 if (!mask) 1705 mask = &rte_flow_item_vxlan_gpe_mask; 1706 ret = mlx5_flow_item_acceptable 1707 (item, (const uint8_t *)mask, 1708 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask, 1709 sizeof(struct rte_flow_item_vxlan_gpe), 1710 error); 1711 if (ret < 0) 1712 return ret; 1713 if (spec) { 1714 if (spec->protocol) 1715 return rte_flow_error_set(error, ENOTSUP, 1716 RTE_FLOW_ERROR_TYPE_ITEM, 1717 item, 1718 "VxLAN-GPE protocol" 1719 " not supported"); 1720 memcpy(&id.vni[1], spec->vni, 3); 1721 vlan_id = id.vlan_id; 1722 memcpy(&id.vni[1], mask->vni, 3); 1723 vlan_id &= id.vlan_id; 1724 } 1725 /* 1726 * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this 1727 * layer is defined in the Verbs specification it is interpreted as 1728 * wildcard and all packets will match this rule, if it follows a full 1729 * stack layer (ex: eth / ipv4 / udp), all packets matching the layers 1730 * before will also match this rule. To avoid such situation, VNI 0 1731 * is currently refused. 1732 */ 1733 if (!vlan_id) 1734 return rte_flow_error_set(error, ENOTSUP, 1735 RTE_FLOW_ERROR_TYPE_ITEM, item, 1736 "VXLAN-GPE vni cannot be 0"); 1737 if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) 1738 return rte_flow_error_set(error, ENOTSUP, 1739 RTE_FLOW_ERROR_TYPE_ITEM, item, 1740 "VXLAN-GPE tunnel must be fully" 1741 " defined"); 1742 return 0; 1743 } 1744 /** 1745 * Validate GRE Key item. 1746 * 1747 * @param[in] item 1748 * Item specification. 1749 * @param[in] item_flags 1750 * Bit flags to mark detected items. 1751 * @param[in] gre_item 1752 * Pointer to gre_item 1753 * @param[out] error 1754 * Pointer to error structure. 1755 * 1756 * @return 1757 * 0 on success, a negative errno value otherwise and rte_errno is set. 
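 *
 * Editor's note: when the preceding GRE item carries a spec whose mask
 * covers the key-present bit (0x2000 in c_rsvd0_ver), that bit must be set,
 * e.g. (illustrative, testpmd-style):
 *
 *     eth / ipv4 / gre c_rsvd0_ver is 0x2000 / gre_key value is 0x1234 / end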
 */
int
mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
				uint64_t item_flags,
				const struct rte_flow_item *gre_item,
				struct rte_flow_error *error)
{
	const rte_be32_t *mask = item->mask;
	int ret = 0;
	rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
	const struct rte_flow_item_gre *gre_spec = gre_item->spec;
	const struct rte_flow_item_gre *gre_mask = gre_item->mask;

	if (item_flags & MLX5_FLOW_LAYER_GRE_KEY)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Multiple GRE keys not supported");
	if (!(item_flags & MLX5_FLOW_LAYER_GRE))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "No preceding GRE header");
	if (item_flags & MLX5_FLOW_LAYER_INNER)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "GRE key following a wrong item");
	if (!gre_mask)
		gre_mask = &rte_flow_item_gre_mask;
	if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
	    !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Key bit must be on");

	if (!mask)
		mask = &gre_key_default_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&gre_key_default_mask,
		 sizeof(rte_be32_t), error);
	return ret;
}

/**
 * Validate GRE item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit flags to mark detected items.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    uint8_t target_protocol,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_gre *spec __rte_unused = item->spec;
	const struct rte_flow_item_gre *mask = item->mask;
	int ret;
	const struct rte_flow_item_gre nic_mask = {
		.c_rsvd0_ver = RTE_BE16(0xB000),
		.protocol = RTE_BE16(UINT16_MAX),
	};

	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with this GRE layer");
	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple tunnel layers not"
					  " supported");
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 Layer is missing");
	if (!mask)
		mask = &rte_flow_item_gre_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&nic_mask,
		 sizeof(struct rte_flow_item_gre), error);
	if (ret < 0)
		return ret;
#ifndef HAVE_MLX5DV_DR
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
	if (spec && (spec->protocol & mask->protocol))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "without MPLS support the"
					  " specification cannot be used for"
					  " filtering");
#endif
#endif
	return 0;
}

/**
 * Validate MPLS item.
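 *
 * Editor's note: MPLS matching is only available when rdma-core provides
 * HAVE_IBV_DEVICE_MPLS_SUPPORT and mpls_en is set in the device
 * configuration; otherwise the item is rejected with ENOTSUP (see the
 * #ifdef in the function body).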
1866 * 1867 * @param[in] dev 1868 * Pointer to the rte_eth_dev structure. 1869 * @param[in] item 1870 * Item specification. 1871 * @param[in] item_flags 1872 * Bit-fields that holds the items detected until now. 1873 * @param[in] prev_layer 1874 * The protocol layer indicated in previous item. 1875 * @param[out] error 1876 * Pointer to error structure. 1877 * 1878 * @return 1879 * 0 on success, a negative errno value otherwise and rte_errno is set. 1880 */ 1881 int 1882 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, 1883 const struct rte_flow_item *item __rte_unused, 1884 uint64_t item_flags __rte_unused, 1885 uint64_t prev_layer __rte_unused, 1886 struct rte_flow_error *error) 1887 { 1888 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT 1889 const struct rte_flow_item_mpls *mask = item->mask; 1890 struct mlx5_priv *priv = dev->data->dev_private; 1891 int ret; 1892 1893 if (!priv->config.mpls_en) 1894 return rte_flow_error_set(error, ENOTSUP, 1895 RTE_FLOW_ERROR_TYPE_ITEM, item, 1896 "MPLS not supported or" 1897 " disabled in firmware" 1898 " configuration."); 1899 /* MPLS over IP, UDP, GRE is allowed */ 1900 if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 | 1901 MLX5_FLOW_LAYER_OUTER_L4_UDP | 1902 MLX5_FLOW_LAYER_GRE))) 1903 return rte_flow_error_set(error, EINVAL, 1904 RTE_FLOW_ERROR_TYPE_ITEM, item, 1905 "protocol filtering not compatible" 1906 " with MPLS layer"); 1907 /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */ 1908 if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) && 1909 !(item_flags & MLX5_FLOW_LAYER_GRE)) 1910 return rte_flow_error_set(error, ENOTSUP, 1911 RTE_FLOW_ERROR_TYPE_ITEM, item, 1912 "multiple tunnel layers not" 1913 " supported"); 1914 if (!mask) 1915 mask = &rte_flow_item_mpls_mask; 1916 ret = mlx5_flow_item_acceptable 1917 (item, (const uint8_t *)mask, 1918 (const uint8_t *)&rte_flow_item_mpls_mask, 1919 sizeof(struct rte_flow_item_mpls), error); 1920 if (ret < 0) 1921 return ret; 1922 return 0; 1923 #endif 1924 return rte_flow_error_set(error, ENOTSUP, 1925 RTE_FLOW_ERROR_TYPE_ITEM, item, 1926 "MPLS is not supported by Verbs, please" 1927 " update."); 1928 } 1929 1930 /** 1931 * Validate NVGRE item. 1932 * 1933 * @param[in] item 1934 * Item specification. 1935 * @param[in] item_flags 1936 * Bit flags to mark detected items. 1937 * @param[in] target_protocol 1938 * The next protocol in the previous item. 1939 * @param[out] error 1940 * Pointer to error structure. 1941 * 1942 * @return 1943 * 0 on success, a negative errno value otherwise and rte_errno is set. 
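 *
 * For illustration, a matching pattern (the TNI value is arbitrary); as with
 * GRE, an outer L3 layer has to precede the item:
 *
 * @code
 *	struct rte_flow_item_nvgre nvgre_spec = {
 *		.tni = { 0x00, 0x00, 0x64 }, // TNI 100
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_NVGRE, .spec = &nvgre_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 * @endcode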
1944 */ 1945 int 1946 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item, 1947 uint64_t item_flags, 1948 uint8_t target_protocol, 1949 struct rte_flow_error *error) 1950 { 1951 const struct rte_flow_item_nvgre *mask = item->mask; 1952 int ret; 1953 1954 if (target_protocol != 0xff && target_protocol != IPPROTO_GRE) 1955 return rte_flow_error_set(error, EINVAL, 1956 RTE_FLOW_ERROR_TYPE_ITEM, item, 1957 "protocol filtering not compatible" 1958 " with this GRE layer"); 1959 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 1960 return rte_flow_error_set(error, ENOTSUP, 1961 RTE_FLOW_ERROR_TYPE_ITEM, item, 1962 "multiple tunnel layers not" 1963 " supported"); 1964 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3)) 1965 return rte_flow_error_set(error, ENOTSUP, 1966 RTE_FLOW_ERROR_TYPE_ITEM, item, 1967 "L3 Layer is missing"); 1968 if (!mask) 1969 mask = &rte_flow_item_nvgre_mask; 1970 ret = mlx5_flow_item_acceptable 1971 (item, (const uint8_t *)mask, 1972 (const uint8_t *)&rte_flow_item_nvgre_mask, 1973 sizeof(struct rte_flow_item_nvgre), error); 1974 if (ret < 0) 1975 return ret; 1976 return 0; 1977 } 1978 1979 static int 1980 flow_null_validate(struct rte_eth_dev *dev __rte_unused, 1981 const struct rte_flow_attr *attr __rte_unused, 1982 const struct rte_flow_item items[] __rte_unused, 1983 const struct rte_flow_action actions[] __rte_unused, 1984 struct rte_flow_error *error) 1985 { 1986 return rte_flow_error_set(error, ENOTSUP, 1987 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 1988 } 1989 1990 static struct mlx5_flow * 1991 flow_null_prepare(const struct rte_flow_attr *attr __rte_unused, 1992 const struct rte_flow_item items[] __rte_unused, 1993 const struct rte_flow_action actions[] __rte_unused, 1994 struct rte_flow_error *error) 1995 { 1996 rte_flow_error_set(error, ENOTSUP, 1997 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 1998 return NULL; 1999 } 2000 2001 static int 2002 flow_null_translate(struct rte_eth_dev *dev __rte_unused, 2003 struct mlx5_flow *dev_flow __rte_unused, 2004 const struct rte_flow_attr *attr __rte_unused, 2005 const struct rte_flow_item items[] __rte_unused, 2006 const struct rte_flow_action actions[] __rte_unused, 2007 struct rte_flow_error *error) 2008 { 2009 return rte_flow_error_set(error, ENOTSUP, 2010 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2011 } 2012 2013 static int 2014 flow_null_apply(struct rte_eth_dev *dev __rte_unused, 2015 struct rte_flow *flow __rte_unused, 2016 struct rte_flow_error *error) 2017 { 2018 return rte_flow_error_set(error, ENOTSUP, 2019 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2020 } 2021 2022 static void 2023 flow_null_remove(struct rte_eth_dev *dev __rte_unused, 2024 struct rte_flow *flow __rte_unused) 2025 { 2026 } 2027 2028 static void 2029 flow_null_destroy(struct rte_eth_dev *dev __rte_unused, 2030 struct rte_flow *flow __rte_unused) 2031 { 2032 } 2033 2034 static int 2035 flow_null_query(struct rte_eth_dev *dev __rte_unused, 2036 struct rte_flow *flow __rte_unused, 2037 const struct rte_flow_action *actions __rte_unused, 2038 void *data __rte_unused, 2039 struct rte_flow_error *error) 2040 { 2041 return rte_flow_error_set(error, ENOTSUP, 2042 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 2043 } 2044 2045 /* Void driver to protect from null pointer reference. 
*/ 2046 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = { 2047 .validate = flow_null_validate, 2048 .prepare = flow_null_prepare, 2049 .translate = flow_null_translate, 2050 .apply = flow_null_apply, 2051 .remove = flow_null_remove, 2052 .destroy = flow_null_destroy, 2053 .query = flow_null_query, 2054 }; 2055 2056 /** 2057 * Select flow driver type according to flow attributes and device 2058 * configuration. 2059 * 2060 * @param[in] dev 2061 * Pointer to the dev structure. 2062 * @param[in] attr 2063 * Pointer to the flow attributes. 2064 * 2065 * @return 2066 * flow driver type, MLX5_FLOW_TYPE_MAX otherwise. 2067 */ 2068 static enum mlx5_flow_drv_type 2069 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) 2070 { 2071 struct mlx5_priv *priv = dev->data->dev_private; 2072 enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX; 2073 2074 if (attr->transfer && priv->config.dv_esw_en) 2075 type = MLX5_FLOW_TYPE_DV; 2076 if (!attr->transfer) 2077 type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV : 2078 MLX5_FLOW_TYPE_VERBS; 2079 return type; 2080 } 2081 2082 #define flow_get_drv_ops(type) flow_drv_ops[type] 2083 2084 /** 2085 * Flow driver validation API. This abstracts calling driver specific functions. 2086 * The type of flow driver is determined according to flow attributes. 2087 * 2088 * @param[in] dev 2089 * Pointer to the dev structure. 2090 * @param[in] attr 2091 * Pointer to the flow attributes. 2092 * @param[in] items 2093 * Pointer to the list of items. 2094 * @param[in] actions 2095 * Pointer to the list of actions. 2096 * @param[out] error 2097 * Pointer to the error structure. 2098 * 2099 * @return 2100 * 0 on success, a negative errno value otherwise and rte_errno is set. 2101 */ 2102 static inline int 2103 flow_drv_validate(struct rte_eth_dev *dev, 2104 const struct rte_flow_attr *attr, 2105 const struct rte_flow_item items[], 2106 const struct rte_flow_action actions[], 2107 struct rte_flow_error *error) 2108 { 2109 const struct mlx5_flow_driver_ops *fops; 2110 enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr); 2111 2112 fops = flow_get_drv_ops(type); 2113 return fops->validate(dev, attr, items, actions, error); 2114 } 2115 2116 /** 2117 * Flow driver preparation API. This abstracts calling driver specific 2118 * functions. Parent flow (rte_flow) should have driver type (drv_type). It 2119 * calculates the size of memory required for device flow, allocates the memory, 2120 * initializes the device flow and returns the pointer. 2121 * 2122 * @note 2123 * This function initializes device flow structure such as dv or verbs in 2124 * struct mlx5_flow. However, it is caller's responsibility to initialize the 2125 * rest. For example, adding returning device flow to flow->dev_flow list and 2126 * setting backward reference to the flow should be done out of this function. 2127 * layers field is not filled either. 2128 * 2129 * @param[in] attr 2130 * Pointer to the flow attributes. 2131 * @param[in] items 2132 * Pointer to the list of items. 2133 * @param[in] actions 2134 * Pointer to the list of actions. 2135 * @param[out] error 2136 * Pointer to the error structure. 2137 * 2138 * @return 2139 * Pointer to device flow on success, otherwise NULL and rte_errno is set. 
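 *
 * The caller-side bookkeeping implied by the note above, as performed by
 * flow_list_create() further down (illustrative excerpt):
 *
 * @code
 *	dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
 *	if (!dev_flow)
 *		goto error;
 *	dev_flow->flow = flow;
 *	LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
 *	ret = flow_drv_translate(dev, dev_flow, attr, items, actions, error);
 * @endcode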
2140 */ 2141 static inline struct mlx5_flow * 2142 flow_drv_prepare(const struct rte_flow *flow, 2143 const struct rte_flow_attr *attr, 2144 const struct rte_flow_item items[], 2145 const struct rte_flow_action actions[], 2146 struct rte_flow_error *error) 2147 { 2148 const struct mlx5_flow_driver_ops *fops; 2149 enum mlx5_flow_drv_type type = flow->drv_type; 2150 2151 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2152 fops = flow_get_drv_ops(type); 2153 return fops->prepare(attr, items, actions, error); 2154 } 2155 2156 /** 2157 * Flow driver translation API. This abstracts calling driver specific 2158 * functions. Parent flow (rte_flow) should have driver type (drv_type). It 2159 * translates a generic flow into a driver flow. flow_drv_prepare() must 2160 * precede. 2161 * 2162 * @note 2163 * dev_flow->layers could be filled as a result of parsing during translation 2164 * if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled 2165 * if necessary. As a flow can have multiple dev_flows by RSS flow expansion, 2166 * flow->actions could be overwritten even though all the expanded dev_flows 2167 * have the same actions. 2168 * 2169 * @param[in] dev 2170 * Pointer to the rte dev structure. 2171 * @param[in, out] dev_flow 2172 * Pointer to the mlx5 flow. 2173 * @param[in] attr 2174 * Pointer to the flow attributes. 2175 * @param[in] items 2176 * Pointer to the list of items. 2177 * @param[in] actions 2178 * Pointer to the list of actions. 2179 * @param[out] error 2180 * Pointer to the error structure. 2181 * 2182 * @return 2183 * 0 on success, a negative errno value otherwise and rte_errno is set. 2184 */ 2185 static inline int 2186 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow, 2187 const struct rte_flow_attr *attr, 2188 const struct rte_flow_item items[], 2189 const struct rte_flow_action actions[], 2190 struct rte_flow_error *error) 2191 { 2192 const struct mlx5_flow_driver_ops *fops; 2193 enum mlx5_flow_drv_type type = dev_flow->flow->drv_type; 2194 2195 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2196 fops = flow_get_drv_ops(type); 2197 return fops->translate(dev, dev_flow, attr, items, actions, error); 2198 } 2199 2200 /** 2201 * Flow driver apply API. This abstracts calling driver specific functions. 2202 * Parent flow (rte_flow) should have driver type (drv_type). It applies 2203 * translated driver flows on to device. flow_drv_translate() must precede. 2204 * 2205 * @param[in] dev 2206 * Pointer to Ethernet device structure. 2207 * @param[in, out] flow 2208 * Pointer to flow structure. 2209 * @param[out] error 2210 * Pointer to error structure. 2211 * 2212 * @return 2213 * 0 on success, a negative errno value otherwise and rte_errno is set. 2214 */ 2215 static inline int 2216 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, 2217 struct rte_flow_error *error) 2218 { 2219 const struct mlx5_flow_driver_ops *fops; 2220 enum mlx5_flow_drv_type type = flow->drv_type; 2221 2222 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2223 fops = flow_get_drv_ops(type); 2224 return fops->apply(dev, flow, error); 2225 } 2226 2227 /** 2228 * Flow driver remove API. This abstracts calling driver specific functions. 2229 * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow 2230 * on device. All the resources of the flow should be freed by calling 2231 * flow_drv_destroy(). 2232 * 2233 * @param[in] dev 2234 * Pointer to Ethernet device. 
2235 * @param[in, out] flow 2236 * Pointer to flow structure. 2237 */ 2238 static inline void 2239 flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) 2240 { 2241 const struct mlx5_flow_driver_ops *fops; 2242 enum mlx5_flow_drv_type type = flow->drv_type; 2243 2244 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2245 fops = flow_get_drv_ops(type); 2246 fops->remove(dev, flow); 2247 } 2248 2249 /** 2250 * Flow driver destroy API. This abstracts calling driver specific functions. 2251 * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow 2252 * on device and releases resources of the flow. 2253 * 2254 * @param[in] dev 2255 * Pointer to Ethernet device. 2256 * @param[in, out] flow 2257 * Pointer to flow structure. 2258 */ 2259 static inline void 2260 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) 2261 { 2262 const struct mlx5_flow_driver_ops *fops; 2263 enum mlx5_flow_drv_type type = flow->drv_type; 2264 2265 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2266 fops = flow_get_drv_ops(type); 2267 fops->destroy(dev, flow); 2268 } 2269 2270 /** 2271 * Validate a flow supported by the NIC. 2272 * 2273 * @see rte_flow_validate() 2274 * @see rte_flow_ops 2275 */ 2276 int 2277 mlx5_flow_validate(struct rte_eth_dev *dev, 2278 const struct rte_flow_attr *attr, 2279 const struct rte_flow_item items[], 2280 const struct rte_flow_action actions[], 2281 struct rte_flow_error *error) 2282 { 2283 int ret; 2284 2285 ret = flow_drv_validate(dev, attr, items, actions, error); 2286 if (ret < 0) 2287 return ret; 2288 return 0; 2289 } 2290 2291 /** 2292 * Get RSS action from the action list. 2293 * 2294 * @param[in] actions 2295 * Pointer to the list of actions. 2296 * 2297 * @return 2298 * Pointer to the RSS action if exist, else return NULL. 2299 */ 2300 static const struct rte_flow_action_rss* 2301 flow_get_rss_action(const struct rte_flow_action actions[]) 2302 { 2303 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2304 switch (actions->type) { 2305 case RTE_FLOW_ACTION_TYPE_RSS: 2306 return (const struct rte_flow_action_rss *) 2307 actions->conf; 2308 default: 2309 break; 2310 } 2311 } 2312 return NULL; 2313 } 2314 2315 static unsigned int 2316 find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) 2317 { 2318 const struct rte_flow_item *item; 2319 unsigned int has_vlan = 0; 2320 2321 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { 2322 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { 2323 has_vlan = 1; 2324 break; 2325 } 2326 } 2327 if (has_vlan) 2328 return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN : 2329 MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN; 2330 return rss_level < 2 ? MLX5_EXPANSION_ROOT : 2331 MLX5_EXPANSION_ROOT_OUTER; 2332 } 2333 2334 /** 2335 * Create a flow and add it to @p list. 2336 * 2337 * @param dev 2338 * Pointer to Ethernet device. 2339 * @param list 2340 * Pointer to a TAILQ flow list. 2341 * @param[in] attr 2342 * Flow rule attributes. 2343 * @param[in] items 2344 * Pattern specification (list terminated by the END pattern item). 2345 * @param[in] actions 2346 * Associated actions (list terminated by the END action). 2347 * @param[out] error 2348 * Perform verbose error reporting if not NULL. 2349 * 2350 * @return 2351 * A flow on success, NULL otherwise and rte_errno is set. 
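 *
 * When an RSS action with hash types is present, the pattern is expanded and
 * each expanded entry gets its own device flow. A sketch of that expansion
 * step (buffer size and RSS types are illustrative; no VLAN item and outer
 * RSS level assumed):
 *
 * @code
 *	union {
 *		struct rte_flow_expand_rss buf;
 *		uint8_t buffer[2048];
 *	} exp;
 *	int n = rte_flow_expand_rss(&exp.buf, sizeof(exp.buffer), items,
 *				    ETH_RSS_IP | ETH_RSS_UDP,
 *				    mlx5_support_expansion,
 *				    MLX5_EXPANSION_ROOT);
 *	// n > 0: each exp.buf.entry[i].pattern is prepared and translated
 *	// into its own mlx5_flow below.
 * @endcode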
2352 */ 2353 static struct rte_flow * 2354 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, 2355 const struct rte_flow_attr *attr, 2356 const struct rte_flow_item items[], 2357 const struct rte_flow_action actions[], 2358 struct rte_flow_error *error) 2359 { 2360 struct rte_flow *flow = NULL; 2361 struct mlx5_flow *dev_flow; 2362 const struct rte_flow_action_rss *rss; 2363 union { 2364 struct rte_flow_expand_rss buf; 2365 uint8_t buffer[2048]; 2366 } expand_buffer; 2367 struct rte_flow_expand_rss *buf = &expand_buffer.buf; 2368 int ret; 2369 uint32_t i; 2370 uint32_t flow_size; 2371 2372 ret = flow_drv_validate(dev, attr, items, actions, error); 2373 if (ret < 0) 2374 return NULL; 2375 flow_size = sizeof(struct rte_flow); 2376 rss = flow_get_rss_action(actions); 2377 if (rss) 2378 flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t), 2379 sizeof(void *)); 2380 else 2381 flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *)); 2382 flow = rte_calloc(__func__, 1, flow_size, 0); 2383 if (!flow) { 2384 rte_errno = ENOMEM; 2385 return NULL; 2386 } 2387 flow->drv_type = flow_get_drv_type(dev, attr); 2388 flow->ingress = attr->ingress; 2389 flow->transfer = attr->transfer; 2390 assert(flow->drv_type > MLX5_FLOW_TYPE_MIN && 2391 flow->drv_type < MLX5_FLOW_TYPE_MAX); 2392 flow->queue = (void *)(flow + 1); 2393 LIST_INIT(&flow->dev_flows); 2394 if (rss && rss->types) { 2395 unsigned int graph_root; 2396 2397 graph_root = find_graph_root(items, rss->level); 2398 ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer), 2399 items, rss->types, 2400 mlx5_support_expansion, 2401 graph_root); 2402 assert(ret > 0 && 2403 (unsigned int)ret < sizeof(expand_buffer.buffer)); 2404 } else { 2405 buf->entries = 1; 2406 buf->entry[0].pattern = (void *)(uintptr_t)items; 2407 } 2408 for (i = 0; i < buf->entries; ++i) { 2409 dev_flow = flow_drv_prepare(flow, attr, buf->entry[i].pattern, 2410 actions, error); 2411 if (!dev_flow) 2412 goto error; 2413 dev_flow->flow = flow; 2414 LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); 2415 ret = flow_drv_translate(dev, dev_flow, attr, 2416 buf->entry[i].pattern, 2417 actions, error); 2418 if (ret < 0) 2419 goto error; 2420 } 2421 if (dev->data->dev_started) { 2422 ret = flow_drv_apply(dev, flow, error); 2423 if (ret < 0) 2424 goto error; 2425 } 2426 TAILQ_INSERT_TAIL(list, flow, next); 2427 flow_rxq_flags_set(dev, flow); 2428 return flow; 2429 error: 2430 ret = rte_errno; /* Save rte_errno before cleanup. */ 2431 assert(flow); 2432 flow_drv_destroy(dev, flow); 2433 rte_free(flow); 2434 rte_errno = ret; /* Restore rte_errno. */ 2435 return NULL; 2436 } 2437 2438 /** 2439 * Create a flow. 2440 * 2441 * @see rte_flow_create() 2442 * @see rte_flow_ops 2443 */ 2444 struct rte_flow * 2445 mlx5_flow_create(struct rte_eth_dev *dev, 2446 const struct rte_flow_attr *attr, 2447 const struct rte_flow_item items[], 2448 const struct rte_flow_action actions[], 2449 struct rte_flow_error *error) 2450 { 2451 struct mlx5_priv *priv = dev->data->dev_private; 2452 2453 return flow_list_create(dev, &priv->flows, 2454 attr, items, actions, error); 2455 } 2456 2457 /** 2458 * Destroy a flow in a list. 2459 * 2460 * @param dev 2461 * Pointer to Ethernet device. 2462 * @param list 2463 * Pointer to a TAILQ flow list. 2464 * @param[in] flow 2465 * Flow to destroy. 
2466 */ 2467 static void 2468 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, 2469 struct rte_flow *flow) 2470 { 2471 /* 2472 * Update RX queue flags only if port is started, otherwise it is 2473 * already clean. 2474 */ 2475 if (dev->data->dev_started) 2476 flow_rxq_flags_trim(dev, flow); 2477 flow_drv_destroy(dev, flow); 2478 TAILQ_REMOVE(list, flow, next); 2479 rte_free(flow->fdir); 2480 rte_free(flow); 2481 } 2482 2483 /** 2484 * Destroy all flows. 2485 * 2486 * @param dev 2487 * Pointer to Ethernet device. 2488 * @param list 2489 * Pointer to a TAILQ flow list. 2490 */ 2491 void 2492 mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list) 2493 { 2494 while (!TAILQ_EMPTY(list)) { 2495 struct rte_flow *flow; 2496 2497 flow = TAILQ_FIRST(list); 2498 flow_list_destroy(dev, list, flow); 2499 } 2500 } 2501 2502 /** 2503 * Remove all flows. 2504 * 2505 * @param dev 2506 * Pointer to Ethernet device. 2507 * @param list 2508 * Pointer to a TAILQ flow list. 2509 */ 2510 void 2511 mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) 2512 { 2513 struct rte_flow *flow; 2514 2515 TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) 2516 flow_drv_remove(dev, flow); 2517 flow_rxq_flags_clear(dev); 2518 } 2519 2520 /** 2521 * Add all flows. 2522 * 2523 * @param dev 2524 * Pointer to Ethernet device. 2525 * @param list 2526 * Pointer to a TAILQ flow list. 2527 * 2528 * @return 2529 * 0 on success, a negative errno value otherwise and rte_errno is set. 2530 */ 2531 int 2532 mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) 2533 { 2534 struct rte_flow *flow; 2535 struct rte_flow_error error; 2536 int ret = 0; 2537 2538 TAILQ_FOREACH(flow, list, next) { 2539 ret = flow_drv_apply(dev, flow, &error); 2540 if (ret < 0) 2541 goto error; 2542 flow_rxq_flags_set(dev, flow); 2543 } 2544 return 0; 2545 error: 2546 ret = rte_errno; /* Save rte_errno before cleanup. */ 2547 mlx5_flow_stop(dev, list); 2548 rte_errno = ret; /* Restore rte_errno. */ 2549 return -rte_errno; 2550 } 2551 2552 /** 2553 * Verify the flow list is empty 2554 * 2555 * @param dev 2556 * Pointer to Ethernet device. 2557 * 2558 * @return the number of flows not released. 2559 */ 2560 int 2561 mlx5_flow_verify(struct rte_eth_dev *dev) 2562 { 2563 struct mlx5_priv *priv = dev->data->dev_private; 2564 struct rte_flow *flow; 2565 int ret = 0; 2566 2567 TAILQ_FOREACH(flow, &priv->flows, next) { 2568 DRV_LOG(DEBUG, "port %u flow %p still referenced", 2569 dev->data->port_id, (void *)flow); 2570 ++ret; 2571 } 2572 return ret; 2573 } 2574 2575 /** 2576 * Enable a control flow configured from the control plane. 2577 * 2578 * @param dev 2579 * Pointer to Ethernet device. 2580 * @param eth_spec 2581 * An Ethernet flow spec to apply. 2582 * @param eth_mask 2583 * An Ethernet flow mask to apply. 2584 * @param vlan_spec 2585 * A VLAN flow spec to apply. 2586 * @param vlan_mask 2587 * A VLAN flow mask to apply. 2588 * 2589 * @return 2590 * 0 on success, a negative errno value otherwise and rte_errno is set. 
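 *
 * For illustration, how the port start path installs such a control flow
 * through the mlx5_ctrl_flow() wrapper below (broadcast is just one example):
 *
 * @code
 *	struct rte_flow_item_eth bcast = {
 *		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *	};
 *	struct rte_flow_item_eth bcast_mask = {
 *		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *	};
 *
 *	if (mlx5_ctrl_flow(dev, &bcast, &bcast_mask))
 *		DRV_LOG(ERR, "port %u cannot enable broadcast control flow",
 *			dev->data->port_id);
 * @endcode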
2591 */ 2592 int 2593 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, 2594 struct rte_flow_item_eth *eth_spec, 2595 struct rte_flow_item_eth *eth_mask, 2596 struct rte_flow_item_vlan *vlan_spec, 2597 struct rte_flow_item_vlan *vlan_mask) 2598 { 2599 struct mlx5_priv *priv = dev->data->dev_private; 2600 const struct rte_flow_attr attr = { 2601 .ingress = 1, 2602 .priority = MLX5_FLOW_PRIO_RSVD, 2603 }; 2604 struct rte_flow_item items[] = { 2605 { 2606 .type = RTE_FLOW_ITEM_TYPE_ETH, 2607 .spec = eth_spec, 2608 .last = NULL, 2609 .mask = eth_mask, 2610 }, 2611 { 2612 .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN : 2613 RTE_FLOW_ITEM_TYPE_END, 2614 .spec = vlan_spec, 2615 .last = NULL, 2616 .mask = vlan_mask, 2617 }, 2618 { 2619 .type = RTE_FLOW_ITEM_TYPE_END, 2620 }, 2621 }; 2622 uint16_t queue[priv->reta_idx_n]; 2623 struct rte_flow_action_rss action_rss = { 2624 .func = RTE_ETH_HASH_FUNCTION_DEFAULT, 2625 .level = 0, 2626 .types = priv->rss_conf.rss_hf, 2627 .key_len = priv->rss_conf.rss_key_len, 2628 .queue_num = priv->reta_idx_n, 2629 .key = priv->rss_conf.rss_key, 2630 .queue = queue, 2631 }; 2632 struct rte_flow_action actions[] = { 2633 { 2634 .type = RTE_FLOW_ACTION_TYPE_RSS, 2635 .conf = &action_rss, 2636 }, 2637 { 2638 .type = RTE_FLOW_ACTION_TYPE_END, 2639 }, 2640 }; 2641 struct rte_flow *flow; 2642 struct rte_flow_error error; 2643 unsigned int i; 2644 2645 if (!priv->reta_idx_n || !priv->rxqs_n) { 2646 return 0; 2647 } 2648 for (i = 0; i != priv->reta_idx_n; ++i) 2649 queue[i] = (*priv->reta_idx)[i]; 2650 flow = flow_list_create(dev, &priv->ctrl_flows, 2651 &attr, items, actions, &error); 2652 if (!flow) 2653 return -rte_errno; 2654 return 0; 2655 } 2656 2657 /** 2658 * Enable a flow control configured from the control plane. 2659 * 2660 * @param dev 2661 * Pointer to Ethernet device. 2662 * @param eth_spec 2663 * An Ethernet flow spec to apply. 2664 * @param eth_mask 2665 * An Ethernet flow mask to apply. 2666 * 2667 * @return 2668 * 0 on success, a negative errno value otherwise and rte_errno is set. 2669 */ 2670 int 2671 mlx5_ctrl_flow(struct rte_eth_dev *dev, 2672 struct rte_flow_item_eth *eth_spec, 2673 struct rte_flow_item_eth *eth_mask) 2674 { 2675 return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL); 2676 } 2677 2678 /** 2679 * Destroy a flow. 2680 * 2681 * @see rte_flow_destroy() 2682 * @see rte_flow_ops 2683 */ 2684 int 2685 mlx5_flow_destroy(struct rte_eth_dev *dev, 2686 struct rte_flow *flow, 2687 struct rte_flow_error *error __rte_unused) 2688 { 2689 struct mlx5_priv *priv = dev->data->dev_private; 2690 2691 flow_list_destroy(dev, &priv->flows, flow); 2692 return 0; 2693 } 2694 2695 /** 2696 * Destroy all flows. 2697 * 2698 * @see rte_flow_flush() 2699 * @see rte_flow_ops 2700 */ 2701 int 2702 mlx5_flow_flush(struct rte_eth_dev *dev, 2703 struct rte_flow_error *error __rte_unused) 2704 { 2705 struct mlx5_priv *priv = dev->data->dev_private; 2706 2707 mlx5_flow_list_flush(dev, &priv->flows); 2708 return 0; 2709 } 2710 2711 /** 2712 * Isolated mode. 
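 *
 * Isolation has to be requested while the port is stopped. An illustrative
 * application-side sketch, assuming port_id refers to an already configured
 * port:
 *
 * @code
 *	struct rte_flow_error err;
 *
 *	rte_eth_dev_stop(port_id);
 *	if (rte_flow_isolate(port_id, 1, &err))
 *		printf("cannot enter isolated mode: %s\n",
 *		       err.message ? err.message : "(no message)");
 *	rte_eth_dev_start(port_id);
 * @endcode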
2713 * 2714 * @see rte_flow_isolate() 2715 * @see rte_flow_ops 2716 */ 2717 int 2718 mlx5_flow_isolate(struct rte_eth_dev *dev, 2719 int enable, 2720 struct rte_flow_error *error) 2721 { 2722 struct mlx5_priv *priv = dev->data->dev_private; 2723 2724 if (dev->data->dev_started) { 2725 rte_flow_error_set(error, EBUSY, 2726 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2727 NULL, 2728 "port must be stopped first"); 2729 return -rte_errno; 2730 } 2731 priv->isolated = !!enable; 2732 if (enable) 2733 dev->dev_ops = &mlx5_dev_ops_isolate; 2734 else 2735 dev->dev_ops = &mlx5_dev_ops; 2736 return 0; 2737 } 2738 2739 /** 2740 * Query a flow. 2741 * 2742 * @see rte_flow_query() 2743 * @see rte_flow_ops 2744 */ 2745 static int 2746 flow_drv_query(struct rte_eth_dev *dev, 2747 struct rte_flow *flow, 2748 const struct rte_flow_action *actions, 2749 void *data, 2750 struct rte_flow_error *error) 2751 { 2752 const struct mlx5_flow_driver_ops *fops; 2753 enum mlx5_flow_drv_type ftype = flow->drv_type; 2754 2755 assert(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX); 2756 fops = flow_get_drv_ops(ftype); 2757 2758 return fops->query(dev, flow, actions, data, error); 2759 } 2760 2761 /** 2762 * Query a flow. 2763 * 2764 * @see rte_flow_query() 2765 * @see rte_flow_ops 2766 */ 2767 int 2768 mlx5_flow_query(struct rte_eth_dev *dev, 2769 struct rte_flow *flow, 2770 const struct rte_flow_action *actions, 2771 void *data, 2772 struct rte_flow_error *error) 2773 { 2774 int ret; 2775 2776 ret = flow_drv_query(dev, flow, actions, data, error); 2777 if (ret < 0) 2778 return ret; 2779 return 0; 2780 } 2781 2782 /** 2783 * Convert a flow director filter to a generic flow. 2784 * 2785 * @param dev 2786 * Pointer to Ethernet device. 2787 * @param fdir_filter 2788 * Flow director filter to add. 2789 * @param attributes 2790 * Generic flow parameters structure. 2791 * 2792 * @return 2793 * 0 on success, a negative errno value otherwise and rte_errno is set. 2794 */ 2795 static int 2796 flow_fdir_filter_convert(struct rte_eth_dev *dev, 2797 const struct rte_eth_fdir_filter *fdir_filter, 2798 struct mlx5_fdir *attributes) 2799 { 2800 struct mlx5_priv *priv = dev->data->dev_private; 2801 const struct rte_eth_fdir_input *input = &fdir_filter->input; 2802 const struct rte_eth_fdir_masks *mask = 2803 &dev->data->dev_conf.fdir_conf.mask; 2804 2805 /* Validate queue number. */ 2806 if (fdir_filter->action.rx_queue >= priv->rxqs_n) { 2807 DRV_LOG(ERR, "port %u invalid queue number %d", 2808 dev->data->port_id, fdir_filter->action.rx_queue); 2809 rte_errno = EINVAL; 2810 return -rte_errno; 2811 } 2812 attributes->attr.ingress = 1; 2813 attributes->items[0] = (struct rte_flow_item) { 2814 .type = RTE_FLOW_ITEM_TYPE_ETH, 2815 .spec = &attributes->l2, 2816 .mask = &attributes->l2_mask, 2817 }; 2818 switch (fdir_filter->action.behavior) { 2819 case RTE_ETH_FDIR_ACCEPT: 2820 attributes->actions[0] = (struct rte_flow_action){ 2821 .type = RTE_FLOW_ACTION_TYPE_QUEUE, 2822 .conf = &attributes->queue, 2823 }; 2824 break; 2825 case RTE_ETH_FDIR_REJECT: 2826 attributes->actions[0] = (struct rte_flow_action){ 2827 .type = RTE_FLOW_ACTION_TYPE_DROP, 2828 }; 2829 break; 2830 default: 2831 DRV_LOG(ERR, "port %u invalid behavior %d", 2832 dev->data->port_id, 2833 fdir_filter->action.behavior); 2834 rte_errno = ENOTSUP; 2835 return -rte_errno; 2836 } 2837 attributes->queue.index = fdir_filter->action.rx_queue; 2838 /* Handle L3. 
*/ 2839 switch (fdir_filter->input.flow_type) { 2840 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 2841 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 2842 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 2843 attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){ 2844 .src_addr = input->flow.ip4_flow.src_ip, 2845 .dst_addr = input->flow.ip4_flow.dst_ip, 2846 .time_to_live = input->flow.ip4_flow.ttl, 2847 .type_of_service = input->flow.ip4_flow.tos, 2848 }; 2849 attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){ 2850 .src_addr = mask->ipv4_mask.src_ip, 2851 .dst_addr = mask->ipv4_mask.dst_ip, 2852 .time_to_live = mask->ipv4_mask.ttl, 2853 .type_of_service = mask->ipv4_mask.tos, 2854 .next_proto_id = mask->ipv4_mask.proto, 2855 }; 2856 attributes->items[1] = (struct rte_flow_item){ 2857 .type = RTE_FLOW_ITEM_TYPE_IPV4, 2858 .spec = &attributes->l3, 2859 .mask = &attributes->l3_mask, 2860 }; 2861 break; 2862 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 2863 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 2864 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 2865 attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){ 2866 .hop_limits = input->flow.ipv6_flow.hop_limits, 2867 .proto = input->flow.ipv6_flow.proto, 2868 }; 2869 2870 memcpy(attributes->l3.ipv6.hdr.src_addr, 2871 input->flow.ipv6_flow.src_ip, 2872 RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); 2873 memcpy(attributes->l3.ipv6.hdr.dst_addr, 2874 input->flow.ipv6_flow.dst_ip, 2875 RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); 2876 memcpy(attributes->l3_mask.ipv6.hdr.src_addr, 2877 mask->ipv6_mask.src_ip, 2878 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); 2879 memcpy(attributes->l3_mask.ipv6.hdr.dst_addr, 2880 mask->ipv6_mask.dst_ip, 2881 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); 2882 attributes->items[1] = (struct rte_flow_item){ 2883 .type = RTE_FLOW_ITEM_TYPE_IPV6, 2884 .spec = &attributes->l3, 2885 .mask = &attributes->l3_mask, 2886 }; 2887 break; 2888 default: 2889 DRV_LOG(ERR, "port %u invalid flow type%d", 2890 dev->data->port_id, fdir_filter->input.flow_type); 2891 rte_errno = ENOTSUP; 2892 return -rte_errno; 2893 } 2894 /* Handle L4. 
*/ 2895 switch (fdir_filter->input.flow_type) { 2896 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 2897 attributes->l4.udp.hdr = (struct rte_udp_hdr){ 2898 .src_port = input->flow.udp4_flow.src_port, 2899 .dst_port = input->flow.udp4_flow.dst_port, 2900 }; 2901 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ 2902 .src_port = mask->src_port_mask, 2903 .dst_port = mask->dst_port_mask, 2904 }; 2905 attributes->items[2] = (struct rte_flow_item){ 2906 .type = RTE_FLOW_ITEM_TYPE_UDP, 2907 .spec = &attributes->l4, 2908 .mask = &attributes->l4_mask, 2909 }; 2910 break; 2911 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 2912 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ 2913 .src_port = input->flow.tcp4_flow.src_port, 2914 .dst_port = input->flow.tcp4_flow.dst_port, 2915 }; 2916 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ 2917 .src_port = mask->src_port_mask, 2918 .dst_port = mask->dst_port_mask, 2919 }; 2920 attributes->items[2] = (struct rte_flow_item){ 2921 .type = RTE_FLOW_ITEM_TYPE_TCP, 2922 .spec = &attributes->l4, 2923 .mask = &attributes->l4_mask, 2924 }; 2925 break; 2926 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 2927 attributes->l4.udp.hdr = (struct rte_udp_hdr){ 2928 .src_port = input->flow.udp6_flow.src_port, 2929 .dst_port = input->flow.udp6_flow.dst_port, 2930 }; 2931 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ 2932 .src_port = mask->src_port_mask, 2933 .dst_port = mask->dst_port_mask, 2934 }; 2935 attributes->items[2] = (struct rte_flow_item){ 2936 .type = RTE_FLOW_ITEM_TYPE_UDP, 2937 .spec = &attributes->l4, 2938 .mask = &attributes->l4_mask, 2939 }; 2940 break; 2941 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 2942 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ 2943 .src_port = input->flow.tcp6_flow.src_port, 2944 .dst_port = input->flow.tcp6_flow.dst_port, 2945 }; 2946 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ 2947 .src_port = mask->src_port_mask, 2948 .dst_port = mask->dst_port_mask, 2949 }; 2950 attributes->items[2] = (struct rte_flow_item){ 2951 .type = RTE_FLOW_ITEM_TYPE_TCP, 2952 .spec = &attributes->l4, 2953 .mask = &attributes->l4_mask, 2954 }; 2955 break; 2956 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 2957 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 2958 break; 2959 default: 2960 DRV_LOG(ERR, "port %u invalid flow type%d", 2961 dev->data->port_id, fdir_filter->input.flow_type); 2962 rte_errno = ENOTSUP; 2963 return -rte_errno; 2964 } 2965 return 0; 2966 } 2967 2968 #define FLOW_FDIR_CMP(f1, f2, fld) \ 2969 memcmp(&(f1)->fld, &(f2)->fld, sizeof(f1->fld)) 2970 2971 /** 2972 * Compare two FDIR flows. If items and actions are identical, the two flows are 2973 * regarded as same. 2974 * 2975 * @param dev 2976 * Pointer to Ethernet device. 2977 * @param f1 2978 * FDIR flow to compare. 2979 * @param f2 2980 * FDIR flow to compare. 2981 * 2982 * @return 2983 * Zero on match, 1 otherwise. 2984 */ 2985 static int 2986 flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2) 2987 { 2988 if (FLOW_FDIR_CMP(f1, f2, attr) || 2989 FLOW_FDIR_CMP(f1, f2, l2) || 2990 FLOW_FDIR_CMP(f1, f2, l2_mask) || 2991 FLOW_FDIR_CMP(f1, f2, l3) || 2992 FLOW_FDIR_CMP(f1, f2, l3_mask) || 2993 FLOW_FDIR_CMP(f1, f2, l4) || 2994 FLOW_FDIR_CMP(f1, f2, l4_mask) || 2995 FLOW_FDIR_CMP(f1, f2, actions[0].type)) 2996 return 1; 2997 if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE && 2998 FLOW_FDIR_CMP(f1, f2, queue)) 2999 return 1; 3000 return 0; 3001 } 3002 3003 /** 3004 * Search device flow list to find out a matched FDIR flow. 3005 * 3006 * @param dev 3007 * Pointer to Ethernet device. 
3008 * @param fdir_flow 3009 * FDIR flow to lookup. 3010 * 3011 * @return 3012 * Pointer of flow if found, NULL otherwise. 3013 */ 3014 static struct rte_flow * 3015 flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow) 3016 { 3017 struct mlx5_priv *priv = dev->data->dev_private; 3018 struct rte_flow *flow = NULL; 3019 3020 assert(fdir_flow); 3021 TAILQ_FOREACH(flow, &priv->flows, next) { 3022 if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) { 3023 DRV_LOG(DEBUG, "port %u found FDIR flow %p", 3024 dev->data->port_id, (void *)flow); 3025 break; 3026 } 3027 } 3028 return flow; 3029 } 3030 3031 /** 3032 * Add new flow director filter and store it in list. 3033 * 3034 * @param dev 3035 * Pointer to Ethernet device. 3036 * @param fdir_filter 3037 * Flow director filter to add. 3038 * 3039 * @return 3040 * 0 on success, a negative errno value otherwise and rte_errno is set. 3041 */ 3042 static int 3043 flow_fdir_filter_add(struct rte_eth_dev *dev, 3044 const struct rte_eth_fdir_filter *fdir_filter) 3045 { 3046 struct mlx5_priv *priv = dev->data->dev_private; 3047 struct mlx5_fdir *fdir_flow; 3048 struct rte_flow *flow; 3049 int ret; 3050 3051 fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0); 3052 if (!fdir_flow) { 3053 rte_errno = ENOMEM; 3054 return -rte_errno; 3055 } 3056 ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow); 3057 if (ret) 3058 goto error; 3059 flow = flow_fdir_filter_lookup(dev, fdir_flow); 3060 if (flow) { 3061 rte_errno = EEXIST; 3062 goto error; 3063 } 3064 flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr, 3065 fdir_flow->items, fdir_flow->actions, NULL); 3066 if (!flow) 3067 goto error; 3068 assert(!flow->fdir); 3069 flow->fdir = fdir_flow; 3070 DRV_LOG(DEBUG, "port %u created FDIR flow %p", 3071 dev->data->port_id, (void *)flow); 3072 return 0; 3073 error: 3074 rte_free(fdir_flow); 3075 return -rte_errno; 3076 } 3077 3078 /** 3079 * Delete specific filter. 3080 * 3081 * @param dev 3082 * Pointer to Ethernet device. 3083 * @param fdir_filter 3084 * Filter to be deleted. 3085 * 3086 * @return 3087 * 0 on success, a negative errno value otherwise and rte_errno is set. 3088 */ 3089 static int 3090 flow_fdir_filter_delete(struct rte_eth_dev *dev, 3091 const struct rte_eth_fdir_filter *fdir_filter) 3092 { 3093 struct mlx5_priv *priv = dev->data->dev_private; 3094 struct rte_flow *flow; 3095 struct mlx5_fdir fdir_flow = { 3096 .attr.group = 0, 3097 }; 3098 int ret; 3099 3100 ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow); 3101 if (ret) 3102 return -rte_errno; 3103 flow = flow_fdir_filter_lookup(dev, &fdir_flow); 3104 if (!flow) { 3105 rte_errno = ENOENT; 3106 return -rte_errno; 3107 } 3108 flow_list_destroy(dev, &priv->flows, flow); 3109 DRV_LOG(DEBUG, "port %u deleted FDIR flow %p", 3110 dev->data->port_id, (void *)flow); 3111 return 0; 3112 } 3113 3114 /** 3115 * Update queue for specific filter. 3116 * 3117 * @param dev 3118 * Pointer to Ethernet device. 3119 * @param fdir_filter 3120 * Filter to be updated. 3121 * 3122 * @return 3123 * 0 on success, a negative errno value otherwise and rte_errno is set. 3124 */ 3125 static int 3126 flow_fdir_filter_update(struct rte_eth_dev *dev, 3127 const struct rte_eth_fdir_filter *fdir_filter) 3128 { 3129 int ret; 3130 3131 ret = flow_fdir_filter_delete(dev, fdir_filter); 3132 if (ret) 3133 return ret; 3134 return flow_fdir_filter_add(dev, fdir_filter); 3135 } 3136 3137 /** 3138 * Flush all filters. 3139 * 3140 * @param dev 3141 * Pointer to Ethernet device. 
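 *
 * Reached through the legacy filter API, e.g. (illustrative):
 *
 * @code
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *				RTE_ETH_FILTER_FLUSH, NULL);
 * @endcode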
3142 */ 3143 static void 3144 flow_fdir_filter_flush(struct rte_eth_dev *dev) 3145 { 3146 struct mlx5_priv *priv = dev->data->dev_private; 3147 3148 mlx5_flow_list_flush(dev, &priv->flows); 3149 } 3150 3151 /** 3152 * Get flow director information. 3153 * 3154 * @param dev 3155 * Pointer to Ethernet device. 3156 * @param[out] fdir_info 3157 * Resulting flow director information. 3158 */ 3159 static void 3160 flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) 3161 { 3162 struct rte_eth_fdir_masks *mask = 3163 &dev->data->dev_conf.fdir_conf.mask; 3164 3165 fdir_info->mode = dev->data->dev_conf.fdir_conf.mode; 3166 fdir_info->guarant_spc = 0; 3167 rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask)); 3168 fdir_info->max_flexpayload = 0; 3169 fdir_info->flow_types_mask[0] = 0; 3170 fdir_info->flex_payload_unit = 0; 3171 fdir_info->max_flex_payload_segment_num = 0; 3172 fdir_info->flex_payload_limit = 0; 3173 memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf)); 3174 } 3175 3176 /** 3177 * Deal with flow director operations. 3178 * 3179 * @param dev 3180 * Pointer to Ethernet device. 3181 * @param filter_op 3182 * Operation to perform. 3183 * @param arg 3184 * Pointer to operation-specific structure. 3185 * 3186 * @return 3187 * 0 on success, a negative errno value otherwise and rte_errno is set. 3188 */ 3189 static int 3190 flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, 3191 void *arg) 3192 { 3193 enum rte_fdir_mode fdir_mode = 3194 dev->data->dev_conf.fdir_conf.mode; 3195 3196 if (filter_op == RTE_ETH_FILTER_NOP) 3197 return 0; 3198 if (fdir_mode != RTE_FDIR_MODE_PERFECT && 3199 fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 3200 DRV_LOG(ERR, "port %u flow director mode %d not supported", 3201 dev->data->port_id, fdir_mode); 3202 rte_errno = EINVAL; 3203 return -rte_errno; 3204 } 3205 switch (filter_op) { 3206 case RTE_ETH_FILTER_ADD: 3207 return flow_fdir_filter_add(dev, arg); 3208 case RTE_ETH_FILTER_UPDATE: 3209 return flow_fdir_filter_update(dev, arg); 3210 case RTE_ETH_FILTER_DELETE: 3211 return flow_fdir_filter_delete(dev, arg); 3212 case RTE_ETH_FILTER_FLUSH: 3213 flow_fdir_filter_flush(dev); 3214 break; 3215 case RTE_ETH_FILTER_INFO: 3216 flow_fdir_info_get(dev, arg); 3217 break; 3218 default: 3219 DRV_LOG(DEBUG, "port %u unknown operation %u", 3220 dev->data->port_id, filter_op); 3221 rte_errno = EINVAL; 3222 return -rte_errno; 3223 } 3224 return 0; 3225 } 3226 3227 /** 3228 * Manage filter operations. 3229 * 3230 * @param dev 3231 * Pointer to Ethernet device structure. 3232 * @param filter_type 3233 * Filter type. 3234 * @param filter_op 3235 * Operation to perform. 3236 * @param arg 3237 * Pointer to operation-specific structure. 3238 * 3239 * @return 3240 * 0 on success, a negative errno value otherwise and rte_errno is set. 
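 *
 * The generic branch is how the rte_flow API reaches this PMD: ethdev asks
 * for the rte_flow_ops pointer through this callback. An illustrative
 * sketch, assuming port_id refers to this device:
 *
 * @code
 *	const struct rte_flow_ops *ops = NULL;
 *
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *				RTE_ETH_FILTER_GET, &ops);
 *	// ops now points to mlx5_flow_ops and backs rte_flow_create() etc.
 * @endcode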
3241 */ 3242 int 3243 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, 3244 enum rte_filter_type filter_type, 3245 enum rte_filter_op filter_op, 3246 void *arg) 3247 { 3248 switch (filter_type) { 3249 case RTE_ETH_FILTER_GENERIC: 3250 if (filter_op != RTE_ETH_FILTER_GET) { 3251 rte_errno = EINVAL; 3252 return -rte_errno; 3253 } 3254 *(const void **)arg = &mlx5_flow_ops; 3255 return 0; 3256 case RTE_ETH_FILTER_FDIR: 3257 return flow_fdir_ctrl_func(dev, filter_op, arg); 3258 default: 3259 DRV_LOG(ERR, "port %u filter type (%d) not supported", 3260 dev->data->port_id, filter_type); 3261 rte_errno = ENOTSUP; 3262 return -rte_errno; 3263 } 3264 return 0; 3265 } 3266 3267 #define MLX5_POOL_QUERY_FREQ_US 1000000 3268 3269 /** 3270 * Set the periodic procedure for triggering asynchronous batch queries for all 3271 * the counter pools. 3272 * 3273 * @param[in] sh 3274 * Pointer to mlx5_ibv_shared object. 3275 */ 3276 void 3277 mlx5_set_query_alarm(struct mlx5_ibv_shared *sh) 3278 { 3279 struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(sh, 0, 0); 3280 uint32_t pools_n = rte_atomic16_read(&cont->n_valid); 3281 uint32_t us; 3282 3283 cont = MLX5_CNT_CONTAINER(sh, 1, 0); 3284 pools_n += rte_atomic16_read(&cont->n_valid); 3285 us = MLX5_POOL_QUERY_FREQ_US / pools_n; 3286 DRV_LOG(DEBUG, "Set alarm for %u pools each %u us\n", pools_n, us); 3287 if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) { 3288 sh->cmng.query_thread_on = 0; 3289 DRV_LOG(ERR, "Cannot reinitialize query alarm\n"); 3290 } else { 3291 sh->cmng.query_thread_on = 1; 3292 } 3293 } 3294 3295 /** 3296 * The periodic procedure for triggering asynchronous batch queries for all the 3297 * counter pools. This function is probably called by the host thread. 3298 * 3299 * @param[in] arg 3300 * The parameter for the alarm process. 3301 */ 3302 void 3303 mlx5_flow_query_alarm(void *arg) 3304 { 3305 struct mlx5_ibv_shared *sh = arg; 3306 struct mlx5_devx_obj *dcs; 3307 uint16_t offset; 3308 int ret; 3309 uint8_t batch = sh->cmng.batch; 3310 uint16_t pool_index = sh->cmng.pool_index; 3311 struct mlx5_pools_container *cont; 3312 struct mlx5_pools_container *mcont; 3313 struct mlx5_flow_counter_pool *pool; 3314 3315 if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES) 3316 goto set_alarm; 3317 next_container: 3318 cont = MLX5_CNT_CONTAINER(sh, batch, 1); 3319 mcont = MLX5_CNT_CONTAINER(sh, batch, 0); 3320 /* Check if resize was done and need to flip a container. */ 3321 if (cont != mcont) { 3322 if (cont->pools) { 3323 /* Clean the old container. */ 3324 rte_free(cont->pools); 3325 memset(cont, 0, sizeof(*cont)); 3326 } 3327 rte_cio_wmb(); 3328 /* Flip the host container. */ 3329 sh->cmng.mhi[batch] ^= (uint8_t)2; 3330 cont = mcont; 3331 } 3332 if (!cont->pools) { 3333 /* 2 empty containers case is unexpected. */ 3334 if (unlikely(batch != sh->cmng.batch)) 3335 goto set_alarm; 3336 batch ^= 0x1; 3337 pool_index = 0; 3338 goto next_container; 3339 } 3340 pool = cont->pools[pool_index]; 3341 if (pool->raw_hw) 3342 /* There is a pool query in progress. */ 3343 goto set_alarm; 3344 pool->raw_hw = 3345 LIST_FIRST(&sh->cmng.free_stat_raws); 3346 if (!pool->raw_hw) 3347 /* No free counter statistics raw memory. */ 3348 goto set_alarm; 3349 dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read 3350 (&pool->a64_dcs); 3351 offset = batch ? 
0 : dcs->id % MLX5_COUNTERS_PER_POOL; 3352 ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL - 3353 offset, NULL, NULL, 3354 pool->raw_hw->mem_mng->dm->id, 3355 (void *)(uintptr_t) 3356 (pool->raw_hw->data + offset), 3357 sh->devx_comp, 3358 (uint64_t)(uintptr_t)pool); 3359 if (ret) { 3360 DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID" 3361 " %d\n", pool->min_dcs->id); 3362 pool->raw_hw = NULL; 3363 goto set_alarm; 3364 } 3365 pool->raw_hw->min_dcs_id = dcs->id; 3366 LIST_REMOVE(pool->raw_hw, next); 3367 sh->cmng.pending_queries++; 3368 pool_index++; 3369 if (pool_index >= rte_atomic16_read(&cont->n_valid)) { 3370 batch ^= 0x1; 3371 pool_index = 0; 3372 } 3373 set_alarm: 3374 sh->cmng.batch = batch; 3375 sh->cmng.pool_index = pool_index; 3376 mlx5_set_query_alarm(sh); 3377 } 3378 3379 /** 3380 * Handler for the HW response with the ready values of an asynchronous batch 3381 * query. This function is expected to be called from the host thread. 3382 * 3383 * @param[in] sh 3384 * The pointer to the shared IB device context. 3385 * @param[in] async_id 3386 * The Devx async ID. 3387 * @param[in] status 3388 * The status of the completion. 3389 */ 3390 void 3391 mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh, 3392 uint64_t async_id, int status) 3393 { 3394 struct mlx5_flow_counter_pool *pool = 3395 (struct mlx5_flow_counter_pool *)(uintptr_t)async_id; 3396 struct mlx5_counter_stats_raw *raw_to_free; 3397 3398 if (unlikely(status)) { 3399 raw_to_free = pool->raw_hw; 3400 } else { 3401 raw_to_free = pool->raw; 3402 rte_spinlock_lock(&pool->sl); 3403 pool->raw = pool->raw_hw; 3404 rte_spinlock_unlock(&pool->sl); 3405 rte_atomic64_add(&pool->query_gen, 1); 3406 /* Be sure the new raw counters data is updated in memory. */ 3407 rte_cio_wmb(); 3408 } 3409 LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next); 3410 pool->raw_hw = NULL; 3411 sh->cmng.pending_queries--; 3412 } 3413
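
/*
 * Worked example of the batch counter query cycle above (for illustration
 * only): with four valid pools mlx5_set_query_alarm() arms the alarm with
 * MLX5_POOL_QUERY_FREQ_US / 4 = 250000 us, each expiration of
 * mlx5_flow_query_alarm() triggers the asynchronous DevX query of a single
 * pool, and mlx5_flow_async_pool_query_handle() later swaps in the fresh
 * raw counters. A full sweep therefore takes about one second regardless of
 * the number of pools.
 *
 * @code
 *	uint32_t pools_n = 4;
 *	uint32_t us = MLX5_POOL_QUERY_FREQ_US / pools_n; // 250000 us
 *
 *	// rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh) is re-armed after
 *	// every expiration; one pool is queried per expiration.
 * @endcode
 */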