/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_flow.h"
#include "mlx5_glue.h"
#include "mlx5_prm.h"
#include "mlx5_rxtx.h"

/* Dev ops structure defined in mlx5.c */
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;

/** Device flow drivers. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;
#endif
extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;

const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;

const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
	[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
#endif
	[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
	[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
};

enum mlx5_expansion {
	MLX5_EXPANSION_ROOT,
	MLX5_EXPANSION_ROOT_OUTER,
	MLX5_EXPANSION_ROOT_ETH_VLAN,
	MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_ETH,
	MLX5_EXPANSION_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_VLAN,
	MLX5_EXPANSION_OUTER_IPV4,
	MLX5_EXPANSION_OUTER_IPV4_UDP,
	MLX5_EXPANSION_OUTER_IPV4_TCP,
	MLX5_EXPANSION_OUTER_IPV6,
	MLX5_EXPANSION_OUTER_IPV6_UDP,
	MLX5_EXPANSION_OUTER_IPV6_TCP,
	MLX5_EXPANSION_VXLAN,
	MLX5_EXPANSION_VXLAN_GPE,
	MLX5_EXPANSION_GRE,
	MLX5_EXPANSION_MPLS,
	MLX5_EXPANSION_ETH,
	MLX5_EXPANSION_ETH_VLAN,
	MLX5_EXPANSION_VLAN,
	MLX5_EXPANSION_IPV4,
	MLX5_EXPANSION_IPV4_UDP,
	MLX5_EXPANSION_IPV4_TCP,
	MLX5_EXPANSION_IPV6,
	MLX5_EXPANSION_IPV6_UDP,
	MLX5_EXPANSION_IPV6_TCP,
};

/** Supported expansion of items. */
static const struct rte_flow_expand_node mlx5_support_expansion[] = {
	[MLX5_EXPANSION_ROOT] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						 MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
						 MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_OUTER_ETH] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6,
						 MLX5_EXPANSION_MPLS),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_OUTER_IPV4] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV4_UDP,
			 MLX5_EXPANSION_OUTER_IPV4_TCP,
			 MLX5_EXPANSION_GRE),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						 MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_OUTER_IPV6] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV6_UDP,
			 MLX5_EXPANSION_OUTER_IPV6_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						 MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
	[MLX5_EXPANSION_VXLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
	[MLX5_EXPANSION_VXLAN_GPE] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						 MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
	},
	[MLX5_EXPANSION_GRE] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
		.type = RTE_FLOW_ITEM_TYPE_GRE,
	},
	[MLX5_EXPANSION_MPLS] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_MPLS,
	},
	[MLX5_EXPANSION_ETH] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_IPV4] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
						 MLX5_EXPANSION_IPV4_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_IPV4_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_IPV6] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
						 MLX5_EXPANSION_IPV6_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_IPV6_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
};

static const struct rte_flow_ops mlx5_flow_ops = {
	.validate = mlx5_flow_validate,
	.create = mlx5_flow_create,
	.destroy = mlx5_flow_destroy,
	.flush = mlx5_flow_flush,
	.isolate = mlx5_flow_isolate,
	.query = mlx5_flow_query,
};

/* Convert FDIR request to Generic flow. */
struct mlx5_fdir {
	struct rte_flow_attr attr;
	struct rte_flow_item items[4];
	struct rte_flow_item_eth l2;
	struct rte_flow_item_eth l2_mask;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3_mask;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4_mask;
	struct rte_flow_action actions[2];
	struct rte_flow_action_queue queue;
};

/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};

/* Map of Verbs to Flow priority with 16 Verbs priorities. */
static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
	{ 9, 10, 11 }, { 12, 13, 14 },
};

/* Tunnel information. */
struct mlx5_flow_tunnel_info {
	uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
};

static struct mlx5_flow_tunnel_info tunnels_info[] = {
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GRE,
		.ptype = RTE_PTYPE_TUNNEL_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
	},
};

/**
 * Discover the maximum number of priorities available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
			.port = (uint8_t)priv->ibv_port,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
	uint16_t vprio[] = { 8, 16 };
	int i;
	int priority = 0;

	if (!drop) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	for (i = 0; i != RTE_DIM(vprio); i++) {
		flow_attr.attr.priority = vprio[i] - 1;
		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
		priority = vprio[i];
	}
	mlx5_hrxq_drop_release(dev);
	switch (priority) {
	case 8:
		priority = RTE_DIM(priority_map_3);
		break;
	case 16:
		priority = RTE_DIM(priority_map_5);
		break;
	default:
		rte_errno = ENOTSUP;
		DRV_LOG(ERR,
			"port %u verbs maximum priority: %d expected 8/16",
			dev->data->port_id, priority);
		return -rte_errno;
	}
	DRV_LOG(INFO, "port %u flow maximum priority: %d",
		dev->data->port_id, priority);
	return priority;
}

/**
 * Adjust flow priority based on the highest layer and the requested priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] priority
 *   The rule base priority.
 * @param[in] subpriority
 *   The priority based on the items.
 *
 * @return
 *   The new priority.
 */
uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
				   uint32_t subpriority)
{
	uint32_t res = 0;
	struct mlx5_priv *priv = dev->data->dev_private;

	switch (priv->config.flow_prio) {
	case RTE_DIM(priority_map_3):
		res = priority_map_3[priority][subpriority];
		break;
	case RTE_DIM(priority_map_5):
		res = priority_map_5[priority][subpriority];
		break;
	}
	return res;
}
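
/*
 * Illustrative note on the mapping above (values taken from priority_map_3
 * and priority_map_5): with 8 Verbs priorities (flow_prio ==
 * RTE_DIM(priority_map_3)) a rule with base priority 1 and item sub-priority
 * 2 lands on Verbs priority priority_map_3[1][2] == 4, while with 16 Verbs
 * priorities the same rule would use priority_map_5[1][2] == 5.
 */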

/**
 * Verify the @p item specifications (spec, last, mask) are compatible with the
 * NIC capabilities.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] mask
 *   @p item->mask or flow default bit-masks.
 * @param[in] nic_mask
 *   Bit-masks covering supported fields by the NIC to compare with user mask.
 * @param[in] size
 *   Bit-masks size in bytes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
			  const uint8_t *mask,
			  const uint8_t *nic_mask,
			  unsigned int size,
			  struct rte_flow_error *error)
{
	unsigned int i;

	assert(nic_mask);
	for (i = 0; i < size; ++i)
		if ((nic_mask[i] | mask[i]) != nic_mask[i])
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "mask enables non supported"
						  " bits");
	if (!item->spec && (item->mask || item->last))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "mask/last without a spec is not"
					  " supported");
	if (item->spec && item->last) {
		uint8_t spec[size];
		uint8_t last[size];
		unsigned int i;
		int ret;

		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
			last[i] = ((const uint8_t *)item->last)[i] & mask[i];
		}
		ret = memcmp(spec, last, size);
		if (ret != 0)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "range is not valid");
	}
	return 0;
}

/**
 * Adjust the hash fields according to the @p flow information.
 *
 * @param[in] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] tunnel
 *   1 when the hash field is for a tunnel item.
 * @param[in] layer_types
 *   ETH_RSS_* types.
 * @param[in] hash_fields
 *   Item hash fields.
 *
 * @return
 *   The hash fields that should be used.
 */
uint64_t
mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
			    int tunnel __rte_unused, uint64_t layer_types,
			    uint64_t hash_fields)
{
	struct rte_flow *flow = dev_flow->flow;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	int rss_request_inner = flow->rss.level >= 2;

	/* Check RSS hash level for tunnel. */
	if (tunnel && rss_request_inner)
		hash_fields |= IBV_RX_HASH_INNER;
	else if (tunnel || rss_request_inner)
		return 0;
#endif
	/* Check if requested layer matches RSS hash fields. */
	if (!(flow->rss.types & layer_types))
		return 0;
	return hash_fields;
}

/**
 * Look up and set the ptype in the Rx queue data. Only a single ptype can be
 * used; if several tunnel rules are used on this queue, the tunnel ptype is
 * cleared.
 *
 * @param rxq_ctrl
 *   Rx queue to update.
 */
static void
flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	unsigned int i;
	uint32_t tunnel_ptype = 0;

	/* Look up for the ptype to use. */
	for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
		if (!rxq_ctrl->flow_tunnels_n[i])
			continue;
		if (!tunnel_ptype) {
			tunnel_ptype = tunnels_info[i].ptype;
		} else {
			tunnel_ptype = 0;
			break;
		}
	}
	rxq_ctrl->rxq.tunnel = tunnel_ptype;
}
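
/*
 * Example of the single-ptype rule above: while an Rx queue only carries
 * VXLAN flows, rxq.tunnel is RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP (see
 * tunnels_info[]); once a GRE flow is added to the same queue two counters
 * become non-zero and the tunnel ptype is cleared back to 0.
 */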

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
 * flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] dev_flow
 *   Pointer to device flow structure.
 */
static void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow *flow = dev_flow->flow;
	const int mark = !!(flow->actions &
			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	for (i = 0; i != flow->rss.queue_num; ++i) {
		int idx = (*flow->queue)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		if (mark) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n++;
		}
		if (tunnel) {
			unsigned int j;

			/* Increase the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_flow->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]++;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Pointer to flow structure.
 */
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow *dev_flow;

	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
		flow_drv_rxq_flags_set(dev, dev_flow);
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * device flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] dev_flow
 *   Pointer to the device flow.
 */
static void
flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow *flow = dev_flow->flow;
	const int mark = !!(flow->actions &
			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	assert(dev->data->dev_started);
	for (i = 0; i != flow->rss.queue_num; ++i) {
		int idx = (*flow->queue)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		if (mark) {
			rxq_ctrl->flow_mark_n--;
			rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
		}
		if (tunnel) {
			unsigned int j;

			/* Decrease the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_flow->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]--;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * @p flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the flow.
 */
static void
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow *dev_flow;

	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
		flow_drv_rxq_flags_trim(dev, dev_flow);
}
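
/*
 * The Mark/Flag handling above is reference counted per Rx queue:
 * flow_drv_rxq_flags_set() increments flow_mark_n and
 * flow_drv_rxq_flags_trim() decrements it, so rxq.mark stays enabled as long
 * as at least one MARK/FLAG flow still targets the queue.
 */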

/**
 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl;
		unsigned int j;

		if (!(*priv->rxqs)[i])
			continue;
		rxq_ctrl = container_of((*priv->rxqs)[i],
					struct mlx5_rxq_ctrl, rxq);
		rxq_ctrl->flow_mark_n = 0;
		rxq_ctrl->rxq.mark = 0;
		for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
			rxq_ctrl->flow_tunnels_n[j] = 0;
		rxq_ctrl->rxq.tunnel = 0;
	}
}

/**
 * Validate the flag action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_flag(uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{

	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't mark and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 flag"
					  " actions in same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "flag action not supported for "
					  "egress");
	return 0;
}

/**
 * Validate the mark action.
 *
 * @param[in] action
 *   Pointer to the mark action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
			       uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	const struct rte_flow_action_mark *mark = action->conf;

	if (!mark)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "configuration cannot be null");
	if (mark->id >= MLX5_FLOW_MARK_MAX)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &mark->id,
					  "mark id must be in 0 <= id < "
					  RTE_STR(MLX5_FLOW_MARK_MAX));
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and mark in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't flag and mark in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 mark actions in same"
					  " flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "mark action not supported for "
					  "egress");
	return 0;
}

/**
 * Validate the drop action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_drop(uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and mark in same flow");
	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "drop action not supported for "
					  "egress");
	return 0;
}

/**
 * Validate the queue action.
 *
 * @param[in] action
 *   Pointer to the queue action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
				uint64_t action_flags,
				struct rte_eth_dev *dev,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_queue *queue = action->conf;

	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (!priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No Rx queues configured");
	if (queue->index >= priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue index out of range");
	if (!(*priv->rxqs)[queue->index])
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue is not configured");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "queue action not supported for "
					  "egress");
	return 0;
}

/**
 * Validate the RSS action.
 *
 * @param[in] action
 *   Pointer to the RSS action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[in] item_flags
 *   Items that were detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
			      uint64_t action_flags,
			      struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attr,
			      uint64_t item_flags,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_rss *rss = action->conf;
	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions"
					  " in same flow");
	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
	    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->func,
					  "RSS hash function not supported");
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (rss->level > 2)
#else
	if (rss->level > 1)
#endif
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->level,
					  "tunnel RSS is not supported");
	/* Allow RSS key_len 0 in case of NULL (default) RSS key. */
	if (rss->key_len == 0 && rss->key != NULL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key length 0");
	if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too small");
	if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too large");
	if (rss->queue_num > priv->config.ind_table_max_size)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->queue_num,
					  "number of queues too large");
	if (rss->types & MLX5_RSS_HF_MASK)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->types,
					  "some RSS protocols are not"
					  " supported");
	if (!priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No Rx queues configured");
	if (!rss->queue_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No queues configured");
	for (i = 0; i != rss->queue_num; ++i) {
		if (!(*priv->rxqs)[rss->queue[i]])
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 &rss->queue[i], "queue is not configured");
	}
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "rss action not supported for "
					  "egress");
	if (rss->level > 1 && !tunnel)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "inner RSS is not supported for "
					  "non-tunnel flows");
	return 0;
}
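
/*
 * Note on the RSS key checks above: a NULL key with key_len == 0 selects the
 * driver default hash key, otherwise the key must be exactly
 * MLX5_RSS_HASH_KEY_LEN bytes, e.g. (rss_key being a hypothetical buffer):
 *
 *	static uint8_t rss_key[MLX5_RSS_HASH_KEY_LEN];
 *	struct rte_flow_action_rss rss = {
 *		.key = rss_key,
 *		.key_len = sizeof(rss_key),
 *	};
 */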

/**
 * Validate the count action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "count action not supported for "
					  "egress");
	return 0;
}

/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attributes,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t priority_max = priv->config.flow_prio - 1;

	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL, "groups is not supported");
	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
	    attributes->priority >= priority_max)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL, "priority out of range");
	if (attributes->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "egress is not supported");
	if (attributes->transfer && !priv->config.dv_esw_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  NULL, "transfer is not supported");
	if (!attributes->ingress)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "ingress attribute is mandatory");
	return 0;
}

/**
 * Validate ICMP6 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
			      uint64_t item_flags,
			      uint8_t target_protocol,
			      struct rte_flow_error *error)
{
	const struct rte_flow_item_icmp6 *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
				      MLX5_FLOW_LAYER_OUTER_L3_IPV6;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ICMP6 layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv6 is mandatory to filter on"
					  " ICMP6");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_icmp6_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_icmp6_mask,
		 sizeof(struct rte_flow_item_icmp6), error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate ICMP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     uint8_t target_protocol,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_icmp *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
				      MLX5_FLOW_LAYER_OUTER_L3_IPV4;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ICMP layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv4 is mandatory to filter"
					  " on ICMP");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_icmp_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_icmp_mask,
		 sizeof(struct rte_flow_item_icmp), error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate Ethernet item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_eth *mask = item->mask;
	const struct rte_flow_item_eth nic_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.type = RTE_BE16(0xffff),
	};
	int ret;
	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
				       MLX5_FLOW_LAYER_OUTER_L2;

	if (item_flags & ethm)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L2 layers not supported");
	if (!mask)
		mask = &rte_flow_item_eth_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_eth),
					error);
	return ret;
}

/**
 * Validate VLAN item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	const struct rte_flow_item_vlan nic_mask = {
		.tci = RTE_BE16(0x0fff),
		.inner_type = RTE_BE16(0xffff),
	};
	uint16_t vlan_tag = 0;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	int ret;
	const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
					MLX5_FLOW_LAYER_INNER_L4) :
				       (MLX5_FLOW_LAYER_OUTER_L3 |
					MLX5_FLOW_LAYER_OUTER_L4);
	const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
					MLX5_FLOW_LAYER_OUTER_VLAN;

	if (item_flags & vlanm)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple VLAN layers not supported");
	else if ((item_flags & l34m) != 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L2 layer cannot follow L3/L4 layer");
	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_vlan),
					error);
	if (ret)
		return ret;
	if (spec) {
		vlan_tag = spec->tci;
		vlan_tag &= mask->tci;
	}
	/*
	 * From the Verbs perspective an empty VLAN is equivalent
	 * to a packet without a VLAN layer.
	 */
	if (!vlan_tag)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "VLAN cannot be empty");
	return 0;
}

/**
 * Validate IPV4 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] acc_mask
 *   Acceptable mask; if NULL, the default internal mask
 *   will be used to check whether item fields are supported.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     const struct rte_flow_item_ipv4 *acc_mask,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *mask = item->mask;
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 nic_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.next_proto_id = 0xff,
		},
	};
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;
	uint8_t next_proto = 0xFF;

	if (item_flags & MLX5_FLOW_LAYER_IPIP) {
		if (mask && spec)
			next_proto = mask->hdr.next_proto_id &
				     spec->hdr.next_proto_id;
		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "multiple tunnel "
						  "not supported");
	}
	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "wrong tunnel type - IPv6 specified "
					  "but IPv4 item provided");
	if (item_flags & l3m)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L3 layers not supported");
	else if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an L4 layer.");
	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	else if (mask->hdr.next_proto_id != 0 &&
		 mask->hdr.next_proto_id != 0xff)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
					  "partial mask is not supported"
					  " for protocol");
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					acc_mask ? (const uint8_t *)acc_mask
						 : (const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_ipv4),
					error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate IPV6 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] acc_mask
 *   Acceptable mask; if NULL, the default internal mask
 *   will be used to check whether item fields are supported.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     const struct rte_flow_item_ipv6 *acc_mask,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv6 *mask = item->mask;
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
			.hop_limits = 0xff,
		},
	};
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;
	uint8_t next_proto = 0xFF;

	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
		if (mask && spec)
			next_proto = mask->hdr.proto & spec->hdr.proto;
		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "multiple tunnel "
						  "not supported");
	}
	if (item_flags & MLX5_FLOW_LAYER_IPIP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "wrong tunnel type - IPv4 specified "
					  "but IPv6 item provided");
	if (item_flags & l3m)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L3 layers not supported");
	else if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an L4 layer.");
	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					acc_mask ? (const uint8_t *)acc_mask
						 : (const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_ipv6),
					error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate UDP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    uint8_t target_protocol,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_udp *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with UDP layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 is mandatory to filter on L4");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_udp_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_udp_mask,
		 sizeof(struct rte_flow_item_udp), error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate TCP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[in] flow_mask
 *   mlx5 flow-specific (DV, verbs, etc.) supported header fields mask.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    uint8_t target_protocol,
			    const struct rte_flow_item_tcp *flow_mask,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_tcp *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	assert(flow_mask);
	if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with TCP layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 is mandatory to filter on L4");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_tcp_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)flow_mask,
		 sizeof(struct rte_flow_item_tcp), error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate VXLAN item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
			      uint64_t item_flags,
			      struct rte_flow_error *error)
{
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	int ret;
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };
	uint32_t vlan_id = 0;

	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple tunnel layers not"
					  " supported");
	/*
	 * Verify only UDPv4 is present as defined in
	 * https://tools.ietf.org/html/rfc7348
	 */
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "no outer UDP layer found");
	if (!mask)
		mask = &rte_flow_item_vxlan_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_vxlan_mask,
		 sizeof(struct rte_flow_item_vxlan),
		 error);
	if (ret < 0)
		return ret;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vlan_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vlan_id &= id.vlan_id;
	}
	/*
	 * A tunnel ID of 0 is equivalent to not adding a VXLAN layer: if only
	 * this layer is defined in the Verbs specification it is interpreted
	 * as a wildcard and all packets will match this rule, and if it
	 * follows a full stack layer (e.g. eth / ipv4 / udp), all packets
	 * matching the preceding layers will also match this rule. To avoid
	 * such a situation, VNI 0 is currently refused.
	 */
	if (!vlan_id)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VXLAN vni cannot be 0");
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VXLAN tunnel must be fully defined");
	return 0;
}

/**
 * Validate VXLAN_GPE item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
				  uint64_t item_flags,
				  struct rte_eth_dev *dev,
				  struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
	int ret;
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };
	uint32_t vlan_id = 0;

	if (!priv->config.l3_vxlan_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 VXLAN is not enabled by device"
					  " parameter and/or not configured in"
					  " firmware");
	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple tunnel layers not"
					  " supported");
	/*
	 * Verify only UDPv4 is present as defined in
	 * https://tools.ietf.org/html/rfc7348
	 */
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "no outer UDP layer found");
	if (!mask)
		mask = &rte_flow_item_vxlan_gpe_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
		 sizeof(struct rte_flow_item_vxlan_gpe),
		 error);
	if (ret < 0)
		return ret;
	if (spec) {
		if (spec->protocol)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "VxLAN-GPE protocol"
						  " not supported");
		memcpy(&id.vni[1], spec->vni, 3);
		vlan_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vlan_id &= id.vlan_id;
	}
	/*
	 * A tunnel ID of 0 is equivalent to not adding a VXLAN layer: if only
	 * this layer is defined in the Verbs specification it is interpreted
	 * as a wildcard and all packets will match this rule, and if it
	 * follows a full stack layer (e.g. eth / ipv4 / udp), all packets
	 * matching the preceding layers will also match this rule. To avoid
	 * such a situation, VNI 0 is currently refused.
	 */
	if (!vlan_id)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VXLAN-GPE vni cannot be 0");
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VXLAN-GPE tunnel must be fully"
					  " defined");
	return 0;
}
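
/*
 * Note on the vni/vlan_id union used by both VXLAN validators above: the
 * three VNI bytes of spec and mask are copied into id.vni[1..3], so the
 * resulting 32-bit vlan_id is non-zero exactly when at least one masked VNI
 * bit is set, which is what the "vni cannot be 0" checks rely on.
 */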

/**
 * Validate GRE Key item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit flags to mark detected items.
 * @param[in] gre_item
 *   Pointer to the GRE item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
				uint64_t item_flags,
				const struct rte_flow_item *gre_item,
				struct rte_flow_error *error)
{
	const rte_be32_t *mask = item->mask;
	int ret = 0;
	rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
	const struct rte_flow_item_gre *gre_spec = gre_item->spec;
	const struct rte_flow_item_gre *gre_mask = gre_item->mask;

	if (item_flags & MLX5_FLOW_LAYER_GRE_KEY)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Multiple GRE key not supported");
	if (!(item_flags & MLX5_FLOW_LAYER_GRE))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "No preceding GRE header");
	if (item_flags & MLX5_FLOW_LAYER_INNER)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "GRE key following a wrong item");
	if (!gre_mask)
		gre_mask = &rte_flow_item_gre_mask;
	if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
	    !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Key bit must be on");

	if (!mask)
		mask = &gre_key_default_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&gre_key_default_mask,
		 sizeof(rte_be32_t), error);
	return ret;
}

/**
 * Validate GRE item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit flags to mark detected items.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    uint8_t target_protocol,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_gre *spec __rte_unused = item->spec;
	const struct rte_flow_item_gre *mask = item->mask;
	int ret;
	const struct rte_flow_item_gre nic_mask = {
		.c_rsvd0_ver = RTE_BE16(0xB000),
		.protocol = RTE_BE16(UINT16_MAX),
	};

	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with this GRE layer");
	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple tunnel layers not"
					  " supported");
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 Layer is missing");
	if (!mask)
		mask = &rte_flow_item_gre_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&nic_mask,
		 sizeof(struct rte_flow_item_gre), error);
	if (ret < 0)
		return ret;
#ifndef HAVE_MLX5DV_DR
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
	if (spec && (spec->protocol & mask->protocol))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "without MPLS support the"
					  " specification cannot be used for"
					  " filtering");
#endif
#endif
	return 0;
}

/**
 * Validate MPLS item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] prev_layer
 *   The protocol layer indicated in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
			     const struct rte_flow_item *item __rte_unused,
			     uint64_t item_flags __rte_unused,
			     uint64_t prev_layer __rte_unused,
			     struct rte_flow_error *error)
{
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	const struct rte_flow_item_mpls *mask = item->mask;
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret;

	if (!priv->config.mpls_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "MPLS not supported or"
					  " disabled in firmware"
					  " configuration.");
	/* MPLS over IP, UDP, GRE is allowed. */
	if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
			    MLX5_FLOW_LAYER_OUTER_L4_UDP |
			    MLX5_FLOW_LAYER_GRE)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with MPLS layer");
	/* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
*/ 1868 if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) && 1869 !(item_flags & MLX5_FLOW_LAYER_GRE)) 1870 return rte_flow_error_set(error, ENOTSUP, 1871 RTE_FLOW_ERROR_TYPE_ITEM, item, 1872 "multiple tunnel layers not" 1873 " supported"); 1874 if (!mask) 1875 mask = &rte_flow_item_mpls_mask; 1876 ret = mlx5_flow_item_acceptable 1877 (item, (const uint8_t *)mask, 1878 (const uint8_t *)&rte_flow_item_mpls_mask, 1879 sizeof(struct rte_flow_item_mpls), error); 1880 if (ret < 0) 1881 return ret; 1882 return 0; 1883 #endif 1884 return rte_flow_error_set(error, ENOTSUP, 1885 RTE_FLOW_ERROR_TYPE_ITEM, item, 1886 "MPLS is not supported by Verbs, please" 1887 " update."); 1888 } 1889 1890 static int 1891 flow_null_validate(struct rte_eth_dev *dev __rte_unused, 1892 const struct rte_flow_attr *attr __rte_unused, 1893 const struct rte_flow_item items[] __rte_unused, 1894 const struct rte_flow_action actions[] __rte_unused, 1895 struct rte_flow_error *error) 1896 { 1897 return rte_flow_error_set(error, ENOTSUP, 1898 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 1899 } 1900 1901 static struct mlx5_flow * 1902 flow_null_prepare(const struct rte_flow_attr *attr __rte_unused, 1903 const struct rte_flow_item items[] __rte_unused, 1904 const struct rte_flow_action actions[] __rte_unused, 1905 struct rte_flow_error *error) 1906 { 1907 rte_flow_error_set(error, ENOTSUP, 1908 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 1909 return NULL; 1910 } 1911 1912 static int 1913 flow_null_translate(struct rte_eth_dev *dev __rte_unused, 1914 struct mlx5_flow *dev_flow __rte_unused, 1915 const struct rte_flow_attr *attr __rte_unused, 1916 const struct rte_flow_item items[] __rte_unused, 1917 const struct rte_flow_action actions[] __rte_unused, 1918 struct rte_flow_error *error) 1919 { 1920 return rte_flow_error_set(error, ENOTSUP, 1921 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 1922 } 1923 1924 static int 1925 flow_null_apply(struct rte_eth_dev *dev __rte_unused, 1926 struct rte_flow *flow __rte_unused, 1927 struct rte_flow_error *error) 1928 { 1929 return rte_flow_error_set(error, ENOTSUP, 1930 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 1931 } 1932 1933 static void 1934 flow_null_remove(struct rte_eth_dev *dev __rte_unused, 1935 struct rte_flow *flow __rte_unused) 1936 { 1937 } 1938 1939 static void 1940 flow_null_destroy(struct rte_eth_dev *dev __rte_unused, 1941 struct rte_flow *flow __rte_unused) 1942 { 1943 } 1944 1945 static int 1946 flow_null_query(struct rte_eth_dev *dev __rte_unused, 1947 struct rte_flow *flow __rte_unused, 1948 const struct rte_flow_action *actions __rte_unused, 1949 void *data __rte_unused, 1950 struct rte_flow_error *error) 1951 { 1952 return rte_flow_error_set(error, ENOTSUP, 1953 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); 1954 } 1955 1956 /* Void driver to protect from null pointer reference. */ 1957 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = { 1958 .validate = flow_null_validate, 1959 .prepare = flow_null_prepare, 1960 .translate = flow_null_translate, 1961 .apply = flow_null_apply, 1962 .remove = flow_null_remove, 1963 .destroy = flow_null_destroy, 1964 .query = flow_null_query, 1965 }; 1966 1967 /** 1968 * Select flow driver type according to flow attributes and device 1969 * configuration. 1970 * 1971 * @param[in] dev 1972 * Pointer to the dev structure. 1973 * @param[in] attr 1974 * Pointer to the flow attributes. 1975 * 1976 * @return 1977 * flow driver type, MLX5_FLOW_TYPE_MAX otherwise. 
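 *
 * A hedged illustration of how the result is consumed (it mirrors
 * flow_drv_validate() below and is not an additional API):
 * @code{.c}
 * const struct mlx5_flow_driver_ops *fops =
 *         flow_get_drv_ops(flow_get_drv_type(dev, attr));
 *
 * return fops->validate(dev, attr, items, actions, error);
 * @endcode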
1978  */
1979 static enum mlx5_flow_drv_type
1980 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
1981 {
1982 	struct mlx5_priv *priv = dev->data->dev_private;
1983 	enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX;
1984 
1985 	if (attr->transfer && priv->config.dv_esw_en)
1986 		type = MLX5_FLOW_TYPE_DV;
1987 	if (!attr->transfer)
1988 		type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
1989 						 MLX5_FLOW_TYPE_VERBS;
1990 	return type;
1991 }
1992 
1993 #define flow_get_drv_ops(type) flow_drv_ops[type]
1994 
1995 /**
1996  * Flow driver validation API. This abstracts calling driver specific functions.
1997  * The type of flow driver is determined according to flow attributes.
1998  *
1999  * @param[in] dev
2000  *   Pointer to the dev structure.
2001  * @param[in] attr
2002  *   Pointer to the flow attributes.
2003  * @param[in] items
2004  *   Pointer to the list of items.
2005  * @param[in] actions
2006  *   Pointer to the list of actions.
2007  * @param[out] error
2008  *   Pointer to the error structure.
2009  *
2010  * @return
2011  *   0 on success, a negative errno value otherwise and rte_errno is set.
2012  */
2013 static inline int
2014 flow_drv_validate(struct rte_eth_dev *dev,
2015 		  const struct rte_flow_attr *attr,
2016 		  const struct rte_flow_item items[],
2017 		  const struct rte_flow_action actions[],
2018 		  struct rte_flow_error *error)
2019 {
2020 	const struct mlx5_flow_driver_ops *fops;
2021 	enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
2022 
2023 	fops = flow_get_drv_ops(type);
2024 	return fops->validate(dev, attr, items, actions, error);
2025 }
2026 
2027 /**
2028  * Flow driver preparation API. This abstracts calling driver specific
2029  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
2030  * calculates the size of memory required for the device flow, allocates the
2031  * memory, initializes the device flow and returns the pointer.
2032  *
2033  * @note
2034  *   This function initializes the device flow structure such as dv or verbs in
2035  *   struct mlx5_flow. However, it is the caller's responsibility to initialize
2036  *   the rest. For example, adding the returned device flow to the
2037  *   flow->dev_flows list and setting the backward reference to the flow should
2038  *   be done outside of this function. The layers field is not filled either.
2039  *
 * @param[in] flow
 *   Pointer to the parent flow structure.
2040  * @param[in] attr
2041  *   Pointer to the flow attributes.
2042  * @param[in] items
2043  *   Pointer to the list of items.
2044  * @param[in] actions
2045  *   Pointer to the list of actions.
2046  * @param[out] error
2047  *   Pointer to the error structure.
2048  *
2049  * @return
2050  *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
2051  */
2052 static inline struct mlx5_flow *
2053 flow_drv_prepare(const struct rte_flow *flow,
2054 		 const struct rte_flow_attr *attr,
2055 		 const struct rte_flow_item items[],
2056 		 const struct rte_flow_action actions[],
2057 		 struct rte_flow_error *error)
2058 {
2059 	const struct mlx5_flow_driver_ops *fops;
2060 	enum mlx5_flow_drv_type type = flow->drv_type;
2061 
2062 	assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2063 	fops = flow_get_drv_ops(type);
2064 	return fops->prepare(attr, items, actions, error);
2065 }
2066 
2067 /**
2068  * Flow driver translation API. This abstracts calling driver specific
2069  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
2070  * translates a generic flow into a driver flow. flow_drv_prepare() must
2071  * precede.
2072 * 2073 * @note 2074 * dev_flow->layers could be filled as a result of parsing during translation 2075 * if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled 2076 * if necessary. As a flow can have multiple dev_flows by RSS flow expansion, 2077 * flow->actions could be overwritten even though all the expanded dev_flows 2078 * have the same actions. 2079 * 2080 * @param[in] dev 2081 * Pointer to the rte dev structure. 2082 * @param[in, out] dev_flow 2083 * Pointer to the mlx5 flow. 2084 * @param[in] attr 2085 * Pointer to the flow attributes. 2086 * @param[in] items 2087 * Pointer to the list of items. 2088 * @param[in] actions 2089 * Pointer to the list of actions. 2090 * @param[out] error 2091 * Pointer to the error structure. 2092 * 2093 * @return 2094 * 0 on success, a negative errno value otherwise and rte_errno is set. 2095 */ 2096 static inline int 2097 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow, 2098 const struct rte_flow_attr *attr, 2099 const struct rte_flow_item items[], 2100 const struct rte_flow_action actions[], 2101 struct rte_flow_error *error) 2102 { 2103 const struct mlx5_flow_driver_ops *fops; 2104 enum mlx5_flow_drv_type type = dev_flow->flow->drv_type; 2105 2106 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2107 fops = flow_get_drv_ops(type); 2108 return fops->translate(dev, dev_flow, attr, items, actions, error); 2109 } 2110 2111 /** 2112 * Flow driver apply API. This abstracts calling driver specific functions. 2113 * Parent flow (rte_flow) should have driver type (drv_type). It applies 2114 * translated driver flows on to device. flow_drv_translate() must precede. 2115 * 2116 * @param[in] dev 2117 * Pointer to Ethernet device structure. 2118 * @param[in, out] flow 2119 * Pointer to flow structure. 2120 * @param[out] error 2121 * Pointer to error structure. 2122 * 2123 * @return 2124 * 0 on success, a negative errno value otherwise and rte_errno is set. 2125 */ 2126 static inline int 2127 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, 2128 struct rte_flow_error *error) 2129 { 2130 const struct mlx5_flow_driver_ops *fops; 2131 enum mlx5_flow_drv_type type = flow->drv_type; 2132 2133 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2134 fops = flow_get_drv_ops(type); 2135 return fops->apply(dev, flow, error); 2136 } 2137 2138 /** 2139 * Flow driver remove API. This abstracts calling driver specific functions. 2140 * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow 2141 * on device. All the resources of the flow should be freed by calling 2142 * flow_drv_destroy(). 2143 * 2144 * @param[in] dev 2145 * Pointer to Ethernet device. 2146 * @param[in, out] flow 2147 * Pointer to flow structure. 2148 */ 2149 static inline void 2150 flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) 2151 { 2152 const struct mlx5_flow_driver_ops *fops; 2153 enum mlx5_flow_drv_type type = flow->drv_type; 2154 2155 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2156 fops = flow_get_drv_ops(type); 2157 fops->remove(dev, flow); 2158 } 2159 2160 /** 2161 * Flow driver destroy API. This abstracts calling driver specific functions. 2162 * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow 2163 * on device and releases resources of the flow. 2164 * 2165 * @param[in] dev 2166 * Pointer to Ethernet device. 2167 * @param[in, out] flow 2168 * Pointer to flow structure. 
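 *
 * @note Unlike flow_drv_remove(), this callback also releases the driver
 *   resources owned by the flow; mlx5_flow_stop() relies on flow_drv_remove()
 *   so that mlx5_flow_start() can re-apply the very same flows later.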
2169 */ 2170 static inline void 2171 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) 2172 { 2173 const struct mlx5_flow_driver_ops *fops; 2174 enum mlx5_flow_drv_type type = flow->drv_type; 2175 2176 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); 2177 fops = flow_get_drv_ops(type); 2178 fops->destroy(dev, flow); 2179 } 2180 2181 /** 2182 * Validate a flow supported by the NIC. 2183 * 2184 * @see rte_flow_validate() 2185 * @see rte_flow_ops 2186 */ 2187 int 2188 mlx5_flow_validate(struct rte_eth_dev *dev, 2189 const struct rte_flow_attr *attr, 2190 const struct rte_flow_item items[], 2191 const struct rte_flow_action actions[], 2192 struct rte_flow_error *error) 2193 { 2194 int ret; 2195 2196 ret = flow_drv_validate(dev, attr, items, actions, error); 2197 if (ret < 0) 2198 return ret; 2199 return 0; 2200 } 2201 2202 /** 2203 * Get RSS action from the action list. 2204 * 2205 * @param[in] actions 2206 * Pointer to the list of actions. 2207 * 2208 * @return 2209 * Pointer to the RSS action if exist, else return NULL. 2210 */ 2211 static const struct rte_flow_action_rss* 2212 flow_get_rss_action(const struct rte_flow_action actions[]) 2213 { 2214 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2215 switch (actions->type) { 2216 case RTE_FLOW_ACTION_TYPE_RSS: 2217 return (const struct rte_flow_action_rss *) 2218 actions->conf; 2219 default: 2220 break; 2221 } 2222 } 2223 return NULL; 2224 } 2225 2226 static unsigned int 2227 find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) 2228 { 2229 const struct rte_flow_item *item; 2230 unsigned int has_vlan = 0; 2231 2232 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { 2233 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { 2234 has_vlan = 1; 2235 break; 2236 } 2237 } 2238 if (has_vlan) 2239 return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN : 2240 MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN; 2241 return rss_level < 2 ? MLX5_EXPANSION_ROOT : 2242 MLX5_EXPANSION_ROOT_OUTER; 2243 } 2244 2245 /** 2246 * Create a flow and add it to @p list. 2247 * 2248 * @param dev 2249 * Pointer to Ethernet device. 2250 * @param list 2251 * Pointer to a TAILQ flow list. 2252 * @param[in] attr 2253 * Flow rule attributes. 2254 * @param[in] items 2255 * Pattern specification (list terminated by the END pattern item). 2256 * @param[in] actions 2257 * Associated actions (list terminated by the END action). 2258 * @param[out] error 2259 * Perform verbose error reporting if not NULL. 2260 * 2261 * @return 2262 * A flow on success, NULL otherwise and rte_errno is set. 
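 *
 * A hedged usage sketch (it mirrors mlx5_flow_create() below; illustrative
 * only):
 * @code{.c}
 * struct mlx5_priv *priv = dev->data->dev_private;
 * struct rte_flow *flow;
 *
 * flow = flow_list_create(dev, &priv->flows, attr, items, actions, error);
 * if (!flow)
 *         return NULL; // rte_errno has already been set
 * @endcode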
2263 */ 2264 static struct rte_flow * 2265 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, 2266 const struct rte_flow_attr *attr, 2267 const struct rte_flow_item items[], 2268 const struct rte_flow_action actions[], 2269 struct rte_flow_error *error) 2270 { 2271 struct rte_flow *flow = NULL; 2272 struct mlx5_flow *dev_flow; 2273 const struct rte_flow_action_rss *rss; 2274 union { 2275 struct rte_flow_expand_rss buf; 2276 uint8_t buffer[2048]; 2277 } expand_buffer; 2278 struct rte_flow_expand_rss *buf = &expand_buffer.buf; 2279 int ret; 2280 uint32_t i; 2281 uint32_t flow_size; 2282 2283 ret = flow_drv_validate(dev, attr, items, actions, error); 2284 if (ret < 0) 2285 return NULL; 2286 flow_size = sizeof(struct rte_flow); 2287 rss = flow_get_rss_action(actions); 2288 if (rss) 2289 flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t), 2290 sizeof(void *)); 2291 else 2292 flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *)); 2293 flow = rte_calloc(__func__, 1, flow_size, 0); 2294 if (!flow) { 2295 rte_errno = ENOMEM; 2296 return NULL; 2297 } 2298 flow->drv_type = flow_get_drv_type(dev, attr); 2299 flow->ingress = attr->ingress; 2300 flow->transfer = attr->transfer; 2301 assert(flow->drv_type > MLX5_FLOW_TYPE_MIN && 2302 flow->drv_type < MLX5_FLOW_TYPE_MAX); 2303 flow->queue = (void *)(flow + 1); 2304 LIST_INIT(&flow->dev_flows); 2305 if (rss && rss->types) { 2306 unsigned int graph_root; 2307 2308 graph_root = find_graph_root(items, rss->level); 2309 ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer), 2310 items, rss->types, 2311 mlx5_support_expansion, 2312 graph_root); 2313 assert(ret > 0 && 2314 (unsigned int)ret < sizeof(expand_buffer.buffer)); 2315 } else { 2316 buf->entries = 1; 2317 buf->entry[0].pattern = (void *)(uintptr_t)items; 2318 } 2319 for (i = 0; i < buf->entries; ++i) { 2320 dev_flow = flow_drv_prepare(flow, attr, buf->entry[i].pattern, 2321 actions, error); 2322 if (!dev_flow) 2323 goto error; 2324 dev_flow->flow = flow; 2325 LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); 2326 ret = flow_drv_translate(dev, dev_flow, attr, 2327 buf->entry[i].pattern, 2328 actions, error); 2329 if (ret < 0) 2330 goto error; 2331 } 2332 if (dev->data->dev_started) { 2333 ret = flow_drv_apply(dev, flow, error); 2334 if (ret < 0) 2335 goto error; 2336 } 2337 TAILQ_INSERT_TAIL(list, flow, next); 2338 flow_rxq_flags_set(dev, flow); 2339 return flow; 2340 error: 2341 ret = rte_errno; /* Save rte_errno before cleanup. */ 2342 assert(flow); 2343 flow_drv_destroy(dev, flow); 2344 rte_free(flow); 2345 rte_errno = ret; /* Restore rte_errno. */ 2346 return NULL; 2347 } 2348 2349 /** 2350 * Create a flow. 2351 * 2352 * @see rte_flow_create() 2353 * @see rte_flow_ops 2354 */ 2355 struct rte_flow * 2356 mlx5_flow_create(struct rte_eth_dev *dev, 2357 const struct rte_flow_attr *attr, 2358 const struct rte_flow_item items[], 2359 const struct rte_flow_action actions[], 2360 struct rte_flow_error *error) 2361 { 2362 struct mlx5_priv *priv = dev->data->dev_private; 2363 2364 return flow_list_create(dev, &priv->flows, 2365 attr, items, actions, error); 2366 } 2367 2368 /** 2369 * Destroy a flow in a list. 2370 * 2371 * @param dev 2372 * Pointer to Ethernet device. 2373 * @param list 2374 * Pointer to a TAILQ flow list. 2375 * @param[in] flow 2376 * Flow to destroy. 
2377 */ 2378 static void 2379 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, 2380 struct rte_flow *flow) 2381 { 2382 /* 2383 * Update RX queue flags only if port is started, otherwise it is 2384 * already clean. 2385 */ 2386 if (dev->data->dev_started) 2387 flow_rxq_flags_trim(dev, flow); 2388 flow_drv_destroy(dev, flow); 2389 TAILQ_REMOVE(list, flow, next); 2390 rte_free(flow->fdir); 2391 rte_free(flow); 2392 } 2393 2394 /** 2395 * Destroy all flows. 2396 * 2397 * @param dev 2398 * Pointer to Ethernet device. 2399 * @param list 2400 * Pointer to a TAILQ flow list. 2401 */ 2402 void 2403 mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list) 2404 { 2405 while (!TAILQ_EMPTY(list)) { 2406 struct rte_flow *flow; 2407 2408 flow = TAILQ_FIRST(list); 2409 flow_list_destroy(dev, list, flow); 2410 } 2411 } 2412 2413 /** 2414 * Remove all flows. 2415 * 2416 * @param dev 2417 * Pointer to Ethernet device. 2418 * @param list 2419 * Pointer to a TAILQ flow list. 2420 */ 2421 void 2422 mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) 2423 { 2424 struct rte_flow *flow; 2425 2426 TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) 2427 flow_drv_remove(dev, flow); 2428 flow_rxq_flags_clear(dev); 2429 } 2430 2431 /** 2432 * Add all flows. 2433 * 2434 * @param dev 2435 * Pointer to Ethernet device. 2436 * @param list 2437 * Pointer to a TAILQ flow list. 2438 * 2439 * @return 2440 * 0 on success, a negative errno value otherwise and rte_errno is set. 2441 */ 2442 int 2443 mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) 2444 { 2445 struct rte_flow *flow; 2446 struct rte_flow_error error; 2447 int ret = 0; 2448 2449 TAILQ_FOREACH(flow, list, next) { 2450 ret = flow_drv_apply(dev, flow, &error); 2451 if (ret < 0) 2452 goto error; 2453 flow_rxq_flags_set(dev, flow); 2454 } 2455 return 0; 2456 error: 2457 ret = rte_errno; /* Save rte_errno before cleanup. */ 2458 mlx5_flow_stop(dev, list); 2459 rte_errno = ret; /* Restore rte_errno. */ 2460 return -rte_errno; 2461 } 2462 2463 /** 2464 * Verify the flow list is empty 2465 * 2466 * @param dev 2467 * Pointer to Ethernet device. 2468 * 2469 * @return the number of flows not released. 2470 */ 2471 int 2472 mlx5_flow_verify(struct rte_eth_dev *dev) 2473 { 2474 struct mlx5_priv *priv = dev->data->dev_private; 2475 struct rte_flow *flow; 2476 int ret = 0; 2477 2478 TAILQ_FOREACH(flow, &priv->flows, next) { 2479 DRV_LOG(DEBUG, "port %u flow %p still referenced", 2480 dev->data->port_id, (void *)flow); 2481 ++ret; 2482 } 2483 return ret; 2484 } 2485 2486 /** 2487 * Enable a control flow configured from the control plane. 2488 * 2489 * @param dev 2490 * Pointer to Ethernet device. 2491 * @param eth_spec 2492 * An Ethernet flow spec to apply. 2493 * @param eth_mask 2494 * An Ethernet flow mask to apply. 2495 * @param vlan_spec 2496 * A VLAN flow spec to apply. 2497 * @param vlan_mask 2498 * A VLAN flow mask to apply. 2499 * 2500 * @return 2501 * 0 on success, a negative errno value otherwise and rte_errno is set. 
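 *
 * A hedged usage sketch (the broadcast spec/mask and "vlan_id" are assumptions
 * made for illustration; control-plane callers build similar items):
 * @code{.c}
 * struct rte_flow_item_eth bcast = {
 *         .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 * };
 * struct rte_flow_item_vlan vlan_spec = {
 *         .tci = rte_cpu_to_be_16(vlan_id),
 * };
 * struct rte_flow_item_vlan vlan_mask = rte_flow_item_vlan_mask;
 *
 * if (mlx5_ctrl_flow_vlan(dev, &bcast, &bcast, &vlan_spec, &vlan_mask))
 *         return -rte_errno;
 * @endcode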
2502 */ 2503 int 2504 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, 2505 struct rte_flow_item_eth *eth_spec, 2506 struct rte_flow_item_eth *eth_mask, 2507 struct rte_flow_item_vlan *vlan_spec, 2508 struct rte_flow_item_vlan *vlan_mask) 2509 { 2510 struct mlx5_priv *priv = dev->data->dev_private; 2511 const struct rte_flow_attr attr = { 2512 .ingress = 1, 2513 .priority = MLX5_FLOW_PRIO_RSVD, 2514 }; 2515 struct rte_flow_item items[] = { 2516 { 2517 .type = RTE_FLOW_ITEM_TYPE_ETH, 2518 .spec = eth_spec, 2519 .last = NULL, 2520 .mask = eth_mask, 2521 }, 2522 { 2523 .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN : 2524 RTE_FLOW_ITEM_TYPE_END, 2525 .spec = vlan_spec, 2526 .last = NULL, 2527 .mask = vlan_mask, 2528 }, 2529 { 2530 .type = RTE_FLOW_ITEM_TYPE_END, 2531 }, 2532 }; 2533 uint16_t queue[priv->reta_idx_n]; 2534 struct rte_flow_action_rss action_rss = { 2535 .func = RTE_ETH_HASH_FUNCTION_DEFAULT, 2536 .level = 0, 2537 .types = priv->rss_conf.rss_hf, 2538 .key_len = priv->rss_conf.rss_key_len, 2539 .queue_num = priv->reta_idx_n, 2540 .key = priv->rss_conf.rss_key, 2541 .queue = queue, 2542 }; 2543 struct rte_flow_action actions[] = { 2544 { 2545 .type = RTE_FLOW_ACTION_TYPE_RSS, 2546 .conf = &action_rss, 2547 }, 2548 { 2549 .type = RTE_FLOW_ACTION_TYPE_END, 2550 }, 2551 }; 2552 struct rte_flow *flow; 2553 struct rte_flow_error error; 2554 unsigned int i; 2555 2556 if (!priv->reta_idx_n || !priv->rxqs_n) { 2557 return 0; 2558 } 2559 for (i = 0; i != priv->reta_idx_n; ++i) 2560 queue[i] = (*priv->reta_idx)[i]; 2561 flow = flow_list_create(dev, &priv->ctrl_flows, 2562 &attr, items, actions, &error); 2563 if (!flow) 2564 return -rte_errno; 2565 return 0; 2566 } 2567 2568 /** 2569 * Enable a flow control configured from the control plane. 2570 * 2571 * @param dev 2572 * Pointer to Ethernet device. 2573 * @param eth_spec 2574 * An Ethernet flow spec to apply. 2575 * @param eth_mask 2576 * An Ethernet flow mask to apply. 2577 * 2578 * @return 2579 * 0 on success, a negative errno value otherwise and rte_errno is set. 2580 */ 2581 int 2582 mlx5_ctrl_flow(struct rte_eth_dev *dev, 2583 struct rte_flow_item_eth *eth_spec, 2584 struct rte_flow_item_eth *eth_mask) 2585 { 2586 return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL); 2587 } 2588 2589 /** 2590 * Destroy a flow. 2591 * 2592 * @see rte_flow_destroy() 2593 * @see rte_flow_ops 2594 */ 2595 int 2596 mlx5_flow_destroy(struct rte_eth_dev *dev, 2597 struct rte_flow *flow, 2598 struct rte_flow_error *error __rte_unused) 2599 { 2600 struct mlx5_priv *priv = dev->data->dev_private; 2601 2602 flow_list_destroy(dev, &priv->flows, flow); 2603 return 0; 2604 } 2605 2606 /** 2607 * Destroy all flows. 2608 * 2609 * @see rte_flow_flush() 2610 * @see rte_flow_ops 2611 */ 2612 int 2613 mlx5_flow_flush(struct rte_eth_dev *dev, 2614 struct rte_flow_error *error __rte_unused) 2615 { 2616 struct mlx5_priv *priv = dev->data->dev_private; 2617 2618 mlx5_flow_list_flush(dev, &priv->flows); 2619 return 0; 2620 } 2621 2622 /** 2623 * Isolated mode. 
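 *
 * When isolation is enabled, only traffic matching flows explicitly created
 * through the rte_flow API is received and the alternate isolated dev_ops
 * table is installed below. A hedged caller sketch (illustrative only; the
 * port must still be stopped, as enforced by the EBUSY check below):
 * @code{.c}
 * struct rte_flow_error err;
 *
 * if (rte_flow_isolate(port_id, 1, &err))
 *         rte_exit(EXIT_FAILURE, "cannot enter isolated mode: %s\n",
 *                  err.message);
 * if (rte_eth_dev_start(port_id))
 *         rte_exit(EXIT_FAILURE, "cannot start port %u\n",
 *                  (unsigned int)port_id);
 * @endcode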
2624 * 2625 * @see rte_flow_isolate() 2626 * @see rte_flow_ops 2627 */ 2628 int 2629 mlx5_flow_isolate(struct rte_eth_dev *dev, 2630 int enable, 2631 struct rte_flow_error *error) 2632 { 2633 struct mlx5_priv *priv = dev->data->dev_private; 2634 2635 if (dev->data->dev_started) { 2636 rte_flow_error_set(error, EBUSY, 2637 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2638 NULL, 2639 "port must be stopped first"); 2640 return -rte_errno; 2641 } 2642 priv->isolated = !!enable; 2643 if (enable) 2644 dev->dev_ops = &mlx5_dev_ops_isolate; 2645 else 2646 dev->dev_ops = &mlx5_dev_ops; 2647 return 0; 2648 } 2649 2650 /** 2651 * Query a flow. 2652 * 2653 * @see rte_flow_query() 2654 * @see rte_flow_ops 2655 */ 2656 static int 2657 flow_drv_query(struct rte_eth_dev *dev, 2658 struct rte_flow *flow, 2659 const struct rte_flow_action *actions, 2660 void *data, 2661 struct rte_flow_error *error) 2662 { 2663 const struct mlx5_flow_driver_ops *fops; 2664 enum mlx5_flow_drv_type ftype = flow->drv_type; 2665 2666 assert(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX); 2667 fops = flow_get_drv_ops(ftype); 2668 2669 return fops->query(dev, flow, actions, data, error); 2670 } 2671 2672 /** 2673 * Query a flow. 2674 * 2675 * @see rte_flow_query() 2676 * @see rte_flow_ops 2677 */ 2678 int 2679 mlx5_flow_query(struct rte_eth_dev *dev, 2680 struct rte_flow *flow, 2681 const struct rte_flow_action *actions, 2682 void *data, 2683 struct rte_flow_error *error) 2684 { 2685 int ret; 2686 2687 ret = flow_drv_query(dev, flow, actions, data, error); 2688 if (ret < 0) 2689 return ret; 2690 return 0; 2691 } 2692 2693 /** 2694 * Convert a flow director filter to a generic flow. 2695 * 2696 * @param dev 2697 * Pointer to Ethernet device. 2698 * @param fdir_filter 2699 * Flow director filter to add. 2700 * @param attributes 2701 * Generic flow parameters structure. 2702 * 2703 * @return 2704 * 0 on success, a negative errno value otherwise and rte_errno is set. 2705 */ 2706 static int 2707 flow_fdir_filter_convert(struct rte_eth_dev *dev, 2708 const struct rte_eth_fdir_filter *fdir_filter, 2709 struct mlx5_fdir *attributes) 2710 { 2711 struct mlx5_priv *priv = dev->data->dev_private; 2712 const struct rte_eth_fdir_input *input = &fdir_filter->input; 2713 const struct rte_eth_fdir_masks *mask = 2714 &dev->data->dev_conf.fdir_conf.mask; 2715 2716 /* Validate queue number. */ 2717 if (fdir_filter->action.rx_queue >= priv->rxqs_n) { 2718 DRV_LOG(ERR, "port %u invalid queue number %d", 2719 dev->data->port_id, fdir_filter->action.rx_queue); 2720 rte_errno = EINVAL; 2721 return -rte_errno; 2722 } 2723 attributes->attr.ingress = 1; 2724 attributes->items[0] = (struct rte_flow_item) { 2725 .type = RTE_FLOW_ITEM_TYPE_ETH, 2726 .spec = &attributes->l2, 2727 .mask = &attributes->l2_mask, 2728 }; 2729 switch (fdir_filter->action.behavior) { 2730 case RTE_ETH_FDIR_ACCEPT: 2731 attributes->actions[0] = (struct rte_flow_action){ 2732 .type = RTE_FLOW_ACTION_TYPE_QUEUE, 2733 .conf = &attributes->queue, 2734 }; 2735 break; 2736 case RTE_ETH_FDIR_REJECT: 2737 attributes->actions[0] = (struct rte_flow_action){ 2738 .type = RTE_FLOW_ACTION_TYPE_DROP, 2739 }; 2740 break; 2741 default: 2742 DRV_LOG(ERR, "port %u invalid behavior %d", 2743 dev->data->port_id, 2744 fdir_filter->action.behavior); 2745 rte_errno = ENOTSUP; 2746 return -rte_errno; 2747 } 2748 attributes->queue.index = fdir_filter->action.rx_queue; 2749 /* Handle L3. 
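   Only the L3 fields understood by flow director (addresses plus TTL/TOS for
   IPv4, hop limit/next protocol for IPv6) are translated into the generic
   item; the mask comes from dev->data->dev_conf.fdir_conf.mask.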
*/
2750 	switch (fdir_filter->input.flow_type) {
2751 	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
2752 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
2753 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
2754 		attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){
2755 			.src_addr = input->flow.ip4_flow.src_ip,
2756 			.dst_addr = input->flow.ip4_flow.dst_ip,
2757 			.time_to_live = input->flow.ip4_flow.ttl,
2758 			.type_of_service = input->flow.ip4_flow.tos,
2759 		};
2760 		attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){
2761 			.src_addr = mask->ipv4_mask.src_ip,
2762 			.dst_addr = mask->ipv4_mask.dst_ip,
2763 			.time_to_live = mask->ipv4_mask.ttl,
2764 			.type_of_service = mask->ipv4_mask.tos,
2765 			.next_proto_id = mask->ipv4_mask.proto,
2766 		};
2767 		attributes->items[1] = (struct rte_flow_item){
2768 			.type = RTE_FLOW_ITEM_TYPE_IPV4,
2769 			.spec = &attributes->l3,
2770 			.mask = &attributes->l3_mask,
2771 		};
2772 		break;
2773 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
2774 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
2775 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
2776 		attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){
2777 			.hop_limits = input->flow.ipv6_flow.hop_limits,
2778 			.proto = input->flow.ipv6_flow.proto,
2779 		};
2780 
2781 		memcpy(attributes->l3.ipv6.hdr.src_addr,
2782 		       input->flow.ipv6_flow.src_ip,
2783 		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
2784 		memcpy(attributes->l3.ipv6.hdr.dst_addr,
2785 		       input->flow.ipv6_flow.dst_ip,
2786 		       RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
2787 		memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
2788 		       mask->ipv6_mask.src_ip,
2789 		       RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
2790 		memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
2791 		       mask->ipv6_mask.dst_ip,
2792 		       RTE_DIM(attributes->l3_mask.ipv6.hdr.dst_addr));
2793 		attributes->items[1] = (struct rte_flow_item){
2794 			.type = RTE_FLOW_ITEM_TYPE_IPV6,
2795 			.spec = &attributes->l3,
2796 			.mask = &attributes->l3_mask,
2797 		};
2798 		break;
2799 	default:
2800 		DRV_LOG(ERR, "port %u invalid flow type %d",
2801 			dev->data->port_id, fdir_filter->input.flow_type);
2802 		rte_errno = ENOTSUP;
2803 		return -rte_errno;
2804 	}
2805 	/* Handle L4.
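	   Flow director only carries source/destination ports at L4, so the
	   switch below emits a UDP or TCP item for the UDP/TCP flow types and
	   nothing for the OTHER ones.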
*/
2806 	switch (fdir_filter->input.flow_type) {
2807 	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
2808 		attributes->l4.udp.hdr = (struct rte_udp_hdr){
2809 			.src_port = input->flow.udp4_flow.src_port,
2810 			.dst_port = input->flow.udp4_flow.dst_port,
2811 		};
2812 		attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
2813 			.src_port = mask->src_port_mask,
2814 			.dst_port = mask->dst_port_mask,
2815 		};
2816 		attributes->items[2] = (struct rte_flow_item){
2817 			.type = RTE_FLOW_ITEM_TYPE_UDP,
2818 			.spec = &attributes->l4,
2819 			.mask = &attributes->l4_mask,
2820 		};
2821 		break;
2822 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
2823 		attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
2824 			.src_port = input->flow.tcp4_flow.src_port,
2825 			.dst_port = input->flow.tcp4_flow.dst_port,
2826 		};
2827 		attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
2828 			.src_port = mask->src_port_mask,
2829 			.dst_port = mask->dst_port_mask,
2830 		};
2831 		attributes->items[2] = (struct rte_flow_item){
2832 			.type = RTE_FLOW_ITEM_TYPE_TCP,
2833 			.spec = &attributes->l4,
2834 			.mask = &attributes->l4_mask,
2835 		};
2836 		break;
2837 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
2838 		attributes->l4.udp.hdr = (struct rte_udp_hdr){
2839 			.src_port = input->flow.udp6_flow.src_port,
2840 			.dst_port = input->flow.udp6_flow.dst_port,
2841 		};
2842 		attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
2843 			.src_port = mask->src_port_mask,
2844 			.dst_port = mask->dst_port_mask,
2845 		};
2846 		attributes->items[2] = (struct rte_flow_item){
2847 			.type = RTE_FLOW_ITEM_TYPE_UDP,
2848 			.spec = &attributes->l4,
2849 			.mask = &attributes->l4_mask,
2850 		};
2851 		break;
2852 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
2853 		attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
2854 			.src_port = input->flow.tcp6_flow.src_port,
2855 			.dst_port = input->flow.tcp6_flow.dst_port,
2856 		};
2857 		attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
2858 			.src_port = mask->src_port_mask,
2859 			.dst_port = mask->dst_port_mask,
2860 		};
2861 		attributes->items[2] = (struct rte_flow_item){
2862 			.type = RTE_FLOW_ITEM_TYPE_TCP,
2863 			.spec = &attributes->l4,
2864 			.mask = &attributes->l4_mask,
2865 		};
2866 		break;
2867 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
2868 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
2869 		break;
2870 	default:
2871 		DRV_LOG(ERR, "port %u invalid flow type %d",
2872 			dev->data->port_id, fdir_filter->input.flow_type);
2873 		rte_errno = ENOTSUP;
2874 		return -rte_errno;
2875 	}
2876 	return 0;
2877 }
2878 
2879 #define FLOW_FDIR_CMP(f1, f2, fld) \
2880 	memcmp(&(f1)->fld, &(f2)->fld, sizeof(f1->fld))
2881 
2882 /**
2883  * Compare two FDIR flows. If items and actions are identical, the two flows
2884  * are regarded as the same.
2885  *
2888  * @param f1
2889  *   FDIR flow to compare.
2890  * @param f2
2891  *   FDIR flow to compare.
2892  *
2893  * @return
2894  *   Zero on match, 1 otherwise.
2895  */
2896 static int
2897 flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2)
2898 {
2899 	if (FLOW_FDIR_CMP(f1, f2, attr) ||
2900 	    FLOW_FDIR_CMP(f1, f2, l2) ||
2901 	    FLOW_FDIR_CMP(f1, f2, l2_mask) ||
2902 	    FLOW_FDIR_CMP(f1, f2, l3) ||
2903 	    FLOW_FDIR_CMP(f1, f2, l3_mask) ||
2904 	    FLOW_FDIR_CMP(f1, f2, l4) ||
2905 	    FLOW_FDIR_CMP(f1, f2, l4_mask) ||
2906 	    FLOW_FDIR_CMP(f1, f2, actions[0].type))
2907 		return 1;
2908 	if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE &&
2909 	    FLOW_FDIR_CMP(f1, f2, queue))
2910 		return 1;
2911 	return 0;
2912 }
2913 
2914 /**
2915  * Search device flow list to find out a matched FDIR flow.
2916  *
2917  * @param dev
2918  *   Pointer to Ethernet device.
2919 * @param fdir_flow 2920 * FDIR flow to lookup. 2921 * 2922 * @return 2923 * Pointer of flow if found, NULL otherwise. 2924 */ 2925 static struct rte_flow * 2926 flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow) 2927 { 2928 struct mlx5_priv *priv = dev->data->dev_private; 2929 struct rte_flow *flow = NULL; 2930 2931 assert(fdir_flow); 2932 TAILQ_FOREACH(flow, &priv->flows, next) { 2933 if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) { 2934 DRV_LOG(DEBUG, "port %u found FDIR flow %p", 2935 dev->data->port_id, (void *)flow); 2936 break; 2937 } 2938 } 2939 return flow; 2940 } 2941 2942 /** 2943 * Add new flow director filter and store it in list. 2944 * 2945 * @param dev 2946 * Pointer to Ethernet device. 2947 * @param fdir_filter 2948 * Flow director filter to add. 2949 * 2950 * @return 2951 * 0 on success, a negative errno value otherwise and rte_errno is set. 2952 */ 2953 static int 2954 flow_fdir_filter_add(struct rte_eth_dev *dev, 2955 const struct rte_eth_fdir_filter *fdir_filter) 2956 { 2957 struct mlx5_priv *priv = dev->data->dev_private; 2958 struct mlx5_fdir *fdir_flow; 2959 struct rte_flow *flow; 2960 int ret; 2961 2962 fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0); 2963 if (!fdir_flow) { 2964 rte_errno = ENOMEM; 2965 return -rte_errno; 2966 } 2967 ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow); 2968 if (ret) 2969 goto error; 2970 flow = flow_fdir_filter_lookup(dev, fdir_flow); 2971 if (flow) { 2972 rte_errno = EEXIST; 2973 goto error; 2974 } 2975 flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr, 2976 fdir_flow->items, fdir_flow->actions, NULL); 2977 if (!flow) 2978 goto error; 2979 assert(!flow->fdir); 2980 flow->fdir = fdir_flow; 2981 DRV_LOG(DEBUG, "port %u created FDIR flow %p", 2982 dev->data->port_id, (void *)flow); 2983 return 0; 2984 error: 2985 rte_free(fdir_flow); 2986 return -rte_errno; 2987 } 2988 2989 /** 2990 * Delete specific filter. 2991 * 2992 * @param dev 2993 * Pointer to Ethernet device. 2994 * @param fdir_filter 2995 * Filter to be deleted. 2996 * 2997 * @return 2998 * 0 on success, a negative errno value otherwise and rte_errno is set. 2999 */ 3000 static int 3001 flow_fdir_filter_delete(struct rte_eth_dev *dev, 3002 const struct rte_eth_fdir_filter *fdir_filter) 3003 { 3004 struct mlx5_priv *priv = dev->data->dev_private; 3005 struct rte_flow *flow; 3006 struct mlx5_fdir fdir_flow = { 3007 .attr.group = 0, 3008 }; 3009 int ret; 3010 3011 ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow); 3012 if (ret) 3013 return -rte_errno; 3014 flow = flow_fdir_filter_lookup(dev, &fdir_flow); 3015 if (!flow) { 3016 rte_errno = ENOENT; 3017 return -rte_errno; 3018 } 3019 flow_list_destroy(dev, &priv->flows, flow); 3020 DRV_LOG(DEBUG, "port %u deleted FDIR flow %p", 3021 dev->data->port_id, (void *)flow); 3022 return 0; 3023 } 3024 3025 /** 3026 * Update queue for specific filter. 3027 * 3028 * @param dev 3029 * Pointer to Ethernet device. 3030 * @param fdir_filter 3031 * Filter to be updated. 3032 * 3033 * @return 3034 * 0 on success, a negative errno value otherwise and rte_errno is set. 3035 */ 3036 static int 3037 flow_fdir_filter_update(struct rte_eth_dev *dev, 3038 const struct rte_eth_fdir_filter *fdir_filter) 3039 { 3040 int ret; 3041 3042 ret = flow_fdir_filter_delete(dev, fdir_filter); 3043 if (ret) 3044 return ret; 3045 return flow_fdir_filter_add(dev, fdir_filter); 3046 } 3047 3048 /** 3049 * Flush all filters. 3050 * 3051 * @param dev 3052 * Pointer to Ethernet device. 
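 *
 * @note The FDIR flows share &priv->flows with the rte_flow API, so this
 *   flush removes every flow on that list, not only the flow director ones.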
3053 */ 3054 static void 3055 flow_fdir_filter_flush(struct rte_eth_dev *dev) 3056 { 3057 struct mlx5_priv *priv = dev->data->dev_private; 3058 3059 mlx5_flow_list_flush(dev, &priv->flows); 3060 } 3061 3062 /** 3063 * Get flow director information. 3064 * 3065 * @param dev 3066 * Pointer to Ethernet device. 3067 * @param[out] fdir_info 3068 * Resulting flow director information. 3069 */ 3070 static void 3071 flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) 3072 { 3073 struct rte_eth_fdir_masks *mask = 3074 &dev->data->dev_conf.fdir_conf.mask; 3075 3076 fdir_info->mode = dev->data->dev_conf.fdir_conf.mode; 3077 fdir_info->guarant_spc = 0; 3078 rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask)); 3079 fdir_info->max_flexpayload = 0; 3080 fdir_info->flow_types_mask[0] = 0; 3081 fdir_info->flex_payload_unit = 0; 3082 fdir_info->max_flex_payload_segment_num = 0; 3083 fdir_info->flex_payload_limit = 0; 3084 memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf)); 3085 } 3086 3087 /** 3088 * Deal with flow director operations. 3089 * 3090 * @param dev 3091 * Pointer to Ethernet device. 3092 * @param filter_op 3093 * Operation to perform. 3094 * @param arg 3095 * Pointer to operation-specific structure. 3096 * 3097 * @return 3098 * 0 on success, a negative errno value otherwise and rte_errno is set. 3099 */ 3100 static int 3101 flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, 3102 void *arg) 3103 { 3104 enum rte_fdir_mode fdir_mode = 3105 dev->data->dev_conf.fdir_conf.mode; 3106 3107 if (filter_op == RTE_ETH_FILTER_NOP) 3108 return 0; 3109 if (fdir_mode != RTE_FDIR_MODE_PERFECT && 3110 fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 3111 DRV_LOG(ERR, "port %u flow director mode %d not supported", 3112 dev->data->port_id, fdir_mode); 3113 rte_errno = EINVAL; 3114 return -rte_errno; 3115 } 3116 switch (filter_op) { 3117 case RTE_ETH_FILTER_ADD: 3118 return flow_fdir_filter_add(dev, arg); 3119 case RTE_ETH_FILTER_UPDATE: 3120 return flow_fdir_filter_update(dev, arg); 3121 case RTE_ETH_FILTER_DELETE: 3122 return flow_fdir_filter_delete(dev, arg); 3123 case RTE_ETH_FILTER_FLUSH: 3124 flow_fdir_filter_flush(dev); 3125 break; 3126 case RTE_ETH_FILTER_INFO: 3127 flow_fdir_info_get(dev, arg); 3128 break; 3129 default: 3130 DRV_LOG(DEBUG, "port %u unknown operation %u", 3131 dev->data->port_id, filter_op); 3132 rte_errno = EINVAL; 3133 return -rte_errno; 3134 } 3135 return 0; 3136 } 3137 3138 /** 3139 * Manage filter operations. 3140 * 3141 * @param dev 3142 * Pointer to Ethernet device structure. 3143 * @param filter_type 3144 * Filter type. 3145 * @param filter_op 3146 * Operation to perform. 3147 * @param arg 3148 * Pointer to operation-specific structure. 3149 * 3150 * @return 3151 * 0 on success, a negative errno value otherwise and rte_errno is set. 
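 *
 * A hedged example of how an application reaches this entry point through the
 * legacy ethdev filter API (illustrative only; "port_id" is assumed):
 * @code{.c}
 * const struct rte_flow_ops *ops = NULL;
 * int rc = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *                                  RTE_ETH_FILTER_GET, &ops);
 *
 * // On success rc == 0 and ops points to mlx5_flow_ops.
 * @endcode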
3152 */ 3153 int 3154 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, 3155 enum rte_filter_type filter_type, 3156 enum rte_filter_op filter_op, 3157 void *arg) 3158 { 3159 switch (filter_type) { 3160 case RTE_ETH_FILTER_GENERIC: 3161 if (filter_op != RTE_ETH_FILTER_GET) { 3162 rte_errno = EINVAL; 3163 return -rte_errno; 3164 } 3165 *(const void **)arg = &mlx5_flow_ops; 3166 return 0; 3167 case RTE_ETH_FILTER_FDIR: 3168 return flow_fdir_ctrl_func(dev, filter_op, arg); 3169 default: 3170 DRV_LOG(ERR, "port %u filter type (%d) not supported", 3171 dev->data->port_id, filter_type); 3172 rte_errno = ENOTSUP; 3173 return -rte_errno; 3174 } 3175 return 0; 3176 } 3177 3178 #define MLX5_POOL_QUERY_FREQ_US 1000000 3179 3180 /** 3181 * Set the periodic procedure for triggering asynchronous batch queries for all 3182 * the counter pools. 3183 * 3184 * @param[in] sh 3185 * Pointer to mlx5_ibv_shared object. 3186 */ 3187 void 3188 mlx5_set_query_alarm(struct mlx5_ibv_shared *sh) 3189 { 3190 struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(sh, 0, 0); 3191 uint32_t pools_n = rte_atomic16_read(&cont->n_valid); 3192 uint32_t us; 3193 3194 cont = MLX5_CNT_CONTAINER(sh, 1, 0); 3195 pools_n += rte_atomic16_read(&cont->n_valid); 3196 us = MLX5_POOL_QUERY_FREQ_US / pools_n; 3197 DRV_LOG(DEBUG, "Set alarm for %u pools each %u us\n", pools_n, us); 3198 if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) { 3199 sh->cmng.query_thread_on = 0; 3200 DRV_LOG(ERR, "Cannot reinitialize query alarm\n"); 3201 } else { 3202 sh->cmng.query_thread_on = 1; 3203 } 3204 } 3205 3206 /** 3207 * The periodic procedure for triggering asynchronous batch queries for all the 3208 * counter pools. This function is probably called by the host thread. 3209 * 3210 * @param[in] arg 3211 * The parameter for the alarm process. 3212 */ 3213 void 3214 mlx5_flow_query_alarm(void *arg) 3215 { 3216 struct mlx5_ibv_shared *sh = arg; 3217 struct mlx5_devx_obj *dcs; 3218 uint16_t offset; 3219 int ret; 3220 uint8_t batch = sh->cmng.batch; 3221 uint16_t pool_index = sh->cmng.pool_index; 3222 struct mlx5_pools_container *cont; 3223 struct mlx5_pools_container *mcont; 3224 struct mlx5_flow_counter_pool *pool; 3225 3226 if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES) 3227 goto set_alarm; 3228 next_container: 3229 cont = MLX5_CNT_CONTAINER(sh, batch, 1); 3230 mcont = MLX5_CNT_CONTAINER(sh, batch, 0); 3231 /* Check if resize was done and need to flip a container. */ 3232 if (cont != mcont) { 3233 if (cont->pools) { 3234 /* Clean the old container. */ 3235 rte_free(cont->pools); 3236 memset(cont, 0, sizeof(*cont)); 3237 } 3238 rte_cio_wmb(); 3239 /* Flip the host container. */ 3240 sh->cmng.mhi[batch] ^= (uint8_t)2; 3241 cont = mcont; 3242 } 3243 if (!cont->pools) { 3244 /* 2 empty containers case is unexpected. */ 3245 if (unlikely(batch != sh->cmng.batch)) 3246 goto set_alarm; 3247 batch ^= 0x1; 3248 pool_index = 0; 3249 goto next_container; 3250 } 3251 pool = cont->pools[pool_index]; 3252 if (pool->raw_hw) 3253 /* There is a pool query in progress. */ 3254 goto set_alarm; 3255 pool->raw_hw = 3256 LIST_FIRST(&sh->cmng.free_stat_raws); 3257 if (!pool->raw_hw) 3258 /* No free counter statistics raw memory. */ 3259 goto set_alarm; 3260 dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read 3261 (&pool->a64_dcs); 3262 offset = batch ? 
	    0 : dcs->id % MLX5_COUNTERS_PER_POOL;
3263 	ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL -
3264 					       offset, NULL, NULL,
3265 					       pool->raw_hw->mem_mng->dm->id,
3266 					       (void *)(uintptr_t)
3267 					       (pool->raw_hw->data + offset),
3268 					       sh->devx_comp,
3269 					       (uint64_t)(uintptr_t)pool);
3270 	if (ret) {
3271 		DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
3272 			" %d\n", pool->min_dcs->id);
3273 		pool->raw_hw = NULL;
3274 		goto set_alarm;
3275 	}
3276 	pool->raw_hw->min_dcs_id = dcs->id;
3277 	LIST_REMOVE(pool->raw_hw, next);
3278 	sh->cmng.pending_queries++;
3279 	pool_index++;
3280 	if (pool_index >= rte_atomic16_read(&cont->n_valid)) {
3281 		batch ^= 0x1;
3282 		pool_index = 0;
3283 	}
3284 set_alarm:
3285 	sh->cmng.batch = batch;
3286 	sh->cmng.pool_index = pool_index;
3287 	mlx5_set_query_alarm(sh);
3288 }
3289 
3290 /**
3291  * Handler for the HW response with the ready values from an asynchronous batch
3292  * query. This function is expected to be called by the host thread.
3293  *
3294  * @param[in] sh
3295  *   The pointer to the shared IB device context.
3296  * @param[in] async_id
3297  *   The Devx async ID.
3298  * @param[in] status
3299  *   The status of the completion.
3300  */
3301 void
3302 mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh,
3303 				  uint64_t async_id, int status)
3304 {
3305 	struct mlx5_flow_counter_pool *pool =
3306 		(struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
3307 	struct mlx5_counter_stats_raw *raw_to_free;
3308 
3309 	if (unlikely(status)) {
3310 		raw_to_free = pool->raw_hw;
3311 	} else {
3312 		raw_to_free = pool->raw;
3313 		rte_spinlock_lock(&pool->sl);
3314 		pool->raw = pool->raw_hw;
3315 		rte_spinlock_unlock(&pool->sl);
3316 		rte_atomic64_add(&pool->query_gen, 1);
3317 		/* Be sure the new raw counters data is updated in memory. */
3318 		rte_cio_wmb();
3319 	}
3320 	LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
3321 	pool->raw_hw = NULL;
3322 	sh->cmng.pending_queries--;
3323 }
3324 