/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"

/* Dev ops structure defined in mlx5.c */
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;

/** Device flow drivers. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;
#endif
extern const struct mlx5_flow_driver_ops mlx5_flow_tcf_drv_ops;
extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;

const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;

const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
	[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
#endif
	[MLX5_FLOW_TYPE_TCF] = &mlx5_flow_tcf_drv_ops,
	[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
	[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
};

enum mlx5_expansion {
	MLX5_EXPANSION_ROOT,
	MLX5_EXPANSION_ROOT_OUTER,
	MLX5_EXPANSION_ROOT_ETH_VLAN,
	MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_ETH,
	MLX5_EXPANSION_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_VLAN,
	MLX5_EXPANSION_OUTER_IPV4,
	MLX5_EXPANSION_OUTER_IPV4_UDP,
	MLX5_EXPANSION_OUTER_IPV4_TCP,
	MLX5_EXPANSION_OUTER_IPV6,
	MLX5_EXPANSION_OUTER_IPV6_UDP,
	MLX5_EXPANSION_OUTER_IPV6_TCP,
	MLX5_EXPANSION_VXLAN,
	MLX5_EXPANSION_VXLAN_GPE,
	MLX5_EXPANSION_GRE,
	MLX5_EXPANSION_MPLS,
	MLX5_EXPANSION_ETH,
	MLX5_EXPANSION_ETH_VLAN,
	MLX5_EXPANSION_VLAN,
	MLX5_EXPANSION_IPV4,
	MLX5_EXPANSION_IPV4_UDP,
	MLX5_EXPANSION_IPV4_TCP,
	MLX5_EXPANSION_IPV6,
	MLX5_EXPANSION_IPV6_UDP,
	MLX5_EXPANSION_IPV6_TCP,
};

/** Supported expansion of items. */
static const struct rte_flow_expand_node mlx5_support_expansion[] = {
	[MLX5_EXPANSION_ROOT] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						 MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
						 MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_OUTER_ETH] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6,
						 MLX5_EXPANSION_MPLS),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_OUTER_IPV4] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV4_UDP,
			 MLX5_EXPANSION_OUTER_IPV4_TCP,
			 MLX5_EXPANSION_GRE),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						 MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_OUTER_IPV6] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV6_UDP,
			 MLX5_EXPANSION_OUTER_IPV6_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						 MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
	[MLX5_EXPANSION_VXLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
	[MLX5_EXPANSION_VXLAN_GPE] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						 MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
	},
	[MLX5_EXPANSION_GRE] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
		.type = RTE_FLOW_ITEM_TYPE_GRE,
	},
	[MLX5_EXPANSION_MPLS] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_MPLS,
	},
	[MLX5_EXPANSION_ETH] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_IPV4] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
						 MLX5_EXPANSION_IPV4_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_IPV4_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_IPV6] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
						 MLX5_EXPANSION_IPV6_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_IPV6_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
};
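
/*
 * Illustrative sketch, not part of the driver build (the guard macro below
 * is never defined): it shows how the expansion graph above is consumed,
 * mirroring the rte_flow_expand_rss() call made in mlx5_flow_list_create()
 * further below. The pattern, RSS type and buffer size are made up.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static void
example_expand_eth_ipv4(void)
{
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	union {
		struct rte_flow_expand_rss buf;
		uint8_t buffer[2048];
	} expand_buffer;
	int ret;

	/*
	 * With ETH_RSS_UDP requested, the ETH/IPV4 pattern expands into
	 * additional entries ending with a UDP item, one per path reachable
	 * from MLX5_EXPANSION_ROOT in mlx5_support_expansion[].
	 */
	ret = rte_flow_expand_rss(&expand_buffer.buf,
				  sizeof(expand_buffer.buffer),
				  pattern, ETH_RSS_UDP,
				  mlx5_support_expansion,
				  MLX5_EXPANSION_ROOT);
	(void)ret;
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */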

static const struct rte_flow_ops mlx5_flow_ops = {
	.validate = mlx5_flow_validate,
	.create = mlx5_flow_create,
	.destroy = mlx5_flow_destroy,
	.flush = mlx5_flow_flush,
	.isolate = mlx5_flow_isolate,
	.query = mlx5_flow_query,
};

/* Convert FDIR request to Generic flow. */
struct mlx5_fdir {
	struct rte_flow_attr attr;
	struct rte_flow_action actions[2];
	struct rte_flow_item items[4];
	struct rte_flow_item_eth l2;
	struct rte_flow_item_eth l2_mask;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3_mask;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4_mask;
	struct rte_flow_action_queue queue;
};

/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};

/* Map of Verbs to Flow priority with 16 Verbs priorities. */
static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
	{ 9, 10, 11 }, { 12, 13, 14 },
};

/* Tunnel information. */
struct mlx5_flow_tunnel_info {
	uint32_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
};

static struct mlx5_flow_tunnel_info tunnels_info[] = {
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GRE,
		.ptype = RTE_PTYPE_TUNNEL_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
	},
};
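
/*
 * Illustrative sketch, not part of the driver build (the guard macro is
 * never defined): it shows how the priority maps above are indexed, the way
 * mlx5_flow_adjust_priority() does later in this file. The indexes used
 * here are arbitrary values chosen for the example.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static uint32_t
example_priority_lookup(void)
{
	/*
	 * With 16 Verbs priorities available (priority_map_5), a rule of
	 * base priority 1 whose items yield sub-priority 0 is placed on
	 * Verbs priority 3.
	 */
	return priority_map_5[1][0];
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */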

/**
 * Discover the maximum number of priorities available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
	uint16_t vprio[] = { 8, 16 };
	int i;
	int priority = 0;

	if (!drop) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	for (i = 0; i != RTE_DIM(vprio); i++) {
		flow_attr.attr.priority = vprio[i] - 1;
		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
		priority = vprio[i];
	}
	switch (priority) {
	case 8:
		priority = RTE_DIM(priority_map_3);
		break;
	case 16:
		priority = RTE_DIM(priority_map_5);
		break;
	default:
		rte_errno = ENOTSUP;
		DRV_LOG(ERR,
			"port %u verbs maximum priority: %d expected 8/16",
			dev->data->port_id, vprio[i]);
		/* Release the drop queue created above before bailing out. */
		mlx5_hrxq_drop_release(dev);
		return -rte_errno;
	}
	mlx5_hrxq_drop_release(dev);
	DRV_LOG(INFO, "port %u flow maximum priority: %d",
		dev->data->port_id, priority);
	return priority;
}

/**
 * Adjust flow priority based on the highest layer and the request priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] priority
 *   The rule base priority.
 * @param[in] subpriority
 *   The priority based on the items.
 *
 * @return
 *   The new priority.
 */
uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
				   uint32_t subpriority)
{
	uint32_t res = 0;
	struct priv *priv = dev->data->dev_private;

	switch (priv->config.flow_prio) {
	case RTE_DIM(priority_map_3):
		res = priority_map_3[priority][subpriority];
		break;
	case RTE_DIM(priority_map_5):
		res = priority_map_5[priority][subpriority];
		break;
	}
	return res;
}

/**
 * Verify the @p item specifications (spec, last, mask) are compatible with the
 * NIC capabilities.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] mask
 *   @p item->mask or flow default bit-masks.
 * @param[in] nic_mask
 *   Bit-masks covering supported fields by the NIC to compare with user mask.
 * @param[in] size
 *   Bit-masks size in bytes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
			  const uint8_t *mask,
			  const uint8_t *nic_mask,
			  unsigned int size,
			  struct rte_flow_error *error)
{
	unsigned int i;

	assert(nic_mask);
	for (i = 0; i < size; ++i)
		if ((nic_mask[i] | mask[i]) != nic_mask[i])
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "mask enables non supported"
						  " bits");
	if (!item->spec && (item->mask || item->last))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "mask/last without a spec is not"
					  " supported");
	if (item->spec && item->last) {
		uint8_t spec[size];
		uint8_t last[size];
		unsigned int i;
		int ret;

		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
			last[i] = ((const uint8_t *)item->last)[i] & mask[i];
		}
		ret = memcmp(spec, last, size);
		if (ret != 0)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "range is not valid");
	}
	return 0;
}

/**
 * Adjust the hash fields according to the @p flow information.
 *
 * @param[in] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] tunnel
 *   1 when the hash field is for a tunnel item.
 * @param[in] layer_types
 *   ETH_RSS_* types.
 * @param[in] hash_fields
 *   Item hash fields.
 *
 * @return
 *   The hash fields that should be used.
 */
uint64_t
mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
			    int tunnel __rte_unused, uint32_t layer_types,
			    uint64_t hash_fields)
{
	struct rte_flow *flow = dev_flow->flow;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	int rss_request_inner = flow->rss.level >= 2;

	/* Check RSS hash level for tunnel. */
	if (tunnel && rss_request_inner)
		hash_fields |= IBV_RX_HASH_INNER;
	else if (tunnel || rss_request_inner)
		return 0;
#endif
	/* Check if requested layer matches RSS hash fields. */
	if (!(flow->rss.types & layer_types))
		return 0;
	return hash_fields;
}

/**
 * Look up and set the ptype in the Rx queue data. Only a single ptype can be
 * used; if several tunnel rules are used on this queue, the tunnel ptype is
 * cleared.
 *
 * @param rxq_ctrl
 *   Rx queue to update.
 */
static void
mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	unsigned int i;
	uint32_t tunnel_ptype = 0;

	/* Look up for the ptype to use. */
	for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
		if (!rxq_ctrl->flow_tunnels_n[i])
			continue;
		if (!tunnel_ptype) {
			tunnel_ptype = tunnels_info[i].ptype;
		} else {
			tunnel_ptype = 0;
			break;
		}
	}
	rxq_ctrl->rxq.tunnel = tunnel_ptype;
}
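
/*
 * Illustrative sketch, not part of the driver build (the guard macro is
 * never defined): it shows how a user mask is checked against a NIC
 * capability mask with mlx5_flow_item_acceptable(), the way the item
 * validators later in this file do. The helper name and the chosen NIC
 * mask are made up for the example.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static int
example_check_eth_item(const struct rte_flow_item *item,
		       struct rte_flow_error *error)
{
	const struct rte_flow_item_eth nic_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.type = RTE_BE16(0xffff),
	};
	const struct rte_flow_item_eth *mask =
		item->mask ? item->mask : &rte_flow_item_eth_mask;

	/* Fails with ENOTSUP if the mask enables bits the NIC cannot match. */
	return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					 (const uint8_t *)&nic_mask,
					 sizeof(nic_mask), error);
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */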

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Pointer to flow structure.
 */
static void
mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct priv *priv = dev->data->dev_private;
	const int mark = !!(flow->actions &
			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
	const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	for (i = 0; i != flow->rss.queue_num; ++i) {
		int idx = (*flow->queue)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		if (mark) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n++;
		}
		if (tunnel) {
			unsigned int j;

			/* Increase the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel & flow->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]++;
					break;
				}
			}
			mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * @p flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the flow.
 */
static void
mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct priv *priv = dev->data->dev_private;
	const int mark = !!(flow->actions &
			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
	const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	assert(dev->data->dev_started);
	for (i = 0; i != flow->rss.queue_num; ++i) {
		int idx = (*flow->queue)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		if (mark) {
			rxq_ctrl->flow_mark_n--;
			rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
		}
		if (tunnel) {
			unsigned int j;

			/* Decrease the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel & flow->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]--;
					break;
				}
			}
			mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl;
		unsigned int j;

		if (!(*priv->rxqs)[i])
			continue;
		rxq_ctrl = container_of((*priv->rxqs)[i],
					struct mlx5_rxq_ctrl, rxq);
		rxq_ctrl->flow_mark_n = 0;
		rxq_ctrl->rxq.mark = 0;
		for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
			rxq_ctrl->flow_tunnels_n[j] = 0;
		rxq_ctrl->rxq.tunnel = 0;
	}
}

/**
 * Validate the flag action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_flag(uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't mark and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 flag"
					  " actions in same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "flag action not supported for "
					  "egress");
	return 0;
}

/**
 * Validate the mark action.
 *
 * @param[in] action
 *   Pointer to the mark action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
			       uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	const struct rte_flow_action_mark *mark = action->conf;

	if (!mark)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "configuration cannot be null");
	if (mark->id >= MLX5_FLOW_MARK_MAX)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &mark->id,
					  "mark id must be in 0 <= id < "
					  RTE_STR(MLX5_FLOW_MARK_MAX));
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and mark in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't flag and mark in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 mark actions in same"
					  " flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "mark action not supported for "
					  "egress");
	return 0;
}

/**
 * Validate the drop action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_drop(uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and mark in same flow");
	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "drop action not supported for "
					  "egress");
	return 0;
}

/**
 * Validate the queue action.
 *
 * @param[in] action
 *   Pointer to the queue action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
				uint64_t action_flags,
				struct rte_eth_dev *dev,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	const struct rte_flow_action_queue *queue = action->conf;

	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (queue->index >= priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue index out of range");
	if (!(*priv->rxqs)[queue->index])
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue is not configured");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "queue action not supported for "
					  "egress");
	return 0;
}

/**
 * Validate the rss action.
 *
 * @param[in] action
 *   Pointer to the rss action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
			      uint64_t action_flags,
			      struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attr,
			      struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	const struct rte_flow_action_rss *rss = action->conf;
	unsigned int i;

	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions"
					  " in same flow");
	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
	    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->func,
					  "RSS hash function not supported");
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (rss->level > 2)
#else
	if (rss->level > 1)
#endif
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->level,
					  "tunnel RSS is not supported");
	if (rss->key_len < MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too small");
	if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too large");
	if (rss->queue_num > priv->config.ind_table_max_size)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->queue_num,
					  "number of queues too large");
	if (rss->types & MLX5_RSS_HF_MASK)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->types,
					  "some RSS protocols are not"
					  " supported");
	for (i = 0; i != rss->queue_num; ++i) {
		if (!(*priv->rxqs)[rss->queue[i]])
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 &rss->queue[i], "queue is not configured");
	}
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "rss action not supported for "
					  "egress");
	return 0;
}

/**
 * Validate the count action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_count(struct rte_eth_dev *dev,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	if (!priv->config.flow_counter_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "flow counters are not supported.");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "count action not supported for "
					  "egress");
	return 0;
}
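
/*
 * Illustrative sketch, not part of the driver build (the guard macro is
 * never defined): it shows how the action validators above are meant to be
 * chained, accumulating MLX5_FLOW_ACTION_* bits so that conflicting
 * combinations (e.g. two fate actions) are rejected. The two-element action
 * array and the helper name are made up for the example.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static int
example_validate_mark_then_queue(struct rte_eth_dev *dev,
				 const struct rte_flow_attr *attr,
				 const struct rte_flow_action actions[2],
				 struct rte_flow_error *error)
{
	uint64_t action_flags = 0;
	int ret;

	ret = mlx5_flow_validate_action_mark(&actions[0], action_flags,
					     attr, error);
	if (ret < 0)
		return ret;
	action_flags |= MLX5_FLOW_ACTION_MARK;
	/* A later DROP action would now fail the checks above. */
	return mlx5_flow_validate_action_queue(&actions[1], action_flags,
					       dev, attr, error);
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */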

/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attributes,
			      struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	uint32_t priority_max = priv->config.flow_prio - 1;

	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL, "groups is not supported");
	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
	    attributes->priority >= priority_max)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL, "priority out of range");
	if (attributes->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "egress is not supported");
	if (attributes->transfer)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  NULL, "transfer is not supported");
	if (!attributes->ingress)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "ingress attribute is mandatory");
	return 0;
}

/**
 * Validate Ethernet item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_eth *mask = item->mask;
	const struct rte_flow_item_eth nic_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.type = RTE_BE16(0xffff),
	};
	int ret;
	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);

	if (item_flags & MLX5_FLOW_LAYER_OUTER_L2)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "3 levels of l2 are not supported");
	if ((item_flags & MLX5_FLOW_LAYER_INNER_L2) && !tunnel)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "2 L2 without tunnel are not"
					  " supported");
	if (!mask)
		mask = &rte_flow_item_eth_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_eth),
					error);
	return ret;
}

/**
 * Validate VLAN item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
			     int64_t item_flags,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	const struct rte_flow_item_vlan nic_mask = {
		.tci = RTE_BE16(0x0fff),
		.inner_type = RTE_BE16(0xffff),
	};
	uint16_t vlan_tag = 0;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	int ret;
	const uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
					MLX5_FLOW_LAYER_INNER_L4) :
				       (MLX5_FLOW_LAYER_OUTER_L3 |
					MLX5_FLOW_LAYER_OUTER_L4);
	const uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
					MLX5_FLOW_LAYER_OUTER_VLAN;

	if (item_flags & vlanm)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VLAN layer already configured");
	else if ((item_flags & l34m) != 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L2 layer cannot follow L3/L4 layer");
	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_vlan),
					error);
	if (ret)
		return ret;
	if (spec) {
		vlan_tag = spec->tci;
		vlan_tag &= mask->tci;
	}
	/*
	 * From verbs perspective an empty VLAN is equivalent
	 * to a packet without VLAN layer.
	 */
	if (!vlan_tag)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "VLAN cannot be empty");
	return 0;
}

/**
 * Validate IPV4 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
			     int64_t item_flags,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *mask = item->mask;
	const struct rte_flow_item_ipv4 nic_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.next_proto_id = 0xff,
		},
	};
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	int ret;

	if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				   MLX5_FLOW_LAYER_OUTER_L3))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L3 layers not supported");
	else if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				       MLX5_FLOW_LAYER_OUTER_L4))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an L4 layer.");
	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_ipv4),
					error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate IPV6 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv6 *mask = item->mask;
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
			.hop_limits = 0xff,
		},
	};
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	int ret;

	if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				   MLX5_FLOW_LAYER_OUTER_L3))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L3 layers not supported");
	else if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				       MLX5_FLOW_LAYER_OUTER_L4))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an L4 layer.");
	/*
	 * IPv6 is not recognised by the NIC inside a GRE tunnel.
	 * Such support has to be disabled as the rule would be accepted
	 * but never match. Issue reproduced with Mellanox OFED 4.3-3.0.2.1
	 * and Mellanox OFED 4.4-1.0.0.0.
	 */
	if (tunnel && item_flags & MLX5_FLOW_LAYER_GRE)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv6 inside a GRE tunnel is"
					  " not recognised.");
	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_ipv6),
					error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate UDP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    uint8_t target_protocol,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_udp *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	int ret;

	if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with UDP layer");
	if (!(item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				     MLX5_FLOW_LAYER_OUTER_L3)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 is mandatory to filter on L4");
	if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				   MLX5_FLOW_LAYER_OUTER_L4))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L4 layer is already present");
	if (!mask)
		mask = &rte_flow_item_udp_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_udp_mask,
		 sizeof(struct rte_flow_item_udp), error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate TCP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[in] flow_mask
 *   mlx5 flow-specific (TCF, DV, verbs, etc.) supported header fields mask.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    uint8_t target_protocol,
			    const struct rte_flow_item_tcp *flow_mask,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_tcp *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	int ret;

	assert(flow_mask);
	if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with TCP layer");
	if (!(item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				     MLX5_FLOW_LAYER_OUTER_L3)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 is mandatory to filter on L4");
	if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				   MLX5_FLOW_LAYER_OUTER_L4))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L4 layer is already present");
	if (!mask)
		mask = &rte_flow_item_tcp_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)flow_mask,
		 sizeof(struct rte_flow_item_tcp), error);
	if (ret < 0)
		return ret;
	return 0;
}
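
/*
 * Illustrative sketch, not part of the driver build (the guard macro is
 * never defined): one way the target_protocol argument of the L4
 * validators above can be derived from the preceding L3 item. The helper
 * name is hypothetical and only covers a masked IPv4 spec; the actual
 * derivation lives in the per-driver translation code.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static uint8_t
example_ipv4_next_proto(const struct rte_flow_item *ipv4_item)
{
	const struct rte_flow_item_ipv4 *spec = ipv4_item->spec;
	const struct rte_flow_item_ipv4 *mask =
		ipv4_item->mask ? ipv4_item->mask : &rte_flow_item_ipv4_mask;

	/* 0xff means "no protocol constraint" for the L4 validators. */
	if (!spec || !mask->hdr.next_proto_id)
		return 0xff;
	return spec->hdr.next_proto_id & mask->hdr.next_proto_id;
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */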

/**
 * Validate VXLAN item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
			      uint64_t item_flags,
			      struct rte_flow_error *error)
{
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	int ret;
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };
	uint32_t vlan_id = 0;

	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "a tunnel is already present");
	/*
	 * Verify only UDPv4 is present as defined in
	 * https://tools.ietf.org/html/rfc7348
	 */
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "no outer UDP layer found");
	if (!mask)
		mask = &rte_flow_item_vxlan_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_vxlan_mask,
		 sizeof(struct rte_flow_item_vxlan),
		 error);
	if (ret < 0)
		return ret;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vlan_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vlan_id &= id.vlan_id;
	}
	/*
	 * Tunnel id 0 is equivalent to not adding a VXLAN layer; if only
	 * this layer is defined in the Verbs specification it is
	 * interpreted as a wildcard and all packets will match this rule;
	 * if it follows a full stack layer (ex: eth / ipv4 / udp), all
	 * packets matching the layers before will also match this rule.
	 * To avoid such a situation, VNI 0 is currently refused.
	 */
	if (!vlan_id)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VXLAN vni cannot be 0");
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VXLAN tunnel must be fully defined");
	return 0;
}
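
/*
 * Illustrative sketch, not part of the driver build (the guard macro is
 * never defined): the 24-bit VNI to 32-bit host value conversion used by
 * the VXLAN validators in this file. The helper name is hypothetical.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static uint32_t
example_vni_to_u32(const uint8_t vni[3])
{
	union {
		uint32_t value;
		uint8_t bytes[4];
	} id = { .value = 0, };

	/* Place the 3 VNI bytes after the most significant zero byte. */
	memcpy(&id.bytes[1], vni, 3);
	return id.value;
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */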

/**
 * Validate VXLAN_GPE item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
				  uint64_t item_flags,
				  struct rte_eth_dev *dev,
				  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
	int ret;
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };
	uint32_t vlan_id = 0;

	if (!priv->config.l3_vxlan_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 VXLAN is not enabled by device"
					  " parameter and/or not configured in"
					  " firmware");
	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "a tunnel is already present");
	/*
	 * Verify only UDPv4 is present as defined in
	 * https://tools.ietf.org/html/rfc7348
	 */
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "no outer UDP layer found");
	if (!mask)
		mask = &rte_flow_item_vxlan_gpe_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
		 sizeof(struct rte_flow_item_vxlan_gpe),
		 error);
	if (ret < 0)
		return ret;
	if (spec) {
		if (spec->protocol)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "VxLAN-GPE protocol"
						  " not supported");
		memcpy(&id.vni[1], spec->vni, 3);
		vlan_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vlan_id &= id.vlan_id;
	}
	/*
	 * Tunnel id 0 is equivalent to not adding a VXLAN layer; if only this
	 * layer is defined in the Verbs specification it is interpreted as a
	 * wildcard and all packets will match this rule; if it follows a full
	 * stack layer (ex: eth / ipv4 / udp), all packets matching the layers
	 * before will also match this rule. To avoid such a situation, VNI 0
	 * is currently refused.
	 */
	if (!vlan_id)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VXLAN-GPE vni cannot be 0");
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VXLAN-GPE tunnel must be fully"
					  " defined");
	return 0;
}

/**
 * Validate GRE item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit flags to mark detected items.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    uint8_t target_protocol,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_gre *spec __rte_unused = item->spec;
	const struct rte_flow_item_gre *mask = item->mask;
	int ret;

	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with this GRE layer");
	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "a tunnel is already present");
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 Layer is missing");
	if (!mask)
		mask = &rte_flow_item_gre_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_gre_mask,
		 sizeof(struct rte_flow_item_gre), error);
	if (ret < 0)
		return ret;
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
	if (spec && (spec->protocol & mask->protocol))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "without MPLS support the"
					  " specification cannot be used for"
					  " filtering");
#endif
	return 0;
}

/**
 * Validate MPLS item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_mpls(const struct rte_flow_item *item __rte_unused,
			     uint64_t item_flags __rte_unused,
			     uint8_t target_protocol __rte_unused,
			     struct rte_flow_error *error)
{
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	const struct rte_flow_item_mpls *mask = item->mask;
	int ret;

	if (target_protocol != 0xff && target_protocol != IPPROTO_MPLS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with MPLS layer");
	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "a tunnel is already"
					  " present");
	if (!mask)
		mask = &rte_flow_item_mpls_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_mpls_mask,
		 sizeof(struct rte_flow_item_mpls), error);
	if (ret < 0)
		return ret;
	return 0;
#endif
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_ITEM, item,
				  "MPLS is not supported by Verbs, please"
				  " update.");
}

static int
flow_null_validate(struct rte_eth_dev *dev __rte_unused,
		   const struct rte_flow_attr *attr __rte_unused,
		   const struct rte_flow_item items[] __rte_unused,
		   const struct rte_flow_action actions[] __rte_unused,
		   struct rte_flow_error *error __rte_unused)
{
	rte_errno = ENOTSUP;
	return -rte_errno;
}

static struct mlx5_flow *
flow_null_prepare(const struct rte_flow_attr *attr __rte_unused,
		  const struct rte_flow_item items[] __rte_unused,
		  const struct rte_flow_action actions[] __rte_unused,
		  uint64_t *item_flags __rte_unused,
		  uint64_t *action_flags __rte_unused,
		  struct rte_flow_error *error __rte_unused)
{
	rte_errno = ENOTSUP;
	return NULL;
}

static int
flow_null_translate(struct rte_eth_dev *dev __rte_unused,
		    struct mlx5_flow *dev_flow __rte_unused,
		    const struct rte_flow_attr *attr __rte_unused,
		    const struct rte_flow_item items[] __rte_unused,
		    const struct rte_flow_action actions[] __rte_unused,
		    struct rte_flow_error *error __rte_unused)
{
	rte_errno = ENOTSUP;
	return -rte_errno;
}

static int
flow_null_apply(struct rte_eth_dev *dev __rte_unused,
		struct rte_flow *flow __rte_unused,
		struct rte_flow_error *error __rte_unused)
{
	rte_errno = ENOTSUP;
	return -rte_errno;
}

static void
flow_null_remove(struct rte_eth_dev *dev __rte_unused,
		 struct rte_flow *flow __rte_unused)
{
}

static void
flow_null_destroy(struct rte_eth_dev *dev __rte_unused,
		  struct rte_flow *flow __rte_unused)
{
}

/* Void driver to protect from null pointer reference. */
const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
	.validate = flow_null_validate,
	.prepare = flow_null_prepare,
	.translate = flow_null_translate,
	.apply = flow_null_apply,
	.remove = flow_null_remove,
	.destroy = flow_null_destroy,
};
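
/*
 * Illustrative sketch, not part of the driver build (the guard macro is
 * never defined): it shows how flow_drv_ops[] defined at the top of this
 * file dispatches to a driver, the way the flow_drv_*() wrappers below do.
 * The driver type is hardcoded here; the real code selects it from the
 * flow attributes and the device configuration.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static int
example_dispatch_validate(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item items[],
			  const struct rte_flow_action actions[],
			  struct rte_flow_error *error)
{
	/* Pick the Verbs driver; unsupported types fall back to the
	 * mlx5_flow_null_drv_ops entries which only set ENOTSUP. */
	const struct mlx5_flow_driver_ops *fops =
		flow_drv_ops[MLX5_FLOW_TYPE_VERBS];

	return fops->validate(dev, attr, items, actions, error);
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */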

/**
 * Select flow driver type according to flow attributes and device
 * configuration.
 *
 * @param[in] dev
 *   Pointer to the dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 *
 * @return
 *   flow driver type if supported, MLX5_FLOW_TYPE_MAX otherwise.
 */
static enum mlx5_flow_drv_type
flow_get_drv_type(struct rte_eth_dev *dev __rte_unused,
		  const struct rte_flow_attr *attr)
{
	struct priv *priv __rte_unused = dev->data->dev_private;
	enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX;

	if (attr->transfer) {
		type = MLX5_FLOW_TYPE_TCF;
	} else {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
		type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
						 MLX5_FLOW_TYPE_VERBS;
#else
		type = MLX5_FLOW_TYPE_VERBS;
#endif
	}
	return type;
}

#define flow_get_drv_ops(type) flow_drv_ops[type]

/**
 * Flow driver validation API. This abstracts calling driver specific
 * functions. The type of flow driver is determined according to flow
 * attributes.
 *
 * @param[in] dev
 *   Pointer to the dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static inline int
flow_drv_validate(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item items[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops;
	enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);

	fops = flow_get_drv_ops(type);
	return fops->validate(dev, attr, items, actions, error);
}

/**
 * Flow driver preparation API. This abstracts calling driver specific
 * functions. Parent flow (rte_flow) should have driver type (drv_type). It
 * calculates the size of memory required for device flow, allocates the
 * memory, initializes the device flow and returns the pointer.
 *
 * @param[in] flow
 *   Pointer to the parent flow structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] item_flags
 *   Pointer to bit mask of all items detected.
 * @param[out] action_flags
 *   Pointer to bit mask of all actions detected.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
 */
static inline struct mlx5_flow *
flow_drv_prepare(struct rte_flow *flow,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 uint64_t *item_flags,
		 uint64_t *action_flags,
		 struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops;
	enum mlx5_flow_drv_type type = flow->drv_type;

	assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
	fops = flow_get_drv_ops(type);
	return fops->prepare(attr, items, actions, item_flags, action_flags,
			     error);
}

/**
 * Flow driver translation API. This abstracts calling driver specific
 * functions. Parent flow (rte_flow) should have driver type (drv_type). It
 * translates a generic flow into a driver flow. flow_drv_prepare() must
 * precede.
 *
 * @param[in] dev
 *   Pointer to the rte dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5 flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static inline int
flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops;
	enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;

	assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
	fops = flow_get_drv_ops(type);
	return fops->translate(dev, dev_flow, attr, items, actions, error);
}

/**
 * Flow driver apply API. This abstracts calling driver specific functions.
 * Parent flow (rte_flow) should have driver type (drv_type). It applies
 * translated driver flows on to device. flow_drv_translate() must precede.
 *
 * @param[in] dev
 *   Pointer to Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static inline int
flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops;
	enum mlx5_flow_drv_type type = flow->drv_type;

	assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
	fops = flow_get_drv_ops(type);
	return fops->apply(dev, flow, error);
}

/**
 * Flow driver remove API. This abstracts calling driver specific functions.
 * Parent flow (rte_flow) should have driver type (drv_type). It removes a
 * flow on device. All the resources of the flow should be freed by calling
 * flow_drv_destroy().
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static inline void
flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	const struct mlx5_flow_driver_ops *fops;
	enum mlx5_flow_drv_type type = flow->drv_type;

	assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
	fops = flow_get_drv_ops(type);
	fops->remove(dev, flow);
}

/**
 * Flow driver destroy API. This abstracts calling driver specific functions.
 * Parent flow (rte_flow) should have driver type (drv_type). It removes a
 * flow on device and releases resources of the flow.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static inline void
flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	const struct mlx5_flow_driver_ops *fops;
	enum mlx5_flow_drv_type type = flow->drv_type;

	assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
	fops = flow_get_drv_ops(type);
	fops->destroy(dev, flow);
}
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
int
mlx5_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	int ret;

	ret = flow_drv_validate(dev, attr, items, actions, error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Get RSS action from the action list.
 *
 * @param[in] actions
 *   Pointer to the list of actions.
 *
 * @return
 *   Pointer to the RSS action if it exists, NULL otherwise.
 */
static const struct rte_flow_action_rss*
mlx5_flow_get_rss_action(const struct rte_flow_action actions[])
{
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_RSS:
			return (const struct rte_flow_action_rss *)
			       actions->conf;
		default:
			break;
		}
	}
	return NULL;
}

/**
 * Find the expansion graph root node to be used for the RSS expansion of
 * @p pattern, depending on whether the pattern contains a VLAN item and on
 * the requested RSS level.
 *
 * @param[in] pattern
 *   Flow pattern to inspect.
 * @param[in] rss_level
 *   RSS encapsulation level (values >= 2 request inner RSS).
 *
 * @return
 *   Index of the expansion graph root node.
 */
static unsigned int
mlx5_find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
{
	const struct rte_flow_item *item;
	unsigned int has_vlan = 0;

	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
			has_vlan = 1;
			break;
		}
	}
	if (has_vlan)
		return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
				       MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
	return rss_level < 2 ? MLX5_EXPANSION_ROOT :
			       MLX5_EXPANSION_ROOT_OUTER;
}

/**
 * Create a flow and add it to @p list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow on success, NULL otherwise and rte_errno is set.
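 *
 * @note
 *   A single rte_flow object is allocated together with room for the RSS
 *   queue list; each pattern produced by the RSS expansion then gets its own
 *   mlx5_flow (device flow) linked into the flow's dev_flows list.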
 */
static struct rte_flow *
mlx5_flow_list_create(struct rte_eth_dev *dev,
		      struct mlx5_flows *list,
		      const struct rte_flow_attr *attr,
		      const struct rte_flow_item items[],
		      const struct rte_flow_action actions[],
		      struct rte_flow_error *error)
{
	struct rte_flow *flow = NULL;
	struct mlx5_flow *dev_flow;
	uint64_t action_flags = 0;
	uint64_t item_flags = 0;
	const struct rte_flow_action_rss *rss;
	union {
		struct rte_flow_expand_rss buf;
		uint8_t buffer[2048];
	} expand_buffer;
	struct rte_flow_expand_rss *buf = &expand_buffer.buf;
	int ret;
	uint32_t i;
	uint32_t flow_size;

	ret = flow_drv_validate(dev, attr, items, actions, error);
	if (ret < 0)
		return NULL;
	flow_size = sizeof(struct rte_flow);
	rss = mlx5_flow_get_rss_action(actions);
	if (rss)
		flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t),
					    sizeof(void *));
	else
		flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
	flow = rte_calloc(__func__, 1, flow_size, 0);
	if (!flow) {
		rte_errno = ENOMEM;
		return NULL;
	}
	flow->drv_type = flow_get_drv_type(dev, attr);
	assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
	       flow->drv_type < MLX5_FLOW_TYPE_MAX);
	flow->queue = (void *)(flow + 1);
	LIST_INIT(&flow->dev_flows);
	if (rss && rss->types) {
		unsigned int graph_root;

		graph_root = mlx5_find_graph_root(items, rss->level);
		ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
					  items, rss->types,
					  mlx5_support_expansion,
					  graph_root);
		assert(ret > 0 &&
		       (unsigned int)ret < sizeof(expand_buffer.buffer));
	} else {
		buf->entries = 1;
		buf->entry[0].pattern = (void *)(uintptr_t)items;
	}
	for (i = 0; i < buf->entries; ++i) {
		dev_flow = flow_drv_prepare(flow, attr, buf->entry[i].pattern,
					    actions, &item_flags, &action_flags,
					    error);
		if (!dev_flow)
			goto error;
		dev_flow->flow = flow;
		LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
		ret = flow_drv_translate(dev, dev_flow, attr,
					 buf->entry[i].pattern,
					 actions, error);
		if (ret < 0)
			goto error;
	}
	if (dev->data->dev_started) {
		ret = flow_drv_apply(dev, flow, error);
		if (ret < 0)
			goto error;
	}
	TAILQ_INSERT_TAIL(list, flow, next);
	mlx5_flow_rxq_flags_set(dev, flow);
	return flow;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	assert(flow);
	flow_drv_destroy(dev, flow);
	rte_free(flow);
	rte_errno = ret; /* Restore rte_errno. */
	return NULL;
}

/**
 * Create a flow.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
struct rte_flow *
mlx5_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	return mlx5_flow_list_create
		(dev, &((struct priv *)dev->data->dev_private)->flows,
		 attr, items, actions, error);
}

/**
 * Destroy a flow in a list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 * @param[in] flow
 *   Flow to destroy.
 */
static void
mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
		       struct rte_flow *flow)
{
	flow_drv_destroy(dev, flow);
	TAILQ_REMOVE(list, flow, next);
	/*
	 * Update RX queue flags only if port is started, otherwise it is
	 * already clean.
	 */
	if (dev->data->dev_started)
		mlx5_flow_rxq_flags_trim(dev, flow);
	rte_free(flow);
}

/**
 * Destroy all flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 */
void
mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
{
	while (!TAILQ_EMPTY(list)) {
		struct rte_flow *flow;

		flow = TAILQ_FIRST(list);
		mlx5_flow_list_destroy(dev, list, flow);
	}
}

/**
 * Remove all flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 */
void
mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
{
	struct rte_flow *flow;

	TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next)
		flow_drv_remove(dev, flow);
	mlx5_flow_rxq_flags_clear(dev);
}

/**
 * Add all flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
{
	struct rte_flow *flow;
	struct rte_flow_error error;
	int ret = 0;

	TAILQ_FOREACH(flow, list, next) {
		ret = flow_drv_apply(dev, flow, &error);
		if (ret < 0)
			goto error;
		mlx5_flow_rxq_flags_set(dev, flow);
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_flow_stop(dev, list);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Verify the flow list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of flows not released.
 */
int
mlx5_flow_verify(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct rte_flow *flow;
	int ret = 0;

	TAILQ_FOREACH(flow, &priv->flows, next) {
		DRV_LOG(DEBUG, "port %u flow %p still referenced",
			dev->data->port_id, (void *)flow);
		++ret;
	}
	return ret;
}

/**
 * Enable a control flow configured from the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param eth_spec
 *   An Ethernet flow spec to apply.
 * @param eth_mask
 *   An Ethernet flow mask to apply.
 * @param vlan_spec
 *   A VLAN flow spec to apply.
 * @param vlan_mask
 *   A VLAN flow mask to apply.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
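 *
 * @note
 *   The rule is created as an RSS flow spreading over all RETA queues and is
 *   kept in the control flow list (priv->ctrl_flows) rather than in the user
 *   flow list.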
 */
int
mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
		    struct rte_flow_item_eth *eth_spec,
		    struct rte_flow_item_eth *eth_mask,
		    struct rte_flow_item_vlan *vlan_spec,
		    struct rte_flow_item_vlan *vlan_mask)
{
	struct priv *priv = dev->data->dev_private;
	const struct rte_flow_attr attr = {
		.ingress = 1,
		.priority = MLX5_FLOW_PRIO_RSVD,
	};
	struct rte_flow_item items[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_ETH,
			.spec = eth_spec,
			.last = NULL,
			.mask = eth_mask,
		},
		{
			.type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
					      RTE_FLOW_ITEM_TYPE_END,
			.spec = vlan_spec,
			.last = NULL,
			.mask = vlan_mask,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
	};
	uint16_t queue[priv->reta_idx_n];
	struct rte_flow_action_rss action_rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0,
		.types = priv->rss_conf.rss_hf,
		.key_len = priv->rss_conf.rss_key_len,
		.queue_num = priv->reta_idx_n,
		.key = priv->rss_conf.rss_key,
		.queue = queue,
	};
	struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_RSS,
			.conf = &action_rss,
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		},
	};
	struct rte_flow *flow;
	struct rte_flow_error error;
	unsigned int i;

	if (!priv->reta_idx_n) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	for (i = 0; i != priv->reta_idx_n; ++i)
		queue[i] = (*priv->reta_idx)[i];
	flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items,
				     actions, &error);
	if (!flow)
		return -rte_errno;
	return 0;
}

/**
 * Enable a control flow configured from the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param eth_spec
 *   An Ethernet flow spec to apply.
 * @param eth_mask
 *   An Ethernet flow mask to apply.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_ctrl_flow(struct rte_eth_dev *dev,
	       struct rte_flow_item_eth *eth_spec,
	       struct rte_flow_item_eth *eth_mask)
{
	return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
}

/**
 * Destroy a flow.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
int
mlx5_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error __rte_unused)
{
	struct priv *priv = dev->data->dev_private;

	mlx5_flow_list_destroy(dev, &priv->flows, flow);
	return 0;
}

/**
 * Destroy all flows.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
int
mlx5_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error __rte_unused)
{
	struct priv *priv = dev->data->dev_private;

	mlx5_flow_list_flush(dev, &priv->flows);
	return 0;
}

/**
 * Isolated mode.
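 *
 * When enabled, the port is expected to receive only the traffic matching
 * flow rules created through the rte_flow API; this PMD reflects the mode by
 * switching between the regular and the isolate-specific dev_ops tables.
 *
 * A hedged usage sketch from the application side (port_id, ret and err are
 * hypothetical; the port must be stopped first):
 *
 * @code
 * rte_eth_dev_stop(port_id);
 * ret = rte_flow_isolate(port_id, 1, &err);
 * @endcode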
 *
 * @see rte_flow_isolate()
 * @see rte_flow_ops
 */
int
mlx5_flow_isolate(struct rte_eth_dev *dev,
		  int enable,
		  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	if (dev->data->dev_started) {
		rte_flow_error_set(error, EBUSY,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "port must be stopped first");
		return -rte_errno;
	}
	priv->isolated = !!enable;
	if (enable)
		dev->dev_ops = &mlx5_dev_ops_isolate;
	else
		dev->dev_ops = &mlx5_dev_ops;
	return 0;
}

/**
 * Query flow counter.
 *
 * @param flow
 *   Pointer to the flow.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_query_count(struct rte_flow *flow __rte_unused,
		      void *data __rte_unused,
		      struct rte_flow_error *error)
{
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
	if (flow->actions & MLX5_FLOW_ACTION_COUNT) {
		struct rte_flow_query_count *qc = data;
		uint64_t counters[2] = {0, 0};
		struct ibv_query_counter_set_attr query_cs_attr = {
			.cs = flow->counter->cs,
			.query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
		};
		struct ibv_counter_set_data query_out = {
			.out = counters,
			.outlen = 2 * sizeof(uint64_t),
		};
		int err = mlx5_glue->query_counter_set(&query_cs_attr,
						       &query_out);

		if (err)
			return rte_flow_error_set
				(error, err,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL,
				 "cannot read counter");
		qc->hits_set = 1;
		qc->bytes_set = 1;
		qc->hits = counters[0] - flow->counter->hits;
		qc->bytes = counters[1] - flow->counter->bytes;
		if (qc->reset) {
			flow->counter->hits = counters[0];
			flow->counter->bytes = counters[1];
		}
		return 0;
	}
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "flow does not have counter");
#endif
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "counters are not available");
}

/**
 * Query a flow.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
int
mlx5_flow_query(struct rte_eth_dev *dev __rte_unused,
		struct rte_flow *flow,
		const struct rte_flow_action *actions,
		void *data,
		struct rte_flow_error *error)
{
	int ret = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = mlx5_flow_query_count(flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
		if (ret < 0)
			return ret;
	}
	return 0;
}

/**
 * Convert a flow director filter to a generic flow.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fdir_filter
 *   Flow director filter to add.
 * @param attributes
 *   Generic flow parameters structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
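 *
 * @note
 *   The conversion fills attributes->items[0] with an ETH item,
 *   attributes->items[1] with the L3 (IPv4/IPv6) item, attributes->items[2]
 *   with the optional L4 (UDP/TCP) item, and attributes->actions[0] with
 *   either a QUEUE or a DROP action, depending on the filter behavior.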
 */
static int
mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
			 const struct rte_eth_fdir_filter *fdir_filter,
			 struct mlx5_fdir *attributes)
{
	struct priv *priv = dev->data->dev_private;
	const struct rte_eth_fdir_input *input = &fdir_filter->input;
	const struct rte_eth_fdir_masks *mask =
		&dev->data->dev_conf.fdir_conf.mask;

	/* Validate queue number. */
	if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
		DRV_LOG(ERR, "port %u invalid queue number %d",
			dev->data->port_id, fdir_filter->action.rx_queue);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	attributes->attr.ingress = 1;
	attributes->items[0] = (struct rte_flow_item) {
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.spec = &attributes->l2,
		.mask = &attributes->l2_mask,
	};
	switch (fdir_filter->action.behavior) {
	case RTE_ETH_FDIR_ACCEPT:
		attributes->actions[0] = (struct rte_flow_action){
			.type = RTE_FLOW_ACTION_TYPE_QUEUE,
			.conf = &attributes->queue,
		};
		break;
	case RTE_ETH_FDIR_REJECT:
		attributes->actions[0] = (struct rte_flow_action){
			.type = RTE_FLOW_ACTION_TYPE_DROP,
		};
		break;
	default:
		DRV_LOG(ERR, "port %u invalid behavior %d",
			dev->data->port_id,
			fdir_filter->action.behavior);
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	attributes->queue.index = fdir_filter->action.rx_queue;
	/* Handle L3. */
	switch (fdir_filter->input.flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
		attributes->l3.ipv4.hdr = (struct ipv4_hdr){
			.src_addr = input->flow.ip4_flow.src_ip,
			.dst_addr = input->flow.ip4_flow.dst_ip,
			.time_to_live = input->flow.ip4_flow.ttl,
			.type_of_service = input->flow.ip4_flow.tos,
		};
		attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
			.src_addr = mask->ipv4_mask.src_ip,
			.dst_addr = mask->ipv4_mask.dst_ip,
			.time_to_live = mask->ipv4_mask.ttl,
			.type_of_service = mask->ipv4_mask.tos,
			.next_proto_id = mask->ipv4_mask.proto,
		};
		attributes->items[1] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_IPV4,
			.spec = &attributes->l3,
			.mask = &attributes->l3_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
		attributes->l3.ipv6.hdr = (struct ipv6_hdr){
			.hop_limits = input->flow.ipv6_flow.hop_limits,
			.proto = input->flow.ipv6_flow.proto,
		};

		memcpy(attributes->l3.ipv6.hdr.src_addr,
		       input->flow.ipv6_flow.src_ip,
		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
		memcpy(attributes->l3.ipv6.hdr.dst_addr,
		       input->flow.ipv6_flow.dst_ip,
		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
		memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
		       mask->ipv6_mask.src_ip,
		       RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
		memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
		       mask->ipv6_mask.dst_ip,
		       RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
		attributes->items[1] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_IPV6,
			.spec = &attributes->l3,
			.mask = &attributes->l3_mask,
		};
		break;
	default:
		DRV_LOG(ERR, "port %u invalid flow type %d",
			dev->data->port_id, fdir_filter->input.flow_type);
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	/* Handle L4. */
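	/*
	 * Only the UDP and TCP flow types carry L4 port information; the
	 * "OTHER" flow types leave items[2] unset so the pattern ends after
	 * the L3 item.
	 */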
	switch (fdir_filter->input.flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
		attributes->l4.udp.hdr = (struct udp_hdr){
			.src_port = input->flow.udp4_flow.src_port,
			.dst_port = input->flow.udp4_flow.dst_port,
		};
		attributes->l4_mask.udp.hdr = (struct udp_hdr){
			.src_port = mask->src_port_mask,
			.dst_port = mask->dst_port_mask,
		};
		attributes->items[2] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_UDP,
			.spec = &attributes->l4,
			.mask = &attributes->l4_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
		attributes->l4.tcp.hdr = (struct tcp_hdr){
			.src_port = input->flow.tcp4_flow.src_port,
			.dst_port = input->flow.tcp4_flow.dst_port,
		};
		attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
			.src_port = mask->src_port_mask,
			.dst_port = mask->dst_port_mask,
		};
		attributes->items[2] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_TCP,
			.spec = &attributes->l4,
			.mask = &attributes->l4_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
		attributes->l4.udp.hdr = (struct udp_hdr){
			.src_port = input->flow.udp6_flow.src_port,
			.dst_port = input->flow.udp6_flow.dst_port,
		};
		attributes->l4_mask.udp.hdr = (struct udp_hdr){
			.src_port = mask->src_port_mask,
			.dst_port = mask->dst_port_mask,
		};
		attributes->items[2] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_UDP,
			.spec = &attributes->l4,
			.mask = &attributes->l4_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
		attributes->l4.tcp.hdr = (struct tcp_hdr){
			.src_port = input->flow.tcp6_flow.src_port,
			.dst_port = input->flow.tcp6_flow.dst_port,
		};
		attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
			.src_port = mask->src_port_mask,
			.dst_port = mask->dst_port_mask,
		};
		attributes->items[2] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_TCP,
			.spec = &attributes->l4,
			.mask = &attributes->l4_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
		break;
	default:
		DRV_LOG(ERR, "port %u invalid flow type %d",
			dev->data->port_id, fdir_filter->input.flow_type);
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	return 0;
}

/**
 * Add a new flow director filter and store it in the list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fdir_filter
 *   Flow director filter to add.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
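 *
 * @note
 *   The filter is first converted to a generic rte_flow rule by
 *   mlx5_fdir_filter_convert() and then created in the regular flow list
 *   (priv->flows).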
 */
static int
mlx5_fdir_filter_add(struct rte_eth_dev *dev,
		     const struct rte_eth_fdir_filter *fdir_filter)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_fdir attributes = {
		.attr.group = 0,
		.l2_mask = {
			.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		},
	};
	struct rte_flow_error error;
	struct rte_flow *flow;
	int ret;

	ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
	if (ret)
		return ret;
	flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,
				     attributes.items, attributes.actions,
				     &error);
	if (flow) {
		DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id,
			(void *)flow);
		return 0;
	}
	return -rte_errno;
}

/**
 * Delete specific filter.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fdir_filter
 *   Filter to be deleted.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused,
			const struct rte_eth_fdir_filter *fdir_filter
			__rte_unused)
{
	rte_errno = ENOTSUP;
	return -rte_errno;
}

/**
 * Update queue for specific filter.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fdir_filter
 *   Filter to be updated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_fdir_filter_update(struct rte_eth_dev *dev,
			const struct rte_eth_fdir_filter *fdir_filter)
{
	int ret;

	ret = mlx5_fdir_filter_delete(dev, fdir_filter);
	if (ret)
		return ret;
	return mlx5_fdir_filter_add(dev, fdir_filter);
}

/**
 * Flush all filters.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_fdir_filter_flush(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;

	mlx5_flow_list_flush(dev, &priv->flows);
}

/**
 * Get flow director information.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] fdir_info
 *   Resulting flow director information.
 */
static void
mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
{
	struct rte_eth_fdir_masks *mask =
		&dev->data->dev_conf.fdir_conf.mask;

	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
	fdir_info->guarant_spc = 0;
	rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
	fdir_info->max_flexpayload = 0;
	fdir_info->flow_types_mask[0] = 0;
	fdir_info->flex_payload_unit = 0;
	fdir_info->max_flex_payload_segment_num = 0;
	fdir_info->flex_payload_limit = 0;
	memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
}

/**
 * Deal with flow director operations.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
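 *
 * @note
 *   Only RTE_FDIR_MODE_PERFECT and RTE_FDIR_MODE_PERFECT_MAC_VLAN modes are
 *   accepted; any other mode fails with EINVAL.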
 */
static int
mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
		    void *arg)
{
	enum rte_fdir_mode fdir_mode =
		dev->data->dev_conf.fdir_conf.mode;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;
	if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
	    fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		DRV_LOG(ERR, "port %u flow director mode %d not supported",
			dev->data->port_id, fdir_mode);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		return mlx5_fdir_filter_add(dev, arg);
	case RTE_ETH_FILTER_UPDATE:
		return mlx5_fdir_filter_update(dev, arg);
	case RTE_ETH_FILTER_DELETE:
		return mlx5_fdir_filter_delete(dev, arg);
	case RTE_ETH_FILTER_FLUSH:
		mlx5_fdir_filter_flush(dev);
		break;
	case RTE_ETH_FILTER_INFO:
		mlx5_fdir_info_get(dev, arg);
		break;
	default:
		DRV_LOG(DEBUG, "port %u unknown operation %u",
			dev->data->port_id, filter_op);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
}

/**
 * Manage filter operations.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param filter_type
 *   Filter type.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg)
{
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET) {
			rte_errno = EINVAL;
			return -rte_errno;
		}
		*(const void **)arg = &mlx5_flow_ops;
		return 0;
	case RTE_ETH_FILTER_FDIR:
		return mlx5_fdir_ctrl_func(dev, filter_op, arg);
	default:
		DRV_LOG(ERR, "port %u filter type (%d) not supported",
			dev->data->port_id, filter_type);
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	return 0;
}