/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include <mlx5_glue.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_rx.h"

#define VERBS_SPEC_INNER(item_flags) \
	(!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0)

/* Verbs specification header. */
struct ibv_spec_header {
	enum ibv_flow_spec_type type;
	uint16_t size;
};

/**
 * Discover the maximum number of priorities available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] vprio
 *   Expected result variants.
 * @param[in] vprio_n
 *   Number of entries in @p vprio array.
 *
 * @return
 *   Number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
static int
flow_verbs_discover_priorities(struct rte_eth_dev *dev,
			       const uint16_t *vprio, int vprio_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
			.port = (uint8_t)priv->dev_port,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	struct mlx5_hrxq *drop = priv->drop_queue.hrxq;
	int i;
	int priority = 0;

#if defined(HAVE_MLX5DV_DR_DEVX_PORT) || defined(HAVE_MLX5DV_DR_DEVX_PORT_V35)
	/* If DevX supported, driver must support 16 verbs flow priorities. */
	priority = 16;
	goto out;
#endif
	if (!drop->qp) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	for (i = 0; i != vprio_n; i++) {
		flow_attr.attr.priority = vprio[i] - 1;
		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
		priority = vprio[i];
	}
#if defined(HAVE_MLX5DV_DR_DEVX_PORT) || defined(HAVE_MLX5DV_DR_DEVX_PORT_V35)
out:
#endif
	DRV_LOG(INFO, "port %u supported flow priorities:"
		" 0-%d for ingress or egress root table,"
		" 0-%d for non-root table or transfer root table.",
		dev->data->port_id, priority - 2,
		MLX5_NON_ROOT_FLOW_MAX_PRIO - 1);
	return priority;
}
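
/*
 * Note: the probe above creates a throwaway drop flow at each candidate
 * priority.  With a typical probe set such as vprio[] = { 8, 16 } the first
 * attempt uses attribute priority 7; if the kernel also accepts priority 15,
 * 16 priorities are reported, otherwise 8.
 */
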
/**
 * Get Verbs flow counter by index.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   mlx5 flow counter index in the container.
 * @param[out] ppool
 *   mlx5 flow counter pool in the container.
 *
 * @return
 *   A pointer to the counter, NULL otherwise.
 */
static struct mlx5_flow_counter *
flow_verbs_counter_get_by_idx(struct rte_eth_dev *dev,
			      uint32_t idx,
			      struct mlx5_flow_counter_pool **ppool)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool;

	idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
	pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
	MLX5_ASSERT(pool);
	if (ppool)
		*ppool = pool;
	return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
}

/**
 * Create Verbs flow counter with Verbs library.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] counter
 *   mlx5 flow counter object, contains the counter id,
 *   the handle of the created Verbs flow counter is returned
 *   in the dcs_when_free field (if counters are supported).
 *
 * @return
 *   0 on success, else a negative errno value is returned
 *   and rte_errno is set.
 */
static int
flow_verbs_counter_create(struct rte_eth_dev *dev,
			  struct mlx5_flow_counter *counter)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->cdev->ctx;
	struct ibv_counter_set_init_attr init = {
			 .counter_set_id = counter->shared_info.id};

	counter->dcs_when_free = mlx5_glue->create_counter_set(ctx, &init);
	if (!counter->dcs_when_free) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	return 0;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->cdev->ctx;
	struct ibv_counters_init_attr init = {0};
	struct ibv_counter_attach_attr attach;
	int ret;

	memset(&attach, 0, sizeof(attach));
	counter->dcs_when_free = mlx5_glue->create_counters(ctx, &init);
	if (!counter->dcs_when_free) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	attach.counter_desc = IBV_COUNTER_PACKETS;
	attach.index = 0;
	ret = mlx5_glue->attach_counters(counter->dcs_when_free, &attach, NULL);
	if (!ret) {
		attach.counter_desc = IBV_COUNTER_BYTES;
		attach.index = 1;
		ret = mlx5_glue->attach_counters
					(counter->dcs_when_free, &attach, NULL);
	}
	if (ret) {
		claim_zero(mlx5_glue->destroy_counters(counter->dcs_when_free));
		counter->dcs_when_free = NULL;
		rte_errno = ret;
		return -ret;
	}
	return 0;
#else
	(void)dev;
	(void)counter;
	rte_errno = ENOTSUP;
	return -ENOTSUP;
#endif
}
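
/*
 * Counter indexes handed to the upper layers are 1-based and combine the
 * pool position with the offset inside the pool, roughly:
 *
 *   cnt_idx = pool_idx * MLX5_COUNTERS_PER_POOL + offset + 1
 *
 * flow_verbs_counter_get_by_idx() undoes this mapping, which is why it
 * starts by decrementing the index.
 */
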
/**
 * Get a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] id
 *   Counter identifier.
 *
 * @return
 *   Index to the counter, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t id __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt = NULL;
	uint32_t n_valid = cmng->n_valid;
	uint32_t pool_idx, cnt_idx;
	uint32_t i;
	int ret;

	for (pool_idx = 0; pool_idx < n_valid; ++pool_idx) {
		pool = cmng->pools[pool_idx];
		if (!pool)
			continue;
		cnt = TAILQ_FIRST(&pool->counters[0]);
		if (cnt)
			break;
	}
	if (!cnt) {
		struct mlx5_flow_counter_pool **pools;
		uint32_t size;

		if (n_valid == cmng->n) {
			/* Resize the container pool array. */
			size = sizeof(struct mlx5_flow_counter_pool *) *
				     (n_valid + MLX5_CNT_CONTAINER_RESIZE);
			pools = mlx5_malloc(MLX5_MEM_ZERO, size, 0,
					    SOCKET_ID_ANY);
			if (!pools)
				return 0;
			if (n_valid) {
				memcpy(pools, cmng->pools,
				       sizeof(struct mlx5_flow_counter_pool *) *
				       n_valid);
				mlx5_free(cmng->pools);
			}
			cmng->pools = pools;
			cmng->n += MLX5_CNT_CONTAINER_RESIZE;
		}
		/* Allocate memory for new pool. */
		size = sizeof(*pool) + sizeof(*cnt) * MLX5_COUNTERS_PER_POOL;
		pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
		if (!pool)
			return 0;
		for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
			cnt = MLX5_POOL_GET_CNT(pool, i);
			TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
		}
		cnt = MLX5_POOL_GET_CNT(pool, 0);
		cmng->pools[n_valid] = pool;
		pool_idx = n_valid;
		cmng->n_valid++;
	}
	TAILQ_REMOVE(&pool->counters[0], cnt, next);
	i = MLX5_CNT_ARRAY_IDX(pool, cnt);
	cnt_idx = MLX5_MAKE_CNT_IDX(pool_idx, i);
	/* Create counter with Verbs. */
	ret = flow_verbs_counter_create(dev, cnt);
	if (!ret) {
		cnt->dcs_when_active = cnt->dcs_when_free;
		cnt->hits = 0;
		cnt->bytes = 0;
		return cnt_idx;
	}
	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
	/* Some error occurred in Verbs library. */
	rte_errno = -ret;
	return 0;
}

/**
 * Release a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handler.
 */
static void
flow_verbs_counter_release(struct rte_eth_dev *dev, uint32_t counter)
{
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt;

	cnt = flow_verbs_counter_get_by_idx(dev, counter, &pool);
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	claim_zero(mlx5_glue->destroy_counter_set
			((struct ibv_counter_set *)cnt->dcs_when_active));
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	claim_zero(mlx5_glue->destroy_counters
			((struct ibv_counters *)cnt->dcs_when_active));
#endif
	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
}
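
/*
 * Verbs counters have no hardware reset. Reset semantics are emulated by
 * keeping the last raw readings in cnt->hits/cnt->bytes and reporting deltas
 * against them in flow_verbs_counter_query() below.
 */
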
/**
 * Query a flow counter via Verbs library call.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused,
			 struct rte_flow *flow, void *data,
			 struct rte_flow_error *error)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	if (flow->counter) {
		struct mlx5_flow_counter_pool *pool;
		struct mlx5_flow_counter *cnt = flow_verbs_counter_get_by_idx
						(dev, flow->counter, &pool);
		struct rte_flow_query_count *qc = data;
		uint64_t counters[2] = {0, 0};
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
		struct ibv_query_counter_set_attr query_cs_attr = {
			.dcs_when_free = (struct ibv_counter_set *)
						cnt->dcs_when_active,
			.query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
		};
		struct ibv_counter_set_data query_out = {
			.out = counters,
			.outlen = 2 * sizeof(uint64_t),
		};
		int err = mlx5_glue->query_counter_set(&query_cs_attr,
						       &query_out);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
		int err = mlx5_glue->query_counters
			((struct ibv_counters *)cnt->dcs_when_active, counters,
			 RTE_DIM(counters),
			 IBV_READ_COUNTERS_ATTR_PREFER_CACHED);
#endif
		if (err)
			return rte_flow_error_set
				(error, err,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL,
				 "cannot read counter");
		qc->hits_set = 1;
		qc->bytes_set = 1;
		qc->hits = counters[0] - cnt->hits;
		qc->bytes = counters[1] - cnt->bytes;
		if (qc->reset) {
			cnt->hits = counters[0];
			cnt->bytes = counters[1];
		}
		return 0;
	}
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "flow does not have counter");
#else
	(void)flow;
	(void)data;
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "counters are not available");
#endif
}

/**
 * Add a verbs item specification into @p verbs.
 *
 * @param[out] verbs
 *   Pointer to verbs structure.
 * @param[in] src
 *   Create specification.
 * @param[in] size
 *   Size in bytes of the specification to copy.
 */
static void
flow_verbs_spec_add(struct mlx5_flow_verbs_workspace *verbs,
		    void *src, unsigned int size)
{
	void *dst;

	if (!verbs)
		return;
	MLX5_ASSERT(verbs->specs);
	dst = (void *)(verbs->specs + verbs->size);
	memcpy(dst, src, size);
	++verbs->attr.num_of_specs;
	verbs->size += size;
}
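
/*
 * Specifications are appended back to back into verbs->specs and the Verbs
 * library walks them through the size field of each ibv_spec_header. A flow
 * matching outer Ethernet, IPv4 and UDP is therefore laid out as:
 *
 *   | ibv_flow_attr | eth spec | ipv4_ext spec | tcp_udp spec |
 */
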
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_eth(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	const unsigned int size = sizeof(struct ibv_flow_spec_eth);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_eth_mask;
	if (spec) {
		unsigned int i;

		memcpy(&eth.val.dst_mac, spec->dst.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		memcpy(&eth.val.src_mac, spec->src.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		eth.val.ether_type = spec->type;
		memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		memcpy(&eth.mask.src_mac, mask->src.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		eth.mask.ether_type = mask->type;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) {
			eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
			eth.val.src_mac[i] &= eth.mask.src_mac[i];
		}
		eth.val.ether_type &= eth.mask.ether_type;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
}

/**
 * Update the VLAN tag in the Verbs Ethernet specification.
 * This function assumes that the input is valid and there is space to add
 * the requested item.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] eth
 *   Verbs structure containing the VLAN information to copy.
 */
static void
flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
			    struct ibv_flow_spec_eth *eth)
{
	unsigned int i;
	const enum ibv_flow_spec_type search = eth->type;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			struct ibv_flow_spec_eth *e =
				(struct ibv_flow_spec_eth *)hdr;

			e->val.vlan_tag = eth->val.vlan_tag;
			e->mask.vlan_tag = eth->mask.vlan_tag;
			e->val.ether_type = eth->val.ether_type;
			e->mask.ether_type = eth->mask.ether_type;
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}
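
/*
 * Verbs has no standalone VLAN specification: the TCI is folded into the
 * Ethernet specification of the same layer. A pattern such as eth / vlan /
 * ipv4 thus produces a single ETH spec whose vlan_tag and ether_type come
 * from the VLAN item, which is what the update helper above implements.
 */
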
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_eth);
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};
	const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
				      MLX5_FLOW_LAYER_OUTER_L2;

	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	if (spec) {
		eth.val.vlan_tag = spec->tci;
		eth.mask.vlan_tag = mask->tci;
		eth.val.vlan_tag &= eth.mask.vlan_tag;
		eth.val.ether_type = spec->inner_type;
		eth.mask.ether_type = mask->inner_type;
		eth.val.ether_type &= eth.mask.ether_type;
	}
	if (!(item_flags & l2m))
		flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
	else
		flow_verbs_item_vlan_update(&dev_flow->verbs.attr, &eth);
	if (!tunnel)
		dev_flow->handle->vf_vlan.tag =
			rte_be_to_cpu_16(spec->tci) & 0x0fff;
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv4(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
	struct ibv_flow_spec_ipv4_ext ipv4 = {
		.type = IBV_FLOW_SPEC_IPV4_EXT | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	if (spec) {
		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = spec->hdr.src_addr,
			.dst_ip = spec->hdr.dst_addr,
			.proto = spec->hdr.next_proto_id,
			.tos = spec->hdr.type_of_service,
		};
		ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = mask->hdr.src_addr,
			.dst_ip = mask->hdr.dst_addr,
			.proto = mask->hdr.next_proto_id,
			.tos = mask->hdr.type_of_service,
		};
		/* Remove unwanted bits from values. */
		ipv4.val.src_ip &= ipv4.mask.src_ip;
		ipv4.val.dst_ip &= ipv4.mask.dst_ip;
		ipv4.val.proto &= ipv4.mask.proto;
		ipv4.val.tos &= ipv4.mask.tos;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv4, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
	struct ibv_flow_spec_ipv6 ipv6 = {
		.type = IBV_FLOW_SPEC_IPV6 | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	if (spec) {
		unsigned int i;
		uint32_t vtc_flow_val;
		uint32_t vtc_flow_mask;

		memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
		       RTE_DIM(ipv6.val.src_ip));
		memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
		       RTE_DIM(ipv6.val.dst_ip));
		memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
		       RTE_DIM(ipv6.mask.src_ip));
		memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
		       RTE_DIM(ipv6.mask.dst_ip));
		vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
		vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
		ipv6.val.flow_label =
			rte_cpu_to_be_32((vtc_flow_val & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.val.traffic_class = (vtc_flow_val & RTE_IPV6_HDR_TC_MASK) >>
					 RTE_IPV6_HDR_TC_SHIFT;
		ipv6.val.next_hdr = spec->hdr.proto;
		ipv6.mask.flow_label =
			rte_cpu_to_be_32((vtc_flow_mask & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.mask.traffic_class = (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
					  RTE_IPV6_HDR_TC_SHIFT;
		ipv6.mask.next_hdr = mask->hdr.proto;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
			ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
			ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
		}
		ipv6.val.flow_label &= ipv6.mask.flow_label;
		ipv6.val.traffic_class &= ipv6.mask.traffic_class;
		ipv6.val.next_hdr &= ipv6.mask.next_hdr;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv6, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_tcp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp tcp = {
		.type = IBV_FLOW_SPEC_TCP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_tcp_mask;
	if (spec) {
		tcp.val.dst_port = spec->hdr.dst_port;
		tcp.val.src_port = spec->hdr.src_port;
		tcp.mask.dst_port = mask->hdr.dst_port;
		tcp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		tcp.val.src_port &= tcp.mask.src_port;
		tcp.val.dst_port &= tcp.mask.dst_port;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &tcp, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_udp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp udp = {
		.type = IBV_FLOW_SPEC_UDP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_udp_mask;
	if (spec) {
		udp.val.dst_port = spec->hdr.dst_port;
		udp.val.src_port = spec->hdr.src_port;
		udp.mask.dst_port = mask->hdr.dst_port;
		udp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		udp.val.src_port &= udp.mask.src_port;
		udp.val.dst_port &= udp.mask.dst_port;
	}
	/* Look ahead past VOID items for a following tunnel item. */
	item++;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID)
		item++;
	/*
	 * If the UDP destination port is not matched explicitly, default it
	 * to the well-known port of the tunnel item that follows.
	 */
	if (!(udp.val.dst_port & udp.mask.dst_port)) {
		switch ((item)->type) {
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN_GPE);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			udp.val.dst_port = htons(MLX5_UDP_PORT_MPLS);
			udp.mask.dst_port = 0xffff;
			break;
		default:
			break;
		}
	}

	flow_verbs_spec_add(&dev_flow->verbs, &udp, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan(struct mlx5_flow *dev_flow,
				const struct rte_flow_item *item,
				uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_mask;
	if (spec) {
		/* Copy the 24-bit VNI into bytes 1..3, leaving byte 0 clear. */
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan_gpe(struct mlx5_flow *dev_flow,
				    const struct rte_flow_item *item,
				    uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan_gpe = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_gpe_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan_gpe.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan_gpe.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan_gpe, size);
}

/**
 * Update the protocol in Verbs IPv4/IPv6 spec.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] search
 *   Specification type to search in order to update the IP protocol.
 * @param[in] protocol
 *   Protocol value to set if none is present in the specification.
 */
static void
flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
				       enum ibv_flow_spec_type search,
				       uint8_t protocol)
{
	unsigned int i;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	if (!attr)
		return;
	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			union {
				struct ibv_flow_spec_ipv4_ext *ipv4;
				struct ibv_flow_spec_ipv6 *ipv6;
			} ip;

			switch (search) {
			case IBV_FLOW_SPEC_IPV4_EXT:
				ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
				if (!ip.ipv4->val.proto) {
					ip.ipv4->val.proto = protocol;
					ip.ipv4->mask.proto = 0xff;
				}
				break;
			case IBV_FLOW_SPEC_IPV6:
				ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
				if (!ip.ipv6->val.next_hdr) {
					ip.ipv6->val.next_hdr = protocol;
					ip.ipv6->mask.next_hdr = 0xff;
				}
				break;
			default:
				break;
			}
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}
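
/*
 * The helper above is used by the GRE translation below: when the pattern
 * leaves the IP protocol unconstrained, it is forced to IPPROTO_GRE so the
 * rule does not also match non-GRE traffic with the same IP headers.
 */
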
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item __rte_unused,
			      uint64_t item_flags)
{
	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel tunnel = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
#else
	static const struct rte_flow_item_gre empty_gre = {0,};
	const struct rte_flow_item_gre *spec = item->spec;
	const struct rte_flow_item_gre *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_gre);
	struct ibv_flow_spec_gre tunnel = {
		.type = IBV_FLOW_SPEC_GRE,
		.size = size,
	};

	if (!spec) {
		spec = &empty_gre;
		mask = &empty_gre;
	} else {
		if (!mask)
			mask = &rte_flow_item_gre_mask;
	}
	tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
	tunnel.val.protocol = spec->protocol;
	tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
	tunnel.mask.protocol = mask->protocol;
	/* Remove unwanted bits from values. */
	tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
	tunnel.val.key &= tunnel.mask.key;
	if (tunnel.mask.protocol) {
		tunnel.val.protocol &= tunnel.mask.protocol;
	} else {
		tunnel.val.protocol = mlx5_translate_tunnel_etypes(item_flags);
		if (tunnel.val.protocol) {
			tunnel.mask.protocol = 0xFFFF;
			tunnel.val.protocol =
				rte_cpu_to_be_16(tunnel.val.protocol);
		}
	}
#endif
	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV4_EXT,
						       IPPROTO_GRE);
	else
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV6,
						       IPPROTO_GRE);
	flow_verbs_spec_add(verbs, &tunnel, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_mpls(struct mlx5_flow *dev_flow __rte_unused,
			       const struct rte_flow_item *item __rte_unused,
			       uint64_t item_flags __rte_unused)
{
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	const struct rte_flow_item_mpls *spec = item->spec;
	const struct rte_flow_item_mpls *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_mpls);
	struct ibv_flow_spec_mpls mpls = {
		.type = IBV_FLOW_SPEC_MPLS,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_mpls_mask;
	if (spec) {
		memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
		memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
		/* Remove unwanted bits from values. */
		mpls.val.label &= mpls.mask.label;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &mpls, size);
#endif
}
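
/*
 * Actions are encoded in the same specification buffer as items: tag, drop
 * and count become ibv_flow_spec_* entries, while queue and RSS only fill
 * the mlx5_flow_rss_desc and are realized through the hash Rx queue object
 * when the flow is applied.
 */
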
/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_drop
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
	struct ibv_flow_spec_action_drop drop = {
		.type = IBV_FLOW_SPEC_ACTION_DROP,
		.size = size,
	};

	flow_verbs_spec_add(&dev_flow->verbs, &drop, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_queue(struct mlx5_flow_rss_desc *rss_desc,
				  const struct rte_flow_action *action)
{
	const struct rte_flow_action_queue *queue = action->conf;

	rss_desc->queue[0] = queue->index;
	rss_desc->queue_num = 1;
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_rss(struct mlx5_flow_rss_desc *rss_desc,
				const struct rte_flow_action *action)
{
	const struct rte_flow_action_rss *rss = action->conf;
	const uint8_t *rss_key;

	memcpy(rss_desc->queue, rss->queue, rss->queue_num * sizeof(uint16_t));
	rss_desc->queue_num = rss->queue_num;
	/* NULL RSS key indicates default RSS key. */
	rss_key = !rss->key ? rss_hash_default_key : rss->key;
	memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	/*
	 * rss->level and rss->types should be set in advance when expanding
	 * items for RSS.
	 */
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_flag
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}
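
/*
 * FLAG and MARK both translate to an ibv_flow_spec_action_tag; FLAG simply
 * uses the fixed MLX5_FLOW_MARK_DEFAULT value. In both cases
 * mlx5_flow_mark_set() remaps the user value into the driver's internal
 * encoding, which keeps zero reserved as the "no mark" value.
 */
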
/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_mark(struct mlx5_flow *dev_flow,
				 const struct rte_flow_action *action)
{
	const struct rte_flow_action_mark *mark = action->conf;
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(mark->id),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, else a negative errno value is returned and rte_errno is set.
 */
static int
flow_verbs_translate_action_count(struct mlx5_flow *dev_flow,
				  const struct rte_flow_action *action,
				  struct rte_eth_dev *dev,
				  struct rte_flow_error *error)
{
	const struct rte_flow_action_count *count = action->conf;
	struct rte_flow *flow = dev_flow->flow;
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt = NULL;
	unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
	struct ibv_flow_spec_counter_action counter = {
		.type = IBV_FLOW_SPEC_ACTION_COUNT,
		.size = size,
	};
#endif

	if (!flow->counter) {
		flow->counter = flow_verbs_counter_new(dev, count->id);
		if (!flow->counter)
			return rte_flow_error_set(error, rte_errno,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  action,
						  "cannot get counter"
						  " context.");
	}
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	counter.counter_set_handle =
		((struct ibv_counter_set *)cnt->dcs_when_active)->handle;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	counter.counters = (struct ibv_counters *)cnt->dcs_when_active;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#endif
	return 0;
}

/**
 * Internal validation function. For validating both actions and items.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[in] external
 *   This flow rule is created by a request external to the PMD.
 * @param[in] hairpin
 *   Number of hairpin TX actions, 0 means classic flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item items[],
		    const struct rte_flow_action actions[],
		    bool external __rte_unused,
		    int hairpin __rte_unused,
		    struct rte_flow_error *error)
{
	int ret;
	uint64_t action_flags = 0;
	uint64_t item_flags = 0;
	uint64_t last_item = 0;
	uint8_t next_protocol = 0xff;
	uint16_t ether_type = 0;
	bool is_empty_vlan = false;
	uint16_t udp_dport = 0;

	if (items == NULL)
		return -1;
	ret = mlx5_flow_validate_attributes(dev, attr, error);
	if (ret < 0)
		return ret;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		int ret = 0;

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
							  false, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_eth *)
					 items->spec)->type;
				ether_type &=
					((const struct rte_flow_item_eth *)
					 items->mask)->type;
				if (ether_type == RTE_BE16(RTE_ETHER_TYPE_VLAN))
					is_empty_vlan = true;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_validate_item_vlan(items, item_flags,
							   dev, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
					      MLX5_FLOW_LAYER_INNER_VLAN) :
					     (MLX5_FLOW_LAYER_OUTER_L2 |
					      MLX5_FLOW_LAYER_OUTER_VLAN);
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_vlan *)
					 items->spec)->inner_type;
				ether_type &=
					((const struct rte_flow_item_vlan *)
					 items->mask)->inner_type;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			is_empty_vlan = false;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_validate_item_ipv4
						(items, item_flags,
						 last_item, ether_type, NULL,
						 MLX5_ITEM_RANGE_NOT_ACCEPTED,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
				next_protocol =
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
				next_protocol &=
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
							   last_item,
							   ether_type, NULL,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol &=
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			const struct rte_flow_item_udp *spec = items->spec;
			const struct rte_flow_item_udp *mask = items->mask;

			if (!mask)
				mask = &rte_flow_item_udp_mask;
			if (spec != NULL)
				udp_dport = rte_be_to_cpu_16
						(spec->hdr.dst_port &
						 mask->hdr.dst_port);

			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &rte_flow_item_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
							    items, item_flags,
							    attr, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags,
								dev, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
		case RTE_FLOW_ITEM_TYPE_ICMP6:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "ICMP/ICMP6 "
						  "item not supported");
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
		item_flags |= last_item;
	}
	if (is_empty_vlan)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "VLAN matching without vid specification is not supported");
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_validate_action_flag(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_validate_action_mark(actions,
							     action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr,
							      error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, item_flags,
							    error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = mlx5_flow_validate_action_count(dev, attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	/*
	 * Validate the drop action mutual exclusion with other actions.
	 * Drop action is mutually-exclusive with any other action, except for
	 * Count action.
	 */
	if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
	    (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Drop action is mutually-exclusive "
					  "with any other action, except for "
					  "Count action");
	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
					  "no fate action is found");
	return 0;
}

/**
 * Calculate the number of bytes needed for the action part of the Verbs
 * flow.
 *
 * @param[in] actions
 *   Pointer to the list of actions.
 *
 * @return
 *   The size of the memory needed for all actions.
 */
static int
flow_verbs_get_actions_size(const struct rte_flow_action actions[])
{
	int size = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			size += sizeof(struct ibv_flow_spec_action_drop);
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
			size += sizeof(struct ibv_flow_spec_counter_action);
#endif
			break;
		default:
			break;
		}
	}
	return size;
}
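
/*
 * The sizes computed here and in flow_verbs_get_items_size() below must stay
 * in sync with what the flow_verbs_translate_*() helpers actually append:
 * flow_verbs_prepare() checks their sum against MLX5_VERBS_MAX_SPEC_ACT_SIZE
 * before translation starts.
 */
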
/**
 * Calculate the number of bytes needed for the item part of the Verbs
 * flow.
 *
 * @param[in] items
 *   Pointer to the list of items.
 *
 * @return
 *   The size of the memory needed for all items.
 */
static int
flow_verbs_get_items_size(const struct rte_flow_item items[])
{
	int size = 0;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			size += sizeof(struct ibv_flow_spec_ipv4_ext);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			size += sizeof(struct ibv_flow_spec_ipv6);
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_gre);
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			size += sizeof(struct ibv_flow_spec_mpls);
			break;
#else
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#endif
		default:
			break;
		}
	}
	return size;
}
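
/*
 * Device flows are not allocated from the heap during rule creation:
 * flow_verbs_prepare() below hands out a slot from the per-thread workspace
 * (wks->flows[]), and only the persistent mlx5_flow_handle comes from the
 * indexed memory pool.
 */
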
/**
 * Internal preparation function. Allocate mlx5_flow with the required size.
 * The required size is calculated based on the actions and items. This
 * function also returns the detected actions and items for later use.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
 *   is set.
 */
static struct mlx5_flow *
flow_verbs_prepare(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr __rte_unused,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	size_t size = 0;
	uint32_t handle_idx = 0;
	struct mlx5_flow *dev_flow;
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	size += flow_verbs_get_actions_size(actions);
	size += flow_verbs_get_items_size(items);
	if (size > MLX5_VERBS_MAX_SPEC_ACT_SIZE) {
		rte_flow_error_set(error, E2BIG,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Verbs spec/action size too large");
		return NULL;
	}
	/* Check for workspace overflow to avoid corrupting memory. */
	if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
		rte_flow_error_set(error, ENOSPC,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not free temporary device flow");
		return NULL;
	}
	dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
					&handle_idx);
	if (!dev_handle) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not enough memory to create flow handle");
		return NULL;
	}
	MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
	dev_flow = &wks->flows[wks->flow_idx++];
	dev_flow->handle = dev_handle;
	dev_flow->handle_idx = handle_idx;
	/* Memcpy is used, only size needs to be cleared to 0. */
	dev_flow->verbs.size = 0;
	dev_flow->verbs.attr.num_of_specs = 0;
	dev_flow->ingress = attr->ingress;
	dev_flow->hash_fields = 0;
	/* Need to set transfer attribute: not supported in Verbs mode. */
	return dev_flow;
}

/**
 * Fill the flow with verb spec.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5 flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_translate(struct rte_eth_dev *dev,
		     struct mlx5_flow *dev_flow,
		     const struct rte_flow_attr *attr,
		     const struct rte_flow_item items[],
		     const struct rte_flow_action actions[],
		     struct rte_flow_error *error)
{
	uint64_t item_flags = 0;
	uint64_t action_flags = 0;
	uint64_t priority = attr->priority;
	uint32_t subpriority = 0;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct mlx5_flow_rss_desc *rss_desc;

	MLX5_ASSERT(wks);
	rss_desc = &wks->rss_desc;
	if (priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
		priority = priv->sh->flow_max_priority - 1;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		int ret;

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			flow_verbs_translate_action_flag(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			wks->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			flow_verbs_translate_action_mark(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_MARK;
			wks->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			flow_verbs_translate_action_drop(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_DROP;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			flow_verbs_translate_action_queue(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			flow_verbs_translate_action_rss(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_RSS;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_translate_action_count(dev_flow,
								actions,
								dev, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	dev_flow->act_flags = action_flags;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			flow_verbs_translate_item_eth(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					       MLX5_FLOW_LAYER_OUTER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			flow_verbs_translate_item_vlan(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
						MLX5_FLOW_LAYER_INNER_VLAN) :
					       (MLX5_FLOW_LAYER_OUTER_L2 |
						MLX5_FLOW_LAYER_OUTER_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			flow_verbs_translate_item_ipv4(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel,
					 MLX5_IPV4_LAYER_TYPES,
					 MLX5_IPV4_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			flow_verbs_translate_item_ipv6(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel,
					 MLX5_IPV6_LAYER_TYPES,
					 MLX5_IPV6_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			flow_verbs_translate_item_tcp(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L4;
			if (dev_flow->hash_fields != 0)
				dev_flow->hash_fields |=
					mlx5_flow_hashfields_adjust
					(rss_desc, tunnel, RTE_ETH_RSS_TCP,
					 (IBV_RX_HASH_SRC_PORT_TCP |
					  IBV_RX_HASH_DST_PORT_TCP));
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					       MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			flow_verbs_translate_item_udp(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L4;
			if (dev_flow->hash_fields != 0)
				dev_flow->hash_fields |=
					mlx5_flow_hashfields_adjust
					(rss_desc, tunnel, RTE_ETH_RSS_UDP,
					 (IBV_RX_HASH_SRC_PORT_UDP |
					  IBV_RX_HASH_DST_PORT_UDP));
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					       MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			flow_verbs_translate_item_vxlan(dev_flow, items,
							item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			flow_verbs_translate_item_vxlan_gpe(dev_flow, items,
							    item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			flow_verbs_translate_item_mpls(dev_flow, items,
						       item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_MPLS;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
	}
	/*
	 * GRE is translated after the loop so that the IP protocol fixup in
	 * flow_verbs_item_gre_ip_protocol_update() sees the final L3 specs.
	 */
	if (item_flags & MLX5_FLOW_LAYER_GRE)
		flow_verbs_translate_item_gre(dev_flow, items, item_flags);
	dev_flow->handle->layers = item_flags;
	/* Other members of attr will be ignored. */
	dev_flow->verbs.attr.priority =
		mlx5_flow_adjust_priority(dev, priority, subpriority);
	dev_flow->verbs.attr.port = (uint8_t)priv->dev_port;
	return 0;
}

/**
 * Remove the flow from the NIC but keep it in memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;
	uint32_t handle_idx;

	if (!flow)
		return;
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, handle, next) {
		if (handle->drv_flow) {
			claim_zero(mlx5_glue->destroy_flow(handle->drv_flow));
			handle->drv_flow = NULL;
		}
		/* hrxq is a union: only touch it when the fate action is a queue. */
		if (handle->rix_hrxq &&
		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
			mlx5_hrxq_release(dev, handle->rix_hrxq);
			handle->rix_hrxq = 0;
		}
		if (handle->vf_vlan.tag && handle->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
	}
}

/**
 * Remove the flow from the NIC and the memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;

	if (!flow)
		return;
	flow_verbs_remove(dev, flow);
	while (flow->dev_handles) {
		uint32_t tmp_idx = flow->dev_handles;

		handle = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
					tmp_idx);
		if (!handle)
			return;
		flow->dev_handles = handle->next.next;
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
				tmp_idx);
	}
	if (flow->counter) {
		flow_verbs_counter_release(dev, flow->counter);
		flow->counter = 0;
	}
}

/**
 * Apply the flow to the NIC.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow *dev_flow;
	struct mlx5_hrxq *hrxq;
	uint32_t dev_handles;
	int err;
	int idx;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
		dev_flow = &wks->flows[idx];
		handle = dev_flow->handle;
		if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
			MLX5_ASSERT(priv->drop_queue.hrxq);
			hrxq = priv->drop_queue.hrxq;
		} else {
			uint32_t hrxq_idx;
			struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;

			MLX5_ASSERT(rss_desc->queue_num);
			rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
			rss_desc->hash_fields = dev_flow->hash_fields;
			rss_desc->tunnel = !!(handle->layers &
					      MLX5_FLOW_LAYER_TUNNEL);
			rss_desc->shared_rss = 0;
			hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
					      hrxq_idx);
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			handle->rix_hrxq = hrxq_idx;
		}
		MLX5_ASSERT(hrxq);
		handle->drv_flow = mlx5_glue->create_flow
					(hrxq->qp, &dev_flow->verbs.attr);
		if (!handle->drv_flow) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
		if (priv->vmwa_context &&
		    handle->vf_vlan.tag && !handle->vf_vlan.created) {
			/*
			 * The rule contains the VLAN pattern.
			 * For VF we are going to create VLAN
			 * interface to make hypervisor set correct
			 * e-Switch vport context.
			 */
			mlx5_vlan_vmwa_acquire(dev, &handle->vf_vlan);
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       dev_handles, handle, next) {
		/* hrxq is a union: only touch it when the fate action is a queue. */
		if (handle->rix_hrxq &&
		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
			mlx5_hrxq_release(dev, handle->rix_hrxq);
			handle->rix_hrxq = 0;
		}
		if (handle->vf_vlan.tag && handle->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Query a flow.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_query(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 const struct rte_flow_action *actions,
		 void *data,
		 struct rte_flow_error *error)
{
	int ret = -EINVAL;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_counter_query(dev, flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	return ret;
}

static int
flow_verbs_sync_domain(struct rte_eth_dev *dev, uint32_t domains,
		       uint32_t flags)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(domains);
	RTE_SET_USED(flags);

	return 0;
}

const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
	.validate = flow_verbs_validate,
	.prepare = flow_verbs_prepare,
	.translate = flow_verbs_translate,
	.apply = flow_verbs_apply,
	.remove = flow_verbs_remove,
	.destroy = flow_verbs_destroy,
	.query = flow_verbs_query,
	.sync_domain = flow_verbs_sync_domain,
	.discover_priorities = flow_verbs_discover_priorities,
};