/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include <mlx5_glue.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_rx.h"

#define VERBS_SPEC_INNER(item_flags) \
	(!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0)

/* Verbs specification header. */
struct ibv_spec_header {
	enum ibv_flow_spec_type type;
	uint16_t size;
};

/**
 * Discover the maximum number of priorities available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] vprio
 *   Expected result variants.
 * @param[in] vprio_n
 *   Number of entries in @p vprio array.
 * @return
 *   Number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
static int
flow_verbs_discover_priorities(struct rte_eth_dev *dev,
			       const uint16_t *vprio, int vprio_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
			.port = (uint8_t)priv->dev_port,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	struct mlx5_hrxq *drop = priv->drop_queue.hrxq;
	int i;
	int priority = 0;

#if defined(HAVE_MLX5DV_DR_DEVX_PORT) || defined(HAVE_MLX5DV_DR_DEVX_PORT_V35)
	/* If DevX supported, driver must support 16 verbs flow priorities. */
	priority = 16;
	goto out;
#endif
	if (!drop->qp) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	for (i = 0; i != vprio_n; i++) {
		flow_attr.attr.priority = vprio[i] - 1;
		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
		priority = vprio[i];
	}
#if defined(HAVE_MLX5DV_DR_DEVX_PORT) || defined(HAVE_MLX5DV_DR_DEVX_PORT_V35)
out:
#endif
	DRV_LOG(INFO, "port %u supported flow priorities:"
		" 0-%d for ingress or egress root table,"
		" 0-%d for non-root table or transfer root table.",
		dev->data->port_id, priority - 2,
		MLX5_NON_ROOT_FLOW_MAX_PRIO - 1);
	return priority;
}
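
/*
 * Illustrative sketch (not part of the driver): how a caller might probe
 * the supported priority count with the function above. The candidate set
 * {8, 16} and the helper name are assumptions for illustration only.
 * Guarded out by the hypothetical MLX5_FLOW_VERBS_EXAMPLES macro so it is
 * never compiled into the driver.
 */
#ifdef MLX5_FLOW_VERBS_EXAMPLES
static void
example_probe_priorities(struct rte_eth_dev *dev)
{
	/* Candidate priority counts to try, lowest first (assumed values). */
	static const uint16_t vprio[] = { 8, 16 };
	int prio = flow_verbs_discover_priorities(dev, vprio, RTE_DIM(vprio));

	if (prio < 0)
		DRV_LOG(WARNING, "priority discovery failed: %d", prio);
	else
		DRV_LOG(INFO, "device supports %d flow priorities", prio);
}
#endif /* MLX5_FLOW_VERBS_EXAMPLES */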

/**
 * Get Verbs flow counter by index.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   mlx5 flow counter index in the container.
 * @param[out] ppool
 *   mlx5 flow counter pool in the container.
 *
 * @return
 *   A pointer to the counter, NULL otherwise.
 */
static struct mlx5_flow_counter *
flow_verbs_counter_get_by_idx(struct rte_eth_dev *dev,
			      uint32_t idx,
			      struct mlx5_flow_counter_pool **ppool)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool;

	idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
	pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
	MLX5_ASSERT(pool);
	if (ppool)
		*ppool = pool;
	return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
}

/**
 * Create Verbs flow counter with Verbs library.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] counter
 *   mlx5 flow counter object, contains the counter id,
 *   handle of created Verbs flow counter is returned
 *   in cs field (if counters are supported).
 *
 * @return
 *   0 on success, otherwise a negative errno value is returned
 *   and rte_errno is set.
 */
static int
flow_verbs_counter_create(struct rte_eth_dev *dev,
			  struct mlx5_flow_counter *counter)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->cdev->ctx;
	struct ibv_counter_set_init_attr init = {
			 .counter_set_id = counter->shared_info.id};

	counter->dcs_when_free = mlx5_glue->create_counter_set(ctx, &init);
	if (!counter->dcs_when_free) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	return 0;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->cdev->ctx;
	struct ibv_counters_init_attr init = {0};
	struct ibv_counter_attach_attr attach;
	int ret;

	memset(&attach, 0, sizeof(attach));
	counter->dcs_when_free = mlx5_glue->create_counters(ctx, &init);
	if (!counter->dcs_when_free) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	attach.counter_desc = IBV_COUNTER_PACKETS;
	attach.index = 0;
	ret = mlx5_glue->attach_counters(counter->dcs_when_free, &attach, NULL);
	if (!ret) {
		attach.counter_desc = IBV_COUNTER_BYTES;
		attach.index = 1;
		ret = mlx5_glue->attach_counters
					(counter->dcs_when_free, &attach, NULL);
	}
	if (ret) {
		claim_zero(mlx5_glue->destroy_counters(counter->dcs_when_free));
		counter->dcs_when_free = NULL;
		rte_errno = ret;
		return -ret;
	}
	return 0;
#else
	(void)dev;
	(void)counter;
	rte_errno = ENOTSUP;
	return -ENOTSUP;
#endif
}

/**
 * Get a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] id
 *   Counter identifier.
 *
 * @return
 *   Index to the counter, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t id __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt = NULL;
	uint32_t n_valid = cmng->n_valid;
	uint32_t pool_idx, cnt_idx;
	uint32_t i;
	int ret;

	for (pool_idx = 0; pool_idx < n_valid; ++pool_idx) {
		pool = cmng->pools[pool_idx];
		if (!pool)
			continue;
		cnt = TAILQ_FIRST(&pool->counters[0]);
		if (cnt)
			break;
	}
	if (!cnt) {
		struct mlx5_flow_counter_pool **pools;
		uint32_t size;

		if (n_valid == cmng->n) {
			/* Resize the container pool array. */
			size = sizeof(struct mlx5_flow_counter_pool *) *
			       (n_valid + MLX5_CNT_CONTAINER_RESIZE);
			pools = mlx5_malloc(MLX5_MEM_ZERO, size, 0,
					    SOCKET_ID_ANY);
			if (!pools)
				return 0;
			if (n_valid) {
				memcpy(pools, cmng->pools,
				       sizeof(struct mlx5_flow_counter_pool *) *
				       n_valid);
				mlx5_free(cmng->pools);
			}
			cmng->pools = pools;
			cmng->n += MLX5_CNT_CONTAINER_RESIZE;
		}
		/* Allocate memory for new pool. */
		size = sizeof(*pool) + sizeof(*cnt) * MLX5_COUNTERS_PER_POOL;
		pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
		if (!pool)
			return 0;
		for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
			cnt = MLX5_POOL_GET_CNT(pool, i);
			TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
		}
		cnt = MLX5_POOL_GET_CNT(pool, 0);
		cmng->pools[n_valid] = pool;
		pool_idx = n_valid;
		cmng->n_valid++;
	}
	TAILQ_REMOVE(&pool->counters[0], cnt, next);
	i = MLX5_CNT_ARRAY_IDX(pool, cnt);
	cnt_idx = MLX5_MAKE_CNT_IDX(pool_idx, i);
	/* Create counter with Verbs. */
	ret = flow_verbs_counter_create(dev, cnt);
	if (!ret) {
		cnt->dcs_when_active = cnt->dcs_when_free;
		cnt->hits = 0;
		cnt->bytes = 0;
		return cnt_idx;
	}
	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
	/* Some error occurred in Verbs library. */
	rte_errno = -ret;
	return 0;
}

/**
 * Release a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handler.
 */
static void
flow_verbs_counter_release(struct rte_eth_dev *dev, uint32_t counter)
{
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt;

	cnt = flow_verbs_counter_get_by_idx(dev, counter, &pool);
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	claim_zero(mlx5_glue->destroy_counter_set
			((struct ibv_counter_set *)cnt->dcs_when_active));
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	claim_zero(mlx5_glue->destroy_counters
			((struct ibv_counters *)cnt->dcs_when_active));
#endif
	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
}
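
/*
 * Illustrative sketch (not part of the driver): the counter index encoding
 * used above. Assuming MLX5_MAKE_CNT_IDX() lays an index out roughly as
 * pool_index * MLX5_COUNTERS_PER_POOL + offset + 1 (the +1 keeps index 0
 * meaning "no counter"), decoding reverses it exactly as in
 * flow_verbs_counter_get_by_idx(). The layout is an assumption here.
 * Guarded out by the hypothetical MLX5_FLOW_VERBS_EXAMPLES macro.
 */
#ifdef MLX5_FLOW_VERBS_EXAMPLES
static void
example_counter_index_roundtrip(void)
{
	uint32_t pool_idx = 2, offset = 5;
	/* Encode (assumed layout of MLX5_MAKE_CNT_IDX). */
	uint32_t idx = pool_idx * MLX5_COUNTERS_PER_POOL + offset + 1;
	/* Decode, mirroring flow_verbs_counter_get_by_idx(). */
	uint32_t dec = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);

	MLX5_ASSERT(dec / MLX5_COUNTERS_PER_POOL == pool_idx);
	MLX5_ASSERT(dec % MLX5_COUNTERS_PER_POOL == offset);
}
#endif /* MLX5_FLOW_VERBS_EXAMPLES */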

/**
 * Query a flow counter via Verbs library call.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused,
			 struct rte_flow *flow, void *data,
			 struct rte_flow_error *error)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	if (flow->counter) {
		struct mlx5_flow_counter_pool *pool;
		struct mlx5_flow_counter *cnt = flow_verbs_counter_get_by_idx
						(dev, flow->counter, &pool);
		struct rte_flow_query_count *qc = data;
		uint64_t counters[2] = {0, 0};
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
		struct ibv_query_counter_set_attr query_cs_attr = {
			.dcs_when_free = (struct ibv_counter_set *)
						cnt->dcs_when_active,
			.query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
		};
		struct ibv_counter_set_data query_out = {
			.out = counters,
			.outlen = 2 * sizeof(uint64_t),
		};
		int err = mlx5_glue->query_counter_set(&query_cs_attr,
						       &query_out);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
		int err = mlx5_glue->query_counters
			((struct ibv_counters *)cnt->dcs_when_active, counters,
			 RTE_DIM(counters),
			 IBV_READ_COUNTERS_ATTR_PREFER_CACHED);
#endif
		if (err)
			return rte_flow_error_set
				(error, err,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL,
				 "cannot read counter");
		qc->hits_set = 1;
		qc->bytes_set = 1;
		qc->hits = counters[0] - cnt->hits;
		qc->bytes = counters[1] - cnt->bytes;
		if (qc->reset) {
			cnt->hits = counters[0];
			cnt->bytes = counters[1];
		}
		return 0;
	}
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "flow does not have counter");
#else
	(void)flow;
	(void)data;
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "counters are not available");
#endif
}

/**
 * Add a verbs item specification into @p verbs.
 *
 * @param[out] verbs
 *   Pointer to verbs structure.
 * @param[in] src
 *   Create specification.
 * @param[in] size
 *   Size in bytes of the specification to copy.
 */
static void
flow_verbs_spec_add(struct mlx5_flow_verbs_workspace *verbs,
		    void *src, unsigned int size)
{
	void *dst;

	if (!verbs)
		return;
	MLX5_ASSERT(verbs->specs);
	dst = (void *)(verbs->specs + verbs->size);
	memcpy(dst, src, size);
	++verbs->attr.num_of_specs;
	verbs->size += size;
}
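
/*
 * Illustrative sketch (not part of the driver): how specs pack into the
 * Verbs attribute buffer. struct ibv_flow_attr is immediately followed in
 * memory by num_of_specs variable-size specs, each starting with an
 * ibv_spec_header; flow_verbs_spec_add() appends each new spec at
 * specs + size. Assumes a freshly reset workspace (size and num_of_specs
 * zero) with a large enough specs buffer. Guarded out by the hypothetical
 * MLX5_FLOW_VERBS_EXAMPLES macro.
 */
#ifdef MLX5_FLOW_VERBS_EXAMPLES
static void
example_spec_layout(struct mlx5_flow_verbs_workspace *verbs)
{
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH,
		.size = sizeof(eth),
	};
	struct ibv_flow_spec_action_drop drop = {
		.type = IBV_FLOW_SPEC_ACTION_DROP,
		.size = sizeof(drop),
	};

	flow_verbs_spec_add(verbs, &eth, sizeof(eth));
	flow_verbs_spec_add(verbs, &drop, sizeof(drop));
	/* The two specs are now packed back to back after verbs->attr. */
	MLX5_ASSERT(verbs->attr.num_of_specs == 2);
	MLX5_ASSERT(verbs->size == sizeof(eth) + sizeof(drop));
}
#endif /* MLX5_FLOW_VERBS_EXAMPLES */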

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_eth(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	const unsigned int size = sizeof(struct ibv_flow_spec_eth);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_eth_mask;
	if (spec) {
		unsigned int i;

		memcpy(&eth.val.dst_mac, spec->dst.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		memcpy(&eth.val.src_mac, spec->src.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		eth.val.ether_type = spec->type;
		memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		memcpy(&eth.mask.src_mac, mask->src.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		eth.mask.ether_type = mask->type;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) {
			eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
			eth.val.src_mac[i] &= eth.mask.src_mac[i];
		}
		eth.val.ether_type &= eth.mask.ether_type;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
}

/**
 * Update the VLAN tag in the Verbs Ethernet specification.
 * This function assumes that the input is valid and there is space to add
 * the requested item.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] eth
 *   Verbs structure containing the VLAN information to copy.
 */
static void
flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
			    struct ibv_flow_spec_eth *eth)
{
	unsigned int i;
	const enum ibv_flow_spec_type search = eth->type;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			struct ibv_flow_spec_eth *e =
				(struct ibv_flow_spec_eth *)hdr;

			e->val.vlan_tag = eth->val.vlan_tag;
			e->mask.vlan_tag = eth->mask.vlan_tag;
			e->val.ether_type = eth->val.ether_type;
			e->mask.ether_type = eth->mask.ether_type;
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_eth);
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};
	const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
				      MLX5_FLOW_LAYER_OUTER_L2;

	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	if (spec) {
		eth.val.vlan_tag = spec->tci;
		eth.mask.vlan_tag = mask->tci;
		eth.val.vlan_tag &= eth.mask.vlan_tag;
		eth.val.ether_type = spec->inner_type;
		eth.mask.ether_type = mask->inner_type;
		eth.val.ether_type &= eth.mask.ether_type;
	}
	if (!(item_flags & l2m))
		flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
	else
		flow_verbs_item_vlan_update(&dev_flow->verbs.attr, &eth);
	if (!tunnel)
		dev_flow->handle->vf_vlan.tag =
			rte_be_to_cpu_16(spec->tci) & 0x0fff;
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv4(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
	struct ibv_flow_spec_ipv4_ext ipv4 = {
		.type = IBV_FLOW_SPEC_IPV4_EXT | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	if (spec) {
		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = spec->hdr.src_addr,
			.dst_ip = spec->hdr.dst_addr,
			.proto = spec->hdr.next_proto_id,
			.tos = spec->hdr.type_of_service,
		};
		ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = mask->hdr.src_addr,
			.dst_ip = mask->hdr.dst_addr,
			.proto = mask->hdr.next_proto_id,
			.tos = mask->hdr.type_of_service,
		};
		/* Remove unwanted bits from values. */
		ipv4.val.src_ip &= ipv4.mask.src_ip;
		ipv4.val.dst_ip &= ipv4.mask.dst_ip;
		ipv4.val.proto &= ipv4.mask.proto;
		ipv4.val.tos &= ipv4.mask.tos;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv4, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
	struct ibv_flow_spec_ipv6 ipv6 = {
		.type = IBV_FLOW_SPEC_IPV6 | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	if (spec) {
		unsigned int i;
		uint32_t vtc_flow_val;
		uint32_t vtc_flow_mask;

		memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
		       RTE_DIM(ipv6.val.src_ip));
		memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
		       RTE_DIM(ipv6.val.dst_ip));
		memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
		       RTE_DIM(ipv6.mask.src_ip));
		memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
		       RTE_DIM(ipv6.mask.dst_ip));
		vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
		vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
		ipv6.val.flow_label =
			rte_cpu_to_be_32((vtc_flow_val & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.val.traffic_class = (vtc_flow_val & RTE_IPV6_HDR_TC_MASK) >>
					 RTE_IPV6_HDR_TC_SHIFT;
		ipv6.val.next_hdr = spec->hdr.proto;
		ipv6.mask.flow_label =
			rte_cpu_to_be_32((vtc_flow_mask & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.mask.traffic_class = (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
					  RTE_IPV6_HDR_TC_SHIFT;
		ipv6.mask.next_hdr = mask->hdr.proto;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
			ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
			ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
		}
		ipv6.val.flow_label &= ipv6.mask.flow_label;
		ipv6.val.traffic_class &= ipv6.mask.traffic_class;
		ipv6.val.next_hdr &= ipv6.mask.next_hdr;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv6, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_tcp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp tcp = {
		.type = IBV_FLOW_SPEC_TCP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_tcp_mask;
	if (spec) {
		tcp.val.dst_port = spec->hdr.dst_port;
		tcp.val.src_port = spec->hdr.src_port;
		tcp.mask.dst_port = mask->hdr.dst_port;
		tcp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		tcp.val.src_port &= tcp.mask.src_port;
		tcp.val.dst_port &= tcp.mask.dst_port;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &tcp, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_udp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp udp = {
		.type = IBV_FLOW_SPEC_UDP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_udp_mask;
	if (spec) {
		udp.val.dst_port = spec->hdr.dst_port;
		udp.val.src_port = spec->hdr.src_port;
		udp.mask.dst_port = mask->hdr.dst_port;
		udp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		udp.val.src_port &= udp.mask.src_port;
		udp.val.dst_port &= udp.mask.dst_port;
	}
	item++;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID)
		item++;
	if (!(udp.val.dst_port & udp.mask.dst_port)) {
		switch ((item)->type) {
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN_GPE);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			udp.val.dst_port = htons(MLX5_UDP_PORT_MPLS);
			udp.mask.dst_port = 0xffff;
			break;
		default:
			break;
		}
	}

	flow_verbs_spec_add(&dev_flow->verbs, &udp, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan(struct mlx5_flow *dev_flow,
				const struct rte_flow_item *item,
				uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan, size);
}
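
/*
 * Illustrative sketch (not part of the driver): how the 24-bit VXLAN VNI
 * lands in the 32-bit Verbs tunnel_id above. The VNI bytes are copied into
 * id.vni[1..3], leaving id.vni[0] zero, so the byte layout is identical on
 * any endianness. Guarded out by the hypothetical MLX5_FLOW_VERBS_EXAMPLES
 * macro.
 */
#ifdef MLX5_FLOW_VERBS_EXAMPLES
static void
example_vni_packing(void)
{
	const uint8_t vni[3] = { 0x12, 0x34, 0x56 }; /* VNI 0x123456. */
	union {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	memcpy(&id.vni[1], vni, 3);
	/* Byte-wise the word now holds 00 12 34 56. */
	MLX5_ASSERT(id.vni[0] == 0 && id.vni[3] == 0x56);
}
#endif /* MLX5_FLOW_VERBS_EXAMPLES */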

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan_gpe(struct mlx5_flow *dev_flow,
				    const struct rte_flow_item *item,
				    uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan_gpe = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_gpe_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan_gpe.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan_gpe.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan_gpe, size);
}

/**
 * Update the protocol in Verbs IPv4/IPv6 spec.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] search
 *   Specification type to search in order to update the IP protocol.
 * @param[in] protocol
 *   Protocol value to set if none is present in the specification.
 */
static void
flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
				       enum ibv_flow_spec_type search,
				       uint8_t protocol)
{
	unsigned int i;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	if (!attr)
		return;
	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			union {
				struct ibv_flow_spec_ipv4_ext *ipv4;
				struct ibv_flow_spec_ipv6 *ipv6;
			} ip;

			switch (search) {
			case IBV_FLOW_SPEC_IPV4_EXT:
				ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
				if (!ip.ipv4->val.proto) {
					ip.ipv4->val.proto = protocol;
					ip.ipv4->mask.proto = 0xff;
				}
				break;
			case IBV_FLOW_SPEC_IPV6:
				ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
				if (!ip.ipv6->val.next_hdr) {
					ip.ipv6->val.next_hdr = protocol;
					ip.ipv6->mask.next_hdr = 0xff;
				}
				break;
			default:
				break;
			}
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item __rte_unused,
			      uint64_t item_flags)
{
	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel tunnel = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
#else
	const struct rte_flow_item_gre *spec = item->spec;
	const struct rte_flow_item_gre *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_gre);
	struct ibv_flow_spec_gre tunnel = {
		.type = IBV_FLOW_SPEC_GRE,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_gre_mask;
	if (spec) {
		tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
		tunnel.val.protocol = spec->protocol;
		tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
		tunnel.mask.protocol = mask->protocol;
		/* Remove unwanted bits from values. */
		tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
		tunnel.val.protocol &= tunnel.mask.protocol;
		tunnel.val.key &= tunnel.mask.key;
	}
#endif
	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV4_EXT,
						       IPPROTO_GRE);
	else
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV6,
						       IPPROTO_GRE);
	flow_verbs_spec_add(verbs, &tunnel, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_mpls(struct mlx5_flow *dev_flow __rte_unused,
			       const struct rte_flow_item *item __rte_unused,
			       uint64_t item_flags __rte_unused)
{
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	const struct rte_flow_item_mpls *spec = item->spec;
	const struct rte_flow_item_mpls *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_mpls);
	struct ibv_flow_spec_mpls mpls = {
		.type = IBV_FLOW_SPEC_MPLS,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_mpls_mask;
	if (spec) {
		memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
		memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
		/* Remove unwanted bits from values. */
		mpls.val.label &= mpls.mask.label;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &mpls, size);
#endif
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_drop
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
	struct ibv_flow_spec_action_drop drop = {
		.type = IBV_FLOW_SPEC_ACTION_DROP,
		.size = size,
	};

	flow_verbs_spec_add(&dev_flow->verbs, &drop, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_queue(struct mlx5_flow_rss_desc *rss_desc,
				  const struct rte_flow_action *action)
{
	const struct rte_flow_action_queue *queue = action->conf;

	rss_desc->queue[0] = queue->index;
	rss_desc->queue_num = 1;
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_rss(struct mlx5_flow_rss_desc *rss_desc,
				const struct rte_flow_action *action)
{
	const struct rte_flow_action_rss *rss = action->conf;
	const uint8_t *rss_key;

	memcpy(rss_desc->queue, rss->queue, rss->queue_num * sizeof(uint16_t));
	rss_desc->queue_num = rss->queue_num;
	/* NULL RSS key indicates default RSS key. */
	rss_key = !rss->key ? rss_hash_default_key : rss->key;
	memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	/*
	 * rss->level and rss.types should be set in advance when expanding
	 * items for RSS.
	 */
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_flag
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_mark(struct mlx5_flow *dev_flow,
				 const struct rte_flow_action *action)
{
	const struct rte_flow_action_mark *mark = action->conf;
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(mark->id),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}
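
/*
 * Illustrative sketch (not part of the driver): a QUEUE action is handled
 * above as a degenerate single-queue RSS, so both fates converge on the
 * same rss_desc bookkeeping. The queue index value is an assumption for
 * illustration. Guarded out by the hypothetical MLX5_FLOW_VERBS_EXAMPLES
 * macro.
 */
#ifdef MLX5_FLOW_VERBS_EXAMPLES
static void
example_queue_as_rss(struct mlx5_flow_rss_desc *rss_desc)
{
	const struct rte_flow_action_queue queue = { .index = 3 };
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_QUEUE,
		.conf = &queue,
	};

	flow_verbs_translate_action_queue(rss_desc, &action);
	/* The single queue is recorded exactly like a one-entry RSS set. */
	MLX5_ASSERT(rss_desc->queue_num == 1);
	MLX5_ASSERT(rss_desc->queue[0] == 3);
}
#endif /* MLX5_FLOW_VERBS_EXAMPLES */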

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise a negative errno value is returned and rte_errno
 *   is set.
 */
static int
flow_verbs_translate_action_count(struct mlx5_flow *dev_flow,
				  const struct rte_flow_action *action,
				  struct rte_eth_dev *dev,
				  struct rte_flow_error *error)
{
	const struct rte_flow_action_count *count = action->conf;
	struct rte_flow *flow = dev_flow->flow;
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt = NULL;
	unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
	struct ibv_flow_spec_counter_action counter = {
		.type = IBV_FLOW_SPEC_ACTION_COUNT,
		.size = size,
	};
#endif

	if (!flow->counter) {
		flow->counter = flow_verbs_counter_new(dev, count->id);
		if (!flow->counter)
			return rte_flow_error_set(error, rte_errno,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  action,
						  "cannot get counter"
						  " context.");
	}
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	counter.counter_set_handle =
		((struct ibv_counter_set *)cnt->dcs_when_active)->handle;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	counter.counters = (struct ibv_counters *)cnt->dcs_when_active;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#endif
	return 0;
}

/**
 * Internal validation function. For validating both actions and items.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[in] external
 *   This flow rule is created by a request external to the PMD.
 * @param[in] hairpin
 *   Number of hairpin TX actions, 0 means classic flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item items[],
		    const struct rte_flow_action actions[],
		    bool external __rte_unused,
		    int hairpin __rte_unused,
		    struct rte_flow_error *error)
{
	int ret;
	uint64_t action_flags = 0;
	uint64_t item_flags = 0;
	uint64_t last_item = 0;
	uint8_t next_protocol = 0xff;
	uint16_t ether_type = 0;
	bool is_empty_vlan = false;
	uint16_t udp_dport = 0;

	if (items == NULL)
		return -1;
	ret = mlx5_flow_validate_attributes(dev, attr, error);
	if (ret < 0)
		return ret;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		int ret = 0;

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
							  false, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_eth *)
					 items->spec)->type;
				ether_type &=
					((const struct rte_flow_item_eth *)
					 items->mask)->type;
				if (ether_type == RTE_BE16(RTE_ETHER_TYPE_VLAN))
					is_empty_vlan = true;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_validate_item_vlan(items, item_flags,
							   dev, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
					      MLX5_FLOW_LAYER_INNER_VLAN) :
					     (MLX5_FLOW_LAYER_OUTER_L2 |
					      MLX5_FLOW_LAYER_OUTER_VLAN);
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_vlan *)
					 items->spec)->inner_type;
				ether_type &=
					((const struct rte_flow_item_vlan *)
					 items->mask)->inner_type;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			is_empty_vlan = false;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_validate_item_ipv4
						(items, item_flags,
						 last_item, ether_type, NULL,
						 MLX5_ITEM_RANGE_NOT_ACCEPTED,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
				next_protocol =
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
				next_protocol &=
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
							   last_item,
							   ether_type, NULL,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol &=
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			const struct rte_flow_item_udp *spec = items->spec;
			const struct rte_flow_item_udp *mask = items->mask;

			if (!mask)
				mask = &rte_flow_item_udp_mask;
			if (spec != NULL)
				udp_dport = rte_be_to_cpu_16
						(spec->hdr.dst_port &
						 mask->hdr.dst_port);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &rte_flow_item_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
							    items, item_flags,
							    attr, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags,
								dev, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
		case RTE_FLOW_ITEM_TYPE_ICMP6:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "ICMP/ICMP6 "
						  "item not supported");
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
		item_flags |= last_item;
	}
	if (is_empty_vlan)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			"VLAN matching without vid specification is not supported");
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_validate_action_flag(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_validate_action_mark(actions,
							     action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr,
							      error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, item_flags,
							    error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = mlx5_flow_validate_action_count(dev, attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	/*
	 * Validate the drop action mutual exclusion with other actions.
	 * Drop action is mutually-exclusive with any other action, except for
	 * Count action.
	 */
	if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
	    (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Drop action is mutually-exclusive "
					  "with any other action, except for "
					  "Count action");
	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
					  "no fate action is found");
	return 0;
}
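
/*
 * Illustrative sketch (not part of the driver): a minimal pattern/action
 * list that this validator accepts - ETH / IPV4 / END matched with
 * QUEUE / END. The attribute and conf values are assumptions for
 * illustration. Guarded out by the hypothetical MLX5_FLOW_VERBS_EXAMPLES
 * macro.
 */
#ifdef MLX5_FLOW_VERBS_EXAMPLES
static int
example_validate(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item items[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* external = false, hairpin = 0 (classic flow). */
	return flow_verbs_validate(dev, &attr, items, actions, false, 0,
				   error);
}
#endif /* MLX5_FLOW_VERBS_EXAMPLES */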

/**
 * Calculate the size in bytes needed for the action part of the Verbs flow.
 *
 * @param[in] actions
 *   Pointer to the list of actions.
 *
 * @return
 *   The size of the memory needed for all actions.
 */
static int
flow_verbs_get_actions_size(const struct rte_flow_action actions[])
{
	int size = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			size += sizeof(struct ibv_flow_spec_action_drop);
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
			size += sizeof(struct ibv_flow_spec_counter_action);
#endif
			break;
		default:
			break;
		}
	}
	return size;
}

/**
 * Calculate the size in bytes needed for the item part of the Verbs flow.
 *
 * @param[in] items
 *   Pointer to the list of items.
 *
 * @return
 *   The size of the memory needed for all items.
 */
static int
flow_verbs_get_items_size(const struct rte_flow_item items[])
{
	int size = 0;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			size += sizeof(struct ibv_flow_spec_ipv4_ext);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			size += sizeof(struct ibv_flow_spec_ipv6);
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_gre);
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			size += sizeof(struct ibv_flow_spec_mpls);
			break;
#else
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#endif
		default:
			break;
		}
	}
	return size;
}
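
/*
 * Illustrative sketch (not part of the driver): the size accounting for the
 * ETH / IPV4 pattern used in the validation sketch above - one
 * ibv_flow_spec_eth plus one ibv_flow_spec_ipv4_ext. Guarded out by the
 * hypothetical MLX5_FLOW_VERBS_EXAMPLES macro.
 */
#ifdef MLX5_FLOW_VERBS_EXAMPLES
static void
example_items_size(void)
{
	const struct rte_flow_item items[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	MLX5_ASSERT(flow_verbs_get_items_size(items) ==
		    (int)(sizeof(struct ibv_flow_spec_eth) +
			  sizeof(struct ibv_flow_spec_ipv4_ext)));
}
#endif /* MLX5_FLOW_VERBS_EXAMPLES */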

/**
 * Internal preparation function. Allocate mlx5_flow with the required size.
 * The required size is calculated based on the actions and items. This
 * function also returns the detected actions and items for later use.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
 *   is set.
 */
static struct mlx5_flow *
flow_verbs_prepare(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr __rte_unused,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	size_t size = 0;
	uint32_t handle_idx = 0;
	struct mlx5_flow *dev_flow;
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	size += flow_verbs_get_actions_size(actions);
	size += flow_verbs_get_items_size(items);
	if (size > MLX5_VERBS_MAX_SPEC_ACT_SIZE) {
		rte_flow_error_set(error, E2BIG,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Verbs spec/action size too large");
		return NULL;
	}
	/* Guard against corrupting the memory. */
	if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
		rte_flow_error_set(error, ENOSPC,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not free temporary device flow");
		return NULL;
	}
	dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
					&handle_idx);
	if (!dev_handle) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not enough memory to create flow handle");
		return NULL;
	}
	MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
	dev_flow = &wks->flows[wks->flow_idx++];
	dev_flow->handle = dev_handle;
	dev_flow->handle_idx = handle_idx;
	/* Memcpy is used, only size needs to be cleared to 0. */
	dev_flow->verbs.size = 0;
	dev_flow->verbs.attr.num_of_specs = 0;
	dev_flow->ingress = attr->ingress;
	dev_flow->hash_fields = 0;
	/* Need to set transfer attribute: not supported in Verbs mode. */
	return dev_flow;
}

/**
 * Fill the flow with Verbs spec.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5 flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_translate(struct rte_eth_dev *dev,
		     struct mlx5_flow *dev_flow,
		     const struct rte_flow_attr *attr,
		     const struct rte_flow_item items[],
		     const struct rte_flow_action actions[],
		     struct rte_flow_error *error)
{
	uint64_t item_flags = 0;
	uint64_t action_flags = 0;
	uint64_t priority = attr->priority;
	uint32_t subpriority = 0;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct mlx5_flow_rss_desc *rss_desc;

	MLX5_ASSERT(wks);
	rss_desc = &wks->rss_desc;
	if (priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
		priority = priv->sh->flow_max_priority - 1;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		int ret;

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			flow_verbs_translate_action_flag(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			dev_flow->handle->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			flow_verbs_translate_action_mark(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_MARK;
			dev_flow->handle->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			flow_verbs_translate_action_drop(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_DROP;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			flow_verbs_translate_action_queue(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			flow_verbs_translate_action_rss(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_RSS;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_translate_action_count(dev_flow,
								actions,
								dev, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	dev_flow->act_flags = action_flags;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			flow_verbs_translate_item_eth(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					       MLX5_FLOW_LAYER_OUTER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			flow_verbs_translate_item_vlan(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
						MLX5_FLOW_LAYER_INNER_VLAN) :
					       (MLX5_FLOW_LAYER_OUTER_L2 |
						MLX5_FLOW_LAYER_OUTER_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			flow_verbs_translate_item_ipv4(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel,
					 MLX5_IPV4_LAYER_TYPES,
					 MLX5_IPV4_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			flow_verbs_translate_item_ipv6(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel,
					 MLX5_IPV6_LAYER_TYPES,
					 MLX5_IPV6_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			flow_verbs_translate_item_tcp(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L4;
			if (dev_flow->hash_fields != 0)
				dev_flow->hash_fields |=
					mlx5_flow_hashfields_adjust
					(rss_desc, tunnel, RTE_ETH_RSS_TCP,
					 (IBV_RX_HASH_SRC_PORT_TCP |
					  IBV_RX_HASH_DST_PORT_TCP));
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					       MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			flow_verbs_translate_item_udp(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L4;
			if (dev_flow->hash_fields != 0)
				dev_flow->hash_fields |=
					mlx5_flow_hashfields_adjust
					(rss_desc, tunnel, RTE_ETH_RSS_UDP,
					 (IBV_RX_HASH_SRC_PORT_UDP |
					  IBV_RX_HASH_DST_PORT_UDP));
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					       MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			flow_verbs_translate_item_vxlan(dev_flow, items,
							item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			flow_verbs_translate_item_vxlan_gpe(dev_flow, items,
							    item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			flow_verbs_translate_item_gre(dev_flow, items,
						      item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			flow_verbs_translate_item_mpls(dev_flow, items,
						       item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_MPLS;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
	}
	dev_flow->handle->layers = item_flags;
	/* Other members of attr will be ignored. */
	dev_flow->verbs.attr.priority =
		mlx5_flow_adjust_priority(dev, priority, subpriority);
	dev_flow->verbs.attr.port = (uint8_t)priv->dev_port;
	return 0;
}

/**
 * Remove the flow from the NIC but keep it in memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;
	uint32_t handle_idx;

	if (!flow)
		return;
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, handle, next) {
		if (handle->drv_flow) {
			claim_zero(mlx5_glue->destroy_flow(handle->drv_flow));
			handle->drv_flow = NULL;
		}
		/* hrxq is union, don't touch it unless the flag is set. */
		if (handle->rix_hrxq &&
		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
			mlx5_hrxq_release(dev, handle->rix_hrxq);
			handle->rix_hrxq = 0;
		}
		if (handle->vf_vlan.tag && handle->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
	}
}

/**
 * Remove the flow from the NIC and the memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;

	if (!flow)
		return;
	flow_verbs_remove(dev, flow);
	while (flow->dev_handles) {
		uint32_t tmp_idx = flow->dev_handles;

		handle = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
					tmp_idx);
		if (!handle)
			return;
		flow->dev_handles = handle->next.next;
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
				tmp_idx);
	}
	if (flow->counter) {
		flow_verbs_counter_release(dev, flow->counter);
		flow->counter = 0;
	}
}
1971 */ 1972 mlx5_vlan_vmwa_acquire(dev, &handle->vf_vlan); 1973 } 1974 } 1975 return 0; 1976 error: 1977 err = rte_errno; /* Save rte_errno before cleanup. */ 1978 SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, 1979 dev_handles, handle, next) { 1980 /* hrxq is union, don't touch it only the flag is set. */ 1981 if (handle->rix_hrxq && 1982 handle->fate_action == MLX5_FLOW_FATE_QUEUE) { 1983 mlx5_hrxq_release(dev, handle->rix_hrxq); 1984 handle->rix_hrxq = 0; 1985 } 1986 if (handle->vf_vlan.tag && handle->vf_vlan.created) 1987 mlx5_vlan_vmwa_release(dev, &handle->vf_vlan); 1988 } 1989 rte_errno = err; /* Restore rte_errno. */ 1990 return -rte_errno; 1991 } 1992 1993 /** 1994 * Query a flow. 1995 * 1996 * @see rte_flow_query() 1997 * @see rte_flow_ops 1998 */ 1999 static int 2000 flow_verbs_query(struct rte_eth_dev *dev, 2001 struct rte_flow *flow, 2002 const struct rte_flow_action *actions, 2003 void *data, 2004 struct rte_flow_error *error) 2005 { 2006 int ret = -EINVAL; 2007 2008 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2009 switch (actions->type) { 2010 case RTE_FLOW_ACTION_TYPE_VOID: 2011 break; 2012 case RTE_FLOW_ACTION_TYPE_COUNT: 2013 ret = flow_verbs_counter_query(dev, flow, data, error); 2014 break; 2015 default: 2016 return rte_flow_error_set(error, ENOTSUP, 2017 RTE_FLOW_ERROR_TYPE_ACTION, 2018 actions, 2019 "action not supported"); 2020 } 2021 } 2022 return ret; 2023 } 2024 2025 static int 2026 flow_verbs_sync_domain(struct rte_eth_dev *dev, uint32_t domains, 2027 uint32_t flags) 2028 { 2029 RTE_SET_USED(dev); 2030 RTE_SET_USED(domains); 2031 RTE_SET_USED(flags); 2032 2033 return 0; 2034 } 2035 2036 const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = { 2037 .validate = flow_verbs_validate, 2038 .prepare = flow_verbs_prepare, 2039 .translate = flow_verbs_translate, 2040 .apply = flow_verbs_apply, 2041 .remove = flow_verbs_remove, 2042 .destroy = flow_verbs_destroy, 2043 .query = flow_verbs_query, 2044 .sync_domain = flow_verbs_sync_domain, 2045 .discover_priorities = flow_verbs_discover_priorities, 2046 }; 2047