1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2018 Mellanox Technologies, Ltd 3 */ 4 5 #include <sys/queue.h> 6 #include <stdalign.h> 7 #include <stdint.h> 8 #include <string.h> 9 10 /* Verbs header. */ 11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ 12 #ifdef PEDANTIC 13 #pragma GCC diagnostic ignored "-Wpedantic" 14 #endif 15 #include <infiniband/verbs.h> 16 #ifdef PEDANTIC 17 #pragma GCC diagnostic error "-Wpedantic" 18 #endif 19 20 #include <rte_common.h> 21 #include <rte_ether.h> 22 #include <rte_ethdev_driver.h> 23 #include <rte_flow.h> 24 #include <rte_flow_driver.h> 25 #include <rte_malloc.h> 26 #include <rte_ip.h> 27 #include <rte_gre.h> 28 29 #include "mlx5.h" 30 #include "mlx5_defs.h" 31 #include "mlx5_glue.h" 32 #include "mlx5_flow.h" 33 #include "mlx5_prm.h" 34 #include "mlx5_rxtx.h" 35 36 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 37 38 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS 39 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0 40 #endif 41 42 #ifndef HAVE_MLX5DV_DR_ESWITCH 43 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB 44 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0 45 #endif 46 #endif 47 48 #ifndef HAVE_MLX5DV_DR 49 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1 50 #endif 51 52 union flow_dv_attr { 53 struct { 54 uint32_t valid:1; 55 uint32_t ipv4:1; 56 uint32_t ipv6:1; 57 uint32_t tcp:1; 58 uint32_t udp:1; 59 uint32_t reserved:27; 60 }; 61 uint32_t attr; 62 }; 63 64 /** 65 * Initialize flow attributes structure according to flow items' types. 66 * 67 * @param[in] item 68 * Pointer to item specification. 69 * @param[out] attr 70 * Pointer to flow attributes structure. 71 */ 72 static void 73 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr) 74 { 75 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { 76 switch (item->type) { 77 case RTE_FLOW_ITEM_TYPE_IPV4: 78 attr->ipv4 = 1; 79 break; 80 case RTE_FLOW_ITEM_TYPE_IPV6: 81 attr->ipv6 = 1; 82 break; 83 case RTE_FLOW_ITEM_TYPE_UDP: 84 attr->udp = 1; 85 break; 86 case RTE_FLOW_ITEM_TYPE_TCP: 87 attr->tcp = 1; 88 break; 89 default: 90 break; 91 } 92 } 93 attr->valid = 1; 94 } 95 96 struct field_modify_info { 97 uint32_t size; /* Size of field in protocol header, in bytes. */ 98 uint32_t offset; /* Offset of field in protocol header, in bytes. */ 99 enum mlx5_modification_field id; 100 }; 101 102 struct field_modify_info modify_eth[] = { 103 {4, 0, MLX5_MODI_OUT_DMAC_47_16}, 104 {2, 4, MLX5_MODI_OUT_DMAC_15_0}, 105 {4, 6, MLX5_MODI_OUT_SMAC_47_16}, 106 {2, 10, MLX5_MODI_OUT_SMAC_15_0}, 107 {0, 0, 0}, 108 }; 109 110 struct field_modify_info modify_ipv4[] = { 111 {1, 8, MLX5_MODI_OUT_IPV4_TTL}, 112 {4, 12, MLX5_MODI_OUT_SIPV4}, 113 {4, 16, MLX5_MODI_OUT_DIPV4}, 114 {0, 0, 0}, 115 }; 116 117 struct field_modify_info modify_ipv6[] = { 118 {1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT}, 119 {4, 8, MLX5_MODI_OUT_SIPV6_127_96}, 120 {4, 12, MLX5_MODI_OUT_SIPV6_95_64}, 121 {4, 16, MLX5_MODI_OUT_SIPV6_63_32}, 122 {4, 20, MLX5_MODI_OUT_SIPV6_31_0}, 123 {4, 24, MLX5_MODI_OUT_DIPV6_127_96}, 124 {4, 28, MLX5_MODI_OUT_DIPV6_95_64}, 125 {4, 32, MLX5_MODI_OUT_DIPV6_63_32}, 126 {4, 36, MLX5_MODI_OUT_DIPV6_31_0}, 127 {0, 0, 0}, 128 }; 129 130 struct field_modify_info modify_udp[] = { 131 {2, 0, MLX5_MODI_OUT_UDP_SPORT}, 132 {2, 2, MLX5_MODI_OUT_UDP_DPORT}, 133 {0, 0, 0}, 134 }; 135 136 struct field_modify_info modify_tcp[] = { 137 {2, 0, MLX5_MODI_OUT_TCP_SPORT}, 138 {2, 2, MLX5_MODI_OUT_TCP_DPORT}, 139 {0, 0, 0}, 140 }; 141 142 /** 143 * Acquire the synchronizing object to protect multithreaded access 144 * to shared dv context. Lock occurs only if context is actually 145 * shared, i.e. we have multiport IB device and representors are 146 * created. 147 * 148 * @param[in] dev 149 * Pointer to the rte_eth_dev structure. 150 */ 151 static void 152 flow_d_shared_lock(struct rte_eth_dev *dev) 153 { 154 struct mlx5_priv *priv = dev->data->dev_private; 155 struct mlx5_ibv_shared *sh = priv->sh; 156 157 if (sh->dv_refcnt > 1) { 158 int ret; 159 160 ret = pthread_mutex_lock(&sh->dv_mutex); 161 assert(!ret); 162 (void)ret; 163 } 164 } 165 166 static void 167 flow_d_shared_unlock(struct rte_eth_dev *dev) 168 { 169 struct mlx5_priv *priv = dev->data->dev_private; 170 struct mlx5_ibv_shared *sh = priv->sh; 171 172 if (sh->dv_refcnt > 1) { 173 int ret; 174 175 ret = pthread_mutex_unlock(&sh->dv_mutex); 176 assert(!ret); 177 (void)ret; 178 } 179 } 180 181 /** 182 * Convert modify-header action to DV specification. 183 * 184 * @param[in] item 185 * Pointer to item specification. 186 * @param[in] field 187 * Pointer to field modification information. 188 * @param[in,out] resource 189 * Pointer to the modify-header resource. 190 * @param[in] type 191 * Type of modification. 192 * @param[out] error 193 * Pointer to the error structure. 194 * 195 * @return 196 * 0 on success, a negative errno value otherwise and rte_errno is set. 197 */ 198 static int 199 flow_dv_convert_modify_action(struct rte_flow_item *item, 200 struct field_modify_info *field, 201 struct mlx5_flow_dv_modify_hdr_resource *resource, 202 uint32_t type, 203 struct rte_flow_error *error) 204 { 205 uint32_t i = resource->actions_num; 206 struct mlx5_modification_cmd *actions = resource->actions; 207 const uint8_t *spec = item->spec; 208 const uint8_t *mask = item->mask; 209 uint32_t set; 210 211 while (field->size) { 212 set = 0; 213 /* Generate modify command for each mask segment. */ 214 memcpy(&set, &mask[field->offset], field->size); 215 if (set) { 216 if (i >= MLX5_MODIFY_NUM) 217 return rte_flow_error_set(error, EINVAL, 218 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 219 "too many items to modify"); 220 actions[i].action_type = type; 221 actions[i].field = field->id; 222 actions[i].length = field->size == 223 4 ? 0 : field->size * 8; 224 rte_memcpy(&actions[i].data[4 - field->size], 225 &spec[field->offset], field->size); 226 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0); 227 ++i; 228 } 229 if (resource->actions_num != i) 230 resource->actions_num = i; 231 field++; 232 } 233 if (!resource->actions_num) 234 return rte_flow_error_set(error, EINVAL, 235 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 236 "invalid modification flow item"); 237 return 0; 238 } 239 240 /** 241 * Convert modify-header set IPv4 address action to DV specification. 242 * 243 * @param[in,out] resource 244 * Pointer to the modify-header resource. 245 * @param[in] action 246 * Pointer to action specification. 247 * @param[out] error 248 * Pointer to the error structure. 249 * 250 * @return 251 * 0 on success, a negative errno value otherwise and rte_errno is set. 252 */ 253 static int 254 flow_dv_convert_action_modify_ipv4 255 (struct mlx5_flow_dv_modify_hdr_resource *resource, 256 const struct rte_flow_action *action, 257 struct rte_flow_error *error) 258 { 259 const struct rte_flow_action_set_ipv4 *conf = 260 (const struct rte_flow_action_set_ipv4 *)(action->conf); 261 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 }; 262 struct rte_flow_item_ipv4 ipv4; 263 struct rte_flow_item_ipv4 ipv4_mask; 264 265 memset(&ipv4, 0, sizeof(ipv4)); 266 memset(&ipv4_mask, 0, sizeof(ipv4_mask)); 267 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) { 268 ipv4.hdr.src_addr = conf->ipv4_addr; 269 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr; 270 } else { 271 ipv4.hdr.dst_addr = conf->ipv4_addr; 272 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr; 273 } 274 item.spec = &ipv4; 275 item.mask = &ipv4_mask; 276 return flow_dv_convert_modify_action(&item, modify_ipv4, resource, 277 MLX5_MODIFICATION_TYPE_SET, error); 278 } 279 280 /** 281 * Convert modify-header set IPv6 address action to DV specification. 282 * 283 * @param[in,out] resource 284 * Pointer to the modify-header resource. 285 * @param[in] action 286 * Pointer to action specification. 287 * @param[out] error 288 * Pointer to the error structure. 289 * 290 * @return 291 * 0 on success, a negative errno value otherwise and rte_errno is set. 292 */ 293 static int 294 flow_dv_convert_action_modify_ipv6 295 (struct mlx5_flow_dv_modify_hdr_resource *resource, 296 const struct rte_flow_action *action, 297 struct rte_flow_error *error) 298 { 299 const struct rte_flow_action_set_ipv6 *conf = 300 (const struct rte_flow_action_set_ipv6 *)(action->conf); 301 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 }; 302 struct rte_flow_item_ipv6 ipv6; 303 struct rte_flow_item_ipv6 ipv6_mask; 304 305 memset(&ipv6, 0, sizeof(ipv6)); 306 memset(&ipv6_mask, 0, sizeof(ipv6_mask)); 307 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) { 308 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr, 309 sizeof(ipv6.hdr.src_addr)); 310 memcpy(&ipv6_mask.hdr.src_addr, 311 &rte_flow_item_ipv6_mask.hdr.src_addr, 312 sizeof(ipv6.hdr.src_addr)); 313 } else { 314 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr, 315 sizeof(ipv6.hdr.dst_addr)); 316 memcpy(&ipv6_mask.hdr.dst_addr, 317 &rte_flow_item_ipv6_mask.hdr.dst_addr, 318 sizeof(ipv6.hdr.dst_addr)); 319 } 320 item.spec = &ipv6; 321 item.mask = &ipv6_mask; 322 return flow_dv_convert_modify_action(&item, modify_ipv6, resource, 323 MLX5_MODIFICATION_TYPE_SET, error); 324 } 325 326 /** 327 * Convert modify-header set MAC address action to DV specification. 328 * 329 * @param[in,out] resource 330 * Pointer to the modify-header resource. 331 * @param[in] action 332 * Pointer to action specification. 333 * @param[out] error 334 * Pointer to the error structure. 335 * 336 * @return 337 * 0 on success, a negative errno value otherwise and rte_errno is set. 338 */ 339 static int 340 flow_dv_convert_action_modify_mac 341 (struct mlx5_flow_dv_modify_hdr_resource *resource, 342 const struct rte_flow_action *action, 343 struct rte_flow_error *error) 344 { 345 const struct rte_flow_action_set_mac *conf = 346 (const struct rte_flow_action_set_mac *)(action->conf); 347 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH }; 348 struct rte_flow_item_eth eth; 349 struct rte_flow_item_eth eth_mask; 350 351 memset(ð, 0, sizeof(eth)); 352 memset(ð_mask, 0, sizeof(eth_mask)); 353 if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) { 354 memcpy(ð.src.addr_bytes, &conf->mac_addr, 355 sizeof(eth.src.addr_bytes)); 356 memcpy(ð_mask.src.addr_bytes, 357 &rte_flow_item_eth_mask.src.addr_bytes, 358 sizeof(eth_mask.src.addr_bytes)); 359 } else { 360 memcpy(ð.dst.addr_bytes, &conf->mac_addr, 361 sizeof(eth.dst.addr_bytes)); 362 memcpy(ð_mask.dst.addr_bytes, 363 &rte_flow_item_eth_mask.dst.addr_bytes, 364 sizeof(eth_mask.dst.addr_bytes)); 365 } 366 item.spec = ð 367 item.mask = ð_mask; 368 return flow_dv_convert_modify_action(&item, modify_eth, resource, 369 MLX5_MODIFICATION_TYPE_SET, error); 370 } 371 372 /** 373 * Convert modify-header set TP action to DV specification. 374 * 375 * @param[in,out] resource 376 * Pointer to the modify-header resource. 377 * @param[in] action 378 * Pointer to action specification. 379 * @param[in] items 380 * Pointer to rte_flow_item objects list. 381 * @param[in] attr 382 * Pointer to flow attributes structure. 383 * @param[out] error 384 * Pointer to the error structure. 385 * 386 * @return 387 * 0 on success, a negative errno value otherwise and rte_errno is set. 388 */ 389 static int 390 flow_dv_convert_action_modify_tp 391 (struct mlx5_flow_dv_modify_hdr_resource *resource, 392 const struct rte_flow_action *action, 393 const struct rte_flow_item *items, 394 union flow_dv_attr *attr, 395 struct rte_flow_error *error) 396 { 397 const struct rte_flow_action_set_tp *conf = 398 (const struct rte_flow_action_set_tp *)(action->conf); 399 struct rte_flow_item item; 400 struct rte_flow_item_udp udp; 401 struct rte_flow_item_udp udp_mask; 402 struct rte_flow_item_tcp tcp; 403 struct rte_flow_item_tcp tcp_mask; 404 struct field_modify_info *field; 405 406 if (!attr->valid) 407 flow_dv_attr_init(items, attr); 408 if (attr->udp) { 409 memset(&udp, 0, sizeof(udp)); 410 memset(&udp_mask, 0, sizeof(udp_mask)); 411 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) { 412 udp.hdr.src_port = conf->port; 413 udp_mask.hdr.src_port = 414 rte_flow_item_udp_mask.hdr.src_port; 415 } else { 416 udp.hdr.dst_port = conf->port; 417 udp_mask.hdr.dst_port = 418 rte_flow_item_udp_mask.hdr.dst_port; 419 } 420 item.type = RTE_FLOW_ITEM_TYPE_UDP; 421 item.spec = &udp; 422 item.mask = &udp_mask; 423 field = modify_udp; 424 } 425 if (attr->tcp) { 426 memset(&tcp, 0, sizeof(tcp)); 427 memset(&tcp_mask, 0, sizeof(tcp_mask)); 428 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) { 429 tcp.hdr.src_port = conf->port; 430 tcp_mask.hdr.src_port = 431 rte_flow_item_tcp_mask.hdr.src_port; 432 } else { 433 tcp.hdr.dst_port = conf->port; 434 tcp_mask.hdr.dst_port = 435 rte_flow_item_tcp_mask.hdr.dst_port; 436 } 437 item.type = RTE_FLOW_ITEM_TYPE_TCP; 438 item.spec = &tcp; 439 item.mask = &tcp_mask; 440 field = modify_tcp; 441 } 442 return flow_dv_convert_modify_action(&item, field, resource, 443 MLX5_MODIFICATION_TYPE_SET, error); 444 } 445 446 /** 447 * Convert modify-header set TTL action to DV specification. 448 * 449 * @param[in,out] resource 450 * Pointer to the modify-header resource. 451 * @param[in] action 452 * Pointer to action specification. 453 * @param[in] items 454 * Pointer to rte_flow_item objects list. 455 * @param[in] attr 456 * Pointer to flow attributes structure. 457 * @param[out] error 458 * Pointer to the error structure. 459 * 460 * @return 461 * 0 on success, a negative errno value otherwise and rte_errno is set. 462 */ 463 static int 464 flow_dv_convert_action_modify_ttl 465 (struct mlx5_flow_dv_modify_hdr_resource *resource, 466 const struct rte_flow_action *action, 467 const struct rte_flow_item *items, 468 union flow_dv_attr *attr, 469 struct rte_flow_error *error) 470 { 471 const struct rte_flow_action_set_ttl *conf = 472 (const struct rte_flow_action_set_ttl *)(action->conf); 473 struct rte_flow_item item; 474 struct rte_flow_item_ipv4 ipv4; 475 struct rte_flow_item_ipv4 ipv4_mask; 476 struct rte_flow_item_ipv6 ipv6; 477 struct rte_flow_item_ipv6 ipv6_mask; 478 struct field_modify_info *field; 479 480 if (!attr->valid) 481 flow_dv_attr_init(items, attr); 482 if (attr->ipv4) { 483 memset(&ipv4, 0, sizeof(ipv4)); 484 memset(&ipv4_mask, 0, sizeof(ipv4_mask)); 485 ipv4.hdr.time_to_live = conf->ttl_value; 486 ipv4_mask.hdr.time_to_live = 0xFF; 487 item.type = RTE_FLOW_ITEM_TYPE_IPV4; 488 item.spec = &ipv4; 489 item.mask = &ipv4_mask; 490 field = modify_ipv4; 491 } 492 if (attr->ipv6) { 493 memset(&ipv6, 0, sizeof(ipv6)); 494 memset(&ipv6_mask, 0, sizeof(ipv6_mask)); 495 ipv6.hdr.hop_limits = conf->ttl_value; 496 ipv6_mask.hdr.hop_limits = 0xFF; 497 item.type = RTE_FLOW_ITEM_TYPE_IPV6; 498 item.spec = &ipv6; 499 item.mask = &ipv6_mask; 500 field = modify_ipv6; 501 } 502 return flow_dv_convert_modify_action(&item, field, resource, 503 MLX5_MODIFICATION_TYPE_SET, error); 504 } 505 506 /** 507 * Convert modify-header decrement TTL action to DV specification. 508 * 509 * @param[in,out] resource 510 * Pointer to the modify-header resource. 511 * @param[in] action 512 * Pointer to action specification. 513 * @param[in] items 514 * Pointer to rte_flow_item objects list. 515 * @param[in] attr 516 * Pointer to flow attributes structure. 517 * @param[out] error 518 * Pointer to the error structure. 519 * 520 * @return 521 * 0 on success, a negative errno value otherwise and rte_errno is set. 522 */ 523 static int 524 flow_dv_convert_action_modify_dec_ttl 525 (struct mlx5_flow_dv_modify_hdr_resource *resource, 526 const struct rte_flow_item *items, 527 union flow_dv_attr *attr, 528 struct rte_flow_error *error) 529 { 530 struct rte_flow_item item; 531 struct rte_flow_item_ipv4 ipv4; 532 struct rte_flow_item_ipv4 ipv4_mask; 533 struct rte_flow_item_ipv6 ipv6; 534 struct rte_flow_item_ipv6 ipv6_mask; 535 struct field_modify_info *field; 536 537 if (!attr->valid) 538 flow_dv_attr_init(items, attr); 539 if (attr->ipv4) { 540 memset(&ipv4, 0, sizeof(ipv4)); 541 memset(&ipv4_mask, 0, sizeof(ipv4_mask)); 542 ipv4.hdr.time_to_live = 0xFF; 543 ipv4_mask.hdr.time_to_live = 0xFF; 544 item.type = RTE_FLOW_ITEM_TYPE_IPV4; 545 item.spec = &ipv4; 546 item.mask = &ipv4_mask; 547 field = modify_ipv4; 548 } 549 if (attr->ipv6) { 550 memset(&ipv6, 0, sizeof(ipv6)); 551 memset(&ipv6_mask, 0, sizeof(ipv6_mask)); 552 ipv6.hdr.hop_limits = 0xFF; 553 ipv6_mask.hdr.hop_limits = 0xFF; 554 item.type = RTE_FLOW_ITEM_TYPE_IPV6; 555 item.spec = &ipv6; 556 item.mask = &ipv6_mask; 557 field = modify_ipv6; 558 } 559 return flow_dv_convert_modify_action(&item, field, resource, 560 MLX5_MODIFICATION_TYPE_ADD, error); 561 } 562 563 /** 564 * Validate META item. 565 * 566 * @param[in] dev 567 * Pointer to the rte_eth_dev structure. 568 * @param[in] item 569 * Item specification. 570 * @param[in] attr 571 * Attributes of flow that includes this item. 572 * @param[out] error 573 * Pointer to error structure. 574 * 575 * @return 576 * 0 on success, a negative errno value otherwise and rte_errno is set. 577 */ 578 static int 579 flow_dv_validate_item_meta(struct rte_eth_dev *dev, 580 const struct rte_flow_item *item, 581 const struct rte_flow_attr *attr, 582 struct rte_flow_error *error) 583 { 584 const struct rte_flow_item_meta *spec = item->spec; 585 const struct rte_flow_item_meta *mask = item->mask; 586 const struct rte_flow_item_meta nic_mask = { 587 .data = RTE_BE32(UINT32_MAX) 588 }; 589 int ret; 590 uint64_t offloads = dev->data->dev_conf.txmode.offloads; 591 592 if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA)) 593 return rte_flow_error_set(error, EPERM, 594 RTE_FLOW_ERROR_TYPE_ITEM, 595 NULL, 596 "match on metadata offload " 597 "configuration is off for this port"); 598 if (!spec) 599 return rte_flow_error_set(error, EINVAL, 600 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, 601 item->spec, 602 "data cannot be empty"); 603 if (!spec->data) 604 return rte_flow_error_set(error, EINVAL, 605 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, 606 NULL, 607 "data cannot be zero"); 608 if (!mask) 609 mask = &rte_flow_item_meta_mask; 610 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 611 (const uint8_t *)&nic_mask, 612 sizeof(struct rte_flow_item_meta), 613 error); 614 if (ret < 0) 615 return ret; 616 if (attr->ingress) 617 return rte_flow_error_set(error, ENOTSUP, 618 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, 619 NULL, 620 "pattern not supported for ingress"); 621 return 0; 622 } 623 624 /** 625 * Validate vport item. 626 * 627 * @param[in] dev 628 * Pointer to the rte_eth_dev structure. 629 * @param[in] item 630 * Item specification. 631 * @param[in] attr 632 * Attributes of flow that includes this item. 633 * @param[in] item_flags 634 * Bit-fields that holds the items detected until now. 635 * @param[out] error 636 * Pointer to error structure. 637 * 638 * @return 639 * 0 on success, a negative errno value otherwise and rte_errno is set. 640 */ 641 static int 642 flow_dv_validate_item_port_id(struct rte_eth_dev *dev, 643 const struct rte_flow_item *item, 644 const struct rte_flow_attr *attr, 645 uint64_t item_flags, 646 struct rte_flow_error *error) 647 { 648 const struct rte_flow_item_port_id *spec = item->spec; 649 const struct rte_flow_item_port_id *mask = item->mask; 650 const struct rte_flow_item_port_id switch_mask = { 651 .id = 0xffffffff, 652 }; 653 uint16_t esw_domain_id; 654 uint16_t item_port_esw_domain_id; 655 int ret; 656 657 if (!attr->transfer) 658 return rte_flow_error_set(error, EINVAL, 659 RTE_FLOW_ERROR_TYPE_ITEM, 660 NULL, 661 "match on port id is valid only" 662 " when transfer flag is enabled"); 663 if (item_flags & MLX5_FLOW_ITEM_PORT_ID) 664 return rte_flow_error_set(error, ENOTSUP, 665 RTE_FLOW_ERROR_TYPE_ITEM, item, 666 "multiple source ports are not" 667 " supported"); 668 if (!mask) 669 mask = &switch_mask; 670 if (mask->id != 0xffffffff) 671 return rte_flow_error_set(error, ENOTSUP, 672 RTE_FLOW_ERROR_TYPE_ITEM_MASK, 673 mask, 674 "no support for partial mask on" 675 " \"id\" field"); 676 ret = mlx5_flow_item_acceptable 677 (item, (const uint8_t *)mask, 678 (const uint8_t *)&rte_flow_item_port_id_mask, 679 sizeof(struct rte_flow_item_port_id), 680 error); 681 if (ret) 682 return ret; 683 if (!spec) 684 return 0; 685 ret = mlx5_port_to_eswitch_info(spec->id, &item_port_esw_domain_id, 686 NULL); 687 if (ret) 688 return rte_flow_error_set(error, -ret, 689 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec, 690 "failed to obtain E-Switch info for" 691 " port"); 692 ret = mlx5_port_to_eswitch_info(dev->data->port_id, 693 &esw_domain_id, NULL); 694 if (ret < 0) 695 return rte_flow_error_set(error, -ret, 696 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 697 NULL, 698 "failed to obtain E-Switch info"); 699 if (item_port_esw_domain_id != esw_domain_id) 700 return rte_flow_error_set(error, -ret, 701 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec, 702 "cannot match on a port from a" 703 " different E-Switch"); 704 return 0; 705 } 706 707 /** 708 * Validate count action. 709 * 710 * @param[in] dev 711 * device otr. 712 * @param[out] error 713 * Pointer to error structure. 714 * 715 * @return 716 * 0 on success, a negative errno value otherwise and rte_errno is set. 717 */ 718 static int 719 flow_dv_validate_action_count(struct rte_eth_dev *dev, 720 struct rte_flow_error *error) 721 { 722 struct mlx5_priv *priv = dev->data->dev_private; 723 724 if (!priv->config.devx) 725 goto notsup_err; 726 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS 727 return 0; 728 #endif 729 notsup_err: 730 return rte_flow_error_set 731 (error, ENOTSUP, 732 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 733 NULL, 734 "count action not supported"); 735 } 736 737 /** 738 * Validate the L2 encap action. 739 * 740 * @param[in] action_flags 741 * Holds the actions detected until now. 742 * @param[in] action 743 * Pointer to the encap action. 744 * @param[in] attr 745 * Pointer to flow attributes 746 * @param[out] error 747 * Pointer to error structure. 748 * 749 * @return 750 * 0 on success, a negative errno value otherwise and rte_errno is set. 751 */ 752 static int 753 flow_dv_validate_action_l2_encap(uint64_t action_flags, 754 const struct rte_flow_action *action, 755 const struct rte_flow_attr *attr, 756 struct rte_flow_error *error) 757 { 758 if (!(action->conf)) 759 return rte_flow_error_set(error, EINVAL, 760 RTE_FLOW_ERROR_TYPE_ACTION, action, 761 "configuration cannot be null"); 762 if (action_flags & MLX5_FLOW_ACTION_DROP) 763 return rte_flow_error_set(error, EINVAL, 764 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 765 "can't drop and encap in same flow"); 766 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS)) 767 return rte_flow_error_set(error, EINVAL, 768 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 769 "can only have a single encap or" 770 " decap action in a flow"); 771 if (!attr->transfer && attr->ingress) 772 return rte_flow_error_set(error, ENOTSUP, 773 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, 774 NULL, 775 "encap action not supported for " 776 "ingress"); 777 return 0; 778 } 779 780 /** 781 * Validate the L2 decap action. 782 * 783 * @param[in] action_flags 784 * Holds the actions detected until now. 785 * @param[in] attr 786 * Pointer to flow attributes 787 * @param[out] error 788 * Pointer to error structure. 789 * 790 * @return 791 * 0 on success, a negative errno value otherwise and rte_errno is set. 792 */ 793 static int 794 flow_dv_validate_action_l2_decap(uint64_t action_flags, 795 const struct rte_flow_attr *attr, 796 struct rte_flow_error *error) 797 { 798 if (action_flags & MLX5_FLOW_ACTION_DROP) 799 return rte_flow_error_set(error, EINVAL, 800 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 801 "can't drop and decap in same flow"); 802 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS)) 803 return rte_flow_error_set(error, EINVAL, 804 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 805 "can only have a single encap or" 806 " decap action in a flow"); 807 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) 808 return rte_flow_error_set(error, EINVAL, 809 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 810 "can't have decap action after" 811 " modify action"); 812 if (attr->egress) 813 return rte_flow_error_set(error, ENOTSUP, 814 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, 815 NULL, 816 "decap action not supported for " 817 "egress"); 818 return 0; 819 } 820 821 /** 822 * Validate the raw encap action. 823 * 824 * @param[in] action_flags 825 * Holds the actions detected until now. 826 * @param[in] action 827 * Pointer to the encap action. 828 * @param[in] attr 829 * Pointer to flow attributes 830 * @param[out] error 831 * Pointer to error structure. 832 * 833 * @return 834 * 0 on success, a negative errno value otherwise and rte_errno is set. 835 */ 836 static int 837 flow_dv_validate_action_raw_encap(uint64_t action_flags, 838 const struct rte_flow_action *action, 839 const struct rte_flow_attr *attr, 840 struct rte_flow_error *error) 841 { 842 if (!(action->conf)) 843 return rte_flow_error_set(error, EINVAL, 844 RTE_FLOW_ERROR_TYPE_ACTION, action, 845 "configuration cannot be null"); 846 if (action_flags & MLX5_FLOW_ACTION_DROP) 847 return rte_flow_error_set(error, EINVAL, 848 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 849 "can't drop and encap in same flow"); 850 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS) 851 return rte_flow_error_set(error, EINVAL, 852 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 853 "can only have a single encap" 854 " action in a flow"); 855 /* encap without preceding decap is not supported for ingress */ 856 if (!attr->transfer && attr->ingress && 857 !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP)) 858 return rte_flow_error_set(error, ENOTSUP, 859 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, 860 NULL, 861 "encap action not supported for " 862 "ingress"); 863 return 0; 864 } 865 866 /** 867 * Validate the raw decap action. 868 * 869 * @param[in] action_flags 870 * Holds the actions detected until now. 871 * @param[in] action 872 * Pointer to the encap action. 873 * @param[in] attr 874 * Pointer to flow attributes 875 * @param[out] error 876 * Pointer to error structure. 877 * 878 * @return 879 * 0 on success, a negative errno value otherwise and rte_errno is set. 880 */ 881 static int 882 flow_dv_validate_action_raw_decap(uint64_t action_flags, 883 const struct rte_flow_action *action, 884 const struct rte_flow_attr *attr, 885 struct rte_flow_error *error) 886 { 887 if (action_flags & MLX5_FLOW_ACTION_DROP) 888 return rte_flow_error_set(error, EINVAL, 889 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 890 "can't drop and decap in same flow"); 891 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS) 892 return rte_flow_error_set(error, EINVAL, 893 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 894 "can't have encap action before" 895 " decap action"); 896 if (action_flags & MLX5_FLOW_DECAP_ACTIONS) 897 return rte_flow_error_set(error, EINVAL, 898 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 899 "can only have a single decap" 900 " action in a flow"); 901 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) 902 return rte_flow_error_set(error, EINVAL, 903 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 904 "can't have decap action after" 905 " modify action"); 906 /* decap action is valid on egress only if it is followed by encap */ 907 if (attr->egress) { 908 for (; action->type != RTE_FLOW_ACTION_TYPE_END && 909 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP; 910 action++) { 911 } 912 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) 913 return rte_flow_error_set 914 (error, ENOTSUP, 915 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, 916 NULL, "decap action not supported" 917 " for egress"); 918 } 919 return 0; 920 } 921 922 /** 923 * Find existing encap/decap resource or create and register a new one. 924 * 925 * @param dev[in, out] 926 * Pointer to rte_eth_dev structure. 927 * @param[in, out] resource 928 * Pointer to encap/decap resource. 929 * @parm[in, out] dev_flow 930 * Pointer to the dev_flow. 931 * @param[out] error 932 * pointer to error structure. 933 * 934 * @return 935 * 0 on success otherwise -errno and errno is set. 936 */ 937 static int 938 flow_dv_encap_decap_resource_register 939 (struct rte_eth_dev *dev, 940 struct mlx5_flow_dv_encap_decap_resource *resource, 941 struct mlx5_flow *dev_flow, 942 struct rte_flow_error *error) 943 { 944 struct mlx5_priv *priv = dev->data->dev_private; 945 struct mlx5_ibv_shared *sh = priv->sh; 946 struct mlx5_flow_dv_encap_decap_resource *cache_resource; 947 struct rte_flow *flow = dev_flow->flow; 948 struct mlx5dv_dr_domain *domain; 949 950 resource->flags = flow->group ? 0 : 1; 951 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) 952 domain = sh->fdb_domain; 953 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX) 954 domain = sh->rx_domain; 955 else 956 domain = sh->tx_domain; 957 958 /* Lookup a matching resource from cache. */ 959 LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) { 960 if (resource->reformat_type == cache_resource->reformat_type && 961 resource->ft_type == cache_resource->ft_type && 962 resource->flags == cache_resource->flags && 963 resource->size == cache_resource->size && 964 !memcmp((const void *)resource->buf, 965 (const void *)cache_resource->buf, 966 resource->size)) { 967 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++", 968 (void *)cache_resource, 969 rte_atomic32_read(&cache_resource->refcnt)); 970 rte_atomic32_inc(&cache_resource->refcnt); 971 dev_flow->dv.encap_decap = cache_resource; 972 return 0; 973 } 974 } 975 /* Register new encap/decap resource. */ 976 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0); 977 if (!cache_resource) 978 return rte_flow_error_set(error, ENOMEM, 979 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 980 "cannot allocate resource memory"); 981 *cache_resource = *resource; 982 cache_resource->verbs_action = 983 mlx5_glue->dv_create_flow_action_packet_reformat 984 (sh->ctx, cache_resource->reformat_type, 985 cache_resource->ft_type, domain, cache_resource->flags, 986 cache_resource->size, 987 (cache_resource->size ? cache_resource->buf : NULL)); 988 if (!cache_resource->verbs_action) { 989 rte_free(cache_resource); 990 return rte_flow_error_set(error, ENOMEM, 991 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 992 NULL, "cannot create action"); 993 } 994 rte_atomic32_init(&cache_resource->refcnt); 995 rte_atomic32_inc(&cache_resource->refcnt); 996 LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next); 997 dev_flow->dv.encap_decap = cache_resource; 998 DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++", 999 (void *)cache_resource, 1000 rte_atomic32_read(&cache_resource->refcnt)); 1001 return 0; 1002 } 1003 1004 /** 1005 * Find existing table jump resource or create and register a new one. 1006 * 1007 * @param dev[in, out] 1008 * Pointer to rte_eth_dev structure. 1009 * @param[in, out] resource 1010 * Pointer to jump table resource. 1011 * @parm[in, out] dev_flow 1012 * Pointer to the dev_flow. 1013 * @param[out] error 1014 * pointer to error structure. 1015 * 1016 * @return 1017 * 0 on success otherwise -errno and errno is set. 1018 */ 1019 static int 1020 flow_dv_jump_tbl_resource_register 1021 (struct rte_eth_dev *dev, 1022 struct mlx5_flow_dv_jump_tbl_resource *resource, 1023 struct mlx5_flow *dev_flow, 1024 struct rte_flow_error *error) 1025 { 1026 struct mlx5_priv *priv = dev->data->dev_private; 1027 struct mlx5_ibv_shared *sh = priv->sh; 1028 struct mlx5_flow_dv_jump_tbl_resource *cache_resource; 1029 1030 /* Lookup a matching resource from cache. */ 1031 LIST_FOREACH(cache_resource, &sh->jump_tbl, next) { 1032 if (resource->tbl == cache_resource->tbl) { 1033 DRV_LOG(DEBUG, "jump table resource resource %p: refcnt %d++", 1034 (void *)cache_resource, 1035 rte_atomic32_read(&cache_resource->refcnt)); 1036 rte_atomic32_inc(&cache_resource->refcnt); 1037 dev_flow->dv.jump = cache_resource; 1038 return 0; 1039 } 1040 } 1041 /* Register new jump table resource. */ 1042 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0); 1043 if (!cache_resource) 1044 return rte_flow_error_set(error, ENOMEM, 1045 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1046 "cannot allocate resource memory"); 1047 *cache_resource = *resource; 1048 cache_resource->action = 1049 mlx5_glue->dr_create_flow_action_dest_flow_tbl 1050 (resource->tbl->obj); 1051 if (!cache_resource->action) { 1052 rte_free(cache_resource); 1053 return rte_flow_error_set(error, ENOMEM, 1054 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1055 NULL, "cannot create action"); 1056 } 1057 rte_atomic32_init(&cache_resource->refcnt); 1058 rte_atomic32_inc(&cache_resource->refcnt); 1059 LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next); 1060 dev_flow->dv.jump = cache_resource; 1061 DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++", 1062 (void *)cache_resource, 1063 rte_atomic32_read(&cache_resource->refcnt)); 1064 return 0; 1065 } 1066 1067 /** 1068 * Find existing table port ID resource or create and register a new one. 1069 * 1070 * @param dev[in, out] 1071 * Pointer to rte_eth_dev structure. 1072 * @param[in, out] resource 1073 * Pointer to port ID action resource. 1074 * @parm[in, out] dev_flow 1075 * Pointer to the dev_flow. 1076 * @param[out] error 1077 * pointer to error structure. 1078 * 1079 * @return 1080 * 0 on success otherwise -errno and errno is set. 1081 */ 1082 static int 1083 flow_dv_port_id_action_resource_register 1084 (struct rte_eth_dev *dev, 1085 struct mlx5_flow_dv_port_id_action_resource *resource, 1086 struct mlx5_flow *dev_flow, 1087 struct rte_flow_error *error) 1088 { 1089 struct mlx5_priv *priv = dev->data->dev_private; 1090 struct mlx5_ibv_shared *sh = priv->sh; 1091 struct mlx5_flow_dv_port_id_action_resource *cache_resource; 1092 1093 /* Lookup a matching resource from cache. */ 1094 LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) { 1095 if (resource->port_id == cache_resource->port_id) { 1096 DRV_LOG(DEBUG, "port id action resource resource %p: " 1097 "refcnt %d++", 1098 (void *)cache_resource, 1099 rte_atomic32_read(&cache_resource->refcnt)); 1100 rte_atomic32_inc(&cache_resource->refcnt); 1101 dev_flow->dv.port_id_action = cache_resource; 1102 return 0; 1103 } 1104 } 1105 /* Register new port id action resource. */ 1106 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0); 1107 if (!cache_resource) 1108 return rte_flow_error_set(error, ENOMEM, 1109 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1110 "cannot allocate resource memory"); 1111 *cache_resource = *resource; 1112 cache_resource->action = 1113 mlx5_glue->dr_create_flow_action_dest_vport 1114 (priv->sh->fdb_domain, resource->port_id); 1115 if (!cache_resource->action) { 1116 rte_free(cache_resource); 1117 return rte_flow_error_set(error, ENOMEM, 1118 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1119 NULL, "cannot create action"); 1120 } 1121 rte_atomic32_init(&cache_resource->refcnt); 1122 rte_atomic32_inc(&cache_resource->refcnt); 1123 LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next); 1124 dev_flow->dv.port_id_action = cache_resource; 1125 DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++", 1126 (void *)cache_resource, 1127 rte_atomic32_read(&cache_resource->refcnt)); 1128 return 0; 1129 } 1130 1131 /** 1132 * Get the size of specific rte_flow_item_type 1133 * 1134 * @param[in] item_type 1135 * Tested rte_flow_item_type. 1136 * 1137 * @return 1138 * sizeof struct item_type, 0 if void or irrelevant. 1139 */ 1140 static size_t 1141 flow_dv_get_item_len(const enum rte_flow_item_type item_type) 1142 { 1143 size_t retval; 1144 1145 switch (item_type) { 1146 case RTE_FLOW_ITEM_TYPE_ETH: 1147 retval = sizeof(struct rte_flow_item_eth); 1148 break; 1149 case RTE_FLOW_ITEM_TYPE_VLAN: 1150 retval = sizeof(struct rte_flow_item_vlan); 1151 break; 1152 case RTE_FLOW_ITEM_TYPE_IPV4: 1153 retval = sizeof(struct rte_flow_item_ipv4); 1154 break; 1155 case RTE_FLOW_ITEM_TYPE_IPV6: 1156 retval = sizeof(struct rte_flow_item_ipv6); 1157 break; 1158 case RTE_FLOW_ITEM_TYPE_UDP: 1159 retval = sizeof(struct rte_flow_item_udp); 1160 break; 1161 case RTE_FLOW_ITEM_TYPE_TCP: 1162 retval = sizeof(struct rte_flow_item_tcp); 1163 break; 1164 case RTE_FLOW_ITEM_TYPE_VXLAN: 1165 retval = sizeof(struct rte_flow_item_vxlan); 1166 break; 1167 case RTE_FLOW_ITEM_TYPE_GRE: 1168 retval = sizeof(struct rte_flow_item_gre); 1169 break; 1170 case RTE_FLOW_ITEM_TYPE_NVGRE: 1171 retval = sizeof(struct rte_flow_item_nvgre); 1172 break; 1173 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: 1174 retval = sizeof(struct rte_flow_item_vxlan_gpe); 1175 break; 1176 case RTE_FLOW_ITEM_TYPE_MPLS: 1177 retval = sizeof(struct rte_flow_item_mpls); 1178 break; 1179 case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */ 1180 default: 1181 retval = 0; 1182 break; 1183 } 1184 return retval; 1185 } 1186 1187 #define MLX5_ENCAP_IPV4_VERSION 0x40 1188 #define MLX5_ENCAP_IPV4_IHL_MIN 0x05 1189 #define MLX5_ENCAP_IPV4_TTL_DEF 0x40 1190 #define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000 1191 #define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff 1192 #define MLX5_ENCAP_VXLAN_FLAGS 0x08000000 1193 #define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04 1194 1195 /** 1196 * Convert the encap action data from list of rte_flow_item to raw buffer 1197 * 1198 * @param[in] items 1199 * Pointer to rte_flow_item objects list. 1200 * @param[out] buf 1201 * Pointer to the output buffer. 1202 * @param[out] size 1203 * Pointer to the output buffer size. 1204 * @param[out] error 1205 * Pointer to the error structure. 1206 * 1207 * @return 1208 * 0 on success, a negative errno value otherwise and rte_errno is set. 1209 */ 1210 static int 1211 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf, 1212 size_t *size, struct rte_flow_error *error) 1213 { 1214 struct rte_ether_hdr *eth = NULL; 1215 struct rte_vlan_hdr *vlan = NULL; 1216 struct rte_ipv4_hdr *ipv4 = NULL; 1217 struct rte_ipv6_hdr *ipv6 = NULL; 1218 struct rte_udp_hdr *udp = NULL; 1219 struct rte_vxlan_hdr *vxlan = NULL; 1220 struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL; 1221 struct rte_gre_hdr *gre = NULL; 1222 size_t len; 1223 size_t temp_size = 0; 1224 1225 if (!items) 1226 return rte_flow_error_set(error, EINVAL, 1227 RTE_FLOW_ERROR_TYPE_ACTION, 1228 NULL, "invalid empty data"); 1229 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { 1230 len = flow_dv_get_item_len(items->type); 1231 if (len + temp_size > MLX5_ENCAP_MAX_LEN) 1232 return rte_flow_error_set(error, EINVAL, 1233 RTE_FLOW_ERROR_TYPE_ACTION, 1234 (void *)items->type, 1235 "items total size is too big" 1236 " for encap action"); 1237 rte_memcpy((void *)&buf[temp_size], items->spec, len); 1238 switch (items->type) { 1239 case RTE_FLOW_ITEM_TYPE_ETH: 1240 eth = (struct rte_ether_hdr *)&buf[temp_size]; 1241 break; 1242 case RTE_FLOW_ITEM_TYPE_VLAN: 1243 vlan = (struct rte_vlan_hdr *)&buf[temp_size]; 1244 if (!eth) 1245 return rte_flow_error_set(error, EINVAL, 1246 RTE_FLOW_ERROR_TYPE_ACTION, 1247 (void *)items->type, 1248 "eth header not found"); 1249 if (!eth->ether_type) 1250 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN); 1251 break; 1252 case RTE_FLOW_ITEM_TYPE_IPV4: 1253 ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size]; 1254 if (!vlan && !eth) 1255 return rte_flow_error_set(error, EINVAL, 1256 RTE_FLOW_ERROR_TYPE_ACTION, 1257 (void *)items->type, 1258 "neither eth nor vlan" 1259 " header found"); 1260 if (vlan && !vlan->eth_proto) 1261 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPv4); 1262 else if (eth && !eth->ether_type) 1263 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPv4); 1264 if (!ipv4->version_ihl) 1265 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION | 1266 MLX5_ENCAP_IPV4_IHL_MIN; 1267 if (!ipv4->time_to_live) 1268 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF; 1269 break; 1270 case RTE_FLOW_ITEM_TYPE_IPV6: 1271 ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size]; 1272 if (!vlan && !eth) 1273 return rte_flow_error_set(error, EINVAL, 1274 RTE_FLOW_ERROR_TYPE_ACTION, 1275 (void *)items->type, 1276 "neither eth nor vlan" 1277 " header found"); 1278 if (vlan && !vlan->eth_proto) 1279 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPv6); 1280 else if (eth && !eth->ether_type) 1281 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPv6); 1282 if (!ipv6->vtc_flow) 1283 ipv6->vtc_flow = 1284 RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW); 1285 if (!ipv6->hop_limits) 1286 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT; 1287 break; 1288 case RTE_FLOW_ITEM_TYPE_UDP: 1289 udp = (struct rte_udp_hdr *)&buf[temp_size]; 1290 if (!ipv4 && !ipv6) 1291 return rte_flow_error_set(error, EINVAL, 1292 RTE_FLOW_ERROR_TYPE_ACTION, 1293 (void *)items->type, 1294 "ip header not found"); 1295 if (ipv4 && !ipv4->next_proto_id) 1296 ipv4->next_proto_id = IPPROTO_UDP; 1297 else if (ipv6 && !ipv6->proto) 1298 ipv6->proto = IPPROTO_UDP; 1299 break; 1300 case RTE_FLOW_ITEM_TYPE_VXLAN: 1301 vxlan = (struct rte_vxlan_hdr *)&buf[temp_size]; 1302 if (!udp) 1303 return rte_flow_error_set(error, EINVAL, 1304 RTE_FLOW_ERROR_TYPE_ACTION, 1305 (void *)items->type, 1306 "udp header not found"); 1307 if (!udp->dst_port) 1308 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN); 1309 if (!vxlan->vx_flags) 1310 vxlan->vx_flags = 1311 RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS); 1312 break; 1313 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: 1314 vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size]; 1315 if (!udp) 1316 return rte_flow_error_set(error, EINVAL, 1317 RTE_FLOW_ERROR_TYPE_ACTION, 1318 (void *)items->type, 1319 "udp header not found"); 1320 if (!vxlan_gpe->proto) 1321 return rte_flow_error_set(error, EINVAL, 1322 RTE_FLOW_ERROR_TYPE_ACTION, 1323 (void *)items->type, 1324 "next protocol not found"); 1325 if (!udp->dst_port) 1326 udp->dst_port = 1327 RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE); 1328 if (!vxlan_gpe->vx_flags) 1329 vxlan_gpe->vx_flags = 1330 MLX5_ENCAP_VXLAN_GPE_FLAGS; 1331 break; 1332 case RTE_FLOW_ITEM_TYPE_GRE: 1333 case RTE_FLOW_ITEM_TYPE_NVGRE: 1334 gre = (struct rte_gre_hdr *)&buf[temp_size]; 1335 if (!gre->proto) 1336 return rte_flow_error_set(error, EINVAL, 1337 RTE_FLOW_ERROR_TYPE_ACTION, 1338 (void *)items->type, 1339 "next protocol not found"); 1340 if (!ipv4 && !ipv6) 1341 return rte_flow_error_set(error, EINVAL, 1342 RTE_FLOW_ERROR_TYPE_ACTION, 1343 (void *)items->type, 1344 "ip header not found"); 1345 if (ipv4 && !ipv4->next_proto_id) 1346 ipv4->next_proto_id = IPPROTO_GRE; 1347 else if (ipv6 && !ipv6->proto) 1348 ipv6->proto = IPPROTO_GRE; 1349 break; 1350 case RTE_FLOW_ITEM_TYPE_VOID: 1351 break; 1352 default: 1353 return rte_flow_error_set(error, EINVAL, 1354 RTE_FLOW_ERROR_TYPE_ACTION, 1355 (void *)items->type, 1356 "unsupported item type"); 1357 break; 1358 } 1359 temp_size += len; 1360 } 1361 *size = temp_size; 1362 return 0; 1363 } 1364 1365 /** 1366 * Convert L2 encap action to DV specification. 1367 * 1368 * @param[in] dev 1369 * Pointer to rte_eth_dev structure. 1370 * @param[in] action 1371 * Pointer to action structure. 1372 * @param[in, out] dev_flow 1373 * Pointer to the mlx5_flow. 1374 * @param[in] transfer 1375 * Mark if the flow is E-Switch flow. 1376 * @param[out] error 1377 * Pointer to the error structure. 1378 * 1379 * @return 1380 * 0 on success, a negative errno value otherwise and rte_errno is set. 1381 */ 1382 static int 1383 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev, 1384 const struct rte_flow_action *action, 1385 struct mlx5_flow *dev_flow, 1386 uint8_t transfer, 1387 struct rte_flow_error *error) 1388 { 1389 const struct rte_flow_item *encap_data; 1390 const struct rte_flow_action_raw_encap *raw_encap_data; 1391 struct mlx5_flow_dv_encap_decap_resource res = { 1392 .reformat_type = 1393 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL, 1394 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB : 1395 MLX5DV_FLOW_TABLE_TYPE_NIC_TX, 1396 }; 1397 1398 if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { 1399 raw_encap_data = 1400 (const struct rte_flow_action_raw_encap *)action->conf; 1401 res.size = raw_encap_data->size; 1402 memcpy(res.buf, raw_encap_data->data, res.size); 1403 } else { 1404 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) 1405 encap_data = 1406 ((const struct rte_flow_action_vxlan_encap *) 1407 action->conf)->definition; 1408 else 1409 encap_data = 1410 ((const struct rte_flow_action_nvgre_encap *) 1411 action->conf)->definition; 1412 if (flow_dv_convert_encap_data(encap_data, res.buf, 1413 &res.size, error)) 1414 return -rte_errno; 1415 } 1416 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error)) 1417 return rte_flow_error_set(error, EINVAL, 1418 RTE_FLOW_ERROR_TYPE_ACTION, 1419 NULL, "can't create L2 encap action"); 1420 return 0; 1421 } 1422 1423 /** 1424 * Convert L2 decap action to DV specification. 1425 * 1426 * @param[in] dev 1427 * Pointer to rte_eth_dev structure. 1428 * @param[in, out] dev_flow 1429 * Pointer to the mlx5_flow. 1430 * @param[in] transfer 1431 * Mark if the flow is E-Switch flow. 1432 * @param[out] error 1433 * Pointer to the error structure. 1434 * 1435 * @return 1436 * 0 on success, a negative errno value otherwise and rte_errno is set. 1437 */ 1438 static int 1439 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev, 1440 struct mlx5_flow *dev_flow, 1441 uint8_t transfer, 1442 struct rte_flow_error *error) 1443 { 1444 struct mlx5_flow_dv_encap_decap_resource res = { 1445 .size = 0, 1446 .reformat_type = 1447 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2, 1448 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB : 1449 MLX5DV_FLOW_TABLE_TYPE_NIC_RX, 1450 }; 1451 1452 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error)) 1453 return rte_flow_error_set(error, EINVAL, 1454 RTE_FLOW_ERROR_TYPE_ACTION, 1455 NULL, "can't create L2 decap action"); 1456 return 0; 1457 } 1458 1459 /** 1460 * Convert raw decap/encap (L3 tunnel) action to DV specification. 1461 * 1462 * @param[in] dev 1463 * Pointer to rte_eth_dev structure. 1464 * @param[in] action 1465 * Pointer to action structure. 1466 * @param[in, out] dev_flow 1467 * Pointer to the mlx5_flow. 1468 * @param[in] attr 1469 * Pointer to the flow attributes. 1470 * @param[out] error 1471 * Pointer to the error structure. 1472 * 1473 * @return 1474 * 0 on success, a negative errno value otherwise and rte_errno is set. 1475 */ 1476 static int 1477 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev, 1478 const struct rte_flow_action *action, 1479 struct mlx5_flow *dev_flow, 1480 const struct rte_flow_attr *attr, 1481 struct rte_flow_error *error) 1482 { 1483 const struct rte_flow_action_raw_encap *encap_data; 1484 struct mlx5_flow_dv_encap_decap_resource res; 1485 1486 encap_data = (const struct rte_flow_action_raw_encap *)action->conf; 1487 res.size = encap_data->size; 1488 memcpy(res.buf, encap_data->data, res.size); 1489 res.reformat_type = attr->egress ? 1490 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL : 1491 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2; 1492 if (attr->transfer) 1493 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; 1494 else 1495 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX : 1496 MLX5DV_FLOW_TABLE_TYPE_NIC_RX; 1497 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error)) 1498 return rte_flow_error_set(error, EINVAL, 1499 RTE_FLOW_ERROR_TYPE_ACTION, 1500 NULL, "can't create encap action"); 1501 return 0; 1502 } 1503 1504 /** 1505 * Validate the modify-header actions. 1506 * 1507 * @param[in] action_flags 1508 * Holds the actions detected until now. 1509 * @param[in] action 1510 * Pointer to the modify action. 1511 * @param[out] error 1512 * Pointer to error structure. 1513 * 1514 * @return 1515 * 0 on success, a negative errno value otherwise and rte_errno is set. 1516 */ 1517 static int 1518 flow_dv_validate_action_modify_hdr(const uint64_t action_flags, 1519 const struct rte_flow_action *action, 1520 struct rte_flow_error *error) 1521 { 1522 if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf) 1523 return rte_flow_error_set(error, EINVAL, 1524 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1525 NULL, "action configuration not set"); 1526 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS) 1527 return rte_flow_error_set(error, EINVAL, 1528 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1529 "can't have encap action before" 1530 " modify action"); 1531 return 0; 1532 } 1533 1534 /** 1535 * Validate the modify-header MAC address actions. 1536 * 1537 * @param[in] action_flags 1538 * Holds the actions detected until now. 1539 * @param[in] action 1540 * Pointer to the modify action. 1541 * @param[in] item_flags 1542 * Holds the items detected. 1543 * @param[out] error 1544 * Pointer to error structure. 1545 * 1546 * @return 1547 * 0 on success, a negative errno value otherwise and rte_errno is set. 1548 */ 1549 static int 1550 flow_dv_validate_action_modify_mac(const uint64_t action_flags, 1551 const struct rte_flow_action *action, 1552 const uint64_t item_flags, 1553 struct rte_flow_error *error) 1554 { 1555 int ret = 0; 1556 1557 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 1558 if (!ret) { 1559 if (!(item_flags & MLX5_FLOW_LAYER_L2)) 1560 return rte_flow_error_set(error, EINVAL, 1561 RTE_FLOW_ERROR_TYPE_ACTION, 1562 NULL, 1563 "no L2 item in pattern"); 1564 } 1565 return ret; 1566 } 1567 1568 /** 1569 * Validate the modify-header IPv4 address actions. 1570 * 1571 * @param[in] action_flags 1572 * Holds the actions detected until now. 1573 * @param[in] action 1574 * Pointer to the modify action. 1575 * @param[in] item_flags 1576 * Holds the items detected. 1577 * @param[out] error 1578 * Pointer to error structure. 1579 * 1580 * @return 1581 * 0 on success, a negative errno value otherwise and rte_errno is set. 1582 */ 1583 static int 1584 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags, 1585 const struct rte_flow_action *action, 1586 const uint64_t item_flags, 1587 struct rte_flow_error *error) 1588 { 1589 int ret = 0; 1590 1591 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 1592 if (!ret) { 1593 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4)) 1594 return rte_flow_error_set(error, EINVAL, 1595 RTE_FLOW_ERROR_TYPE_ACTION, 1596 NULL, 1597 "no ipv4 item in pattern"); 1598 } 1599 return ret; 1600 } 1601 1602 /** 1603 * Validate the modify-header IPv6 address actions. 1604 * 1605 * @param[in] action_flags 1606 * Holds the actions detected until now. 1607 * @param[in] action 1608 * Pointer to the modify action. 1609 * @param[in] item_flags 1610 * Holds the items detected. 1611 * @param[out] error 1612 * Pointer to error structure. 1613 * 1614 * @return 1615 * 0 on success, a negative errno value otherwise and rte_errno is set. 1616 */ 1617 static int 1618 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags, 1619 const struct rte_flow_action *action, 1620 const uint64_t item_flags, 1621 struct rte_flow_error *error) 1622 { 1623 int ret = 0; 1624 1625 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 1626 if (!ret) { 1627 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6)) 1628 return rte_flow_error_set(error, EINVAL, 1629 RTE_FLOW_ERROR_TYPE_ACTION, 1630 NULL, 1631 "no ipv6 item in pattern"); 1632 } 1633 return ret; 1634 } 1635 1636 /** 1637 * Validate the modify-header TP actions. 1638 * 1639 * @param[in] action_flags 1640 * Holds the actions detected until now. 1641 * @param[in] action 1642 * Pointer to the modify action. 1643 * @param[in] item_flags 1644 * Holds the items detected. 1645 * @param[out] error 1646 * Pointer to error structure. 1647 * 1648 * @return 1649 * 0 on success, a negative errno value otherwise and rte_errno is set. 1650 */ 1651 static int 1652 flow_dv_validate_action_modify_tp(const uint64_t action_flags, 1653 const struct rte_flow_action *action, 1654 const uint64_t item_flags, 1655 struct rte_flow_error *error) 1656 { 1657 int ret = 0; 1658 1659 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 1660 if (!ret) { 1661 if (!(item_flags & MLX5_FLOW_LAYER_L4)) 1662 return rte_flow_error_set(error, EINVAL, 1663 RTE_FLOW_ERROR_TYPE_ACTION, 1664 NULL, "no transport layer " 1665 "in pattern"); 1666 } 1667 return ret; 1668 } 1669 1670 /** 1671 * Validate the modify-header TTL actions. 1672 * 1673 * @param[in] action_flags 1674 * Holds the actions detected until now. 1675 * @param[in] action 1676 * Pointer to the modify action. 1677 * @param[in] item_flags 1678 * Holds the items detected. 1679 * @param[out] error 1680 * Pointer to error structure. 1681 * 1682 * @return 1683 * 0 on success, a negative errno value otherwise and rte_errno is set. 1684 */ 1685 static int 1686 flow_dv_validate_action_modify_ttl(const uint64_t action_flags, 1687 const struct rte_flow_action *action, 1688 const uint64_t item_flags, 1689 struct rte_flow_error *error) 1690 { 1691 int ret = 0; 1692 1693 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 1694 if (!ret) { 1695 if (!(item_flags & MLX5_FLOW_LAYER_L3)) 1696 return rte_flow_error_set(error, EINVAL, 1697 RTE_FLOW_ERROR_TYPE_ACTION, 1698 NULL, 1699 "no IP protocol in pattern"); 1700 } 1701 return ret; 1702 } 1703 1704 /** 1705 * Validate jump action. 1706 * 1707 * @param[in] action 1708 * Pointer to the modify action. 1709 * @param[in] group 1710 * The group of the current flow. 1711 * @param[out] error 1712 * Pointer to error structure. 1713 * 1714 * @return 1715 * 0 on success, a negative errno value otherwise and rte_errno is set. 1716 */ 1717 static int 1718 flow_dv_validate_action_jump(const struct rte_flow_action *action, 1719 uint32_t group, 1720 struct rte_flow_error *error) 1721 { 1722 if (action->type != RTE_FLOW_ACTION_TYPE_JUMP && !action->conf) 1723 return rte_flow_error_set(error, EINVAL, 1724 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1725 NULL, "action configuration not set"); 1726 if (group >= ((const struct rte_flow_action_jump *)action->conf)->group) 1727 return rte_flow_error_set(error, EINVAL, 1728 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1729 "target group must be higher then" 1730 " the current flow group"); 1731 return 0; 1732 } 1733 1734 /* 1735 * Validate the port_id action. 1736 * 1737 * @param[in] dev 1738 * Pointer to rte_eth_dev structure. 1739 * @param[in] action_flags 1740 * Bit-fields that holds the actions detected until now. 1741 * @param[in] action 1742 * Port_id RTE action structure. 1743 * @param[in] attr 1744 * Attributes of flow that includes this action. 1745 * @param[out] error 1746 * Pointer to error structure. 1747 * 1748 * @return 1749 * 0 on success, a negative errno value otherwise and rte_errno is set. 1750 */ 1751 static int 1752 flow_dv_validate_action_port_id(struct rte_eth_dev *dev, 1753 uint64_t action_flags, 1754 const struct rte_flow_action *action, 1755 const struct rte_flow_attr *attr, 1756 struct rte_flow_error *error) 1757 { 1758 const struct rte_flow_action_port_id *port_id; 1759 uint16_t port; 1760 uint16_t esw_domain_id; 1761 uint16_t act_port_domain_id; 1762 int ret; 1763 1764 if (!attr->transfer) 1765 return rte_flow_error_set(error, ENOTSUP, 1766 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1767 NULL, 1768 "port id action is valid in transfer" 1769 " mode only"); 1770 if (!action || !action->conf) 1771 return rte_flow_error_set(error, ENOTSUP, 1772 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1773 NULL, 1774 "port id action parameters must be" 1775 " specified"); 1776 if (action_flags & (MLX5_FLOW_FATE_ACTIONS | 1777 MLX5_FLOW_FATE_ESWITCH_ACTIONS)) 1778 return rte_flow_error_set(error, EINVAL, 1779 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1780 "can have only one fate actions in" 1781 " a flow"); 1782 ret = mlx5_port_to_eswitch_info(dev->data->port_id, 1783 &esw_domain_id, NULL); 1784 if (ret < 0) 1785 return rte_flow_error_set(error, -ret, 1786 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1787 NULL, 1788 "failed to obtain E-Switch info"); 1789 port_id = action->conf; 1790 port = port_id->original ? dev->data->port_id : port_id->id; 1791 ret = mlx5_port_to_eswitch_info(port, &act_port_domain_id, NULL); 1792 if (ret) 1793 return rte_flow_error_set 1794 (error, -ret, 1795 RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id, 1796 "failed to obtain E-Switch port id for port"); 1797 if (act_port_domain_id != esw_domain_id) 1798 return rte_flow_error_set 1799 (error, -ret, 1800 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 1801 "port does not belong to" 1802 " E-Switch being configured"); 1803 return 0; 1804 } 1805 1806 /** 1807 * Find existing modify-header resource or create and register a new one. 1808 * 1809 * @param dev[in, out] 1810 * Pointer to rte_eth_dev structure. 1811 * @param[in, out] resource 1812 * Pointer to modify-header resource. 1813 * @parm[in, out] dev_flow 1814 * Pointer to the dev_flow. 1815 * @param[out] error 1816 * pointer to error structure. 1817 * 1818 * @return 1819 * 0 on success otherwise -errno and errno is set. 1820 */ 1821 static int 1822 flow_dv_modify_hdr_resource_register 1823 (struct rte_eth_dev *dev, 1824 struct mlx5_flow_dv_modify_hdr_resource *resource, 1825 struct mlx5_flow *dev_flow, 1826 struct rte_flow_error *error) 1827 { 1828 struct mlx5_priv *priv = dev->data->dev_private; 1829 struct mlx5_ibv_shared *sh = priv->sh; 1830 struct mlx5_flow_dv_modify_hdr_resource *cache_resource; 1831 struct mlx5dv_dr_domain *ns; 1832 1833 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) 1834 ns = sh->fdb_domain; 1835 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX) 1836 ns = sh->tx_domain; 1837 else 1838 ns = sh->rx_domain; 1839 resource->flags = 1840 dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL; 1841 /* Lookup a matching resource from cache. */ 1842 LIST_FOREACH(cache_resource, &sh->modify_cmds, next) { 1843 if (resource->ft_type == cache_resource->ft_type && 1844 resource->actions_num == cache_resource->actions_num && 1845 resource->flags == cache_resource->flags && 1846 !memcmp((const void *)resource->actions, 1847 (const void *)cache_resource->actions, 1848 (resource->actions_num * 1849 sizeof(resource->actions[0])))) { 1850 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++", 1851 (void *)cache_resource, 1852 rte_atomic32_read(&cache_resource->refcnt)); 1853 rte_atomic32_inc(&cache_resource->refcnt); 1854 dev_flow->dv.modify_hdr = cache_resource; 1855 return 0; 1856 } 1857 } 1858 /* Register new modify-header resource. */ 1859 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0); 1860 if (!cache_resource) 1861 return rte_flow_error_set(error, ENOMEM, 1862 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1863 "cannot allocate resource memory"); 1864 *cache_resource = *resource; 1865 cache_resource->verbs_action = 1866 mlx5_glue->dv_create_flow_action_modify_header 1867 (sh->ctx, cache_resource->ft_type, 1868 ns, cache_resource->flags, 1869 cache_resource->actions_num * 1870 sizeof(cache_resource->actions[0]), 1871 (uint64_t *)cache_resource->actions); 1872 if (!cache_resource->verbs_action) { 1873 rte_free(cache_resource); 1874 return rte_flow_error_set(error, ENOMEM, 1875 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1876 NULL, "cannot create action"); 1877 } 1878 rte_atomic32_init(&cache_resource->refcnt); 1879 rte_atomic32_inc(&cache_resource->refcnt); 1880 LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next); 1881 dev_flow->dv.modify_hdr = cache_resource; 1882 DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++", 1883 (void *)cache_resource, 1884 rte_atomic32_read(&cache_resource->refcnt)); 1885 return 0; 1886 } 1887 1888 /** 1889 * Get or create a flow counter. 1890 * 1891 * @param[in] dev 1892 * Pointer to the Ethernet device structure. 1893 * @param[in] shared 1894 * Indicate if this counter is shared with other flows. 1895 * @param[in] id 1896 * Counter identifier. 1897 * 1898 * @return 1899 * pointer to flow counter on success, NULL otherwise and rte_errno is set. 1900 */ 1901 static struct mlx5_flow_counter * 1902 flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id) 1903 { 1904 struct mlx5_priv *priv = dev->data->dev_private; 1905 struct mlx5_flow_counter *cnt = NULL; 1906 struct mlx5_devx_counter_set *dcs = NULL; 1907 int ret; 1908 1909 if (!priv->config.devx) { 1910 ret = -ENOTSUP; 1911 goto error_exit; 1912 } 1913 if (shared) { 1914 LIST_FOREACH(cnt, &priv->flow_counters, next) { 1915 if (cnt->shared && cnt->id == id) { 1916 cnt->ref_cnt++; 1917 return cnt; 1918 } 1919 } 1920 } 1921 cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0); 1922 dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0); 1923 if (!dcs || !cnt) { 1924 ret = -ENOMEM; 1925 goto error_exit; 1926 } 1927 ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs); 1928 if (ret) 1929 goto error_exit; 1930 struct mlx5_flow_counter tmpl = { 1931 .shared = shared, 1932 .ref_cnt = 1, 1933 .id = id, 1934 .dcs = dcs, 1935 }; 1936 tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0); 1937 if (!tmpl.action) { 1938 ret = errno; 1939 goto error_exit; 1940 } 1941 *cnt = tmpl; 1942 LIST_INSERT_HEAD(&priv->flow_counters, cnt, next); 1943 return cnt; 1944 error_exit: 1945 rte_free(cnt); 1946 rte_free(dcs); 1947 rte_errno = -ret; 1948 return NULL; 1949 } 1950 1951 /** 1952 * Release a flow counter. 1953 * 1954 * @param[in] counter 1955 * Pointer to the counter handler. 1956 */ 1957 static void 1958 flow_dv_counter_release(struct mlx5_flow_counter *counter) 1959 { 1960 int ret; 1961 1962 if (!counter) 1963 return; 1964 if (--counter->ref_cnt == 0) { 1965 ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj); 1966 if (ret) 1967 DRV_LOG(ERR, "Failed to free devx counters, %d", ret); 1968 LIST_REMOVE(counter, next); 1969 rte_free(counter->dcs); 1970 rte_free(counter); 1971 } 1972 } 1973 1974 /** 1975 * Verify the @p attributes will be correctly understood by the NIC and store 1976 * them in the @p flow if everything is correct. 1977 * 1978 * @param[in] dev 1979 * Pointer to dev struct. 1980 * @param[in] attributes 1981 * Pointer to flow attributes 1982 * @param[out] error 1983 * Pointer to error structure. 1984 * 1985 * @return 1986 * 0 on success, a negative errno value otherwise and rte_errno is set. 1987 */ 1988 static int 1989 flow_dv_validate_attributes(struct rte_eth_dev *dev, 1990 const struct rte_flow_attr *attributes, 1991 struct rte_flow_error *error) 1992 { 1993 struct mlx5_priv *priv = dev->data->dev_private; 1994 uint32_t priority_max = priv->config.flow_prio - 1; 1995 1996 #ifndef HAVE_MLX5DV_DR 1997 if (attributes->group) 1998 return rte_flow_error_set(error, ENOTSUP, 1999 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, 2000 NULL, 2001 "groups is not supported"); 2002 #endif 2003 if (attributes->priority != MLX5_FLOW_PRIO_RSVD && 2004 attributes->priority >= priority_max) 2005 return rte_flow_error_set(error, ENOTSUP, 2006 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, 2007 NULL, 2008 "priority out of range"); 2009 if (attributes->transfer) { 2010 if (!priv->config.dv_esw_en) 2011 return rte_flow_error_set 2012 (error, ENOTSUP, 2013 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2014 "E-Switch dr is not supported"); 2015 if (!(priv->representor || priv->master)) 2016 return rte_flow_error_set 2017 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2018 NULL, "E-Switch configurationd can only be" 2019 " done by a master or a representor device"); 2020 if (attributes->egress) 2021 return rte_flow_error_set 2022 (error, ENOTSUP, 2023 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes, 2024 "egress is not supported"); 2025 if (attributes->group >= MLX5_MAX_TABLES_FDB) 2026 return rte_flow_error_set 2027 (error, EINVAL, 2028 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, 2029 NULL, "group must be smaller than " 2030 RTE_STR(MLX5_MAX_FDB_TABLES)); 2031 } 2032 if (!(attributes->egress ^ attributes->ingress)) 2033 return rte_flow_error_set(error, ENOTSUP, 2034 RTE_FLOW_ERROR_TYPE_ATTR, NULL, 2035 "must specify exactly one of " 2036 "ingress or egress"); 2037 return 0; 2038 } 2039 2040 /** 2041 * Internal validation function. For validating both actions and items. 2042 * 2043 * @param[in] dev 2044 * Pointer to the rte_eth_dev structure. 2045 * @param[in] attr 2046 * Pointer to the flow attributes. 2047 * @param[in] items 2048 * Pointer to the list of items. 2049 * @param[in] actions 2050 * Pointer to the list of actions. 2051 * @param[out] error 2052 * Pointer to the error structure. 2053 * 2054 * @return 2055 * 0 on success, a negative errno value otherwise and rte_errno is set. 2056 */ 2057 static int 2058 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, 2059 const struct rte_flow_item items[], 2060 const struct rte_flow_action actions[], 2061 struct rte_flow_error *error) 2062 { 2063 int ret; 2064 uint64_t action_flags = 0; 2065 uint64_t item_flags = 0; 2066 uint64_t last_item = 0; 2067 uint8_t next_protocol = 0xff; 2068 int actions_n = 0; 2069 struct rte_flow_item_tcp nic_tcp_mask = { 2070 .hdr = { 2071 .tcp_flags = 0xFF, 2072 .src_port = RTE_BE16(UINT16_MAX), 2073 .dst_port = RTE_BE16(UINT16_MAX), 2074 } 2075 }; 2076 2077 if (items == NULL) 2078 return -1; 2079 ret = flow_dv_validate_attributes(dev, attr, error); 2080 if (ret < 0) 2081 return ret; 2082 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { 2083 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 2084 switch (items->type) { 2085 case RTE_FLOW_ITEM_TYPE_VOID: 2086 break; 2087 case RTE_FLOW_ITEM_TYPE_PORT_ID: 2088 ret = flow_dv_validate_item_port_id 2089 (dev, items, attr, item_flags, error); 2090 if (ret < 0) 2091 return ret; 2092 last_item |= MLX5_FLOW_ITEM_PORT_ID; 2093 break; 2094 case RTE_FLOW_ITEM_TYPE_ETH: 2095 ret = mlx5_flow_validate_item_eth(items, item_flags, 2096 error); 2097 if (ret < 0) 2098 return ret; 2099 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : 2100 MLX5_FLOW_LAYER_OUTER_L2; 2101 break; 2102 case RTE_FLOW_ITEM_TYPE_VLAN: 2103 ret = mlx5_flow_validate_item_vlan(items, item_flags, 2104 error); 2105 if (ret < 0) 2106 return ret; 2107 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN : 2108 MLX5_FLOW_LAYER_OUTER_VLAN; 2109 break; 2110 case RTE_FLOW_ITEM_TYPE_IPV4: 2111 ret = mlx5_flow_validate_item_ipv4(items, item_flags, 2112 NULL, error); 2113 if (ret < 0) 2114 return ret; 2115 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : 2116 MLX5_FLOW_LAYER_OUTER_L3_IPV4; 2117 if (items->mask != NULL && 2118 ((const struct rte_flow_item_ipv4 *) 2119 items->mask)->hdr.next_proto_id) { 2120 next_protocol = 2121 ((const struct rte_flow_item_ipv4 *) 2122 (items->spec))->hdr.next_proto_id; 2123 next_protocol &= 2124 ((const struct rte_flow_item_ipv4 *) 2125 (items->mask))->hdr.next_proto_id; 2126 } else { 2127 /* Reset for inner layer. */ 2128 next_protocol = 0xff; 2129 } 2130 break; 2131 case RTE_FLOW_ITEM_TYPE_IPV6: 2132 ret = mlx5_flow_validate_item_ipv6(items, item_flags, 2133 NULL, error); 2134 if (ret < 0) 2135 return ret; 2136 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : 2137 MLX5_FLOW_LAYER_OUTER_L3_IPV6; 2138 if (items->mask != NULL && 2139 ((const struct rte_flow_item_ipv6 *) 2140 items->mask)->hdr.proto) { 2141 next_protocol = 2142 ((const struct rte_flow_item_ipv6 *) 2143 items->spec)->hdr.proto; 2144 next_protocol &= 2145 ((const struct rte_flow_item_ipv6 *) 2146 items->mask)->hdr.proto; 2147 } else { 2148 /* Reset for inner layer. */ 2149 next_protocol = 0xff; 2150 } 2151 break; 2152 case RTE_FLOW_ITEM_TYPE_TCP: 2153 ret = mlx5_flow_validate_item_tcp 2154 (items, item_flags, 2155 next_protocol, 2156 &nic_tcp_mask, 2157 error); 2158 if (ret < 0) 2159 return ret; 2160 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP : 2161 MLX5_FLOW_LAYER_OUTER_L4_TCP; 2162 break; 2163 case RTE_FLOW_ITEM_TYPE_UDP: 2164 ret = mlx5_flow_validate_item_udp(items, item_flags, 2165 next_protocol, 2166 error); 2167 if (ret < 0) 2168 return ret; 2169 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP : 2170 MLX5_FLOW_LAYER_OUTER_L4_UDP; 2171 break; 2172 case RTE_FLOW_ITEM_TYPE_GRE: 2173 case RTE_FLOW_ITEM_TYPE_NVGRE: 2174 ret = mlx5_flow_validate_item_gre(items, item_flags, 2175 next_protocol, error); 2176 if (ret < 0) 2177 return ret; 2178 last_item = MLX5_FLOW_LAYER_GRE; 2179 break; 2180 case RTE_FLOW_ITEM_TYPE_VXLAN: 2181 ret = mlx5_flow_validate_item_vxlan(items, item_flags, 2182 error); 2183 if (ret < 0) 2184 return ret; 2185 last_item = MLX5_FLOW_LAYER_VXLAN; 2186 break; 2187 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: 2188 ret = mlx5_flow_validate_item_vxlan_gpe(items, 2189 item_flags, dev, 2190 error); 2191 if (ret < 0) 2192 return ret; 2193 last_item = MLX5_FLOW_LAYER_VXLAN_GPE; 2194 break; 2195 case RTE_FLOW_ITEM_TYPE_MPLS: 2196 ret = mlx5_flow_validate_item_mpls(dev, items, 2197 item_flags, 2198 last_item, error); 2199 if (ret < 0) 2200 return ret; 2201 last_item = MLX5_FLOW_LAYER_MPLS; 2202 break; 2203 case RTE_FLOW_ITEM_TYPE_META: 2204 ret = flow_dv_validate_item_meta(dev, items, attr, 2205 error); 2206 if (ret < 0) 2207 return ret; 2208 last_item = MLX5_FLOW_ITEM_METADATA; 2209 break; 2210 default: 2211 return rte_flow_error_set(error, ENOTSUP, 2212 RTE_FLOW_ERROR_TYPE_ITEM, 2213 NULL, "item not supported"); 2214 } 2215 item_flags |= last_item; 2216 } 2217 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2218 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS) 2219 return rte_flow_error_set(error, ENOTSUP, 2220 RTE_FLOW_ERROR_TYPE_ACTION, 2221 actions, "too many actions"); 2222 switch (actions->type) { 2223 case RTE_FLOW_ACTION_TYPE_VOID: 2224 break; 2225 case RTE_FLOW_ACTION_TYPE_PORT_ID: 2226 ret = flow_dv_validate_action_port_id(dev, 2227 action_flags, 2228 actions, 2229 attr, 2230 error); 2231 if (ret) 2232 return ret; 2233 action_flags |= MLX5_FLOW_ACTION_PORT_ID; 2234 ++actions_n; 2235 break; 2236 case RTE_FLOW_ACTION_TYPE_FLAG: 2237 ret = mlx5_flow_validate_action_flag(action_flags, 2238 attr, error); 2239 if (ret < 0) 2240 return ret; 2241 action_flags |= MLX5_FLOW_ACTION_FLAG; 2242 ++actions_n; 2243 break; 2244 case RTE_FLOW_ACTION_TYPE_MARK: 2245 ret = mlx5_flow_validate_action_mark(actions, 2246 action_flags, 2247 attr, error); 2248 if (ret < 0) 2249 return ret; 2250 action_flags |= MLX5_FLOW_ACTION_MARK; 2251 ++actions_n; 2252 break; 2253 case RTE_FLOW_ACTION_TYPE_DROP: 2254 ret = mlx5_flow_validate_action_drop(action_flags, 2255 attr, error); 2256 if (ret < 0) 2257 return ret; 2258 action_flags |= MLX5_FLOW_ACTION_DROP; 2259 ++actions_n; 2260 break; 2261 case RTE_FLOW_ACTION_TYPE_QUEUE: 2262 ret = mlx5_flow_validate_action_queue(actions, 2263 action_flags, dev, 2264 attr, error); 2265 if (ret < 0) 2266 return ret; 2267 action_flags |= MLX5_FLOW_ACTION_QUEUE; 2268 ++actions_n; 2269 break; 2270 case RTE_FLOW_ACTION_TYPE_RSS: 2271 ret = mlx5_flow_validate_action_rss(actions, 2272 action_flags, dev, 2273 attr, item_flags, 2274 error); 2275 if (ret < 0) 2276 return ret; 2277 action_flags |= MLX5_FLOW_ACTION_RSS; 2278 ++actions_n; 2279 break; 2280 case RTE_FLOW_ACTION_TYPE_COUNT: 2281 ret = flow_dv_validate_action_count(dev, error); 2282 if (ret < 0) 2283 return ret; 2284 action_flags |= MLX5_FLOW_ACTION_COUNT; 2285 ++actions_n; 2286 break; 2287 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 2288 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 2289 ret = flow_dv_validate_action_l2_encap(action_flags, 2290 actions, attr, 2291 error); 2292 if (ret < 0) 2293 return ret; 2294 action_flags |= actions->type == 2295 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ? 2296 MLX5_FLOW_ACTION_VXLAN_ENCAP : 2297 MLX5_FLOW_ACTION_NVGRE_ENCAP; 2298 ++actions_n; 2299 break; 2300 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: 2301 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: 2302 ret = flow_dv_validate_action_l2_decap(action_flags, 2303 attr, error); 2304 if (ret < 0) 2305 return ret; 2306 action_flags |= actions->type == 2307 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ? 2308 MLX5_FLOW_ACTION_VXLAN_DECAP : 2309 MLX5_FLOW_ACTION_NVGRE_DECAP; 2310 ++actions_n; 2311 break; 2312 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 2313 ret = flow_dv_validate_action_raw_encap(action_flags, 2314 actions, attr, 2315 error); 2316 if (ret < 0) 2317 return ret; 2318 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP; 2319 ++actions_n; 2320 break; 2321 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 2322 ret = flow_dv_validate_action_raw_decap(action_flags, 2323 actions, attr, 2324 error); 2325 if (ret < 0) 2326 return ret; 2327 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP; 2328 ++actions_n; 2329 break; 2330 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: 2331 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: 2332 ret = flow_dv_validate_action_modify_mac(action_flags, 2333 actions, 2334 item_flags, 2335 error); 2336 if (ret < 0) 2337 return ret; 2338 /* Count all modify-header actions as one action. */ 2339 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 2340 ++actions_n; 2341 action_flags |= actions->type == 2342 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ? 2343 MLX5_FLOW_ACTION_SET_MAC_SRC : 2344 MLX5_FLOW_ACTION_SET_MAC_DST; 2345 break; 2346 2347 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: 2348 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: 2349 ret = flow_dv_validate_action_modify_ipv4(action_flags, 2350 actions, 2351 item_flags, 2352 error); 2353 if (ret < 0) 2354 return ret; 2355 /* Count all modify-header actions as one action. */ 2356 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 2357 ++actions_n; 2358 action_flags |= actions->type == 2359 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ? 2360 MLX5_FLOW_ACTION_SET_IPV4_SRC : 2361 MLX5_FLOW_ACTION_SET_IPV4_DST; 2362 break; 2363 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: 2364 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: 2365 ret = flow_dv_validate_action_modify_ipv6(action_flags, 2366 actions, 2367 item_flags, 2368 error); 2369 if (ret < 0) 2370 return ret; 2371 /* Count all modify-header actions as one action. */ 2372 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 2373 ++actions_n; 2374 action_flags |= actions->type == 2375 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ? 2376 MLX5_FLOW_ACTION_SET_IPV6_SRC : 2377 MLX5_FLOW_ACTION_SET_IPV6_DST; 2378 break; 2379 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: 2380 case RTE_FLOW_ACTION_TYPE_SET_TP_DST: 2381 ret = flow_dv_validate_action_modify_tp(action_flags, 2382 actions, 2383 item_flags, 2384 error); 2385 if (ret < 0) 2386 return ret; 2387 /* Count all modify-header actions as one action. */ 2388 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 2389 ++actions_n; 2390 action_flags |= actions->type == 2391 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ? 2392 MLX5_FLOW_ACTION_SET_TP_SRC : 2393 MLX5_FLOW_ACTION_SET_TP_DST; 2394 break; 2395 case RTE_FLOW_ACTION_TYPE_DEC_TTL: 2396 case RTE_FLOW_ACTION_TYPE_SET_TTL: 2397 ret = flow_dv_validate_action_modify_ttl(action_flags, 2398 actions, 2399 item_flags, 2400 error); 2401 if (ret < 0) 2402 return ret; 2403 /* Count all modify-header actions as one action. */ 2404 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 2405 ++actions_n; 2406 action_flags |= actions->type == 2407 RTE_FLOW_ACTION_TYPE_SET_TTL ? 2408 MLX5_FLOW_ACTION_SET_TTL : 2409 MLX5_FLOW_ACTION_DEC_TTL; 2410 break; 2411 case RTE_FLOW_ACTION_TYPE_JUMP: 2412 ret = flow_dv_validate_action_jump(actions, 2413 attr->group, error); 2414 if (ret) 2415 return ret; 2416 ++actions_n; 2417 action_flags |= MLX5_FLOW_ACTION_JUMP; 2418 break; 2419 default: 2420 return rte_flow_error_set(error, ENOTSUP, 2421 RTE_FLOW_ERROR_TYPE_ACTION, 2422 actions, 2423 "action not supported"); 2424 } 2425 } 2426 /* Eswitch has few restrictions on using items and actions */ 2427 if (attr->transfer) { 2428 if (action_flags & MLX5_FLOW_ACTION_FLAG) 2429 return rte_flow_error_set(error, ENOTSUP, 2430 RTE_FLOW_ERROR_TYPE_ACTION, 2431 NULL, 2432 "unsupported action FLAG"); 2433 if (action_flags & MLX5_FLOW_ACTION_MARK) 2434 return rte_flow_error_set(error, ENOTSUP, 2435 RTE_FLOW_ERROR_TYPE_ACTION, 2436 NULL, 2437 "unsupported action MARK"); 2438 if (action_flags & MLX5_FLOW_ACTION_QUEUE) 2439 return rte_flow_error_set(error, ENOTSUP, 2440 RTE_FLOW_ERROR_TYPE_ACTION, 2441 NULL, 2442 "unsupported action QUEUE"); 2443 if (action_flags & MLX5_FLOW_ACTION_RSS) 2444 return rte_flow_error_set(error, ENOTSUP, 2445 RTE_FLOW_ERROR_TYPE_ACTION, 2446 NULL, 2447 "unsupported action RSS"); 2448 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS)) 2449 return rte_flow_error_set(error, EINVAL, 2450 RTE_FLOW_ERROR_TYPE_ACTION, 2451 actions, 2452 "no fate action is found"); 2453 } else { 2454 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress) 2455 return rte_flow_error_set(error, EINVAL, 2456 RTE_FLOW_ERROR_TYPE_ACTION, 2457 actions, 2458 "no fate action is found"); 2459 } 2460 return 0; 2461 } 2462 2463 /** 2464 * Internal preparation function. Allocates the DV flow size, 2465 * this size is constant. 2466 * 2467 * @param[in] attr 2468 * Pointer to the flow attributes. 2469 * @param[in] items 2470 * Pointer to the list of items. 2471 * @param[in] actions 2472 * Pointer to the list of actions. 2473 * @param[out] error 2474 * Pointer to the error structure. 2475 * 2476 * @return 2477 * Pointer to mlx5_flow object on success, 2478 * otherwise NULL and rte_errno is set. 2479 */ 2480 static struct mlx5_flow * 2481 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused, 2482 const struct rte_flow_item items[] __rte_unused, 2483 const struct rte_flow_action actions[] __rte_unused, 2484 struct rte_flow_error *error) 2485 { 2486 uint32_t size = sizeof(struct mlx5_flow); 2487 struct mlx5_flow *flow; 2488 2489 flow = rte_calloc(__func__, 1, size, 0); 2490 if (!flow) { 2491 rte_flow_error_set(error, ENOMEM, 2492 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2493 "not enough memory to create flow"); 2494 return NULL; 2495 } 2496 flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param); 2497 return flow; 2498 } 2499 2500 #ifndef NDEBUG 2501 /** 2502 * Sanity check for match mask and value. Similar to check_valid_spec() in 2503 * kernel driver. If unmasked bit is present in value, it returns failure. 2504 * 2505 * @param match_mask 2506 * pointer to match mask buffer. 2507 * @param match_value 2508 * pointer to match value buffer. 2509 * 2510 * @return 2511 * 0 if valid, -EINVAL otherwise. 2512 */ 2513 static int 2514 flow_dv_check_valid_spec(void *match_mask, void *match_value) 2515 { 2516 uint8_t *m = match_mask; 2517 uint8_t *v = match_value; 2518 unsigned int i; 2519 2520 for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) { 2521 if (v[i] & ~m[i]) { 2522 DRV_LOG(ERR, 2523 "match_value differs from match_criteria" 2524 " %p[%u] != %p[%u]", 2525 match_value, i, match_mask, i); 2526 return -EINVAL; 2527 } 2528 } 2529 return 0; 2530 } 2531 #endif 2532 2533 /** 2534 * Add Ethernet item to matcher and to the value. 2535 * 2536 * @param[in, out] matcher 2537 * Flow matcher. 2538 * @param[in, out] key 2539 * Flow matcher value. 2540 * @param[in] item 2541 * Flow pattern to translate. 2542 * @param[in] inner 2543 * Item is inner pattern. 2544 */ 2545 static void 2546 flow_dv_translate_item_eth(void *matcher, void *key, 2547 const struct rte_flow_item *item, int inner) 2548 { 2549 const struct rte_flow_item_eth *eth_m = item->mask; 2550 const struct rte_flow_item_eth *eth_v = item->spec; 2551 const struct rte_flow_item_eth nic_mask = { 2552 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", 2553 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", 2554 .type = RTE_BE16(0xffff), 2555 }; 2556 void *headers_m; 2557 void *headers_v; 2558 char *l24_v; 2559 unsigned int i; 2560 2561 if (!eth_v) 2562 return; 2563 if (!eth_m) 2564 eth_m = &nic_mask; 2565 if (inner) { 2566 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 2567 inner_headers); 2568 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 2569 } else { 2570 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 2571 outer_headers); 2572 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 2573 } 2574 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16), 2575 ð_m->dst, sizeof(eth_m->dst)); 2576 /* The value must be in the range of the mask. */ 2577 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16); 2578 for (i = 0; i < sizeof(eth_m->dst); ++i) 2579 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i]; 2580 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16), 2581 ð_m->src, sizeof(eth_m->src)); 2582 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16); 2583 /* The value must be in the range of the mask. */ 2584 for (i = 0; i < sizeof(eth_m->dst); ++i) 2585 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i]; 2586 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 2587 rte_be_to_cpu_16(eth_m->type)); 2588 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype); 2589 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type; 2590 } 2591 2592 /** 2593 * Add VLAN item to matcher and to the value. 2594 * 2595 * @param[in, out] matcher 2596 * Flow matcher. 2597 * @param[in, out] key 2598 * Flow matcher value. 2599 * @param[in] item 2600 * Flow pattern to translate. 2601 * @param[in] inner 2602 * Item is inner pattern. 2603 */ 2604 static void 2605 flow_dv_translate_item_vlan(void *matcher, void *key, 2606 const struct rte_flow_item *item, 2607 int inner) 2608 { 2609 const struct rte_flow_item_vlan *vlan_m = item->mask; 2610 const struct rte_flow_item_vlan *vlan_v = item->spec; 2611 const struct rte_flow_item_vlan nic_mask = { 2612 .tci = RTE_BE16(0x0fff), 2613 .inner_type = RTE_BE16(0xffff), 2614 }; 2615 void *headers_m; 2616 void *headers_v; 2617 uint16_t tci_m; 2618 uint16_t tci_v; 2619 2620 if (!vlan_v) 2621 return; 2622 if (!vlan_m) 2623 vlan_m = &nic_mask; 2624 if (inner) { 2625 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 2626 inner_headers); 2627 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 2628 } else { 2629 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 2630 outer_headers); 2631 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 2632 } 2633 tci_m = rte_be_to_cpu_16(vlan_m->tci); 2634 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci); 2635 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); 2636 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1); 2637 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m); 2638 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v); 2639 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12); 2640 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12); 2641 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13); 2642 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13); 2643 } 2644 2645 /** 2646 * Add IPV4 item to matcher and to the value. 2647 * 2648 * @param[in, out] matcher 2649 * Flow matcher. 2650 * @param[in, out] key 2651 * Flow matcher value. 2652 * @param[in] item 2653 * Flow pattern to translate. 2654 * @param[in] inner 2655 * Item is inner pattern. 2656 * @param[in] group 2657 * The group to insert the rule. 2658 */ 2659 static void 2660 flow_dv_translate_item_ipv4(void *matcher, void *key, 2661 const struct rte_flow_item *item, 2662 int inner, uint32_t group) 2663 { 2664 const struct rte_flow_item_ipv4 *ipv4_m = item->mask; 2665 const struct rte_flow_item_ipv4 *ipv4_v = item->spec; 2666 const struct rte_flow_item_ipv4 nic_mask = { 2667 .hdr = { 2668 .src_addr = RTE_BE32(0xffffffff), 2669 .dst_addr = RTE_BE32(0xffffffff), 2670 .type_of_service = 0xff, 2671 .next_proto_id = 0xff, 2672 }, 2673 }; 2674 void *headers_m; 2675 void *headers_v; 2676 char *l24_m; 2677 char *l24_v; 2678 uint8_t tos; 2679 2680 if (inner) { 2681 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 2682 inner_headers); 2683 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 2684 } else { 2685 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 2686 outer_headers); 2687 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 2688 } 2689 if (group == 0) 2690 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf); 2691 else 2692 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4); 2693 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4); 2694 if (!ipv4_v) 2695 return; 2696 if (!ipv4_m) 2697 ipv4_m = &nic_mask; 2698 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, 2699 dst_ipv4_dst_ipv6.ipv4_layout.ipv4); 2700 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2701 dst_ipv4_dst_ipv6.ipv4_layout.ipv4); 2702 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr; 2703 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr; 2704 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, 2705 src_ipv4_src_ipv6.ipv4_layout.ipv4); 2706 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2707 src_ipv4_src_ipv6.ipv4_layout.ipv4); 2708 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr; 2709 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr; 2710 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service; 2711 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, 2712 ipv4_m->hdr.type_of_service); 2713 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos); 2714 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, 2715 ipv4_m->hdr.type_of_service >> 2); 2716 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2); 2717 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 2718 ipv4_m->hdr.next_proto_id); 2719 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 2720 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id); 2721 } 2722 2723 /** 2724 * Add IPV6 item to matcher and to the value. 2725 * 2726 * @param[in, out] matcher 2727 * Flow matcher. 2728 * @param[in, out] key 2729 * Flow matcher value. 2730 * @param[in] item 2731 * Flow pattern to translate. 2732 * @param[in] inner 2733 * Item is inner pattern. 2734 * @param[in] group 2735 * The group to insert the rule. 2736 */ 2737 static void 2738 flow_dv_translate_item_ipv6(void *matcher, void *key, 2739 const struct rte_flow_item *item, 2740 int inner, uint32_t group) 2741 { 2742 const struct rte_flow_item_ipv6 *ipv6_m = item->mask; 2743 const struct rte_flow_item_ipv6 *ipv6_v = item->spec; 2744 const struct rte_flow_item_ipv6 nic_mask = { 2745 .hdr = { 2746 .src_addr = 2747 "\xff\xff\xff\xff\xff\xff\xff\xff" 2748 "\xff\xff\xff\xff\xff\xff\xff\xff", 2749 .dst_addr = 2750 "\xff\xff\xff\xff\xff\xff\xff\xff" 2751 "\xff\xff\xff\xff\xff\xff\xff\xff", 2752 .vtc_flow = RTE_BE32(0xffffffff), 2753 .proto = 0xff, 2754 .hop_limits = 0xff, 2755 }, 2756 }; 2757 void *headers_m; 2758 void *headers_v; 2759 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 2760 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 2761 char *l24_m; 2762 char *l24_v; 2763 uint32_t vtc_m; 2764 uint32_t vtc_v; 2765 int i; 2766 int size; 2767 2768 if (inner) { 2769 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 2770 inner_headers); 2771 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 2772 } else { 2773 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 2774 outer_headers); 2775 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 2776 } 2777 if (group == 0) 2778 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf); 2779 else 2780 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6); 2781 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6); 2782 if (!ipv6_v) 2783 return; 2784 if (!ipv6_m) 2785 ipv6_m = &nic_mask; 2786 size = sizeof(ipv6_m->hdr.dst_addr); 2787 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, 2788 dst_ipv4_dst_ipv6.ipv6_layout.ipv6); 2789 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2790 dst_ipv4_dst_ipv6.ipv6_layout.ipv6); 2791 memcpy(l24_m, ipv6_m->hdr.dst_addr, size); 2792 for (i = 0; i < size; ++i) 2793 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i]; 2794 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, 2795 src_ipv4_src_ipv6.ipv6_layout.ipv6); 2796 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2797 src_ipv4_src_ipv6.ipv6_layout.ipv6); 2798 memcpy(l24_m, ipv6_m->hdr.src_addr, size); 2799 for (i = 0; i < size; ++i) 2800 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i]; 2801 /* TOS. */ 2802 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow); 2803 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow); 2804 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20); 2805 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20); 2806 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22); 2807 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22); 2808 /* Label. */ 2809 if (inner) { 2810 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label, 2811 vtc_m); 2812 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label, 2813 vtc_v); 2814 } else { 2815 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label, 2816 vtc_m); 2817 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label, 2818 vtc_v); 2819 } 2820 /* Protocol. */ 2821 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 2822 ipv6_m->hdr.proto); 2823 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 2824 ipv6_v->hdr.proto & ipv6_m->hdr.proto); 2825 } 2826 2827 /** 2828 * Add TCP item to matcher and to the value. 2829 * 2830 * @param[in, out] matcher 2831 * Flow matcher. 2832 * @param[in, out] key 2833 * Flow matcher value. 2834 * @param[in] item 2835 * Flow pattern to translate. 2836 * @param[in] inner 2837 * Item is inner pattern. 2838 */ 2839 static void 2840 flow_dv_translate_item_tcp(void *matcher, void *key, 2841 const struct rte_flow_item *item, 2842 int inner) 2843 { 2844 const struct rte_flow_item_tcp *tcp_m = item->mask; 2845 const struct rte_flow_item_tcp *tcp_v = item->spec; 2846 void *headers_m; 2847 void *headers_v; 2848 2849 if (inner) { 2850 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 2851 inner_headers); 2852 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 2853 } else { 2854 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 2855 outer_headers); 2856 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 2857 } 2858 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); 2859 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP); 2860 if (!tcp_v) 2861 return; 2862 if (!tcp_m) 2863 tcp_m = &rte_flow_item_tcp_mask; 2864 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport, 2865 rte_be_to_cpu_16(tcp_m->hdr.src_port)); 2866 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport, 2867 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port)); 2868 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport, 2869 rte_be_to_cpu_16(tcp_m->hdr.dst_port)); 2870 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport, 2871 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port)); 2872 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags, 2873 tcp_m->hdr.tcp_flags); 2874 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags, 2875 (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags)); 2876 } 2877 2878 /** 2879 * Add UDP item to matcher and to the value. 2880 * 2881 * @param[in, out] matcher 2882 * Flow matcher. 2883 * @param[in, out] key 2884 * Flow matcher value. 2885 * @param[in] item 2886 * Flow pattern to translate. 2887 * @param[in] inner 2888 * Item is inner pattern. 2889 */ 2890 static void 2891 flow_dv_translate_item_udp(void *matcher, void *key, 2892 const struct rte_flow_item *item, 2893 int inner) 2894 { 2895 const struct rte_flow_item_udp *udp_m = item->mask; 2896 const struct rte_flow_item_udp *udp_v = item->spec; 2897 void *headers_m; 2898 void *headers_v; 2899 2900 if (inner) { 2901 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 2902 inner_headers); 2903 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 2904 } else { 2905 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 2906 outer_headers); 2907 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 2908 } 2909 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); 2910 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP); 2911 if (!udp_v) 2912 return; 2913 if (!udp_m) 2914 udp_m = &rte_flow_item_udp_mask; 2915 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport, 2916 rte_be_to_cpu_16(udp_m->hdr.src_port)); 2917 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, 2918 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port)); 2919 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 2920 rte_be_to_cpu_16(udp_m->hdr.dst_port)); 2921 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, 2922 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port)); 2923 } 2924 2925 /** 2926 * Add GRE item to matcher and to the value. 2927 * 2928 * @param[in, out] matcher 2929 * Flow matcher. 2930 * @param[in, out] key 2931 * Flow matcher value. 2932 * @param[in] item 2933 * Flow pattern to translate. 2934 * @param[in] inner 2935 * Item is inner pattern. 2936 */ 2937 static void 2938 flow_dv_translate_item_gre(void *matcher, void *key, 2939 const struct rte_flow_item *item, 2940 int inner) 2941 { 2942 const struct rte_flow_item_gre *gre_m = item->mask; 2943 const struct rte_flow_item_gre *gre_v = item->spec; 2944 void *headers_m; 2945 void *headers_v; 2946 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 2947 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 2948 2949 if (inner) { 2950 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 2951 inner_headers); 2952 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 2953 } else { 2954 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 2955 outer_headers); 2956 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 2957 } 2958 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); 2959 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE); 2960 if (!gre_v) 2961 return; 2962 if (!gre_m) 2963 gre_m = &rte_flow_item_gre_mask; 2964 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 2965 rte_be_to_cpu_16(gre_m->protocol)); 2966 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, 2967 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol)); 2968 } 2969 2970 /** 2971 * Add NVGRE item to matcher and to the value. 2972 * 2973 * @param[in, out] matcher 2974 * Flow matcher. 2975 * @param[in, out] key 2976 * Flow matcher value. 2977 * @param[in] item 2978 * Flow pattern to translate. 2979 * @param[in] inner 2980 * Item is inner pattern. 2981 */ 2982 static void 2983 flow_dv_translate_item_nvgre(void *matcher, void *key, 2984 const struct rte_flow_item *item, 2985 int inner) 2986 { 2987 const struct rte_flow_item_nvgre *nvgre_m = item->mask; 2988 const struct rte_flow_item_nvgre *nvgre_v = item->spec; 2989 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 2990 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 2991 const char *tni_flow_id_m = (const char *)nvgre_m->tni; 2992 const char *tni_flow_id_v = (const char *)nvgre_v->tni; 2993 char *gre_key_m; 2994 char *gre_key_v; 2995 int size; 2996 int i; 2997 2998 flow_dv_translate_item_gre(matcher, key, item, inner); 2999 if (!nvgre_v) 3000 return; 3001 if (!nvgre_m) 3002 nvgre_m = &rte_flow_item_nvgre_mask; 3003 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id); 3004 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h); 3005 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h); 3006 memcpy(gre_key_m, tni_flow_id_m, size); 3007 for (i = 0; i < size; ++i) 3008 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i]; 3009 } 3010 3011 /** 3012 * Add VXLAN item to matcher and to the value. 3013 * 3014 * @param[in, out] matcher 3015 * Flow matcher. 3016 * @param[in, out] key 3017 * Flow matcher value. 3018 * @param[in] item 3019 * Flow pattern to translate. 3020 * @param[in] inner 3021 * Item is inner pattern. 3022 */ 3023 static void 3024 flow_dv_translate_item_vxlan(void *matcher, void *key, 3025 const struct rte_flow_item *item, 3026 int inner) 3027 { 3028 const struct rte_flow_item_vxlan *vxlan_m = item->mask; 3029 const struct rte_flow_item_vxlan *vxlan_v = item->spec; 3030 void *headers_m; 3031 void *headers_v; 3032 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 3033 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 3034 char *vni_m; 3035 char *vni_v; 3036 uint16_t dport; 3037 int size; 3038 int i; 3039 3040 if (inner) { 3041 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 3042 inner_headers); 3043 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 3044 } else { 3045 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 3046 outer_headers); 3047 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 3048 } 3049 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ? 3050 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE; 3051 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) { 3052 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF); 3053 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport); 3054 } 3055 if (!vxlan_v) 3056 return; 3057 if (!vxlan_m) 3058 vxlan_m = &rte_flow_item_vxlan_mask; 3059 size = sizeof(vxlan_m->vni); 3060 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni); 3061 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni); 3062 memcpy(vni_m, vxlan_m->vni, size); 3063 for (i = 0; i < size; ++i) 3064 vni_v[i] = vni_m[i] & vxlan_v->vni[i]; 3065 } 3066 3067 /** 3068 * Add MPLS item to matcher and to the value. 3069 * 3070 * @param[in, out] matcher 3071 * Flow matcher. 3072 * @param[in, out] key 3073 * Flow matcher value. 3074 * @param[in] item 3075 * Flow pattern to translate. 3076 * @param[in] prev_layer 3077 * The protocol layer indicated in previous item. 3078 * @param[in] inner 3079 * Item is inner pattern. 3080 */ 3081 static void 3082 flow_dv_translate_item_mpls(void *matcher, void *key, 3083 const struct rte_flow_item *item, 3084 uint64_t prev_layer, 3085 int inner) 3086 { 3087 const uint32_t *in_mpls_m = item->mask; 3088 const uint32_t *in_mpls_v = item->spec; 3089 uint32_t *out_mpls_m = 0; 3090 uint32_t *out_mpls_v = 0; 3091 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 3092 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 3093 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher, 3094 misc_parameters_2); 3095 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2); 3096 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers); 3097 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 3098 3099 switch (prev_layer) { 3100 case MLX5_FLOW_LAYER_OUTER_L4_UDP: 3101 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff); 3102 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, 3103 MLX5_UDP_PORT_MPLS); 3104 break; 3105 case MLX5_FLOW_LAYER_GRE: 3106 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff); 3107 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, 3108 RTE_ETHER_TYPE_MPLS); 3109 break; 3110 default: 3111 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); 3112 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 3113 IPPROTO_MPLS); 3114 break; 3115 } 3116 if (!in_mpls_v) 3117 return; 3118 if (!in_mpls_m) 3119 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask; 3120 switch (prev_layer) { 3121 case MLX5_FLOW_LAYER_OUTER_L4_UDP: 3122 out_mpls_m = 3123 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m, 3124 outer_first_mpls_over_udp); 3125 out_mpls_v = 3126 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v, 3127 outer_first_mpls_over_udp); 3128 break; 3129 case MLX5_FLOW_LAYER_GRE: 3130 out_mpls_m = 3131 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m, 3132 outer_first_mpls_over_gre); 3133 out_mpls_v = 3134 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v, 3135 outer_first_mpls_over_gre); 3136 break; 3137 default: 3138 /* Inner MPLS not over GRE is not supported. */ 3139 if (!inner) { 3140 out_mpls_m = 3141 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, 3142 misc2_m, 3143 outer_first_mpls); 3144 out_mpls_v = 3145 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, 3146 misc2_v, 3147 outer_first_mpls); 3148 } 3149 break; 3150 } 3151 if (out_mpls_m && out_mpls_v) { 3152 *out_mpls_m = *in_mpls_m; 3153 *out_mpls_v = *in_mpls_v & *in_mpls_m; 3154 } 3155 } 3156 3157 /** 3158 * Add META item to matcher 3159 * 3160 * @param[in, out] matcher 3161 * Flow matcher. 3162 * @param[in, out] key 3163 * Flow matcher value. 3164 * @param[in] item 3165 * Flow pattern to translate. 3166 * @param[in] inner 3167 * Item is inner pattern. 3168 */ 3169 static void 3170 flow_dv_translate_item_meta(void *matcher, void *key, 3171 const struct rte_flow_item *item) 3172 { 3173 const struct rte_flow_item_meta *meta_m; 3174 const struct rte_flow_item_meta *meta_v; 3175 void *misc2_m = 3176 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2); 3177 void *misc2_v = 3178 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2); 3179 3180 meta_m = (const void *)item->mask; 3181 if (!meta_m) 3182 meta_m = &rte_flow_item_meta_mask; 3183 meta_v = (const void *)item->spec; 3184 if (meta_v) { 3185 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, 3186 rte_be_to_cpu_32(meta_m->data)); 3187 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, 3188 rte_be_to_cpu_32(meta_v->data & meta_m->data)); 3189 } 3190 } 3191 3192 /** 3193 * Add source vport match to the specified matcher. 3194 * 3195 * @param[in, out] matcher 3196 * Flow matcher. 3197 * @param[in, out] key 3198 * Flow matcher value. 3199 * @param[in] port 3200 * Source vport value to match 3201 * @param[in] mask 3202 * Mask 3203 */ 3204 static void 3205 flow_dv_translate_item_source_vport(void *matcher, void *key, 3206 int16_t port, uint16_t mask) 3207 { 3208 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 3209 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 3210 3211 MLX5_SET(fte_match_set_misc, misc_m, source_port, mask); 3212 MLX5_SET(fte_match_set_misc, misc_v, source_port, port); 3213 } 3214 3215 /** 3216 * Translate port-id item to eswitch match on port-id. 3217 * 3218 * @param[in] dev 3219 * The devich to configure through. 3220 * @param[in, out] matcher 3221 * Flow matcher. 3222 * @param[in, out] key 3223 * Flow matcher value. 3224 * @param[in] item 3225 * Flow pattern to translate. 3226 * 3227 * @return 3228 * 0 on success, a negative errno value otherwise. 3229 */ 3230 static int 3231 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher, 3232 void *key, const struct rte_flow_item *item) 3233 { 3234 const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL; 3235 const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL; 3236 uint16_t mask, val, id; 3237 int ret; 3238 3239 mask = pid_m ? pid_m->id : 0xffff; 3240 id = pid_v ? pid_v->id : dev->data->port_id; 3241 ret = mlx5_port_to_eswitch_info(id, NULL, &val); 3242 if (ret) 3243 return ret; 3244 flow_dv_translate_item_source_vport(matcher, key, val, mask); 3245 return 0; 3246 } 3247 3248 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 }; 3249 3250 #define HEADER_IS_ZERO(match_criteria, headers) \ 3251 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \ 3252 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \ 3253 3254 /** 3255 * Calculate flow matcher enable bitmap. 3256 * 3257 * @param match_criteria 3258 * Pointer to flow matcher criteria. 3259 * 3260 * @return 3261 * Bitmap of enabled fields. 3262 */ 3263 static uint8_t 3264 flow_dv_matcher_enable(uint32_t *match_criteria) 3265 { 3266 uint8_t match_criteria_enable; 3267 3268 match_criteria_enable = 3269 (!HEADER_IS_ZERO(match_criteria, outer_headers)) << 3270 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT; 3271 match_criteria_enable |= 3272 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) << 3273 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT; 3274 match_criteria_enable |= 3275 (!HEADER_IS_ZERO(match_criteria, inner_headers)) << 3276 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT; 3277 match_criteria_enable |= 3278 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) << 3279 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT; 3280 #ifdef HAVE_MLX5DV_DR 3281 match_criteria_enable |= 3282 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) << 3283 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT; 3284 #endif 3285 return match_criteria_enable; 3286 } 3287 3288 3289 /** 3290 * Get a flow table. 3291 * 3292 * @param dev[in, out] 3293 * Pointer to rte_eth_dev structure. 3294 * @param[in] table_id 3295 * Table id to use. 3296 * @param[in] egress 3297 * Direction of the table. 3298 * @param[in] transfer 3299 * E-Switch or NIC flow. 3300 * @param[out] error 3301 * pointer to error structure. 3302 * 3303 * @return 3304 * Returns tables resource based on the index, NULL in case of failed. 3305 */ 3306 static struct mlx5_flow_tbl_resource * 3307 flow_dv_tbl_resource_get(struct rte_eth_dev *dev, 3308 uint32_t table_id, uint8_t egress, 3309 uint8_t transfer, 3310 struct rte_flow_error *error) 3311 { 3312 struct mlx5_priv *priv = dev->data->dev_private; 3313 struct mlx5_ibv_shared *sh = priv->sh; 3314 struct mlx5_flow_tbl_resource *tbl; 3315 3316 #ifdef HAVE_MLX5DV_DR 3317 if (transfer) { 3318 tbl = &sh->fdb_tbl[table_id]; 3319 if (!tbl->obj) 3320 tbl->obj = mlx5_glue->dr_create_flow_tbl 3321 (sh->fdb_domain, table_id); 3322 } else if (egress) { 3323 tbl = &sh->tx_tbl[table_id]; 3324 if (!tbl->obj) 3325 tbl->obj = mlx5_glue->dr_create_flow_tbl 3326 (sh->tx_domain, table_id); 3327 } else { 3328 tbl = &sh->rx_tbl[table_id]; 3329 if (!tbl->obj) 3330 tbl->obj = mlx5_glue->dr_create_flow_tbl 3331 (sh->rx_domain, table_id); 3332 } 3333 if (!tbl->obj) { 3334 rte_flow_error_set(error, ENOMEM, 3335 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 3336 NULL, "cannot create table"); 3337 return NULL; 3338 } 3339 rte_atomic32_inc(&tbl->refcnt); 3340 return tbl; 3341 #else 3342 (void)error; 3343 (void)tbl; 3344 if (transfer) 3345 return &sh->fdb_tbl[table_id]; 3346 else if (egress) 3347 return &sh->tx_tbl[table_id]; 3348 else 3349 return &sh->rx_tbl[table_id]; 3350 #endif 3351 } 3352 3353 /** 3354 * Release a flow table. 3355 * 3356 * @param[in] tbl 3357 * Table resource to be released. 3358 * 3359 * @return 3360 * Returns 0 if table was released, else return 1; 3361 */ 3362 static int 3363 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl) 3364 { 3365 if (!tbl) 3366 return 0; 3367 if (rte_atomic32_dec_and_test(&tbl->refcnt)) { 3368 mlx5_glue->dr_destroy_flow_tbl(tbl->obj); 3369 tbl->obj = NULL; 3370 return 0; 3371 } 3372 return 1; 3373 } 3374 3375 /** 3376 * Register the flow matcher. 3377 * 3378 * @param dev[in, out] 3379 * Pointer to rte_eth_dev structure. 3380 * @param[in, out] matcher 3381 * Pointer to flow matcher. 3382 * @parm[in, out] dev_flow 3383 * Pointer to the dev_flow. 3384 * @param[out] error 3385 * pointer to error structure. 3386 * 3387 * @return 3388 * 0 on success otherwise -errno and errno is set. 3389 */ 3390 static int 3391 flow_dv_matcher_register(struct rte_eth_dev *dev, 3392 struct mlx5_flow_dv_matcher *matcher, 3393 struct mlx5_flow *dev_flow, 3394 struct rte_flow_error *error) 3395 { 3396 struct mlx5_priv *priv = dev->data->dev_private; 3397 struct mlx5_ibv_shared *sh = priv->sh; 3398 struct mlx5_flow_dv_matcher *cache_matcher; 3399 struct mlx5dv_flow_matcher_attr dv_attr = { 3400 .type = IBV_FLOW_ATTR_NORMAL, 3401 .match_mask = (void *)&matcher->mask, 3402 }; 3403 struct mlx5_flow_tbl_resource *tbl = NULL; 3404 3405 /* Lookup from cache. */ 3406 LIST_FOREACH(cache_matcher, &sh->matchers, next) { 3407 if (matcher->crc == cache_matcher->crc && 3408 matcher->priority == cache_matcher->priority && 3409 matcher->egress == cache_matcher->egress && 3410 matcher->group == cache_matcher->group && 3411 matcher->transfer == cache_matcher->transfer && 3412 !memcmp((const void *)matcher->mask.buf, 3413 (const void *)cache_matcher->mask.buf, 3414 cache_matcher->mask.size)) { 3415 DRV_LOG(DEBUG, 3416 "priority %hd use %s matcher %p: refcnt %d++", 3417 cache_matcher->priority, 3418 cache_matcher->egress ? "tx" : "rx", 3419 (void *)cache_matcher, 3420 rte_atomic32_read(&cache_matcher->refcnt)); 3421 rte_atomic32_inc(&cache_matcher->refcnt); 3422 dev_flow->dv.matcher = cache_matcher; 3423 return 0; 3424 } 3425 } 3426 /* Register new matcher. */ 3427 cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0); 3428 if (!cache_matcher) 3429 return rte_flow_error_set(error, ENOMEM, 3430 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 3431 "cannot allocate matcher memory"); 3432 tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR, 3433 matcher->egress, matcher->transfer, 3434 error); 3435 if (!tbl) { 3436 rte_free(cache_matcher); 3437 return rte_flow_error_set(error, ENOMEM, 3438 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 3439 NULL, "cannot create table"); 3440 } 3441 *cache_matcher = *matcher; 3442 dv_attr.match_criteria_enable = 3443 flow_dv_matcher_enable(cache_matcher->mask.buf); 3444 dv_attr.priority = matcher->priority; 3445 if (matcher->egress) 3446 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS; 3447 cache_matcher->matcher_object = 3448 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj); 3449 if (!cache_matcher->matcher_object) { 3450 rte_free(cache_matcher); 3451 #ifdef HAVE_MLX5DV_DR 3452 flow_dv_tbl_resource_release(tbl); 3453 #endif 3454 return rte_flow_error_set(error, ENOMEM, 3455 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 3456 NULL, "cannot create matcher"); 3457 } 3458 rte_atomic32_inc(&cache_matcher->refcnt); 3459 LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next); 3460 dev_flow->dv.matcher = cache_matcher; 3461 DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d", 3462 cache_matcher->priority, 3463 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher, 3464 rte_atomic32_read(&cache_matcher->refcnt)); 3465 rte_atomic32_inc(&tbl->refcnt); 3466 return 0; 3467 } 3468 3469 /** 3470 * Find existing tag resource or create and register a new one. 3471 * 3472 * @param dev[in, out] 3473 * Pointer to rte_eth_dev structure. 3474 * @param[in, out] resource 3475 * Pointer to tag resource. 3476 * @parm[in, out] dev_flow 3477 * Pointer to the dev_flow. 3478 * @param[out] error 3479 * pointer to error structure. 3480 * 3481 * @return 3482 * 0 on success otherwise -errno and errno is set. 3483 */ 3484 static int 3485 flow_dv_tag_resource_register 3486 (struct rte_eth_dev *dev, 3487 struct mlx5_flow_dv_tag_resource *resource, 3488 struct mlx5_flow *dev_flow, 3489 struct rte_flow_error *error) 3490 { 3491 struct mlx5_priv *priv = dev->data->dev_private; 3492 struct mlx5_ibv_shared *sh = priv->sh; 3493 struct mlx5_flow_dv_tag_resource *cache_resource; 3494 3495 /* Lookup a matching resource from cache. */ 3496 LIST_FOREACH(cache_resource, &sh->tags, next) { 3497 if (resource->tag == cache_resource->tag) { 3498 DRV_LOG(DEBUG, "tag resource %p: refcnt %d++", 3499 (void *)cache_resource, 3500 rte_atomic32_read(&cache_resource->refcnt)); 3501 rte_atomic32_inc(&cache_resource->refcnt); 3502 dev_flow->flow->tag_resource = cache_resource; 3503 return 0; 3504 } 3505 } 3506 /* Register new resource. */ 3507 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0); 3508 if (!cache_resource) 3509 return rte_flow_error_set(error, ENOMEM, 3510 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 3511 "cannot allocate resource memory"); 3512 *cache_resource = *resource; 3513 cache_resource->action = mlx5_glue->dv_create_flow_action_tag 3514 (resource->tag); 3515 if (!cache_resource->action) { 3516 rte_free(cache_resource); 3517 return rte_flow_error_set(error, ENOMEM, 3518 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 3519 NULL, "cannot create action"); 3520 } 3521 rte_atomic32_init(&cache_resource->refcnt); 3522 rte_atomic32_inc(&cache_resource->refcnt); 3523 LIST_INSERT_HEAD(&sh->tags, cache_resource, next); 3524 dev_flow->flow->tag_resource = cache_resource; 3525 DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++", 3526 (void *)cache_resource, 3527 rte_atomic32_read(&cache_resource->refcnt)); 3528 return 0; 3529 } 3530 3531 /** 3532 * Release the tag. 3533 * 3534 * @param dev 3535 * Pointer to Ethernet device. 3536 * @param flow 3537 * Pointer to mlx5_flow. 3538 * 3539 * @return 3540 * 1 while a reference on it exists, 0 when freed. 3541 */ 3542 static int 3543 flow_dv_tag_release(struct rte_eth_dev *dev, 3544 struct mlx5_flow_dv_tag_resource *tag) 3545 { 3546 assert(tag); 3547 DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--", 3548 dev->data->port_id, (void *)tag, 3549 rte_atomic32_read(&tag->refcnt)); 3550 if (rte_atomic32_dec_and_test(&tag->refcnt)) { 3551 claim_zero(mlx5_glue->destroy_flow_action(tag->action)); 3552 LIST_REMOVE(tag, next); 3553 DRV_LOG(DEBUG, "port %u tag %p: removed", 3554 dev->data->port_id, (void *)tag); 3555 rte_free(tag); 3556 return 0; 3557 } 3558 return 1; 3559 } 3560 3561 /** 3562 * Translate port ID action to vport. 3563 * 3564 * @param[in] dev 3565 * Pointer to rte_eth_dev structure. 3566 * @param[in] action 3567 * Pointer to the port ID action. 3568 * @param[out] dst_port_id 3569 * The target port ID. 3570 * @param[out] error 3571 * Pointer to the error structure. 3572 * 3573 * @return 3574 * 0 on success, a negative errno value otherwise and rte_errno is set. 3575 */ 3576 static int 3577 flow_dv_translate_action_port_id(struct rte_eth_dev *dev, 3578 const struct rte_flow_action *action, 3579 uint32_t *dst_port_id, 3580 struct rte_flow_error *error) 3581 { 3582 uint32_t port; 3583 uint16_t port_id; 3584 int ret; 3585 const struct rte_flow_action_port_id *conf = 3586 (const struct rte_flow_action_port_id *)action->conf; 3587 3588 port = conf->original ? dev->data->port_id : conf->id; 3589 ret = mlx5_port_to_eswitch_info(port, NULL, &port_id); 3590 if (ret) 3591 return rte_flow_error_set(error, -ret, 3592 RTE_FLOW_ERROR_TYPE_ACTION, 3593 NULL, 3594 "No eswitch info was found for port"); 3595 *dst_port_id = port_id; 3596 return 0; 3597 } 3598 3599 /** 3600 * Fill the flow with DV spec. 3601 * 3602 * @param[in] dev 3603 * Pointer to rte_eth_dev structure. 3604 * @param[in, out] dev_flow 3605 * Pointer to the sub flow. 3606 * @param[in] attr 3607 * Pointer to the flow attributes. 3608 * @param[in] items 3609 * Pointer to the list of items. 3610 * @param[in] actions 3611 * Pointer to the list of actions. 3612 * @param[out] error 3613 * Pointer to the error structure. 3614 * 3615 * @return 3616 * 0 on success, a negative errno value otherwise and rte_errno is set. 3617 */ 3618 static int 3619 flow_dv_translate(struct rte_eth_dev *dev, 3620 struct mlx5_flow *dev_flow, 3621 const struct rte_flow_attr *attr, 3622 const struct rte_flow_item items[], 3623 const struct rte_flow_action actions[], 3624 struct rte_flow_error *error) 3625 { 3626 struct mlx5_priv *priv = dev->data->dev_private; 3627 struct rte_flow *flow = dev_flow->flow; 3628 uint64_t item_flags = 0; 3629 uint64_t last_item = 0; 3630 uint64_t action_flags = 0; 3631 uint64_t priority = attr->priority; 3632 struct mlx5_flow_dv_matcher matcher = { 3633 .mask = { 3634 .size = sizeof(matcher.mask.buf), 3635 }, 3636 }; 3637 int actions_n = 0; 3638 bool actions_end = false; 3639 struct mlx5_flow_dv_modify_hdr_resource res = { 3640 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX : 3641 MLX5DV_FLOW_TABLE_TYPE_NIC_RX 3642 }; 3643 union flow_dv_attr flow_attr = { .attr = 0 }; 3644 struct mlx5_flow_dv_tag_resource tag_resource; 3645 uint32_t modify_action_position = UINT32_MAX; 3646 void *match_mask = matcher.mask.buf; 3647 void *match_value = dev_flow->dv.value.buf; 3648 3649 flow->group = attr->group; 3650 if (attr->transfer) 3651 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; 3652 if (priority == MLX5_FLOW_PRIO_RSVD) 3653 priority = priv->config.flow_prio - 1; 3654 for (; !actions_end ; actions++) { 3655 const struct rte_flow_action_queue *queue; 3656 const struct rte_flow_action_rss *rss; 3657 const struct rte_flow_action *action = actions; 3658 const struct rte_flow_action_count *count = action->conf; 3659 const uint8_t *rss_key; 3660 const struct rte_flow_action_jump *jump_data; 3661 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource; 3662 struct mlx5_flow_tbl_resource *tbl; 3663 uint32_t port_id = 0; 3664 struct mlx5_flow_dv_port_id_action_resource port_id_resource; 3665 3666 switch (actions->type) { 3667 case RTE_FLOW_ACTION_TYPE_VOID: 3668 break; 3669 case RTE_FLOW_ACTION_TYPE_PORT_ID: 3670 if (flow_dv_translate_action_port_id(dev, action, 3671 &port_id, error)) 3672 return -rte_errno; 3673 port_id_resource.port_id = port_id; 3674 if (flow_dv_port_id_action_resource_register 3675 (dev, &port_id_resource, dev_flow, error)) 3676 return -rte_errno; 3677 dev_flow->dv.actions[actions_n++] = 3678 dev_flow->dv.port_id_action->action; 3679 action_flags |= MLX5_FLOW_ACTION_PORT_ID; 3680 break; 3681 case RTE_FLOW_ACTION_TYPE_FLAG: 3682 tag_resource.tag = 3683 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT); 3684 if (!flow->tag_resource) 3685 if (flow_dv_tag_resource_register 3686 (dev, &tag_resource, dev_flow, error)) 3687 return errno; 3688 dev_flow->dv.actions[actions_n++] = 3689 flow->tag_resource->action; 3690 action_flags |= MLX5_FLOW_ACTION_FLAG; 3691 break; 3692 case RTE_FLOW_ACTION_TYPE_MARK: 3693 tag_resource.tag = mlx5_flow_mark_set 3694 (((const struct rte_flow_action_mark *) 3695 (actions->conf))->id); 3696 if (!flow->tag_resource) 3697 if (flow_dv_tag_resource_register 3698 (dev, &tag_resource, dev_flow, error)) 3699 return errno; 3700 dev_flow->dv.actions[actions_n++] = 3701 flow->tag_resource->action; 3702 action_flags |= MLX5_FLOW_ACTION_MARK; 3703 break; 3704 case RTE_FLOW_ACTION_TYPE_DROP: 3705 action_flags |= MLX5_FLOW_ACTION_DROP; 3706 break; 3707 case RTE_FLOW_ACTION_TYPE_QUEUE: 3708 queue = actions->conf; 3709 flow->rss.queue_num = 1; 3710 (*flow->queue)[0] = queue->index; 3711 action_flags |= MLX5_FLOW_ACTION_QUEUE; 3712 break; 3713 case RTE_FLOW_ACTION_TYPE_RSS: 3714 rss = actions->conf; 3715 if (flow->queue) 3716 memcpy((*flow->queue), rss->queue, 3717 rss->queue_num * sizeof(uint16_t)); 3718 flow->rss.queue_num = rss->queue_num; 3719 /* NULL RSS key indicates default RSS key. */ 3720 rss_key = !rss->key ? rss_hash_default_key : rss->key; 3721 memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN); 3722 /* RSS type 0 indicates default RSS type ETH_RSS_IP. */ 3723 flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types; 3724 flow->rss.level = rss->level; 3725 action_flags |= MLX5_FLOW_ACTION_RSS; 3726 break; 3727 case RTE_FLOW_ACTION_TYPE_COUNT: 3728 if (!priv->config.devx) { 3729 rte_errno = ENOTSUP; 3730 goto cnt_err; 3731 } 3732 flow->counter = flow_dv_counter_new(dev, count->shared, 3733 count->id); 3734 if (flow->counter == NULL) 3735 goto cnt_err; 3736 dev_flow->dv.actions[actions_n++] = 3737 flow->counter->action; 3738 action_flags |= MLX5_FLOW_ACTION_COUNT; 3739 break; 3740 cnt_err: 3741 if (rte_errno == ENOTSUP) 3742 return rte_flow_error_set 3743 (error, ENOTSUP, 3744 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 3745 NULL, 3746 "count action not supported"); 3747 else 3748 return rte_flow_error_set 3749 (error, rte_errno, 3750 RTE_FLOW_ERROR_TYPE_ACTION, 3751 action, 3752 "cannot create counter" 3753 " object."); 3754 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 3755 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 3756 if (flow_dv_create_action_l2_encap(dev, actions, 3757 dev_flow, 3758 attr->transfer, 3759 error)) 3760 return -rte_errno; 3761 dev_flow->dv.actions[actions_n++] = 3762 dev_flow->dv.encap_decap->verbs_action; 3763 action_flags |= actions->type == 3764 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ? 3765 MLX5_FLOW_ACTION_VXLAN_ENCAP : 3766 MLX5_FLOW_ACTION_NVGRE_ENCAP; 3767 break; 3768 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: 3769 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: 3770 if (flow_dv_create_action_l2_decap(dev, dev_flow, 3771 attr->transfer, 3772 error)) 3773 return -rte_errno; 3774 dev_flow->dv.actions[actions_n++] = 3775 dev_flow->dv.encap_decap->verbs_action; 3776 action_flags |= actions->type == 3777 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ? 3778 MLX5_FLOW_ACTION_VXLAN_DECAP : 3779 MLX5_FLOW_ACTION_NVGRE_DECAP; 3780 break; 3781 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 3782 /* Handle encap with preceding decap. */ 3783 if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) { 3784 if (flow_dv_create_action_raw_encap 3785 (dev, actions, dev_flow, attr, error)) 3786 return -rte_errno; 3787 dev_flow->dv.actions[actions_n++] = 3788 dev_flow->dv.encap_decap->verbs_action; 3789 } else { 3790 /* Handle encap without preceding decap. */ 3791 if (flow_dv_create_action_l2_encap 3792 (dev, actions, dev_flow, attr->transfer, 3793 error)) 3794 return -rte_errno; 3795 dev_flow->dv.actions[actions_n++] = 3796 dev_flow->dv.encap_decap->verbs_action; 3797 } 3798 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP; 3799 break; 3800 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 3801 /* Check if this decap is followed by encap. */ 3802 for (; action->type != RTE_FLOW_ACTION_TYPE_END && 3803 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP; 3804 action++) { 3805 } 3806 /* Handle decap only if it isn't followed by encap. */ 3807 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { 3808 if (flow_dv_create_action_l2_decap 3809 (dev, dev_flow, attr->transfer, error)) 3810 return -rte_errno; 3811 dev_flow->dv.actions[actions_n++] = 3812 dev_flow->dv.encap_decap->verbs_action; 3813 } 3814 /* If decap is followed by encap, handle it at encap. */ 3815 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP; 3816 break; 3817 case RTE_FLOW_ACTION_TYPE_JUMP: 3818 jump_data = action->conf; 3819 tbl = flow_dv_tbl_resource_get(dev, jump_data->group * 3820 MLX5_GROUP_FACTOR, 3821 attr->egress, 3822 attr->transfer, error); 3823 if (!tbl) 3824 return rte_flow_error_set 3825 (error, errno, 3826 RTE_FLOW_ERROR_TYPE_ACTION, 3827 NULL, 3828 "cannot create jump action."); 3829 jump_tbl_resource.tbl = tbl; 3830 if (flow_dv_jump_tbl_resource_register 3831 (dev, &jump_tbl_resource, dev_flow, error)) { 3832 flow_dv_tbl_resource_release(tbl); 3833 return rte_flow_error_set 3834 (error, errno, 3835 RTE_FLOW_ERROR_TYPE_ACTION, 3836 NULL, 3837 "cannot create jump action."); 3838 } 3839 dev_flow->dv.actions[actions_n++] = 3840 dev_flow->dv.jump->action; 3841 action_flags |= MLX5_FLOW_ACTION_JUMP; 3842 break; 3843 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: 3844 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: 3845 if (flow_dv_convert_action_modify_mac(&res, actions, 3846 error)) 3847 return -rte_errno; 3848 action_flags |= actions->type == 3849 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ? 3850 MLX5_FLOW_ACTION_SET_MAC_SRC : 3851 MLX5_FLOW_ACTION_SET_MAC_DST; 3852 break; 3853 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: 3854 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: 3855 if (flow_dv_convert_action_modify_ipv4(&res, actions, 3856 error)) 3857 return -rte_errno; 3858 action_flags |= actions->type == 3859 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ? 3860 MLX5_FLOW_ACTION_SET_IPV4_SRC : 3861 MLX5_FLOW_ACTION_SET_IPV4_DST; 3862 break; 3863 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: 3864 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: 3865 if (flow_dv_convert_action_modify_ipv6(&res, actions, 3866 error)) 3867 return -rte_errno; 3868 action_flags |= actions->type == 3869 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ? 3870 MLX5_FLOW_ACTION_SET_IPV6_SRC : 3871 MLX5_FLOW_ACTION_SET_IPV6_DST; 3872 break; 3873 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: 3874 case RTE_FLOW_ACTION_TYPE_SET_TP_DST: 3875 if (flow_dv_convert_action_modify_tp(&res, actions, 3876 items, &flow_attr, 3877 error)) 3878 return -rte_errno; 3879 action_flags |= actions->type == 3880 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ? 3881 MLX5_FLOW_ACTION_SET_TP_SRC : 3882 MLX5_FLOW_ACTION_SET_TP_DST; 3883 break; 3884 case RTE_FLOW_ACTION_TYPE_DEC_TTL: 3885 if (flow_dv_convert_action_modify_dec_ttl(&res, items, 3886 &flow_attr, 3887 error)) 3888 return -rte_errno; 3889 action_flags |= MLX5_FLOW_ACTION_DEC_TTL; 3890 break; 3891 case RTE_FLOW_ACTION_TYPE_SET_TTL: 3892 if (flow_dv_convert_action_modify_ttl(&res, actions, 3893 items, &flow_attr, 3894 error)) 3895 return -rte_errno; 3896 action_flags |= MLX5_FLOW_ACTION_SET_TTL; 3897 break; 3898 case RTE_FLOW_ACTION_TYPE_END: 3899 actions_end = true; 3900 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) { 3901 /* create modify action if needed. */ 3902 if (flow_dv_modify_hdr_resource_register 3903 (dev, &res, 3904 dev_flow, 3905 error)) 3906 return -rte_errno; 3907 dev_flow->dv.actions[modify_action_position] = 3908 dev_flow->dv.modify_hdr->verbs_action; 3909 } 3910 break; 3911 default: 3912 break; 3913 } 3914 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) && 3915 modify_action_position == UINT32_MAX) 3916 modify_action_position = actions_n++; 3917 } 3918 dev_flow->dv.actions_n = actions_n; 3919 flow->actions = action_flags; 3920 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { 3921 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 3922 3923 switch (items->type) { 3924 case RTE_FLOW_ITEM_TYPE_PORT_ID: 3925 flow_dv_translate_item_port_id(dev, match_mask, 3926 match_value, items); 3927 last_item = MLX5_FLOW_ITEM_PORT_ID; 3928 break; 3929 case RTE_FLOW_ITEM_TYPE_ETH: 3930 flow_dv_translate_item_eth(match_mask, match_value, 3931 items, tunnel); 3932 matcher.priority = MLX5_PRIORITY_MAP_L2; 3933 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : 3934 MLX5_FLOW_LAYER_OUTER_L2; 3935 break; 3936 case RTE_FLOW_ITEM_TYPE_VLAN: 3937 flow_dv_translate_item_vlan(match_mask, match_value, 3938 items, tunnel); 3939 matcher.priority = MLX5_PRIORITY_MAP_L2; 3940 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 | 3941 MLX5_FLOW_LAYER_INNER_VLAN) : 3942 (MLX5_FLOW_LAYER_OUTER_L2 | 3943 MLX5_FLOW_LAYER_OUTER_VLAN); 3944 break; 3945 case RTE_FLOW_ITEM_TYPE_IPV4: 3946 flow_dv_translate_item_ipv4(match_mask, match_value, 3947 items, tunnel, attr->group); 3948 matcher.priority = MLX5_PRIORITY_MAP_L3; 3949 dev_flow->dv.hash_fields |= 3950 mlx5_flow_hashfields_adjust 3951 (dev_flow, tunnel, 3952 MLX5_IPV4_LAYER_TYPES, 3953 MLX5_IPV4_IBV_RX_HASH); 3954 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : 3955 MLX5_FLOW_LAYER_OUTER_L3_IPV4; 3956 break; 3957 case RTE_FLOW_ITEM_TYPE_IPV6: 3958 flow_dv_translate_item_ipv6(match_mask, match_value, 3959 items, tunnel, attr->group); 3960 matcher.priority = MLX5_PRIORITY_MAP_L3; 3961 dev_flow->dv.hash_fields |= 3962 mlx5_flow_hashfields_adjust 3963 (dev_flow, tunnel, 3964 MLX5_IPV6_LAYER_TYPES, 3965 MLX5_IPV6_IBV_RX_HASH); 3966 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : 3967 MLX5_FLOW_LAYER_OUTER_L3_IPV6; 3968 break; 3969 case RTE_FLOW_ITEM_TYPE_TCP: 3970 flow_dv_translate_item_tcp(match_mask, match_value, 3971 items, tunnel); 3972 matcher.priority = MLX5_PRIORITY_MAP_L4; 3973 dev_flow->dv.hash_fields |= 3974 mlx5_flow_hashfields_adjust 3975 (dev_flow, tunnel, ETH_RSS_TCP, 3976 IBV_RX_HASH_SRC_PORT_TCP | 3977 IBV_RX_HASH_DST_PORT_TCP); 3978 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP : 3979 MLX5_FLOW_LAYER_OUTER_L4_TCP; 3980 break; 3981 case RTE_FLOW_ITEM_TYPE_UDP: 3982 flow_dv_translate_item_udp(match_mask, match_value, 3983 items, tunnel); 3984 matcher.priority = MLX5_PRIORITY_MAP_L4; 3985 dev_flow->dv.hash_fields |= 3986 mlx5_flow_hashfields_adjust 3987 (dev_flow, tunnel, ETH_RSS_UDP, 3988 IBV_RX_HASH_SRC_PORT_UDP | 3989 IBV_RX_HASH_DST_PORT_UDP); 3990 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP : 3991 MLX5_FLOW_LAYER_OUTER_L4_UDP; 3992 break; 3993 case RTE_FLOW_ITEM_TYPE_GRE: 3994 flow_dv_translate_item_gre(match_mask, match_value, 3995 items, tunnel); 3996 last_item = MLX5_FLOW_LAYER_GRE; 3997 break; 3998 case RTE_FLOW_ITEM_TYPE_NVGRE: 3999 flow_dv_translate_item_nvgre(match_mask, match_value, 4000 items, tunnel); 4001 last_item = MLX5_FLOW_LAYER_GRE; 4002 break; 4003 case RTE_FLOW_ITEM_TYPE_VXLAN: 4004 flow_dv_translate_item_vxlan(match_mask, match_value, 4005 items, tunnel); 4006 last_item = MLX5_FLOW_LAYER_VXLAN; 4007 break; 4008 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: 4009 flow_dv_translate_item_vxlan(match_mask, match_value, 4010 items, tunnel); 4011 last_item = MLX5_FLOW_LAYER_VXLAN_GPE; 4012 break; 4013 case RTE_FLOW_ITEM_TYPE_MPLS: 4014 flow_dv_translate_item_mpls(match_mask, match_value, 4015 items, last_item, tunnel); 4016 last_item = MLX5_FLOW_LAYER_MPLS; 4017 break; 4018 case RTE_FLOW_ITEM_TYPE_META: 4019 flow_dv_translate_item_meta(match_mask, match_value, 4020 items); 4021 last_item = MLX5_FLOW_ITEM_METADATA; 4022 break; 4023 default: 4024 break; 4025 } 4026 item_flags |= last_item; 4027 } 4028 /* 4029 * In case of ingress traffic when E-Switch mode is enabled, 4030 * we have two cases where we need to set the source port manually. 4031 * The first one, is in case of Nic steering rule, and the second is 4032 * E-Switch rule where no port_id item was found. In both cases 4033 * the source port is set according the current port in use. 4034 */ 4035 if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) && 4036 (priv->representor || priv->master)) { 4037 if (flow_dv_translate_item_port_id(dev, match_mask, 4038 match_value, NULL)) 4039 return -rte_errno; 4040 } 4041 assert(!flow_dv_check_valid_spec(matcher.mask.buf, 4042 dev_flow->dv.value.buf)); 4043 dev_flow->layers = item_flags; 4044 /* Register matcher. */ 4045 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf, 4046 matcher.mask.size); 4047 matcher.priority = mlx5_flow_adjust_priority(dev, priority, 4048 matcher.priority); 4049 matcher.egress = attr->egress; 4050 matcher.group = attr->group; 4051 matcher.transfer = attr->transfer; 4052 if (flow_dv_matcher_register(dev, &matcher, dev_flow, error)) 4053 return -rte_errno; 4054 return 0; 4055 } 4056 4057 /** 4058 * Apply the flow to the NIC. 4059 * 4060 * @param[in] dev 4061 * Pointer to the Ethernet device structure. 4062 * @param[in, out] flow 4063 * Pointer to flow structure. 4064 * @param[out] error 4065 * Pointer to error structure. 4066 * 4067 * @return 4068 * 0 on success, a negative errno value otherwise and rte_errno is set. 4069 */ 4070 static int 4071 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, 4072 struct rte_flow_error *error) 4073 { 4074 struct mlx5_flow_dv *dv; 4075 struct mlx5_flow *dev_flow; 4076 struct mlx5_priv *priv = dev->data->dev_private; 4077 int n; 4078 int err; 4079 4080 LIST_FOREACH(dev_flow, &flow->dev_flows, next) { 4081 dv = &dev_flow->dv; 4082 n = dv->actions_n; 4083 if (flow->actions & MLX5_FLOW_ACTION_DROP) { 4084 if (flow->transfer) { 4085 dv->actions[n++] = priv->sh->esw_drop_action; 4086 } else { 4087 dv->hrxq = mlx5_hrxq_drop_new(dev); 4088 if (!dv->hrxq) { 4089 rte_flow_error_set 4090 (error, errno, 4091 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 4092 NULL, 4093 "cannot get drop hash queue"); 4094 goto error; 4095 } 4096 dv->actions[n++] = dv->hrxq->action; 4097 } 4098 } else if (flow->actions & 4099 (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) { 4100 struct mlx5_hrxq *hrxq; 4101 4102 hrxq = mlx5_hrxq_get(dev, flow->key, 4103 MLX5_RSS_HASH_KEY_LEN, 4104 dv->hash_fields, 4105 (*flow->queue), 4106 flow->rss.queue_num); 4107 if (!hrxq) 4108 hrxq = mlx5_hrxq_new 4109 (dev, flow->key, MLX5_RSS_HASH_KEY_LEN, 4110 dv->hash_fields, (*flow->queue), 4111 flow->rss.queue_num, 4112 !!(dev_flow->layers & 4113 MLX5_FLOW_LAYER_TUNNEL)); 4114 if (!hrxq) { 4115 rte_flow_error_set 4116 (error, rte_errno, 4117 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 4118 "cannot get hash queue"); 4119 goto error; 4120 } 4121 dv->hrxq = hrxq; 4122 dv->actions[n++] = dv->hrxq->action; 4123 } 4124 dv->flow = 4125 mlx5_glue->dv_create_flow(dv->matcher->matcher_object, 4126 (void *)&dv->value, n, 4127 dv->actions); 4128 if (!dv->flow) { 4129 rte_flow_error_set(error, errno, 4130 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 4131 NULL, 4132 "hardware refuses to create flow"); 4133 goto error; 4134 } 4135 } 4136 return 0; 4137 error: 4138 err = rte_errno; /* Save rte_errno before cleanup. */ 4139 LIST_FOREACH(dev_flow, &flow->dev_flows, next) { 4140 struct mlx5_flow_dv *dv = &dev_flow->dv; 4141 if (dv->hrxq) { 4142 if (flow->actions & MLX5_FLOW_ACTION_DROP) 4143 mlx5_hrxq_drop_release(dev); 4144 else 4145 mlx5_hrxq_release(dev, dv->hrxq); 4146 dv->hrxq = NULL; 4147 } 4148 } 4149 rte_errno = err; /* Restore rte_errno. */ 4150 return -rte_errno; 4151 } 4152 4153 /** 4154 * Release the flow matcher. 4155 * 4156 * @param dev 4157 * Pointer to Ethernet device. 4158 * @param flow 4159 * Pointer to mlx5_flow. 4160 * 4161 * @return 4162 * 1 while a reference on it exists, 0 when freed. 4163 */ 4164 static int 4165 flow_dv_matcher_release(struct rte_eth_dev *dev, 4166 struct mlx5_flow *flow) 4167 { 4168 struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher; 4169 struct mlx5_priv *priv = dev->data->dev_private; 4170 struct mlx5_ibv_shared *sh = priv->sh; 4171 struct mlx5_flow_tbl_resource *tbl; 4172 4173 assert(matcher->matcher_object); 4174 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--", 4175 dev->data->port_id, (void *)matcher, 4176 rte_atomic32_read(&matcher->refcnt)); 4177 if (rte_atomic32_dec_and_test(&matcher->refcnt)) { 4178 claim_zero(mlx5_glue->dv_destroy_flow_matcher 4179 (matcher->matcher_object)); 4180 LIST_REMOVE(matcher, next); 4181 if (matcher->egress) 4182 tbl = &sh->tx_tbl[matcher->group]; 4183 else 4184 tbl = &sh->rx_tbl[matcher->group]; 4185 flow_dv_tbl_resource_release(tbl); 4186 rte_free(matcher); 4187 DRV_LOG(DEBUG, "port %u matcher %p: removed", 4188 dev->data->port_id, (void *)matcher); 4189 return 0; 4190 } 4191 return 1; 4192 } 4193 4194 /** 4195 * Release an encap/decap resource. 4196 * 4197 * @param flow 4198 * Pointer to mlx5_flow. 4199 * 4200 * @return 4201 * 1 while a reference on it exists, 0 when freed. 4202 */ 4203 static int 4204 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow) 4205 { 4206 struct mlx5_flow_dv_encap_decap_resource *cache_resource = 4207 flow->dv.encap_decap; 4208 4209 assert(cache_resource->verbs_action); 4210 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--", 4211 (void *)cache_resource, 4212 rte_atomic32_read(&cache_resource->refcnt)); 4213 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { 4214 claim_zero(mlx5_glue->destroy_flow_action 4215 (cache_resource->verbs_action)); 4216 LIST_REMOVE(cache_resource, next); 4217 rte_free(cache_resource); 4218 DRV_LOG(DEBUG, "encap/decap resource %p: removed", 4219 (void *)cache_resource); 4220 return 0; 4221 } 4222 return 1; 4223 } 4224 4225 /** 4226 * Release an jump to table action resource. 4227 * 4228 * @param flow 4229 * Pointer to mlx5_flow. 4230 * 4231 * @return 4232 * 1 while a reference on it exists, 0 when freed. 4233 */ 4234 static int 4235 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow) 4236 { 4237 struct mlx5_flow_dv_jump_tbl_resource *cache_resource = 4238 flow->dv.jump; 4239 4240 assert(cache_resource->action); 4241 DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--", 4242 (void *)cache_resource, 4243 rte_atomic32_read(&cache_resource->refcnt)); 4244 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { 4245 claim_zero(mlx5_glue->destroy_flow_action 4246 (cache_resource->action)); 4247 LIST_REMOVE(cache_resource, next); 4248 flow_dv_tbl_resource_release(cache_resource->tbl); 4249 rte_free(cache_resource); 4250 DRV_LOG(DEBUG, "jump table resource %p: removed", 4251 (void *)cache_resource); 4252 return 0; 4253 } 4254 return 1; 4255 } 4256 4257 /** 4258 * Release a modify-header resource. 4259 * 4260 * @param flow 4261 * Pointer to mlx5_flow. 4262 * 4263 * @return 4264 * 1 while a reference on it exists, 0 when freed. 4265 */ 4266 static int 4267 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow) 4268 { 4269 struct mlx5_flow_dv_modify_hdr_resource *cache_resource = 4270 flow->dv.modify_hdr; 4271 4272 assert(cache_resource->verbs_action); 4273 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--", 4274 (void *)cache_resource, 4275 rte_atomic32_read(&cache_resource->refcnt)); 4276 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { 4277 claim_zero(mlx5_glue->destroy_flow_action 4278 (cache_resource->verbs_action)); 4279 LIST_REMOVE(cache_resource, next); 4280 rte_free(cache_resource); 4281 DRV_LOG(DEBUG, "modify-header resource %p: removed", 4282 (void *)cache_resource); 4283 return 0; 4284 } 4285 return 1; 4286 } 4287 4288 /** 4289 * Release port ID action resource. 4290 * 4291 * @param flow 4292 * Pointer to mlx5_flow. 4293 * 4294 * @return 4295 * 1 while a reference on it exists, 0 when freed. 4296 */ 4297 static int 4298 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow) 4299 { 4300 struct mlx5_flow_dv_port_id_action_resource *cache_resource = 4301 flow->dv.port_id_action; 4302 4303 assert(cache_resource->action); 4304 DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--", 4305 (void *)cache_resource, 4306 rte_atomic32_read(&cache_resource->refcnt)); 4307 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { 4308 claim_zero(mlx5_glue->destroy_flow_action 4309 (cache_resource->action)); 4310 LIST_REMOVE(cache_resource, next); 4311 rte_free(cache_resource); 4312 DRV_LOG(DEBUG, "port id action resource %p: removed", 4313 (void *)cache_resource); 4314 return 0; 4315 } 4316 return 1; 4317 } 4318 4319 /** 4320 * Remove the flow from the NIC but keeps it in memory. 4321 * 4322 * @param[in] dev 4323 * Pointer to Ethernet device. 4324 * @param[in, out] flow 4325 * Pointer to flow structure. 4326 */ 4327 static void 4328 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) 4329 { 4330 struct mlx5_flow_dv *dv; 4331 struct mlx5_flow *dev_flow; 4332 4333 if (!flow) 4334 return; 4335 LIST_FOREACH(dev_flow, &flow->dev_flows, next) { 4336 dv = &dev_flow->dv; 4337 if (dv->flow) { 4338 claim_zero(mlx5_glue->dv_destroy_flow(dv->flow)); 4339 dv->flow = NULL; 4340 } 4341 if (dv->hrxq) { 4342 if (flow->actions & MLX5_FLOW_ACTION_DROP) 4343 mlx5_hrxq_drop_release(dev); 4344 else 4345 mlx5_hrxq_release(dev, dv->hrxq); 4346 dv->hrxq = NULL; 4347 } 4348 } 4349 } 4350 4351 /** 4352 * Remove the flow from the NIC and the memory. 4353 * 4354 * @param[in] dev 4355 * Pointer to the Ethernet device structure. 4356 * @param[in, out] flow 4357 * Pointer to flow structure. 4358 */ 4359 static void 4360 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) 4361 { 4362 struct mlx5_flow *dev_flow; 4363 4364 if (!flow) 4365 return; 4366 flow_dv_remove(dev, flow); 4367 if (flow->counter) { 4368 flow_dv_counter_release(flow->counter); 4369 flow->counter = NULL; 4370 } 4371 if (flow->tag_resource) { 4372 flow_dv_tag_release(dev, flow->tag_resource); 4373 flow->tag_resource = NULL; 4374 } 4375 while (!LIST_EMPTY(&flow->dev_flows)) { 4376 dev_flow = LIST_FIRST(&flow->dev_flows); 4377 LIST_REMOVE(dev_flow, next); 4378 if (dev_flow->dv.matcher) 4379 flow_dv_matcher_release(dev, dev_flow); 4380 if (dev_flow->dv.encap_decap) 4381 flow_dv_encap_decap_resource_release(dev_flow); 4382 if (dev_flow->dv.modify_hdr) 4383 flow_dv_modify_hdr_resource_release(dev_flow); 4384 if (dev_flow->dv.jump) 4385 flow_dv_jump_tbl_resource_release(dev_flow); 4386 if (dev_flow->dv.port_id_action) 4387 flow_dv_port_id_action_resource_release(dev_flow); 4388 rte_free(dev_flow); 4389 } 4390 } 4391 4392 /** 4393 * Query a dv flow rule for its statistics via devx. 4394 * 4395 * @param[in] dev 4396 * Pointer to Ethernet device. 4397 * @param[in] flow 4398 * Pointer to the sub flow. 4399 * @param[out] data 4400 * data retrieved by the query. 4401 * @param[out] error 4402 * Perform verbose error reporting if not NULL. 4403 * 4404 * @return 4405 * 0 on success, a negative errno value otherwise and rte_errno is set. 4406 */ 4407 static int 4408 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow, 4409 void *data, struct rte_flow_error *error) 4410 { 4411 struct mlx5_priv *priv = dev->data->dev_private; 4412 struct rte_flow_query_count *qc = data; 4413 uint64_t pkts = 0; 4414 uint64_t bytes = 0; 4415 int err; 4416 4417 if (!priv->config.devx) 4418 return rte_flow_error_set(error, ENOTSUP, 4419 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 4420 NULL, 4421 "counters are not supported"); 4422 if (flow->counter) { 4423 err = mlx5_devx_cmd_flow_counter_query 4424 (flow->counter->dcs, 4425 qc->reset, &pkts, &bytes); 4426 if (err) 4427 return rte_flow_error_set 4428 (error, err, 4429 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 4430 NULL, 4431 "cannot read counters"); 4432 qc->hits_set = 1; 4433 qc->bytes_set = 1; 4434 qc->hits = pkts - flow->counter->hits; 4435 qc->bytes = bytes - flow->counter->bytes; 4436 if (qc->reset) { 4437 flow->counter->hits = pkts; 4438 flow->counter->bytes = bytes; 4439 } 4440 return 0; 4441 } 4442 return rte_flow_error_set(error, EINVAL, 4443 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 4444 NULL, 4445 "counters are not available"); 4446 } 4447 4448 /** 4449 * Query a flow. 4450 * 4451 * @see rte_flow_query() 4452 * @see rte_flow_ops 4453 */ 4454 static int 4455 flow_dv_query(struct rte_eth_dev *dev, 4456 struct rte_flow *flow __rte_unused, 4457 const struct rte_flow_action *actions __rte_unused, 4458 void *data __rte_unused, 4459 struct rte_flow_error *error __rte_unused) 4460 { 4461 int ret = -EINVAL; 4462 4463 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 4464 switch (actions->type) { 4465 case RTE_FLOW_ACTION_TYPE_VOID: 4466 break; 4467 case RTE_FLOW_ACTION_TYPE_COUNT: 4468 ret = flow_dv_query_count(dev, flow, data, error); 4469 break; 4470 default: 4471 return rte_flow_error_set(error, ENOTSUP, 4472 RTE_FLOW_ERROR_TYPE_ACTION, 4473 actions, 4474 "action not supported"); 4475 } 4476 } 4477 return ret; 4478 } 4479 4480 /* 4481 * Mutex-protected thunk to flow_dv_translate(). 4482 */ 4483 static int 4484 flow_d_translate(struct rte_eth_dev *dev, 4485 struct mlx5_flow *dev_flow, 4486 const struct rte_flow_attr *attr, 4487 const struct rte_flow_item items[], 4488 const struct rte_flow_action actions[], 4489 struct rte_flow_error *error) 4490 { 4491 int ret; 4492 4493 flow_d_shared_lock(dev); 4494 ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error); 4495 flow_d_shared_unlock(dev); 4496 return ret; 4497 } 4498 4499 /* 4500 * Mutex-protected thunk to flow_dv_apply(). 4501 */ 4502 static int 4503 flow_d_apply(struct rte_eth_dev *dev, 4504 struct rte_flow *flow, 4505 struct rte_flow_error *error) 4506 { 4507 int ret; 4508 4509 flow_d_shared_lock(dev); 4510 ret = flow_dv_apply(dev, flow, error); 4511 flow_d_shared_unlock(dev); 4512 return ret; 4513 } 4514 4515 /* 4516 * Mutex-protected thunk to flow_dv_remove(). 4517 */ 4518 static void 4519 flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow) 4520 { 4521 flow_d_shared_lock(dev); 4522 flow_dv_remove(dev, flow); 4523 flow_d_shared_unlock(dev); 4524 } 4525 4526 /* 4527 * Mutex-protected thunk to flow_dv_destroy(). 4528 */ 4529 static void 4530 flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) 4531 { 4532 flow_d_shared_lock(dev); 4533 flow_dv_destroy(dev, flow); 4534 flow_d_shared_unlock(dev); 4535 } 4536 4537 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = { 4538 .validate = flow_dv_validate, 4539 .prepare = flow_dv_prepare, 4540 .translate = flow_d_translate, 4541 .apply = flow_d_apply, 4542 .remove = flow_d_remove, 4543 .destroy = flow_d_destroy, 4544 .query = flow_dv_query, 4545 }; 4546 4547 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */ 4548