1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2018 Mellanox Technologies, Ltd 3 */ 4 5 #include <sys/queue.h> 6 #include <stdalign.h> 7 #include <stdint.h> 8 #include <string.h> 9 #include <unistd.h> 10 11 #include <rte_common.h> 12 #include <rte_ether.h> 13 #include <rte_ethdev_driver.h> 14 #include <rte_flow.h> 15 #include <rte_flow_driver.h> 16 #include <rte_malloc.h> 17 #include <rte_cycles.h> 18 #include <rte_ip.h> 19 #include <rte_gre.h> 20 #include <rte_vxlan.h> 21 #include <rte_gtp.h> 22 #include <rte_eal_paging.h> 23 #include <rte_mpls.h> 24 25 #include <mlx5_glue.h> 26 #include <mlx5_devx_cmds.h> 27 #include <mlx5_prm.h> 28 #include <mlx5_malloc.h> 29 30 #include "mlx5_defs.h" 31 #include "mlx5.h" 32 #include "mlx5_common_os.h" 33 #include "mlx5_flow.h" 34 #include "mlx5_flow_os.h" 35 #include "mlx5_rxtx.h" 36 37 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 38 39 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS 40 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0 41 #endif 42 43 #ifndef HAVE_MLX5DV_DR_ESWITCH 44 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB 45 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0 46 #endif 47 #endif 48 49 #ifndef HAVE_MLX5DV_DR 50 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1 51 #endif 52 53 /* VLAN header definitions */ 54 #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13 55 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT) 56 #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff 57 #define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK) 58 #define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK) 59 60 union flow_dv_attr { 61 struct { 62 uint32_t valid:1; 63 uint32_t ipv4:1; 64 uint32_t ipv6:1; 65 uint32_t tcp:1; 66 uint32_t udp:1; 67 uint32_t reserved:27; 68 }; 69 uint32_t attr; 70 }; 71 72 static int 73 flow_dv_tbl_resource_release(struct rte_eth_dev *dev, 74 struct mlx5_flow_tbl_resource *tbl); 75 76 static int 77 flow_dv_default_miss_resource_release(struct rte_eth_dev *dev); 78 79 static int 80 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev, 81 uint32_t encap_decap_idx); 82 83 static int 84 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev, 85 uint32_t port_id); 86 87 /** 88 * Initialize flow attributes structure according to flow items' types. 89 * 90 * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel 91 * mode. For tunnel mode, the items to be modified are the outermost ones. 92 * 93 * @param[in] item 94 * Pointer to item specification. 95 * @param[out] attr 96 * Pointer to flow attributes structure. 97 * @param[in] dev_flow 98 * Pointer to the sub flow. 99 * @param[in] tunnel_decap 100 * Whether action is after tunnel decapsulation. 101 */ 102 static void 103 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr, 104 struct mlx5_flow *dev_flow, bool tunnel_decap) 105 { 106 uint64_t layers = dev_flow->handle->layers; 107 108 /* 109 * If layers is already initialized, it means this dev_flow is the 110 * suffix flow, the layers flags is set by the prefix flow. Need to 111 * use the layer flags from prefix flow as the suffix flow may not 112 * have the user defined items as the flow is split. 
113 */ 114 if (layers) { 115 if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4) 116 attr->ipv4 = 1; 117 else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6) 118 attr->ipv6 = 1; 119 if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP) 120 attr->tcp = 1; 121 else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP) 122 attr->udp = 1; 123 attr->valid = 1; 124 return; 125 } 126 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { 127 uint8_t next_protocol = 0xff; 128 switch (item->type) { 129 case RTE_FLOW_ITEM_TYPE_GRE: 130 case RTE_FLOW_ITEM_TYPE_NVGRE: 131 case RTE_FLOW_ITEM_TYPE_VXLAN: 132 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: 133 case RTE_FLOW_ITEM_TYPE_GENEVE: 134 case RTE_FLOW_ITEM_TYPE_MPLS: 135 if (tunnel_decap) 136 attr->attr = 0; 137 break; 138 case RTE_FLOW_ITEM_TYPE_IPV4: 139 if (!attr->ipv6) 140 attr->ipv4 = 1; 141 if (item->mask != NULL && 142 ((const struct rte_flow_item_ipv4 *) 143 item->mask)->hdr.next_proto_id) 144 next_protocol = 145 ((const struct rte_flow_item_ipv4 *) 146 (item->spec))->hdr.next_proto_id & 147 ((const struct rte_flow_item_ipv4 *) 148 (item->mask))->hdr.next_proto_id; 149 if ((next_protocol == IPPROTO_IPIP || 150 next_protocol == IPPROTO_IPV6) && tunnel_decap) 151 attr->attr = 0; 152 break; 153 case RTE_FLOW_ITEM_TYPE_IPV6: 154 if (!attr->ipv4) 155 attr->ipv6 = 1; 156 if (item->mask != NULL && 157 ((const struct rte_flow_item_ipv6 *) 158 item->mask)->hdr.proto) 159 next_protocol = 160 ((const struct rte_flow_item_ipv6 *) 161 (item->spec))->hdr.proto & 162 ((const struct rte_flow_item_ipv6 *) 163 (item->mask))->hdr.proto; 164 if ((next_protocol == IPPROTO_IPIP || 165 next_protocol == IPPROTO_IPV6) && tunnel_decap) 166 attr->attr = 0; 167 break; 168 case RTE_FLOW_ITEM_TYPE_UDP: 169 if (!attr->tcp) 170 attr->udp = 1; 171 break; 172 case RTE_FLOW_ITEM_TYPE_TCP: 173 if (!attr->udp) 174 attr->tcp = 1; 175 break; 176 default: 177 break; 178 } 179 } 180 attr->valid = 1; 181 } 182 183 /** 184 * Convert rte_mtr_color to mlx5 color. 185 * 186 * @param[in] rcol 187 * rte_mtr_color. 188 * 189 * @return 190 * mlx5 color. 191 */ 192 static int 193 rte_col_2_mlx5_col(enum rte_color rcol) 194 { 195 switch (rcol) { 196 case RTE_COLOR_GREEN: 197 return MLX5_FLOW_COLOR_GREEN; 198 case RTE_COLOR_YELLOW: 199 return MLX5_FLOW_COLOR_YELLOW; 200 case RTE_COLOR_RED: 201 return MLX5_FLOW_COLOR_RED; 202 default: 203 break; 204 } 205 return MLX5_FLOW_COLOR_UNDEFINED; 206 } 207 208 struct field_modify_info { 209 uint32_t size; /* Size of field in protocol header, in bytes. */ 210 uint32_t offset; /* Offset of field in protocol header, in bytes. */ 211 enum mlx5_modification_field id; 212 }; 213 214 struct field_modify_info modify_eth[] = { 215 {4, 0, MLX5_MODI_OUT_DMAC_47_16}, 216 {2, 4, MLX5_MODI_OUT_DMAC_15_0}, 217 {4, 6, MLX5_MODI_OUT_SMAC_47_16}, 218 {2, 10, MLX5_MODI_OUT_SMAC_15_0}, 219 {0, 0, 0}, 220 }; 221 222 struct field_modify_info modify_vlan_out_first_vid[] = { 223 /* Size in bits !!! 
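	 * Unlike the byte-based tables above and below, the first-VID field
	 * width here is given in bits (12), since the VID is not byte
	 * aligned within the VLAN TCI.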
*/ 224 {12, 0, MLX5_MODI_OUT_FIRST_VID}, 225 {0, 0, 0}, 226 }; 227 228 struct field_modify_info modify_ipv4[] = { 229 {1, 1, MLX5_MODI_OUT_IP_DSCP}, 230 {1, 8, MLX5_MODI_OUT_IPV4_TTL}, 231 {4, 12, MLX5_MODI_OUT_SIPV4}, 232 {4, 16, MLX5_MODI_OUT_DIPV4}, 233 {0, 0, 0}, 234 }; 235 236 struct field_modify_info modify_ipv6[] = { 237 {1, 0, MLX5_MODI_OUT_IP_DSCP}, 238 {1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT}, 239 {4, 8, MLX5_MODI_OUT_SIPV6_127_96}, 240 {4, 12, MLX5_MODI_OUT_SIPV6_95_64}, 241 {4, 16, MLX5_MODI_OUT_SIPV6_63_32}, 242 {4, 20, MLX5_MODI_OUT_SIPV6_31_0}, 243 {4, 24, MLX5_MODI_OUT_DIPV6_127_96}, 244 {4, 28, MLX5_MODI_OUT_DIPV6_95_64}, 245 {4, 32, MLX5_MODI_OUT_DIPV6_63_32}, 246 {4, 36, MLX5_MODI_OUT_DIPV6_31_0}, 247 {0, 0, 0}, 248 }; 249 250 struct field_modify_info modify_udp[] = { 251 {2, 0, MLX5_MODI_OUT_UDP_SPORT}, 252 {2, 2, MLX5_MODI_OUT_UDP_DPORT}, 253 {0, 0, 0}, 254 }; 255 256 struct field_modify_info modify_tcp[] = { 257 {2, 0, MLX5_MODI_OUT_TCP_SPORT}, 258 {2, 2, MLX5_MODI_OUT_TCP_DPORT}, 259 {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM}, 260 {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM}, 261 {0, 0, 0}, 262 }; 263 264 static void 265 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused, 266 uint8_t next_protocol, uint64_t *item_flags, 267 int *tunnel) 268 { 269 MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 || 270 item->type == RTE_FLOW_ITEM_TYPE_IPV6); 271 if (next_protocol == IPPROTO_IPIP) { 272 *item_flags |= MLX5_FLOW_LAYER_IPIP; 273 *tunnel = 1; 274 } 275 if (next_protocol == IPPROTO_IPV6) { 276 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP; 277 *tunnel = 1; 278 } 279 } 280 281 /** 282 * Acquire the synchronizing object to protect multithreaded access 283 * to shared dv context. Lock occurs only if context is actually 284 * shared, i.e. we have multiport IB device and representors are 285 * created. 286 * 287 * @param[in] dev 288 * Pointer to the rte_eth_dev structure. 289 */ 290 static void 291 flow_dv_shared_lock(struct rte_eth_dev *dev) 292 { 293 struct mlx5_priv *priv = dev->data->dev_private; 294 struct mlx5_dev_ctx_shared *sh = priv->sh; 295 296 if (sh->dv_refcnt > 1) { 297 int ret; 298 299 ret = pthread_mutex_lock(&sh->dv_mutex); 300 MLX5_ASSERT(!ret); 301 (void)ret; 302 } 303 } 304 305 static void 306 flow_dv_shared_unlock(struct rte_eth_dev *dev) 307 { 308 struct mlx5_priv *priv = dev->data->dev_private; 309 struct mlx5_dev_ctx_shared *sh = priv->sh; 310 311 if (sh->dv_refcnt > 1) { 312 int ret; 313 314 ret = pthread_mutex_unlock(&sh->dv_mutex); 315 MLX5_ASSERT(!ret); 316 (void)ret; 317 } 318 } 319 320 /* Update VLAN's VID/PCP based on input rte_flow_action. 321 * 322 * @param[in] action 323 * Pointer to struct rte_flow_action. 324 * @param[out] vlan 325 * Pointer to struct rte_vlan_hdr. 
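 *
 * Note: the TCI layout assumed here matches the definitions at the top of
 * this file: PCP in bits 15-13 (MLX5DV_FLOW_VLAN_PCP_MASK) and VID in
 * bits 11-0 (MLX5DV_FLOW_VLAN_VID_MASK).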
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
			 struct rte_vlan_hdr *vlan)
{
	uint16_t vlan_tci;
	if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
		vlan_tci =
		    ((const struct rte_flow_action_of_set_vlan_pcp *)
					       action->conf)->vlan_pcp;
		vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
		vlan->vlan_tci |= vlan_tci;
	} else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
		vlan->vlan_tci |= rte_be_to_cpu_16
		    (((const struct rte_flow_action_of_set_vlan_vid *)
					     action->conf)->vlan_vid);
	}
}

/**
 * Fetch 1, 2, 3 or 4 byte field from the byte array
 * and return as unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
	uint32_t ret;

	switch (size) {
	case 1:
		ret = *data;
		break;
	case 2:
		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
		break;
	case 3:
		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
		ret = (ret << 8) | *(data + sizeof(uint16_t));
		break;
	case 4:
		ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
		break;
	default:
		MLX5_ASSERT(false);
		ret = 0;
		break;
	}
	return ret;
}

/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by provided field description
 * and the item mask. Data bit offset and width of each action are determined
 * by provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct field_modify_info *dcopy,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type, struct rte_flow_error *error)
{
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;

	/*
	 * The item and mask are provided in big-endian format.
	 * The fields should be presented in big-endian format as well.
	 * Mask must be always present, it defines the actual field width.
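	 * For example, a mask of 0x0fff yields off_b = 0 and size_b = 12,
	 * while a full 32-bit mask of 0xffffffff is encoded with length 0
	 * per the convention below.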
428 */ 429 MLX5_ASSERT(item->mask); 430 MLX5_ASSERT(field->size); 431 do { 432 unsigned int size_b; 433 unsigned int off_b; 434 uint32_t mask; 435 uint32_t data; 436 437 if (i >= MLX5_MAX_MODIFY_NUM) 438 return rte_flow_error_set(error, EINVAL, 439 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 440 "too many items to modify"); 441 /* Fetch variable byte size mask from the array. */ 442 mask = flow_dv_fetch_field((const uint8_t *)item->mask + 443 field->offset, field->size); 444 if (!mask) { 445 ++field; 446 continue; 447 } 448 /* Deduce actual data width in bits from mask value. */ 449 off_b = rte_bsf32(mask); 450 size_b = sizeof(uint32_t) * CHAR_BIT - 451 off_b - __builtin_clz(mask); 452 MLX5_ASSERT(size_b); 453 size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b; 454 actions[i] = (struct mlx5_modification_cmd) { 455 .action_type = type, 456 .field = field->id, 457 .offset = off_b, 458 .length = size_b, 459 }; 460 /* Convert entire record to expected big-endian format. */ 461 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0); 462 if (type == MLX5_MODIFICATION_TYPE_COPY) { 463 MLX5_ASSERT(dcopy); 464 actions[i].dst_field = dcopy->id; 465 actions[i].dst_offset = 466 (int)dcopy->offset < 0 ? off_b : dcopy->offset; 467 /* Convert entire record to big-endian format. */ 468 actions[i].data1 = rte_cpu_to_be_32(actions[i].data1); 469 } else { 470 MLX5_ASSERT(item->spec); 471 data = flow_dv_fetch_field((const uint8_t *)item->spec + 472 field->offset, field->size); 473 /* Shift out the trailing masked bits from data. */ 474 data = (data & mask) >> off_b; 475 actions[i].data1 = rte_cpu_to_be_32(data); 476 } 477 ++i; 478 ++field; 479 } while (field->size); 480 if (resource->actions_num == i) 481 return rte_flow_error_set(error, EINVAL, 482 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 483 "invalid modification flow item"); 484 resource->actions_num = i; 485 return 0; 486 } 487 488 /** 489 * Convert modify-header set IPv4 address action to DV specification. 490 * 491 * @param[in,out] resource 492 * Pointer to the modify-header resource. 493 * @param[in] action 494 * Pointer to action specification. 495 * @param[out] error 496 * Pointer to the error structure. 497 * 498 * @return 499 * 0 on success, a negative errno value otherwise and rte_errno is set. 500 */ 501 static int 502 flow_dv_convert_action_modify_ipv4 503 (struct mlx5_flow_dv_modify_hdr_resource *resource, 504 const struct rte_flow_action *action, 505 struct rte_flow_error *error) 506 { 507 const struct rte_flow_action_set_ipv4 *conf = 508 (const struct rte_flow_action_set_ipv4 *)(action->conf); 509 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 }; 510 struct rte_flow_item_ipv4 ipv4; 511 struct rte_flow_item_ipv4 ipv4_mask; 512 513 memset(&ipv4, 0, sizeof(ipv4)); 514 memset(&ipv4_mask, 0, sizeof(ipv4_mask)); 515 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) { 516 ipv4.hdr.src_addr = conf->ipv4_addr; 517 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr; 518 } else { 519 ipv4.hdr.dst_addr = conf->ipv4_addr; 520 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr; 521 } 522 item.spec = &ipv4; 523 item.mask = &ipv4_mask; 524 return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource, 525 MLX5_MODIFICATION_TYPE_SET, error); 526 } 527 528 /** 529 * Convert modify-header set IPv6 address action to DV specification. 530 * 531 * @param[in,out] resource 532 * Pointer to the modify-header resource. 533 * @param[in] action 534 * Pointer to action specification. 
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv6 *conf =
		(const struct rte_flow_action_set_ipv6 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;

	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
		memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.src_addr));
		memcpy(&ipv6_mask.hdr.src_addr,
		       &rte_flow_item_ipv6_mask.hdr.src_addr,
		       sizeof(ipv6.hdr.src_addr));
	} else {
		memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.dst_addr));
		memcpy(&ipv6_mask.hdr.dst_addr,
		       &rte_flow_item_ipv6_mask.hdr.dst_addr,
		       sizeof(ipv6.hdr.dst_addr));
	}
	item.spec = &ipv6;
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_mac *conf =
		(const struct rte_flow_action_set_mac *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
	struct rte_flow_item_eth eth;
	struct rte_flow_item_eth eth_mask;

	memset(&eth, 0, sizeof(eth));
	memset(&eth_mask, 0, sizeof(eth_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
		memcpy(&eth.src.addr_bytes, &conf->mac_addr,
		       sizeof(eth.src.addr_bytes));
		memcpy(&eth_mask.src.addr_bytes,
		       &rte_flow_item_eth_mask.src.addr_bytes,
		       sizeof(eth_mask.src.addr_bytes));
	} else {
		memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
		       sizeof(eth.dst.addr_bytes));
		memcpy(&eth_mask.dst.addr_bytes,
		       &rte_flow_item_eth_mask.dst.addr_bytes,
		       sizeof(eth_mask.dst.addr_bytes));
	}
	item.spec = &eth;
	item.mask = &eth_mask;
	return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
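 *
 * Note: unlike the other modify-header helpers, this one fills the
 * modification command directly; the 16-bit VID value is shifted into
 * the upper half of data1 instead of going through
 * flow_dv_convert_modify_action().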
632 */ 633 static int 634 flow_dv_convert_action_modify_vlan_vid 635 (struct mlx5_flow_dv_modify_hdr_resource *resource, 636 const struct rte_flow_action *action, 637 struct rte_flow_error *error) 638 { 639 const struct rte_flow_action_of_set_vlan_vid *conf = 640 (const struct rte_flow_action_of_set_vlan_vid *)(action->conf); 641 int i = resource->actions_num; 642 struct mlx5_modification_cmd *actions = resource->actions; 643 struct field_modify_info *field = modify_vlan_out_first_vid; 644 645 if (i >= MLX5_MAX_MODIFY_NUM) 646 return rte_flow_error_set(error, EINVAL, 647 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 648 "too many items to modify"); 649 actions[i] = (struct mlx5_modification_cmd) { 650 .action_type = MLX5_MODIFICATION_TYPE_SET, 651 .field = field->id, 652 .length = field->size, 653 .offset = field->offset, 654 }; 655 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0); 656 actions[i].data1 = conf->vlan_vid; 657 actions[i].data1 = actions[i].data1 << 16; 658 resource->actions_num = ++i; 659 return 0; 660 } 661 662 /** 663 * Convert modify-header set TP action to DV specification. 664 * 665 * @param[in,out] resource 666 * Pointer to the modify-header resource. 667 * @param[in] action 668 * Pointer to action specification. 669 * @param[in] items 670 * Pointer to rte_flow_item objects list. 671 * @param[in] attr 672 * Pointer to flow attributes structure. 673 * @param[in] dev_flow 674 * Pointer to the sub flow. 675 * @param[in] tunnel_decap 676 * Whether action is after tunnel decapsulation. 677 * @param[out] error 678 * Pointer to the error structure. 679 * 680 * @return 681 * 0 on success, a negative errno value otherwise and rte_errno is set. 682 */ 683 static int 684 flow_dv_convert_action_modify_tp 685 (struct mlx5_flow_dv_modify_hdr_resource *resource, 686 const struct rte_flow_action *action, 687 const struct rte_flow_item *items, 688 union flow_dv_attr *attr, struct mlx5_flow *dev_flow, 689 bool tunnel_decap, struct rte_flow_error *error) 690 { 691 const struct rte_flow_action_set_tp *conf = 692 (const struct rte_flow_action_set_tp *)(action->conf); 693 struct rte_flow_item item; 694 struct rte_flow_item_udp udp; 695 struct rte_flow_item_udp udp_mask; 696 struct rte_flow_item_tcp tcp; 697 struct rte_flow_item_tcp tcp_mask; 698 struct field_modify_info *field; 699 700 if (!attr->valid) 701 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap); 702 if (attr->udp) { 703 memset(&udp, 0, sizeof(udp)); 704 memset(&udp_mask, 0, sizeof(udp_mask)); 705 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) { 706 udp.hdr.src_port = conf->port; 707 udp_mask.hdr.src_port = 708 rte_flow_item_udp_mask.hdr.src_port; 709 } else { 710 udp.hdr.dst_port = conf->port; 711 udp_mask.hdr.dst_port = 712 rte_flow_item_udp_mask.hdr.dst_port; 713 } 714 item.type = RTE_FLOW_ITEM_TYPE_UDP; 715 item.spec = &udp; 716 item.mask = &udp_mask; 717 field = modify_udp; 718 } else { 719 MLX5_ASSERT(attr->tcp); 720 memset(&tcp, 0, sizeof(tcp)); 721 memset(&tcp_mask, 0, sizeof(tcp_mask)); 722 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) { 723 tcp.hdr.src_port = conf->port; 724 tcp_mask.hdr.src_port = 725 rte_flow_item_tcp_mask.hdr.src_port; 726 } else { 727 tcp.hdr.dst_port = conf->port; 728 tcp_mask.hdr.dst_port = 729 rte_flow_item_tcp_mask.hdr.dst_port; 730 } 731 item.type = RTE_FLOW_ITEM_TYPE_TCP; 732 item.spec = &tcp; 733 item.mask = &tcp_mask; 734 field = modify_tcp; 735 } 736 return flow_dv_convert_modify_action(&item, field, NULL, resource, 737 MLX5_MODIFICATION_TYPE_SET, error); 738 } 739 740 /** 741 * 
Convert modify-header set TTL action to DV specification. 742 * 743 * @param[in,out] resource 744 * Pointer to the modify-header resource. 745 * @param[in] action 746 * Pointer to action specification. 747 * @param[in] items 748 * Pointer to rte_flow_item objects list. 749 * @param[in] attr 750 * Pointer to flow attributes structure. 751 * @param[in] dev_flow 752 * Pointer to the sub flow. 753 * @param[in] tunnel_decap 754 * Whether action is after tunnel decapsulation. 755 * @param[out] error 756 * Pointer to the error structure. 757 * 758 * @return 759 * 0 on success, a negative errno value otherwise and rte_errno is set. 760 */ 761 static int 762 flow_dv_convert_action_modify_ttl 763 (struct mlx5_flow_dv_modify_hdr_resource *resource, 764 const struct rte_flow_action *action, 765 const struct rte_flow_item *items, 766 union flow_dv_attr *attr, struct mlx5_flow *dev_flow, 767 bool tunnel_decap, struct rte_flow_error *error) 768 { 769 const struct rte_flow_action_set_ttl *conf = 770 (const struct rte_flow_action_set_ttl *)(action->conf); 771 struct rte_flow_item item; 772 struct rte_flow_item_ipv4 ipv4; 773 struct rte_flow_item_ipv4 ipv4_mask; 774 struct rte_flow_item_ipv6 ipv6; 775 struct rte_flow_item_ipv6 ipv6_mask; 776 struct field_modify_info *field; 777 778 if (!attr->valid) 779 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap); 780 if (attr->ipv4) { 781 memset(&ipv4, 0, sizeof(ipv4)); 782 memset(&ipv4_mask, 0, sizeof(ipv4_mask)); 783 ipv4.hdr.time_to_live = conf->ttl_value; 784 ipv4_mask.hdr.time_to_live = 0xFF; 785 item.type = RTE_FLOW_ITEM_TYPE_IPV4; 786 item.spec = &ipv4; 787 item.mask = &ipv4_mask; 788 field = modify_ipv4; 789 } else { 790 MLX5_ASSERT(attr->ipv6); 791 memset(&ipv6, 0, sizeof(ipv6)); 792 memset(&ipv6_mask, 0, sizeof(ipv6_mask)); 793 ipv6.hdr.hop_limits = conf->ttl_value; 794 ipv6_mask.hdr.hop_limits = 0xFF; 795 item.type = RTE_FLOW_ITEM_TYPE_IPV6; 796 item.spec = &ipv6; 797 item.mask = &ipv6_mask; 798 field = modify_ipv6; 799 } 800 return flow_dv_convert_modify_action(&item, field, NULL, resource, 801 MLX5_MODIFICATION_TYPE_SET, error); 802 } 803 804 /** 805 * Convert modify-header decrement TTL action to DV specification. 806 * 807 * @param[in,out] resource 808 * Pointer to the modify-header resource. 809 * @param[in] action 810 * Pointer to action specification. 811 * @param[in] items 812 * Pointer to rte_flow_item objects list. 813 * @param[in] attr 814 * Pointer to flow attributes structure. 815 * @param[in] dev_flow 816 * Pointer to the sub flow. 817 * @param[in] tunnel_decap 818 * Whether action is after tunnel decapsulation. 819 * @param[out] error 820 * Pointer to the error structure. 821 * 822 * @return 823 * 0 on success, a negative errno value otherwise and rte_errno is set. 
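 *
 * Note: the decrement is emulated with an MLX5_MODIFICATION_TYPE_ADD of
 * 0xFF to the 8-bit TTL/hop-limit field, which wraps around to TTL - 1.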
824 */ 825 static int 826 flow_dv_convert_action_modify_dec_ttl 827 (struct mlx5_flow_dv_modify_hdr_resource *resource, 828 const struct rte_flow_item *items, 829 union flow_dv_attr *attr, struct mlx5_flow *dev_flow, 830 bool tunnel_decap, struct rte_flow_error *error) 831 { 832 struct rte_flow_item item; 833 struct rte_flow_item_ipv4 ipv4; 834 struct rte_flow_item_ipv4 ipv4_mask; 835 struct rte_flow_item_ipv6 ipv6; 836 struct rte_flow_item_ipv6 ipv6_mask; 837 struct field_modify_info *field; 838 839 if (!attr->valid) 840 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap); 841 if (attr->ipv4) { 842 memset(&ipv4, 0, sizeof(ipv4)); 843 memset(&ipv4_mask, 0, sizeof(ipv4_mask)); 844 ipv4.hdr.time_to_live = 0xFF; 845 ipv4_mask.hdr.time_to_live = 0xFF; 846 item.type = RTE_FLOW_ITEM_TYPE_IPV4; 847 item.spec = &ipv4; 848 item.mask = &ipv4_mask; 849 field = modify_ipv4; 850 } else { 851 MLX5_ASSERT(attr->ipv6); 852 memset(&ipv6, 0, sizeof(ipv6)); 853 memset(&ipv6_mask, 0, sizeof(ipv6_mask)); 854 ipv6.hdr.hop_limits = 0xFF; 855 ipv6_mask.hdr.hop_limits = 0xFF; 856 item.type = RTE_FLOW_ITEM_TYPE_IPV6; 857 item.spec = &ipv6; 858 item.mask = &ipv6_mask; 859 field = modify_ipv6; 860 } 861 return flow_dv_convert_modify_action(&item, field, NULL, resource, 862 MLX5_MODIFICATION_TYPE_ADD, error); 863 } 864 865 /** 866 * Convert modify-header increment/decrement TCP Sequence number 867 * to DV specification. 868 * 869 * @param[in,out] resource 870 * Pointer to the modify-header resource. 871 * @param[in] action 872 * Pointer to action specification. 873 * @param[out] error 874 * Pointer to the error structure. 875 * 876 * @return 877 * 0 on success, a negative errno value otherwise and rte_errno is set. 878 */ 879 static int 880 flow_dv_convert_action_modify_tcp_seq 881 (struct mlx5_flow_dv_modify_hdr_resource *resource, 882 const struct rte_flow_action *action, 883 struct rte_flow_error *error) 884 { 885 const rte_be32_t *conf = (const rte_be32_t *)(action->conf); 886 uint64_t value = rte_be_to_cpu_32(*conf); 887 struct rte_flow_item item; 888 struct rte_flow_item_tcp tcp; 889 struct rte_flow_item_tcp tcp_mask; 890 891 memset(&tcp, 0, sizeof(tcp)); 892 memset(&tcp_mask, 0, sizeof(tcp_mask)); 893 if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ) 894 /* 895 * The HW has no decrement operation, only increment operation. 896 * To simulate decrement X from Y using increment operation 897 * we need to add UINT32_MAX X times to Y. 898 * Each adding of UINT32_MAX decrements Y by 1. 899 */ 900 value *= UINT32_MAX; 901 tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value); 902 tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX); 903 item.type = RTE_FLOW_ITEM_TYPE_TCP; 904 item.spec = &tcp; 905 item.mask = &tcp_mask; 906 return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource, 907 MLX5_MODIFICATION_TYPE_ADD, error); 908 } 909 910 /** 911 * Convert modify-header increment/decrement TCP Acknowledgment number 912 * to DV specification. 913 * 914 * @param[in,out] resource 915 * Pointer to the modify-header resource. 916 * @param[in] action 917 * Pointer to action specification. 918 * @param[out] error 919 * Pointer to the error structure. 920 * 921 * @return 922 * 0 on success, a negative errno value otherwise and rte_errno is set. 
923 */ 924 static int 925 flow_dv_convert_action_modify_tcp_ack 926 (struct mlx5_flow_dv_modify_hdr_resource *resource, 927 const struct rte_flow_action *action, 928 struct rte_flow_error *error) 929 { 930 const rte_be32_t *conf = (const rte_be32_t *)(action->conf); 931 uint64_t value = rte_be_to_cpu_32(*conf); 932 struct rte_flow_item item; 933 struct rte_flow_item_tcp tcp; 934 struct rte_flow_item_tcp tcp_mask; 935 936 memset(&tcp, 0, sizeof(tcp)); 937 memset(&tcp_mask, 0, sizeof(tcp_mask)); 938 if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK) 939 /* 940 * The HW has no decrement operation, only increment operation. 941 * To simulate decrement X from Y using increment operation 942 * we need to add UINT32_MAX X times to Y. 943 * Each adding of UINT32_MAX decrements Y by 1. 944 */ 945 value *= UINT32_MAX; 946 tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value); 947 tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX); 948 item.type = RTE_FLOW_ITEM_TYPE_TCP; 949 item.spec = &tcp; 950 item.mask = &tcp_mask; 951 return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource, 952 MLX5_MODIFICATION_TYPE_ADD, error); 953 } 954 955 static enum mlx5_modification_field reg_to_field[] = { 956 [REG_NON] = MLX5_MODI_OUT_NONE, 957 [REG_A] = MLX5_MODI_META_DATA_REG_A, 958 [REG_B] = MLX5_MODI_META_DATA_REG_B, 959 [REG_C_0] = MLX5_MODI_META_REG_C_0, 960 [REG_C_1] = MLX5_MODI_META_REG_C_1, 961 [REG_C_2] = MLX5_MODI_META_REG_C_2, 962 [REG_C_3] = MLX5_MODI_META_REG_C_3, 963 [REG_C_4] = MLX5_MODI_META_REG_C_4, 964 [REG_C_5] = MLX5_MODI_META_REG_C_5, 965 [REG_C_6] = MLX5_MODI_META_REG_C_6, 966 [REG_C_7] = MLX5_MODI_META_REG_C_7, 967 }; 968 969 /** 970 * Convert register set to DV specification. 971 * 972 * @param[in,out] resource 973 * Pointer to the modify-header resource. 974 * @param[in] action 975 * Pointer to action specification. 976 * @param[out] error 977 * Pointer to the error structure. 978 * 979 * @return 980 * 0 on success, a negative errno value otherwise and rte_errno is set. 981 */ 982 static int 983 flow_dv_convert_action_set_reg 984 (struct mlx5_flow_dv_modify_hdr_resource *resource, 985 const struct rte_flow_action *action, 986 struct rte_flow_error *error) 987 { 988 const struct mlx5_rte_flow_action_set_tag *conf = action->conf; 989 struct mlx5_modification_cmd *actions = resource->actions; 990 uint32_t i = resource->actions_num; 991 992 if (i >= MLX5_MAX_MODIFY_NUM) 993 return rte_flow_error_set(error, EINVAL, 994 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 995 "too many items to modify"); 996 MLX5_ASSERT(conf->id != REG_NON); 997 MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field)); 998 actions[i] = (struct mlx5_modification_cmd) { 999 .action_type = MLX5_MODIFICATION_TYPE_SET, 1000 .field = reg_to_field[conf->id], 1001 }; 1002 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0); 1003 actions[i].data1 = rte_cpu_to_be_32(conf->data); 1004 ++i; 1005 resource->actions_num = i; 1006 return 0; 1007 } 1008 1009 /** 1010 * Convert SET_TAG action to DV specification. 1011 * 1012 * @param[in] dev 1013 * Pointer to the rte_eth_dev structure. 1014 * @param[in,out] resource 1015 * Pointer to the modify-header resource. 1016 * @param[in] conf 1017 * Pointer to action specification. 1018 * @param[out] error 1019 * Pointer to the error structure. 1020 * 1021 * @return 1022 * 0 on success, a negative errno value otherwise and rte_errno is set. 
 */
static int
flow_dv_convert_action_set_tag
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action_set_tag *conf,
			 struct rte_flow_error *error)
{
	rte_be32_t data = rte_cpu_to_be_32(conf->data);
	rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	enum mlx5_modification_field reg_type;
	int ret;

	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
	if (ret < 0)
		return ret;
	MLX5_ASSERT(ret != REG_NON);
	MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
	reg_type = reg_to_field[ret];
	MLX5_ASSERT(reg_type > 0);
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
				 struct mlx5_flow_dv_modify_hdr_resource *res,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error)
{
	const struct mlx5_flow_action_copy_mreg *conf = action->conf;
	rte_be32_t mask = RTE_BE32(UINT32_MAX);
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = &mask,
	};
	struct field_modify_info reg_src[] = {
		{4, 0, reg_to_field[conf->src]},
		{0, 0, 0},
	};
	struct field_modify_info reg_dst = {
		.offset = 0,
		.id = reg_to_field[conf->dst],
	};
	/* Adjust reg_c[0] usage according to reported mask. */
	if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t reg_c0 = priv->sh->dv_regc0_mask;

		MLX5_ASSERT(reg_c0);
		MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
		if (conf->dst == REG_C_0) {
			/* Copy to reg_c[0], within mask only. */
			reg_dst.offset = rte_bsf32(reg_c0);
			/*
			 * The mask ignores endianness because
			 * there is no conversion in the datapath.
			 */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from destination lower bits to reg_c[0]. */
			mask = reg_c0 >> reg_dst.offset;
#else
			/* Copy from destination upper bits to reg_c[0]. */
			mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
					  rte_fls_u32(reg_c0));
#endif
		} else {
			mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from reg_c[0] to destination lower bits. */
			reg_dst.offset = 0;
#else
			/* Copy from reg_c[0] to destination upper bits. */
			reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
					 (rte_fls_u32(reg_c0) -
					  rte_bsf32(reg_c0));
#endif
		}
	}
	return flow_dv_convert_modify_action(&item,
					     reg_src, &reg_dst, res,
					     MLX5_MODIFICATION_TYPE_COPY,
					     error);
}

/**
 * Convert MARK action to DV specification.
This routine is used 1133 * in extensive metadata only and requires metadata register to be 1134 * handled. In legacy mode hardware tag resource is engaged. 1135 * 1136 * @param[in] dev 1137 * Pointer to the rte_eth_dev structure. 1138 * @param[in] conf 1139 * Pointer to MARK action specification. 1140 * @param[in,out] resource 1141 * Pointer to the modify-header resource. 1142 * @param[out] error 1143 * Pointer to the error structure. 1144 * 1145 * @return 1146 * 0 on success, a negative errno value otherwise and rte_errno is set. 1147 */ 1148 static int 1149 flow_dv_convert_action_mark(struct rte_eth_dev *dev, 1150 const struct rte_flow_action_mark *conf, 1151 struct mlx5_flow_dv_modify_hdr_resource *resource, 1152 struct rte_flow_error *error) 1153 { 1154 struct mlx5_priv *priv = dev->data->dev_private; 1155 rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK & 1156 priv->sh->dv_mark_mask); 1157 rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask; 1158 struct rte_flow_item item = { 1159 .spec = &data, 1160 .mask = &mask, 1161 }; 1162 struct field_modify_info reg_c_x[] = { 1163 [1] = {0, 0, 0}, 1164 }; 1165 int reg; 1166 1167 if (!mask) 1168 return rte_flow_error_set(error, EINVAL, 1169 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1170 NULL, "zero mark action mask"); 1171 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); 1172 if (reg < 0) 1173 return reg; 1174 MLX5_ASSERT(reg > 0); 1175 if (reg == REG_C_0) { 1176 uint32_t msk_c0 = priv->sh->dv_regc0_mask; 1177 uint32_t shl_c0 = rte_bsf32(msk_c0); 1178 1179 data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0); 1180 mask = rte_cpu_to_be_32(mask) & msk_c0; 1181 mask = rte_cpu_to_be_32(mask << shl_c0); 1182 } 1183 reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]}; 1184 return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource, 1185 MLX5_MODIFICATION_TYPE_SET, error); 1186 } 1187 1188 /** 1189 * Get metadata register index for specified steering domain. 1190 * 1191 * @param[in] dev 1192 * Pointer to the rte_eth_dev structure. 1193 * @param[in] attr 1194 * Attributes of flow to determine steering domain. 1195 * @param[out] error 1196 * Pointer to the error structure. 1197 * 1198 * @return 1199 * positive index on success, a negative errno value otherwise 1200 * and rte_errno is set. 1201 */ 1202 static enum modify_reg 1203 flow_dv_get_metadata_reg(struct rte_eth_dev *dev, 1204 const struct rte_flow_attr *attr, 1205 struct rte_flow_error *error) 1206 { 1207 int reg = 1208 mlx5_flow_get_reg_id(dev, attr->transfer ? 1209 MLX5_METADATA_FDB : 1210 attr->egress ? 1211 MLX5_METADATA_TX : 1212 MLX5_METADATA_RX, 0, error); 1213 if (reg < 0) 1214 return rte_flow_error_set(error, 1215 ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, 1216 NULL, "unavailable " 1217 "metadata register"); 1218 return reg; 1219 } 1220 1221 /** 1222 * Convert SET_META action to DV specification. 1223 * 1224 * @param[in] dev 1225 * Pointer to the rte_eth_dev structure. 1226 * @param[in,out] resource 1227 * Pointer to the modify-header resource. 1228 * @param[in] attr 1229 * Attributes of flow that includes this item. 1230 * @param[in] conf 1231 * Pointer to action specification. 1232 * @param[out] error 1233 * Pointer to the error structure. 1234 * 1235 * @return 1236 * 0 on success, a negative errno value otherwise and rte_errno is set. 
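 *
 * Note: when the metadata register resolves to REG_C_0, the data and mask
 * are shifted to fit the available reg_c[0] bits (priv->sh->dv_regc0_mask).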
 */
static int
flow_dv_convert_action_set_meta
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_action_set_meta *conf,
			 struct rte_flow_error *error)
{
	uint32_t data = conf->data;
	uint32_t mask = conf->mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg = flow_dv_get_metadata_reg(dev, attr, error);

	if (reg < 0)
		return reg;
	/*
	 * In the datapath code there are no endianness
	 * conversions for performance reasons, all
	 * pattern conversions are done in rte_flow.
	 */
	if (reg == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0;

		MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		shl_c0 = rte_bsf32(msk_c0);
#else
		shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
		mask <<= shl_c0;
		data <<= shl_c0;
		MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	/* The routine expects parameters in memory as big-endian ones. */
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_dscp *conf =
		(const struct rte_flow_action_set_dscp *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;

	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	ipv4.hdr.type_of_service = conf->dscp;
	ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
	item.spec = &ipv4;
	item.mask = &ipv4_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
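 *
 * Note: both here and in the IPv4 variant above the header mask reduces to
 * 0x3f (RTE_IPV6_HDR_DSCP_MASK >> 22, RTE_IPV4_HDR_DSCP_MASK >> 2), i.e.
 * the 6 DSCP bits aligned to bit 0 as rdma-core expects.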
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_dscp *conf =
		(const struct rte_flow_action_set_dscp *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;

	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	/*
	 * Even though the DSCP bit offset of IPv6 is not byte aligned,
	 * rdma-core only accepts the DSCP bits byte aligned, i.e. placed
	 * in bits 0 to 5, to be compatible with IPv4. No need to shift
	 * the bits in the IPv6 case as rdma-core requires a byte-aligned
	 * value.
	 */
	ipv6.hdr.vtc_flow = conf->dscp;
	ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
	item.spec = &ipv6;
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Validate MARK item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_mark(struct rte_eth_dev *dev,
			   const struct rte_flow_item *item,
			   const struct rte_flow_attr *attr __rte_unused,
			   struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	const struct rte_flow_item_mark *spec = item->spec;
	const struct rte_flow_item_mark *mask = item->mask;
	const struct rte_flow_item_mark nic_mask = {
		.id = priv->sh->dv_mark_mask,
	};
	int ret;

	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata feature"
					  " isn't enabled");
	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata register"
					  " isn't supported");
	if (!nic_mask.id)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata register"
					  " isn't available");
	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (ret < 0)
		return ret;
	if (!spec)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "data cannot be empty");
	if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &spec->id,
					  "mark id exceeds the limit");
	if (!mask)
		mask = &nic_mask;
	if (!mask->id)
		return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
					"mask cannot be zero");

	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_mark),
					error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate META item.
1437 * 1438 * @param[in] dev 1439 * Pointer to the rte_eth_dev structure. 1440 * @param[in] item 1441 * Item specification. 1442 * @param[in] attr 1443 * Attributes of flow that includes this item. 1444 * @param[out] error 1445 * Pointer to error structure. 1446 * 1447 * @return 1448 * 0 on success, a negative errno value otherwise and rte_errno is set. 1449 */ 1450 static int 1451 flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, 1452 const struct rte_flow_item *item, 1453 const struct rte_flow_attr *attr, 1454 struct rte_flow_error *error) 1455 { 1456 struct mlx5_priv *priv = dev->data->dev_private; 1457 struct mlx5_dev_config *config = &priv->config; 1458 const struct rte_flow_item_meta *spec = item->spec; 1459 const struct rte_flow_item_meta *mask = item->mask; 1460 struct rte_flow_item_meta nic_mask = { 1461 .data = UINT32_MAX 1462 }; 1463 int reg; 1464 int ret; 1465 1466 if (!spec) 1467 return rte_flow_error_set(error, EINVAL, 1468 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, 1469 item->spec, 1470 "data cannot be empty"); 1471 if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 1472 if (!mlx5_flow_ext_mreg_supported(dev)) 1473 return rte_flow_error_set(error, ENOTSUP, 1474 RTE_FLOW_ERROR_TYPE_ITEM, item, 1475 "extended metadata register" 1476 " isn't supported"); 1477 reg = flow_dv_get_metadata_reg(dev, attr, error); 1478 if (reg < 0) 1479 return reg; 1480 if (reg == REG_B) 1481 return rte_flow_error_set(error, ENOTSUP, 1482 RTE_FLOW_ERROR_TYPE_ITEM, item, 1483 "match on reg_b " 1484 "isn't supported"); 1485 if (reg != REG_A) 1486 nic_mask.data = priv->sh->dv_meta_mask; 1487 } else if (attr->transfer) { 1488 return rte_flow_error_set(error, ENOTSUP, 1489 RTE_FLOW_ERROR_TYPE_ITEM, item, 1490 "extended metadata feature " 1491 "should be enabled when " 1492 "meta item is requested " 1493 "with e-switch mode "); 1494 } 1495 if (!mask) 1496 mask = &rte_flow_item_meta_mask; 1497 if (!mask->data) 1498 return rte_flow_error_set(error, EINVAL, 1499 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL, 1500 "mask cannot be zero"); 1501 1502 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1503 (const uint8_t *)&nic_mask, 1504 sizeof(struct rte_flow_item_meta), 1505 error); 1506 return ret; 1507 } 1508 1509 /** 1510 * Validate TAG item. 1511 * 1512 * @param[in] dev 1513 * Pointer to the rte_eth_dev structure. 1514 * @param[in] item 1515 * Item specification. 1516 * @param[in] attr 1517 * Attributes of flow that includes this item. 1518 * @param[out] error 1519 * Pointer to error structure. 1520 * 1521 * @return 1522 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1523 */ 1524 static int 1525 flow_dv_validate_item_tag(struct rte_eth_dev *dev, 1526 const struct rte_flow_item *item, 1527 const struct rte_flow_attr *attr __rte_unused, 1528 struct rte_flow_error *error) 1529 { 1530 const struct rte_flow_item_tag *spec = item->spec; 1531 const struct rte_flow_item_tag *mask = item->mask; 1532 const struct rte_flow_item_tag nic_mask = { 1533 .data = RTE_BE32(UINT32_MAX), 1534 .index = 0xff, 1535 }; 1536 int ret; 1537 1538 if (!mlx5_flow_ext_mreg_supported(dev)) 1539 return rte_flow_error_set(error, ENOTSUP, 1540 RTE_FLOW_ERROR_TYPE_ITEM, item, 1541 "extensive metadata register" 1542 " isn't supported"); 1543 if (!spec) 1544 return rte_flow_error_set(error, EINVAL, 1545 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, 1546 item->spec, 1547 "data cannot be empty"); 1548 if (!mask) 1549 mask = &rte_flow_item_tag_mask; 1550 if (!mask->data) 1551 return rte_flow_error_set(error, EINVAL, 1552 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL, 1553 "mask cannot be zero"); 1554 1555 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1556 (const uint8_t *)&nic_mask, 1557 sizeof(struct rte_flow_item_tag), 1558 error); 1559 if (ret < 0) 1560 return ret; 1561 if (mask->index != 0xff) 1562 return rte_flow_error_set(error, EINVAL, 1563 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL, 1564 "partial mask for tag index" 1565 " is not supported"); 1566 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error); 1567 if (ret < 0) 1568 return ret; 1569 MLX5_ASSERT(ret != REG_NON); 1570 return 0; 1571 } 1572 1573 /** 1574 * Validate vport item. 1575 * 1576 * @param[in] dev 1577 * Pointer to the rte_eth_dev structure. 1578 * @param[in] item 1579 * Item specification. 1580 * @param[in] attr 1581 * Attributes of flow that includes this item. 1582 * @param[in] item_flags 1583 * Bit-fields that holds the items detected until now. 1584 * @param[out] error 1585 * Pointer to error structure. 1586 * 1587 * @return 1588 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1589 */ 1590 static int 1591 flow_dv_validate_item_port_id(struct rte_eth_dev *dev, 1592 const struct rte_flow_item *item, 1593 const struct rte_flow_attr *attr, 1594 uint64_t item_flags, 1595 struct rte_flow_error *error) 1596 { 1597 const struct rte_flow_item_port_id *spec = item->spec; 1598 const struct rte_flow_item_port_id *mask = item->mask; 1599 const struct rte_flow_item_port_id switch_mask = { 1600 .id = 0xffffffff, 1601 }; 1602 struct mlx5_priv *esw_priv; 1603 struct mlx5_priv *dev_priv; 1604 int ret; 1605 1606 if (!attr->transfer) 1607 return rte_flow_error_set(error, EINVAL, 1608 RTE_FLOW_ERROR_TYPE_ITEM, 1609 NULL, 1610 "match on port id is valid only" 1611 " when transfer flag is enabled"); 1612 if (item_flags & MLX5_FLOW_ITEM_PORT_ID) 1613 return rte_flow_error_set(error, ENOTSUP, 1614 RTE_FLOW_ERROR_TYPE_ITEM, item, 1615 "multiple source ports are not" 1616 " supported"); 1617 if (!mask) 1618 mask = &switch_mask; 1619 if (mask->id != 0xffffffff) 1620 return rte_flow_error_set(error, ENOTSUP, 1621 RTE_FLOW_ERROR_TYPE_ITEM_MASK, 1622 mask, 1623 "no support for partial mask on" 1624 " \"id\" field"); 1625 ret = mlx5_flow_item_acceptable 1626 (item, (const uint8_t *)mask, 1627 (const uint8_t *)&rte_flow_item_port_id_mask, 1628 sizeof(struct rte_flow_item_port_id), 1629 error); 1630 if (ret) 1631 return ret; 1632 if (!spec) 1633 return 0; 1634 esw_priv = mlx5_port_to_eswitch_info(spec->id, false); 1635 if (!esw_priv) 1636 return rte_flow_error_set(error, rte_errno, 1637 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec, 1638 "failed to obtain E-Switch info for" 1639 " port"); 1640 dev_priv = mlx5_dev_to_eswitch_info(dev); 1641 if (!dev_priv) 1642 return rte_flow_error_set(error, rte_errno, 1643 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1644 NULL, 1645 "failed to obtain E-Switch info"); 1646 if (esw_priv->domain_id != dev_priv->domain_id) 1647 return rte_flow_error_set(error, EINVAL, 1648 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec, 1649 "cannot match on a port from a" 1650 " different E-Switch"); 1651 return 0; 1652 } 1653 1654 /** 1655 * Validate VLAN item. 1656 * 1657 * @param[in] item 1658 * Item specification. 1659 * @param[in] item_flags 1660 * Bit-fields that holds the items detected until now. 1661 * @param[in] dev 1662 * Ethernet device flow is being created on. 1663 * @param[out] error 1664 * Pointer to error structure. 1665 * 1666 * @return 1667 * 0 on success, a negative errno value otherwise and rte_errno is set. 1668 */ 1669 static int 1670 flow_dv_validate_item_vlan(const struct rte_flow_item *item, 1671 uint64_t item_flags, 1672 struct rte_eth_dev *dev, 1673 struct rte_flow_error *error) 1674 { 1675 const struct rte_flow_item_vlan *mask = item->mask; 1676 const struct rte_flow_item_vlan nic_mask = { 1677 .tci = RTE_BE16(UINT16_MAX), 1678 .inner_type = RTE_BE16(UINT16_MAX), 1679 }; 1680 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1681 int ret; 1682 const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 | 1683 MLX5_FLOW_LAYER_INNER_L4) : 1684 (MLX5_FLOW_LAYER_OUTER_L3 | 1685 MLX5_FLOW_LAYER_OUTER_L4); 1686 const uint64_t vlanm = tunnel ? 
MLX5_FLOW_LAYER_INNER_VLAN : 1687 MLX5_FLOW_LAYER_OUTER_VLAN; 1688 1689 if (item_flags & vlanm) 1690 return rte_flow_error_set(error, EINVAL, 1691 RTE_FLOW_ERROR_TYPE_ITEM, item, 1692 "multiple VLAN layers not supported"); 1693 else if ((item_flags & l34m) != 0) 1694 return rte_flow_error_set(error, EINVAL, 1695 RTE_FLOW_ERROR_TYPE_ITEM, item, 1696 "VLAN cannot follow L3/L4 layer"); 1697 if (!mask) 1698 mask = &rte_flow_item_vlan_mask; 1699 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1700 (const uint8_t *)&nic_mask, 1701 sizeof(struct rte_flow_item_vlan), 1702 error); 1703 if (ret) 1704 return ret; 1705 if (!tunnel && mask->tci != RTE_BE16(0x0fff)) { 1706 struct mlx5_priv *priv = dev->data->dev_private; 1707 1708 if (priv->vmwa_context) { 1709 /* 1710 * Non-NULL context means we have a virtual machine 1711 * and SR-IOV enabled, we have to create VLAN interface 1712 * to make hypervisor to setup E-Switch vport 1713 * context correctly. We avoid creating the multiple 1714 * VLAN interfaces, so we cannot support VLAN tag mask. 1715 */ 1716 return rte_flow_error_set(error, EINVAL, 1717 RTE_FLOW_ERROR_TYPE_ITEM, 1718 item, 1719 "VLAN tag mask is not" 1720 " supported in virtual" 1721 " environment"); 1722 } 1723 } 1724 return 0; 1725 } 1726 1727 /* 1728 * GTP flags are contained in 1 byte of the format: 1729 * ------------------------------------------- 1730 * | bit | 0 - 2 | 3 | 4 | 5 | 6 | 7 | 1731 * |-----------------------------------------| 1732 * | value | Version | PT | Res | E | S | PN | 1733 * ------------------------------------------- 1734 * 1735 * Matching is supported only for GTP flags E, S, PN. 1736 */ 1737 #define MLX5_GTP_FLAGS_MASK 0x07 1738 1739 /** 1740 * Validate GTP item. 1741 * 1742 * @param[in] dev 1743 * Pointer to the rte_eth_dev structure. 1744 * @param[in] item 1745 * Item specification. 1746 * @param[in] item_flags 1747 * Bit-fields that holds the items detected until now. 1748 * @param[out] error 1749 * Pointer to error structure. 1750 * 1751 * @return 1752 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1753 */ 1754 static int 1755 flow_dv_validate_item_gtp(struct rte_eth_dev *dev, 1756 const struct rte_flow_item *item, 1757 uint64_t item_flags, 1758 struct rte_flow_error *error) 1759 { 1760 struct mlx5_priv *priv = dev->data->dev_private; 1761 const struct rte_flow_item_gtp *spec = item->spec; 1762 const struct rte_flow_item_gtp *mask = item->mask; 1763 const struct rte_flow_item_gtp nic_mask = { 1764 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK, 1765 .msg_type = 0xff, 1766 .teid = RTE_BE32(0xffffffff), 1767 }; 1768 1769 if (!priv->config.hca_attr.tunnel_stateless_gtp) 1770 return rte_flow_error_set(error, ENOTSUP, 1771 RTE_FLOW_ERROR_TYPE_ITEM, item, 1772 "GTP support is not enabled"); 1773 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 1774 return rte_flow_error_set(error, ENOTSUP, 1775 RTE_FLOW_ERROR_TYPE_ITEM, item, 1776 "multiple tunnel layers not" 1777 " supported"); 1778 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 1779 return rte_flow_error_set(error, EINVAL, 1780 RTE_FLOW_ERROR_TYPE_ITEM, item, 1781 "no outer UDP layer found"); 1782 if (!mask) 1783 mask = &rte_flow_item_gtp_mask; 1784 if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK) 1785 return rte_flow_error_set(error, ENOTSUP, 1786 RTE_FLOW_ERROR_TYPE_ITEM, item, 1787 "Match is supported for GTP" 1788 " flags only"); 1789 return mlx5_flow_item_acceptable 1790 (item, (const uint8_t *)mask, 1791 (const uint8_t *)&nic_mask, 1792 sizeof(struct rte_flow_item_gtp), 1793 error); 1794 } 1795 1796 /** 1797 * Validate the pop VLAN action. 1798 * 1799 * @param[in] dev 1800 * Pointer to the rte_eth_dev structure. 1801 * @param[in] action_flags 1802 * Holds the actions detected until now. 1803 * @param[in] action 1804 * Pointer to the pop vlan action. 1805 * @param[in] item_flags 1806 * The items found in this flow rule. 1807 * @param[in] attr 1808 * Pointer to flow attributes. 1809 * @param[out] error 1810 * Pointer to error structure. 1811 * 1812 * @return 1813 * 0 on success, a negative errno value otherwise and rte_errno is set. 1814 */ 1815 static int 1816 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev, 1817 uint64_t action_flags, 1818 const struct rte_flow_action *action, 1819 uint64_t item_flags, 1820 const struct rte_flow_attr *attr, 1821 struct rte_flow_error *error) 1822 { 1823 const struct mlx5_priv *priv = dev->data->dev_private; 1824 1825 (void)action; 1826 (void)attr; 1827 if (!priv->sh->pop_vlan_action) 1828 return rte_flow_error_set(error, ENOTSUP, 1829 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1830 NULL, 1831 "pop vlan action is not supported"); 1832 if (attr->egress) 1833 return rte_flow_error_set(error, ENOTSUP, 1834 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, 1835 NULL, 1836 "pop vlan action not supported for " 1837 "egress"); 1838 if (action_flags & MLX5_FLOW_VLAN_ACTIONS) 1839 return rte_flow_error_set(error, ENOTSUP, 1840 RTE_FLOW_ERROR_TYPE_ACTION, action, 1841 "no support for multiple VLAN " 1842 "actions"); 1843 /* Pop VLAN with preceding Decap requires inner header with VLAN. */ 1844 if ((action_flags & MLX5_FLOW_ACTION_DECAP) && 1845 !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN)) 1846 return rte_flow_error_set(error, ENOTSUP, 1847 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1848 NULL, 1849 "cannot pop vlan after decap without " 1850 "match on inner vlan in the flow"); 1851 /* Pop VLAN without preceding Decap requires outer header with VLAN. 
*/ 1852 if (!(action_flags & MLX5_FLOW_ACTION_DECAP) && 1853 !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) 1854 return rte_flow_error_set(error, ENOTSUP, 1855 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1856 NULL, 1857 "cannot pop vlan without a " 1858 "match on (outer) vlan in the flow"); 1859 if (action_flags & MLX5_FLOW_ACTION_PORT_ID) 1860 return rte_flow_error_set(error, EINVAL, 1861 RTE_FLOW_ERROR_TYPE_ACTION, action, 1862 "wrong action order, port_id should " 1863 "be after pop VLAN action"); 1864 if (!attr->transfer && priv->representor) 1865 return rte_flow_error_set(error, ENOTSUP, 1866 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1867 "pop vlan action for VF representor " 1868 "not supported on NIC table"); 1869 return 0; 1870 } 1871 1872 /** 1873 * Get the default VLAN info from the VLAN match info. 1874 * 1875 * @param[in] items 1876 * The list of item specifications. 1877 * @param[out] vlan 1878 * Pointer to the VLAN info to fill in. 1879 * 1880 * @note 1881 * This function returns nothing; @p vlan is updated in place. 1882 */ 1883 static void 1884 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items, 1885 struct rte_vlan_hdr *vlan) 1886 { 1887 const struct rte_flow_item_vlan nic_mask = { 1888 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK | 1889 MLX5DV_FLOW_VLAN_VID_MASK), 1890 .inner_type = RTE_BE16(0xffff), 1891 }; 1892 1893 if (items == NULL) 1894 return; 1895 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { 1896 int type = items->type; 1897 1898 if (type == RTE_FLOW_ITEM_TYPE_VLAN || 1899 type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN) 1900 break; 1901 } 1902 if (items->type != RTE_FLOW_ITEM_TYPE_END) { 1903 const struct rte_flow_item_vlan *vlan_m = items->mask; 1904 const struct rte_flow_item_vlan *vlan_v = items->spec; 1905 1906 /* If VLAN item in pattern doesn't contain data, return here. */ 1907 if (!vlan_v) 1908 return; 1909 if (!vlan_m) 1910 vlan_m = &nic_mask; 1911 /* Only full match values are accepted. */ 1912 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) == 1913 MLX5DV_FLOW_VLAN_PCP_MASK_BE) { 1914 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK; 1915 vlan->vlan_tci |= 1916 rte_be_to_cpu_16(vlan_v->tci & 1917 MLX5DV_FLOW_VLAN_PCP_MASK_BE); 1918 } 1919 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) == 1920 MLX5DV_FLOW_VLAN_VID_MASK_BE) { 1921 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK; 1922 vlan->vlan_tci |= 1923 rte_be_to_cpu_16(vlan_v->tci & 1924 MLX5DV_FLOW_VLAN_VID_MASK_BE); 1925 } 1926 if (vlan_m->inner_type == nic_mask.inner_type) 1927 vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type & 1928 vlan_m->inner_type); 1929 } 1930 } 1931 1932 /** 1933 * Validate the push VLAN action. 1934 * 1935 * @param[in] dev 1936 * Pointer to the rte_eth_dev structure. 1937 * @param[in] action_flags 1938 * Holds the actions detected until now. 1939 * @param[in] vlan_m 1940 * The VLAN item mask found in the flow rule pattern, if any. 1941 * @param[in] action 1942 * Pointer to the action structure. 1943 * @param[in] attr 1944 * Pointer to flow attributes. 1945 * @param[out] error 1946 * Pointer to error structure. 1947 * 1948 * @return 1949 * 0 on success, a negative errno value otherwise and rte_errno is set.
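 *
 * The PCP and VID of the tag to be pushed must be derivable: either the
 * VLAN item mask covers them completely, or of_set_vlan_pcp /
 * of_set_vlan_vid actions follow the push. As an illustrative sketch
 * only (hypothetical VID), a valid action pair could look like:
 *   { .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
 *     .conf = &(struct rte_flow_action_of_push_vlan){
 *             .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN) } },
 *   { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
 *     .conf = &(struct rte_flow_action_of_set_vlan_vid){
 *             .vlan_vid = RTE_BE16(100) } },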
1950 */ 1951 static int 1952 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev, 1953 uint64_t action_flags, 1954 const struct rte_flow_item_vlan *vlan_m, 1955 const struct rte_flow_action *action, 1956 const struct rte_flow_attr *attr, 1957 struct rte_flow_error *error) 1958 { 1959 const struct rte_flow_action_of_push_vlan *push_vlan = action->conf; 1960 const struct mlx5_priv *priv = dev->data->dev_private; 1961 1962 if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) && 1963 push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ)) 1964 return rte_flow_error_set(error, EINVAL, 1965 RTE_FLOW_ERROR_TYPE_ACTION, action, 1966 "invalid vlan ethertype"); 1967 if (action_flags & MLX5_FLOW_ACTION_PORT_ID) 1968 return rte_flow_error_set(error, EINVAL, 1969 RTE_FLOW_ERROR_TYPE_ACTION, action, 1970 "wrong action order, port_id should " 1971 "be after push VLAN"); 1972 if (!attr->transfer && priv->representor) 1973 return rte_flow_error_set(error, ENOTSUP, 1974 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1975 "push vlan action for VF representor " 1976 "not supported on NIC table"); 1977 if (vlan_m && 1978 (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) && 1979 (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) != 1980 MLX5DV_FLOW_VLAN_PCP_MASK_BE && 1981 !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) && 1982 !(mlx5_flow_find_action 1983 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP))) 1984 return rte_flow_error_set(error, EINVAL, 1985 RTE_FLOW_ERROR_TYPE_ACTION, action, 1986 "not full match mask on VLAN PCP and " 1987 "there is no of_set_vlan_pcp action, " 1988 "push VLAN action cannot figure out " 1989 "PCP value"); 1990 if (vlan_m && 1991 (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) && 1992 (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) != 1993 MLX5DV_FLOW_VLAN_VID_MASK_BE && 1994 !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) && 1995 !(mlx5_flow_find_action 1996 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID))) 1997 return rte_flow_error_set(error, EINVAL, 1998 RTE_FLOW_ERROR_TYPE_ACTION, action, 1999 "not full match mask on VLAN VID and " 2000 "there is no of_set_vlan_vid action, " 2001 "push VLAN action cannot figure out " 2002 "VID value"); 2003 (void)attr; 2004 return 0; 2005 } 2006 2007 /** 2008 * Validate the set VLAN PCP. 2009 * 2010 * @param[in] action_flags 2011 * Holds the actions detected until now. 2012 * @param[in] actions 2013 * Pointer to the list of actions remaining in the flow rule. 2014 * @param[out] error 2015 * Pointer to error structure. 2016 * 2017 * @return 2018 * 0 on success, a negative errno value otherwise and rte_errno is set. 
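 *
 * @note
 * PCP is a 3-bit field, so only values 0-7 are accepted; the action must
 * follow an of_push_vlan action and must precede any port_id action.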
2019 */ 2020 static int 2021 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags, 2022 const struct rte_flow_action actions[], 2023 struct rte_flow_error *error) 2024 { 2025 const struct rte_flow_action *action = actions; 2026 const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf; 2027 2028 if (conf->vlan_pcp > 7) 2029 return rte_flow_error_set(error, EINVAL, 2030 RTE_FLOW_ERROR_TYPE_ACTION, action, 2031 "VLAN PCP value is too big"); 2032 if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)) 2033 return rte_flow_error_set(error, ENOTSUP, 2034 RTE_FLOW_ERROR_TYPE_ACTION, action, 2035 "set VLAN PCP action must follow " 2036 "the push VLAN action"); 2037 if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) 2038 return rte_flow_error_set(error, ENOTSUP, 2039 RTE_FLOW_ERROR_TYPE_ACTION, action, 2040 "Multiple VLAN PCP modifications are " 2041 "not supported"); 2042 if (action_flags & MLX5_FLOW_ACTION_PORT_ID) 2043 return rte_flow_error_set(error, EINVAL, 2044 RTE_FLOW_ERROR_TYPE_ACTION, action, 2045 "wrong action order, port_id should " 2046 "be after set VLAN PCP"); 2047 return 0; 2048 } 2049 2050 /** 2051 * Validate the set VLAN VID. 2052 * 2053 * @param[in] item_flags 2054 * Holds the items detected in this rule. 2055 * @param[in] action_flags 2056 * Holds the actions detected until now. 2057 * @param[in] actions 2058 * Pointer to the list of actions remaining in the flow rule. 2059 * @param[out] error 2060 * Pointer to error structure. 2061 * 2062 * @return 2063 * 0 on success, a negative errno value otherwise and rte_errno is set. 2064 */ 2065 static int 2066 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags, 2067 uint64_t action_flags, 2068 const struct rte_flow_action actions[], 2069 struct rte_flow_error *error) 2070 { 2071 const struct rte_flow_action *action = actions; 2072 const struct rte_flow_action_of_set_vlan_vid *conf = action->conf; 2073 2074 if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE) 2075 return rte_flow_error_set(error, EINVAL, 2076 RTE_FLOW_ERROR_TYPE_ACTION, action, 2077 "VLAN VID value is too big"); 2078 if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) && 2079 !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) 2080 return rte_flow_error_set(error, ENOTSUP, 2081 RTE_FLOW_ERROR_TYPE_ACTION, action, 2082 "set VLAN VID action must follow push" 2083 " VLAN action or match on VLAN item"); 2084 if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) 2085 return rte_flow_error_set(error, ENOTSUP, 2086 RTE_FLOW_ERROR_TYPE_ACTION, action, 2087 "Multiple VLAN VID modifications are " 2088 "not supported"); 2089 if (action_flags & MLX5_FLOW_ACTION_PORT_ID) 2090 return rte_flow_error_set(error, EINVAL, 2091 RTE_FLOW_ERROR_TYPE_ACTION, action, 2092 "wrong action order, port_id should " 2093 "be after set VLAN VID"); 2094 return 0; 2095 } 2096 2097 /** 2098 * Validate the FLAG action. 2099 * 2100 * @param[in] dev 2101 * Pointer to the rte_eth_dev structure. 2102 * @param[in] action_flags 2103 * Holds the actions detected until now. 2104 * @param[in] attr 2105 * Pointer to flow attributes. 2106 * @param[out] error 2107 * Pointer to error structure. 2108 * 2109 * @return 2110 * 0 on success, a negative errno value otherwise and rte_errno is set.
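 *
 * @note
 * In MLX5_XMETA_MODE_LEGACY the generic mlx5_flow_validate_action_flag()
 * path is used; otherwise a metadata register must be available for
 * MLX5_FLOW_MARK (resolved via mlx5_flow_get_reg_id()) and the flow may
 * not also carry a mark action or a second flag action.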
2111 */ 2112 static int 2113 flow_dv_validate_action_flag(struct rte_eth_dev *dev, 2114 uint64_t action_flags, 2115 const struct rte_flow_attr *attr, 2116 struct rte_flow_error *error) 2117 { 2118 struct mlx5_priv *priv = dev->data->dev_private; 2119 struct mlx5_dev_config *config = &priv->config; 2120 int ret; 2121 2122 /* Fall back if no extended metadata register support. */ 2123 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY) 2124 return mlx5_flow_validate_action_flag(action_flags, attr, 2125 error); 2126 /* Extensive metadata mode requires registers. */ 2127 if (!mlx5_flow_ext_mreg_supported(dev)) 2128 return rte_flow_error_set(error, ENOTSUP, 2129 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2130 "no metadata registers " 2131 "to support flag action"); 2132 if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT)) 2133 return rte_flow_error_set(error, ENOTSUP, 2134 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2135 "extended metadata register" 2136 " isn't available"); 2137 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); 2138 if (ret < 0) 2139 return ret; 2140 MLX5_ASSERT(ret > 0); 2141 if (action_flags & MLX5_FLOW_ACTION_MARK) 2142 return rte_flow_error_set(error, EINVAL, 2143 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2144 "can't mark and flag in same flow"); 2145 if (action_flags & MLX5_FLOW_ACTION_FLAG) 2146 return rte_flow_error_set(error, EINVAL, 2147 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2148 "can't have 2 flag" 2149 " actions in same flow"); 2150 return 0; 2151 } 2152 2153 /** 2154 * Validate MARK action. 2155 * 2156 * @param[in] dev 2157 * Pointer to the rte_eth_dev structure. 2158 * @param[in] action 2159 * Pointer to action. 2160 * @param[in] action_flags 2161 * Holds the actions detected until now. 2162 * @param[in] attr 2163 * Pointer to flow attributes 2164 * @param[out] error 2165 * Pointer to error structure. 2166 * 2167 * @return 2168 * 0 on success, a negative errno value otherwise and rte_errno is set. 2169 */ 2170 static int 2171 flow_dv_validate_action_mark(struct rte_eth_dev *dev, 2172 const struct rte_flow_action *action, 2173 uint64_t action_flags, 2174 const struct rte_flow_attr *attr, 2175 struct rte_flow_error *error) 2176 { 2177 struct mlx5_priv *priv = dev->data->dev_private; 2178 struct mlx5_dev_config *config = &priv->config; 2179 const struct rte_flow_action_mark *mark = action->conf; 2180 int ret; 2181 2182 /* Fall back if no extended metadata register support. */ 2183 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY) 2184 return mlx5_flow_validate_action_mark(action, action_flags, 2185 attr, error); 2186 /* Extensive metadata mode requires registers. 
*/ 2187 if (!mlx5_flow_ext_mreg_supported(dev)) 2188 return rte_flow_error_set(error, ENOTSUP, 2189 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2190 "no metadata registers " 2191 "to support mark action"); 2192 if (!priv->sh->dv_mark_mask) 2193 return rte_flow_error_set(error, ENOTSUP, 2194 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2195 "extended metadata register" 2196 " isn't available"); 2197 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); 2198 if (ret < 0) 2199 return ret; 2200 MLX5_ASSERT(ret > 0); 2201 if (!mark) 2202 return rte_flow_error_set(error, EINVAL, 2203 RTE_FLOW_ERROR_TYPE_ACTION, action, 2204 "configuration cannot be null"); 2205 if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask)) 2206 return rte_flow_error_set(error, EINVAL, 2207 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2208 &mark->id, 2209 "mark id exceeds the limit"); 2210 if (action_flags & MLX5_FLOW_ACTION_FLAG) 2211 return rte_flow_error_set(error, EINVAL, 2212 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2213 "can't flag and mark in same flow"); 2214 if (action_flags & MLX5_FLOW_ACTION_MARK) 2215 return rte_flow_error_set(error, EINVAL, 2216 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2217 "can't have 2 mark actions in same" 2218 " flow"); 2219 return 0; 2220 } 2221 2222 /** 2223 * Validate SET_META action. 2224 * 2225 * @param[in] dev 2226 * Pointer to the rte_eth_dev structure. 2227 * @param[in] action 2228 * Pointer to the action structure. 2229 * @param[in] action_flags 2230 * Holds the actions detected until now. 2231 * @param[in] attr 2232 * Pointer to flow attributes 2233 * @param[out] error 2234 * Pointer to error structure. 2235 * 2236 * @return 2237 * 0 on success, a negative errno value otherwise and rte_errno is set. 2238 */ 2239 static int 2240 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev, 2241 const struct rte_flow_action *action, 2242 uint64_t action_flags __rte_unused, 2243 const struct rte_flow_attr *attr, 2244 struct rte_flow_error *error) 2245 { 2246 const struct rte_flow_action_set_meta *conf; 2247 uint32_t nic_mask = UINT32_MAX; 2248 int reg; 2249 2250 if (!mlx5_flow_ext_mreg_supported(dev)) 2251 return rte_flow_error_set(error, ENOTSUP, 2252 RTE_FLOW_ERROR_TYPE_ACTION, action, 2253 "extended metadata register" 2254 " isn't supported"); 2255 reg = flow_dv_get_metadata_reg(dev, attr, error); 2256 if (reg < 0) 2257 return reg; 2258 if (reg != REG_A && reg != REG_B) { 2259 struct mlx5_priv *priv = dev->data->dev_private; 2260 2261 nic_mask = priv->sh->dv_meta_mask; 2262 } 2263 if (!(action->conf)) 2264 return rte_flow_error_set(error, EINVAL, 2265 RTE_FLOW_ERROR_TYPE_ACTION, action, 2266 "configuration cannot be null"); 2267 conf = (const struct rte_flow_action_set_meta *)action->conf; 2268 if (!conf->mask) 2269 return rte_flow_error_set(error, EINVAL, 2270 RTE_FLOW_ERROR_TYPE_ACTION, action, 2271 "zero mask doesn't have any effect"); 2272 if (conf->mask & ~nic_mask) 2273 return rte_flow_error_set(error, EINVAL, 2274 RTE_FLOW_ERROR_TYPE_ACTION, action, 2275 "meta data must be within reg C0"); 2276 return 0; 2277 } 2278 2279 /** 2280 * Validate SET_TAG action. 2281 * 2282 * @param[in] dev 2283 * Pointer to the rte_eth_dev structure. 2284 * @param[in] action 2285 * Pointer to the action structure. 2286 * @param[in] action_flags 2287 * Holds the actions detected until now. 2288 * @param[in] attr 2289 * Pointer to flow attributes 2290 * @param[out] error 2291 * Pointer to error structure. 2292 * 2293 * @return 2294 * 0 on success, a negative errno value otherwise and rte_errno is set. 
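 *
 * As an illustrative sketch only (hypothetical values), a typical
 * configuration is:
 *   struct rte_flow_action_set_tag conf = {
 *       .index = 0,     // application tag register index
 *       .data = 0x1234,
 *       .mask = 0xffff, // a zero mask is rejected below
 *   };
 * On ingress non-transfer flows the action is rejected when a terminal
 * action (drop/queue/RSS) was already specified, since it would have no
 * effect there.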
2295 */ 2296 static int 2297 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev, 2298 const struct rte_flow_action *action, 2299 uint64_t action_flags, 2300 const struct rte_flow_attr *attr, 2301 struct rte_flow_error *error) 2302 { 2303 const struct rte_flow_action_set_tag *conf; 2304 const uint64_t terminal_action_flags = 2305 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | 2306 MLX5_FLOW_ACTION_RSS; 2307 int ret; 2308 2309 if (!mlx5_flow_ext_mreg_supported(dev)) 2310 return rte_flow_error_set(error, ENOTSUP, 2311 RTE_FLOW_ERROR_TYPE_ACTION, action, 2312 "extensive metadata register" 2313 " isn't supported"); 2314 if (!(action->conf)) 2315 return rte_flow_error_set(error, EINVAL, 2316 RTE_FLOW_ERROR_TYPE_ACTION, action, 2317 "configuration cannot be null"); 2318 conf = (const struct rte_flow_action_set_tag *)action->conf; 2319 if (!conf->mask) 2320 return rte_flow_error_set(error, EINVAL, 2321 RTE_FLOW_ERROR_TYPE_ACTION, action, 2322 "zero mask doesn't have any effect"); 2323 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error); 2324 if (ret < 0) 2325 return ret; 2326 if (!attr->transfer && attr->ingress && 2327 (action_flags & terminal_action_flags)) 2328 return rte_flow_error_set(error, EINVAL, 2329 RTE_FLOW_ERROR_TYPE_ACTION, action, 2330 "set_tag has no effect" 2331 " with terminal actions"); 2332 return 0; 2333 } 2334 2335 /** 2336 * Validate count action. 2337 * 2338 * @param[in] dev 2339 * Pointer to rte_eth_dev structure. 2340 * @param[out] error 2341 * Pointer to error structure. 2342 * 2343 * @return 2344 * 0 on success, a negative errno value otherwise and rte_errno is set. 2345 */ 2346 static int 2347 flow_dv_validate_action_count(struct rte_eth_dev *dev, 2348 struct rte_flow_error *error) 2349 { 2350 struct mlx5_priv *priv = dev->data->dev_private; 2351 2352 if (!priv->config.devx) 2353 goto notsup_err; 2354 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS 2355 return 0; 2356 #endif 2357 notsup_err: 2358 return rte_flow_error_set 2359 (error, ENOTSUP, 2360 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2361 NULL, 2362 "count action not supported"); 2363 } 2364 2365 /** 2366 * Validate the L2 encap action. 2367 * 2368 * @param[in] dev 2369 * Pointer to the rte_eth_dev structure. 2370 * @param[in] action_flags 2371 * Holds the actions detected until now. 2372 * @param[in] action 2373 * Pointer to the action structure. 2374 * @param[in] attr 2375 * Pointer to flow attributes. 2376 * @param[out] error 2377 * Pointer to error structure. 2378 * 2379 * @return 2380 * 0 on success, a negative errno value otherwise and rte_errno is set. 2381 */ 2382 static int 2383 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev, 2384 uint64_t action_flags, 2385 const struct rte_flow_action *action, 2386 const struct rte_flow_attr *attr, 2387 struct rte_flow_error *error) 2388 { 2389 const struct mlx5_priv *priv = dev->data->dev_private; 2390 2391 if (!(action->conf)) 2392 return rte_flow_error_set(error, EINVAL, 2393 RTE_FLOW_ERROR_TYPE_ACTION, action, 2394 "configuration cannot be null"); 2395 if (action_flags & MLX5_FLOW_ACTION_ENCAP) 2396 return rte_flow_error_set(error, EINVAL, 2397 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2398 "can only have a single encap action " 2399 "in a flow"); 2400 if (!attr->transfer && priv->representor) 2401 return rte_flow_error_set(error, ENOTSUP, 2402 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2403 "encap action for VF representor " 2404 "not supported on NIC table"); 2405 return 0; 2406 } 2407 2408 /** 2409 * Validate a decap action. 
2410 * 2411 * @param[in] dev 2412 * Pointer to the rte_eth_dev structure. 2413 * @param[in] action_flags 2414 * Holds the actions detected until now. 2415 * @param[in] attr 2416 * Pointer to flow attributes 2417 * @param[out] error 2418 * Pointer to error structure. 2419 * 2420 * @return 2421 * 0 on success, a negative errno value otherwise and rte_errno is set. 2422 */ 2423 static int 2424 flow_dv_validate_action_decap(struct rte_eth_dev *dev, 2425 uint64_t action_flags, 2426 const struct rte_flow_attr *attr, 2427 struct rte_flow_error *error) 2428 { 2429 const struct mlx5_priv *priv = dev->data->dev_private; 2430 2431 if (priv->config.hca_attr.scatter_fcs_w_decap_disable && 2432 !priv->config.decap_en) 2433 return rte_flow_error_set(error, ENOTSUP, 2434 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2435 "decap is not enabled"); 2436 if (action_flags & MLX5_FLOW_XCAP_ACTIONS) 2437 return rte_flow_error_set(error, ENOTSUP, 2438 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2439 action_flags & 2440 MLX5_FLOW_ACTION_DECAP ? "can only " 2441 "have a single decap action" : "decap " 2442 "after encap is not supported"); 2443 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) 2444 return rte_flow_error_set(error, EINVAL, 2445 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2446 "can't have decap action after" 2447 " modify action"); 2448 if (attr->egress) 2449 return rte_flow_error_set(error, ENOTSUP, 2450 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, 2451 NULL, 2452 "decap action not supported for " 2453 "egress"); 2454 if (!attr->transfer && priv->representor) 2455 return rte_flow_error_set(error, ENOTSUP, 2456 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2457 "decap action for VF representor " 2458 "not supported on NIC table"); 2459 return 0; 2460 } 2461 2462 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,}; 2463 2464 /** 2465 * Validate the raw encap and decap actions. 2466 * 2467 * @param[in] dev 2468 * Pointer to the rte_eth_dev structure. 2469 * @param[in] decap 2470 * Pointer to the decap action. 2471 * @param[in] encap 2472 * Pointer to the encap action. 2473 * @param[in] attr 2474 * Pointer to flow attributes 2475 * @param[in/out] action_flags 2476 * Holds the actions detected until now. 2477 * @param[out] actions_n 2478 * pointer to the number of actions counter. 2479 * @param[out] error 2480 * Pointer to error structure. 2481 * 2482 * @return 2483 * 0 on success, a negative errno value otherwise and rte_errno is set. 2484 */ 2485 static int 2486 flow_dv_validate_action_raw_encap_decap 2487 (struct rte_eth_dev *dev, 2488 const struct rte_flow_action_raw_decap *decap, 2489 const struct rte_flow_action_raw_encap *encap, 2490 const struct rte_flow_attr *attr, uint64_t *action_flags, 2491 int *actions_n, struct rte_flow_error *error) 2492 { 2493 const struct mlx5_priv *priv = dev->data->dev_private; 2494 int ret; 2495 2496 if (encap && (!encap->size || !encap->data)) 2497 return rte_flow_error_set(error, EINVAL, 2498 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2499 "raw encap data cannot be empty"); 2500 if (decap && encap) { 2501 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE && 2502 encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) 2503 /* L3 encap. */ 2504 decap = NULL; 2505 else if (encap->size <= 2506 MLX5_ENCAPSULATION_DECISION_SIZE && 2507 decap->size > 2508 MLX5_ENCAPSULATION_DECISION_SIZE) 2509 /* L3 decap. */ 2510 encap = NULL; 2511 else if (encap->size > 2512 MLX5_ENCAPSULATION_DECISION_SIZE && 2513 decap->size > 2514 MLX5_ENCAPSULATION_DECISION_SIZE) 2515 /* 2 L2 actions: encap and decap. 
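Both buffers exceed MLX5_ENCAPSULATION_DECISION_SIZE, so the pair is kept as a plain L2 decap followed by an L2 encap.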
*/ 2516 ; 2517 else 2518 return rte_flow_error_set(error, 2519 ENOTSUP, 2520 RTE_FLOW_ERROR_TYPE_ACTION, 2521 NULL, "unsupported too small " 2522 "raw decap and too small raw " 2523 "encap combination"); 2524 } 2525 if (decap) { 2526 ret = flow_dv_validate_action_decap(dev, *action_flags, attr, 2527 error); 2528 if (ret < 0) 2529 return ret; 2530 *action_flags |= MLX5_FLOW_ACTION_DECAP; 2531 ++(*actions_n); 2532 } 2533 if (encap) { 2534 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE) 2535 return rte_flow_error_set(error, ENOTSUP, 2536 RTE_FLOW_ERROR_TYPE_ACTION, 2537 NULL, 2538 "small raw encap size"); 2539 if (*action_flags & MLX5_FLOW_ACTION_ENCAP) 2540 return rte_flow_error_set(error, EINVAL, 2541 RTE_FLOW_ERROR_TYPE_ACTION, 2542 NULL, 2543 "more than one encap action"); 2544 if (!attr->transfer && priv->representor) 2545 return rte_flow_error_set 2546 (error, ENOTSUP, 2547 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2548 "encap action for VF representor " 2549 "not supported on NIC table"); 2550 *action_flags |= MLX5_FLOW_ACTION_ENCAP; 2551 ++(*actions_n); 2552 } 2553 return 0; 2554 } 2555 2556 /** 2557 * Match encap_decap resource. 2558 * 2559 * @param entry 2560 * Pointer to exist resource entry object. 2561 * @param ctx 2562 * Pointer to new encap_decap resource. 2563 * 2564 * @return 2565 * 0 on matching, -1 otherwise. 2566 */ 2567 static int 2568 flow_dv_encap_decap_resource_match(struct mlx5_hlist_entry *entry, void *ctx) 2569 { 2570 struct mlx5_flow_dv_encap_decap_resource *resource; 2571 struct mlx5_flow_dv_encap_decap_resource *cache_resource; 2572 2573 resource = (struct mlx5_flow_dv_encap_decap_resource *)ctx; 2574 cache_resource = container_of(entry, 2575 struct mlx5_flow_dv_encap_decap_resource, 2576 entry); 2577 if (resource->entry.key == cache_resource->entry.key && 2578 resource->reformat_type == cache_resource->reformat_type && 2579 resource->ft_type == cache_resource->ft_type && 2580 resource->flags == cache_resource->flags && 2581 resource->size == cache_resource->size && 2582 !memcmp((const void *)resource->buf, 2583 (const void *)cache_resource->buf, 2584 resource->size)) 2585 return 0; 2586 return -1; 2587 } 2588 2589 /** 2590 * Find existing encap/decap resource or create and register a new one. 2591 * 2592 * @param[in, out] dev 2593 * Pointer to rte_eth_dev structure. 2594 * @param[in, out] resource 2595 * Pointer to encap/decap resource. 2596 * @parm[in, out] dev_flow 2597 * Pointer to the dev_flow. 2598 * @param[out] error 2599 * pointer to error structure. 2600 * 2601 * @return 2602 * 0 on success otherwise -errno and errno is set. 2603 */ 2604 static int 2605 flow_dv_encap_decap_resource_register 2606 (struct rte_eth_dev *dev, 2607 struct mlx5_flow_dv_encap_decap_resource *resource, 2608 struct mlx5_flow *dev_flow, 2609 struct rte_flow_error *error) 2610 { 2611 struct mlx5_priv *priv = dev->data->dev_private; 2612 struct mlx5_dev_ctx_shared *sh = priv->sh; 2613 struct mlx5_flow_dv_encap_decap_resource *cache_resource; 2614 struct mlx5dv_dr_domain *domain; 2615 struct mlx5_hlist_entry *entry; 2616 union mlx5_flow_encap_decap_key encap_decap_key = { 2617 { 2618 .ft_type = resource->ft_type, 2619 .refmt_type = resource->reformat_type, 2620 .buf_size = resource->size, 2621 .table_level = !!dev_flow->dv.group, 2622 .cksum = 0, 2623 } 2624 }; 2625 int ret; 2626 2627 resource->flags = dev_flow->dv.group ? 
0 : 1; 2628 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) 2629 domain = sh->fdb_domain; 2630 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX) 2631 domain = sh->rx_domain; 2632 else 2633 domain = sh->tx_domain; 2634 encap_decap_key.cksum = __rte_raw_cksum(resource->buf, 2635 resource->size, 0); 2636 resource->entry.key = encap_decap_key.v64; 2637 /* Lookup a matching resource from cache. */ 2638 entry = mlx5_hlist_lookup_ex(sh->encaps_decaps, resource->entry.key, 2639 flow_dv_encap_decap_resource_match, 2640 (void *)resource); 2641 if (entry) { 2642 cache_resource = container_of(entry, 2643 struct mlx5_flow_dv_encap_decap_resource, entry); 2644 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++", 2645 (void *)cache_resource, 2646 rte_atomic32_read(&cache_resource->refcnt)); 2647 rte_atomic32_inc(&cache_resource->refcnt); 2648 dev_flow->handle->dvh.rix_encap_decap = cache_resource->idx; 2649 dev_flow->dv.encap_decap = cache_resource; 2650 return 0; 2651 } 2652 /* Register new encap/decap resource. */ 2653 cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], 2654 &dev_flow->handle->dvh.rix_encap_decap); 2655 if (!cache_resource) 2656 return rte_flow_error_set(error, ENOMEM, 2657 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2658 "cannot allocate resource memory"); 2659 *cache_resource = *resource; 2660 cache_resource->idx = dev_flow->handle->dvh.rix_encap_decap; 2661 ret = mlx5_flow_os_create_flow_action_packet_reformat 2662 (sh->ctx, domain, cache_resource, 2663 &cache_resource->action); 2664 if (ret) { 2665 mlx5_free(cache_resource); 2666 return rte_flow_error_set(error, ENOMEM, 2667 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2668 NULL, "cannot create action"); 2669 } 2670 rte_atomic32_init(&cache_resource->refcnt); 2671 rte_atomic32_inc(&cache_resource->refcnt); 2672 if (mlx5_hlist_insert_ex(sh->encaps_decaps, &cache_resource->entry, 2673 flow_dv_encap_decap_resource_match, 2674 (void *)cache_resource)) { 2675 claim_zero(mlx5_flow_os_destroy_flow_action 2676 (cache_resource->action)); 2677 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], 2678 cache_resource->idx); 2679 return rte_flow_error_set(error, EEXIST, 2680 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2681 NULL, "action exist"); 2682 } 2683 dev_flow->dv.encap_decap = cache_resource; 2684 DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++", 2685 (void *)cache_resource, 2686 rte_atomic32_read(&cache_resource->refcnt)); 2687 return 0; 2688 } 2689 2690 /** 2691 * Find existing table jump resource or create and register a new one. 2692 * 2693 * @param[in, out] dev 2694 * Pointer to rte_eth_dev structure. 2695 * @param[in, out] tbl 2696 * Pointer to flow table resource. 2697 * @parm[in, out] dev_flow 2698 * Pointer to the dev_flow. 2699 * @param[out] error 2700 * pointer to error structure. 2701 * 2702 * @return 2703 * 0 on success otherwise -errno and errno is set. 
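 *
 * @note
 * The first reference creates the destination-table action through
 * mlx5_flow_os_create_flow_action_dest_flow_tbl(); later references reuse
 * the cached action and release the extra table reference taken by the
 * lookup, so only the jump reference counter grows.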
2704 */ 2705 static int 2706 flow_dv_jump_tbl_resource_register 2707 (struct rte_eth_dev *dev __rte_unused, 2708 struct mlx5_flow_tbl_resource *tbl, 2709 struct mlx5_flow *dev_flow, 2710 struct rte_flow_error *error) 2711 { 2712 struct mlx5_flow_tbl_data_entry *tbl_data = 2713 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); 2714 int cnt, ret; 2715 2716 MLX5_ASSERT(tbl); 2717 cnt = rte_atomic32_read(&tbl_data->jump.refcnt); 2718 if (!cnt) { 2719 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl 2720 (tbl->obj, &tbl_data->jump.action); 2721 if (ret) 2722 return rte_flow_error_set(error, ENOMEM, 2723 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2724 NULL, "cannot create jump action"); 2725 DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++", 2726 (void *)&tbl_data->jump, cnt); 2727 } else { 2728 /* old jump should not make the table ref++. */ 2729 flow_dv_tbl_resource_release(dev, &tbl_data->tbl); 2730 MLX5_ASSERT(tbl_data->jump.action); 2731 DRV_LOG(DEBUG, "existed jump table resource %p: refcnt %d++", 2732 (void *)&tbl_data->jump, cnt); 2733 } 2734 rte_atomic32_inc(&tbl_data->jump.refcnt); 2735 dev_flow->handle->rix_jump = tbl_data->idx; 2736 dev_flow->dv.jump = &tbl_data->jump; 2737 return 0; 2738 } 2739 2740 /** 2741 * Find existing default miss resource or create and register a new one. 2742 * 2743 * @param[in, out] dev 2744 * Pointer to rte_eth_dev structure. 2745 * @param[out] error 2746 * pointer to error structure. 2747 * 2748 * @return 2749 * 0 on success otherwise -errno and errno is set. 2750 */ 2751 static int 2752 flow_dv_default_miss_resource_register(struct rte_eth_dev *dev, 2753 struct rte_flow_error *error) 2754 { 2755 struct mlx5_priv *priv = dev->data->dev_private; 2756 struct mlx5_dev_ctx_shared *sh = priv->sh; 2757 struct mlx5_flow_default_miss_resource *cache_resource = 2758 &sh->default_miss; 2759 int cnt = rte_atomic32_read(&cache_resource->refcnt); 2760 2761 if (!cnt) { 2762 MLX5_ASSERT(cache_resource->action); 2763 cache_resource->action = 2764 mlx5_glue->dr_create_flow_action_default_miss(); 2765 if (!cache_resource->action) 2766 return rte_flow_error_set(error, ENOMEM, 2767 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2768 "cannot create default miss action"); 2769 DRV_LOG(DEBUG, "new default miss resource %p: refcnt %d++", 2770 (void *)cache_resource->action, cnt); 2771 } 2772 rte_atomic32_inc(&cache_resource->refcnt); 2773 return 0; 2774 } 2775 2776 /** 2777 * Find existing table port ID resource or create and register a new one. 2778 * 2779 * @param[in, out] dev 2780 * Pointer to rte_eth_dev structure. 2781 * @param[in, out] resource 2782 * Pointer to port ID action resource. 2783 * @parm[in, out] dev_flow 2784 * Pointer to the dev_flow. 2785 * @param[out] error 2786 * pointer to error structure. 2787 * 2788 * @return 2789 * 0 on success otherwise -errno and errno is set. 2790 */ 2791 static int 2792 flow_dv_port_id_action_resource_register 2793 (struct rte_eth_dev *dev, 2794 struct mlx5_flow_dv_port_id_action_resource *resource, 2795 struct mlx5_flow *dev_flow, 2796 struct rte_flow_error *error) 2797 { 2798 struct mlx5_priv *priv = dev->data->dev_private; 2799 struct mlx5_dev_ctx_shared *sh = priv->sh; 2800 struct mlx5_flow_dv_port_id_action_resource *cache_resource; 2801 uint32_t idx = 0; 2802 int ret; 2803 2804 /* Lookup a matching resource from cache. 
*/ 2805 ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PORT_ID], sh->port_id_action_list, 2806 idx, cache_resource, next) { 2807 if (resource->port_id == cache_resource->port_id) { 2808 DRV_LOG(DEBUG, "port id action resource resource %p: " 2809 "refcnt %d++", 2810 (void *)cache_resource, 2811 rte_atomic32_read(&cache_resource->refcnt)); 2812 rte_atomic32_inc(&cache_resource->refcnt); 2813 dev_flow->handle->rix_port_id_action = idx; 2814 dev_flow->dv.port_id_action = cache_resource; 2815 return 0; 2816 } 2817 } 2818 /* Register new port id action resource. */ 2819 cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], 2820 &dev_flow->handle->rix_port_id_action); 2821 if (!cache_resource) 2822 return rte_flow_error_set(error, ENOMEM, 2823 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2824 "cannot allocate resource memory"); 2825 *cache_resource = *resource; 2826 ret = mlx5_flow_os_create_flow_action_dest_port 2827 (priv->sh->fdb_domain, resource->port_id, 2828 &cache_resource->action); 2829 if (ret) { 2830 mlx5_free(cache_resource); 2831 return rte_flow_error_set(error, ENOMEM, 2832 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2833 NULL, "cannot create action"); 2834 } 2835 rte_atomic32_init(&cache_resource->refcnt); 2836 rte_atomic32_inc(&cache_resource->refcnt); 2837 ILIST_INSERT(sh->ipool[MLX5_IPOOL_PORT_ID], &sh->port_id_action_list, 2838 dev_flow->handle->rix_port_id_action, cache_resource, 2839 next); 2840 dev_flow->dv.port_id_action = cache_resource; 2841 DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++", 2842 (void *)cache_resource, 2843 rte_atomic32_read(&cache_resource->refcnt)); 2844 return 0; 2845 } 2846 2847 /** 2848 * Find existing push vlan resource or create and register a new one. 2849 * 2850 * @param [in, out] dev 2851 * Pointer to rte_eth_dev structure. 2852 * @param[in, out] resource 2853 * Pointer to port ID action resource. 2854 * @parm[in, out] dev_flow 2855 * Pointer to the dev_flow. 2856 * @param[out] error 2857 * pointer to error structure. 2858 * 2859 * @return 2860 * 0 on success otherwise -errno and errno is set. 2861 */ 2862 static int 2863 flow_dv_push_vlan_action_resource_register 2864 (struct rte_eth_dev *dev, 2865 struct mlx5_flow_dv_push_vlan_action_resource *resource, 2866 struct mlx5_flow *dev_flow, 2867 struct rte_flow_error *error) 2868 { 2869 struct mlx5_priv *priv = dev->data->dev_private; 2870 struct mlx5_dev_ctx_shared *sh = priv->sh; 2871 struct mlx5_flow_dv_push_vlan_action_resource *cache_resource; 2872 struct mlx5dv_dr_domain *domain; 2873 uint32_t idx = 0; 2874 int ret; 2875 2876 /* Lookup a matching resource from cache. */ 2877 ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PUSH_VLAN], 2878 sh->push_vlan_action_list, idx, cache_resource, next) { 2879 if (resource->vlan_tag == cache_resource->vlan_tag && 2880 resource->ft_type == cache_resource->ft_type) { 2881 DRV_LOG(DEBUG, "push-VLAN action resource resource %p: " 2882 "refcnt %d++", 2883 (void *)cache_resource, 2884 rte_atomic32_read(&cache_resource->refcnt)); 2885 rte_atomic32_inc(&cache_resource->refcnt); 2886 dev_flow->handle->dvh.rix_push_vlan = idx; 2887 dev_flow->dv.push_vlan_res = cache_resource; 2888 return 0; 2889 } 2890 } 2891 /* Register new push_vlan action resource. 
*/ 2892 cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], 2893 &dev_flow->handle->dvh.rix_push_vlan); 2894 if (!cache_resource) 2895 return rte_flow_error_set(error, ENOMEM, 2896 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2897 "cannot allocate resource memory"); 2898 *cache_resource = *resource; 2899 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) 2900 domain = sh->fdb_domain; 2901 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX) 2902 domain = sh->rx_domain; 2903 else 2904 domain = sh->tx_domain; 2905 ret = mlx5_flow_os_create_flow_action_push_vlan 2906 (domain, resource->vlan_tag, 2907 &cache_resource->action); 2908 if (ret) { 2909 mlx5_free(cache_resource); 2910 return rte_flow_error_set(error, ENOMEM, 2911 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2912 NULL, "cannot create action"); 2913 } 2914 rte_atomic32_init(&cache_resource->refcnt); 2915 rte_atomic32_inc(&cache_resource->refcnt); 2916 ILIST_INSERT(sh->ipool[MLX5_IPOOL_PUSH_VLAN], 2917 &sh->push_vlan_action_list, 2918 dev_flow->handle->dvh.rix_push_vlan, 2919 cache_resource, next); 2920 dev_flow->dv.push_vlan_res = cache_resource; 2921 DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++", 2922 (void *)cache_resource, 2923 rte_atomic32_read(&cache_resource->refcnt)); 2924 return 0; 2925 } 2926 /** 2927 * Get the size of specific rte_flow_item_type hdr size 2928 * 2929 * @param[in] item_type 2930 * Tested rte_flow_item_type. 2931 * 2932 * @return 2933 * sizeof struct item_type, 0 if void or irrelevant. 2934 */ 2935 static size_t 2936 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type) 2937 { 2938 size_t retval; 2939 2940 switch (item_type) { 2941 case RTE_FLOW_ITEM_TYPE_ETH: 2942 retval = sizeof(struct rte_ether_hdr); 2943 break; 2944 case RTE_FLOW_ITEM_TYPE_VLAN: 2945 retval = sizeof(struct rte_vlan_hdr); 2946 break; 2947 case RTE_FLOW_ITEM_TYPE_IPV4: 2948 retval = sizeof(struct rte_ipv4_hdr); 2949 break; 2950 case RTE_FLOW_ITEM_TYPE_IPV6: 2951 retval = sizeof(struct rte_ipv6_hdr); 2952 break; 2953 case RTE_FLOW_ITEM_TYPE_UDP: 2954 retval = sizeof(struct rte_udp_hdr); 2955 break; 2956 case RTE_FLOW_ITEM_TYPE_TCP: 2957 retval = sizeof(struct rte_tcp_hdr); 2958 break; 2959 case RTE_FLOW_ITEM_TYPE_VXLAN: 2960 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: 2961 retval = sizeof(struct rte_vxlan_hdr); 2962 break; 2963 case RTE_FLOW_ITEM_TYPE_GRE: 2964 case RTE_FLOW_ITEM_TYPE_NVGRE: 2965 retval = sizeof(struct rte_gre_hdr); 2966 break; 2967 case RTE_FLOW_ITEM_TYPE_MPLS: 2968 retval = sizeof(struct rte_mpls_hdr); 2969 break; 2970 case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */ 2971 default: 2972 retval = 0; 2973 break; 2974 } 2975 return retval; 2976 } 2977 2978 #define MLX5_ENCAP_IPV4_VERSION 0x40 2979 #define MLX5_ENCAP_IPV4_IHL_MIN 0x05 2980 #define MLX5_ENCAP_IPV4_TTL_DEF 0x40 2981 #define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000 2982 #define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff 2983 #define MLX5_ENCAP_VXLAN_FLAGS 0x08000000 2984 #define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04 2985 2986 /** 2987 * Convert the encap action data from list of rte_flow_item to raw buffer 2988 * 2989 * @param[in] items 2990 * Pointer to rte_flow_item objects list. 2991 * @param[out] buf 2992 * Pointer to the output buffer. 2993 * @param[out] size 2994 * Pointer to the output buffer size. 2995 * @param[out] error 2996 * Pointer to the error structure. 2997 * 2998 * @return 2999 * 0 on success, a negative errno value otherwise and rte_errno is set. 
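 *
 * As an illustrative sketch, a VXLAN encapsulation is typically described
 * by an eth / ipv4 / udp / vxlan item list: each header spec is copied
 * into @p buf in order and any unset fields (EtherType, IP next protocol,
 * UDP destination port, VXLAN flags) are filled with the MLX5_ENCAP_*
 * defaults defined above. The accumulated size may not exceed
 * MLX5_ENCAP_MAX_LEN.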
3000 */ 3001 static int 3002 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf, 3003 size_t *size, struct rte_flow_error *error) 3004 { 3005 struct rte_ether_hdr *eth = NULL; 3006 struct rte_vlan_hdr *vlan = NULL; 3007 struct rte_ipv4_hdr *ipv4 = NULL; 3008 struct rte_ipv6_hdr *ipv6 = NULL; 3009 struct rte_udp_hdr *udp = NULL; 3010 struct rte_vxlan_hdr *vxlan = NULL; 3011 struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL; 3012 struct rte_gre_hdr *gre = NULL; 3013 size_t len; 3014 size_t temp_size = 0; 3015 3016 if (!items) 3017 return rte_flow_error_set(error, EINVAL, 3018 RTE_FLOW_ERROR_TYPE_ACTION, 3019 NULL, "invalid empty data"); 3020 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { 3021 len = flow_dv_get_item_hdr_len(items->type); 3022 if (len + temp_size > MLX5_ENCAP_MAX_LEN) 3023 return rte_flow_error_set(error, EINVAL, 3024 RTE_FLOW_ERROR_TYPE_ACTION, 3025 (void *)items->type, 3026 "items total size is too big" 3027 " for encap action"); 3028 rte_memcpy((void *)&buf[temp_size], items->spec, len); 3029 switch (items->type) { 3030 case RTE_FLOW_ITEM_TYPE_ETH: 3031 eth = (struct rte_ether_hdr *)&buf[temp_size]; 3032 break; 3033 case RTE_FLOW_ITEM_TYPE_VLAN: 3034 vlan = (struct rte_vlan_hdr *)&buf[temp_size]; 3035 if (!eth) 3036 return rte_flow_error_set(error, EINVAL, 3037 RTE_FLOW_ERROR_TYPE_ACTION, 3038 (void *)items->type, 3039 "eth header not found"); 3040 if (!eth->ether_type) 3041 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN); 3042 break; 3043 case RTE_FLOW_ITEM_TYPE_IPV4: 3044 ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size]; 3045 if (!vlan && !eth) 3046 return rte_flow_error_set(error, EINVAL, 3047 RTE_FLOW_ERROR_TYPE_ACTION, 3048 (void *)items->type, 3049 "neither eth nor vlan" 3050 " header found"); 3051 if (vlan && !vlan->eth_proto) 3052 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4); 3053 else if (eth && !eth->ether_type) 3054 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4); 3055 if (!ipv4->version_ihl) 3056 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION | 3057 MLX5_ENCAP_IPV4_IHL_MIN; 3058 if (!ipv4->time_to_live) 3059 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF; 3060 break; 3061 case RTE_FLOW_ITEM_TYPE_IPV6: 3062 ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size]; 3063 if (!vlan && !eth) 3064 return rte_flow_error_set(error, EINVAL, 3065 RTE_FLOW_ERROR_TYPE_ACTION, 3066 (void *)items->type, 3067 "neither eth nor vlan" 3068 " header found"); 3069 if (vlan && !vlan->eth_proto) 3070 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6); 3071 else if (eth && !eth->ether_type) 3072 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6); 3073 if (!ipv6->vtc_flow) 3074 ipv6->vtc_flow = 3075 RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW); 3076 if (!ipv6->hop_limits) 3077 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT; 3078 break; 3079 case RTE_FLOW_ITEM_TYPE_UDP: 3080 udp = (struct rte_udp_hdr *)&buf[temp_size]; 3081 if (!ipv4 && !ipv6) 3082 return rte_flow_error_set(error, EINVAL, 3083 RTE_FLOW_ERROR_TYPE_ACTION, 3084 (void *)items->type, 3085 "ip header not found"); 3086 if (ipv4 && !ipv4->next_proto_id) 3087 ipv4->next_proto_id = IPPROTO_UDP; 3088 else if (ipv6 && !ipv6->proto) 3089 ipv6->proto = IPPROTO_UDP; 3090 break; 3091 case RTE_FLOW_ITEM_TYPE_VXLAN: 3092 vxlan = (struct rte_vxlan_hdr *)&buf[temp_size]; 3093 if (!udp) 3094 return rte_flow_error_set(error, EINVAL, 3095 RTE_FLOW_ERROR_TYPE_ACTION, 3096 (void *)items->type, 3097 "udp header not found"); 3098 if (!udp->dst_port) 3099 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN); 3100 if (!vxlan->vx_flags) 3101 vxlan->vx_flags = 
3102 RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS); 3103 break; 3104 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: 3105 vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size]; 3106 if (!udp) 3107 return rte_flow_error_set(error, EINVAL, 3108 RTE_FLOW_ERROR_TYPE_ACTION, 3109 (void *)items->type, 3110 "udp header not found"); 3111 if (!vxlan_gpe->proto) 3112 return rte_flow_error_set(error, EINVAL, 3113 RTE_FLOW_ERROR_TYPE_ACTION, 3114 (void *)items->type, 3115 "next protocol not found"); 3116 if (!udp->dst_port) 3117 udp->dst_port = 3118 RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE); 3119 if (!vxlan_gpe->vx_flags) 3120 vxlan_gpe->vx_flags = 3121 MLX5_ENCAP_VXLAN_GPE_FLAGS; 3122 break; 3123 case RTE_FLOW_ITEM_TYPE_GRE: 3124 case RTE_FLOW_ITEM_TYPE_NVGRE: 3125 gre = (struct rte_gre_hdr *)&buf[temp_size]; 3126 if (!gre->proto) 3127 return rte_flow_error_set(error, EINVAL, 3128 RTE_FLOW_ERROR_TYPE_ACTION, 3129 (void *)items->type, 3130 "next protocol not found"); 3131 if (!ipv4 && !ipv6) 3132 return rte_flow_error_set(error, EINVAL, 3133 RTE_FLOW_ERROR_TYPE_ACTION, 3134 (void *)items->type, 3135 "ip header not found"); 3136 if (ipv4 && !ipv4->next_proto_id) 3137 ipv4->next_proto_id = IPPROTO_GRE; 3138 else if (ipv6 && !ipv6->proto) 3139 ipv6->proto = IPPROTO_GRE; 3140 break; 3141 case RTE_FLOW_ITEM_TYPE_VOID: 3142 break; 3143 default: 3144 return rte_flow_error_set(error, EINVAL, 3145 RTE_FLOW_ERROR_TYPE_ACTION, 3146 (void *)items->type, 3147 "unsupported item type"); 3148 break; 3149 } 3150 temp_size += len; 3151 } 3152 *size = temp_size; 3153 return 0; 3154 } 3155 3156 static int 3157 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error) 3158 { 3159 struct rte_ether_hdr *eth = NULL; 3160 struct rte_vlan_hdr *vlan = NULL; 3161 struct rte_ipv6_hdr *ipv6 = NULL; 3162 struct rte_udp_hdr *udp = NULL; 3163 char *next_hdr; 3164 uint16_t proto; 3165 3166 eth = (struct rte_ether_hdr *)data; 3167 next_hdr = (char *)(eth + 1); 3168 proto = RTE_BE16(eth->ether_type); 3169 3170 /* VLAN skipping */ 3171 while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) { 3172 vlan = (struct rte_vlan_hdr *)next_hdr; 3173 proto = RTE_BE16(vlan->eth_proto); 3174 next_hdr += sizeof(struct rte_vlan_hdr); 3175 } 3176 3177 /* HW calculates IPv4 csum. no need to proceed */ 3178 if (proto == RTE_ETHER_TYPE_IPV4) 3179 return 0; 3180 3181 /* non IPv4/IPv6 header. not supported */ 3182 if (proto != RTE_ETHER_TYPE_IPV6) { 3183 return rte_flow_error_set(error, ENOTSUP, 3184 RTE_FLOW_ERROR_TYPE_ACTION, 3185 NULL, "Cannot offload non IPv4/IPv6"); 3186 } 3187 3188 ipv6 = (struct rte_ipv6_hdr *)next_hdr; 3189 3190 /* ignore non UDP */ 3191 if (ipv6->proto != IPPROTO_UDP) 3192 return 0; 3193 3194 udp = (struct rte_udp_hdr *)(ipv6 + 1); 3195 udp->dgram_cksum = 0; 3196 3197 return 0; 3198 } 3199 3200 /** 3201 * Convert L2 encap action to DV specification. 3202 * 3203 * @param[in] dev 3204 * Pointer to rte_eth_dev structure. 3205 * @param[in] action 3206 * Pointer to action structure. 3207 * @param[in, out] dev_flow 3208 * Pointer to the mlx5_flow. 3209 * @param[in] transfer 3210 * Mark if the flow is E-Switch flow. 3211 * @param[out] error 3212 * Pointer to the error structure. 3213 * 3214 * @return 3215 * 0 on success, a negative errno value otherwise and rte_errno is set. 
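 *
 * @note
 * The encapsulation buffer is taken verbatim from a RAW_ENCAP
 * configuration or built from the item list of a VXLAN/NVGRE encap action
 * via flow_dv_convert_encap_data(); for IPv6/UDP headers the UDP checksum
 * is zeroed by flow_dv_zero_encap_udp_csum() before the reformat resource
 * is registered.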
3216 */ 3217 static int 3218 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev, 3219 const struct rte_flow_action *action, 3220 struct mlx5_flow *dev_flow, 3221 uint8_t transfer, 3222 struct rte_flow_error *error) 3223 { 3224 const struct rte_flow_item *encap_data; 3225 const struct rte_flow_action_raw_encap *raw_encap_data; 3226 struct mlx5_flow_dv_encap_decap_resource res = { 3227 .reformat_type = 3228 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL, 3229 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB : 3230 MLX5DV_FLOW_TABLE_TYPE_NIC_TX, 3231 }; 3232 3233 if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { 3234 raw_encap_data = 3235 (const struct rte_flow_action_raw_encap *)action->conf; 3236 res.size = raw_encap_data->size; 3237 memcpy(res.buf, raw_encap_data->data, res.size); 3238 } else { 3239 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) 3240 encap_data = 3241 ((const struct rte_flow_action_vxlan_encap *) 3242 action->conf)->definition; 3243 else 3244 encap_data = 3245 ((const struct rte_flow_action_nvgre_encap *) 3246 action->conf)->definition; 3247 if (flow_dv_convert_encap_data(encap_data, res.buf, 3248 &res.size, error)) 3249 return -rte_errno; 3250 } 3251 if (flow_dv_zero_encap_udp_csum(res.buf, error)) 3252 return -rte_errno; 3253 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error)) 3254 return rte_flow_error_set(error, EINVAL, 3255 RTE_FLOW_ERROR_TYPE_ACTION, 3256 NULL, "can't create L2 encap action"); 3257 return 0; 3258 } 3259 3260 /** 3261 * Convert L2 decap action to DV specification. 3262 * 3263 * @param[in] dev 3264 * Pointer to rte_eth_dev structure. 3265 * @param[in, out] dev_flow 3266 * Pointer to the mlx5_flow. 3267 * @param[in] transfer 3268 * Mark if the flow is E-Switch flow. 3269 * @param[out] error 3270 * Pointer to the error structure. 3271 * 3272 * @return 3273 * 0 on success, a negative errno value otherwise and rte_errno is set. 3274 */ 3275 static int 3276 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev, 3277 struct mlx5_flow *dev_flow, 3278 uint8_t transfer, 3279 struct rte_flow_error *error) 3280 { 3281 struct mlx5_flow_dv_encap_decap_resource res = { 3282 .size = 0, 3283 .reformat_type = 3284 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2, 3285 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB : 3286 MLX5DV_FLOW_TABLE_TYPE_NIC_RX, 3287 }; 3288 3289 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error)) 3290 return rte_flow_error_set(error, EINVAL, 3291 RTE_FLOW_ERROR_TYPE_ACTION, 3292 NULL, "can't create L2 decap action"); 3293 return 0; 3294 } 3295 3296 /** 3297 * Convert raw decap/encap (L3 tunnel) action to DV specification. 3298 * 3299 * @param[in] dev 3300 * Pointer to rte_eth_dev structure. 3301 * @param[in] action 3302 * Pointer to action structure. 3303 * @param[in, out] dev_flow 3304 * Pointer to the mlx5_flow. 3305 * @param[in] attr 3306 * Pointer to the flow attributes. 3307 * @param[out] error 3308 * Pointer to the error structure. 3309 * 3310 * @return 3311 * 0 on success, a negative errno value otherwise and rte_errno is set. 
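 *
 * @note
 * The reformat type is derived from the buffer size: a buffer smaller
 * than MLX5_ENCAPSULATION_DECISION_SIZE is programmed as an
 * L3-tunnel-to-L2 decap, a larger one as an L2-to-L3-tunnel encap.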
3312 */ 3313 static int 3314 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev, 3315 const struct rte_flow_action *action, 3316 struct mlx5_flow *dev_flow, 3317 const struct rte_flow_attr *attr, 3318 struct rte_flow_error *error) 3319 { 3320 const struct rte_flow_action_raw_encap *encap_data; 3321 struct mlx5_flow_dv_encap_decap_resource res; 3322 3323 memset(&res, 0, sizeof(res)); 3324 encap_data = (const struct rte_flow_action_raw_encap *)action->conf; 3325 res.size = encap_data->size; 3326 memcpy(res.buf, encap_data->data, res.size); 3327 res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ? 3328 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 : 3329 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL; 3330 if (attr->transfer) 3331 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; 3332 else 3333 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX : 3334 MLX5DV_FLOW_TABLE_TYPE_NIC_RX; 3335 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error)) 3336 return rte_flow_error_set(error, EINVAL, 3337 RTE_FLOW_ERROR_TYPE_ACTION, 3338 NULL, "can't create encap action"); 3339 return 0; 3340 } 3341 3342 /** 3343 * Create action push VLAN. 3344 * 3345 * @param[in] dev 3346 * Pointer to rte_eth_dev structure. 3347 * @param[in] attr 3348 * Pointer to the flow attributes. 3349 * @param[in] vlan 3350 * Pointer to the vlan to push to the Ethernet header. 3351 * @param[in, out] dev_flow 3352 * Pointer to the mlx5_flow. 3353 * @param[out] error 3354 * Pointer to the error structure. 3355 * 3356 * @return 3357 * 0 on success, a negative errno value otherwise and rte_errno is set. 3358 */ 3359 static int 3360 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev, 3361 const struct rte_flow_attr *attr, 3362 const struct rte_vlan_hdr *vlan, 3363 struct mlx5_flow *dev_flow, 3364 struct rte_flow_error *error) 3365 { 3366 struct mlx5_flow_dv_push_vlan_action_resource res; 3367 3368 memset(&res, 0, sizeof(res)); 3369 res.vlan_tag = 3370 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 | 3371 vlan->vlan_tci); 3372 if (attr->transfer) 3373 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; 3374 else 3375 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX : 3376 MLX5DV_FLOW_TABLE_TYPE_NIC_RX; 3377 return flow_dv_push_vlan_action_resource_register 3378 (dev, &res, dev_flow, error); 3379 } 3380 3381 static int fdb_mirror; 3382 3383 /** 3384 * Validate the modify-header actions. 3385 * 3386 * @param[in] action_flags 3387 * Holds the actions detected until now. 3388 * @param[in] action 3389 * Pointer to the modify action. 3390 * @param[out] error 3391 * Pointer to error structure. 3392 * 3393 * @return 3394 * 0 on success, a negative errno value otherwise and rte_errno is set. 
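 *
 * @note
 * A configuration is mandatory for every modify action except DEC_TTL,
 * modify actions must precede any encap action, and with E-Switch
 * mirroring (fdb_mirror set) they cannot follow a sample action.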
3395 */ 3396 static int 3397 flow_dv_validate_action_modify_hdr(const uint64_t action_flags, 3398 const struct rte_flow_action *action, 3399 struct rte_flow_error *error) 3400 { 3401 if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf) 3402 return rte_flow_error_set(error, EINVAL, 3403 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 3404 NULL, "action configuration not set"); 3405 if (action_flags & MLX5_FLOW_ACTION_ENCAP) 3406 return rte_flow_error_set(error, EINVAL, 3407 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3408 "can't have encap action before" 3409 " modify action"); 3410 if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror) 3411 return rte_flow_error_set(error, EINVAL, 3412 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3413 "can't support sample action before" 3414 " modify action for E-Switch" 3415 " mirroring"); 3416 return 0; 3417 } 3418 3419 /** 3420 * Validate the modify-header MAC address actions. 3421 * 3422 * @param[in] action_flags 3423 * Holds the actions detected until now. 3424 * @param[in] action 3425 * Pointer to the modify action. 3426 * @param[in] item_flags 3427 * Holds the items detected. 3428 * @param[out] error 3429 * Pointer to error structure. 3430 * 3431 * @return 3432 * 0 on success, a negative errno value otherwise and rte_errno is set. 3433 */ 3434 static int 3435 flow_dv_validate_action_modify_mac(const uint64_t action_flags, 3436 const struct rte_flow_action *action, 3437 const uint64_t item_flags, 3438 struct rte_flow_error *error) 3439 { 3440 int ret = 0; 3441 3442 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 3443 if (!ret) { 3444 if (!(item_flags & MLX5_FLOW_LAYER_L2)) 3445 return rte_flow_error_set(error, EINVAL, 3446 RTE_FLOW_ERROR_TYPE_ACTION, 3447 NULL, 3448 "no L2 item in pattern"); 3449 } 3450 return ret; 3451 } 3452 3453 /** 3454 * Validate the modify-header IPv4 address actions. 3455 * 3456 * @param[in] action_flags 3457 * Holds the actions detected until now. 3458 * @param[in] action 3459 * Pointer to the modify action. 3460 * @param[in] item_flags 3461 * Holds the items detected. 3462 * @param[out] error 3463 * Pointer to error structure. 3464 * 3465 * @return 3466 * 0 on success, a negative errno value otherwise and rte_errno is set. 3467 */ 3468 static int 3469 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags, 3470 const struct rte_flow_action *action, 3471 const uint64_t item_flags, 3472 struct rte_flow_error *error) 3473 { 3474 int ret = 0; 3475 uint64_t layer; 3476 3477 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 3478 if (!ret) { 3479 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? 3480 MLX5_FLOW_LAYER_INNER_L3_IPV4 : 3481 MLX5_FLOW_LAYER_OUTER_L3_IPV4; 3482 if (!(item_flags & layer)) 3483 return rte_flow_error_set(error, EINVAL, 3484 RTE_FLOW_ERROR_TYPE_ACTION, 3485 NULL, 3486 "no ipv4 item in pattern"); 3487 } 3488 return ret; 3489 } 3490 3491 /** 3492 * Validate the modify-header IPv6 address actions. 3493 * 3494 * @param[in] action_flags 3495 * Holds the actions detected until now. 3496 * @param[in] action 3497 * Pointer to the modify action. 3498 * @param[in] item_flags 3499 * Holds the items detected. 3500 * @param[out] error 3501 * Pointer to error structure. 3502 * 3503 * @return 3504 * 0 on success, a negative errno value otherwise and rte_errno is set. 
3505 */ 3506 static int 3507 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags, 3508 const struct rte_flow_action *action, 3509 const uint64_t item_flags, 3510 struct rte_flow_error *error) 3511 { 3512 int ret = 0; 3513 uint64_t layer; 3514 3515 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 3516 if (!ret) { 3517 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? 3518 MLX5_FLOW_LAYER_INNER_L3_IPV6 : 3519 MLX5_FLOW_LAYER_OUTER_L3_IPV6; 3520 if (!(item_flags & layer)) 3521 return rte_flow_error_set(error, EINVAL, 3522 RTE_FLOW_ERROR_TYPE_ACTION, 3523 NULL, 3524 "no ipv6 item in pattern"); 3525 } 3526 return ret; 3527 } 3528 3529 /** 3530 * Validate the modify-header TP actions. 3531 * 3532 * @param[in] action_flags 3533 * Holds the actions detected until now. 3534 * @param[in] action 3535 * Pointer to the modify action. 3536 * @param[in] item_flags 3537 * Holds the items detected. 3538 * @param[out] error 3539 * Pointer to error structure. 3540 * 3541 * @return 3542 * 0 on success, a negative errno value otherwise and rte_errno is set. 3543 */ 3544 static int 3545 flow_dv_validate_action_modify_tp(const uint64_t action_flags, 3546 const struct rte_flow_action *action, 3547 const uint64_t item_flags, 3548 struct rte_flow_error *error) 3549 { 3550 int ret = 0; 3551 uint64_t layer; 3552 3553 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 3554 if (!ret) { 3555 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? 3556 MLX5_FLOW_LAYER_INNER_L4 : 3557 MLX5_FLOW_LAYER_OUTER_L4; 3558 if (!(item_flags & layer)) 3559 return rte_flow_error_set(error, EINVAL, 3560 RTE_FLOW_ERROR_TYPE_ACTION, 3561 NULL, "no transport layer " 3562 "in pattern"); 3563 } 3564 return ret; 3565 } 3566 3567 /** 3568 * Validate the modify-header actions of increment/decrement 3569 * TCP Sequence-number. 3570 * 3571 * @param[in] action_flags 3572 * Holds the actions detected until now. 3573 * @param[in] action 3574 * Pointer to the modify action. 3575 * @param[in] item_flags 3576 * Holds the items detected. 3577 * @param[out] error 3578 * Pointer to error structure. 3579 * 3580 * @return 3581 * 0 on success, a negative errno value otherwise and rte_errno is set. 3582 */ 3583 static int 3584 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags, 3585 const struct rte_flow_action *action, 3586 const uint64_t item_flags, 3587 struct rte_flow_error *error) 3588 { 3589 int ret = 0; 3590 uint64_t layer; 3591 3592 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 3593 if (!ret) { 3594 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? 3595 MLX5_FLOW_LAYER_INNER_L4_TCP : 3596 MLX5_FLOW_LAYER_OUTER_L4_TCP; 3597 if (!(item_flags & layer)) 3598 return rte_flow_error_set(error, EINVAL, 3599 RTE_FLOW_ERROR_TYPE_ACTION, 3600 NULL, "no TCP item in" 3601 " pattern"); 3602 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ && 3603 (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) || 3604 (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ && 3605 (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ))) 3606 return rte_flow_error_set(error, EINVAL, 3607 RTE_FLOW_ERROR_TYPE_ACTION, 3608 NULL, 3609 "cannot decrease and increase" 3610 " TCP sequence number" 3611 " at the same time"); 3612 } 3613 return ret; 3614 } 3615 3616 /** 3617 * Validate the modify-header actions of increment/decrement 3618 * TCP Acknowledgment number. 3619 * 3620 * @param[in] action_flags 3621 * Holds the actions detected until now. 3622 * @param[in] action 3623 * Pointer to the modify action. 
3624 * @param[in] item_flags 3625 * Holds the items detected. 3626 * @param[out] error 3627 * Pointer to error structure. 3628 * 3629 * @return 3630 * 0 on success, a negative errno value otherwise and rte_errno is set. 3631 */ 3632 static int 3633 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags, 3634 const struct rte_flow_action *action, 3635 const uint64_t item_flags, 3636 struct rte_flow_error *error) 3637 { 3638 int ret = 0; 3639 uint64_t layer; 3640 3641 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 3642 if (!ret) { 3643 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? 3644 MLX5_FLOW_LAYER_INNER_L4_TCP : 3645 MLX5_FLOW_LAYER_OUTER_L4_TCP; 3646 if (!(item_flags & layer)) 3647 return rte_flow_error_set(error, EINVAL, 3648 RTE_FLOW_ERROR_TYPE_ACTION, 3649 NULL, "no TCP item in" 3650 " pattern"); 3651 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK && 3652 (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) || 3653 (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK && 3654 (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK))) 3655 return rte_flow_error_set(error, EINVAL, 3656 RTE_FLOW_ERROR_TYPE_ACTION, 3657 NULL, 3658 "cannot decrease and increase" 3659 " TCP acknowledgment number" 3660 " at the same time"); 3661 } 3662 return ret; 3663 } 3664 3665 /** 3666 * Validate the modify-header TTL actions. 3667 * 3668 * @param[in] action_flags 3669 * Holds the actions detected until now. 3670 * @param[in] action 3671 * Pointer to the modify action. 3672 * @param[in] item_flags 3673 * Holds the items detected. 3674 * @param[out] error 3675 * Pointer to error structure. 3676 * 3677 * @return 3678 * 0 on success, a negative errno value otherwise and rte_errno is set. 3679 */ 3680 static int 3681 flow_dv_validate_action_modify_ttl(const uint64_t action_flags, 3682 const struct rte_flow_action *action, 3683 const uint64_t item_flags, 3684 struct rte_flow_error *error) 3685 { 3686 int ret = 0; 3687 uint64_t layer; 3688 3689 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 3690 if (!ret) { 3691 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? 3692 MLX5_FLOW_LAYER_INNER_L3 : 3693 MLX5_FLOW_LAYER_OUTER_L3; 3694 if (!(item_flags & layer)) 3695 return rte_flow_error_set(error, EINVAL, 3696 RTE_FLOW_ERROR_TYPE_ACTION, 3697 NULL, 3698 "no IP protocol in pattern"); 3699 } 3700 return ret; 3701 } 3702 3703 /** 3704 * Validate jump action. 3705 * 3706 * @param[in] action 3707 * Pointer to the jump action. 3708 * @param[in] action_flags 3709 * Holds the actions detected until now. 3710 * @param[in] attributes 3711 * Pointer to flow attributes 3712 * @param[in] external 3713 * Action belongs to flow rule created by request external to PMD. 3714 * @param[out] error 3715 * Pointer to error structure. 3716 * 3717 * @return 3718 * 0 on success, a negative errno value otherwise and rte_errno is set. 
3719 */ 3720 static int 3721 flow_dv_validate_action_jump(const struct rte_flow_action *action, 3722 uint64_t action_flags, 3723 const struct rte_flow_attr *attributes, 3724 bool external, struct rte_flow_error *error) 3725 { 3726 uint32_t target_group, table; 3727 int ret = 0; 3728 3729 if (action_flags & (MLX5_FLOW_FATE_ACTIONS | 3730 MLX5_FLOW_FATE_ESWITCH_ACTIONS)) 3731 return rte_flow_error_set(error, EINVAL, 3732 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3733 "can't have 2 fate actions in" 3734 " same flow"); 3735 if (action_flags & MLX5_FLOW_ACTION_METER) 3736 return rte_flow_error_set(error, ENOTSUP, 3737 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3738 "jump with meter not support"); 3739 if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror) 3740 return rte_flow_error_set(error, EINVAL, 3741 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3742 "E-Switch mirroring can't support" 3743 " Sample action and jump action in" 3744 " same flow now"); 3745 if (!action->conf) 3746 return rte_flow_error_set(error, EINVAL, 3747 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 3748 NULL, "action configuration not set"); 3749 target_group = 3750 ((const struct rte_flow_action_jump *)action->conf)->group; 3751 ret = mlx5_flow_group_to_table(attributes, external, target_group, 3752 true, &table, error); 3753 if (ret) 3754 return ret; 3755 if (attributes->group == target_group) 3756 return rte_flow_error_set(error, EINVAL, 3757 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3758 "target group must be other than" 3759 " the current flow group"); 3760 return 0; 3761 } 3762 3763 /* 3764 * Validate the port_id action. 3765 * 3766 * @param[in] dev 3767 * Pointer to rte_eth_dev structure. 3768 * @param[in] action_flags 3769 * Bit-fields that holds the actions detected until now. 3770 * @param[in] action 3771 * Port_id RTE action structure. 3772 * @param[in] attr 3773 * Attributes of flow that includes this action. 3774 * @param[out] error 3775 * Pointer to error structure. 3776 * 3777 * @return 3778 * 0 on success, a negative errno value otherwise and rte_errno is set. 3779 */ 3780 static int 3781 flow_dv_validate_action_port_id(struct rte_eth_dev *dev, 3782 uint64_t action_flags, 3783 const struct rte_flow_action *action, 3784 const struct rte_flow_attr *attr, 3785 struct rte_flow_error *error) 3786 { 3787 const struct rte_flow_action_port_id *port_id; 3788 struct mlx5_priv *act_priv; 3789 struct mlx5_priv *dev_priv; 3790 uint16_t port; 3791 3792 if (!attr->transfer) 3793 return rte_flow_error_set(error, ENOTSUP, 3794 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 3795 NULL, 3796 "port id action is valid in transfer" 3797 " mode only"); 3798 if (!action || !action->conf) 3799 return rte_flow_error_set(error, ENOTSUP, 3800 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 3801 NULL, 3802 "port id action parameters must be" 3803 " specified"); 3804 if (action_flags & (MLX5_FLOW_FATE_ACTIONS | 3805 MLX5_FLOW_FATE_ESWITCH_ACTIONS)) 3806 return rte_flow_error_set(error, EINVAL, 3807 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3808 "can have only one fate actions in" 3809 " a flow"); 3810 dev_priv = mlx5_dev_to_eswitch_info(dev); 3811 if (!dev_priv) 3812 return rte_flow_error_set(error, rte_errno, 3813 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 3814 NULL, 3815 "failed to obtain E-Switch info"); 3816 port_id = action->conf; 3817 port = port_id->original ? 
dev->data->port_id : port_id->id; 3818 act_priv = mlx5_port_to_eswitch_info(port, false); 3819 if (!act_priv) 3820 return rte_flow_error_set 3821 (error, rte_errno, 3822 RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id, 3823 "failed to obtain E-Switch port id for port"); 3824 if (act_priv->domain_id != dev_priv->domain_id) 3825 return rte_flow_error_set 3826 (error, EINVAL, 3827 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3828 "port does not belong to" 3829 " E-Switch being configured"); 3830 return 0; 3831 } 3832 3833 /** 3834 * Get the maximum number of modify header actions. 3835 * 3836 * @param dev 3837 * Pointer to rte_eth_dev structure. 3838 * @param flags 3839 * Flags bits to check if root level. 3840 * 3841 * @return 3842 * Max number of modify header actions device can support. 3843 */ 3844 static inline unsigned int 3845 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused, 3846 uint64_t flags) 3847 { 3848 /* 3849 * There's no way to directly query the max capacity from FW. 3850 * The maximal value on root table should be assumed to be supported. 3851 */ 3852 if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL)) 3853 return MLX5_MAX_MODIFY_NUM; 3854 else 3855 return MLX5_ROOT_TBL_MODIFY_NUM; 3856 } 3857 3858 /** 3859 * Validate the meter action. 3860 * 3861 * @param[in] dev 3862 * Pointer to rte_eth_dev structure. 3863 * @param[in] action_flags 3864 * Bit-fields that holds the actions detected until now. 3865 * @param[in] action 3866 * Pointer to the meter action. 3867 * @param[in] attr 3868 * Attributes of flow that includes this action. 3869 * @param[out] error 3870 * Pointer to error structure. 3871 * 3872 * @return 3873 * 0 on success, a negative errno value otherwise and rte_ernno is set. 3874 */ 3875 static int 3876 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev, 3877 uint64_t action_flags, 3878 const struct rte_flow_action *action, 3879 const struct rte_flow_attr *attr, 3880 struct rte_flow_error *error) 3881 { 3882 struct mlx5_priv *priv = dev->data->dev_private; 3883 const struct rte_flow_action_meter *am = action->conf; 3884 struct mlx5_flow_meter *fm; 3885 3886 if (!am) 3887 return rte_flow_error_set(error, EINVAL, 3888 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3889 "meter action conf is NULL"); 3890 3891 if (action_flags & MLX5_FLOW_ACTION_METER) 3892 return rte_flow_error_set(error, ENOTSUP, 3893 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3894 "meter chaining not support"); 3895 if (action_flags & MLX5_FLOW_ACTION_JUMP) 3896 return rte_flow_error_set(error, ENOTSUP, 3897 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3898 "meter with jump not support"); 3899 if (!priv->mtr_en) 3900 return rte_flow_error_set(error, ENOTSUP, 3901 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 3902 NULL, 3903 "meter action not supported"); 3904 fm = mlx5_flow_meter_find(priv, am->mtr_id); 3905 if (!fm) 3906 return rte_flow_error_set(error, EINVAL, 3907 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3908 "Meter not found"); 3909 if (fm->ref_cnt && (!(fm->transfer == attr->transfer || 3910 (!fm->ingress && !attr->ingress && attr->egress) || 3911 (!fm->egress && !attr->egress && attr->ingress)))) 3912 return rte_flow_error_set(error, EINVAL, 3913 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3914 "Flow attributes are either invalid " 3915 "or have a conflict with current " 3916 "meter attributes"); 3917 return 0; 3918 } 3919 3920 /** 3921 * Validate the age action. 3922 * 3923 * @param[in] action_flags 3924 * Holds the actions detected until now. 3925 * @param[in] action 3926 * Pointer to the age action. 
3927 * @param[in] dev 3928 * Pointer to the Ethernet device structure. 3929 * @param[out] error 3930 * Pointer to error structure. 3931 * 3932 * @return 3933 * 0 on success, a negative errno value otherwise and rte_errno is set. 3934 */ 3935 static int 3936 flow_dv_validate_action_age(uint64_t action_flags, 3937 const struct rte_flow_action *action, 3938 struct rte_eth_dev *dev, 3939 struct rte_flow_error *error) 3940 { 3941 struct mlx5_priv *priv = dev->data->dev_private; 3942 const struct rte_flow_action_age *age = action->conf; 3943 3944 if (!priv->config.devx || priv->counter_fallback) 3945 return rte_flow_error_set(error, ENOTSUP, 3946 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 3947 NULL, 3948 "age action not supported"); 3949 if (!(action->conf)) 3950 return rte_flow_error_set(error, EINVAL, 3951 RTE_FLOW_ERROR_TYPE_ACTION, action, 3952 "configuration cannot be null"); 3953 if (age->timeout >= UINT16_MAX / 2 / 10) 3954 return rte_flow_error_set(error, ENOTSUP, 3955 RTE_FLOW_ERROR_TYPE_ACTION, action, 3956 "Max age time: 3275 seconds"); 3957 if (action_flags & MLX5_FLOW_ACTION_AGE) 3958 return rte_flow_error_set(error, EINVAL, 3959 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3960 "Duplicate age ctions set"); 3961 return 0; 3962 } 3963 3964 /** 3965 * Validate the modify-header IPv4 DSCP actions. 3966 * 3967 * @param[in] action_flags 3968 * Holds the actions detected until now. 3969 * @param[in] action 3970 * Pointer to the modify action. 3971 * @param[in] item_flags 3972 * Holds the items detected. 3973 * @param[out] error 3974 * Pointer to error structure. 3975 * 3976 * @return 3977 * 0 on success, a negative errno value otherwise and rte_errno is set. 3978 */ 3979 static int 3980 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags, 3981 const struct rte_flow_action *action, 3982 const uint64_t item_flags, 3983 struct rte_flow_error *error) 3984 { 3985 int ret = 0; 3986 3987 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 3988 if (!ret) { 3989 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4)) 3990 return rte_flow_error_set(error, EINVAL, 3991 RTE_FLOW_ERROR_TYPE_ACTION, 3992 NULL, 3993 "no ipv4 item in pattern"); 3994 } 3995 return ret; 3996 } 3997 3998 /** 3999 * Validate the modify-header IPv6 DSCP actions. 4000 * 4001 * @param[in] action_flags 4002 * Holds the actions detected until now. 4003 * @param[in] action 4004 * Pointer to the modify action. 4005 * @param[in] item_flags 4006 * Holds the items detected. 4007 * @param[out] error 4008 * Pointer to error structure. 4009 * 4010 * @return 4011 * 0 on success, a negative errno value otherwise and rte_errno is set. 4012 */ 4013 static int 4014 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags, 4015 const struct rte_flow_action *action, 4016 const uint64_t item_flags, 4017 struct rte_flow_error *error) 4018 { 4019 int ret = 0; 4020 4021 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 4022 if (!ret) { 4023 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6)) 4024 return rte_flow_error_set(error, EINVAL, 4025 RTE_FLOW_ERROR_TYPE_ACTION, 4026 NULL, 4027 "no ipv6 item in pattern"); 4028 } 4029 return ret; 4030 } 4031 4032 /** 4033 * Match modify-header resource. 4034 * 4035 * @param entry 4036 * Pointer to exist resource entry object. 4037 * @param ctx 4038 * Pointer to new modify-header resource. 4039 * 4040 * @return 4041 * 0 on matching, -1 otherwise. 
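 *
 * A match requires the precomputed 64-bit key, the table type, the number
 * of actions and the flags to be equal, plus a byte-wise comparison of the
 * action arrays themselves, since different action lists can still produce
 * the same key.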
4042 */ 4043 static int 4044 flow_dv_modify_hdr_resource_match(struct mlx5_hlist_entry *entry, void *ctx) 4045 { 4046 struct mlx5_flow_dv_modify_hdr_resource *resource; 4047 struct mlx5_flow_dv_modify_hdr_resource *cache_resource; 4048 uint32_t actions_len; 4049 4050 resource = (struct mlx5_flow_dv_modify_hdr_resource *)ctx; 4051 cache_resource = container_of(entry, 4052 struct mlx5_flow_dv_modify_hdr_resource, 4053 entry); 4054 actions_len = resource->actions_num * sizeof(resource->actions[0]); 4055 if (resource->entry.key == cache_resource->entry.key && 4056 resource->ft_type == cache_resource->ft_type && 4057 resource->actions_num == cache_resource->actions_num && 4058 resource->flags == cache_resource->flags && 4059 !memcmp((const void *)resource->actions, 4060 (const void *)cache_resource->actions, 4061 actions_len)) 4062 return 0; 4063 return -1; 4064 } 4065 4066 /** 4067 * Validate the sample action. 4068 * 4069 * @param[in] action_flags 4070 * Holds the actions detected until now. 4071 * @param[in] action 4072 * Pointer to the sample action. 4073 * @param[in] dev 4074 * Pointer to the Ethernet device structure. 4075 * @param[in] attr 4076 * Attributes of flow that includes this action. 4077 * @param[out] error 4078 * Pointer to error structure. 4079 * 4080 * @return 4081 * 0 on success, a negative errno value otherwise and rte_errno is set. 4082 */ 4083 static int 4084 flow_dv_validate_action_sample(uint64_t action_flags, 4085 const struct rte_flow_action *action, 4086 struct rte_eth_dev *dev, 4087 const struct rte_flow_attr *attr, 4088 struct rte_flow_error *error) 4089 { 4090 struct mlx5_priv *priv = dev->data->dev_private; 4091 struct mlx5_dev_config *dev_conf = &priv->config; 4092 const struct rte_flow_action_sample *sample = action->conf; 4093 const struct rte_flow_action *act; 4094 uint64_t sub_action_flags = 0; 4095 uint16_t queue_index = 0xFFFF; 4096 int actions_n = 0; 4097 int ret; 4098 fdb_mirror = 0; 4099 4100 if (!sample) 4101 return rte_flow_error_set(error, EINVAL, 4102 RTE_FLOW_ERROR_TYPE_ACTION, action, 4103 "configuration cannot be NULL"); 4104 if (sample->ratio == 0) 4105 return rte_flow_error_set(error, EINVAL, 4106 RTE_FLOW_ERROR_TYPE_ACTION, action, 4107 "ratio value starts from 1"); 4108 if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en)) 4109 return rte_flow_error_set(error, ENOTSUP, 4110 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 4111 NULL, 4112 "sample action not supported"); 4113 if (action_flags & MLX5_FLOW_ACTION_SAMPLE) 4114 return rte_flow_error_set(error, EINVAL, 4115 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 4116 "Multiple sample actions not " 4117 "supported"); 4118 if (action_flags & MLX5_FLOW_ACTION_METER) 4119 return rte_flow_error_set(error, EINVAL, 4120 RTE_FLOW_ERROR_TYPE_ACTION, action, 4121 "wrong action order, meter should " 4122 "be after sample action"); 4123 if (action_flags & MLX5_FLOW_ACTION_JUMP) 4124 return rte_flow_error_set(error, EINVAL, 4125 RTE_FLOW_ERROR_TYPE_ACTION, action, 4126 "wrong action order, jump should " 4127 "be after sample action"); 4128 act = sample->actions; 4129 for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) { 4130 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS) 4131 return rte_flow_error_set(error, ENOTSUP, 4132 RTE_FLOW_ERROR_TYPE_ACTION, 4133 act, "too many actions"); 4134 switch (act->type) { 4135 case RTE_FLOW_ACTION_TYPE_QUEUE: 4136 ret = mlx5_flow_validate_action_queue(act, 4137 sub_action_flags, 4138 dev, 4139 attr, error); 4140 if (ret < 0) 4141 return ret; 4142 queue_index = ((const struct 
rte_flow_action_queue *) 4143 (act->conf))->index; 4144 sub_action_flags |= MLX5_FLOW_ACTION_QUEUE; 4145 ++actions_n; 4146 break; 4147 case RTE_FLOW_ACTION_TYPE_MARK: 4148 ret = flow_dv_validate_action_mark(dev, act, 4149 sub_action_flags, 4150 attr, error); 4151 if (ret < 0) 4152 return ret; 4153 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) 4154 sub_action_flags |= MLX5_FLOW_ACTION_MARK | 4155 MLX5_FLOW_ACTION_MARK_EXT; 4156 else 4157 sub_action_flags |= MLX5_FLOW_ACTION_MARK; 4158 ++actions_n; 4159 break; 4160 case RTE_FLOW_ACTION_TYPE_COUNT: 4161 ret = flow_dv_validate_action_count(dev, error); 4162 if (ret < 0) 4163 return ret; 4164 sub_action_flags |= MLX5_FLOW_ACTION_COUNT; 4165 ++actions_n; 4166 break; 4167 case RTE_FLOW_ACTION_TYPE_PORT_ID: 4168 ret = flow_dv_validate_action_port_id(dev, 4169 sub_action_flags, 4170 act, 4171 attr, 4172 error); 4173 if (ret) 4174 return ret; 4175 sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID; 4176 ++actions_n; 4177 break; 4178 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 4179 ret = flow_dv_validate_action_raw_encap_decap 4180 (dev, NULL, act->conf, attr, &sub_action_flags, 4181 &actions_n, error); 4182 if (ret < 0) 4183 return ret; 4184 ++actions_n; 4185 break; 4186 default: 4187 return rte_flow_error_set(error, ENOTSUP, 4188 RTE_FLOW_ERROR_TYPE_ACTION, 4189 NULL, 4190 "Doesn't support optional " 4191 "action"); 4192 } 4193 } 4194 if (attr->ingress && !attr->transfer) { 4195 if (!(sub_action_flags & MLX5_FLOW_ACTION_QUEUE)) 4196 return rte_flow_error_set(error, EINVAL, 4197 RTE_FLOW_ERROR_TYPE_ACTION, 4198 NULL, 4199 "Ingress must has a dest " 4200 "QUEUE for Sample"); 4201 } else if (attr->egress && !attr->transfer) { 4202 return rte_flow_error_set(error, ENOTSUP, 4203 RTE_FLOW_ERROR_TYPE_ACTION, 4204 NULL, 4205 "Sample Only support Ingress " 4206 "or E-Switch"); 4207 } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) { 4208 MLX5_ASSERT(attr->transfer); 4209 if (sample->ratio > 1) 4210 return rte_flow_error_set(error, ENOTSUP, 4211 RTE_FLOW_ERROR_TYPE_ACTION, 4212 NULL, 4213 "E-Switch doesn't support " 4214 "any optional action " 4215 "for sampling"); 4216 fdb_mirror = 1; 4217 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE) 4218 return rte_flow_error_set(error, ENOTSUP, 4219 RTE_FLOW_ERROR_TYPE_ACTION, 4220 NULL, 4221 "unsupported action QUEUE"); 4222 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID)) 4223 return rte_flow_error_set(error, EINVAL, 4224 RTE_FLOW_ERROR_TYPE_ACTION, 4225 NULL, 4226 "E-Switch must has a dest " 4227 "port for mirroring"); 4228 } 4229 /* Continue validation for Xcap actions.*/ 4230 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) && 4231 (queue_index == 0xFFFF || 4232 mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) { 4233 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) == 4234 MLX5_FLOW_XCAP_ACTIONS) 4235 return rte_flow_error_set(error, ENOTSUP, 4236 RTE_FLOW_ERROR_TYPE_ACTION, 4237 NULL, "encap and decap " 4238 "combination aren't " 4239 "supported"); 4240 if (!attr->transfer && attr->ingress && (sub_action_flags & 4241 MLX5_FLOW_ACTION_ENCAP)) 4242 return rte_flow_error_set(error, ENOTSUP, 4243 RTE_FLOW_ERROR_TYPE_ACTION, 4244 NULL, "encap is not supported" 4245 " for ingress traffic"); 4246 } 4247 return 0; 4248 } 4249 4250 /** 4251 * Find existing modify-header resource or create and register a new one. 4252 * 4253 * @param dev[in, out] 4254 * Pointer to rte_eth_dev structure. 4255 * @param[in, out] resource 4256 * Pointer to modify-header resource. 
4257 * @param[in, out] dev_flow
4258 * Pointer to the dev_flow.
4259 * @param[out] error
4260 * Pointer to error structure.
4261 *
4262 * @return
4263 * 0 on success, otherwise a negative errno value and rte_errno is set.
4264 */
4265 static int
4266 flow_dv_modify_hdr_resource_register
4267 (struct rte_eth_dev *dev,
4268 struct mlx5_flow_dv_modify_hdr_resource *resource,
4269 struct mlx5_flow *dev_flow,
4270 struct rte_flow_error *error)
4271 {
4272 struct mlx5_priv *priv = dev->data->dev_private;
4273 struct mlx5_dev_ctx_shared *sh = priv->sh;
4274 struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
4275 struct mlx5dv_dr_domain *ns;
4276 uint32_t actions_len;
4277 struct mlx5_hlist_entry *entry;
4278 union mlx5_flow_modify_hdr_key hdr_mod_key = {
4279 {
4280 .ft_type = resource->ft_type,
4281 .actions_num = resource->actions_num,
4282 .group = dev_flow->dv.group,
4283 .cksum = 0,
4284 }
4285 };
4286 int ret;
4287
4288 resource->flags = dev_flow->dv.group ? 0 :
4289 MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
4290 if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
4291 resource->flags))
4292 return rte_flow_error_set(error, EOVERFLOW,
4293 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4294 "too many modify header items");
4295 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
4296 ns = sh->fdb_domain;
4297 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
4298 ns = sh->tx_domain;
4299 else
4300 ns = sh->rx_domain;
4301 /* Lookup a matching resource from cache. */
4302 actions_len = resource->actions_num * sizeof(resource->actions[0]);
4303 hdr_mod_key.cksum = __rte_raw_cksum(resource->actions, actions_len, 0);
4304 resource->entry.key = hdr_mod_key.v64;
4305 entry = mlx5_hlist_lookup_ex(sh->modify_cmds, resource->entry.key,
4306 flow_dv_modify_hdr_resource_match,
4307 (void *)resource);
4308 if (entry) {
4309 cache_resource = container_of(entry,
4310 struct mlx5_flow_dv_modify_hdr_resource,
4311 entry);
4312 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
4313 (void *)cache_resource,
4314 rte_atomic32_read(&cache_resource->refcnt));
4315 rte_atomic32_inc(&cache_resource->refcnt);
4316 dev_flow->handle->dvh.modify_hdr = cache_resource;
4317 return 0;
4318
4319 }
4320 /* Register new modify-header resource.
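 * The resource is copied into newly allocated memory, a modify-header
 * action is created from it on the selected domain, and the entry is
 * inserted into the modify_cmds hash list. If the insertion fails (e.g.
 * an identical entry was registered concurrently), the freshly created
 * action is destroyed and EEXIST is returned.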
*/ 4321 cache_resource = mlx5_malloc(MLX5_MEM_ZERO, 4322 sizeof(*cache_resource) + actions_len, 0, 4323 SOCKET_ID_ANY); 4324 if (!cache_resource) 4325 return rte_flow_error_set(error, ENOMEM, 4326 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 4327 "cannot allocate resource memory"); 4328 *cache_resource = *resource; 4329 rte_memcpy(cache_resource->actions, resource->actions, actions_len); 4330 ret = mlx5_flow_os_create_flow_action_modify_header 4331 (sh->ctx, ns, cache_resource, 4332 actions_len, &cache_resource->action); 4333 if (ret) { 4334 mlx5_free(cache_resource); 4335 return rte_flow_error_set(error, ENOMEM, 4336 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 4337 NULL, "cannot create action"); 4338 } 4339 rte_atomic32_init(&cache_resource->refcnt); 4340 rte_atomic32_inc(&cache_resource->refcnt); 4341 if (mlx5_hlist_insert_ex(sh->modify_cmds, &cache_resource->entry, 4342 flow_dv_modify_hdr_resource_match, 4343 (void *)cache_resource)) { 4344 claim_zero(mlx5_flow_os_destroy_flow_action 4345 (cache_resource->action)); 4346 mlx5_free(cache_resource); 4347 return rte_flow_error_set(error, EEXIST, 4348 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 4349 NULL, "action exist"); 4350 } 4351 dev_flow->handle->dvh.modify_hdr = cache_resource; 4352 DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++", 4353 (void *)cache_resource, 4354 rte_atomic32_read(&cache_resource->refcnt)); 4355 return 0; 4356 } 4357 4358 /** 4359 * Get DV flow counter by index. 4360 * 4361 * @param[in] dev 4362 * Pointer to the Ethernet device structure. 4363 * @param[in] idx 4364 * mlx5 flow counter index in the container. 4365 * @param[out] ppool 4366 * mlx5 flow counter pool in the container, 4367 * 4368 * @return 4369 * Pointer to the counter, NULL otherwise. 4370 */ 4371 static struct mlx5_flow_counter * 4372 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev, 4373 uint32_t idx, 4374 struct mlx5_flow_counter_pool **ppool) 4375 { 4376 struct mlx5_priv *priv = dev->data->dev_private; 4377 struct mlx5_pools_container *cont; 4378 struct mlx5_flow_counter_pool *pool; 4379 uint32_t batch = 0, age = 0; 4380 4381 idx--; 4382 age = MLX_CNT_IS_AGE(idx); 4383 idx = age ? idx - MLX5_CNT_AGE_OFFSET : idx; 4384 if (idx >= MLX5_CNT_BATCH_OFFSET) { 4385 idx -= MLX5_CNT_BATCH_OFFSET; 4386 batch = 1; 4387 } 4388 cont = MLX5_CNT_CONTAINER(priv->sh, batch, age); 4389 MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cont->n); 4390 pool = cont->pools[idx / MLX5_COUNTERS_PER_POOL]; 4391 MLX5_ASSERT(pool); 4392 if (ppool) 4393 *ppool = pool; 4394 return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL); 4395 } 4396 4397 /** 4398 * Check the devx counter belongs to the pool. 4399 * 4400 * @param[in] pool 4401 * Pointer to the counter pool. 4402 * @param[in] id 4403 * The counter devx ID. 4404 * 4405 * @return 4406 * True if counter belongs to the pool, false otherwise. 4407 */ 4408 static bool 4409 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id) 4410 { 4411 int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) * 4412 MLX5_COUNTERS_PER_POOL; 4413 4414 if (id >= base && id < base + MLX5_COUNTERS_PER_POOL) 4415 return true; 4416 return false; 4417 } 4418 4419 /** 4420 * Get a pool by devx counter ID. 4421 * 4422 * @param[in] cont 4423 * Pointer to the counter container. 4424 * @param[in] id 4425 * The counter devx ID. 
4426 * 4427 * @return 4428 * The counter pool pointer if exists, NULL otherwise, 4429 */ 4430 static struct mlx5_flow_counter_pool * 4431 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id) 4432 { 4433 uint32_t i; 4434 4435 /* Check last used pool. */ 4436 if (cont->last_pool_idx != POOL_IDX_INVALID && 4437 flow_dv_is_counter_in_pool(cont->pools[cont->last_pool_idx], id)) 4438 return cont->pools[cont->last_pool_idx]; 4439 /* ID out of range means no suitable pool in the container. */ 4440 if (id > cont->max_id || id < cont->min_id) 4441 return NULL; 4442 /* 4443 * Find the pool from the end of the container, since mostly counter 4444 * ID is sequence increasing, and the last pool should be the needed 4445 * one. 4446 */ 4447 i = rte_atomic16_read(&cont->n_valid); 4448 while (i--) { 4449 struct mlx5_flow_counter_pool *pool = cont->pools[i]; 4450 4451 if (flow_dv_is_counter_in_pool(pool, id)) 4452 return pool; 4453 } 4454 return NULL; 4455 } 4456 4457 /** 4458 * Allocate a new memory for the counter values wrapped by all the needed 4459 * management. 4460 * 4461 * @param[in] dev 4462 * Pointer to the Ethernet device structure. 4463 * @param[in] raws_n 4464 * The raw memory areas - each one for MLX5_COUNTERS_PER_POOL counters. 4465 * 4466 * @return 4467 * The new memory management pointer on success, otherwise NULL and rte_errno 4468 * is set. 4469 */ 4470 static struct mlx5_counter_stats_mem_mng * 4471 flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n) 4472 { 4473 struct mlx5_priv *priv = dev->data->dev_private; 4474 struct mlx5_dev_ctx_shared *sh = priv->sh; 4475 struct mlx5_devx_mkey_attr mkey_attr; 4476 struct mlx5_counter_stats_mem_mng *mem_mng; 4477 volatile struct flow_counter_stats *raw_data; 4478 int size = (sizeof(struct flow_counter_stats) * 4479 MLX5_COUNTERS_PER_POOL + 4480 sizeof(struct mlx5_counter_stats_raw)) * raws_n + 4481 sizeof(struct mlx5_counter_stats_mem_mng); 4482 size_t pgsize = rte_mem_page_size(); 4483 if (pgsize == (size_t)-1) { 4484 DRV_LOG(ERR, "Failed to get mem page size"); 4485 rte_errno = ENOMEM; 4486 return NULL; 4487 } 4488 uint8_t *mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize, 4489 SOCKET_ID_ANY); 4490 int i; 4491 4492 if (!mem) { 4493 rte_errno = ENOMEM; 4494 return NULL; 4495 } 4496 mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1; 4497 size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n; 4498 mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size, 4499 IBV_ACCESS_LOCAL_WRITE); 4500 if (!mem_mng->umem) { 4501 rte_errno = errno; 4502 mlx5_free(mem); 4503 return NULL; 4504 } 4505 mkey_attr.addr = (uintptr_t)mem; 4506 mkey_attr.size = size; 4507 mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem); 4508 mkey_attr.pd = sh->pdn; 4509 mkey_attr.log_entity_size = 0; 4510 mkey_attr.pg_access = 0; 4511 mkey_attr.klm_array = NULL; 4512 mkey_attr.klm_num = 0; 4513 if (priv->config.hca_attr.relaxed_ordering_write && 4514 priv->config.hca_attr.relaxed_ordering_read && 4515 !haswell_broadwell_cpu) 4516 mkey_attr.relaxed_ordering = 1; 4517 mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr); 4518 if (!mem_mng->dm) { 4519 mlx5_glue->devx_umem_dereg(mem_mng->umem); 4520 rte_errno = errno; 4521 mlx5_free(mem); 4522 return NULL; 4523 } 4524 mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size); 4525 raw_data = (volatile struct flow_counter_stats *)mem; 4526 for (i = 0; i < raws_n; ++i) { 4527 mem_mng->raws[i].mem_mng = mem_mng; 4528 mem_mng->raws[i].data = raw_data + i * 
MLX5_COUNTERS_PER_POOL; 4529 } 4530 LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next); 4531 return mem_mng; 4532 } 4533 4534 /** 4535 * Resize a counter container. 4536 * 4537 * @param[in] dev 4538 * Pointer to the Ethernet device structure. 4539 * @param[in] batch 4540 * Whether the pool is for counter that was allocated by batch command. 4541 * @param[in] age 4542 * Whether the pool is for Aging counter. 4543 * 4544 * @return 4545 * 0 on success, otherwise negative errno value and rte_errno is set. 4546 */ 4547 static int 4548 flow_dv_container_resize(struct rte_eth_dev *dev, 4549 uint32_t batch, uint32_t age) 4550 { 4551 struct mlx5_priv *priv = dev->data->dev_private; 4552 struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch, 4553 age); 4554 struct mlx5_counter_stats_mem_mng *mem_mng = NULL; 4555 void *old_pools = cont->pools; 4556 uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE; 4557 uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize; 4558 void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY); 4559 4560 if (!pools) { 4561 rte_errno = ENOMEM; 4562 return -ENOMEM; 4563 } 4564 if (old_pools) 4565 memcpy(pools, old_pools, cont->n * 4566 sizeof(struct mlx5_flow_counter_pool *)); 4567 /* 4568 * Fallback mode query the counter directly, no background query 4569 * resources are needed. 4570 */ 4571 if (!priv->counter_fallback) { 4572 int i; 4573 4574 mem_mng = flow_dv_create_counter_stat_mem_mng(dev, 4575 MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES); 4576 if (!mem_mng) { 4577 mlx5_free(pools); 4578 return -ENOMEM; 4579 } 4580 for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i) 4581 LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws, 4582 mem_mng->raws + 4583 MLX5_CNT_CONTAINER_RESIZE + 4584 i, next); 4585 } 4586 rte_spinlock_lock(&cont->resize_sl); 4587 cont->n = resize; 4588 cont->mem_mng = mem_mng; 4589 cont->pools = pools; 4590 rte_spinlock_unlock(&cont->resize_sl); 4591 if (old_pools) 4592 mlx5_free(old_pools); 4593 return 0; 4594 } 4595 4596 /** 4597 * Query a devx flow counter. 4598 * 4599 * @param[in] dev 4600 * Pointer to the Ethernet device structure. 4601 * @param[in] cnt 4602 * Index to the flow counter. 4603 * @param[out] pkts 4604 * The statistics value of packets. 4605 * @param[out] bytes 4606 * The statistics value of bytes. 4607 * 4608 * @return 4609 * 0 on success, otherwise a negative errno value and rte_errno is set. 4610 */ 4611 static inline int 4612 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts, 4613 uint64_t *bytes) 4614 { 4615 struct mlx5_priv *priv = dev->data->dev_private; 4616 struct mlx5_flow_counter_pool *pool = NULL; 4617 struct mlx5_flow_counter *cnt; 4618 struct mlx5_flow_counter_ext *cnt_ext = NULL; 4619 int offset; 4620 4621 cnt = flow_dv_counter_get_by_idx(dev, counter, &pool); 4622 MLX5_ASSERT(pool); 4623 if (counter < MLX5_CNT_BATCH_OFFSET) { 4624 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt); 4625 if (priv->counter_fallback) 4626 return mlx5_devx_cmd_flow_counter_query(cnt_ext->dcs, 0, 4627 0, pkts, bytes, 0, NULL, NULL, 0); 4628 } 4629 4630 rte_spinlock_lock(&pool->sl); 4631 /* 4632 * The single counters allocation may allocate smaller ID than the 4633 * current allocated in parallel to the host reading. 4634 * In this case the new counter values must be reported as 0. 
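 * (The check below treats counters whose dcs ID is below
 * pool->raw->min_dcs_id as not yet covered by the raw data area,
 * hence the zero report.)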
4635 */
4636 if (unlikely(cnt_ext && cnt_ext->dcs->id < pool->raw->min_dcs_id)) {
4637 *pkts = 0;
4638 *bytes = 0;
4639 } else {
4640 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
4641 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
4642 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
4643 }
4644 rte_spinlock_unlock(&pool->sl);
4645 return 0;
4646 }
4647
4648 /**
4649 * Create and initialize a new counter pool.
4650 *
4651 * @param[in] dev
4652 * Pointer to the Ethernet device structure.
4653 * @param[in] dcs
4654 * The devX counter handle.
4655 * @param[in] batch
4656 * Whether the pool is for counters allocated by batch command.
4657 * @param[in] age
4658 * Whether the pool is for counters allocated for aging.
4659 * The counter container to use is selected by the @p batch and @p age
4660 * flags (see MLX5_CNT_CONTAINER).
4661 *
4662 * @return
4663 * The new counter pool pointer on success, NULL otherwise and rte_errno is set.
4664 */
4665 static struct mlx5_flow_counter_pool *
4666 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
4667 uint32_t batch, uint32_t age)
4668 {
4669 struct mlx5_priv *priv = dev->data->dev_private;
4670 struct mlx5_flow_counter_pool *pool;
4671 struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4672 age);
4673 int16_t n_valid = rte_atomic16_read(&cont->n_valid);
4674 uint32_t size = sizeof(*pool);
4675
4676 if (cont->n == n_valid && flow_dv_container_resize(dev, batch, age))
4677 return NULL;
4678 size += MLX5_COUNTERS_PER_POOL * CNT_SIZE;
4679 size += (batch ? 0 : MLX5_COUNTERS_PER_POOL * CNTEXT_SIZE);
4680 size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * AGE_SIZE);
4681 pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
4682 if (!pool) {
4683 rte_errno = ENOMEM;
4684 return NULL;
4685 }
4686 pool->min_dcs = dcs;
4687 if (!priv->counter_fallback)
4688 pool->raw = cont->mem_mng->raws + n_valid %
4689 MLX5_CNT_CONTAINER_RESIZE;
4690 pool->raw_hw = NULL;
4691 pool->type = 0;
4692 pool->type |= (batch ? 0 : CNT_POOL_TYPE_EXT);
4693 pool->type |= (!age ? 0 : CNT_POOL_TYPE_AGE);
4694 pool->query_gen = 0;
4695 rte_spinlock_init(&pool->sl);
4696 TAILQ_INIT(&pool->counters[0]);
4697 TAILQ_INIT(&pool->counters[1]);
4698 TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
4699 pool->index = n_valid;
4700 cont->pools[n_valid] = pool;
4701 if (!batch) {
4702 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
4703
4704 if (base < cont->min_id)
4705 cont->min_id = base;
4706 if (base > cont->max_id)
4707 cont->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
4708 cont->last_pool_idx = pool->index;
4709 }
4710 /* Pool initialization must complete before the host thread can access it. */
4711 rte_io_wmb();
4712 rte_atomic16_add(&cont->n_valid, 1);
4713 return pool;
4714 }
4715
4716 /**
4717 * Restore skipped counters in the pool.
4718 *
4719 * As the counter pool query requires the first counter dcs
4720 * ID to be 4-aligned, counters in a pool whose min_dcs ID is
4721 * not aligned to 4 cannot be queried and are therefore
4722 * skipped.
4723 * Once another min_dcs ID lower than those skipped counters'
4724 * dcs IDs appears, the skipped counters become safe to
4725 * use.
4726 * Should be called when min_dcs is updated.
4727 *
4728 * @param[in] pool
4729 * Current counter pool.
4730 * @param[in] last_min_dcs
4731 * Last min_dcs.
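 *
 * Illustration (assuming MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT is 4): if the
 * first single counter allocated for a pool gets dcs ID 0x102, min_dcs is
 * not 4-aligned and the pool's counters are marked as skipped. When a
 * later allocation returns the 4-aligned, lower dcs ID 0x100 and min_dcs
 * is updated, this function re-queues the skipped counters that can now
 * be queried.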
4732 */
4733 static void
4734 flow_dv_counter_restore(struct mlx5_flow_counter_pool *pool,
4735 struct mlx5_devx_obj *last_min_dcs)
4736 {
4737 struct mlx5_flow_counter_ext *cnt_ext;
4738 uint32_t offset, new_offset;
4739 uint32_t skip_cnt = 0;
4740 uint32_t i;
4741
4742 if (!pool->skip_cnt)
4743 return;
4744 /*
4745 * If the last min_dcs is not valid, skipped counters may lie anywhere
4746 * after it, so set the offset to cover the whole pool.
4747 */
4748 if (last_min_dcs->id & (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1))
4749 offset = MLX5_COUNTERS_PER_POOL;
4750 else
4751 offset = last_min_dcs->id % MLX5_COUNTERS_PER_POOL;
4752 new_offset = pool->min_dcs->id % MLX5_COUNTERS_PER_POOL;
4753 /*
4754 * Check the counters from 1 up to the last_min_dcs offset. Counters
4755 * before the new min_dcs indicate that the pool still has skipped
4756 * counters. Counters skipped after the new min_dcs are now ready to
4757 * use. The offset 0 counter must be empty or min_dcs, so start from 1.
4758 */
4759 for (i = 1; i < offset; i++) {
4760 cnt_ext = MLX5_GET_POOL_CNT_EXT(pool, i);
4761 if (cnt_ext->skipped) {
4762 if (i > new_offset) {
4763 cnt_ext->skipped = 0;
4764 TAILQ_INSERT_TAIL
4765 (&pool->counters[pool->query_gen],
4766 MLX5_POOL_GET_CNT(pool, i), next);
4767 } else {
4768 skip_cnt++;
4769 }
4770 }
4771 }
4772 if (!skip_cnt)
4773 pool->skip_cnt = 0;
4774 }
4775
4776 /**
4777 * Prepare a new counter and/or a new counter pool.
4778 *
4779 * @param[in] dev
4780 * Pointer to the Ethernet device structure.
4781 * @param[out] cnt_free
4782 * Where to put the pointer to a new counter.
4783 * @param[in] batch
4784 * Whether the pool is for counters allocated by batch command.
4785 * @param[in] age
4786 * Whether the pool is for counters allocated for aging.
4787 *
4788 * @return
4789 * The counter pool pointer and @p cnt_free is set on success,
4790 * NULL otherwise and rte_errno is set.
4791 */
4792 static struct mlx5_flow_counter_pool *
4793 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
4794 struct mlx5_flow_counter **cnt_free,
4795 uint32_t batch, uint32_t age)
4796 {
4797 struct mlx5_priv *priv = dev->data->dev_private;
4798 struct mlx5_pools_container *cont;
4799 struct mlx5_flow_counter_pool *pool;
4800 struct mlx5_counters tmp_tq;
4801 struct mlx5_devx_obj *last_min_dcs;
4802 struct mlx5_devx_obj *dcs = NULL;
4803 struct mlx5_flow_counter *cnt;
4804 uint32_t add2other;
4805 uint32_t i;
4806
4807 cont = MLX5_CNT_CONTAINER(priv->sh, batch, age);
4808 if (!batch) {
4809 retry:
4810 add2other = 0;
4811 /* bulk_bitmap must be 0 for single counter allocation. */
4812 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
4813 if (!dcs)
4814 return NULL;
4815 pool = flow_dv_find_pool_by_id(cont, dcs->id);
4816 /* Check if the counter belongs to an existing pool ID range. */
4817 if (!pool) {
4818 pool = flow_dv_find_pool_by_id
4819 (MLX5_CNT_CONTAINER
4820 (priv->sh, batch, (age ^ 0x1)), dcs->id);
4821 /*
4822 * The pool exists in the other container; the counter will be
4823 * added to that container and must be reallocated later.
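 * (With add2other set, the counter is queued back to that pool's list
 * and the allocation is retried to get a dcs for the requested
 * container.)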
4824 */ 4825 if (pool) { 4826 add2other = 1; 4827 } else { 4828 pool = flow_dv_pool_create(dev, dcs, batch, 4829 age); 4830 if (!pool) { 4831 mlx5_devx_cmd_destroy(dcs); 4832 return NULL; 4833 } 4834 } 4835 } 4836 if ((dcs->id < pool->min_dcs->id || 4837 pool->min_dcs->id & 4838 (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1)) && 4839 !(dcs->id & (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1))) { 4840 /* 4841 * Update the pool min_dcs only if current dcs is 4842 * valid and exist min_dcs is not valid or greater 4843 * than new dcs. 4844 */ 4845 last_min_dcs = pool->min_dcs; 4846 rte_atomic64_set(&pool->a64_dcs, 4847 (int64_t)(uintptr_t)dcs); 4848 /* 4849 * Restore any skipped counters if the new min_dcs 4850 * ID is smaller or min_dcs is not valid. 4851 */ 4852 if (dcs->id < last_min_dcs->id || 4853 last_min_dcs->id & 4854 (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1)) 4855 flow_dv_counter_restore(pool, last_min_dcs); 4856 } 4857 i = dcs->id % MLX5_COUNTERS_PER_POOL; 4858 cnt = MLX5_POOL_GET_CNT(pool, i); 4859 cnt->pool = pool; 4860 MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs; 4861 /* 4862 * If min_dcs is not valid, it means the new allocated dcs 4863 * also fail to become the valid min_dcs, just skip it. 4864 * Or if min_dcs is valid, and new dcs ID is smaller than 4865 * min_dcs, but not become the min_dcs, also skip it. 4866 */ 4867 if (pool->min_dcs->id & 4868 (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1) || 4869 dcs->id < pool->min_dcs->id) { 4870 MLX5_GET_POOL_CNT_EXT(pool, i)->skipped = 1; 4871 pool->skip_cnt = 1; 4872 goto retry; 4873 } 4874 if (add2other) { 4875 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], 4876 cnt, next); 4877 goto retry; 4878 } 4879 *cnt_free = cnt; 4880 return pool; 4881 } 4882 /* bulk_bitmap is in 128 counters units. */ 4883 if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) 4884 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4); 4885 if (!dcs) { 4886 rte_errno = ENODATA; 4887 return NULL; 4888 } 4889 pool = flow_dv_pool_create(dev, dcs, batch, age); 4890 if (!pool) { 4891 mlx5_devx_cmd_destroy(dcs); 4892 return NULL; 4893 } 4894 TAILQ_INIT(&tmp_tq); 4895 for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) { 4896 cnt = MLX5_POOL_GET_CNT(pool, i); 4897 cnt->pool = pool; 4898 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next); 4899 } 4900 rte_spinlock_lock(&cont->csl); 4901 TAILQ_CONCAT(&cont->counters, &tmp_tq, next); 4902 rte_spinlock_unlock(&cont->csl); 4903 *cnt_free = MLX5_POOL_GET_CNT(pool, 0); 4904 (*cnt_free)->pool = pool; 4905 return pool; 4906 } 4907 4908 /** 4909 * Search for existed shared counter. 4910 * 4911 * @param[in] dev 4912 * Pointer to the Ethernet device structure. 4913 * @param[in] id 4914 * The shared counter ID to search. 4915 * @param[out] ppool 4916 * mlx5 flow counter pool in the container, 4917 * 4918 * @return 4919 * NULL if not existed, otherwise pointer to the shared extend counter. 4920 */ 4921 static struct mlx5_flow_counter_ext * 4922 flow_dv_counter_shared_search(struct rte_eth_dev *dev, uint32_t id, 4923 struct mlx5_flow_counter_pool **ppool) 4924 { 4925 struct mlx5_priv *priv = dev->data->dev_private; 4926 union mlx5_l3t_data data; 4927 uint32_t cnt_idx; 4928 4929 if (mlx5_l3t_get_entry(priv->sh->cnt_id_tbl, id, &data) || !data.dword) 4930 return NULL; 4931 cnt_idx = data.dword; 4932 /* 4933 * Shared counters don't have age info. The counter extend is after 4934 * the counter datat structure. 
4935 */ 4936 return (struct mlx5_flow_counter_ext *) 4937 ((flow_dv_counter_get_by_idx(dev, cnt_idx, ppool)) + 1); 4938 } 4939 4940 /** 4941 * Allocate a flow counter. 4942 * 4943 * @param[in] dev 4944 * Pointer to the Ethernet device structure. 4945 * @param[in] shared 4946 * Indicate if this counter is shared with other flows. 4947 * @param[in] id 4948 * Counter identifier. 4949 * @param[in] group 4950 * Counter flow group. 4951 * @param[in] age 4952 * Whether the counter was allocated for aging. 4953 * 4954 * @return 4955 * Index to flow counter on success, 0 otherwise and rte_errno is set. 4956 */ 4957 static uint32_t 4958 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id, 4959 uint16_t group, uint32_t age) 4960 { 4961 struct mlx5_priv *priv = dev->data->dev_private; 4962 struct mlx5_flow_counter_pool *pool = NULL; 4963 struct mlx5_flow_counter *cnt_free = NULL; 4964 struct mlx5_flow_counter_ext *cnt_ext = NULL; 4965 /* 4966 * Currently group 0 flow counter cannot be assigned to a flow if it is 4967 * not the first one in the batch counter allocation, so it is better 4968 * to allocate counters one by one for these flows in a separate 4969 * container. 4970 * A counter can be shared between different groups so need to take 4971 * shared counters from the single container. 4972 */ 4973 uint32_t batch = (group && !shared && !priv->counter_fallback) ? 1 : 0; 4974 struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch, 4975 age); 4976 uint32_t cnt_idx; 4977 4978 if (!priv->config.devx) { 4979 rte_errno = ENOTSUP; 4980 return 0; 4981 } 4982 if (shared) { 4983 cnt_ext = flow_dv_counter_shared_search(dev, id, &pool); 4984 if (cnt_ext) { 4985 if (cnt_ext->ref_cnt + 1 == 0) { 4986 rte_errno = E2BIG; 4987 return 0; 4988 } 4989 cnt_ext->ref_cnt++; 4990 cnt_idx = pool->index * MLX5_COUNTERS_PER_POOL + 4991 (cnt_ext->dcs->id % MLX5_COUNTERS_PER_POOL) 4992 + 1; 4993 return cnt_idx; 4994 } 4995 } 4996 /* Get free counters from container. */ 4997 rte_spinlock_lock(&cont->csl); 4998 cnt_free = TAILQ_FIRST(&cont->counters); 4999 if (cnt_free) 5000 TAILQ_REMOVE(&cont->counters, cnt_free, next); 5001 rte_spinlock_unlock(&cont->csl); 5002 if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, 5003 batch, age)) 5004 goto err; 5005 pool = cnt_free->pool; 5006 if (!batch) 5007 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free); 5008 /* Create a DV counter action only in the first time usage. */ 5009 if (!cnt_free->action) { 5010 uint16_t offset; 5011 struct mlx5_devx_obj *dcs; 5012 int ret; 5013 5014 if (batch) { 5015 offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free); 5016 dcs = pool->min_dcs; 5017 } else { 5018 offset = 0; 5019 dcs = cnt_ext->dcs; 5020 } 5021 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset, 5022 &cnt_free->action); 5023 if (ret) { 5024 rte_errno = errno; 5025 goto err; 5026 } 5027 } 5028 cnt_idx = MLX5_MAKE_CNT_IDX(pool->index, 5029 MLX5_CNT_ARRAY_IDX(pool, cnt_free)); 5030 cnt_idx += batch * MLX5_CNT_BATCH_OFFSET; 5031 cnt_idx += age * MLX5_CNT_AGE_OFFSET; 5032 /* Update the counter reset values. 
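 * The hits/bytes read here are stored in the counter as its starting
 * point, so that later readings can be reported relative to this
 * baseline.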
*/ 5033 if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits, 5034 &cnt_free->bytes)) 5035 goto err; 5036 if (cnt_ext) { 5037 cnt_ext->shared = shared; 5038 cnt_ext->ref_cnt = 1; 5039 cnt_ext->id = id; 5040 if (shared) { 5041 union mlx5_l3t_data data; 5042 5043 data.dword = cnt_idx; 5044 if (mlx5_l3t_set_entry(priv->sh->cnt_id_tbl, id, &data)) 5045 return 0; 5046 } 5047 } 5048 if (!priv->counter_fallback && !priv->sh->cmng.query_thread_on) 5049 /* Start the asynchronous batch query by the host thread. */ 5050 mlx5_set_query_alarm(priv->sh); 5051 return cnt_idx; 5052 err: 5053 if (cnt_free) { 5054 cnt_free->pool = pool; 5055 rte_spinlock_lock(&cont->csl); 5056 TAILQ_INSERT_TAIL(&cont->counters, cnt_free, next); 5057 rte_spinlock_unlock(&cont->csl); 5058 } 5059 return 0; 5060 } 5061 5062 /** 5063 * Get age param from counter index. 5064 * 5065 * @param[in] dev 5066 * Pointer to the Ethernet device structure. 5067 * @param[in] counter 5068 * Index to the counter handler. 5069 * 5070 * @return 5071 * The aging parameter specified for the counter index. 5072 */ 5073 static struct mlx5_age_param* 5074 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev, 5075 uint32_t counter) 5076 { 5077 struct mlx5_flow_counter *cnt; 5078 struct mlx5_flow_counter_pool *pool = NULL; 5079 5080 flow_dv_counter_get_by_idx(dev, counter, &pool); 5081 counter = (counter - 1) % MLX5_COUNTERS_PER_POOL; 5082 cnt = MLX5_POOL_GET_CNT(pool, counter); 5083 return MLX5_CNT_TO_AGE(cnt); 5084 } 5085 5086 /** 5087 * Remove a flow counter from aged counter list. 5088 * 5089 * @param[in] dev 5090 * Pointer to the Ethernet device structure. 5091 * @param[in] counter 5092 * Index to the counter handler. 5093 * @param[in] cnt 5094 * Pointer to the counter handler. 5095 */ 5096 static void 5097 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev, 5098 uint32_t counter, struct mlx5_flow_counter *cnt) 5099 { 5100 struct mlx5_age_info *age_info; 5101 struct mlx5_age_param *age_param; 5102 struct mlx5_priv *priv = dev->data->dev_private; 5103 5104 age_info = GET_PORT_AGE_INFO(priv); 5105 age_param = flow_dv_counter_idx_get_age(dev, counter); 5106 if (rte_atomic16_cmpset((volatile uint16_t *) 5107 &age_param->state, 5108 AGE_CANDIDATE, AGE_FREE) 5109 != AGE_CANDIDATE) { 5110 /** 5111 * We need the lock even it is age timeout, 5112 * since counter may still in process. 5113 */ 5114 rte_spinlock_lock(&age_info->aged_sl); 5115 TAILQ_REMOVE(&age_info->aged_counters, cnt, next); 5116 rte_spinlock_unlock(&age_info->aged_sl); 5117 } 5118 rte_atomic16_set(&age_param->state, AGE_FREE); 5119 } 5120 /** 5121 * Release a flow counter. 5122 * 5123 * @param[in] dev 5124 * Pointer to the Ethernet device structure. 5125 * @param[in] counter 5126 * Index to the counter handler. 
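 *
 * Shared counters are reference counted; the counter is only returned to
 * its pool once the last reference has been released.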
5127 */
5128 static void
5129 flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
5130 {
5131 struct mlx5_priv *priv = dev->data->dev_private;
5132 struct mlx5_flow_counter_pool *pool = NULL;
5133 struct mlx5_flow_counter *cnt;
5134 struct mlx5_flow_counter_ext *cnt_ext = NULL;
5135
5136 if (!counter)
5137 return;
5138 cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5139 MLX5_ASSERT(pool);
5140 if (counter < MLX5_CNT_BATCH_OFFSET) {
5141 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
5142 if (cnt_ext) {
5143 if (--cnt_ext->ref_cnt)
5144 return;
5145 if (cnt_ext->shared)
5146 mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
5147 cnt_ext->id);
5148 }
5149 }
5150 if (IS_AGE_POOL(pool))
5151 flow_dv_counter_remove_from_age(dev, counter, cnt);
5152 cnt->pool = pool;
5153 /*
5154 * Put the counter back to the list to be updated in non-fallback mode.
5155 * Two lists are currently used alternately: while one is being
5156 * queried, the freed counter is added to the other one, selected by
5157 * the pool query_gen value. After the query finishes, that list is
5158 * appended to the global container counter list. The lists swap when
5159 * a query starts, so no lock is needed here as the query callback and
5160 * this release function always operate on different lists.
5161 *
5162 */
5163 if (!priv->counter_fallback)
5164 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
5165 else
5166 TAILQ_INSERT_TAIL(&((MLX5_CNT_CONTAINER
5167 (priv->sh, 0, 0))->counters),
5168 cnt, next);
5169 }
5170
5171 /**
5172 * Verify the @p attributes will be correctly understood by the NIC and store
5173 * them in the @p flow if everything is correct.
5174 *
5175 * @param[in] dev
5176 * Pointer to dev struct.
5177 * @param[in] attributes
5178 * Pointer to flow attributes.
5179 * @param[in] external
5180 * This flow rule is created by a request external to the PMD.
5181 * @param[out] error
5182 * Pointer to error structure.
5183 *
5184 * @return
5185 * - 0 on success and non-root table.
5186 * - 1 on success and root table.
5187 * - a negative errno value otherwise and rte_errno is set.
5188 */ 5189 static int 5190 flow_dv_validate_attributes(struct rte_eth_dev *dev, 5191 const struct rte_flow_attr *attributes, 5192 bool external __rte_unused, 5193 struct rte_flow_error *error) 5194 { 5195 struct mlx5_priv *priv = dev->data->dev_private; 5196 uint32_t priority_max = priv->config.flow_prio - 1; 5197 int ret = 0; 5198 5199 #ifndef HAVE_MLX5DV_DR 5200 if (attributes->group) 5201 return rte_flow_error_set(error, ENOTSUP, 5202 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, 5203 NULL, 5204 "groups are not supported"); 5205 #else 5206 uint32_t table = 0; 5207 5208 ret = mlx5_flow_group_to_table(attributes, external, 5209 attributes->group, !!priv->fdb_def_rule, 5210 &table, error); 5211 if (ret) 5212 return ret; 5213 if (!table) 5214 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL; 5215 #endif 5216 if (attributes->priority != MLX5_FLOW_PRIO_RSVD && 5217 attributes->priority >= priority_max) 5218 return rte_flow_error_set(error, ENOTSUP, 5219 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, 5220 NULL, 5221 "priority out of range"); 5222 if (attributes->transfer) { 5223 if (!priv->config.dv_esw_en) 5224 return rte_flow_error_set 5225 (error, ENOTSUP, 5226 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 5227 "E-Switch dr is not supported"); 5228 if (!(priv->representor || priv->master)) 5229 return rte_flow_error_set 5230 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 5231 NULL, "E-Switch configuration can only be" 5232 " done by a master or a representor device"); 5233 if (attributes->egress) 5234 return rte_flow_error_set 5235 (error, ENOTSUP, 5236 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes, 5237 "egress is not supported"); 5238 } 5239 if (!(attributes->egress ^ attributes->ingress)) 5240 return rte_flow_error_set(error, ENOTSUP, 5241 RTE_FLOW_ERROR_TYPE_ATTR, NULL, 5242 "must specify exactly one of " 5243 "ingress or egress"); 5244 return ret; 5245 } 5246 5247 /** 5248 * Internal validation function. For validating both actions and items. 5249 * 5250 * @param[in] dev 5251 * Pointer to the rte_eth_dev structure. 5252 * @param[in] attr 5253 * Pointer to the flow attributes. 5254 * @param[in] items 5255 * Pointer to the list of items. 5256 * @param[in] actions 5257 * Pointer to the list of actions. 5258 * @param[in] external 5259 * This flow rule is created by request external to PMD. 5260 * @param[in] hairpin 5261 * Number of hairpin TX actions, 0 means classic flow. 5262 * @param[out] error 5263 * Pointer to the error structure. 5264 * 5265 * @return 5266 * 0 on success, a negative errno value otherwise and rte_errno is set. 
5267 */ 5268 static int 5269 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, 5270 const struct rte_flow_item items[], 5271 const struct rte_flow_action actions[], 5272 bool external, int hairpin, struct rte_flow_error *error) 5273 { 5274 int ret; 5275 uint64_t action_flags = 0; 5276 uint64_t item_flags = 0; 5277 uint64_t last_item = 0; 5278 uint8_t next_protocol = 0xff; 5279 uint16_t ether_type = 0; 5280 int actions_n = 0; 5281 uint8_t item_ipv6_proto = 0; 5282 const struct rte_flow_item *gre_item = NULL; 5283 const struct rte_flow_action_raw_decap *decap; 5284 const struct rte_flow_action_raw_encap *encap; 5285 const struct rte_flow_action_rss *rss; 5286 const struct rte_flow_item_tcp nic_tcp_mask = { 5287 .hdr = { 5288 .tcp_flags = 0xFF, 5289 .src_port = RTE_BE16(UINT16_MAX), 5290 .dst_port = RTE_BE16(UINT16_MAX), 5291 } 5292 }; 5293 const struct rte_flow_item_ipv4 nic_ipv4_mask = { 5294 .hdr = { 5295 .src_addr = RTE_BE32(0xffffffff), 5296 .dst_addr = RTE_BE32(0xffffffff), 5297 .type_of_service = 0xff, 5298 .next_proto_id = 0xff, 5299 .time_to_live = 0xff, 5300 }, 5301 }; 5302 const struct rte_flow_item_ipv6 nic_ipv6_mask = { 5303 .hdr = { 5304 .src_addr = 5305 "\xff\xff\xff\xff\xff\xff\xff\xff" 5306 "\xff\xff\xff\xff\xff\xff\xff\xff", 5307 .dst_addr = 5308 "\xff\xff\xff\xff\xff\xff\xff\xff" 5309 "\xff\xff\xff\xff\xff\xff\xff\xff", 5310 .vtc_flow = RTE_BE32(0xffffffff), 5311 .proto = 0xff, 5312 .hop_limits = 0xff, 5313 }, 5314 }; 5315 const struct rte_flow_item_ecpri nic_ecpri_mask = { 5316 .hdr = { 5317 .common = { 5318 .u32 = 5319 RTE_BE32(((const struct rte_ecpri_common_hdr) { 5320 .type = 0xFF, 5321 }).u32), 5322 }, 5323 .dummy[0] = 0xffffffff, 5324 }, 5325 }; 5326 struct mlx5_priv *priv = dev->data->dev_private; 5327 struct mlx5_dev_config *dev_conf = &priv->config; 5328 uint16_t queue_index = 0xFFFF; 5329 const struct rte_flow_item_vlan *vlan_m = NULL; 5330 int16_t rw_act_num = 0; 5331 uint64_t is_root; 5332 5333 if (items == NULL) 5334 return -1; 5335 ret = flow_dv_validate_attributes(dev, attr, external, error); 5336 if (ret < 0) 5337 return ret; 5338 is_root = (uint64_t)ret; 5339 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { 5340 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 5341 int type = items->type; 5342 5343 if (!mlx5_flow_os_item_supported(type)) 5344 return rte_flow_error_set(error, ENOTSUP, 5345 RTE_FLOW_ERROR_TYPE_ITEM, 5346 NULL, "item not supported"); 5347 switch (type) { 5348 case RTE_FLOW_ITEM_TYPE_VOID: 5349 break; 5350 case RTE_FLOW_ITEM_TYPE_PORT_ID: 5351 ret = flow_dv_validate_item_port_id 5352 (dev, items, attr, item_flags, error); 5353 if (ret < 0) 5354 return ret; 5355 last_item = MLX5_FLOW_ITEM_PORT_ID; 5356 break; 5357 case RTE_FLOW_ITEM_TYPE_ETH: 5358 ret = mlx5_flow_validate_item_eth(items, item_flags, 5359 error); 5360 if (ret < 0) 5361 return ret; 5362 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : 5363 MLX5_FLOW_LAYER_OUTER_L2; 5364 if (items->mask != NULL && items->spec != NULL) { 5365 ether_type = 5366 ((const struct rte_flow_item_eth *) 5367 items->spec)->type; 5368 ether_type &= 5369 ((const struct rte_flow_item_eth *) 5370 items->mask)->type; 5371 ether_type = rte_be_to_cpu_16(ether_type); 5372 } else { 5373 ether_type = 0; 5374 } 5375 break; 5376 case RTE_FLOW_ITEM_TYPE_VLAN: 5377 ret = flow_dv_validate_item_vlan(items, item_flags, 5378 dev, error); 5379 if (ret < 0) 5380 return ret; 5381 last_item = tunnel ? 
MLX5_FLOW_LAYER_INNER_VLAN : 5382 MLX5_FLOW_LAYER_OUTER_VLAN; 5383 if (items->mask != NULL && items->spec != NULL) { 5384 ether_type = 5385 ((const struct rte_flow_item_vlan *) 5386 items->spec)->inner_type; 5387 ether_type &= 5388 ((const struct rte_flow_item_vlan *) 5389 items->mask)->inner_type; 5390 ether_type = rte_be_to_cpu_16(ether_type); 5391 } else { 5392 ether_type = 0; 5393 } 5394 /* Store outer VLAN mask for of_push_vlan action. */ 5395 if (!tunnel) 5396 vlan_m = items->mask; 5397 break; 5398 case RTE_FLOW_ITEM_TYPE_IPV4: 5399 mlx5_flow_tunnel_ip_check(items, next_protocol, 5400 &item_flags, &tunnel); 5401 ret = mlx5_flow_validate_item_ipv4(items, item_flags, 5402 last_item, 5403 ether_type, 5404 &nic_ipv4_mask, 5405 error); 5406 if (ret < 0) 5407 return ret; 5408 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : 5409 MLX5_FLOW_LAYER_OUTER_L3_IPV4; 5410 if (items->mask != NULL && 5411 ((const struct rte_flow_item_ipv4 *) 5412 items->mask)->hdr.next_proto_id) { 5413 next_protocol = 5414 ((const struct rte_flow_item_ipv4 *) 5415 (items->spec))->hdr.next_proto_id; 5416 next_protocol &= 5417 ((const struct rte_flow_item_ipv4 *) 5418 (items->mask))->hdr.next_proto_id; 5419 } else { 5420 /* Reset for inner layer. */ 5421 next_protocol = 0xff; 5422 } 5423 break; 5424 case RTE_FLOW_ITEM_TYPE_IPV6: 5425 mlx5_flow_tunnel_ip_check(items, next_protocol, 5426 &item_flags, &tunnel); 5427 ret = mlx5_flow_validate_item_ipv6(items, item_flags, 5428 last_item, 5429 ether_type, 5430 &nic_ipv6_mask, 5431 error); 5432 if (ret < 0) 5433 return ret; 5434 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : 5435 MLX5_FLOW_LAYER_OUTER_L3_IPV6; 5436 if (items->mask != NULL && 5437 ((const struct rte_flow_item_ipv6 *) 5438 items->mask)->hdr.proto) { 5439 item_ipv6_proto = 5440 ((const struct rte_flow_item_ipv6 *) 5441 items->spec)->hdr.proto; 5442 next_protocol = 5443 ((const struct rte_flow_item_ipv6 *) 5444 items->spec)->hdr.proto; 5445 next_protocol &= 5446 ((const struct rte_flow_item_ipv6 *) 5447 items->mask)->hdr.proto; 5448 } else { 5449 /* Reset for inner layer. */ 5450 next_protocol = 0xff; 5451 } 5452 break; 5453 case RTE_FLOW_ITEM_TYPE_TCP: 5454 ret = mlx5_flow_validate_item_tcp 5455 (items, item_flags, 5456 next_protocol, 5457 &nic_tcp_mask, 5458 error); 5459 if (ret < 0) 5460 return ret; 5461 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP : 5462 MLX5_FLOW_LAYER_OUTER_L4_TCP; 5463 break; 5464 case RTE_FLOW_ITEM_TYPE_UDP: 5465 ret = mlx5_flow_validate_item_udp(items, item_flags, 5466 next_protocol, 5467 error); 5468 if (ret < 0) 5469 return ret; 5470 last_item = tunnel ? 
MLX5_FLOW_LAYER_INNER_L4_UDP : 5471 MLX5_FLOW_LAYER_OUTER_L4_UDP; 5472 break; 5473 case RTE_FLOW_ITEM_TYPE_GRE: 5474 ret = mlx5_flow_validate_item_gre(items, item_flags, 5475 next_protocol, error); 5476 if (ret < 0) 5477 return ret; 5478 gre_item = items; 5479 last_item = MLX5_FLOW_LAYER_GRE; 5480 break; 5481 case RTE_FLOW_ITEM_TYPE_NVGRE: 5482 ret = mlx5_flow_validate_item_nvgre(items, item_flags, 5483 next_protocol, 5484 error); 5485 if (ret < 0) 5486 return ret; 5487 last_item = MLX5_FLOW_LAYER_NVGRE; 5488 break; 5489 case RTE_FLOW_ITEM_TYPE_GRE_KEY: 5490 ret = mlx5_flow_validate_item_gre_key 5491 (items, item_flags, gre_item, error); 5492 if (ret < 0) 5493 return ret; 5494 last_item = MLX5_FLOW_LAYER_GRE_KEY; 5495 break; 5496 case RTE_FLOW_ITEM_TYPE_VXLAN: 5497 ret = mlx5_flow_validate_item_vxlan(items, item_flags, 5498 error); 5499 if (ret < 0) 5500 return ret; 5501 last_item = MLX5_FLOW_LAYER_VXLAN; 5502 break; 5503 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: 5504 ret = mlx5_flow_validate_item_vxlan_gpe(items, 5505 item_flags, dev, 5506 error); 5507 if (ret < 0) 5508 return ret; 5509 last_item = MLX5_FLOW_LAYER_VXLAN_GPE; 5510 break; 5511 case RTE_FLOW_ITEM_TYPE_GENEVE: 5512 ret = mlx5_flow_validate_item_geneve(items, 5513 item_flags, dev, 5514 error); 5515 if (ret < 0) 5516 return ret; 5517 last_item = MLX5_FLOW_LAYER_GENEVE; 5518 break; 5519 case RTE_FLOW_ITEM_TYPE_MPLS: 5520 ret = mlx5_flow_validate_item_mpls(dev, items, 5521 item_flags, 5522 last_item, error); 5523 if (ret < 0) 5524 return ret; 5525 last_item = MLX5_FLOW_LAYER_MPLS; 5526 break; 5527 5528 case RTE_FLOW_ITEM_TYPE_MARK: 5529 ret = flow_dv_validate_item_mark(dev, items, attr, 5530 error); 5531 if (ret < 0) 5532 return ret; 5533 last_item = MLX5_FLOW_ITEM_MARK; 5534 break; 5535 case RTE_FLOW_ITEM_TYPE_META: 5536 ret = flow_dv_validate_item_meta(dev, items, attr, 5537 error); 5538 if (ret < 0) 5539 return ret; 5540 last_item = MLX5_FLOW_ITEM_METADATA; 5541 break; 5542 case RTE_FLOW_ITEM_TYPE_ICMP: 5543 ret = mlx5_flow_validate_item_icmp(items, item_flags, 5544 next_protocol, 5545 error); 5546 if (ret < 0) 5547 return ret; 5548 last_item = MLX5_FLOW_LAYER_ICMP; 5549 break; 5550 case RTE_FLOW_ITEM_TYPE_ICMP6: 5551 ret = mlx5_flow_validate_item_icmp6(items, item_flags, 5552 next_protocol, 5553 error); 5554 if (ret < 0) 5555 return ret; 5556 item_ipv6_proto = IPPROTO_ICMPV6; 5557 last_item = MLX5_FLOW_LAYER_ICMP6; 5558 break; 5559 case RTE_FLOW_ITEM_TYPE_TAG: 5560 ret = flow_dv_validate_item_tag(dev, items, 5561 attr, error); 5562 if (ret < 0) 5563 return ret; 5564 last_item = MLX5_FLOW_ITEM_TAG; 5565 break; 5566 case MLX5_RTE_FLOW_ITEM_TYPE_TAG: 5567 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE: 5568 break; 5569 case RTE_FLOW_ITEM_TYPE_GTP: 5570 ret = flow_dv_validate_item_gtp(dev, items, item_flags, 5571 error); 5572 if (ret < 0) 5573 return ret; 5574 last_item = MLX5_FLOW_LAYER_GTP; 5575 break; 5576 case RTE_FLOW_ITEM_TYPE_ECPRI: 5577 /* Capacity will be checked in the translate stage. 
*/ 5578 ret = mlx5_flow_validate_item_ecpri(items, item_flags, 5579 last_item, 5580 ether_type, 5581 &nic_ecpri_mask, 5582 error); 5583 if (ret < 0) 5584 return ret; 5585 last_item = MLX5_FLOW_LAYER_ECPRI; 5586 break; 5587 default: 5588 return rte_flow_error_set(error, ENOTSUP, 5589 RTE_FLOW_ERROR_TYPE_ITEM, 5590 NULL, "item not supported"); 5591 } 5592 item_flags |= last_item; 5593 } 5594 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 5595 int type = actions->type; 5596 5597 if (!mlx5_flow_os_action_supported(type)) 5598 return rte_flow_error_set(error, ENOTSUP, 5599 RTE_FLOW_ERROR_TYPE_ACTION, 5600 actions, 5601 "action not supported"); 5602 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS) 5603 return rte_flow_error_set(error, ENOTSUP, 5604 RTE_FLOW_ERROR_TYPE_ACTION, 5605 actions, "too many actions"); 5606 switch (type) { 5607 case RTE_FLOW_ACTION_TYPE_VOID: 5608 break; 5609 case RTE_FLOW_ACTION_TYPE_PORT_ID: 5610 ret = flow_dv_validate_action_port_id(dev, 5611 action_flags, 5612 actions, 5613 attr, 5614 error); 5615 if (ret) 5616 return ret; 5617 action_flags |= MLX5_FLOW_ACTION_PORT_ID; 5618 ++actions_n; 5619 break; 5620 case RTE_FLOW_ACTION_TYPE_FLAG: 5621 ret = flow_dv_validate_action_flag(dev, action_flags, 5622 attr, error); 5623 if (ret < 0) 5624 return ret; 5625 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 5626 /* Count all modify-header actions as one. */ 5627 if (!(action_flags & 5628 MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5629 ++actions_n; 5630 action_flags |= MLX5_FLOW_ACTION_FLAG | 5631 MLX5_FLOW_ACTION_MARK_EXT; 5632 } else { 5633 action_flags |= MLX5_FLOW_ACTION_FLAG; 5634 ++actions_n; 5635 } 5636 rw_act_num += MLX5_ACT_NUM_SET_MARK; 5637 break; 5638 case RTE_FLOW_ACTION_TYPE_MARK: 5639 ret = flow_dv_validate_action_mark(dev, actions, 5640 action_flags, 5641 attr, error); 5642 if (ret < 0) 5643 return ret; 5644 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 5645 /* Count all modify-header actions as one. */ 5646 if (!(action_flags & 5647 MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5648 ++actions_n; 5649 action_flags |= MLX5_FLOW_ACTION_MARK | 5650 MLX5_FLOW_ACTION_MARK_EXT; 5651 } else { 5652 action_flags |= MLX5_FLOW_ACTION_MARK; 5653 ++actions_n; 5654 } 5655 rw_act_num += MLX5_ACT_NUM_SET_MARK; 5656 break; 5657 case RTE_FLOW_ACTION_TYPE_SET_META: 5658 ret = flow_dv_validate_action_set_meta(dev, actions, 5659 action_flags, 5660 attr, error); 5661 if (ret < 0) 5662 return ret; 5663 /* Count all modify-header actions as one action. */ 5664 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5665 ++actions_n; 5666 action_flags |= MLX5_FLOW_ACTION_SET_META; 5667 rw_act_num += MLX5_ACT_NUM_SET_META; 5668 break; 5669 case RTE_FLOW_ACTION_TYPE_SET_TAG: 5670 ret = flow_dv_validate_action_set_tag(dev, actions, 5671 action_flags, 5672 attr, error); 5673 if (ret < 0) 5674 return ret; 5675 /* Count all modify-header actions as one action. 
*/ 5676 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5677 ++actions_n; 5678 action_flags |= MLX5_FLOW_ACTION_SET_TAG; 5679 rw_act_num += MLX5_ACT_NUM_SET_TAG; 5680 break; 5681 case RTE_FLOW_ACTION_TYPE_DROP: 5682 ret = mlx5_flow_validate_action_drop(action_flags, 5683 attr, error); 5684 if (ret < 0) 5685 return ret; 5686 action_flags |= MLX5_FLOW_ACTION_DROP; 5687 ++actions_n; 5688 break; 5689 case RTE_FLOW_ACTION_TYPE_QUEUE: 5690 ret = mlx5_flow_validate_action_queue(actions, 5691 action_flags, dev, 5692 attr, error); 5693 if (ret < 0) 5694 return ret; 5695 queue_index = ((const struct rte_flow_action_queue *) 5696 (actions->conf))->index; 5697 action_flags |= MLX5_FLOW_ACTION_QUEUE; 5698 ++actions_n; 5699 break; 5700 case RTE_FLOW_ACTION_TYPE_RSS: 5701 rss = actions->conf; 5702 ret = mlx5_flow_validate_action_rss(actions, 5703 action_flags, dev, 5704 attr, item_flags, 5705 error); 5706 if (ret < 0) 5707 return ret; 5708 if (rss != NULL && rss->queue_num) 5709 queue_index = rss->queue[0]; 5710 action_flags |= MLX5_FLOW_ACTION_RSS; 5711 ++actions_n; 5712 break; 5713 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS: 5714 ret = 5715 mlx5_flow_validate_action_default_miss(action_flags, 5716 attr, error); 5717 if (ret < 0) 5718 return ret; 5719 action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS; 5720 ++actions_n; 5721 break; 5722 case RTE_FLOW_ACTION_TYPE_COUNT: 5723 ret = flow_dv_validate_action_count(dev, error); 5724 if (ret < 0) 5725 return ret; 5726 action_flags |= MLX5_FLOW_ACTION_COUNT; 5727 ++actions_n; 5728 break; 5729 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: 5730 if (flow_dv_validate_action_pop_vlan(dev, 5731 action_flags, 5732 actions, 5733 item_flags, attr, 5734 error)) 5735 return -rte_errno; 5736 action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN; 5737 ++actions_n; 5738 break; 5739 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 5740 ret = flow_dv_validate_action_push_vlan(dev, 5741 action_flags, 5742 vlan_m, 5743 actions, attr, 5744 error); 5745 if (ret < 0) 5746 return ret; 5747 action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN; 5748 ++actions_n; 5749 break; 5750 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: 5751 ret = flow_dv_validate_action_set_vlan_pcp 5752 (action_flags, actions, error); 5753 if (ret < 0) 5754 return ret; 5755 /* Count PCP with push_vlan command. */ 5756 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP; 5757 break; 5758 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 5759 ret = flow_dv_validate_action_set_vlan_vid 5760 (item_flags, action_flags, 5761 actions, error); 5762 if (ret < 0) 5763 return ret; 5764 /* Count VID with push_vlan command. 
*/ 5765 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID; 5766 rw_act_num += MLX5_ACT_NUM_MDF_VID; 5767 break; 5768 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 5769 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 5770 ret = flow_dv_validate_action_l2_encap(dev, 5771 action_flags, 5772 actions, attr, 5773 error); 5774 if (ret < 0) 5775 return ret; 5776 action_flags |= MLX5_FLOW_ACTION_ENCAP; 5777 ++actions_n; 5778 break; 5779 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: 5780 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: 5781 ret = flow_dv_validate_action_decap(dev, action_flags, 5782 attr, error); 5783 if (ret < 0) 5784 return ret; 5785 action_flags |= MLX5_FLOW_ACTION_DECAP; 5786 ++actions_n; 5787 break; 5788 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 5789 ret = flow_dv_validate_action_raw_encap_decap 5790 (dev, NULL, actions->conf, attr, &action_flags, 5791 &actions_n, error); 5792 if (ret < 0) 5793 return ret; 5794 break; 5795 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 5796 decap = actions->conf; 5797 while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID) 5798 ; 5799 if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { 5800 encap = NULL; 5801 actions--; 5802 } else { 5803 encap = actions->conf; 5804 } 5805 ret = flow_dv_validate_action_raw_encap_decap 5806 (dev, 5807 decap ? decap : &empty_decap, encap, 5808 attr, &action_flags, &actions_n, 5809 error); 5810 if (ret < 0) 5811 return ret; 5812 break; 5813 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: 5814 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: 5815 ret = flow_dv_validate_action_modify_mac(action_flags, 5816 actions, 5817 item_flags, 5818 error); 5819 if (ret < 0) 5820 return ret; 5821 /* Count all modify-header actions as one action. */ 5822 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5823 ++actions_n; 5824 action_flags |= actions->type == 5825 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ? 5826 MLX5_FLOW_ACTION_SET_MAC_SRC : 5827 MLX5_FLOW_ACTION_SET_MAC_DST; 5828 /* 5829 * Even if the source and destination MAC addresses have 5830 * overlap in the header with 4B alignment, the convert 5831 * function will handle them separately and 4 SW actions 5832 * will be created. And 2 actions will be added each 5833 * time no matter how many bytes of address will be set. 5834 */ 5835 rw_act_num += MLX5_ACT_NUM_MDF_MAC; 5836 break; 5837 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: 5838 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: 5839 ret = flow_dv_validate_action_modify_ipv4(action_flags, 5840 actions, 5841 item_flags, 5842 error); 5843 if (ret < 0) 5844 return ret; 5845 /* Count all modify-header actions as one action. */ 5846 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5847 ++actions_n; 5848 action_flags |= actions->type == 5849 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ? 5850 MLX5_FLOW_ACTION_SET_IPV4_SRC : 5851 MLX5_FLOW_ACTION_SET_IPV4_DST; 5852 rw_act_num += MLX5_ACT_NUM_MDF_IPV4; 5853 break; 5854 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: 5855 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: 5856 ret = flow_dv_validate_action_modify_ipv6(action_flags, 5857 actions, 5858 item_flags, 5859 error); 5860 if (ret < 0) 5861 return ret; 5862 if (item_ipv6_proto == IPPROTO_ICMPV6) 5863 return rte_flow_error_set(error, ENOTSUP, 5864 RTE_FLOW_ERROR_TYPE_ACTION, 5865 actions, 5866 "Can't change header " 5867 "with ICMPv6 proto"); 5868 /* Count all modify-header actions as one action. */ 5869 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5870 ++actions_n; 5871 action_flags |= actions->type == 5872 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ? 
5873 MLX5_FLOW_ACTION_SET_IPV6_SRC : 5874 MLX5_FLOW_ACTION_SET_IPV6_DST; 5875 rw_act_num += MLX5_ACT_NUM_MDF_IPV6; 5876 break; 5877 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: 5878 case RTE_FLOW_ACTION_TYPE_SET_TP_DST: 5879 ret = flow_dv_validate_action_modify_tp(action_flags, 5880 actions, 5881 item_flags, 5882 error); 5883 if (ret < 0) 5884 return ret; 5885 /* Count all modify-header actions as one action. */ 5886 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5887 ++actions_n; 5888 action_flags |= actions->type == 5889 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ? 5890 MLX5_FLOW_ACTION_SET_TP_SRC : 5891 MLX5_FLOW_ACTION_SET_TP_DST; 5892 rw_act_num += MLX5_ACT_NUM_MDF_PORT; 5893 break; 5894 case RTE_FLOW_ACTION_TYPE_DEC_TTL: 5895 case RTE_FLOW_ACTION_TYPE_SET_TTL: 5896 ret = flow_dv_validate_action_modify_ttl(action_flags, 5897 actions, 5898 item_flags, 5899 error); 5900 if (ret < 0) 5901 return ret; 5902 /* Count all modify-header actions as one action. */ 5903 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5904 ++actions_n; 5905 action_flags |= actions->type == 5906 RTE_FLOW_ACTION_TYPE_SET_TTL ? 5907 MLX5_FLOW_ACTION_SET_TTL : 5908 MLX5_FLOW_ACTION_DEC_TTL; 5909 rw_act_num += MLX5_ACT_NUM_MDF_TTL; 5910 break; 5911 case RTE_FLOW_ACTION_TYPE_JUMP: 5912 ret = flow_dv_validate_action_jump(actions, 5913 action_flags, 5914 attr, external, 5915 error); 5916 if (ret) 5917 return ret; 5918 ++actions_n; 5919 action_flags |= MLX5_FLOW_ACTION_JUMP; 5920 break; 5921 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ: 5922 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ: 5923 ret = flow_dv_validate_action_modify_tcp_seq 5924 (action_flags, 5925 actions, 5926 item_flags, 5927 error); 5928 if (ret < 0) 5929 return ret; 5930 /* Count all modify-header actions as one action. */ 5931 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5932 ++actions_n; 5933 action_flags |= actions->type == 5934 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ? 5935 MLX5_FLOW_ACTION_INC_TCP_SEQ : 5936 MLX5_FLOW_ACTION_DEC_TCP_SEQ; 5937 rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ; 5938 break; 5939 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK: 5940 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK: 5941 ret = flow_dv_validate_action_modify_tcp_ack 5942 (action_flags, 5943 actions, 5944 item_flags, 5945 error); 5946 if (ret < 0) 5947 return ret; 5948 /* Count all modify-header actions as one action. */ 5949 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5950 ++actions_n; 5951 action_flags |= actions->type == 5952 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ? 5953 MLX5_FLOW_ACTION_INC_TCP_ACK : 5954 MLX5_FLOW_ACTION_DEC_TCP_ACK; 5955 rw_act_num += MLX5_ACT_NUM_MDF_TCPACK; 5956 break; 5957 case MLX5_RTE_FLOW_ACTION_TYPE_MARK: 5958 break; 5959 case MLX5_RTE_FLOW_ACTION_TYPE_TAG: 5960 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG: 5961 rw_act_num += MLX5_ACT_NUM_SET_TAG; 5962 break; 5963 case RTE_FLOW_ACTION_TYPE_METER: 5964 ret = mlx5_flow_validate_action_meter(dev, 5965 action_flags, 5966 actions, attr, 5967 error); 5968 if (ret < 0) 5969 return ret; 5970 action_flags |= MLX5_FLOW_ACTION_METER; 5971 ++actions_n; 5972 /* Meter action will add one more TAG action. 
*/ 5973 rw_act_num += MLX5_ACT_NUM_SET_TAG; 5974 break; 5975 case RTE_FLOW_ACTION_TYPE_AGE: 5976 ret = flow_dv_validate_action_age(action_flags, 5977 actions, dev, 5978 error); 5979 if (ret < 0) 5980 return ret; 5981 action_flags |= MLX5_FLOW_ACTION_AGE; 5982 ++actions_n; 5983 break; 5984 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP: 5985 ret = flow_dv_validate_action_modify_ipv4_dscp 5986 (action_flags, 5987 actions, 5988 item_flags, 5989 error); 5990 if (ret < 0) 5991 return ret; 5992 /* Count all modify-header actions as one action. */ 5993 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5994 ++actions_n; 5995 action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP; 5996 rw_act_num += MLX5_ACT_NUM_SET_DSCP; 5997 break; 5998 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: 5999 ret = flow_dv_validate_action_modify_ipv6_dscp 6000 (action_flags, 6001 actions, 6002 item_flags, 6003 error); 6004 if (ret < 0) 6005 return ret; 6006 /* Count all modify-header actions as one action. */ 6007 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 6008 ++actions_n; 6009 action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP; 6010 rw_act_num += MLX5_ACT_NUM_SET_DSCP; 6011 break; 6012 case RTE_FLOW_ACTION_TYPE_SAMPLE: 6013 ret = flow_dv_validate_action_sample(action_flags, 6014 actions, dev, 6015 attr, error); 6016 if (ret < 0) 6017 return ret; 6018 action_flags |= MLX5_FLOW_ACTION_SAMPLE; 6019 ++actions_n; 6020 break; 6021 default: 6022 return rte_flow_error_set(error, ENOTSUP, 6023 RTE_FLOW_ERROR_TYPE_ACTION, 6024 actions, 6025 "action not supported"); 6026 } 6027 } 6028 /* 6029 * Validate the drop action mutual exclusion with other actions. 6030 * Drop action is mutually-exclusive with any other action, except for 6031 * Count action. 6032 */ 6033 if ((action_flags & MLX5_FLOW_ACTION_DROP) && 6034 (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT))) 6035 return rte_flow_error_set(error, EINVAL, 6036 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 6037 "Drop action is mutually-exclusive " 6038 "with any other action, except for " 6039 "Count action"); 6040 /* Eswitch has few restrictions on using items and actions */ 6041 if (attr->transfer) { 6042 if (!mlx5_flow_ext_mreg_supported(dev) && 6043 action_flags & MLX5_FLOW_ACTION_FLAG) 6044 return rte_flow_error_set(error, ENOTSUP, 6045 RTE_FLOW_ERROR_TYPE_ACTION, 6046 NULL, 6047 "unsupported action FLAG"); 6048 if (!mlx5_flow_ext_mreg_supported(dev) && 6049 action_flags & MLX5_FLOW_ACTION_MARK) 6050 return rte_flow_error_set(error, ENOTSUP, 6051 RTE_FLOW_ERROR_TYPE_ACTION, 6052 NULL, 6053 "unsupported action MARK"); 6054 if (action_flags & MLX5_FLOW_ACTION_QUEUE) 6055 return rte_flow_error_set(error, ENOTSUP, 6056 RTE_FLOW_ERROR_TYPE_ACTION, 6057 NULL, 6058 "unsupported action QUEUE"); 6059 if (action_flags & MLX5_FLOW_ACTION_RSS) 6060 return rte_flow_error_set(error, ENOTSUP, 6061 RTE_FLOW_ERROR_TYPE_ACTION, 6062 NULL, 6063 "unsupported action RSS"); 6064 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS)) 6065 return rte_flow_error_set(error, EINVAL, 6066 RTE_FLOW_ERROR_TYPE_ACTION, 6067 actions, 6068 "no fate action is found"); 6069 } else { 6070 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress) 6071 return rte_flow_error_set(error, EINVAL, 6072 RTE_FLOW_ERROR_TYPE_ACTION, 6073 actions, 6074 "no fate action is found"); 6075 } 6076 /* Continue validation for Xcap and VLAN actions.*/ 6077 if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS | 6078 MLX5_FLOW_VLAN_ACTIONS)) && 6079 (queue_index == 0xFFFF || 6080 mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) 
{ 6081 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) == 6082 MLX5_FLOW_XCAP_ACTIONS) 6083 return rte_flow_error_set(error, ENOTSUP, 6084 RTE_FLOW_ERROR_TYPE_ACTION, 6085 NULL, "encap and decap " 6086 "combination aren't supported"); 6087 if (!attr->transfer && attr->ingress) { 6088 if (action_flags & MLX5_FLOW_ACTION_ENCAP) 6089 return rte_flow_error_set 6090 (error, ENOTSUP, 6091 RTE_FLOW_ERROR_TYPE_ACTION, 6092 NULL, "encap is not supported" 6093 " for ingress traffic"); 6094 else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) 6095 return rte_flow_error_set 6096 (error, ENOTSUP, 6097 RTE_FLOW_ERROR_TYPE_ACTION, 6098 NULL, "push VLAN action not " 6099 "supported for ingress"); 6100 else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) == 6101 MLX5_FLOW_VLAN_ACTIONS) 6102 return rte_flow_error_set 6103 (error, ENOTSUP, 6104 RTE_FLOW_ERROR_TYPE_ACTION, 6105 NULL, "no support for " 6106 "multiple VLAN actions"); 6107 } 6108 } 6109 /* Hairpin flow will add one more TAG action. */ 6110 if (hairpin > 0) 6111 rw_act_num += MLX5_ACT_NUM_SET_TAG; 6112 /* extra metadata enabled: one more TAG action will be add. */ 6113 if (dev_conf->dv_flow_en && 6114 dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && 6115 mlx5_flow_ext_mreg_supported(dev)) 6116 rw_act_num += MLX5_ACT_NUM_SET_TAG; 6117 if ((uint32_t)rw_act_num > 6118 flow_dv_modify_hdr_action_max(dev, is_root)) { 6119 return rte_flow_error_set(error, ENOTSUP, 6120 RTE_FLOW_ERROR_TYPE_ACTION, 6121 NULL, "too many header modify" 6122 " actions to support"); 6123 } 6124 return 0; 6125 } 6126 6127 /** 6128 * Internal preparation function. Allocates the DV flow size, 6129 * this size is constant. 6130 * 6131 * @param[in] dev 6132 * Pointer to the rte_eth_dev structure. 6133 * @param[in] attr 6134 * Pointer to the flow attributes. 6135 * @param[in] items 6136 * Pointer to the list of items. 6137 * @param[in] actions 6138 * Pointer to the list of actions. 6139 * @param[out] error 6140 * Pointer to the error structure. 6141 * 6142 * @return 6143 * Pointer to mlx5_flow object on success, 6144 * otherwise NULL and rte_errno is set. 6145 */ 6146 static struct mlx5_flow * 6147 flow_dv_prepare(struct rte_eth_dev *dev, 6148 const struct rte_flow_attr *attr __rte_unused, 6149 const struct rte_flow_item items[] __rte_unused, 6150 const struct rte_flow_action actions[] __rte_unused, 6151 struct rte_flow_error *error) 6152 { 6153 uint32_t handle_idx = 0; 6154 struct mlx5_flow *dev_flow; 6155 struct mlx5_flow_handle *dev_handle; 6156 struct mlx5_priv *priv = dev->data->dev_private; 6157 6158 /* In case of corrupting the memory. */ 6159 if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) { 6160 rte_flow_error_set(error, ENOSPC, 6161 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 6162 "not free temporary device flow"); 6163 return NULL; 6164 } 6165 dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], 6166 &handle_idx); 6167 if (!dev_handle) { 6168 rte_flow_error_set(error, ENOMEM, 6169 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 6170 "not enough memory to create flow handle"); 6171 return NULL; 6172 } 6173 /* No multi-thread supporting. */ 6174 dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++]; 6175 dev_flow->handle = dev_handle; 6176 dev_flow->handle_idx = handle_idx; 6177 /* 6178 * In some old rdma-core releases, before continuing, a check of the 6179 * length of matching parameter will be done at first. It needs to use 6180 * the length without misc4 param. If the flow has misc4 support, then 6181 * the length needs to be adjusted accordingly. 
Each param member is 6182 * aligned with a 64B boundary naturally. 6183 */ 6184 dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) - 6185 MLX5_ST_SZ_BYTES(fte_match_set_misc4); 6186 /* 6187 * The matching value needs to be cleared to 0 before using. In the 6188 * past, it will be automatically cleared when using rte_*alloc 6189 * API. The time consumption will be almost the same as before. 6190 */ 6191 memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param)); 6192 dev_flow->ingress = attr->ingress; 6193 dev_flow->dv.transfer = attr->transfer; 6194 return dev_flow; 6195 } 6196 6197 #ifdef RTE_LIBRTE_MLX5_DEBUG 6198 /** 6199 * Sanity check for match mask and value. Similar to check_valid_spec() in 6200 * kernel driver. If unmasked bit is present in value, it returns failure. 6201 * 6202 * @param match_mask 6203 * pointer to match mask buffer. 6204 * @param match_value 6205 * pointer to match value buffer. 6206 * 6207 * @return 6208 * 0 if valid, -EINVAL otherwise. 6209 */ 6210 static int 6211 flow_dv_check_valid_spec(void *match_mask, void *match_value) 6212 { 6213 uint8_t *m = match_mask; 6214 uint8_t *v = match_value; 6215 unsigned int i; 6216 6217 for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) { 6218 if (v[i] & ~m[i]) { 6219 DRV_LOG(ERR, 6220 "match_value differs from match_criteria" 6221 " %p[%u] != %p[%u]", 6222 match_value, i, match_mask, i); 6223 return -EINVAL; 6224 } 6225 } 6226 return 0; 6227 } 6228 #endif 6229 6230 /** 6231 * Add match of ip_version. 6232 * 6233 * @param[in] group 6234 * Flow group. 6235 * @param[in] headers_v 6236 * Values header pointer. 6237 * @param[in] headers_m 6238 * Masks header pointer. 6239 * @param[in] ip_version 6240 * The IP version to set. 6241 */ 6242 static inline void 6243 flow_dv_set_match_ip_version(uint32_t group, 6244 void *headers_v, 6245 void *headers_m, 6246 uint8_t ip_version) 6247 { 6248 if (group == 0) 6249 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf); 6250 else 6251 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 6252 ip_version); 6253 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version); 6254 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0); 6255 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0); 6256 } 6257 6258 /** 6259 * Add Ethernet item to matcher and to the value. 6260 * 6261 * @param[in, out] matcher 6262 * Flow matcher. 6263 * @param[in, out] key 6264 * Flow matcher value. 6265 * @param[in] item 6266 * Flow pattern to translate. 6267 * @param[in] inner 6268 * Item is inner pattern. 
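 * @param[in] group
 *   The group to insert the rule.
 *
 * Note: as in the other item translate helpers below, the raw mask bytes go
 * into the matcher buffer and the key receives (spec & mask), so the value
 * never exceeds the mask. A minimal, hypothetical call from a translate
 * loop (buffer names are illustrative only) would be:
 *   flow_dv_translate_item_eth(matcher_buf, key_buf, items, !!tunnel, group);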
6269 */ 6270 static void 6271 flow_dv_translate_item_eth(void *matcher, void *key, 6272 const struct rte_flow_item *item, int inner, 6273 uint32_t group) 6274 { 6275 const struct rte_flow_item_eth *eth_m = item->mask; 6276 const struct rte_flow_item_eth *eth_v = item->spec; 6277 const struct rte_flow_item_eth nic_mask = { 6278 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", 6279 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", 6280 .type = RTE_BE16(0xffff), 6281 }; 6282 void *headers_m; 6283 void *headers_v; 6284 char *l24_v; 6285 unsigned int i; 6286 6287 if (!eth_v) 6288 return; 6289 if (!eth_m) 6290 eth_m = &nic_mask; 6291 if (inner) { 6292 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6293 inner_headers); 6294 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 6295 } else { 6296 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6297 outer_headers); 6298 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 6299 } 6300 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16), 6301 &eth_m->dst, sizeof(eth_m->dst)); 6302 /* The value must be in the range of the mask. */ 6303 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16); 6304 for (i = 0; i < sizeof(eth_m->dst); ++i) 6305 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i]; 6306 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16), 6307 &eth_m->src, sizeof(eth_m->src)); 6308 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16); 6309 /* The value must be in the range of the mask. */ 6310 for (i = 0; i < sizeof(eth_m->dst); ++i) 6311 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i]; 6312 if (eth_v->type) { 6313 /* When ethertype is present set mask for tagged VLAN. */ 6314 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); 6315 /* Set value for tagged VLAN if ethertype is 802.1Q. */ 6316 if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) || 6317 eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) { 6318 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 6319 1); 6320 /* Return here to avoid setting match on ethertype. */ 6321 return; 6322 } 6323 } 6324 /* 6325 * HW supports match on one Ethertype, the Ethertype following the last 6326 * VLAN tag of the packet (see PRM). 6327 * Set match on ethertype only if ETH header is not followed by VLAN. 6328 * HW is optimized for IPv4/IPv6. In such cases, avoid setting 6329 * ethertype, and use ip_version field instead. 6330 * eCPRI over Ether layer will use type value 0xAEFE. 6331 */ 6332 if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV4) && 6333 eth_m->type == 0xFFFF) { 6334 flow_dv_set_match_ip_version(group, headers_v, headers_m, 4); 6335 } else if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV6) && 6336 eth_m->type == 0xFFFF) { 6337 flow_dv_set_match_ip_version(group, headers_v, headers_m, 6); 6338 } else { 6339 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 6340 rte_be_to_cpu_16(eth_m->type)); 6341 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 6342 ethertype); 6343 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type; 6344 } 6345 } 6346 6347 /** 6348 * Add VLAN item to matcher and to the value. 6349 * 6350 * @param[in, out] dev_flow 6351 * Flow descriptor. 6352 * @param[in, out] matcher 6353 * Flow matcher. 6354 * @param[in, out] key 6355 * Flow matcher value. 6356 * @param[in] item 6357 * Flow pattern to translate. 6358 * @param[in] inner 6359 * Item is inner pattern. 
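 * @param[in] group
 *   The group to insert the rule.
 *
 * The TCI is handled in host byte order below, following the standard
 * 802.1Q layout: PCP = tci >> 13, CFI/DEI = (tci >> 12) & 0x1,
 * VID = tci & 0x0fff.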
6360 */ 6361 static void 6362 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, 6363 void *matcher, void *key, 6364 const struct rte_flow_item *item, 6365 int inner, uint32_t group) 6366 { 6367 const struct rte_flow_item_vlan *vlan_m = item->mask; 6368 const struct rte_flow_item_vlan *vlan_v = item->spec; 6369 void *headers_m; 6370 void *headers_v; 6371 uint16_t tci_m; 6372 uint16_t tci_v; 6373 6374 if (inner) { 6375 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6376 inner_headers); 6377 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 6378 } else { 6379 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6380 outer_headers); 6381 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 6382 /* 6383 * This is workaround, masks are not supported, 6384 * and pre-validated. 6385 */ 6386 if (vlan_v) 6387 dev_flow->handle->vf_vlan.tag = 6388 rte_be_to_cpu_16(vlan_v->tci) & 0x0fff; 6389 } 6390 /* 6391 * When VLAN item exists in flow, mark packet as tagged, 6392 * even if TCI is not specified. 6393 */ 6394 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); 6395 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1); 6396 if (!vlan_v) 6397 return; 6398 if (!vlan_m) 6399 vlan_m = &rte_flow_item_vlan_mask; 6400 tci_m = rte_be_to_cpu_16(vlan_m->tci); 6401 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci); 6402 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m); 6403 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v); 6404 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12); 6405 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12); 6406 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13); 6407 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13); 6408 /* 6409 * HW is optimized for IPv4/IPv6. In such cases, avoid setting 6410 * ethertype, and use ip_version field instead. 6411 */ 6412 if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV4) && 6413 vlan_m->inner_type == 0xFFFF) { 6414 flow_dv_set_match_ip_version(group, headers_v, headers_m, 4); 6415 } else if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV6) && 6416 vlan_m->inner_type == 0xFFFF) { 6417 flow_dv_set_match_ip_version(group, headers_v, headers_m, 6); 6418 } else { 6419 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 6420 rte_be_to_cpu_16(vlan_m->inner_type)); 6421 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 6422 rte_be_to_cpu_16(vlan_m->inner_type & 6423 vlan_v->inner_type)); 6424 } 6425 } 6426 6427 /** 6428 * Add IPV4 item to matcher and to the value. 6429 * 6430 * @param[in, out] matcher 6431 * Flow matcher. 6432 * @param[in, out] key 6433 * Flow matcher value. 6434 * @param[in] item 6435 * Flow pattern to translate. 6436 * @param[in] item_flags 6437 * Bit-fields that holds the items detected until now. 6438 * @param[in] inner 6439 * Item is inner pattern. 6440 * @param[in] group 6441 * The group to insert the rule. 
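 * The 8-bit type_of_service byte is split into the DSCP match field
 * (upper six bits, hence the '>> 2' below) and the ECN match field
 * (lower two bits).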
6442 */ 6443 static void 6444 flow_dv_translate_item_ipv4(void *matcher, void *key, 6445 const struct rte_flow_item *item, 6446 const uint64_t item_flags, 6447 int inner, uint32_t group) 6448 { 6449 const struct rte_flow_item_ipv4 *ipv4_m = item->mask; 6450 const struct rte_flow_item_ipv4 *ipv4_v = item->spec; 6451 const struct rte_flow_item_ipv4 nic_mask = { 6452 .hdr = { 6453 .src_addr = RTE_BE32(0xffffffff), 6454 .dst_addr = RTE_BE32(0xffffffff), 6455 .type_of_service = 0xff, 6456 .next_proto_id = 0xff, 6457 .time_to_live = 0xff, 6458 }, 6459 }; 6460 void *headers_m; 6461 void *headers_v; 6462 char *l24_m; 6463 char *l24_v; 6464 uint8_t tos; 6465 6466 if (inner) { 6467 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6468 inner_headers); 6469 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 6470 } else { 6471 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6472 outer_headers); 6473 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 6474 } 6475 flow_dv_set_match_ip_version(group, headers_v, headers_m, 4); 6476 /* 6477 * On outer header (which must contains L2), or inner header with L2, 6478 * set cvlan_tag mask bit to mark this packet as untagged. 6479 * This should be done even if item->spec is empty. 6480 */ 6481 if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2) 6482 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); 6483 if (!ipv4_v) 6484 return; 6485 if (!ipv4_m) 6486 ipv4_m = &nic_mask; 6487 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, 6488 dst_ipv4_dst_ipv6.ipv4_layout.ipv4); 6489 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 6490 dst_ipv4_dst_ipv6.ipv4_layout.ipv4); 6491 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr; 6492 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr; 6493 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, 6494 src_ipv4_src_ipv6.ipv4_layout.ipv4); 6495 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 6496 src_ipv4_src_ipv6.ipv4_layout.ipv4); 6497 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr; 6498 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr; 6499 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service; 6500 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, 6501 ipv4_m->hdr.type_of_service); 6502 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos); 6503 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, 6504 ipv4_m->hdr.type_of_service >> 2); 6505 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2); 6506 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 6507 ipv4_m->hdr.next_proto_id); 6508 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 6509 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id); 6510 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit, 6511 ipv4_m->hdr.time_to_live); 6512 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit, 6513 ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live); 6514 } 6515 6516 /** 6517 * Add IPV6 item to matcher and to the value. 6518 * 6519 * @param[in, out] matcher 6520 * Flow matcher. 6521 * @param[in, out] key 6522 * Flow matcher value. 6523 * @param[in] item 6524 * Flow pattern to translate. 6525 * @param[in] item_flags 6526 * Bit-fields that holds the items detected until now. 6527 * @param[in] inner 6528 * Item is inner pattern. 6529 * @param[in] group 6530 * The group to insert the rule. 
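 * In the vtc_flow word the flow label occupies bits 0-19 and the traffic
 * class bits 20-27, which is why ECN is taken from bit 20 and DSCP from
 * bit 22 below; the flow label itself is matched through the misc
 * parameters (inner/outer_ipv6_flow_label).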
6531 */ 6532 static void 6533 flow_dv_translate_item_ipv6(void *matcher, void *key, 6534 const struct rte_flow_item *item, 6535 const uint64_t item_flags, 6536 int inner, uint32_t group) 6537 { 6538 const struct rte_flow_item_ipv6 *ipv6_m = item->mask; 6539 const struct rte_flow_item_ipv6 *ipv6_v = item->spec; 6540 const struct rte_flow_item_ipv6 nic_mask = { 6541 .hdr = { 6542 .src_addr = 6543 "\xff\xff\xff\xff\xff\xff\xff\xff" 6544 "\xff\xff\xff\xff\xff\xff\xff\xff", 6545 .dst_addr = 6546 "\xff\xff\xff\xff\xff\xff\xff\xff" 6547 "\xff\xff\xff\xff\xff\xff\xff\xff", 6548 .vtc_flow = RTE_BE32(0xffffffff), 6549 .proto = 0xff, 6550 .hop_limits = 0xff, 6551 }, 6552 }; 6553 void *headers_m; 6554 void *headers_v; 6555 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 6556 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 6557 char *l24_m; 6558 char *l24_v; 6559 uint32_t vtc_m; 6560 uint32_t vtc_v; 6561 int i; 6562 int size; 6563 6564 if (inner) { 6565 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6566 inner_headers); 6567 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 6568 } else { 6569 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6570 outer_headers); 6571 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 6572 } 6573 flow_dv_set_match_ip_version(group, headers_v, headers_m, 6); 6574 /* 6575 * On outer header (which must contains L2), or inner header with L2, 6576 * set cvlan_tag mask bit to mark this packet as untagged. 6577 * This should be done even if item->spec is empty. 6578 */ 6579 if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2) 6580 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); 6581 if (!ipv6_v) 6582 return; 6583 if (!ipv6_m) 6584 ipv6_m = &nic_mask; 6585 size = sizeof(ipv6_m->hdr.dst_addr); 6586 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, 6587 dst_ipv4_dst_ipv6.ipv6_layout.ipv6); 6588 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 6589 dst_ipv4_dst_ipv6.ipv6_layout.ipv6); 6590 memcpy(l24_m, ipv6_m->hdr.dst_addr, size); 6591 for (i = 0; i < size; ++i) 6592 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i]; 6593 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, 6594 src_ipv4_src_ipv6.ipv6_layout.ipv6); 6595 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 6596 src_ipv4_src_ipv6.ipv6_layout.ipv6); 6597 memcpy(l24_m, ipv6_m->hdr.src_addr, size); 6598 for (i = 0; i < size; ++i) 6599 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i]; 6600 /* TOS. */ 6601 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow); 6602 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow); 6603 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20); 6604 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20); 6605 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22); 6606 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22); 6607 /* Label. */ 6608 if (inner) { 6609 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label, 6610 vtc_m); 6611 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label, 6612 vtc_v); 6613 } else { 6614 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label, 6615 vtc_m); 6616 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label, 6617 vtc_v); 6618 } 6619 /* Protocol. */ 6620 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 6621 ipv6_m->hdr.proto); 6622 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 6623 ipv6_v->hdr.proto & ipv6_m->hdr.proto); 6624 /* Hop limit. 
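 * Shares the ip_ttl_hoplimit match field with the IPv4 TTL.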
*/ 6625 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit, 6626 ipv6_m->hdr.hop_limits); 6627 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit, 6628 ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits); 6629 } 6630 6631 /** 6632 * Add TCP item to matcher and to the value. 6633 * 6634 * @param[in, out] matcher 6635 * Flow matcher. 6636 * @param[in, out] key 6637 * Flow matcher value. 6638 * @param[in] item 6639 * Flow pattern to translate. 6640 * @param[in] inner 6641 * Item is inner pattern. 6642 */ 6643 static void 6644 flow_dv_translate_item_tcp(void *matcher, void *key, 6645 const struct rte_flow_item *item, 6646 int inner) 6647 { 6648 const struct rte_flow_item_tcp *tcp_m = item->mask; 6649 const struct rte_flow_item_tcp *tcp_v = item->spec; 6650 void *headers_m; 6651 void *headers_v; 6652 6653 if (inner) { 6654 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6655 inner_headers); 6656 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 6657 } else { 6658 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6659 outer_headers); 6660 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 6661 } 6662 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); 6663 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP); 6664 if (!tcp_v) 6665 return; 6666 if (!tcp_m) 6667 tcp_m = &rte_flow_item_tcp_mask; 6668 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport, 6669 rte_be_to_cpu_16(tcp_m->hdr.src_port)); 6670 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport, 6671 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port)); 6672 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport, 6673 rte_be_to_cpu_16(tcp_m->hdr.dst_port)); 6674 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport, 6675 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port)); 6676 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags, 6677 tcp_m->hdr.tcp_flags); 6678 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags, 6679 (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags)); 6680 } 6681 6682 /** 6683 * Add UDP item to matcher and to the value. 6684 * 6685 * @param[in, out] matcher 6686 * Flow matcher. 6687 * @param[in, out] key 6688 * Flow matcher value. 6689 * @param[in] item 6690 * Flow pattern to translate. 6691 * @param[in] inner 6692 * Item is inner pattern. 
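 * Like the TCP helper above, ip_protocol is pinned to UDP with a full mask
 * even when the item carries no spec, and the ports are written to the key
 * as (spec & mask) after conversion to host byte order.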
6693 */ 6694 static void 6695 flow_dv_translate_item_udp(void *matcher, void *key, 6696 const struct rte_flow_item *item, 6697 int inner) 6698 { 6699 const struct rte_flow_item_udp *udp_m = item->mask; 6700 const struct rte_flow_item_udp *udp_v = item->spec; 6701 void *headers_m; 6702 void *headers_v; 6703 6704 if (inner) { 6705 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6706 inner_headers); 6707 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 6708 } else { 6709 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6710 outer_headers); 6711 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 6712 } 6713 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); 6714 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP); 6715 if (!udp_v) 6716 return; 6717 if (!udp_m) 6718 udp_m = &rte_flow_item_udp_mask; 6719 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport, 6720 rte_be_to_cpu_16(udp_m->hdr.src_port)); 6721 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, 6722 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port)); 6723 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 6724 rte_be_to_cpu_16(udp_m->hdr.dst_port)); 6725 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, 6726 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port)); 6727 } 6728 6729 /** 6730 * Add GRE optional Key item to matcher and to the value. 6731 * 6732 * @param[in, out] matcher 6733 * Flow matcher. 6734 * @param[in, out] key 6735 * Flow matcher value. 6736 * @param[in] item 6737 * Flow pattern to translate. 6738 * @param[in] inner 6739 * Item is inner pattern. 6740 */ 6741 static void 6742 flow_dv_translate_item_gre_key(void *matcher, void *key, 6743 const struct rte_flow_item *item) 6744 { 6745 const rte_be32_t *key_m = item->mask; 6746 const rte_be32_t *key_v = item->spec; 6747 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 6748 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 6749 rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX); 6750 6751 /* GRE K bit must be on and should already be validated */ 6752 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1); 6753 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1); 6754 if (!key_v) 6755 return; 6756 if (!key_m) 6757 key_m = &gre_key_default_mask; 6758 MLX5_SET(fte_match_set_misc, misc_m, gre_key_h, 6759 rte_be_to_cpu_32(*key_m) >> 8); 6760 MLX5_SET(fte_match_set_misc, misc_v, gre_key_h, 6761 rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8); 6762 MLX5_SET(fte_match_set_misc, misc_m, gre_key_l, 6763 rte_be_to_cpu_32(*key_m) & 0xFF); 6764 MLX5_SET(fte_match_set_misc, misc_v, gre_key_l, 6765 rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF); 6766 } 6767 6768 /** 6769 * Add GRE item to matcher and to the value. 6770 * 6771 * @param[in, out] matcher 6772 * Flow matcher. 6773 * @param[in, out] key 6774 * Flow matcher value. 6775 * @param[in] item 6776 * Flow pattern to translate. 6777 * @param[in] inner 6778 * Item is inner pattern. 
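 * The c_rsvd0_ver word is byte-swapped and then decoded through a bit-field
 * overlay; with the usual little-endian bit-field layout this yields the C
 * flag in bit 15, K in bit 13, S in bit 12 and the GRE version in bits 0-2.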
6779 */ 6780 static void 6781 flow_dv_translate_item_gre(void *matcher, void *key, 6782 const struct rte_flow_item *item, 6783 int inner) 6784 { 6785 const struct rte_flow_item_gre *gre_m = item->mask; 6786 const struct rte_flow_item_gre *gre_v = item->spec; 6787 void *headers_m; 6788 void *headers_v; 6789 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 6790 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 6791 struct { 6792 union { 6793 __extension__ 6794 struct { 6795 uint16_t version:3; 6796 uint16_t rsvd0:9; 6797 uint16_t s_present:1; 6798 uint16_t k_present:1; 6799 uint16_t rsvd_bit1:1; 6800 uint16_t c_present:1; 6801 }; 6802 uint16_t value; 6803 }; 6804 } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v; 6805 6806 if (inner) { 6807 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6808 inner_headers); 6809 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 6810 } else { 6811 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6812 outer_headers); 6813 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 6814 } 6815 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); 6816 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE); 6817 if (!gre_v) 6818 return; 6819 if (!gre_m) 6820 gre_m = &rte_flow_item_gre_mask; 6821 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 6822 rte_be_to_cpu_16(gre_m->protocol)); 6823 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, 6824 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol)); 6825 gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver); 6826 gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver); 6827 MLX5_SET(fte_match_set_misc, misc_m, gre_c_present, 6828 gre_crks_rsvd0_ver_m.c_present); 6829 MLX5_SET(fte_match_set_misc, misc_v, gre_c_present, 6830 gre_crks_rsvd0_ver_v.c_present & 6831 gre_crks_rsvd0_ver_m.c_present); 6832 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 6833 gre_crks_rsvd0_ver_m.k_present); 6834 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 6835 gre_crks_rsvd0_ver_v.k_present & 6836 gre_crks_rsvd0_ver_m.k_present); 6837 MLX5_SET(fte_match_set_misc, misc_m, gre_s_present, 6838 gre_crks_rsvd0_ver_m.s_present); 6839 MLX5_SET(fte_match_set_misc, misc_v, gre_s_present, 6840 gre_crks_rsvd0_ver_v.s_present & 6841 gre_crks_rsvd0_ver_m.s_present); 6842 } 6843 6844 /** 6845 * Add NVGRE item to matcher and to the value. 6846 * 6847 * @param[in, out] matcher 6848 * Flow matcher. 6849 * @param[in, out] key 6850 * Flow matcher value. 6851 * @param[in] item 6852 * Flow pattern to translate. 6853 * @param[in] inner 6854 * Item is inner pattern. 6855 */ 6856 static void 6857 flow_dv_translate_item_nvgre(void *matcher, void *key, 6858 const struct rte_flow_item *item, 6859 int inner) 6860 { 6861 const struct rte_flow_item_nvgre *nvgre_m = item->mask; 6862 const struct rte_flow_item_nvgre *nvgre_v = item->spec; 6863 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 6864 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 6865 const char *tni_flow_id_m; 6866 const char *tni_flow_id_v; 6867 char *gre_key_m; 6868 char *gre_key_v; 6869 int size; 6870 int i; 6871 6872 /* For NVGRE, GRE header fields must be set with defined values. 
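 * The synthesized GRE spec below carries only the K bit (0x2000) and pins
 * the protocol to Transparent Ethernet Bridging (RTE_ETHER_TYPE_TEB); the
 * 0xB000 mask covers the C, K and S flag bits.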
*/ 6873 const struct rte_flow_item_gre gre_spec = { 6874 .c_rsvd0_ver = RTE_BE16(0x2000), 6875 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB) 6876 }; 6877 const struct rte_flow_item_gre gre_mask = { 6878 .c_rsvd0_ver = RTE_BE16(0xB000), 6879 .protocol = RTE_BE16(UINT16_MAX), 6880 }; 6881 const struct rte_flow_item gre_item = { 6882 .spec = &gre_spec, 6883 .mask = &gre_mask, 6884 .last = NULL, 6885 }; 6886 flow_dv_translate_item_gre(matcher, key, &gre_item, inner); 6887 if (!nvgre_v) 6888 return; 6889 if (!nvgre_m) 6890 nvgre_m = &rte_flow_item_nvgre_mask; 6891 tni_flow_id_m = (const char *)nvgre_m->tni; 6892 tni_flow_id_v = (const char *)nvgre_v->tni; 6893 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id); 6894 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h); 6895 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h); 6896 memcpy(gre_key_m, tni_flow_id_m, size); 6897 for (i = 0; i < size; ++i) 6898 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i]; 6899 } 6900 6901 /** 6902 * Add VXLAN item to matcher and to the value. 6903 * 6904 * @param[in, out] matcher 6905 * Flow matcher. 6906 * @param[in, out] key 6907 * Flow matcher value. 6908 * @param[in] item 6909 * Flow pattern to translate. 6910 * @param[in] inner 6911 * Item is inner pattern. 6912 */ 6913 static void 6914 flow_dv_translate_item_vxlan(void *matcher, void *key, 6915 const struct rte_flow_item *item, 6916 int inner) 6917 { 6918 const struct rte_flow_item_vxlan *vxlan_m = item->mask; 6919 const struct rte_flow_item_vxlan *vxlan_v = item->spec; 6920 void *headers_m; 6921 void *headers_v; 6922 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 6923 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 6924 char *vni_m; 6925 char *vni_v; 6926 uint16_t dport; 6927 int size; 6928 int i; 6929 6930 if (inner) { 6931 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6932 inner_headers); 6933 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 6934 } else { 6935 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6936 outer_headers); 6937 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 6938 } 6939 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ? 6940 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE; 6941 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) { 6942 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF); 6943 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport); 6944 } 6945 if (!vxlan_v) 6946 return; 6947 if (!vxlan_m) 6948 vxlan_m = &rte_flow_item_vxlan_mask; 6949 size = sizeof(vxlan_m->vni); 6950 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni); 6951 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni); 6952 memcpy(vni_m, vxlan_m->vni, size); 6953 for (i = 0; i < size; ++i) 6954 vni_v[i] = vni_m[i] & vxlan_v->vni[i]; 6955 } 6956 6957 /** 6958 * Add VXLAN-GPE item to matcher and to the value. 6959 * 6960 * @param[in, out] matcher 6961 * Flow matcher. 6962 * @param[in, out] key 6963 * Flow matcher value. 6964 * @param[in] item 6965 * Flow pattern to translate. 6966 * @param[in] inner 6967 * Item is inner pattern. 
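 * The UDP destination port is defaulted to the VXLAN-GPE port (4790) only
 * when a preceding UDP item did not already set it. When the item carries
 * no flags, the value defaults to 0x0c, which corresponds to the VNI-valid
 * (I) and next-protocol-present (P) bits of the VXLAN-GPE flags byte.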
6968 */ 6969 6970 static void 6971 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key, 6972 const struct rte_flow_item *item, int inner) 6973 { 6974 const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask; 6975 const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec; 6976 void *headers_m; 6977 void *headers_v; 6978 void *misc_m = 6979 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3); 6980 void *misc_v = 6981 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3); 6982 char *vni_m; 6983 char *vni_v; 6984 uint16_t dport; 6985 int size; 6986 int i; 6987 uint8_t flags_m = 0xff; 6988 uint8_t flags_v = 0xc; 6989 6990 if (inner) { 6991 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6992 inner_headers); 6993 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 6994 } else { 6995 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6996 outer_headers); 6997 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 6998 } 6999 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ? 7000 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE; 7001 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) { 7002 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF); 7003 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport); 7004 } 7005 if (!vxlan_v) 7006 return; 7007 if (!vxlan_m) 7008 vxlan_m = &rte_flow_item_vxlan_gpe_mask; 7009 size = sizeof(vxlan_m->vni); 7010 vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni); 7011 vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni); 7012 memcpy(vni_m, vxlan_m->vni, size); 7013 for (i = 0; i < size; ++i) 7014 vni_v[i] = vni_m[i] & vxlan_v->vni[i]; 7015 if (vxlan_m->flags) { 7016 flags_m = vxlan_m->flags; 7017 flags_v = vxlan_v->flags; 7018 } 7019 MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m); 7020 MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v); 7021 MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol, 7022 vxlan_m->protocol); 7023 MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol, 7024 vxlan_v->protocol); 7025 } 7026 7027 /** 7028 * Add Geneve item to matcher and to the value. 7029 * 7030 * @param[in, out] matcher 7031 * Flow matcher. 7032 * @param[in, out] key 7033 * Flow matcher value. 7034 * @param[in] item 7035 * Flow pattern to translate. 7036 * @param[in] inner 7037 * Item is inner pattern. 
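 * The UDP destination port is defaulted to the Geneve port (6081) when not
 * already set by a preceding UDP item; the OAM flag and option length are
 * extracted from ver_opt_len_o_c_rsvd0 through the MLX5_GENEVE_OAMF_VAL()
 * and MLX5_GENEVE_OPTLEN_VAL() helpers.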
7038 */ 7039 7040 static void 7041 flow_dv_translate_item_geneve(void *matcher, void *key, 7042 const struct rte_flow_item *item, int inner) 7043 { 7044 const struct rte_flow_item_geneve *geneve_m = item->mask; 7045 const struct rte_flow_item_geneve *geneve_v = item->spec; 7046 void *headers_m; 7047 void *headers_v; 7048 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 7049 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 7050 uint16_t dport; 7051 uint16_t gbhdr_m; 7052 uint16_t gbhdr_v; 7053 char *vni_m; 7054 char *vni_v; 7055 size_t size, i; 7056 7057 if (inner) { 7058 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 7059 inner_headers); 7060 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 7061 } else { 7062 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 7063 outer_headers); 7064 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 7065 } 7066 dport = MLX5_UDP_PORT_GENEVE; 7067 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) { 7068 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF); 7069 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport); 7070 } 7071 if (!geneve_v) 7072 return; 7073 if (!geneve_m) 7074 geneve_m = &rte_flow_item_geneve_mask; 7075 size = sizeof(geneve_m->vni); 7076 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni); 7077 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni); 7078 memcpy(vni_m, geneve_m->vni, size); 7079 for (i = 0; i < size; ++i) 7080 vni_v[i] = vni_m[i] & geneve_v->vni[i]; 7081 MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, 7082 rte_be_to_cpu_16(geneve_m->protocol)); 7083 MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type, 7084 rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol)); 7085 gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0); 7086 gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0); 7087 MLX5_SET(fte_match_set_misc, misc_m, geneve_oam, 7088 MLX5_GENEVE_OAMF_VAL(gbhdr_m)); 7089 MLX5_SET(fte_match_set_misc, misc_v, geneve_oam, 7090 MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m)); 7091 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len, 7092 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m)); 7093 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len, 7094 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) & 7095 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m)); 7096 } 7097 7098 /** 7099 * Add MPLS item to matcher and to the value. 7100 * 7101 * @param[in, out] matcher 7102 * Flow matcher. 7103 * @param[in, out] key 7104 * Flow matcher value. 7105 * @param[in] item 7106 * Flow pattern to translate. 7107 * @param[in] prev_layer 7108 * The protocol layer indicated in previous item. 7109 * @param[in] inner 7110 * Item is inner pattern. 
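 * Where the label is matched depends on prev_layer: MPLS over UDP pins the
 * UDP destination port to the MPLS port, MPLS over GRE pins the GRE
 * protocol to RTE_ETHER_TYPE_MPLS, and otherwise ip_protocol is set to
 * IPPROTO_MPLS. Inner MPLS that is not over GRE is not supported.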
7111 */ 7112 static void 7113 flow_dv_translate_item_mpls(void *matcher, void *key, 7114 const struct rte_flow_item *item, 7115 uint64_t prev_layer, 7116 int inner) 7117 { 7118 const uint32_t *in_mpls_m = item->mask; 7119 const uint32_t *in_mpls_v = item->spec; 7120 uint32_t *out_mpls_m = 0; 7121 uint32_t *out_mpls_v = 0; 7122 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 7123 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 7124 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher, 7125 misc_parameters_2); 7126 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2); 7127 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers); 7128 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 7129 7130 switch (prev_layer) { 7131 case MLX5_FLOW_LAYER_OUTER_L4_UDP: 7132 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff); 7133 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, 7134 MLX5_UDP_PORT_MPLS); 7135 break; 7136 case MLX5_FLOW_LAYER_GRE: 7137 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff); 7138 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, 7139 RTE_ETHER_TYPE_MPLS); 7140 break; 7141 default: 7142 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); 7143 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 7144 IPPROTO_MPLS); 7145 break; 7146 } 7147 if (!in_mpls_v) 7148 return; 7149 if (!in_mpls_m) 7150 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask; 7151 switch (prev_layer) { 7152 case MLX5_FLOW_LAYER_OUTER_L4_UDP: 7153 out_mpls_m = 7154 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m, 7155 outer_first_mpls_over_udp); 7156 out_mpls_v = 7157 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v, 7158 outer_first_mpls_over_udp); 7159 break; 7160 case MLX5_FLOW_LAYER_GRE: 7161 out_mpls_m = 7162 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m, 7163 outer_first_mpls_over_gre); 7164 out_mpls_v = 7165 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v, 7166 outer_first_mpls_over_gre); 7167 break; 7168 default: 7169 /* Inner MPLS not over GRE is not supported. */ 7170 if (!inner) { 7171 out_mpls_m = 7172 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, 7173 misc2_m, 7174 outer_first_mpls); 7175 out_mpls_v = 7176 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, 7177 misc2_v, 7178 outer_first_mpls); 7179 } 7180 break; 7181 } 7182 if (out_mpls_m && out_mpls_v) { 7183 *out_mpls_m = *in_mpls_m; 7184 *out_mpls_v = *in_mpls_v & *in_mpls_m; 7185 } 7186 } 7187 7188 /** 7189 * Add metadata register item to matcher 7190 * 7191 * @param[in, out] matcher 7192 * Flow matcher. 7193 * @param[in, out] key 7194 * Flow matcher value. 
7195 * @param[in] reg_type 7196 * Type of device metadata register 7197 * @param[in] value 7198 * Register value 7199 * @param[in] mask 7200 * Register mask 7201 */ 7202 static void 7203 flow_dv_match_meta_reg(void *matcher, void *key, 7204 enum modify_reg reg_type, 7205 uint32_t data, uint32_t mask) 7206 { 7207 void *misc2_m = 7208 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2); 7209 void *misc2_v = 7210 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2); 7211 uint32_t temp; 7212 7213 data &= mask; 7214 switch (reg_type) { 7215 case REG_A: 7216 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask); 7217 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data); 7218 break; 7219 case REG_B: 7220 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask); 7221 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data); 7222 break; 7223 case REG_C_0: 7224 /* 7225 * The metadata register C0 field might be divided into 7226 * source vport index and META item value, we should set 7227 * this field according to specified mask, not as whole one. 7228 */ 7229 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0); 7230 temp |= mask; 7231 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp); 7232 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0); 7233 temp &= ~mask; 7234 temp |= data; 7235 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp); 7236 break; 7237 case REG_C_1: 7238 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask); 7239 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data); 7240 break; 7241 case REG_C_2: 7242 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask); 7243 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data); 7244 break; 7245 case REG_C_3: 7246 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask); 7247 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data); 7248 break; 7249 case REG_C_4: 7250 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask); 7251 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data); 7252 break; 7253 case REG_C_5: 7254 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask); 7255 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data); 7256 break; 7257 case REG_C_6: 7258 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask); 7259 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data); 7260 break; 7261 case REG_C_7: 7262 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask); 7263 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data); 7264 break; 7265 default: 7266 MLX5_ASSERT(false); 7267 break; 7268 } 7269 } 7270 7271 /** 7272 * Add MARK item to matcher 7273 * 7274 * @param[in] dev 7275 * The device to configure through. 7276 * @param[in, out] matcher 7277 * Flow matcher. 7278 * @param[in, out] key 7279 * Flow matcher value. 7280 * @param[in] item 7281 * Flow pattern to translate. 7282 */ 7283 static void 7284 flow_dv_translate_item_mark(struct rte_eth_dev *dev, 7285 void *matcher, void *key, 7286 const struct rte_flow_item *item) 7287 { 7288 struct mlx5_priv *priv = dev->data->dev_private; 7289 const struct rte_flow_item_mark *mark; 7290 uint32_t value; 7291 uint32_t mask; 7292 7293 mark = item->mask ? 
					  (const void *)item->mask :
					  &rte_flow_item_mark_mask;
	mask = mark->id & priv->sh->dv_mark_mask;
	mark = (const void *)item->spec;
	MLX5_ASSERT(mark);
	value = mark->id & priv->sh->dv_mark_mask & mask;
	if (mask) {
		enum modify_reg reg;

		/* Get the metadata register index for the mark. */
		reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
		MLX5_ASSERT(reg > 0);
		if (reg == REG_C_0) {
			struct mlx5_priv *priv = dev->data->dev_private;
			uint32_t msk_c0 = priv->sh->dv_regc0_mask;
			uint32_t shl_c0 = rte_bsf32(msk_c0);

			mask &= msk_c0;
			mask <<= shl_c0;
			value <<= shl_c0;
		}
		flow_dv_match_meta_reg(matcher, key, reg, value, mask);
	}
}

/**
 * Add META item to matcher
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_item_meta(struct rte_eth_dev *dev,
			    void *matcher, void *key,
			    const struct rte_flow_attr *attr,
			    const struct rte_flow_item *item)
{
	const struct rte_flow_item_meta *meta_m;
	const struct rte_flow_item_meta *meta_v;

	meta_m = (const void *)item->mask;
	if (!meta_m)
		meta_m = &rte_flow_item_meta_mask;
	meta_v = (const void *)item->spec;
	if (meta_v) {
		int reg;
		uint32_t value = meta_v->data;
		uint32_t mask = meta_m->data;

		reg = flow_dv_get_metadata_reg(dev, attr, NULL);
		if (reg < 0)
			return;
		/*
		 * In datapath code there are no endianness
		 * conversions for performance reasons, all
		 * pattern conversions are done in rte_flow.
		 */
		value = rte_cpu_to_be_32(value);
		mask = rte_cpu_to_be_32(mask);
		if (reg == REG_C_0) {
			struct mlx5_priv *priv = dev->data->dev_private;
			uint32_t msk_c0 = priv->sh->dv_regc0_mask;
			uint32_t shl_c0 = rte_bsf32(msk_c0);
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
			uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);

			value >>= shr_c0;
			mask >>= shr_c0;
#endif
			value <<= shl_c0;
			mask <<= shl_c0;
			MLX5_ASSERT(msk_c0);
			MLX5_ASSERT(!(~msk_c0 & mask));
		}
		flow_dv_match_meta_reg(matcher, key, reg, value, mask);
	}
}

/**
 * Add vport metadata Reg C0 item to matcher
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] value
 *   Register value to match.
 * @param[in] mask
 *   Register mask.
 */
static void
flow_dv_translate_item_meta_vport(void *matcher, void *key,
				  uint32_t value, uint32_t mask)
{
	flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
}

/**
 * Add tag item to matcher
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
				void *matcher, void *key,
				const struct rte_flow_item *item)
{
	const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
	const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
	uint32_t mask, value;

	MLX5_ASSERT(tag_v);
	value = tag_v->data;
	mask = tag_m ? tag_m->data : UINT32_MAX;
	if (tag_v->id == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		mask &= msk_c0;
		mask <<= shl_c0;
		value <<= shl_c0;
	}
	flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
}

/**
 * Add TAG item to matcher
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_item_tag(struct rte_eth_dev *dev,
			   void *matcher, void *key,
			   const struct rte_flow_item *item)
{
	const struct rte_flow_item_tag *tag_v = item->spec;
	const struct rte_flow_item_tag *tag_m = item->mask;
	enum modify_reg reg;

	MLX5_ASSERT(tag_v);
	tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
	/* Get the metadata register index for the tag. */
	reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
	MLX5_ASSERT(reg > 0);
	flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
}

/**
 * Add source vport match to the specified matcher.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] port
 *   Source vport value to match.
 * @param[in] mask
 *   Mask.
 */
static void
flow_dv_translate_item_source_vport(void *matcher, void *key,
				    int16_t port, uint16_t mask)
{
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);

	MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
	MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
}

/**
 * Translate port-id item to eswitch match on port-id.
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
static int
flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
			       void *key, const struct rte_flow_item *item)
{
	const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
	const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
	struct mlx5_priv *priv;
	uint16_t mask, id;

	mask = pid_m ? pid_m->id : 0xffff;
	id = pid_v ? pid_v->id : dev->data->port_id;
	priv = mlx5_port_to_eswitch_info(id, item == NULL);
	if (!priv)
		return -rte_errno;
	/* Translate to vport field or to metadata, depending on mode.
*/ 7514 if (priv->vport_meta_mask) 7515 flow_dv_translate_item_meta_vport(matcher, key, 7516 priv->vport_meta_tag, 7517 priv->vport_meta_mask); 7518 else 7519 flow_dv_translate_item_source_vport(matcher, key, 7520 priv->vport_id, mask); 7521 return 0; 7522 } 7523 7524 /** 7525 * Add ICMP6 item to matcher and to the value. 7526 * 7527 * @param[in, out] matcher 7528 * Flow matcher. 7529 * @param[in, out] key 7530 * Flow matcher value. 7531 * @param[in] item 7532 * Flow pattern to translate. 7533 * @param[in] inner 7534 * Item is inner pattern. 7535 */ 7536 static void 7537 flow_dv_translate_item_icmp6(void *matcher, void *key, 7538 const struct rte_flow_item *item, 7539 int inner) 7540 { 7541 const struct rte_flow_item_icmp6 *icmp6_m = item->mask; 7542 const struct rte_flow_item_icmp6 *icmp6_v = item->spec; 7543 void *headers_m; 7544 void *headers_v; 7545 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher, 7546 misc_parameters_3); 7547 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3); 7548 if (inner) { 7549 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 7550 inner_headers); 7551 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 7552 } else { 7553 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 7554 outer_headers); 7555 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 7556 } 7557 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF); 7558 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6); 7559 if (!icmp6_v) 7560 return; 7561 if (!icmp6_m) 7562 icmp6_m = &rte_flow_item_icmp6_mask; 7563 /* 7564 * Force flow only to match the non-fragmented IPv6 ICMPv6 packets. 7565 * If only the protocol is specified, no need to match the frag. 7566 */ 7567 MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1); 7568 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0); 7569 MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type); 7570 MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type, 7571 icmp6_v->type & icmp6_m->type); 7572 MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code); 7573 MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code, 7574 icmp6_v->code & icmp6_m->code); 7575 } 7576 7577 /** 7578 * Add ICMP item to matcher and to the value. 7579 * 7580 * @param[in, out] matcher 7581 * Flow matcher. 7582 * @param[in, out] key 7583 * Flow matcher value. 7584 * @param[in] item 7585 * Flow pattern to translate. 7586 * @param[in] inner 7587 * Item is inner pattern. 
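 *
 * @note A match on IP protocol ICMP is always added; when a specification
 *   is present the flow is further restricted to non-fragmented packets,
 *   and the identifier and sequence number fields are matched only if a
 *   non-zero mask is given for them.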
7588 */ 7589 static void 7590 flow_dv_translate_item_icmp(void *matcher, void *key, 7591 const struct rte_flow_item *item, 7592 int inner) 7593 { 7594 const struct rte_flow_item_icmp *icmp_m = item->mask; 7595 const struct rte_flow_item_icmp *icmp_v = item->spec; 7596 uint32_t icmp_header_data_m = 0; 7597 uint32_t icmp_header_data_v = 0; 7598 void *headers_m; 7599 void *headers_v; 7600 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher, 7601 misc_parameters_3); 7602 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3); 7603 if (inner) { 7604 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 7605 inner_headers); 7606 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 7607 } else { 7608 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 7609 outer_headers); 7610 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 7611 } 7612 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF); 7613 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP); 7614 if (!icmp_v) 7615 return; 7616 if (!icmp_m) 7617 icmp_m = &rte_flow_item_icmp_mask; 7618 /* 7619 * Force flow only to match the non-fragmented IPv4 ICMP packets. 7620 * If only the protocol is specified, no need to match the frag. 7621 */ 7622 MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1); 7623 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0); 7624 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type, 7625 icmp_m->hdr.icmp_type); 7626 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type, 7627 icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type); 7628 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code, 7629 icmp_m->hdr.icmp_code); 7630 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code, 7631 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code); 7632 icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb); 7633 icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16; 7634 if (icmp_header_data_m) { 7635 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb); 7636 icmp_header_data_v |= 7637 rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16; 7638 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data, 7639 icmp_header_data_m); 7640 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data, 7641 icmp_header_data_v & icmp_header_data_m); 7642 } 7643 } 7644 7645 /** 7646 * Add GTP item to matcher and to the value. 7647 * 7648 * @param[in, out] matcher 7649 * Flow matcher. 7650 * @param[in, out] key 7651 * Flow matcher value. 7652 * @param[in] item 7653 * Flow pattern to translate. 7654 * @param[in] inner 7655 * Item is inner pattern. 
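 *
 * @note When the pattern does not already constrain the UDP destination
 *   port, matching on the default GTP-U port (RTE_GTPU_UDP_PORT) is added
 *   implicitly.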
 */
static void
flow_dv_translate_item_gtp(void *matcher, void *key,
			   const struct rte_flow_item *item, int inner)
{
	const struct rte_flow_item_gtp *gtp_m = item->mask;
	const struct rte_flow_item_gtp *gtp_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_3);
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	uint16_t dport = RTE_GTPU_UDP_PORT;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
	}
	if (!gtp_v)
		return;
	if (!gtp_m)
		gtp_m = &rte_flow_item_gtp_mask;
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
		 gtp_m->v_pt_rsv_flags);
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
		 gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
		 gtp_v->msg_type & gtp_m->msg_type);
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
		 rte_be_to_cpu_32(gtp_m->teid));
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
		 rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
}

/**
 * Add eCPRI item to matcher and to the value.
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
			     void *key, const struct rte_flow_item *item)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_item_ecpri *ecpri_m = item->mask;
	const struct rte_flow_item_ecpri *ecpri_v = item->spec;
	void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_4);
	void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
	uint32_t *samples;
	void *dw_m;
	void *dw_v;

	if (!ecpri_v)
		return;
	if (!ecpri_m)
		ecpri_m = &rte_flow_item_ecpri_mask;
	/*
	 * Maximal four DW samples are supported in a single matching now.
	 * Two are used now for an eCPRI matching:
	 * 1. Type: one byte, mask should be 0x00ff0000 in network order.
	 * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
	 *    if any.
	 */
	if (!ecpri_m->hdr.common.u32)
		return;
	/* The sample IDs come from the eCPRI flex parser profile. */
	samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
	/* Need to take the whole DW as the mask to fill the entry. */
	dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
			    prog_sample_field_value_0);
	dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
			    prog_sample_field_value_0);
	/* Already big endian (network order) in the header.
*/ 7748 *(uint32_t *)dw_m = ecpri_m->hdr.common.u32; 7749 *(uint32_t *)dw_v = ecpri_v->hdr.common.u32; 7750 /* Sample#0, used for matching type, offset 0. */ 7751 MLX5_SET(fte_match_set_misc4, misc4_m, 7752 prog_sample_field_id_0, samples[0]); 7753 /* It makes no sense to set the sample ID in the mask field. */ 7754 MLX5_SET(fte_match_set_misc4, misc4_v, 7755 prog_sample_field_id_0, samples[0]); 7756 /* 7757 * Checking if message body part needs to be matched. 7758 * Some wildcard rules only matching type field should be supported. 7759 */ 7760 if (ecpri_m->hdr.dummy[0]) { 7761 switch (ecpri_v->hdr.common.type) { 7762 case RTE_ECPRI_MSG_TYPE_IQ_DATA: 7763 case RTE_ECPRI_MSG_TYPE_RTC_CTRL: 7764 case RTE_ECPRI_MSG_TYPE_DLY_MSR: 7765 dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m, 7766 prog_sample_field_value_1); 7767 dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v, 7768 prog_sample_field_value_1); 7769 *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0]; 7770 *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0]; 7771 /* Sample#1, to match message body, offset 4. */ 7772 MLX5_SET(fte_match_set_misc4, misc4_m, 7773 prog_sample_field_id_1, samples[1]); 7774 MLX5_SET(fte_match_set_misc4, misc4_v, 7775 prog_sample_field_id_1, samples[1]); 7776 break; 7777 default: 7778 /* Others, do not match any sample ID. */ 7779 break; 7780 } 7781 } 7782 } 7783 7784 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 }; 7785 7786 #define HEADER_IS_ZERO(match_criteria, headers) \ 7787 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \ 7788 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \ 7789 7790 /** 7791 * Calculate flow matcher enable bitmap. 7792 * 7793 * @param match_criteria 7794 * Pointer to flow matcher criteria. 7795 * 7796 * @return 7797 * Bitmap of enabled fields. 7798 */ 7799 static uint8_t 7800 flow_dv_matcher_enable(uint32_t *match_criteria) 7801 { 7802 uint8_t match_criteria_enable; 7803 7804 match_criteria_enable = 7805 (!HEADER_IS_ZERO(match_criteria, outer_headers)) << 7806 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT; 7807 match_criteria_enable |= 7808 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) << 7809 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT; 7810 match_criteria_enable |= 7811 (!HEADER_IS_ZERO(match_criteria, inner_headers)) << 7812 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT; 7813 match_criteria_enable |= 7814 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) << 7815 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT; 7816 match_criteria_enable |= 7817 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) << 7818 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT; 7819 match_criteria_enable |= 7820 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) << 7821 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT; 7822 return match_criteria_enable; 7823 } 7824 7825 7826 /** 7827 * Get a flow table. 7828 * 7829 * @param[in, out] dev 7830 * Pointer to rte_eth_dev structure. 7831 * @param[in] table_id 7832 * Table id to use. 7833 * @param[in] egress 7834 * Direction of the table. 7835 * @param[in] transfer 7836 * E-Switch or NIC flow. 7837 * @param[out] error 7838 * pointer to error structure. 7839 * 7840 * @return 7841 * Returns tables resource based on the index, NULL in case of failed. 
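 *
 * @note The table is looked up in the sh->flow_tbls hash list using a
 *   64-bit key built from table_id, direction and domain; on a hit only
 *   the reference counter is incremented, otherwise a new table object is
 *   created in the corresponding FDB/Tx/Rx domain.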
7842 */ 7843 static struct mlx5_flow_tbl_resource * 7844 flow_dv_tbl_resource_get(struct rte_eth_dev *dev, 7845 uint32_t table_id, uint8_t egress, 7846 uint8_t transfer, 7847 struct rte_flow_error *error) 7848 { 7849 struct mlx5_priv *priv = dev->data->dev_private; 7850 struct mlx5_dev_ctx_shared *sh = priv->sh; 7851 struct mlx5_flow_tbl_resource *tbl; 7852 union mlx5_flow_tbl_key table_key = { 7853 { 7854 .table_id = table_id, 7855 .reserved = 0, 7856 .domain = !!transfer, 7857 .direction = !!egress, 7858 } 7859 }; 7860 struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls, 7861 table_key.v64); 7862 struct mlx5_flow_tbl_data_entry *tbl_data; 7863 uint32_t idx = 0; 7864 int ret; 7865 void *domain; 7866 7867 if (pos) { 7868 tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, 7869 entry); 7870 tbl = &tbl_data->tbl; 7871 rte_atomic32_inc(&tbl->refcnt); 7872 return tbl; 7873 } 7874 tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx); 7875 if (!tbl_data) { 7876 rte_flow_error_set(error, ENOMEM, 7877 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 7878 NULL, 7879 "cannot allocate flow table data entry"); 7880 return NULL; 7881 } 7882 tbl_data->idx = idx; 7883 tbl = &tbl_data->tbl; 7884 pos = &tbl_data->entry; 7885 if (transfer) 7886 domain = sh->fdb_domain; 7887 else if (egress) 7888 domain = sh->tx_domain; 7889 else 7890 domain = sh->rx_domain; 7891 ret = mlx5_flow_os_create_flow_tbl(domain, table_id, &tbl->obj); 7892 if (ret) { 7893 rte_flow_error_set(error, ENOMEM, 7894 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 7895 NULL, "cannot create flow table object"); 7896 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx); 7897 return NULL; 7898 } 7899 /* 7900 * No multi-threads now, but still better to initialize the reference 7901 * count before insert it into the hash list. 7902 */ 7903 rte_atomic32_init(&tbl->refcnt); 7904 /* Jump action reference count is initialized here. */ 7905 rte_atomic32_init(&tbl_data->jump.refcnt); 7906 pos->key = table_key.v64; 7907 ret = mlx5_hlist_insert(sh->flow_tbls, pos); 7908 if (ret < 0) { 7909 rte_flow_error_set(error, -ret, 7910 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 7911 "cannot insert flow table data entry"); 7912 mlx5_flow_os_destroy_flow_tbl(tbl->obj); 7913 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx); 7914 } 7915 rte_atomic32_inc(&tbl->refcnt); 7916 return tbl; 7917 } 7918 7919 /** 7920 * Release a flow table. 7921 * 7922 * @param[in] dev 7923 * Pointer to rte_eth_dev structure. 7924 * @param[in] tbl 7925 * Table resource to be released. 7926 * 7927 * @return 7928 * Returns 0 if table was released, else return 1; 7929 */ 7930 static int 7931 flow_dv_tbl_resource_release(struct rte_eth_dev *dev, 7932 struct mlx5_flow_tbl_resource *tbl) 7933 { 7934 struct mlx5_priv *priv = dev->data->dev_private; 7935 struct mlx5_dev_ctx_shared *sh = priv->sh; 7936 struct mlx5_flow_tbl_data_entry *tbl_data = 7937 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); 7938 7939 if (!tbl) 7940 return 0; 7941 if (rte_atomic32_dec_and_test(&tbl->refcnt)) { 7942 struct mlx5_hlist_entry *pos = &tbl_data->entry; 7943 7944 mlx5_flow_os_destroy_flow_tbl(tbl->obj); 7945 tbl->obj = NULL; 7946 /* remove the entry from the hash list and free memory. */ 7947 mlx5_hlist_remove(sh->flow_tbls, pos); 7948 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_JUMP], 7949 tbl_data->idx); 7950 return 0; 7951 } 7952 return 1; 7953 } 7954 7955 /** 7956 * Register the flow matcher. 7957 * 7958 * @param[in, out] dev 7959 * Pointer to rte_eth_dev structure. 
7960 * @param[in, out] matcher 7961 * Pointer to flow matcher. 7962 * @param[in, out] key 7963 * Pointer to flow table key. 7964 * @parm[in, out] dev_flow 7965 * Pointer to the dev_flow. 7966 * @param[out] error 7967 * pointer to error structure. 7968 * 7969 * @return 7970 * 0 on success otherwise -errno and errno is set. 7971 */ 7972 static int 7973 flow_dv_matcher_register(struct rte_eth_dev *dev, 7974 struct mlx5_flow_dv_matcher *matcher, 7975 union mlx5_flow_tbl_key *key, 7976 struct mlx5_flow *dev_flow, 7977 struct rte_flow_error *error) 7978 { 7979 struct mlx5_priv *priv = dev->data->dev_private; 7980 struct mlx5_dev_ctx_shared *sh = priv->sh; 7981 struct mlx5_flow_dv_matcher *cache_matcher; 7982 struct mlx5dv_flow_matcher_attr dv_attr = { 7983 .type = IBV_FLOW_ATTR_NORMAL, 7984 .match_mask = (void *)&matcher->mask, 7985 }; 7986 struct mlx5_flow_tbl_resource *tbl; 7987 struct mlx5_flow_tbl_data_entry *tbl_data; 7988 int ret; 7989 7990 tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction, 7991 key->domain, error); 7992 if (!tbl) 7993 return -rte_errno; /* No need to refill the error info */ 7994 tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); 7995 /* Lookup from cache. */ 7996 LIST_FOREACH(cache_matcher, &tbl_data->matchers, next) { 7997 if (matcher->crc == cache_matcher->crc && 7998 matcher->priority == cache_matcher->priority && 7999 !memcmp((const void *)matcher->mask.buf, 8000 (const void *)cache_matcher->mask.buf, 8001 cache_matcher->mask.size)) { 8002 DRV_LOG(DEBUG, 8003 "%s group %u priority %hd use %s " 8004 "matcher %p: refcnt %d++", 8005 key->domain ? "FDB" : "NIC", key->table_id, 8006 cache_matcher->priority, 8007 key->direction ? "tx" : "rx", 8008 (void *)cache_matcher, 8009 rte_atomic32_read(&cache_matcher->refcnt)); 8010 rte_atomic32_inc(&cache_matcher->refcnt); 8011 dev_flow->handle->dvh.matcher = cache_matcher; 8012 /* old matcher should not make the table ref++. */ 8013 flow_dv_tbl_resource_release(dev, tbl); 8014 return 0; 8015 } 8016 } 8017 /* Register new matcher. */ 8018 cache_matcher = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache_matcher), 0, 8019 SOCKET_ID_ANY); 8020 if (!cache_matcher) { 8021 flow_dv_tbl_resource_release(dev, tbl); 8022 return rte_flow_error_set(error, ENOMEM, 8023 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 8024 "cannot allocate matcher memory"); 8025 } 8026 *cache_matcher = *matcher; 8027 dv_attr.match_criteria_enable = 8028 flow_dv_matcher_enable(cache_matcher->mask.buf); 8029 dv_attr.priority = matcher->priority; 8030 if (key->direction) 8031 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS; 8032 ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj, 8033 &cache_matcher->matcher_object); 8034 if (ret) { 8035 mlx5_free(cache_matcher); 8036 #ifdef HAVE_MLX5DV_DR 8037 flow_dv_tbl_resource_release(dev, tbl); 8038 #endif 8039 return rte_flow_error_set(error, ENOMEM, 8040 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 8041 NULL, "cannot create matcher"); 8042 } 8043 /* Save the table information */ 8044 cache_matcher->tbl = tbl; 8045 rte_atomic32_init(&cache_matcher->refcnt); 8046 /* only matcher ref++, table ref++ already done above in get API. */ 8047 rte_atomic32_inc(&cache_matcher->refcnt); 8048 LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next); 8049 dev_flow->handle->dvh.matcher = cache_matcher; 8050 DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d", 8051 key->domain ? "FDB" : "NIC", key->table_id, 8052 cache_matcher->priority, 8053 key->direction ? 
"tx" : "rx", (void *)cache_matcher, 8054 rte_atomic32_read(&cache_matcher->refcnt)); 8055 return 0; 8056 } 8057 8058 /** 8059 * Find existing tag resource or create and register a new one. 8060 * 8061 * @param dev[in, out] 8062 * Pointer to rte_eth_dev structure. 8063 * @param[in, out] tag_be24 8064 * Tag value in big endian then R-shift 8. 8065 * @parm[in, out] dev_flow 8066 * Pointer to the dev_flow. 8067 * @param[out] error 8068 * pointer to error structure. 8069 * 8070 * @return 8071 * 0 on success otherwise -errno and errno is set. 8072 */ 8073 static int 8074 flow_dv_tag_resource_register 8075 (struct rte_eth_dev *dev, 8076 uint32_t tag_be24, 8077 struct mlx5_flow *dev_flow, 8078 struct rte_flow_error *error) 8079 { 8080 struct mlx5_priv *priv = dev->data->dev_private; 8081 struct mlx5_dev_ctx_shared *sh = priv->sh; 8082 struct mlx5_flow_dv_tag_resource *cache_resource; 8083 struct mlx5_hlist_entry *entry; 8084 int ret; 8085 8086 /* Lookup a matching resource from cache. */ 8087 entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24); 8088 if (entry) { 8089 cache_resource = container_of 8090 (entry, struct mlx5_flow_dv_tag_resource, entry); 8091 rte_atomic32_inc(&cache_resource->refcnt); 8092 dev_flow->handle->dvh.rix_tag = cache_resource->idx; 8093 dev_flow->dv.tag_resource = cache_resource; 8094 DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++", 8095 (void *)cache_resource, 8096 rte_atomic32_read(&cache_resource->refcnt)); 8097 return 0; 8098 } 8099 /* Register new resource. */ 8100 cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], 8101 &dev_flow->handle->dvh.rix_tag); 8102 if (!cache_resource) 8103 return rte_flow_error_set(error, ENOMEM, 8104 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 8105 "cannot allocate resource memory"); 8106 cache_resource->entry.key = (uint64_t)tag_be24; 8107 ret = mlx5_flow_os_create_flow_action_tag(tag_be24, 8108 &cache_resource->action); 8109 if (ret) { 8110 mlx5_free(cache_resource); 8111 return rte_flow_error_set(error, ENOMEM, 8112 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 8113 NULL, "cannot create action"); 8114 } 8115 rte_atomic32_init(&cache_resource->refcnt); 8116 rte_atomic32_inc(&cache_resource->refcnt); 8117 if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) { 8118 mlx5_flow_os_destroy_flow_action(cache_resource->action); 8119 mlx5_free(cache_resource); 8120 return rte_flow_error_set(error, EEXIST, 8121 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 8122 NULL, "cannot insert tag"); 8123 } 8124 dev_flow->dv.tag_resource = cache_resource; 8125 DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++", 8126 (void *)cache_resource, 8127 rte_atomic32_read(&cache_resource->refcnt)); 8128 return 0; 8129 } 8130 8131 /** 8132 * Release the tag. 8133 * 8134 * @param dev 8135 * Pointer to Ethernet device. 8136 * @param tag_idx 8137 * Tag index. 8138 * 8139 * @return 8140 * 1 while a reference on it exists, 0 when freed. 
 */
static int
flow_dv_tag_release(struct rte_eth_dev *dev,
		    uint32_t tag_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_dv_tag_resource *tag;

	tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
	if (!tag)
		return 0;
	DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
		dev->data->port_id, (void *)tag,
		rte_atomic32_read(&tag->refcnt));
	if (rte_atomic32_dec_and_test(&tag->refcnt)) {
		claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
		mlx5_hlist_remove(sh->tag_table, &tag->entry);
		DRV_LOG(DEBUG, "port %u tag %p: removed",
			dev->data->port_id, (void *)tag);
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
		return 0;
	}
	return 1;
}

/**
 * Translate port ID action to vport.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to the port ID action.
 * @param[out] dst_port_id
 *   The target port ID.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
				 const struct rte_flow_action *action,
				 uint32_t *dst_port_id,
				 struct rte_flow_error *error)
{
	uint32_t port;
	struct mlx5_priv *priv;
	const struct rte_flow_action_port_id *conf =
			(const struct rte_flow_action_port_id *)action->conf;

	port = conf->original ? dev->data->port_id : conf->id;
	priv = mlx5_port_to_eswitch_info(port, false);
	if (!priv)
		return rte_flow_error_set(error, -rte_errno,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "No eswitch info was found for port");
#ifdef HAVE_MLX5DV_DR_DEVX_PORT
	/*
	 * This parameter is transferred to
	 * mlx5dv_dr_action_create_dest_ib_port().
	 */
	*dst_port_id = priv->dev_port;
#else
	/*
	 * Legacy mode, no LAG configuration is supported.
	 * This parameter is transferred to
	 * mlx5dv_dr_action_create_dest_vport().
	 */
	*dst_port_id = priv->vport_id;
#endif
	return 0;
}

/**
 * Create a counter with aging configuration.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] count
 *   Pointer to the counter action configuration.
 * @param[in] age
 *   Pointer to the aging action configuration.
 *
 * @return
 *   Index to flow counter on success, 0 otherwise.
 */
static uint32_t
flow_dv_translate_create_counter(struct rte_eth_dev *dev,
				 struct mlx5_flow *dev_flow,
				 const struct rte_flow_action_count *count,
				 const struct rte_flow_action_age *age)
{
	uint32_t counter;
	struct mlx5_age_param *age_param;

	counter = flow_dv_counter_alloc(dev,
					count ? count->shared : 0,
					count ? count->id : 0,
					dev_flow->dv.group, !!age);
	if (!counter || age == NULL)
		return counter;
	age_param = flow_dv_counter_idx_get_age(dev, counter);
	age_param->context = age->context ? age->context :
		(void *)(uintptr_t)(dev_flow->flow_idx);
	/*
	 * The counter age accuracy may have a bit delay. Have 3/4
	 * second bias on the timeout in order to let it age in time.
	 */
	age_param->timeout = age->timeout * 10 - MLX5_AGING_TIME_DELAY;
	/* Set expire time in unit of 0.1 sec. */
	age_param->port_id = dev->data->port_id;
	age_param->expire = age_param->timeout +
			rte_rdtsc() / (rte_get_tsc_hz() / 10);
	rte_atomic16_set(&age_param->state, AGE_CANDIDATE);
	return counter;
}

/**
 * Add Tx queue matcher
 *
 * @param[in] dev
 *   Pointer to the dev struct.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
				void *matcher, void *key,
				const struct rte_flow_item *item)
{
	const struct mlx5_rte_flow_item_tx_queue *queue_m;
	const struct mlx5_rte_flow_item_tx_queue *queue_v;
	void *misc_m =
		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v =
		MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	struct mlx5_txq_ctrl *txq;
	uint32_t queue;

	queue_m = (const void *)item->mask;
	if (!queue_m)
		return;
	queue_v = (const void *)item->spec;
	if (!queue_v)
		return;
	txq = mlx5_txq_get(dev, queue_v->queue);
	if (!txq)
		return;
	queue = txq->obj->sq->id;
	MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
	MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
		 queue & queue_m->queue);
	mlx5_txq_release(dev, queue_v->queue);
}

/**
 * Set the hash fields according to the @p flow information.
 *
 * @param[in] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] rss_desc
 *   Pointer to the mlx5_flow_rss_desc.
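 *
 * @note The hash fields are derived from the layers detected in the sub
 *   flow; when rss_desc->level selects the inner frame (level >= 2 and
 *   tunnel support is available) IBV_RX_HASH_INNER is added, and the
 *   ETH_RSS_L3_SRC_ONLY/ETH_RSS_L3_DST_ONLY and
 *   ETH_RSS_L4_SRC_ONLY/ETH_RSS_L4_DST_ONLY flags restrict hashing to one
 *   side of the address or port pair.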
8316 */ 8317 static void 8318 flow_dv_hashfields_set(struct mlx5_flow *dev_flow, 8319 struct mlx5_flow_rss_desc *rss_desc) 8320 { 8321 uint64_t items = dev_flow->handle->layers; 8322 int rss_inner = 0; 8323 uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types); 8324 8325 dev_flow->hash_fields = 0; 8326 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 8327 if (rss_desc->level >= 2) { 8328 dev_flow->hash_fields |= IBV_RX_HASH_INNER; 8329 rss_inner = 1; 8330 } 8331 #endif 8332 if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) || 8333 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) { 8334 if (rss_types & MLX5_IPV4_LAYER_TYPES) { 8335 if (rss_types & ETH_RSS_L3_SRC_ONLY) 8336 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4; 8337 else if (rss_types & ETH_RSS_L3_DST_ONLY) 8338 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4; 8339 else 8340 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH; 8341 } 8342 } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) || 8343 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) { 8344 if (rss_types & MLX5_IPV6_LAYER_TYPES) { 8345 if (rss_types & ETH_RSS_L3_SRC_ONLY) 8346 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6; 8347 else if (rss_types & ETH_RSS_L3_DST_ONLY) 8348 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6; 8349 else 8350 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH; 8351 } 8352 } 8353 if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) || 8354 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) { 8355 if (rss_types & ETH_RSS_UDP) { 8356 if (rss_types & ETH_RSS_L4_SRC_ONLY) 8357 dev_flow->hash_fields |= 8358 IBV_RX_HASH_SRC_PORT_UDP; 8359 else if (rss_types & ETH_RSS_L4_DST_ONLY) 8360 dev_flow->hash_fields |= 8361 IBV_RX_HASH_DST_PORT_UDP; 8362 else 8363 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH; 8364 } 8365 } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) || 8366 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) { 8367 if (rss_types & ETH_RSS_TCP) { 8368 if (rss_types & ETH_RSS_L4_SRC_ONLY) 8369 dev_flow->hash_fields |= 8370 IBV_RX_HASH_SRC_PORT_TCP; 8371 else if (rss_types & ETH_RSS_L4_DST_ONLY) 8372 dev_flow->hash_fields |= 8373 IBV_RX_HASH_DST_PORT_TCP; 8374 else 8375 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH; 8376 } 8377 } 8378 } 8379 8380 /** 8381 * Create an Rx Hash queue. 8382 * 8383 * @param dev 8384 * Pointer to Ethernet device. 8385 * @param[in] dev_flow 8386 * Pointer to the mlx5_flow. 8387 * @param[in] rss_desc 8388 * Pointer to the mlx5_flow_rss_desc. 8389 * @param[out] hrxq_idx 8390 * Hash Rx queue index. 8391 * 8392 * @return 8393 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set. 
8394 */ 8395 static struct mlx5_hrxq * 8396 flow_dv_handle_rx_queue(struct rte_eth_dev *dev, 8397 struct mlx5_flow *dev_flow, 8398 struct mlx5_flow_rss_desc *rss_desc, 8399 uint32_t *hrxq_idx) 8400 { 8401 struct mlx5_priv *priv = dev->data->dev_private; 8402 struct mlx5_flow_handle *dh = dev_flow->handle; 8403 struct mlx5_hrxq *hrxq; 8404 8405 MLX5_ASSERT(rss_desc->queue_num); 8406 *hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key, 8407 MLX5_RSS_HASH_KEY_LEN, 8408 dev_flow->hash_fields, 8409 rss_desc->queue, 8410 rss_desc->queue_num); 8411 if (!*hrxq_idx) { 8412 *hrxq_idx = mlx5_hrxq_new 8413 (dev, rss_desc->key, 8414 MLX5_RSS_HASH_KEY_LEN, 8415 dev_flow->hash_fields, 8416 rss_desc->queue, 8417 rss_desc->queue_num, 8418 !!(dh->layers & 8419 MLX5_FLOW_LAYER_TUNNEL)); 8420 if (!*hrxq_idx) 8421 return NULL; 8422 } 8423 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], 8424 *hrxq_idx); 8425 return hrxq; 8426 } 8427 8428 /** 8429 * Find existing sample resource or create and register a new one. 8430 * 8431 * @param[in, out] dev 8432 * Pointer to rte_eth_dev structure. 8433 * @param[in] attr 8434 * Attributes of flow that includes this item. 8435 * @param[in] resource 8436 * Pointer to sample resource. 8437 * @parm[in, out] dev_flow 8438 * Pointer to the dev_flow. 8439 * @param[in, out] sample_dv_actions 8440 * Pointer to sample actions list. 8441 * @param[out] error 8442 * pointer to error structure. 8443 * 8444 * @return 8445 * 0 on success otherwise -errno and errno is set. 8446 */ 8447 static int 8448 flow_dv_sample_resource_register(struct rte_eth_dev *dev, 8449 const struct rte_flow_attr *attr, 8450 struct mlx5_flow_dv_sample_resource *resource, 8451 struct mlx5_flow *dev_flow, 8452 void **sample_dv_actions, 8453 struct rte_flow_error *error) 8454 { 8455 struct mlx5_flow_dv_sample_resource *cache_resource; 8456 struct mlx5dv_dr_flow_sampler_attr sampler_attr; 8457 struct mlx5_priv *priv = dev->data->dev_private; 8458 struct mlx5_dev_ctx_shared *sh = priv->sh; 8459 struct mlx5_flow_tbl_resource *tbl; 8460 uint32_t idx = 0; 8461 const uint32_t next_ft_step = 1; 8462 uint32_t next_ft_id = resource->ft_id + next_ft_step; 8463 8464 /* Lookup a matching resource from cache. */ 8465 ILIST_FOREACH(sh->ipool[MLX5_IPOOL_SAMPLE], sh->sample_action_list, 8466 idx, cache_resource, next) { 8467 if (resource->ratio == cache_resource->ratio && 8468 resource->ft_type == cache_resource->ft_type && 8469 resource->ft_id == cache_resource->ft_id && 8470 resource->set_action == cache_resource->set_action && 8471 !memcmp((void *)&resource->sample_act, 8472 (void *)&cache_resource->sample_act, 8473 sizeof(struct mlx5_flow_sub_actions_list))) { 8474 DRV_LOG(DEBUG, "sample resource %p: refcnt %d++", 8475 (void *)cache_resource, 8476 __atomic_load_n(&cache_resource->refcnt, 8477 __ATOMIC_RELAXED)); 8478 __atomic_fetch_add(&cache_resource->refcnt, 1, 8479 __ATOMIC_RELAXED); 8480 dev_flow->handle->dvh.rix_sample = idx; 8481 dev_flow->dv.sample_res = cache_resource; 8482 return 0; 8483 } 8484 } 8485 /* Register new sample resource. 
*/ 8486 cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], 8487 &dev_flow->handle->dvh.rix_sample); 8488 if (!cache_resource) 8489 return rte_flow_error_set(error, ENOMEM, 8490 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 8491 NULL, 8492 "cannot allocate resource memory"); 8493 *cache_resource = *resource; 8494 /* Create normal path table level */ 8495 tbl = flow_dv_tbl_resource_get(dev, next_ft_id, 8496 attr->egress, attr->transfer, error); 8497 if (!tbl) { 8498 rte_flow_error_set(error, ENOMEM, 8499 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 8500 NULL, 8501 "fail to create normal path table " 8502 "for sample"); 8503 goto error; 8504 } 8505 cache_resource->normal_path_tbl = tbl; 8506 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) { 8507 cache_resource->default_miss = 8508 mlx5_glue->dr_create_flow_action_default_miss(); 8509 if (!cache_resource->default_miss) { 8510 rte_flow_error_set(error, ENOMEM, 8511 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 8512 NULL, 8513 "cannot create default miss " 8514 "action"); 8515 goto error; 8516 } 8517 sample_dv_actions[resource->sample_act.actions_num++] = 8518 cache_resource->default_miss; 8519 } 8520 /* Create a DR sample action */ 8521 sampler_attr.sample_ratio = cache_resource->ratio; 8522 sampler_attr.default_next_table = tbl->obj; 8523 sampler_attr.num_sample_actions = resource->sample_act.actions_num; 8524 sampler_attr.sample_actions = (struct mlx5dv_dr_action **) 8525 &sample_dv_actions[0]; 8526 sampler_attr.action = cache_resource->set_action; 8527 cache_resource->verbs_action = 8528 mlx5_glue->dr_create_flow_action_sampler(&sampler_attr); 8529 if (!cache_resource->verbs_action) { 8530 rte_flow_error_set(error, ENOMEM, 8531 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 8532 NULL, "cannot create sample action"); 8533 goto error; 8534 } 8535 __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED); 8536 ILIST_INSERT(sh->ipool[MLX5_IPOOL_SAMPLE], &sh->sample_action_list, 8537 dev_flow->handle->dvh.rix_sample, cache_resource, 8538 next); 8539 dev_flow->dv.sample_res = cache_resource; 8540 DRV_LOG(DEBUG, "new sample resource %p: refcnt %d++", 8541 (void *)cache_resource, 8542 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)); 8543 return 0; 8544 error: 8545 if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) { 8546 if (cache_resource->default_miss) 8547 claim_zero(mlx5_glue->destroy_flow_action 8548 (cache_resource->default_miss)); 8549 } else { 8550 if (cache_resource->sample_idx.rix_hrxq && 8551 !mlx5_hrxq_release(dev, 8552 cache_resource->sample_idx.rix_hrxq)) 8553 cache_resource->sample_idx.rix_hrxq = 0; 8554 if (cache_resource->sample_idx.rix_tag && 8555 !flow_dv_tag_release(dev, 8556 cache_resource->sample_idx.rix_tag)) 8557 cache_resource->sample_idx.rix_tag = 0; 8558 if (cache_resource->sample_idx.cnt) { 8559 flow_dv_counter_release(dev, 8560 cache_resource->sample_idx.cnt); 8561 cache_resource->sample_idx.cnt = 0; 8562 } 8563 } 8564 if (cache_resource->normal_path_tbl) 8565 flow_dv_tbl_resource_release(dev, 8566 cache_resource->normal_path_tbl); 8567 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], 8568 dev_flow->handle->dvh.rix_sample); 8569 dev_flow->handle->dvh.rix_sample = 0; 8570 return -rte_errno; 8571 } 8572 8573 /** 8574 * Find existing destination array resource or create and register a new one. 8575 * 8576 * @param[in, out] dev 8577 * Pointer to rte_eth_dev structure. 8578 * @param[in] attr 8579 * Attributes of flow that includes this item. 8580 * @param[in] resource 8581 * Pointer to destination array resource. 
8582 * @parm[in, out] dev_flow 8583 * Pointer to the dev_flow. 8584 * @param[out] error 8585 * pointer to error structure. 8586 * 8587 * @return 8588 * 0 on success otherwise -errno and errno is set. 8589 */ 8590 static int 8591 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev, 8592 const struct rte_flow_attr *attr, 8593 struct mlx5_flow_dv_dest_array_resource *resource, 8594 struct mlx5_flow *dev_flow, 8595 struct rte_flow_error *error) 8596 { 8597 struct mlx5_flow_dv_dest_array_resource *cache_resource; 8598 struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 }; 8599 struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM]; 8600 struct mlx5_priv *priv = dev->data->dev_private; 8601 struct mlx5_dev_ctx_shared *sh = priv->sh; 8602 struct mlx5_flow_sub_actions_list *sample_act; 8603 struct mlx5dv_dr_domain *domain; 8604 uint32_t idx = 0; 8605 8606 /* Lookup a matching resource from cache. */ 8607 ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DEST_ARRAY], 8608 sh->dest_array_list, 8609 idx, cache_resource, next) { 8610 if (resource->num_of_dest == cache_resource->num_of_dest && 8611 resource->ft_type == cache_resource->ft_type && 8612 !memcmp((void *)cache_resource->sample_act, 8613 (void *)resource->sample_act, 8614 (resource->num_of_dest * 8615 sizeof(struct mlx5_flow_sub_actions_list)))) { 8616 DRV_LOG(DEBUG, "dest array resource %p: refcnt %d++", 8617 (void *)cache_resource, 8618 __atomic_load_n(&cache_resource->refcnt, 8619 __ATOMIC_RELAXED)); 8620 __atomic_fetch_add(&cache_resource->refcnt, 1, 8621 __ATOMIC_RELAXED); 8622 dev_flow->handle->dvh.rix_dest_array = idx; 8623 dev_flow->dv.dest_array_res = cache_resource; 8624 return 0; 8625 } 8626 } 8627 /* Register new destination array resource. */ 8628 cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY], 8629 &dev_flow->handle->dvh.rix_dest_array); 8630 if (!cache_resource) 8631 return rte_flow_error_set(error, ENOMEM, 8632 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 8633 NULL, 8634 "cannot allocate resource memory"); 8635 *cache_resource = *resource; 8636 if (attr->transfer) 8637 domain = sh->fdb_domain; 8638 else if (attr->ingress) 8639 domain = sh->rx_domain; 8640 else 8641 domain = sh->tx_domain; 8642 for (idx = 0; idx < resource->num_of_dest; idx++) { 8643 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *) 8644 mlx5_malloc(MLX5_MEM_ZERO, 8645 sizeof(struct mlx5dv_dr_action_dest_attr), 8646 0, SOCKET_ID_ANY); 8647 if (!dest_attr[idx]) { 8648 rte_flow_error_set(error, ENOMEM, 8649 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 8650 NULL, 8651 "cannot allocate resource memory"); 8652 goto error; 8653 } 8654 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST; 8655 sample_act = &resource->sample_act[idx]; 8656 if (sample_act->action_flags == MLX5_FLOW_ACTION_QUEUE) { 8657 dest_attr[idx]->dest = sample_act->dr_queue_action; 8658 } else if (sample_act->action_flags == 8659 (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP)) { 8660 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT; 8661 dest_attr[idx]->dest_reformat = &dest_reformat[idx]; 8662 dest_attr[idx]->dest_reformat->reformat = 8663 sample_act->dr_encap_action; 8664 dest_attr[idx]->dest_reformat->dest = 8665 sample_act->dr_port_id_action; 8666 } else if (sample_act->action_flags == 8667 MLX5_FLOW_ACTION_PORT_ID) { 8668 dest_attr[idx]->dest = sample_act->dr_port_id_action; 8669 } 8670 } 8671 /* create a dest array actioin */ 8672 cache_resource->action = mlx5_glue->dr_create_flow_action_dest_array 8673 (domain, 8674 cache_resource->num_of_dest, 8675 
dest_attr); 8676 if (!cache_resource->action) { 8677 rte_flow_error_set(error, ENOMEM, 8678 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 8679 NULL, 8680 "cannot create destination array action"); 8681 goto error; 8682 } 8683 __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED); 8684 ILIST_INSERT(sh->ipool[MLX5_IPOOL_DEST_ARRAY], 8685 &sh->dest_array_list, 8686 dev_flow->handle->dvh.rix_dest_array, cache_resource, 8687 next); 8688 dev_flow->dv.dest_array_res = cache_resource; 8689 DRV_LOG(DEBUG, "new destination array resource %p: refcnt %d++", 8690 (void *)cache_resource, 8691 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)); 8692 for (idx = 0; idx < resource->num_of_dest; idx++) 8693 mlx5_free(dest_attr[idx]); 8694 return 0; 8695 error: 8696 for (idx = 0; idx < resource->num_of_dest; idx++) { 8697 struct mlx5_flow_sub_actions_idx *act_res = 8698 &cache_resource->sample_idx[idx]; 8699 if (act_res->rix_hrxq && 8700 !mlx5_hrxq_release(dev, 8701 act_res->rix_hrxq)) 8702 act_res->rix_hrxq = 0; 8703 if (act_res->rix_encap_decap && 8704 !flow_dv_encap_decap_resource_release(dev, 8705 act_res->rix_encap_decap)) 8706 act_res->rix_encap_decap = 0; 8707 if (act_res->rix_port_id_action && 8708 !flow_dv_port_id_action_resource_release(dev, 8709 act_res->rix_port_id_action)) 8710 act_res->rix_port_id_action = 0; 8711 if (dest_attr[idx]) 8712 mlx5_free(dest_attr[idx]); 8713 } 8714 8715 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], 8716 dev_flow->handle->dvh.rix_dest_array); 8717 dev_flow->handle->dvh.rix_dest_array = 0; 8718 return -rte_errno; 8719 } 8720 8721 /** 8722 * Convert Sample action to DV specification. 8723 * 8724 * @param[in] dev 8725 * Pointer to rte_eth_dev structure. 8726 * @param[in] action 8727 * Pointer to action structure. 8728 * @param[in, out] dev_flow 8729 * Pointer to the mlx5_flow. 8730 * @param[in] attr 8731 * Pointer to the flow attributes. 8732 * @param[in, out] num_of_dest 8733 * Pointer to the num of destination. 8734 * @param[in, out] sample_actions 8735 * Pointer to sample actions list. 8736 * @param[in, out] res 8737 * Pointer to sample resource. 8738 * @param[out] error 8739 * Pointer to the error structure. 8740 * 8741 * @return 8742 * 0 on success, a negative errno value otherwise and rte_errno is set. 
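 *
 * @note Only QUEUE, MARK, COUNT, PORT_ID and RAW_ENCAP sub-actions are
 *   accepted inside the sample action list; any other sub-action type is
 *   rejected with an error.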
8743 */ 8744 static int 8745 flow_dv_translate_action_sample(struct rte_eth_dev *dev, 8746 const struct rte_flow_action *action, 8747 struct mlx5_flow *dev_flow, 8748 const struct rte_flow_attr *attr, 8749 uint32_t *num_of_dest, 8750 void **sample_actions, 8751 struct mlx5_flow_dv_sample_resource *res, 8752 struct rte_flow_error *error) 8753 { 8754 struct mlx5_priv *priv = dev->data->dev_private; 8755 const struct rte_flow_action_sample *sample_action; 8756 const struct rte_flow_action *sub_actions; 8757 const struct rte_flow_action_queue *queue; 8758 struct mlx5_flow_sub_actions_list *sample_act; 8759 struct mlx5_flow_sub_actions_idx *sample_idx; 8760 struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *) 8761 priv->rss_desc) 8762 [!!priv->flow_nested_idx]; 8763 uint64_t action_flags = 0; 8764 8765 sample_act = &res->sample_act; 8766 sample_idx = &res->sample_idx; 8767 sample_action = (const struct rte_flow_action_sample *)action->conf; 8768 res->ratio = sample_action->ratio; 8769 sub_actions = sample_action->actions; 8770 for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) { 8771 int type = sub_actions->type; 8772 uint32_t pre_rix = 0; 8773 void *pre_r; 8774 switch (type) { 8775 case RTE_FLOW_ACTION_TYPE_QUEUE: 8776 { 8777 struct mlx5_hrxq *hrxq; 8778 uint32_t hrxq_idx; 8779 8780 queue = sub_actions->conf; 8781 rss_desc->queue_num = 1; 8782 rss_desc->queue[0] = queue->index; 8783 hrxq = flow_dv_handle_rx_queue(dev, dev_flow, 8784 rss_desc, &hrxq_idx); 8785 if (!hrxq) 8786 return rte_flow_error_set 8787 (error, rte_errno, 8788 RTE_FLOW_ERROR_TYPE_ACTION, 8789 NULL, 8790 "cannot create fate queue"); 8791 sample_act->dr_queue_action = hrxq->action; 8792 sample_idx->rix_hrxq = hrxq_idx; 8793 sample_actions[sample_act->actions_num++] = 8794 hrxq->action; 8795 (*num_of_dest)++; 8796 action_flags |= MLX5_FLOW_ACTION_QUEUE; 8797 if (action_flags & MLX5_FLOW_ACTION_MARK) 8798 dev_flow->handle->rix_hrxq = hrxq_idx; 8799 dev_flow->handle->fate_action = 8800 MLX5_FLOW_FATE_QUEUE; 8801 break; 8802 } 8803 case RTE_FLOW_ACTION_TYPE_MARK: 8804 { 8805 uint32_t tag_be = mlx5_flow_mark_set 8806 (((const struct rte_flow_action_mark *) 8807 (sub_actions->conf))->id); 8808 8809 dev_flow->handle->mark = 1; 8810 pre_rix = dev_flow->handle->dvh.rix_tag; 8811 /* Save the mark resource before sample */ 8812 pre_r = dev_flow->dv.tag_resource; 8813 if (flow_dv_tag_resource_register(dev, tag_be, 8814 dev_flow, error)) 8815 return -rte_errno; 8816 MLX5_ASSERT(dev_flow->dv.tag_resource); 8817 sample_act->dr_tag_action = 8818 dev_flow->dv.tag_resource->action; 8819 sample_idx->rix_tag = 8820 dev_flow->handle->dvh.rix_tag; 8821 sample_actions[sample_act->actions_num++] = 8822 sample_act->dr_tag_action; 8823 /* Recover the mark resource after sample */ 8824 dev_flow->dv.tag_resource = pre_r; 8825 dev_flow->handle->dvh.rix_tag = pre_rix; 8826 action_flags |= MLX5_FLOW_ACTION_MARK; 8827 break; 8828 } 8829 case RTE_FLOW_ACTION_TYPE_COUNT: 8830 { 8831 uint32_t counter; 8832 8833 counter = flow_dv_translate_create_counter(dev, 8834 dev_flow, sub_actions->conf, 0); 8835 if (!counter) 8836 return rte_flow_error_set 8837 (error, rte_errno, 8838 RTE_FLOW_ERROR_TYPE_ACTION, 8839 NULL, 8840 "cannot create counter" 8841 " object."); 8842 sample_idx->cnt = counter; 8843 sample_act->dr_cnt_action = 8844 (flow_dv_counter_get_by_idx(dev, 8845 counter, NULL))->action; 8846 sample_actions[sample_act->actions_num++] = 8847 sample_act->dr_cnt_action; 8848 action_flags |= MLX5_FLOW_ACTION_COUNT; 8849 break; 
8850 } 8851 case RTE_FLOW_ACTION_TYPE_PORT_ID: 8852 { 8853 struct mlx5_flow_dv_port_id_action_resource 8854 port_id_resource; 8855 uint32_t port_id = 0; 8856 8857 memset(&port_id_resource, 0, sizeof(port_id_resource)); 8858 /* Save the port id resource before sample */ 8859 pre_rix = dev_flow->handle->rix_port_id_action; 8860 pre_r = dev_flow->dv.port_id_action; 8861 if (flow_dv_translate_action_port_id(dev, sub_actions, 8862 &port_id, error)) 8863 return -rte_errno; 8864 port_id_resource.port_id = port_id; 8865 if (flow_dv_port_id_action_resource_register 8866 (dev, &port_id_resource, dev_flow, error)) 8867 return -rte_errno; 8868 sample_act->dr_port_id_action = 8869 dev_flow->dv.port_id_action->action; 8870 sample_idx->rix_port_id_action = 8871 dev_flow->handle->rix_port_id_action; 8872 sample_actions[sample_act->actions_num++] = 8873 sample_act->dr_port_id_action; 8874 /* Recover the port id resource after sample */ 8875 dev_flow->dv.port_id_action = pre_r; 8876 dev_flow->handle->rix_port_id_action = pre_rix; 8877 (*num_of_dest)++; 8878 action_flags |= MLX5_FLOW_ACTION_PORT_ID; 8879 break; 8880 } 8881 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 8882 /* Save the encap resource before sample */ 8883 pre_rix = dev_flow->handle->dvh.rix_encap_decap; 8884 pre_r = dev_flow->dv.encap_decap; 8885 if (flow_dv_create_action_l2_encap(dev, sub_actions, 8886 dev_flow, 8887 attr->transfer, 8888 error)) 8889 return -rte_errno; 8890 sample_act->dr_encap_action = 8891 dev_flow->dv.encap_decap->action; 8892 sample_idx->rix_encap_decap = 8893 dev_flow->handle->dvh.rix_encap_decap; 8894 sample_actions[sample_act->actions_num++] = 8895 sample_act->dr_encap_action; 8896 /* Recover the encap resource after sample */ 8897 dev_flow->dv.encap_decap = pre_r; 8898 dev_flow->handle->dvh.rix_encap_decap = pre_rix; 8899 action_flags |= MLX5_FLOW_ACTION_ENCAP; 8900 break; 8901 default: 8902 return rte_flow_error_set(error, EINVAL, 8903 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 8904 NULL, 8905 "Not support for sampler action"); 8906 } 8907 } 8908 sample_act->action_flags = action_flags; 8909 res->ft_id = dev_flow->dv.group; 8910 if (attr->transfer) { 8911 union { 8912 uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)]; 8913 uint64_t set_action; 8914 } action_ctx = { .set_action = 0 }; 8915 8916 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; 8917 MLX5_SET(set_action_in, action_ctx.action_in, action_type, 8918 MLX5_MODIFICATION_TYPE_SET); 8919 MLX5_SET(set_action_in, action_ctx.action_in, field, 8920 MLX5_MODI_META_REG_C_0); 8921 MLX5_SET(set_action_in, action_ctx.action_in, data, 8922 priv->vport_meta_tag); 8923 res->set_action = action_ctx.set_action; 8924 } else if (attr->ingress) { 8925 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX; 8926 } 8927 return 0; 8928 } 8929 8930 /** 8931 * Convert Sample action to DV specification. 8932 * 8933 * @param[in] dev 8934 * Pointer to rte_eth_dev structure. 8935 * @param[in, out] dev_flow 8936 * Pointer to the mlx5_flow. 8937 * @param[in] attr 8938 * Pointer to the flow attributes. 8939 * @param[in] num_of_dest 8940 * The num of destination. 8941 * @param[in, out] res 8942 * Pointer to sample resource. 8943 * @param[in, out] mdest_res 8944 * Pointer to destination array resource. 8945 * @param[in] sample_actions 8946 * Pointer to sample path actions list. 8947 * @param[in] action_flags 8948 * Holds the actions detected until now. 8949 * @param[out] error 8950 * Pointer to the error structure. 8951 * 8952 * @return 8953 * 0 on success, a negative errno value otherwise and rte_errno is set. 
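 *
 * @note With more than one destination the normal path action is stored in
 *   the last entry of the destination array and a mirroring (destination
 *   array) resource is registered; with a single destination a plain sample
 *   resource is registered instead.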
8954 */ 8955 static int 8956 flow_dv_create_action_sample(struct rte_eth_dev *dev, 8957 struct mlx5_flow *dev_flow, 8958 const struct rte_flow_attr *attr, 8959 uint32_t num_of_dest, 8960 struct mlx5_flow_dv_sample_resource *res, 8961 struct mlx5_flow_dv_dest_array_resource *mdest_res, 8962 void **sample_actions, 8963 uint64_t action_flags, 8964 struct rte_flow_error *error) 8965 { 8966 struct mlx5_priv *priv = dev->data->dev_private; 8967 /* update normal path action resource into last index of array */ 8968 uint32_t dest_index = MLX5_MAX_DEST_NUM - 1; 8969 struct mlx5_flow_sub_actions_list *sample_act = 8970 &mdest_res->sample_act[dest_index]; 8971 struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *) 8972 priv->rss_desc) 8973 [!!priv->flow_nested_idx]; 8974 uint32_t normal_idx = 0; 8975 struct mlx5_hrxq *hrxq; 8976 uint32_t hrxq_idx; 8977 8978 if (num_of_dest > 1) { 8979 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) { 8980 /* Handle QP action for mirroring */ 8981 hrxq = flow_dv_handle_rx_queue(dev, dev_flow, 8982 rss_desc, &hrxq_idx); 8983 if (!hrxq) 8984 return rte_flow_error_set 8985 (error, rte_errno, 8986 RTE_FLOW_ERROR_TYPE_ACTION, 8987 NULL, 8988 "cannot create rx queue"); 8989 normal_idx++; 8990 mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx; 8991 sample_act->dr_queue_action = hrxq->action; 8992 if (action_flags & MLX5_FLOW_ACTION_MARK) 8993 dev_flow->handle->rix_hrxq = hrxq_idx; 8994 dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE; 8995 } 8996 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) { 8997 normal_idx++; 8998 mdest_res->sample_idx[dest_index].rix_encap_decap = 8999 dev_flow->handle->dvh.rix_encap_decap; 9000 sample_act->dr_encap_action = 9001 dev_flow->dv.encap_decap->action; 9002 } 9003 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) { 9004 normal_idx++; 9005 mdest_res->sample_idx[dest_index].rix_port_id_action = 9006 dev_flow->handle->rix_port_id_action; 9007 sample_act->dr_port_id_action = 9008 dev_flow->dv.port_id_action->action; 9009 } 9010 sample_act->actions_num = normal_idx; 9011 /* update sample action resource into first index of array */ 9012 mdest_res->ft_type = res->ft_type; 9013 memcpy(&mdest_res->sample_idx[0], &res->sample_idx, 9014 sizeof(struct mlx5_flow_sub_actions_idx)); 9015 memcpy(&mdest_res->sample_act[0], &res->sample_act, 9016 sizeof(struct mlx5_flow_sub_actions_list)); 9017 mdest_res->num_of_dest = num_of_dest; 9018 if (flow_dv_dest_array_resource_register(dev, attr, mdest_res, 9019 dev_flow, error)) 9020 return rte_flow_error_set(error, EINVAL, 9021 RTE_FLOW_ERROR_TYPE_ACTION, 9022 NULL, "can't create sample " 9023 "action"); 9024 } else { 9025 if (flow_dv_sample_resource_register(dev, attr, res, dev_flow, 9026 sample_actions, error)) 9027 return rte_flow_error_set(error, EINVAL, 9028 RTE_FLOW_ERROR_TYPE_ACTION, 9029 NULL, 9030 "can't create sample action"); 9031 } 9032 return 0; 9033 } 9034 9035 /** 9036 * Fill the flow with DV spec, lock free 9037 * (mutex should be acquired by caller). 9038 * 9039 * @param[in] dev 9040 * Pointer to rte_eth_dev structure. 9041 * @param[in, out] dev_flow 9042 * Pointer to the sub flow. 9043 * @param[in] attr 9044 * Pointer to the flow attributes. 9045 * @param[in] items 9046 * Pointer to the list of items. 9047 * @param[in] actions 9048 * Pointer to the list of actions. 9049 * @param[out] error 9050 * Pointer to the error structure. 9051 * 9052 * @return 9053 * 0 on success, a negative errno value otherwise and rte_errno is set. 
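 *
 * For example, a rule roughly equivalent to the testpmd command
 *	flow create 0 ingress pattern eth / ipv4 / udp / end
 *		actions mark id 7 / rss queues 0 1 end / end
 * (port, queue and mark values are assumed here for illustration) is handled
 * by walking the action list first, building dev_flow->dv.actions[] and the
 * modify-header resource, and then the item list, filling the matcher mask
 * and value buffers.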
9054 */ 9055 static int 9056 __flow_dv_translate(struct rte_eth_dev *dev, 9057 struct mlx5_flow *dev_flow, 9058 const struct rte_flow_attr *attr, 9059 const struct rte_flow_item items[], 9060 const struct rte_flow_action actions[], 9061 struct rte_flow_error *error) 9062 { 9063 struct mlx5_priv *priv = dev->data->dev_private; 9064 struct mlx5_dev_config *dev_conf = &priv->config; 9065 struct rte_flow *flow = dev_flow->flow; 9066 struct mlx5_flow_handle *handle = dev_flow->handle; 9067 struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *) 9068 priv->rss_desc) 9069 [!!priv->flow_nested_idx]; 9070 uint64_t item_flags = 0; 9071 uint64_t last_item = 0; 9072 uint64_t action_flags = 0; 9073 uint64_t priority = attr->priority; 9074 struct mlx5_flow_dv_matcher matcher = { 9075 .mask = { 9076 .size = sizeof(matcher.mask.buf) - 9077 MLX5_ST_SZ_BYTES(fte_match_set_misc4), 9078 }, 9079 }; 9080 int actions_n = 0; 9081 bool actions_end = false; 9082 union { 9083 struct mlx5_flow_dv_modify_hdr_resource res; 9084 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) + 9085 sizeof(struct mlx5_modification_cmd) * 9086 (MLX5_MAX_MODIFY_NUM + 1)]; 9087 } mhdr_dummy; 9088 struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res; 9089 const struct rte_flow_action_count *count = NULL; 9090 const struct rte_flow_action_age *age = NULL; 9091 union flow_dv_attr flow_attr = { .attr = 0 }; 9092 uint32_t tag_be; 9093 union mlx5_flow_tbl_key tbl_key; 9094 uint32_t modify_action_position = UINT32_MAX; 9095 void *match_mask = matcher.mask.buf; 9096 void *match_value = dev_flow->dv.value.buf; 9097 uint8_t next_protocol = 0xff; 9098 struct rte_vlan_hdr vlan = { 0 }; 9099 struct mlx5_flow_dv_dest_array_resource mdest_res; 9100 struct mlx5_flow_dv_sample_resource sample_res; 9101 void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0}; 9102 struct mlx5_flow_sub_actions_list *sample_act; 9103 uint32_t sample_act_pos = UINT32_MAX; 9104 uint32_t num_of_dest = 0; 9105 int tmp_actions_n = 0; 9106 uint32_t table; 9107 int ret = 0; 9108 9109 memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource)); 9110 memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource)); 9111 mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX : 9112 MLX5DV_FLOW_TABLE_TYPE_NIC_RX; 9113 /* update normal path action resource into last index of array */ 9114 sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1]; 9115 ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group, 9116 !!priv->fdb_def_rule, &table, error); 9117 if (ret) 9118 return ret; 9119 dev_flow->dv.group = table; 9120 if (attr->transfer) 9121 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; 9122 if (priority == MLX5_FLOW_PRIO_RSVD) 9123 priority = dev_conf->flow_prio - 1; 9124 /* number of actions must be set to 0 in case of dirty stack. 
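 * (mhdr_res points into the on-stack mhdr_dummy union above, so the embedded
 * command counter must be cleared explicitly before the action loop below
 * starts accumulating modify-header commands.)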
*/ 9125 mhdr_res->actions_num = 0; 9126 for (; !actions_end ; actions++) { 9127 const struct rte_flow_action_queue *queue; 9128 const struct rte_flow_action_rss *rss; 9129 const struct rte_flow_action *action = actions; 9130 const uint8_t *rss_key; 9131 const struct rte_flow_action_meter *mtr; 9132 struct mlx5_flow_tbl_resource *tbl; 9133 uint32_t port_id = 0; 9134 struct mlx5_flow_dv_port_id_action_resource port_id_resource; 9135 int action_type = actions->type; 9136 const struct rte_flow_action *found_action = NULL; 9137 struct mlx5_flow_meter *fm = NULL; 9138 uint32_t jump_group = 0; 9139 9140 if (!mlx5_flow_os_action_supported(action_type)) 9141 return rte_flow_error_set(error, ENOTSUP, 9142 RTE_FLOW_ERROR_TYPE_ACTION, 9143 actions, 9144 "action not supported"); 9145 switch (action_type) { 9146 case RTE_FLOW_ACTION_TYPE_VOID: 9147 break; 9148 case RTE_FLOW_ACTION_TYPE_PORT_ID: 9149 if (flow_dv_translate_action_port_id(dev, action, 9150 &port_id, error)) 9151 return -rte_errno; 9152 port_id_resource.port_id = port_id; 9153 MLX5_ASSERT(!handle->rix_port_id_action); 9154 if (flow_dv_port_id_action_resource_register 9155 (dev, &port_id_resource, dev_flow, error)) 9156 return -rte_errno; 9157 dev_flow->dv.actions[actions_n++] = 9158 dev_flow->dv.port_id_action->action; 9159 action_flags |= MLX5_FLOW_ACTION_PORT_ID; 9160 dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID; 9161 sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID; 9162 num_of_dest++; 9163 break; 9164 case RTE_FLOW_ACTION_TYPE_FLAG: 9165 action_flags |= MLX5_FLOW_ACTION_FLAG; 9166 dev_flow->handle->mark = 1; 9167 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 9168 struct rte_flow_action_mark mark = { 9169 .id = MLX5_FLOW_MARK_DEFAULT, 9170 }; 9171 9172 if (flow_dv_convert_action_mark(dev, &mark, 9173 mhdr_res, 9174 error)) 9175 return -rte_errno; 9176 action_flags |= MLX5_FLOW_ACTION_MARK_EXT; 9177 break; 9178 } 9179 tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT); 9180 /* 9181 * Only one FLAG or MARK is supported per device flow 9182 * right now. So the pointer to the tag resource must be 9183 * zero before the register process. 9184 */ 9185 MLX5_ASSERT(!handle->dvh.rix_tag); 9186 if (flow_dv_tag_resource_register(dev, tag_be, 9187 dev_flow, error)) 9188 return -rte_errno; 9189 MLX5_ASSERT(dev_flow->dv.tag_resource); 9190 dev_flow->dv.actions[actions_n++] = 9191 dev_flow->dv.tag_resource->action; 9192 break; 9193 case RTE_FLOW_ACTION_TYPE_MARK: 9194 action_flags |= MLX5_FLOW_ACTION_MARK; 9195 dev_flow->handle->mark = 1; 9196 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 9197 const struct rte_flow_action_mark *mark = 9198 (const struct rte_flow_action_mark *) 9199 actions->conf; 9200 9201 if (flow_dv_convert_action_mark(dev, mark, 9202 mhdr_res, 9203 error)) 9204 return -rte_errno; 9205 action_flags |= MLX5_FLOW_ACTION_MARK_EXT; 9206 break; 9207 } 9208 /* Fall-through */ 9209 case MLX5_RTE_FLOW_ACTION_TYPE_MARK: 9210 /* Legacy (non-extensive) MARK action. 
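 * For instance, with dv_xmeta_en in legacy mode an application action such as
 *	{ .type = RTE_FLOW_ACTION_TYPE_MARK,
 *	  .conf = &(struct rte_flow_action_mark){ .id = 0x1234 } }
 * (the id value is only an assumed example) is translated below into a single
 * flow-tag DV action via mlx5_flow_mark_set().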
*/ 9211 tag_be = mlx5_flow_mark_set 9212 (((const struct rte_flow_action_mark *) 9213 (actions->conf))->id); 9214 MLX5_ASSERT(!handle->dvh.rix_tag); 9215 if (flow_dv_tag_resource_register(dev, tag_be, 9216 dev_flow, error)) 9217 return -rte_errno; 9218 MLX5_ASSERT(dev_flow->dv.tag_resource); 9219 dev_flow->dv.actions[actions_n++] = 9220 dev_flow->dv.tag_resource->action; 9221 break; 9222 case RTE_FLOW_ACTION_TYPE_SET_META: 9223 if (flow_dv_convert_action_set_meta 9224 (dev, mhdr_res, attr, 9225 (const struct rte_flow_action_set_meta *) 9226 actions->conf, error)) 9227 return -rte_errno; 9228 action_flags |= MLX5_FLOW_ACTION_SET_META; 9229 break; 9230 case RTE_FLOW_ACTION_TYPE_SET_TAG: 9231 if (flow_dv_convert_action_set_tag 9232 (dev, mhdr_res, 9233 (const struct rte_flow_action_set_tag *) 9234 actions->conf, error)) 9235 return -rte_errno; 9236 action_flags |= MLX5_FLOW_ACTION_SET_TAG; 9237 break; 9238 case RTE_FLOW_ACTION_TYPE_DROP: 9239 action_flags |= MLX5_FLOW_ACTION_DROP; 9240 dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP; 9241 break; 9242 case RTE_FLOW_ACTION_TYPE_QUEUE: 9243 queue = actions->conf; 9244 rss_desc->queue_num = 1; 9245 rss_desc->queue[0] = queue->index; 9246 action_flags |= MLX5_FLOW_ACTION_QUEUE; 9247 dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE; 9248 sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE; 9249 num_of_dest++; 9250 break; 9251 case RTE_FLOW_ACTION_TYPE_RSS: 9252 rss = actions->conf; 9253 memcpy(rss_desc->queue, rss->queue, 9254 rss->queue_num * sizeof(uint16_t)); 9255 rss_desc->queue_num = rss->queue_num; 9256 /* NULL RSS key indicates default RSS key. */ 9257 rss_key = !rss->key ? rss_hash_default_key : rss->key; 9258 memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN); 9259 /* 9260 * rss->level and rss.types should be set in advance 9261 * when expanding items for RSS. 9262 */ 9263 action_flags |= MLX5_FLOW_ACTION_RSS; 9264 dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE; 9265 break; 9266 case RTE_FLOW_ACTION_TYPE_AGE: 9267 case RTE_FLOW_ACTION_TYPE_COUNT: 9268 if (!dev_conf->devx) { 9269 return rte_flow_error_set 9270 (error, ENOTSUP, 9271 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 9272 NULL, 9273 "count action not supported"); 9274 } 9275 /* Save information first, will apply later. 
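 * The DevX counter object is allocated only when RTE_FLOW_ACTION_TYPE_END
 * is reached, so that the COUNT and AGE parameters of the rule can be
 * combined into a single counter allocation.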
*/ 9276 if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT) 9277 count = action->conf; 9278 else 9279 age = action->conf; 9280 action_flags |= MLX5_FLOW_ACTION_COUNT; 9281 break; 9282 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: 9283 dev_flow->dv.actions[actions_n++] = 9284 priv->sh->pop_vlan_action; 9285 action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN; 9286 break; 9287 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 9288 if (!(action_flags & 9289 MLX5_FLOW_ACTION_OF_SET_VLAN_VID)) 9290 flow_dev_get_vlan_info_from_items(items, &vlan); 9291 vlan.eth_proto = rte_be_to_cpu_16 9292 ((((const struct rte_flow_action_of_push_vlan *) 9293 actions->conf)->ethertype)); 9294 found_action = mlx5_flow_find_action 9295 (actions + 1, 9296 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID); 9297 if (found_action) 9298 mlx5_update_vlan_vid_pcp(found_action, &vlan); 9299 found_action = mlx5_flow_find_action 9300 (actions + 1, 9301 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP); 9302 if (found_action) 9303 mlx5_update_vlan_vid_pcp(found_action, &vlan); 9304 if (flow_dv_create_action_push_vlan 9305 (dev, attr, &vlan, dev_flow, error)) 9306 return -rte_errno; 9307 dev_flow->dv.actions[actions_n++] = 9308 dev_flow->dv.push_vlan_res->action; 9309 action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN; 9310 break; 9311 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: 9312 /* of_vlan_push action handled this action */ 9313 MLX5_ASSERT(action_flags & 9314 MLX5_FLOW_ACTION_OF_PUSH_VLAN); 9315 break; 9316 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 9317 if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) 9318 break; 9319 flow_dev_get_vlan_info_from_items(items, &vlan); 9320 mlx5_update_vlan_vid_pcp(actions, &vlan); 9321 /* If no VLAN push - this is a modify header action */ 9322 if (flow_dv_convert_action_modify_vlan_vid 9323 (mhdr_res, actions, error)) 9324 return -rte_errno; 9325 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID; 9326 break; 9327 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 9328 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 9329 if (flow_dv_create_action_l2_encap(dev, actions, 9330 dev_flow, 9331 attr->transfer, 9332 error)) 9333 return -rte_errno; 9334 dev_flow->dv.actions[actions_n++] = 9335 dev_flow->dv.encap_decap->action; 9336 action_flags |= MLX5_FLOW_ACTION_ENCAP; 9337 if (action_flags & MLX5_FLOW_ACTION_SAMPLE) 9338 sample_act->action_flags |= 9339 MLX5_FLOW_ACTION_ENCAP; 9340 break; 9341 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: 9342 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: 9343 if (flow_dv_create_action_l2_decap(dev, dev_flow, 9344 attr->transfer, 9345 error)) 9346 return -rte_errno; 9347 dev_flow->dv.actions[actions_n++] = 9348 dev_flow->dv.encap_decap->action; 9349 action_flags |= MLX5_FLOW_ACTION_DECAP; 9350 break; 9351 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 9352 /* Handle encap with preceding decap. */ 9353 if (action_flags & MLX5_FLOW_ACTION_DECAP) { 9354 if (flow_dv_create_action_raw_encap 9355 (dev, actions, dev_flow, attr, error)) 9356 return -rte_errno; 9357 dev_flow->dv.actions[actions_n++] = 9358 dev_flow->dv.encap_decap->action; 9359 } else { 9360 /* Handle encap without preceding decap. 
*/ 9361 if (flow_dv_create_action_l2_encap 9362 (dev, actions, dev_flow, attr->transfer, 9363 error)) 9364 return -rte_errno; 9365 dev_flow->dv.actions[actions_n++] = 9366 dev_flow->dv.encap_decap->action; 9367 } 9368 action_flags |= MLX5_FLOW_ACTION_ENCAP; 9369 if (action_flags & MLX5_FLOW_ACTION_SAMPLE) 9370 sample_act->action_flags |= 9371 MLX5_FLOW_ACTION_ENCAP; 9372 break; 9373 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 9374 while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID) 9375 ; 9376 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { 9377 if (flow_dv_create_action_l2_decap 9378 (dev, dev_flow, attr->transfer, error)) 9379 return -rte_errno; 9380 dev_flow->dv.actions[actions_n++] = 9381 dev_flow->dv.encap_decap->action; 9382 } 9383 /* If decap is followed by encap, handle it at encap. */ 9384 action_flags |= MLX5_FLOW_ACTION_DECAP; 9385 break; 9386 case RTE_FLOW_ACTION_TYPE_JUMP: 9387 jump_group = ((const struct rte_flow_action_jump *) 9388 action->conf)->group; 9389 if (dev_flow->external && jump_group < 9390 MLX5_MAX_TABLES_EXTERNAL) 9391 jump_group *= MLX5_FLOW_TABLE_FACTOR; 9392 ret = mlx5_flow_group_to_table(attr, dev_flow->external, 9393 jump_group, 9394 !!priv->fdb_def_rule, 9395 &table, error); 9396 if (ret) 9397 return ret; 9398 tbl = flow_dv_tbl_resource_get(dev, table, 9399 attr->egress, 9400 attr->transfer, error); 9401 if (!tbl) 9402 return rte_flow_error_set 9403 (error, errno, 9404 RTE_FLOW_ERROR_TYPE_ACTION, 9405 NULL, 9406 "cannot create jump action."); 9407 if (flow_dv_jump_tbl_resource_register 9408 (dev, tbl, dev_flow, error)) { 9409 flow_dv_tbl_resource_release(dev, tbl); 9410 return rte_flow_error_set 9411 (error, errno, 9412 RTE_FLOW_ERROR_TYPE_ACTION, 9413 NULL, 9414 "cannot create jump action."); 9415 } 9416 dev_flow->dv.actions[actions_n++] = 9417 dev_flow->dv.jump->action; 9418 action_flags |= MLX5_FLOW_ACTION_JUMP; 9419 dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP; 9420 break; 9421 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: 9422 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: 9423 if (flow_dv_convert_action_modify_mac 9424 (mhdr_res, actions, error)) 9425 return -rte_errno; 9426 action_flags |= actions->type == 9427 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ? 9428 MLX5_FLOW_ACTION_SET_MAC_SRC : 9429 MLX5_FLOW_ACTION_SET_MAC_DST; 9430 break; 9431 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: 9432 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: 9433 if (flow_dv_convert_action_modify_ipv4 9434 (mhdr_res, actions, error)) 9435 return -rte_errno; 9436 action_flags |= actions->type == 9437 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ? 9438 MLX5_FLOW_ACTION_SET_IPV4_SRC : 9439 MLX5_FLOW_ACTION_SET_IPV4_DST; 9440 break; 9441 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: 9442 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: 9443 if (flow_dv_convert_action_modify_ipv6 9444 (mhdr_res, actions, error)) 9445 return -rte_errno; 9446 action_flags |= actions->type == 9447 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ? 9448 MLX5_FLOW_ACTION_SET_IPV6_SRC : 9449 MLX5_FLOW_ACTION_SET_IPV6_DST; 9450 break; 9451 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: 9452 case RTE_FLOW_ACTION_TYPE_SET_TP_DST: 9453 if (flow_dv_convert_action_modify_tp 9454 (mhdr_res, actions, items, 9455 &flow_attr, dev_flow, !!(action_flags & 9456 MLX5_FLOW_ACTION_DECAP), error)) 9457 return -rte_errno; 9458 action_flags |= actions->type == 9459 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ? 
9460 MLX5_FLOW_ACTION_SET_TP_SRC : 9461 MLX5_FLOW_ACTION_SET_TP_DST; 9462 break; 9463 case RTE_FLOW_ACTION_TYPE_DEC_TTL: 9464 if (flow_dv_convert_action_modify_dec_ttl 9465 (mhdr_res, items, &flow_attr, dev_flow, 9466 !!(action_flags & 9467 MLX5_FLOW_ACTION_DECAP), error)) 9468 return -rte_errno; 9469 action_flags |= MLX5_FLOW_ACTION_DEC_TTL; 9470 break; 9471 case RTE_FLOW_ACTION_TYPE_SET_TTL: 9472 if (flow_dv_convert_action_modify_ttl 9473 (mhdr_res, actions, items, &flow_attr, 9474 dev_flow, !!(action_flags & 9475 MLX5_FLOW_ACTION_DECAP), error)) 9476 return -rte_errno; 9477 action_flags |= MLX5_FLOW_ACTION_SET_TTL; 9478 break; 9479 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ: 9480 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ: 9481 if (flow_dv_convert_action_modify_tcp_seq 9482 (mhdr_res, actions, error)) 9483 return -rte_errno; 9484 action_flags |= actions->type == 9485 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ? 9486 MLX5_FLOW_ACTION_INC_TCP_SEQ : 9487 MLX5_FLOW_ACTION_DEC_TCP_SEQ; 9488 break; 9489 9490 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK: 9491 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK: 9492 if (flow_dv_convert_action_modify_tcp_ack 9493 (mhdr_res, actions, error)) 9494 return -rte_errno; 9495 action_flags |= actions->type == 9496 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ? 9497 MLX5_FLOW_ACTION_INC_TCP_ACK : 9498 MLX5_FLOW_ACTION_DEC_TCP_ACK; 9499 break; 9500 case MLX5_RTE_FLOW_ACTION_TYPE_TAG: 9501 if (flow_dv_convert_action_set_reg 9502 (mhdr_res, actions, error)) 9503 return -rte_errno; 9504 action_flags |= MLX5_FLOW_ACTION_SET_TAG; 9505 break; 9506 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG: 9507 if (flow_dv_convert_action_copy_mreg 9508 (dev, mhdr_res, actions, error)) 9509 return -rte_errno; 9510 action_flags |= MLX5_FLOW_ACTION_SET_TAG; 9511 break; 9512 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS: 9513 action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS; 9514 dev_flow->handle->fate_action = 9515 MLX5_FLOW_FATE_DEFAULT_MISS; 9516 break; 9517 case RTE_FLOW_ACTION_TYPE_METER: 9518 mtr = actions->conf; 9519 if (!flow->meter) { 9520 fm = mlx5_flow_meter_attach(priv, mtr->mtr_id, 9521 attr, error); 9522 if (!fm) 9523 return rte_flow_error_set(error, 9524 rte_errno, 9525 RTE_FLOW_ERROR_TYPE_ACTION, 9526 NULL, 9527 "meter not found " 9528 "or invalid parameters"); 9529 flow->meter = fm->idx; 9530 } 9531 /* Set the meter action. 
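 * If the meter was already attached by a previous device flow of the same
 * rte_flow, fm is still NULL at this point and flow->meter holds the ipool
 * index, so only a lookup is performed below.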
*/ 9532 if (!fm) { 9533 fm = mlx5_ipool_get(priv->sh->ipool 9534 [MLX5_IPOOL_MTR], flow->meter); 9535 if (!fm) 9536 return rte_flow_error_set(error, 9537 rte_errno, 9538 RTE_FLOW_ERROR_TYPE_ACTION, 9539 NULL, 9540 "meter not found " 9541 "or invalid parameters"); 9542 } 9543 dev_flow->dv.actions[actions_n++] = 9544 fm->mfts->meter_action; 9545 action_flags |= MLX5_FLOW_ACTION_METER; 9546 break; 9547 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP: 9548 if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res, 9549 actions, error)) 9550 return -rte_errno; 9551 action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP; 9552 break; 9553 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: 9554 if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res, 9555 actions, error)) 9556 return -rte_errno; 9557 action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP; 9558 break; 9559 case RTE_FLOW_ACTION_TYPE_SAMPLE: 9560 sample_act_pos = actions_n; 9561 ret = flow_dv_translate_action_sample(dev, 9562 actions, 9563 dev_flow, attr, 9564 &num_of_dest, 9565 sample_actions, 9566 &sample_res, 9567 error); 9568 if (ret < 0) 9569 return ret; 9570 actions_n++; 9571 action_flags |= MLX5_FLOW_ACTION_SAMPLE; 9572 /* put encap action into group if work with port id */ 9573 if ((action_flags & MLX5_FLOW_ACTION_ENCAP) && 9574 (action_flags & MLX5_FLOW_ACTION_PORT_ID)) 9575 sample_act->action_flags |= 9576 MLX5_FLOW_ACTION_ENCAP; 9577 break; 9578 case RTE_FLOW_ACTION_TYPE_END: 9579 actions_end = true; 9580 if (mhdr_res->actions_num) { 9581 /* create modify action if needed. */ 9582 if (flow_dv_modify_hdr_resource_register 9583 (dev, mhdr_res, dev_flow, error)) 9584 return -rte_errno; 9585 dev_flow->dv.actions[modify_action_position] = 9586 handle->dvh.modify_hdr->action; 9587 } 9588 if (action_flags & MLX5_FLOW_ACTION_COUNT) { 9589 flow->counter = 9590 flow_dv_translate_create_counter(dev, 9591 dev_flow, count, age); 9592 9593 if (!flow->counter) 9594 return rte_flow_error_set 9595 (error, rte_errno, 9596 RTE_FLOW_ERROR_TYPE_ACTION, 9597 NULL, 9598 "cannot create counter" 9599 " object."); 9600 dev_flow->dv.actions[actions_n] = 9601 (flow_dv_counter_get_by_idx(dev, 9602 flow->counter, NULL))->action; 9603 actions_n++; 9604 } 9605 if (action_flags & MLX5_FLOW_ACTION_SAMPLE) { 9606 ret = flow_dv_create_action_sample(dev, 9607 dev_flow, attr, 9608 num_of_dest, 9609 &sample_res, 9610 &mdest_res, 9611 sample_actions, 9612 action_flags, 9613 error); 9614 if (ret < 0) 9615 return rte_flow_error_set 9616 (error, rte_errno, 9617 RTE_FLOW_ERROR_TYPE_ACTION, 9618 NULL, 9619 "cannot create sample action"); 9620 if (num_of_dest > 1) { 9621 dev_flow->dv.actions[sample_act_pos] = 9622 dev_flow->dv.dest_array_res->action; 9623 } else { 9624 dev_flow->dv.actions[sample_act_pos] = 9625 dev_flow->dv.sample_res->verbs_action; 9626 } 9627 } 9628 break; 9629 default: 9630 break; 9631 } 9632 if (mhdr_res->actions_num && 9633 modify_action_position == UINT32_MAX) 9634 modify_action_position = actions_n++; 9635 } 9636 /* 9637 * For multiple destination (sample action with ratio=1), the encap 9638 * action and port id action will be combined into group action. 9639 * So need remove the original these actions in the flow and only 9640 * use the sample action instead of. 
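 * In other words, the DR destination-array action created for the sample
 * already carries the encap and port-id destinations, so their standalone
 * entries are filtered out of dev_flow->dv.actions[] below.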
9641 */ 9642 if (num_of_dest > 1 && sample_act->dr_port_id_action) { 9643 int i; 9644 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0}; 9645 9646 for (i = 0; i < actions_n; i++) { 9647 if ((sample_act->dr_encap_action && 9648 sample_act->dr_encap_action == 9649 dev_flow->dv.actions[i]) || 9650 (sample_act->dr_port_id_action && 9651 sample_act->dr_port_id_action == 9652 dev_flow->dv.actions[i])) 9653 continue; 9654 temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i]; 9655 } 9656 memcpy((void *)dev_flow->dv.actions, 9657 (void *)temp_actions, 9658 tmp_actions_n * sizeof(void *)); 9659 actions_n = tmp_actions_n; 9660 } 9661 dev_flow->dv.actions_n = actions_n; 9662 dev_flow->act_flags = action_flags; 9663 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { 9664 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 9665 int item_type = items->type; 9666 9667 if (!mlx5_flow_os_item_supported(item_type)) 9668 return rte_flow_error_set(error, ENOTSUP, 9669 RTE_FLOW_ERROR_TYPE_ITEM, 9670 NULL, "item not supported"); 9671 switch (item_type) { 9672 case RTE_FLOW_ITEM_TYPE_PORT_ID: 9673 flow_dv_translate_item_port_id(dev, match_mask, 9674 match_value, items); 9675 last_item = MLX5_FLOW_ITEM_PORT_ID; 9676 break; 9677 case RTE_FLOW_ITEM_TYPE_ETH: 9678 flow_dv_translate_item_eth(match_mask, match_value, 9679 items, tunnel, 9680 dev_flow->dv.group); 9681 matcher.priority = action_flags & 9682 MLX5_FLOW_ACTION_DEFAULT_MISS && 9683 !dev_flow->external ? 9684 MLX5_PRIORITY_MAP_L3 : 9685 MLX5_PRIORITY_MAP_L2; 9686 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : 9687 MLX5_FLOW_LAYER_OUTER_L2; 9688 break; 9689 case RTE_FLOW_ITEM_TYPE_VLAN: 9690 flow_dv_translate_item_vlan(dev_flow, 9691 match_mask, match_value, 9692 items, tunnel, 9693 dev_flow->dv.group); 9694 matcher.priority = MLX5_PRIORITY_MAP_L2; 9695 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 | 9696 MLX5_FLOW_LAYER_INNER_VLAN) : 9697 (MLX5_FLOW_LAYER_OUTER_L2 | 9698 MLX5_FLOW_LAYER_OUTER_VLAN); 9699 break; 9700 case RTE_FLOW_ITEM_TYPE_IPV4: 9701 mlx5_flow_tunnel_ip_check(items, next_protocol, 9702 &item_flags, &tunnel); 9703 flow_dv_translate_item_ipv4(match_mask, match_value, 9704 items, item_flags, tunnel, 9705 dev_flow->dv.group); 9706 matcher.priority = MLX5_PRIORITY_MAP_L3; 9707 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : 9708 MLX5_FLOW_LAYER_OUTER_L3_IPV4; 9709 if (items->mask != NULL && 9710 ((const struct rte_flow_item_ipv4 *) 9711 items->mask)->hdr.next_proto_id) { 9712 next_protocol = 9713 ((const struct rte_flow_item_ipv4 *) 9714 (items->spec))->hdr.next_proto_id; 9715 next_protocol &= 9716 ((const struct rte_flow_item_ipv4 *) 9717 (items->mask))->hdr.next_proto_id; 9718 } else { 9719 /* Reset for inner layer. */ 9720 next_protocol = 0xff; 9721 } 9722 break; 9723 case RTE_FLOW_ITEM_TYPE_IPV6: 9724 mlx5_flow_tunnel_ip_check(items, next_protocol, 9725 &item_flags, &tunnel); 9726 flow_dv_translate_item_ipv6(match_mask, match_value, 9727 items, item_flags, tunnel, 9728 dev_flow->dv.group); 9729 matcher.priority = MLX5_PRIORITY_MAP_L3; 9730 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : 9731 MLX5_FLOW_LAYER_OUTER_L3_IPV6; 9732 if (items->mask != NULL && 9733 ((const struct rte_flow_item_ipv6 *) 9734 items->mask)->hdr.proto) { 9735 next_protocol = 9736 ((const struct rte_flow_item_ipv6 *) 9737 items->spec)->hdr.proto; 9738 next_protocol &= 9739 ((const struct rte_flow_item_ipv6 *) 9740 items->mask)->hdr.proto; 9741 } else { 9742 /* Reset for inner layer. 
*/ 9743 next_protocol = 0xff; 9744 } 9745 break; 9746 case RTE_FLOW_ITEM_TYPE_TCP: 9747 flow_dv_translate_item_tcp(match_mask, match_value, 9748 items, tunnel); 9749 matcher.priority = MLX5_PRIORITY_MAP_L4; 9750 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP : 9751 MLX5_FLOW_LAYER_OUTER_L4_TCP; 9752 break; 9753 case RTE_FLOW_ITEM_TYPE_UDP: 9754 flow_dv_translate_item_udp(match_mask, match_value, 9755 items, tunnel); 9756 matcher.priority = MLX5_PRIORITY_MAP_L4; 9757 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP : 9758 MLX5_FLOW_LAYER_OUTER_L4_UDP; 9759 break; 9760 case RTE_FLOW_ITEM_TYPE_GRE: 9761 flow_dv_translate_item_gre(match_mask, match_value, 9762 items, tunnel); 9763 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); 9764 last_item = MLX5_FLOW_LAYER_GRE; 9765 break; 9766 case RTE_FLOW_ITEM_TYPE_GRE_KEY: 9767 flow_dv_translate_item_gre_key(match_mask, 9768 match_value, items); 9769 last_item = MLX5_FLOW_LAYER_GRE_KEY; 9770 break; 9771 case RTE_FLOW_ITEM_TYPE_NVGRE: 9772 flow_dv_translate_item_nvgre(match_mask, match_value, 9773 items, tunnel); 9774 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); 9775 last_item = MLX5_FLOW_LAYER_GRE; 9776 break; 9777 case RTE_FLOW_ITEM_TYPE_VXLAN: 9778 flow_dv_translate_item_vxlan(match_mask, match_value, 9779 items, tunnel); 9780 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); 9781 last_item = MLX5_FLOW_LAYER_VXLAN; 9782 break; 9783 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: 9784 flow_dv_translate_item_vxlan_gpe(match_mask, 9785 match_value, items, 9786 tunnel); 9787 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); 9788 last_item = MLX5_FLOW_LAYER_VXLAN_GPE; 9789 break; 9790 case RTE_FLOW_ITEM_TYPE_GENEVE: 9791 flow_dv_translate_item_geneve(match_mask, match_value, 9792 items, tunnel); 9793 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); 9794 last_item = MLX5_FLOW_LAYER_GENEVE; 9795 break; 9796 case RTE_FLOW_ITEM_TYPE_MPLS: 9797 flow_dv_translate_item_mpls(match_mask, match_value, 9798 items, last_item, tunnel); 9799 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); 9800 last_item = MLX5_FLOW_LAYER_MPLS; 9801 break; 9802 case RTE_FLOW_ITEM_TYPE_MARK: 9803 flow_dv_translate_item_mark(dev, match_mask, 9804 match_value, items); 9805 last_item = MLX5_FLOW_ITEM_MARK; 9806 break; 9807 case RTE_FLOW_ITEM_TYPE_META: 9808 flow_dv_translate_item_meta(dev, match_mask, 9809 match_value, attr, items); 9810 last_item = MLX5_FLOW_ITEM_METADATA; 9811 break; 9812 case RTE_FLOW_ITEM_TYPE_ICMP: 9813 flow_dv_translate_item_icmp(match_mask, match_value, 9814 items, tunnel); 9815 last_item = MLX5_FLOW_LAYER_ICMP; 9816 break; 9817 case RTE_FLOW_ITEM_TYPE_ICMP6: 9818 flow_dv_translate_item_icmp6(match_mask, match_value, 9819 items, tunnel); 9820 last_item = MLX5_FLOW_LAYER_ICMP6; 9821 break; 9822 case RTE_FLOW_ITEM_TYPE_TAG: 9823 flow_dv_translate_item_tag(dev, match_mask, 9824 match_value, items); 9825 last_item = MLX5_FLOW_ITEM_TAG; 9826 break; 9827 case MLX5_RTE_FLOW_ITEM_TYPE_TAG: 9828 flow_dv_translate_mlx5_item_tag(dev, match_mask, 9829 match_value, items); 9830 last_item = MLX5_FLOW_ITEM_TAG; 9831 break; 9832 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE: 9833 flow_dv_translate_item_tx_queue(dev, match_mask, 9834 match_value, 9835 items); 9836 last_item = MLX5_FLOW_ITEM_TX_QUEUE; 9837 break; 9838 case RTE_FLOW_ITEM_TYPE_GTP: 9839 flow_dv_translate_item_gtp(match_mask, match_value, 9840 items, tunnel); 9841 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); 9842 last_item = MLX5_FLOW_LAYER_GTP; 9843 break; 9844 case RTE_FLOW_ITEM_TYPE_ECPRI: 9845 if 
(!mlx5_flex_parser_ecpri_exist(dev)) { 9846 /* Create it only the first time it is used. */ 9847 ret = mlx5_flex_parser_ecpri_alloc(dev); 9848 if (ret) 9849 return rte_flow_error_set 9850 (error, -ret, 9851 RTE_FLOW_ERROR_TYPE_ITEM, 9852 NULL, 9853 "cannot create eCPRI parser"); 9854 } 9855 /* Adjust the length matcher and device flow value. */ 9856 matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param); 9857 dev_flow->dv.value.size = 9858 MLX5_ST_SZ_BYTES(fte_match_param); 9859 flow_dv_translate_item_ecpri(dev, match_mask, 9860 match_value, items); 9861 /* No other protocol should follow eCPRI layer. */ 9862 last_item = MLX5_FLOW_LAYER_ECPRI; 9863 break; 9864 default: 9865 break; 9866 } 9867 item_flags |= last_item; 9868 } 9869 /* 9870 * When E-Switch mode is enabled, we have two cases where we need to 9871 * set the source port manually. 9872 * The first one is a NIC steering rule, and the second is an 9873 * E-Switch rule where no port_id item was found. In both cases 9874 * the source port is set according to the current port in use. 9875 */ 9876 if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) && 9877 (priv->representor || priv->master)) { 9878 if (flow_dv_translate_item_port_id(dev, match_mask, 9879 match_value, NULL)) 9880 return -rte_errno; 9881 } 9882 #ifdef RTE_LIBRTE_MLX5_DEBUG 9883 MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf, 9884 dev_flow->dv.value.buf)); 9885 #endif 9886 /* 9887 * Layers may already be initialized from the prefix flow if this 9888 * dev_flow is the suffix flow. 9889 */ 9890 handle->layers |= item_flags; 9891 if (action_flags & MLX5_FLOW_ACTION_RSS) 9892 flow_dv_hashfields_set(dev_flow, rss_desc); 9893 /* Register matcher. */ 9894 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf, 9895 matcher.mask.size); 9896 matcher.priority = mlx5_flow_adjust_priority(dev, priority, 9897 matcher.priority); 9898 /* The reserved field does not need to be set to 0 here. */ 9899 tbl_key.domain = attr->transfer; 9900 tbl_key.direction = attr->egress; 9901 tbl_key.table_id = dev_flow->dv.group; 9902 if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error)) 9903 return -rte_errno; 9904 return 0; 9905 } 9906 9907 /** 9908 * Apply the flow to the NIC, lock free, 9909 * (mutex should be acquired by caller). 9910 * 9911 * @param[in] dev 9912 * Pointer to the Ethernet device structure. 9913 * @param[in, out] flow 9914 * Pointer to flow structure. 9915 * @param[out] error 9916 * Pointer to error structure. 9917 * 9918 * @return 9919 * 0 on success, a negative errno value otherwise and rte_errno is set.
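 *
 * For every device flow handle the fate resources are resolved first (drop
 * action, hash Rx queue or default-miss action) and only then is the rule
 * created in hardware with mlx5_flow_os_create_flow(), so a failure can be
 * rolled back without leaking queue references.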
9920 */ 9921 static int 9922 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, 9923 struct rte_flow_error *error) 9924 { 9925 struct mlx5_flow_dv_workspace *dv; 9926 struct mlx5_flow_handle *dh; 9927 struct mlx5_flow_handle_dv *dv_h; 9928 struct mlx5_flow *dev_flow; 9929 struct mlx5_priv *priv = dev->data->dev_private; 9930 uint32_t handle_idx; 9931 int n; 9932 int err; 9933 int idx; 9934 9935 for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) { 9936 dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx]; 9937 dv = &dev_flow->dv; 9938 dh = dev_flow->handle; 9939 dv_h = &dh->dvh; 9940 n = dv->actions_n; 9941 if (dh->fate_action == MLX5_FLOW_FATE_DROP) { 9942 if (dv->transfer) { 9943 dv->actions[n++] = priv->sh->esw_drop_action; 9944 } else { 9945 struct mlx5_hrxq *drop_hrxq; 9946 drop_hrxq = mlx5_drop_action_create(dev); 9947 if (!drop_hrxq) { 9948 rte_flow_error_set 9949 (error, errno, 9950 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 9951 NULL, 9952 "cannot get drop hash queue"); 9953 goto error; 9954 } 9955 /* 9956 * Drop queues will be released by the specify 9957 * mlx5_drop_action_destroy() function. Assign 9958 * the special index to hrxq to mark the queue 9959 * has been allocated. 9960 */ 9961 dh->rix_hrxq = UINT32_MAX; 9962 dv->actions[n++] = drop_hrxq->action; 9963 } 9964 } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && 9965 !dv_h->rix_sample && !dv_h->rix_dest_array) { 9966 struct mlx5_hrxq *hrxq; 9967 uint32_t hrxq_idx; 9968 struct mlx5_flow_rss_desc *rss_desc = 9969 &((struct mlx5_flow_rss_desc *)priv->rss_desc) 9970 [!!priv->flow_nested_idx]; 9971 9972 MLX5_ASSERT(rss_desc->queue_num); 9973 hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key, 9974 MLX5_RSS_HASH_KEY_LEN, 9975 dev_flow->hash_fields, 9976 rss_desc->queue, 9977 rss_desc->queue_num); 9978 if (!hrxq_idx) { 9979 hrxq_idx = mlx5_hrxq_new 9980 (dev, rss_desc->key, 9981 MLX5_RSS_HASH_KEY_LEN, 9982 dev_flow->hash_fields, 9983 rss_desc->queue, 9984 rss_desc->queue_num, 9985 !!(dh->layers & 9986 MLX5_FLOW_LAYER_TUNNEL)); 9987 } 9988 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], 9989 hrxq_idx); 9990 if (!hrxq) { 9991 rte_flow_error_set 9992 (error, rte_errno, 9993 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 9994 "cannot get hash queue"); 9995 goto error; 9996 } 9997 dh->rix_hrxq = hrxq_idx; 9998 dv->actions[n++] = hrxq->action; 9999 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) { 10000 if (flow_dv_default_miss_resource_register 10001 (dev, error)) { 10002 rte_flow_error_set 10003 (error, rte_errno, 10004 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 10005 "cannot create default miss resource"); 10006 goto error_default_miss; 10007 } 10008 dh->rix_default_fate = MLX5_FLOW_FATE_DEFAULT_MISS; 10009 dv->actions[n++] = priv->sh->default_miss.action; 10010 } 10011 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object, 10012 (void *)&dv->value, n, 10013 dv->actions, &dh->drv_flow); 10014 if (err) { 10015 rte_flow_error_set(error, errno, 10016 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 10017 NULL, 10018 "hardware refuses to create flow"); 10019 goto error; 10020 } 10021 if (priv->vmwa_context && 10022 dh->vf_vlan.tag && !dh->vf_vlan.created) { 10023 /* 10024 * The rule contains the VLAN pattern. 10025 * For VF we are going to create VLAN 10026 * interface to make hypervisor set correct 10027 * e-Switch vport context. 
10028 */ 10029 mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan); 10030 } 10031 } 10032 return 0; 10033 error: 10034 if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) 10035 flow_dv_default_miss_resource_release(dev); 10036 error_default_miss: 10037 err = rte_errno; /* Save rte_errno before cleanup. */ 10038 SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, 10039 handle_idx, dh, next) { 10040 /* hrxq is union, don't clear it if the flag is not set. */ 10041 if (dh->rix_hrxq) { 10042 if (dh->fate_action == MLX5_FLOW_FATE_DROP) { 10043 mlx5_drop_action_destroy(dev); 10044 dh->rix_hrxq = 0; 10045 } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) { 10046 mlx5_hrxq_release(dev, dh->rix_hrxq); 10047 dh->rix_hrxq = 0; 10048 } 10049 } 10050 if (dh->vf_vlan.tag && dh->vf_vlan.created) 10051 mlx5_vlan_vmwa_release(dev, &dh->vf_vlan); 10052 } 10053 rte_errno = err; /* Restore rte_errno. */ 10054 return -rte_errno; 10055 } 10056 10057 /** 10058 * Release the flow matcher. 10059 * 10060 * @param dev 10061 * Pointer to Ethernet device. 10062 * @param handle 10063 * Pointer to mlx5_flow_handle. 10064 * 10065 * @return 10066 * 1 while a reference on it exists, 0 when freed. 10067 */ 10068 static int 10069 flow_dv_matcher_release(struct rte_eth_dev *dev, 10070 struct mlx5_flow_handle *handle) 10071 { 10072 struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher; 10073 10074 MLX5_ASSERT(matcher->matcher_object); 10075 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--", 10076 dev->data->port_id, (void *)matcher, 10077 rte_atomic32_read(&matcher->refcnt)); 10078 if (rte_atomic32_dec_and_test(&matcher->refcnt)) { 10079 claim_zero(mlx5_flow_os_destroy_flow_matcher 10080 (matcher->matcher_object)); 10081 LIST_REMOVE(matcher, next); 10082 /* table ref-- in release interface. */ 10083 flow_dv_tbl_resource_release(dev, matcher->tbl); 10084 mlx5_free(matcher); 10085 DRV_LOG(DEBUG, "port %u matcher %p: removed", 10086 dev->data->port_id, (void *)matcher); 10087 return 0; 10088 } 10089 return 1; 10090 } 10091 10092 /** 10093 * Release an encap/decap resource. 10094 * 10095 * @param dev 10096 * Pointer to Ethernet device. 10097 * @param encap_decap_idx 10098 * Index of encap decap resource. 10099 * 10100 * @return 10101 * 1 while a reference on it exists, 0 when freed. 10102 */ 10103 static int 10104 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev, 10105 uint32_t encap_decap_idx) 10106 { 10107 struct mlx5_priv *priv = dev->data->dev_private; 10108 uint32_t idx = encap_decap_idx; 10109 struct mlx5_flow_dv_encap_decap_resource *cache_resource; 10110 10111 cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], 10112 idx); 10113 if (!cache_resource) 10114 return 0; 10115 MLX5_ASSERT(cache_resource->action); 10116 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--", 10117 (void *)cache_resource, 10118 rte_atomic32_read(&cache_resource->refcnt)); 10119 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { 10120 claim_zero(mlx5_flow_os_destroy_flow_action 10121 (cache_resource->action)); 10122 mlx5_hlist_remove(priv->sh->encaps_decaps, 10123 &cache_resource->entry); 10124 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx); 10125 DRV_LOG(DEBUG, "encap/decap resource %p: removed", 10126 (void *)cache_resource); 10127 return 0; 10128 } 10129 return 1; 10130 } 10131 10132 /** 10133 * Release an jump to table action resource. 10134 * 10135 * @param dev 10136 * Pointer to Ethernet device. 10137 * @param handle 10138 * Pointer to mlx5_flow_handle. 
10139 * 10140 * @return 10141 * 1 while a reference on it exists, 0 when freed. 10142 */ 10143 static int 10144 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev, 10145 struct mlx5_flow_handle *handle) 10146 { 10147 struct mlx5_priv *priv = dev->data->dev_private; 10148 struct mlx5_flow_dv_jump_tbl_resource *cache_resource; 10149 struct mlx5_flow_tbl_data_entry *tbl_data; 10150 10151 tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP], 10152 handle->rix_jump); 10153 if (!tbl_data) 10154 return 0; 10155 cache_resource = &tbl_data->jump; 10156 MLX5_ASSERT(cache_resource->action); 10157 DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--", 10158 (void *)cache_resource, 10159 rte_atomic32_read(&cache_resource->refcnt)); 10160 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { 10161 claim_zero(mlx5_flow_os_destroy_flow_action 10162 (cache_resource->action)); 10163 /* jump action memory free is inside the table release. */ 10164 flow_dv_tbl_resource_release(dev, &tbl_data->tbl); 10165 DRV_LOG(DEBUG, "jump table resource %p: removed", 10166 (void *)cache_resource); 10167 return 0; 10168 } 10169 return 1; 10170 } 10171 10172 /** 10173 * Release a default miss resource. 10174 * 10175 * @param dev 10176 * Pointer to Ethernet device. 10177 * @return 10178 * 1 while a reference on it exists, 0 when freed. 10179 */ 10180 static int 10181 flow_dv_default_miss_resource_release(struct rte_eth_dev *dev) 10182 { 10183 struct mlx5_priv *priv = dev->data->dev_private; 10184 struct mlx5_dev_ctx_shared *sh = priv->sh; 10185 struct mlx5_flow_default_miss_resource *cache_resource = 10186 &sh->default_miss; 10187 10188 MLX5_ASSERT(cache_resource->action); 10189 DRV_LOG(DEBUG, "default miss resource %p: refcnt %d--", 10190 (void *)cache_resource->action, 10191 rte_atomic32_read(&cache_resource->refcnt)); 10192 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { 10193 claim_zero(mlx5_glue->destroy_flow_action 10194 (cache_resource->action)); 10195 DRV_LOG(DEBUG, "default miss resource %p: removed", 10196 (void *)cache_resource->action); 10197 return 0; 10198 } 10199 return 1; 10200 } 10201 10202 /** 10203 * Release a modify-header resource. 10204 * 10205 * @param dev 10206 * Pointer to Ethernet device. 10207 * @param handle 10208 * Pointer to mlx5_flow_handle. 10209 * 10210 * @return 10211 * 1 while a reference on it exists, 0 when freed. 10212 */ 10213 static int 10214 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev, 10215 struct mlx5_flow_handle *handle) 10216 { 10217 struct mlx5_priv *priv = dev->data->dev_private; 10218 struct mlx5_flow_dv_modify_hdr_resource *cache_resource = 10219 handle->dvh.modify_hdr; 10220 10221 MLX5_ASSERT(cache_resource->action); 10222 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--", 10223 (void *)cache_resource, 10224 rte_atomic32_read(&cache_resource->refcnt)); 10225 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { 10226 claim_zero(mlx5_flow_os_destroy_flow_action 10227 (cache_resource->action)); 10228 mlx5_hlist_remove(priv->sh->modify_cmds, 10229 &cache_resource->entry); 10230 mlx5_free(cache_resource); 10231 DRV_LOG(DEBUG, "modify-header resource %p: removed", 10232 (void *)cache_resource); 10233 return 0; 10234 } 10235 return 1; 10236 } 10237 10238 /** 10239 * Release port ID action resource. 10240 * 10241 * @param dev 10242 * Pointer to Ethernet device. 10243 * @param handle 10244 * Pointer to mlx5_flow_handle. 10245 * 10246 * @return 10247 * 1 while a reference on it exists, 0 when freed. 
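 *
 * Like the other *_resource_release() helpers in this area, the resource is
 * freed only when rte_atomic32_dec_and_test() observes the last reference
 * going away; the caller must own one reference per registered use.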
10248 */ 10249 static int 10250 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev, 10251 uint32_t port_id) 10252 { 10253 struct mlx5_priv *priv = dev->data->dev_private; 10254 struct mlx5_flow_dv_port_id_action_resource *cache_resource; 10255 uint32_t idx = port_id; 10256 10257 cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], 10258 idx); 10259 if (!cache_resource) 10260 return 0; 10261 MLX5_ASSERT(cache_resource->action); 10262 DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--", 10263 (void *)cache_resource, 10264 rte_atomic32_read(&cache_resource->refcnt)); 10265 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { 10266 claim_zero(mlx5_flow_os_destroy_flow_action 10267 (cache_resource->action)); 10268 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PORT_ID], 10269 &priv->sh->port_id_action_list, idx, 10270 cache_resource, next); 10271 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PORT_ID], idx); 10272 DRV_LOG(DEBUG, "port id action resource %p: removed", 10273 (void *)cache_resource); 10274 return 0; 10275 } 10276 return 1; 10277 } 10278 10279 /** 10280 * Release push vlan action resource. 10281 * 10282 * @param dev 10283 * Pointer to Ethernet device. 10284 * @param handle 10285 * Pointer to mlx5_flow_handle. 10286 * 10287 * @return 10288 * 1 while a reference on it exists, 0 when freed. 10289 */ 10290 static int 10291 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev, 10292 struct mlx5_flow_handle *handle) 10293 { 10294 struct mlx5_priv *priv = dev->data->dev_private; 10295 uint32_t idx = handle->dvh.rix_push_vlan; 10296 struct mlx5_flow_dv_push_vlan_action_resource *cache_resource; 10297 10298 cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], 10299 idx); 10300 if (!cache_resource) 10301 return 0; 10302 MLX5_ASSERT(cache_resource->action); 10303 DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--", 10304 (void *)cache_resource, 10305 rte_atomic32_read(&cache_resource->refcnt)); 10306 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { 10307 claim_zero(mlx5_flow_os_destroy_flow_action 10308 (cache_resource->action)); 10309 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], 10310 &priv->sh->push_vlan_action_list, idx, 10311 cache_resource, next); 10312 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx); 10313 DRV_LOG(DEBUG, "push vlan action resource %p: removed", 10314 (void *)cache_resource); 10315 return 0; 10316 } 10317 return 1; 10318 } 10319 10320 /** 10321 * Release the fate resource. 10322 * 10323 * @param dev 10324 * Pointer to Ethernet device. 10325 * @param handle 10326 * Pointer to mlx5_flow_handle. 
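 *
 * handle->rix_fate is a union of the per-fate indexes (rix_hrxq, rix_jump,
 * rix_port_id_action, rix_default_fate), so it is interpreted according to
 * handle->fate_action and cleared once the fate object has been released.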
10327 */ 10328 static void 10329 flow_dv_fate_resource_release(struct rte_eth_dev *dev, 10330 struct mlx5_flow_handle *handle) 10331 { 10332 if (!handle->rix_fate) 10333 return; 10334 switch (handle->fate_action) { 10335 case MLX5_FLOW_FATE_DROP: 10336 mlx5_drop_action_destroy(dev); 10337 break; 10338 case MLX5_FLOW_FATE_QUEUE: 10339 mlx5_hrxq_release(dev, handle->rix_hrxq); 10340 break; 10341 case MLX5_FLOW_FATE_JUMP: 10342 flow_dv_jump_tbl_resource_release(dev, handle); 10343 break; 10344 case MLX5_FLOW_FATE_PORT_ID: 10345 flow_dv_port_id_action_resource_release(dev, 10346 handle->rix_port_id_action); 10347 break; 10348 case MLX5_FLOW_FATE_DEFAULT_MISS: 10349 flow_dv_default_miss_resource_release(dev); 10350 break; 10351 default: 10352 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action); 10353 break; 10354 } 10355 handle->rix_fate = 0; 10356 } 10357 10358 /** 10359 * Release an sample resource. 10360 * 10361 * @param dev 10362 * Pointer to Ethernet device. 10363 * @param handle 10364 * Pointer to mlx5_flow_handle. 10365 * 10366 * @return 10367 * 1 while a reference on it exists, 0 when freed. 10368 */ 10369 static int 10370 flow_dv_sample_resource_release(struct rte_eth_dev *dev, 10371 struct mlx5_flow_handle *handle) 10372 { 10373 struct mlx5_priv *priv = dev->data->dev_private; 10374 uint32_t idx = handle->dvh.rix_sample; 10375 struct mlx5_flow_dv_sample_resource *cache_resource; 10376 10377 cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE], 10378 idx); 10379 if (!cache_resource) 10380 return 0; 10381 MLX5_ASSERT(cache_resource->verbs_action); 10382 DRV_LOG(DEBUG, "sample resource %p: refcnt %d--", 10383 (void *)cache_resource, 10384 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)); 10385 if (__atomic_sub_fetch(&cache_resource->refcnt, 1, 10386 __ATOMIC_RELAXED) == 0) { 10387 if (cache_resource->verbs_action) 10388 claim_zero(mlx5_glue->destroy_flow_action 10389 (cache_resource->verbs_action)); 10390 if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) { 10391 if (cache_resource->default_miss) 10392 claim_zero(mlx5_glue->destroy_flow_action 10393 (cache_resource->default_miss)); 10394 } 10395 if (cache_resource->normal_path_tbl) 10396 flow_dv_tbl_resource_release(dev, 10397 cache_resource->normal_path_tbl); 10398 } 10399 if (cache_resource->sample_idx.rix_hrxq && 10400 !mlx5_hrxq_release(dev, 10401 cache_resource->sample_idx.rix_hrxq)) 10402 cache_resource->sample_idx.rix_hrxq = 0; 10403 if (cache_resource->sample_idx.rix_tag && 10404 !flow_dv_tag_release(dev, 10405 cache_resource->sample_idx.rix_tag)) 10406 cache_resource->sample_idx.rix_tag = 0; 10407 if (cache_resource->sample_idx.cnt) { 10408 flow_dv_counter_release(dev, 10409 cache_resource->sample_idx.cnt); 10410 cache_resource->sample_idx.cnt = 0; 10411 } 10412 if (!__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)) { 10413 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_SAMPLE], 10414 &priv->sh->sample_action_list, idx, 10415 cache_resource, next); 10416 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], idx); 10417 DRV_LOG(DEBUG, "sample resource %p: removed", 10418 (void *)cache_resource); 10419 return 0; 10420 } 10421 return 1; 10422 } 10423 10424 /** 10425 * Release an destination array resource. 10426 * 10427 * @param dev 10428 * Pointer to Ethernet device. 10429 * @param handle 10430 * Pointer to mlx5_flow_handle. 10431 * 10432 * @return 10433 * 1 while a reference on it exists, 0 when freed. 
10434 */ 10435 static int 10436 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev, 10437 struct mlx5_flow_handle *handle) 10438 { 10439 struct mlx5_priv *priv = dev->data->dev_private; 10440 struct mlx5_flow_dv_dest_array_resource *cache_resource; 10441 struct mlx5_flow_sub_actions_idx *mdest_act_res; 10442 uint32_t idx = handle->dvh.rix_dest_array; 10443 uint32_t i = 0; 10444 10445 cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], 10446 idx); 10447 if (!cache_resource) 10448 return 0; 10449 MLX5_ASSERT(cache_resource->action); 10450 DRV_LOG(DEBUG, "destination array resource %p: refcnt %d--", 10451 (void *)cache_resource, 10452 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)); 10453 if (__atomic_sub_fetch(&cache_resource->refcnt, 1, 10454 __ATOMIC_RELAXED) == 0) { 10455 if (cache_resource->action) 10456 claim_zero(mlx5_glue->destroy_flow_action 10457 (cache_resource->action)); 10458 for (; i < cache_resource->num_of_dest; i++) { 10459 mdest_act_res = &cache_resource->sample_idx[i]; 10460 if (mdest_act_res->rix_hrxq) { 10461 mlx5_hrxq_release(dev, 10462 mdest_act_res->rix_hrxq); 10463 mdest_act_res->rix_hrxq = 0; 10464 } 10465 if (mdest_act_res->rix_encap_decap) { 10466 flow_dv_encap_decap_resource_release(dev, 10467 mdest_act_res->rix_encap_decap); 10468 mdest_act_res->rix_encap_decap = 0; 10469 } 10470 if (mdest_act_res->rix_port_id_action) { 10471 flow_dv_port_id_action_resource_release(dev, 10472 mdest_act_res->rix_port_id_action); 10473 mdest_act_res->rix_port_id_action = 0; 10474 } 10475 if (mdest_act_res->rix_tag) { 10476 flow_dv_tag_release(dev, 10477 mdest_act_res->rix_tag); 10478 mdest_act_res->rix_tag = 0; 10479 } 10480 } 10481 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], 10482 &priv->sh->dest_array_list, idx, 10483 cache_resource, next); 10484 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], idx); 10485 DRV_LOG(DEBUG, "destination array resource %p: removed", 10486 (void *)cache_resource); 10487 return 0; 10488 } 10489 return 1; 10490 } 10491 10492 /** 10493 * Remove the flow from the NIC but keeps it in memory. 10494 * Lock free, (mutex should be acquired by caller). 10495 * 10496 * @param[in] dev 10497 * Pointer to Ethernet device. 10498 * @param[in, out] flow 10499 * Pointer to flow structure. 10500 */ 10501 static void 10502 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) 10503 { 10504 struct mlx5_flow_handle *dh; 10505 uint32_t handle_idx; 10506 struct mlx5_priv *priv = dev->data->dev_private; 10507 10508 if (!flow) 10509 return; 10510 handle_idx = flow->dev_handles; 10511 while (handle_idx) { 10512 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], 10513 handle_idx); 10514 if (!dh) 10515 return; 10516 if (dh->drv_flow) { 10517 claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow)); 10518 dh->drv_flow = NULL; 10519 } 10520 if (dh->fate_action == MLX5_FLOW_FATE_DROP || 10521 dh->fate_action == MLX5_FLOW_FATE_QUEUE || 10522 dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) 10523 flow_dv_fate_resource_release(dev, dh); 10524 if (dh->vf_vlan.tag && dh->vf_vlan.created) 10525 mlx5_vlan_vmwa_release(dev, &dh->vf_vlan); 10526 handle_idx = dh->next.next; 10527 } 10528 } 10529 10530 /** 10531 * Remove the flow from the NIC and the memory. 10532 * Lock free, (mutex should be acquired by caller). 10533 * 10534 * @param[in] dev 10535 * Pointer to the Ethernet device structure. 10536 * @param[in, out] flow 10537 * Pointer to flow structure. 
10538 */ 10539 static void 10540 __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) 10541 { 10542 struct mlx5_flow_handle *dev_handle; 10543 struct mlx5_priv *priv = dev->data->dev_private; 10544 10545 if (!flow) 10546 return; 10547 __flow_dv_remove(dev, flow); 10548 if (flow->counter) { 10549 flow_dv_counter_release(dev, flow->counter); 10550 flow->counter = 0; 10551 } 10552 if (flow->meter) { 10553 struct mlx5_flow_meter *fm; 10554 10555 fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR], 10556 flow->meter); 10557 if (fm) 10558 mlx5_flow_meter_detach(fm); 10559 flow->meter = 0; 10560 } 10561 while (flow->dev_handles) { 10562 uint32_t tmp_idx = flow->dev_handles; 10563 10564 dev_handle = mlx5_ipool_get(priv->sh->ipool 10565 [MLX5_IPOOL_MLX5_FLOW], tmp_idx); 10566 if (!dev_handle) 10567 return; 10568 flow->dev_handles = dev_handle->next.next; 10569 if (dev_handle->dvh.matcher) 10570 flow_dv_matcher_release(dev, dev_handle); 10571 if (dev_handle->dvh.rix_sample) 10572 flow_dv_sample_resource_release(dev, dev_handle); 10573 if (dev_handle->dvh.rix_dest_array) 10574 flow_dv_dest_array_resource_release(dev, dev_handle); 10575 if (dev_handle->dvh.rix_encap_decap) 10576 flow_dv_encap_decap_resource_release(dev, 10577 dev_handle->dvh.rix_encap_decap); 10578 if (dev_handle->dvh.modify_hdr) 10579 flow_dv_modify_hdr_resource_release(dev, dev_handle); 10580 if (dev_handle->dvh.rix_push_vlan) 10581 flow_dv_push_vlan_action_resource_release(dev, 10582 dev_handle); 10583 if (dev_handle->dvh.rix_tag) 10584 flow_dv_tag_release(dev, 10585 dev_handle->dvh.rix_tag); 10586 flow_dv_fate_resource_release(dev, dev_handle); 10587 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], 10588 tmp_idx); 10589 } 10590 } 10591 10592 /** 10593 * Query a dv flow rule for its statistics via devx. 10594 * 10595 * @param[in] dev 10596 * Pointer to Ethernet device. 10597 * @param[in] flow 10598 * Pointer to the sub flow. 10599 * @param[out] data 10600 * data retrieved by the query. 10601 * @param[out] error 10602 * Perform verbose error reporting if not NULL. 10603 * 10604 * @return 10605 * 0 on success, a negative errno value otherwise and rte_errno is set. 10606 */ 10607 static int 10608 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow, 10609 void *data, struct rte_flow_error *error) 10610 { 10611 struct mlx5_priv *priv = dev->data->dev_private; 10612 struct rte_flow_query_count *qc = data; 10613 10614 if (!priv->config.devx) 10615 return rte_flow_error_set(error, ENOTSUP, 10616 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 10617 NULL, 10618 "counters are not supported"); 10619 if (flow->counter) { 10620 uint64_t pkts, bytes; 10621 struct mlx5_flow_counter *cnt; 10622 10623 cnt = flow_dv_counter_get_by_idx(dev, flow->counter, 10624 NULL); 10625 int err = _flow_dv_query_count(dev, flow->counter, &pkts, 10626 &bytes); 10627 10628 if (err) 10629 return rte_flow_error_set(error, -err, 10630 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 10631 NULL, "cannot read counters"); 10632 qc->hits_set = 1; 10633 qc->bytes_set = 1; 10634 qc->hits = pkts - cnt->hits; 10635 qc->bytes = bytes - cnt->bytes; 10636 if (qc->reset) { 10637 cnt->hits = pkts; 10638 cnt->bytes = bytes; 10639 } 10640 return 0; 10641 } 10642 return rte_flow_error_set(error, EINVAL, 10643 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 10644 NULL, 10645 "counters are not available"); 10646 } 10647 10648 /** 10649 * Query a flow. 
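 *
 * Typical application-side usage of the COUNT query handled below
 * (illustrative sketch only; port_id and flow are assumed to come from
 * the caller):
 *
 * @code
 *	struct rte_flow_query_count cnt = { .reset = 1 };
 *	struct rte_flow_action query_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_query(port_id, flow, query_actions, &cnt, &err) == 0)
 *		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *		       cnt.hits, cnt.bytes);
 * @endcode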
10650 * 10651 * @see rte_flow_query() 10652 * @see rte_flow_ops 10653 */ 10654 static int 10655 flow_dv_query(struct rte_eth_dev *dev, 10656 struct rte_flow *flow __rte_unused, 10657 const struct rte_flow_action *actions __rte_unused, 10658 void *data __rte_unused, 10659 struct rte_flow_error *error __rte_unused) 10660 { 10661 int ret = -EINVAL; 10662 10663 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 10664 switch (actions->type) { 10665 case RTE_FLOW_ACTION_TYPE_VOID: 10666 break; 10667 case RTE_FLOW_ACTION_TYPE_COUNT: 10668 ret = flow_dv_query_count(dev, flow, data, error); 10669 break; 10670 default: 10671 return rte_flow_error_set(error, ENOTSUP, 10672 RTE_FLOW_ERROR_TYPE_ACTION, 10673 actions, 10674 "action not supported"); 10675 } 10676 } 10677 return ret; 10678 } 10679 10680 /** 10681 * Destroy the meter table set. 10682 * Lock free, (mutex should be acquired by caller). 10683 * 10684 * @param[in] dev 10685 * Pointer to Ethernet device. 10686 * @param[in] tbl 10687 * Pointer to the meter table set. 10688 * 10689 * @return 10690 * Always 0. 10691 */ 10692 static int 10693 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev, 10694 struct mlx5_meter_domains_infos *tbl) 10695 { 10696 struct mlx5_priv *priv = dev->data->dev_private; 10697 struct mlx5_meter_domains_infos *mtd = 10698 (struct mlx5_meter_domains_infos *)tbl; 10699 10700 if (!mtd || !priv->config.dv_flow_en) 10701 return 0; 10702 if (mtd->ingress.policer_rules[RTE_MTR_DROPPED]) 10703 claim_zero(mlx5_flow_os_destroy_flow 10704 (mtd->ingress.policer_rules[RTE_MTR_DROPPED])); 10705 if (mtd->egress.policer_rules[RTE_MTR_DROPPED]) 10706 claim_zero(mlx5_flow_os_destroy_flow 10707 (mtd->egress.policer_rules[RTE_MTR_DROPPED])); 10708 if (mtd->transfer.policer_rules[RTE_MTR_DROPPED]) 10709 claim_zero(mlx5_flow_os_destroy_flow 10710 (mtd->transfer.policer_rules[RTE_MTR_DROPPED])); 10711 if (mtd->egress.color_matcher) 10712 claim_zero(mlx5_flow_os_destroy_flow_matcher 10713 (mtd->egress.color_matcher)); 10714 if (mtd->egress.any_matcher) 10715 claim_zero(mlx5_flow_os_destroy_flow_matcher 10716 (mtd->egress.any_matcher)); 10717 if (mtd->egress.tbl) 10718 flow_dv_tbl_resource_release(dev, mtd->egress.tbl); 10719 if (mtd->egress.sfx_tbl) 10720 flow_dv_tbl_resource_release(dev, mtd->egress.sfx_tbl); 10721 if (mtd->ingress.color_matcher) 10722 claim_zero(mlx5_flow_os_destroy_flow_matcher 10723 (mtd->ingress.color_matcher)); 10724 if (mtd->ingress.any_matcher) 10725 claim_zero(mlx5_flow_os_destroy_flow_matcher 10726 (mtd->ingress.any_matcher)); 10727 if (mtd->ingress.tbl) 10728 flow_dv_tbl_resource_release(dev, mtd->ingress.tbl); 10729 if (mtd->ingress.sfx_tbl) 10730 flow_dv_tbl_resource_release(dev, mtd->ingress.sfx_tbl); 10731 if (mtd->transfer.color_matcher) 10732 claim_zero(mlx5_flow_os_destroy_flow_matcher 10733 (mtd->transfer.color_matcher)); 10734 if (mtd->transfer.any_matcher) 10735 claim_zero(mlx5_flow_os_destroy_flow_matcher 10736 (mtd->transfer.any_matcher)); 10737 if (mtd->transfer.tbl) 10738 flow_dv_tbl_resource_release(dev, mtd->transfer.tbl); 10739 if (mtd->transfer.sfx_tbl) 10740 flow_dv_tbl_resource_release(dev, mtd->transfer.sfx_tbl); 10741 if (mtd->drop_actn) 10742 claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn)); 10743 mlx5_free(mtd); 10744 return 0; 10745 } 10746 10747 /* Number of meter flow actions, count and jump or count and drop. */ 10748 #define METER_ACTIONS 2 10749 10750 /** 10751 * Create specify domain meter table and suffix table. 
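 *
 * Two tables are created for the selected domain (ingress, egress or FDB):
 * the policer table at MLX5_FLOW_TABLE_LEVEL_METER, holding the color
 * matcher and the lowest priority match-any drop rule, and the suffix table
 * at MLX5_FLOW_TABLE_LEVEL_SUFFIX that metered traffic continues to.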
/**
 * Create the meter table and suffix table for the specified domain.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in,out] mtb
 *   Pointer to DV meter table set.
 * @param[in] egress
 *   Nonzero to create the egress domain tables.
 * @param[in] transfer
 *   Nonzero to create the FDB (transfer) domain tables.
 * @param[in] color_reg_c_idx
 *   Reg C index for color match.
 *
 * @return
 *   0 on success, -1 otherwise and rte_errno is set.
 */
static int
flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
			   struct mlx5_meter_domains_infos *mtb,
			   uint8_t egress, uint8_t transfer,
			   uint32_t color_reg_c_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_dv_match_params mask = {
		.size = sizeof(mask.buf),
	};
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.priority = 0,
		.match_criteria_enable = 0,
		.match_mask = (void *)&mask,
	};
	void *actions[METER_ACTIONS];
	struct mlx5_meter_domain_info *dtb;
	struct rte_flow_error error;
	int i = 0;
	int ret;

	if (transfer)
		dtb = &mtb->transfer;
	else if (egress)
		dtb = &mtb->egress;
	else
		dtb = &mtb->ingress;
	/* Create the meter table with METER level. */
	dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
					    egress, transfer, &error);
	if (!dtb->tbl) {
		DRV_LOG(ERR, "Failed to create meter policer table.");
		return -1;
	}
	/* Create the meter suffix table with SUFFIX level. */
	dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
						MLX5_FLOW_TABLE_LEVEL_SUFFIX,
						egress, transfer, &error);
	if (!dtb->sfx_tbl) {
		DRV_LOG(ERR, "Failed to create meter suffix table.");
		return -1;
	}
	/* Create matchers, Any and Color. */
	dv_attr.priority = 3;
	dv_attr.match_criteria_enable = 0;
	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
					       &dtb->any_matcher);
	if (ret) {
		DRV_LOG(ERR, "Failed to create meter"
			" policer default matcher.");
		goto error_exit;
	}
	dv_attr.priority = 0;
	dv_attr.match_criteria_enable =
				1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
	flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
			       rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
					       &dtb->color_matcher);
	if (ret) {
		DRV_LOG(ERR, "Failed to create meter policer color matcher.");
		goto error_exit;
	}
	if (mtb->count_actns[RTE_MTR_DROPPED])
		actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
	actions[i++] = mtb->drop_actn;
	/* Default rule: lowest priority, match any, actions: drop. */
	ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
				       actions,
				       &dtb->policer_rules[RTE_MTR_DROPPED]);
	if (ret) {
		DRV_LOG(ERR, "Failed to create meter policer drop rule.");
		goto error_exit;
	}
	return 0;
error_exit:
	return -1;
}

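/*
 * Matcher precedence note for the function above: the color matcher is
 * created at priority 0 with the MISC2 criteria bit set, so the per-color
 * policer rules installed later take precedence over the priority-3
 * catch-all matcher backing the default drop rule.  The color value itself
 * is matched in the chosen REG_C register with an UINT8_MAX mask, i.e.
 * against whatever rte_col_2_mlx5_col() encodes for each color.
 */
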
/**
 * Create the needed meter and suffix tables.
 * Lock free (the mutex should be acquired by the caller).
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] fm
 *   Pointer to the flow meter.
 *
 * @return
 *   Pointer to table set on success, NULL otherwise and rte_errno is set.
 */
static struct mlx5_meter_domains_infos *
flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
		       const struct mlx5_flow_meter *fm)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_meter_domains_infos *mtb;
	int ret;
	int i;

	if (!priv->mtr_en) {
		rte_errno = ENOTSUP;
		return NULL;
	}
	mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
	if (!mtb) {
		DRV_LOG(ERR, "Failed to allocate memory for meter.");
		return NULL;
	}
	/* Create meter count actions. */
	for (i = 0; i <= RTE_MTR_DROPPED; i++) {
		struct mlx5_flow_counter *cnt;

		if (!fm->policer_stats.cnt[i])
			continue;
		cnt = flow_dv_counter_get_by_idx(dev,
						 fm->policer_stats.cnt[i],
						 NULL);
		mtb->count_actns[i] = cnt->action;
	}
	/* Create drop action. */
	ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
	if (ret) {
		DRV_LOG(ERR, "Failed to create drop action.");
		goto error_exit;
	}
	/* Egress meter table. */
	ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
	if (ret) {
		DRV_LOG(ERR, "Failed to prepare egress meter table.");
		goto error_exit;
	}
	/* Ingress meter table. */
	ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
	if (ret) {
		DRV_LOG(ERR, "Failed to prepare ingress meter table.");
		goto error_exit;
	}
	/* FDB meter table. */
	if (priv->config.dv_esw_en) {
		ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
						 priv->mtr_color_reg);
		if (ret) {
			DRV_LOG(ERR, "Failed to prepare fdb meter table.");
			goto error_exit;
		}
	}
	return mtb;
error_exit:
	flow_dv_destroy_mtr_tbl(dev, mtb);
	return NULL;
}

/**
 * Destroy domain policer rule.
 *
 * @param[in] dt
 *   Pointer to domain table.
 */
static void
flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
{
	int i;

	for (i = 0; i < RTE_MTR_DROPPED; i++) {
		if (dt->policer_rules[i]) {
			claim_zero(mlx5_flow_os_destroy_flow
				   (dt->policer_rules[i]));
			dt->policer_rules[i] = NULL;
		}
	}
	if (dt->jump_actn) {
		claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
		dt->jump_actn = NULL;
	}
}

/**
 * Destroy policer rules.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] fm
 *   Pointer to flow meter structure.
 * @param[in] attr
 *   Pointer to flow attributes.
 *
 * @return
 *   Always 0.
 */
static int
flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
			      const struct mlx5_flow_meter *fm,
			      const struct rte_flow_attr *attr)
{
	struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;

	if (!mtb)
		return 0;
	if (attr->egress)
		flow_dv_destroy_domain_policer_rule(&mtb->egress);
	if (attr->ingress)
		flow_dv_destroy_domain_policer_rule(&mtb->ingress);
	if (attr->transfer)
		flow_dv_destroy_domain_policer_rule(&mtb->transfer);
	return 0;
}

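/*
 * The helper below installs one policer rule per color (green, yellow and
 * red) on the domain's color matcher: each rule matches the mlx5 encoding of
 * that color in the meter REG_C register and either drops the packet (when
 * the meter policer action for the color is MTR_POLICER_ACTION_DROP) or
 * jumps to the domain suffix table, optionally preceded by the per-color
 * count action when one was allocated.
 */
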
/**
 * Create the meter policer rules for the specified domain.
 *
 * @param[in] fm
 *   Pointer to flow meter structure.
 * @param[in] dtb
 *   Pointer to the meter domain table info.
 * @param[in] mtr_reg_c
 *   Color match REG_C.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
				    struct mlx5_meter_domain_info *dtb,
				    uint8_t mtr_reg_c)
{
	struct mlx5_flow_dv_match_params matcher = {
		.size = sizeof(matcher.buf),
	};
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	struct mlx5_meter_domains_infos *mtb = fm->mfts;
	void *actions[METER_ACTIONS];
	int i;
	int ret = 0;

	/* Create jump action. */
	if (!dtb->jump_actn)
		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
					(dtb->sfx_tbl->obj, &dtb->jump_actn);
	if (ret) {
		DRV_LOG(ERR, "Failed to create policer jump action.");
		goto error;
	}
	for (i = 0; i < RTE_MTR_DROPPED; i++) {
		int j = 0;

		flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
				       rte_col_2_mlx5_col(i), UINT8_MAX);
		if (mtb->count_actns[i])
			actions[j++] = mtb->count_actns[i];
		if (fm->action[i] == MTR_POLICER_ACTION_DROP)
			actions[j++] = mtb->drop_actn;
		else
			actions[j++] = dtb->jump_actn;
		ret = mlx5_flow_os_create_flow(dtb->color_matcher,
					       (void *)&value, j, actions,
					       &dtb->policer_rules[i]);
		if (ret) {
			DRV_LOG(ERR, "Failed to create policer rule.");
			goto error;
		}
	}
	return 0;
error:
	rte_errno = errno;
	return -1;
}

/**
 * Create policer rules.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] fm
 *   Pointer to flow meter structure.
 * @param[in] attr
 *   Pointer to flow attributes.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
flow_dv_create_policer_rules(struct rte_eth_dev *dev,
			     struct mlx5_flow_meter *fm,
			     const struct rte_flow_attr *attr)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_meter_domains_infos *mtb = fm->mfts;
	int ret;

	if (attr->egress) {
		ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
							  priv->mtr_color_reg);
		if (ret) {
			DRV_LOG(ERR, "Failed to create egress policer.");
			goto error;
		}
	}
	if (attr->ingress) {
		ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
							  priv->mtr_color_reg);
		if (ret) {
			DRV_LOG(ERR, "Failed to create ingress policer.");
			goto error;
		}
	}
	if (attr->transfer) {
		ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
							  priv->mtr_color_reg);
		if (ret) {
			DRV_LOG(ERR, "Failed to create transfer policer.");
			goto error;
		}
	}
	return 0;
error:
	flow_dv_destroy_policer_rules(dev, fm, attr);
	return -1;
}

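/*
 * For reference (not driver code): these policer tables come into play when
 * an application attaches a previously created meter (rte_mtr_create()) to a
 * flow rule through the METER action.  A minimal, illustrative action list,
 * assuming meter id 1 already exists:
 *
 *	struct rte_flow_action_meter mtr_conf = { .mtr_id = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &mtr_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */
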
/**
 * Query a DevX counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the flow counter.
 * @param[in] clear
 *   Set to clear the counter statistics.
 * @param[out] pkts
 *   The statistics value of packets.
 * @param[out] bytes
 *   The statistics value of bytes.
 *
 * @return
 *   0 on success, otherwise return -1.
 */
static int
flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
		      uint64_t *pkts, uint64_t *bytes)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter *cnt;
	uint64_t inn_pkts, inn_bytes;
	int ret;

	if (!priv->config.devx)
		return -1;

	ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
	if (ret)
		return -1;
	cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
	*pkts = inn_pkts - cnt->hits;
	*bytes = inn_bytes - cnt->bytes;
	if (clear) {
		cnt->hits = inn_pkts;
		cnt->bytes = inn_bytes;
	}
	return 0;
}

/**
 * Get aged-out flows.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] context
 *   The address of an array of pointers to the aged-out flows contexts.
 * @param[in] nb_contexts
 *   The length of context array pointers.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   The number of aged contexts on success, otherwise a negative errno
 *   value. If nb_contexts is 0, return the total number of aged contexts;
 *   otherwise return the number of aged flow contexts written to the
 *   context array.
 */
static int
flow_get_aged_flows(struct rte_eth_dev *dev,
		    void **context,
		    uint32_t nb_contexts,
		    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_age_info *age_info;
	struct mlx5_age_param *age_param;
	struct mlx5_flow_counter *counter;
	int nb_flows = 0;

	if (nb_contexts && !context)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "Should assign at least one flow or"
					  " context to get if nb_contexts != 0");
	age_info = GET_PORT_AGE_INFO(priv);
	rte_spinlock_lock(&age_info->aged_sl);
	TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
		nb_flows++;
		if (nb_contexts) {
			age_param = MLX5_CNT_TO_AGE(counter);
			context[nb_flows - 1] = age_param->context;
			if (!(--nb_contexts))
				break;
		}
	}
	rte_spinlock_unlock(&age_info->aged_sl);
	MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
	return nb_flows;
}

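/*
 * Note: applications consume this through rte_flow_get_aged_flows(),
 * typically after receiving the RTE_ETH_EVENT_FLOW_AGED event.  The contexts
 * returned are those supplied in the AGE action of each flow.  A minimal
 * sketch, with port_id being the application's port and error handling
 * omitted; each contexts[i] then identifies an aged flow to the application:
 *
 *	void *contexts[64];
 *	struct rte_flow_error err;
 *	int n = rte_flow_get_aged_flows(port_id, NULL, 0, &err);
 *
 *	if (n > 0)
 *		n = rte_flow_get_aged_flows(port_id, contexts,
 *					    RTE_MIN(n, 64), &err);
 */
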
/*
 * Mutex-protected thunk to lock-free __flow_dv_translate().
 */
static int
flow_dv_translate(struct rte_eth_dev *dev,
		  struct mlx5_flow *dev_flow,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item items[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	int ret;

	flow_dv_shared_lock(dev);
	ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
	flow_dv_shared_unlock(dev);
	return ret;
}

/*
 * Mutex-protected thunk to lock-free __flow_dv_apply().
 */
static int
flow_dv_apply(struct rte_eth_dev *dev,
	      struct rte_flow *flow,
	      struct rte_flow_error *error)
{
	int ret;

	flow_dv_shared_lock(dev);
	ret = __flow_dv_apply(dev, flow, error);
	flow_dv_shared_unlock(dev);
	return ret;
}

/*
 * Mutex-protected thunk to lock-free __flow_dv_remove().
 */
static void
flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	flow_dv_shared_lock(dev);
	__flow_dv_remove(dev, flow);
	flow_dv_shared_unlock(dev);
}

/*
 * Mutex-protected thunk to lock-free __flow_dv_destroy().
 */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	flow_dv_shared_lock(dev);
	__flow_dv_destroy(dev, flow);
	flow_dv_shared_unlock(dev);
}

/*
 * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
 */
static uint32_t
flow_dv_counter_allocate(struct rte_eth_dev *dev)
{
	uint32_t cnt;

	flow_dv_shared_lock(dev);
	cnt = flow_dv_counter_alloc(dev, 0, 0, 1, 0);
	flow_dv_shared_unlock(dev);
	return cnt;
}

/*
 * Mutex-protected thunk to lock-free flow_dv_counter_release().
 */
static void
flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
{
	flow_dv_shared_lock(dev);
	flow_dv_counter_release(dev, cnt);
	flow_dv_shared_unlock(dev);
}

const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_dv_translate,
	.apply = flow_dv_apply,
	.remove = flow_dv_remove,
	.destroy = flow_dv_destroy,
	.query = flow_dv_query,
	.create_mtr_tbls = flow_dv_create_mtr_tbl,
	.destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
	.create_policer_rules = flow_dv_create_policer_rules,
	.destroy_policer_rules = flow_dv_destroy_policer_rules,
	.counter_alloc = flow_dv_counter_allocate,
	.counter_free = flow_dv_counter_free,
	.counter_query = flow_dv_counter_query,
	.get_aged_flows = flow_get_aged_flows,
};

#endif /* HAVE_IBV_FLOW_DV_SUPPORT */