/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>

#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

union flow_dv_attr {
	struct {
		uint32_t valid:1;
		uint32_t ipv4:1;
		uint32_t ipv6:1;
		uint32_t tcp:1;
		uint32_t udp:1;
		uint32_t reserved:27;
	};
	uint32_t attr;
};

static int
flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
			     struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_default_miss_resource_release(struct rte_eth_dev *dev);

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel
 * mode. For tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
		  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
	uint64_t layers = dev_flow->handle->layers;

	/*
	 * If layers is already initialized, this dev_flow is the suffix flow
	 * and the layer flags were set by the prefix flow. Use the layer
	 * flags from the prefix flow, as the suffix flow may not carry the
	 * user-defined items because the flow has been split.
111 */ 112 if (layers) { 113 if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4) 114 attr->ipv4 = 1; 115 else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6) 116 attr->ipv6 = 1; 117 if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP) 118 attr->tcp = 1; 119 else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP) 120 attr->udp = 1; 121 attr->valid = 1; 122 return; 123 } 124 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { 125 uint8_t next_protocol = 0xff; 126 switch (item->type) { 127 case RTE_FLOW_ITEM_TYPE_GRE: 128 case RTE_FLOW_ITEM_TYPE_NVGRE: 129 case RTE_FLOW_ITEM_TYPE_VXLAN: 130 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: 131 case RTE_FLOW_ITEM_TYPE_GENEVE: 132 case RTE_FLOW_ITEM_TYPE_MPLS: 133 if (tunnel_decap) 134 attr->attr = 0; 135 break; 136 case RTE_FLOW_ITEM_TYPE_IPV4: 137 if (!attr->ipv6) 138 attr->ipv4 = 1; 139 if (item->mask != NULL && 140 ((const struct rte_flow_item_ipv4 *) 141 item->mask)->hdr.next_proto_id) 142 next_protocol = 143 ((const struct rte_flow_item_ipv4 *) 144 (item->spec))->hdr.next_proto_id & 145 ((const struct rte_flow_item_ipv4 *) 146 (item->mask))->hdr.next_proto_id; 147 if ((next_protocol == IPPROTO_IPIP || 148 next_protocol == IPPROTO_IPV6) && tunnel_decap) 149 attr->attr = 0; 150 break; 151 case RTE_FLOW_ITEM_TYPE_IPV6: 152 if (!attr->ipv4) 153 attr->ipv6 = 1; 154 if (item->mask != NULL && 155 ((const struct rte_flow_item_ipv6 *) 156 item->mask)->hdr.proto) 157 next_protocol = 158 ((const struct rte_flow_item_ipv6 *) 159 (item->spec))->hdr.proto & 160 ((const struct rte_flow_item_ipv6 *) 161 (item->mask))->hdr.proto; 162 if ((next_protocol == IPPROTO_IPIP || 163 next_protocol == IPPROTO_IPV6) && tunnel_decap) 164 attr->attr = 0; 165 break; 166 case RTE_FLOW_ITEM_TYPE_UDP: 167 if (!attr->tcp) 168 attr->udp = 1; 169 break; 170 case RTE_FLOW_ITEM_TYPE_TCP: 171 if (!attr->udp) 172 attr->tcp = 1; 173 break; 174 default: 175 break; 176 } 177 } 178 attr->valid = 1; 179 } 180 181 /** 182 * Convert rte_mtr_color to mlx5 color. 183 * 184 * @param[in] rcol 185 * rte_mtr_color. 186 * 187 * @return 188 * mlx5 color. 189 */ 190 static int 191 rte_col_2_mlx5_col(enum rte_color rcol) 192 { 193 switch (rcol) { 194 case RTE_COLOR_GREEN: 195 return MLX5_FLOW_COLOR_GREEN; 196 case RTE_COLOR_YELLOW: 197 return MLX5_FLOW_COLOR_YELLOW; 198 case RTE_COLOR_RED: 199 return MLX5_FLOW_COLOR_RED; 200 default: 201 break; 202 } 203 return MLX5_FLOW_COLOR_UNDEFINED; 204 } 205 206 struct field_modify_info { 207 uint32_t size; /* Size of field in protocol header, in bytes. */ 208 uint32_t offset; /* Offset of field in protocol header, in bytes. */ 209 enum mlx5_modification_field id; 210 }; 211 212 struct field_modify_info modify_eth[] = { 213 {4, 0, MLX5_MODI_OUT_DMAC_47_16}, 214 {2, 4, MLX5_MODI_OUT_DMAC_15_0}, 215 {4, 6, MLX5_MODI_OUT_SMAC_47_16}, 216 {2, 10, MLX5_MODI_OUT_SMAC_15_0}, 217 {0, 0, 0}, 218 }; 219 220 struct field_modify_info modify_vlan_out_first_vid[] = { 221 /* Size in bits !!! 
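 * Unlike the other modify_* tables in this file, which give field sizes in
 * bytes, the first-VID entry below is expressed as a 12-bit field.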
*/ 222 {12, 0, MLX5_MODI_OUT_FIRST_VID}, 223 {0, 0, 0}, 224 }; 225 226 struct field_modify_info modify_ipv4[] = { 227 {1, 1, MLX5_MODI_OUT_IP_DSCP}, 228 {1, 8, MLX5_MODI_OUT_IPV4_TTL}, 229 {4, 12, MLX5_MODI_OUT_SIPV4}, 230 {4, 16, MLX5_MODI_OUT_DIPV4}, 231 {0, 0, 0}, 232 }; 233 234 struct field_modify_info modify_ipv6[] = { 235 {1, 0, MLX5_MODI_OUT_IP_DSCP}, 236 {1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT}, 237 {4, 8, MLX5_MODI_OUT_SIPV6_127_96}, 238 {4, 12, MLX5_MODI_OUT_SIPV6_95_64}, 239 {4, 16, MLX5_MODI_OUT_SIPV6_63_32}, 240 {4, 20, MLX5_MODI_OUT_SIPV6_31_0}, 241 {4, 24, MLX5_MODI_OUT_DIPV6_127_96}, 242 {4, 28, MLX5_MODI_OUT_DIPV6_95_64}, 243 {4, 32, MLX5_MODI_OUT_DIPV6_63_32}, 244 {4, 36, MLX5_MODI_OUT_DIPV6_31_0}, 245 {0, 0, 0}, 246 }; 247 248 struct field_modify_info modify_udp[] = { 249 {2, 0, MLX5_MODI_OUT_UDP_SPORT}, 250 {2, 2, MLX5_MODI_OUT_UDP_DPORT}, 251 {0, 0, 0}, 252 }; 253 254 struct field_modify_info modify_tcp[] = { 255 {2, 0, MLX5_MODI_OUT_TCP_SPORT}, 256 {2, 2, MLX5_MODI_OUT_TCP_DPORT}, 257 {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM}, 258 {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM}, 259 {0, 0, 0}, 260 }; 261 262 static void 263 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused, 264 uint8_t next_protocol, uint64_t *item_flags, 265 int *tunnel) 266 { 267 MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 || 268 item->type == RTE_FLOW_ITEM_TYPE_IPV6); 269 if (next_protocol == IPPROTO_IPIP) { 270 *item_flags |= MLX5_FLOW_LAYER_IPIP; 271 *tunnel = 1; 272 } 273 if (next_protocol == IPPROTO_IPV6) { 274 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP; 275 *tunnel = 1; 276 } 277 } 278 279 /** 280 * Acquire the synchronizing object to protect multithreaded access 281 * to shared dv context. Lock occurs only if context is actually 282 * shared, i.e. we have multiport IB device and representors are 283 * created. 284 * 285 * @param[in] dev 286 * Pointer to the rte_eth_dev structure. 287 */ 288 static void 289 flow_dv_shared_lock(struct rte_eth_dev *dev) 290 { 291 struct mlx5_priv *priv = dev->data->dev_private; 292 struct mlx5_dev_ctx_shared *sh = priv->sh; 293 294 if (sh->dv_refcnt > 1) { 295 int ret; 296 297 ret = pthread_mutex_lock(&sh->dv_mutex); 298 MLX5_ASSERT(!ret); 299 (void)ret; 300 } 301 } 302 303 static void 304 flow_dv_shared_unlock(struct rte_eth_dev *dev) 305 { 306 struct mlx5_priv *priv = dev->data->dev_private; 307 struct mlx5_dev_ctx_shared *sh = priv->sh; 308 309 if (sh->dv_refcnt > 1) { 310 int ret; 311 312 ret = pthread_mutex_unlock(&sh->dv_mutex); 313 MLX5_ASSERT(!ret); 314 (void)ret; 315 } 316 } 317 318 /* Update VLAN's VID/PCP based on input rte_flow_action. 319 * 320 * @param[in] action 321 * Pointer to struct rte_flow_action. 322 * @param[out] vlan 323 * Pointer to struct rte_vlan_hdr. 
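 *
 * Note: in the 802.1Q TCI the PCP occupies bits 15-13 and the VID bits 11-0,
 * which is what the MLX5DV_FLOW_VLAN_PCP_SHIFT/_MASK and
 * MLX5DV_FLOW_VLAN_VID_MASK definitions above encode.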
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
			 struct rte_vlan_hdr *vlan)
{
	uint16_t vlan_tci;

	if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
		vlan_tci =
		    ((const struct rte_flow_action_of_set_vlan_pcp *)
					       action->conf)->vlan_pcp;
		vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
		vlan->vlan_tci |= vlan_tci;
	} else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
		vlan->vlan_tci |= rte_be_to_cpu_16
		    (((const struct rte_flow_action_of_set_vlan_vid *)
					     action->conf)->vlan_vid);
	}
}

/**
 * Fetch a 1, 2, 3 or 4 byte field from the byte array
 * and return it as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
	uint32_t ret;

	switch (size) {
	case 1:
		ret = *data;
		break;
	case 2:
		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
		break;
	case 3:
		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
		ret = (ret << 8) | *(data + sizeof(uint16_t));
		break;
	case 4:
		ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
		break;
	default:
		MLX5_ASSERT(false);
		ret = 0;
		break;
	}
	return ret;
}

/**
 * Convert modify-header action to DV specification.
 *
 * The data length of each action is determined by the provided field
 * description and the item mask; the data bit offset and width of each
 * action are deduced from the item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset value sets the same offset as the source offset.
 *   The size field is ignored, the value is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct field_modify_info *dcopy,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type, struct rte_flow_error *error)
{
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;

	/*
	 * The item and mask are provided in big-endian format.
	 * The fields should be presented in big-endian format as well.
	 * The mask must always be present; it defines the actual field width.
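	 *
	 * For example (illustrative): a fetched 4-byte mask of 0x00ffff00
	 * yields off_b = 8 and size_b = 16 below, so only the middle 16 bits
	 * of the field are rewritten, and the spec data is shifted right by
	 * 8 bits before being stored in the command.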
426 */ 427 MLX5_ASSERT(item->mask); 428 MLX5_ASSERT(field->size); 429 do { 430 unsigned int size_b; 431 unsigned int off_b; 432 uint32_t mask; 433 uint32_t data; 434 435 if (i >= MLX5_MAX_MODIFY_NUM) 436 return rte_flow_error_set(error, EINVAL, 437 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 438 "too many items to modify"); 439 /* Fetch variable byte size mask from the array. */ 440 mask = flow_dv_fetch_field((const uint8_t *)item->mask + 441 field->offset, field->size); 442 if (!mask) { 443 ++field; 444 continue; 445 } 446 /* Deduce actual data width in bits from mask value. */ 447 off_b = rte_bsf32(mask); 448 size_b = sizeof(uint32_t) * CHAR_BIT - 449 off_b - __builtin_clz(mask); 450 MLX5_ASSERT(size_b); 451 size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b; 452 actions[i] = (struct mlx5_modification_cmd) { 453 .action_type = type, 454 .field = field->id, 455 .offset = off_b, 456 .length = size_b, 457 }; 458 /* Convert entire record to expected big-endian format. */ 459 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0); 460 if (type == MLX5_MODIFICATION_TYPE_COPY) { 461 MLX5_ASSERT(dcopy); 462 actions[i].dst_field = dcopy->id; 463 actions[i].dst_offset = 464 (int)dcopy->offset < 0 ? off_b : dcopy->offset; 465 /* Convert entire record to big-endian format. */ 466 actions[i].data1 = rte_cpu_to_be_32(actions[i].data1); 467 } else { 468 MLX5_ASSERT(item->spec); 469 data = flow_dv_fetch_field((const uint8_t *)item->spec + 470 field->offset, field->size); 471 /* Shift out the trailing masked bits from data. */ 472 data = (data & mask) >> off_b; 473 actions[i].data1 = rte_cpu_to_be_32(data); 474 } 475 ++i; 476 ++field; 477 } while (field->size); 478 if (resource->actions_num == i) 479 return rte_flow_error_set(error, EINVAL, 480 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 481 "invalid modification flow item"); 482 resource->actions_num = i; 483 return 0; 484 } 485 486 /** 487 * Convert modify-header set IPv4 address action to DV specification. 488 * 489 * @param[in,out] resource 490 * Pointer to the modify-header resource. 491 * @param[in] action 492 * Pointer to action specification. 493 * @param[out] error 494 * Pointer to the error structure. 495 * 496 * @return 497 * 0 on success, a negative errno value otherwise and rte_errno is set. 498 */ 499 static int 500 flow_dv_convert_action_modify_ipv4 501 (struct mlx5_flow_dv_modify_hdr_resource *resource, 502 const struct rte_flow_action *action, 503 struct rte_flow_error *error) 504 { 505 const struct rte_flow_action_set_ipv4 *conf = 506 (const struct rte_flow_action_set_ipv4 *)(action->conf); 507 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 }; 508 struct rte_flow_item_ipv4 ipv4; 509 struct rte_flow_item_ipv4 ipv4_mask; 510 511 memset(&ipv4, 0, sizeof(ipv4)); 512 memset(&ipv4_mask, 0, sizeof(ipv4_mask)); 513 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) { 514 ipv4.hdr.src_addr = conf->ipv4_addr; 515 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr; 516 } else { 517 ipv4.hdr.dst_addr = conf->ipv4_addr; 518 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr; 519 } 520 item.spec = &ipv4; 521 item.mask = &ipv4_mask; 522 return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource, 523 MLX5_MODIFICATION_TYPE_SET, error); 524 } 525 526 /** 527 * Convert modify-header set IPv6 address action to DV specification. 528 * 529 * @param[in,out] resource 530 * Pointer to the modify-header resource. 531 * @param[in] action 532 * Pointer to action specification. 
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv6 *conf =
		(const struct rte_flow_action_set_ipv6 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;

	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
		memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.src_addr));
		memcpy(&ipv6_mask.hdr.src_addr,
		       &rte_flow_item_ipv6_mask.hdr.src_addr,
		       sizeof(ipv6.hdr.src_addr));
	} else {
		memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.dst_addr));
		memcpy(&ipv6_mask.hdr.dst_addr,
		       &rte_flow_item_ipv6_mask.hdr.dst_addr,
		       sizeof(ipv6.hdr.dst_addr));
	}
	item.spec = &ipv6;
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_mac *conf =
		(const struct rte_flow_action_set_mac *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
	struct rte_flow_item_eth eth;
	struct rte_flow_item_eth eth_mask;

	memset(&eth, 0, sizeof(eth));
	memset(&eth_mask, 0, sizeof(eth_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
		memcpy(&eth.src.addr_bytes, &conf->mac_addr,
		       sizeof(eth.src.addr_bytes));
		memcpy(&eth_mask.src.addr_bytes,
		       &rte_flow_item_eth_mask.src.addr_bytes,
		       sizeof(eth_mask.src.addr_bytes));
	} else {
		memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
		       sizeof(eth.dst.addr_bytes));
		memcpy(&eth_mask.dst.addr_bytes,
		       &rte_flow_item_eth_mask.dst.addr_bytes,
		       sizeof(eth_mask.dst.addr_bytes));
	}
	item.spec = &eth;
	item.mask = &eth_mask;
	return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
630 */ 631 static int 632 flow_dv_convert_action_modify_vlan_vid 633 (struct mlx5_flow_dv_modify_hdr_resource *resource, 634 const struct rte_flow_action *action, 635 struct rte_flow_error *error) 636 { 637 const struct rte_flow_action_of_set_vlan_vid *conf = 638 (const struct rte_flow_action_of_set_vlan_vid *)(action->conf); 639 int i = resource->actions_num; 640 struct mlx5_modification_cmd *actions = resource->actions; 641 struct field_modify_info *field = modify_vlan_out_first_vid; 642 643 if (i >= MLX5_MAX_MODIFY_NUM) 644 return rte_flow_error_set(error, EINVAL, 645 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 646 "too many items to modify"); 647 actions[i] = (struct mlx5_modification_cmd) { 648 .action_type = MLX5_MODIFICATION_TYPE_SET, 649 .field = field->id, 650 .length = field->size, 651 .offset = field->offset, 652 }; 653 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0); 654 actions[i].data1 = conf->vlan_vid; 655 actions[i].data1 = actions[i].data1 << 16; 656 resource->actions_num = ++i; 657 return 0; 658 } 659 660 /** 661 * Convert modify-header set TP action to DV specification. 662 * 663 * @param[in,out] resource 664 * Pointer to the modify-header resource. 665 * @param[in] action 666 * Pointer to action specification. 667 * @param[in] items 668 * Pointer to rte_flow_item objects list. 669 * @param[in] attr 670 * Pointer to flow attributes structure. 671 * @param[in] dev_flow 672 * Pointer to the sub flow. 673 * @param[in] tunnel_decap 674 * Whether action is after tunnel decapsulation. 675 * @param[out] error 676 * Pointer to the error structure. 677 * 678 * @return 679 * 0 on success, a negative errno value otherwise and rte_errno is set. 680 */ 681 static int 682 flow_dv_convert_action_modify_tp 683 (struct mlx5_flow_dv_modify_hdr_resource *resource, 684 const struct rte_flow_action *action, 685 const struct rte_flow_item *items, 686 union flow_dv_attr *attr, struct mlx5_flow *dev_flow, 687 bool tunnel_decap, struct rte_flow_error *error) 688 { 689 const struct rte_flow_action_set_tp *conf = 690 (const struct rte_flow_action_set_tp *)(action->conf); 691 struct rte_flow_item item; 692 struct rte_flow_item_udp udp; 693 struct rte_flow_item_udp udp_mask; 694 struct rte_flow_item_tcp tcp; 695 struct rte_flow_item_tcp tcp_mask; 696 struct field_modify_info *field; 697 698 if (!attr->valid) 699 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap); 700 if (attr->udp) { 701 memset(&udp, 0, sizeof(udp)); 702 memset(&udp_mask, 0, sizeof(udp_mask)); 703 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) { 704 udp.hdr.src_port = conf->port; 705 udp_mask.hdr.src_port = 706 rte_flow_item_udp_mask.hdr.src_port; 707 } else { 708 udp.hdr.dst_port = conf->port; 709 udp_mask.hdr.dst_port = 710 rte_flow_item_udp_mask.hdr.dst_port; 711 } 712 item.type = RTE_FLOW_ITEM_TYPE_UDP; 713 item.spec = &udp; 714 item.mask = &udp_mask; 715 field = modify_udp; 716 } else { 717 MLX5_ASSERT(attr->tcp); 718 memset(&tcp, 0, sizeof(tcp)); 719 memset(&tcp_mask, 0, sizeof(tcp_mask)); 720 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) { 721 tcp.hdr.src_port = conf->port; 722 tcp_mask.hdr.src_port = 723 rte_flow_item_tcp_mask.hdr.src_port; 724 } else { 725 tcp.hdr.dst_port = conf->port; 726 tcp_mask.hdr.dst_port = 727 rte_flow_item_tcp_mask.hdr.dst_port; 728 } 729 item.type = RTE_FLOW_ITEM_TYPE_TCP; 730 item.spec = &tcp; 731 item.mask = &tcp_mask; 732 field = modify_tcp; 733 } 734 return flow_dv_convert_modify_action(&item, field, NULL, resource, 735 MLX5_MODIFICATION_TYPE_SET, error); 736 } 737 738 /** 739 * 
Convert modify-header set TTL action to DV specification. 740 * 741 * @param[in,out] resource 742 * Pointer to the modify-header resource. 743 * @param[in] action 744 * Pointer to action specification. 745 * @param[in] items 746 * Pointer to rte_flow_item objects list. 747 * @param[in] attr 748 * Pointer to flow attributes structure. 749 * @param[in] dev_flow 750 * Pointer to the sub flow. 751 * @param[in] tunnel_decap 752 * Whether action is after tunnel decapsulation. 753 * @param[out] error 754 * Pointer to the error structure. 755 * 756 * @return 757 * 0 on success, a negative errno value otherwise and rte_errno is set. 758 */ 759 static int 760 flow_dv_convert_action_modify_ttl 761 (struct mlx5_flow_dv_modify_hdr_resource *resource, 762 const struct rte_flow_action *action, 763 const struct rte_flow_item *items, 764 union flow_dv_attr *attr, struct mlx5_flow *dev_flow, 765 bool tunnel_decap, struct rte_flow_error *error) 766 { 767 const struct rte_flow_action_set_ttl *conf = 768 (const struct rte_flow_action_set_ttl *)(action->conf); 769 struct rte_flow_item item; 770 struct rte_flow_item_ipv4 ipv4; 771 struct rte_flow_item_ipv4 ipv4_mask; 772 struct rte_flow_item_ipv6 ipv6; 773 struct rte_flow_item_ipv6 ipv6_mask; 774 struct field_modify_info *field; 775 776 if (!attr->valid) 777 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap); 778 if (attr->ipv4) { 779 memset(&ipv4, 0, sizeof(ipv4)); 780 memset(&ipv4_mask, 0, sizeof(ipv4_mask)); 781 ipv4.hdr.time_to_live = conf->ttl_value; 782 ipv4_mask.hdr.time_to_live = 0xFF; 783 item.type = RTE_FLOW_ITEM_TYPE_IPV4; 784 item.spec = &ipv4; 785 item.mask = &ipv4_mask; 786 field = modify_ipv4; 787 } else { 788 MLX5_ASSERT(attr->ipv6); 789 memset(&ipv6, 0, sizeof(ipv6)); 790 memset(&ipv6_mask, 0, sizeof(ipv6_mask)); 791 ipv6.hdr.hop_limits = conf->ttl_value; 792 ipv6_mask.hdr.hop_limits = 0xFF; 793 item.type = RTE_FLOW_ITEM_TYPE_IPV6; 794 item.spec = &ipv6; 795 item.mask = &ipv6_mask; 796 field = modify_ipv6; 797 } 798 return flow_dv_convert_modify_action(&item, field, NULL, resource, 799 MLX5_MODIFICATION_TYPE_SET, error); 800 } 801 802 /** 803 * Convert modify-header decrement TTL action to DV specification. 804 * 805 * @param[in,out] resource 806 * Pointer to the modify-header resource. 807 * @param[in] action 808 * Pointer to action specification. 809 * @param[in] items 810 * Pointer to rte_flow_item objects list. 811 * @param[in] attr 812 * Pointer to flow attributes structure. 813 * @param[in] dev_flow 814 * Pointer to the sub flow. 815 * @param[in] tunnel_decap 816 * Whether action is after tunnel decapsulation. 817 * @param[out] error 818 * Pointer to the error structure. 819 * 820 * @return 821 * 0 on success, a negative errno value otherwise and rte_errno is set. 
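 *
 * Note: decrement is implemented by adding 0xFF with
 * MLX5_MODIFICATION_TYPE_ADD below; the 8-bit TTL/hop-limit field wraps
 * modulo 256, which is equivalent to subtracting one.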
822 */ 823 static int 824 flow_dv_convert_action_modify_dec_ttl 825 (struct mlx5_flow_dv_modify_hdr_resource *resource, 826 const struct rte_flow_item *items, 827 union flow_dv_attr *attr, struct mlx5_flow *dev_flow, 828 bool tunnel_decap, struct rte_flow_error *error) 829 { 830 struct rte_flow_item item; 831 struct rte_flow_item_ipv4 ipv4; 832 struct rte_flow_item_ipv4 ipv4_mask; 833 struct rte_flow_item_ipv6 ipv6; 834 struct rte_flow_item_ipv6 ipv6_mask; 835 struct field_modify_info *field; 836 837 if (!attr->valid) 838 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap); 839 if (attr->ipv4) { 840 memset(&ipv4, 0, sizeof(ipv4)); 841 memset(&ipv4_mask, 0, sizeof(ipv4_mask)); 842 ipv4.hdr.time_to_live = 0xFF; 843 ipv4_mask.hdr.time_to_live = 0xFF; 844 item.type = RTE_FLOW_ITEM_TYPE_IPV4; 845 item.spec = &ipv4; 846 item.mask = &ipv4_mask; 847 field = modify_ipv4; 848 } else { 849 MLX5_ASSERT(attr->ipv6); 850 memset(&ipv6, 0, sizeof(ipv6)); 851 memset(&ipv6_mask, 0, sizeof(ipv6_mask)); 852 ipv6.hdr.hop_limits = 0xFF; 853 ipv6_mask.hdr.hop_limits = 0xFF; 854 item.type = RTE_FLOW_ITEM_TYPE_IPV6; 855 item.spec = &ipv6; 856 item.mask = &ipv6_mask; 857 field = modify_ipv6; 858 } 859 return flow_dv_convert_modify_action(&item, field, NULL, resource, 860 MLX5_MODIFICATION_TYPE_ADD, error); 861 } 862 863 /** 864 * Convert modify-header increment/decrement TCP Sequence number 865 * to DV specification. 866 * 867 * @param[in,out] resource 868 * Pointer to the modify-header resource. 869 * @param[in] action 870 * Pointer to action specification. 871 * @param[out] error 872 * Pointer to the error structure. 873 * 874 * @return 875 * 0 on success, a negative errno value otherwise and rte_errno is set. 876 */ 877 static int 878 flow_dv_convert_action_modify_tcp_seq 879 (struct mlx5_flow_dv_modify_hdr_resource *resource, 880 const struct rte_flow_action *action, 881 struct rte_flow_error *error) 882 { 883 const rte_be32_t *conf = (const rte_be32_t *)(action->conf); 884 uint64_t value = rte_be_to_cpu_32(*conf); 885 struct rte_flow_item item; 886 struct rte_flow_item_tcp tcp; 887 struct rte_flow_item_tcp tcp_mask; 888 889 memset(&tcp, 0, sizeof(tcp)); 890 memset(&tcp_mask, 0, sizeof(tcp_mask)); 891 if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ) 892 /* 893 * The HW has no decrement operation, only increment operation. 894 * To simulate decrement X from Y using increment operation 895 * we need to add UINT32_MAX X times to Y. 896 * Each adding of UINT32_MAX decrements Y by 1. 897 */ 898 value *= UINT32_MAX; 899 tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value); 900 tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX); 901 item.type = RTE_FLOW_ITEM_TYPE_TCP; 902 item.spec = &tcp; 903 item.mask = &tcp_mask; 904 return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource, 905 MLX5_MODIFICATION_TYPE_ADD, error); 906 } 907 908 /** 909 * Convert modify-header increment/decrement TCP Acknowledgment number 910 * to DV specification. 911 * 912 * @param[in,out] resource 913 * Pointer to the modify-header resource. 914 * @param[in] action 915 * Pointer to action specification. 916 * @param[out] error 917 * Pointer to the error structure. 918 * 919 * @return 920 * 0 on success, a negative errno value otherwise and rte_errno is set. 
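 *
 * Worked example (illustrative): to decrement the ACK number by 3, the
 * action value is multiplied by UINT32_MAX, giving 3 * 0xFFFFFFFF; truncated
 * to 32 bits this is 0xFFFFFFFD, and adding it wraps around to an effective
 * subtraction of 3.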
921 */ 922 static int 923 flow_dv_convert_action_modify_tcp_ack 924 (struct mlx5_flow_dv_modify_hdr_resource *resource, 925 const struct rte_flow_action *action, 926 struct rte_flow_error *error) 927 { 928 const rte_be32_t *conf = (const rte_be32_t *)(action->conf); 929 uint64_t value = rte_be_to_cpu_32(*conf); 930 struct rte_flow_item item; 931 struct rte_flow_item_tcp tcp; 932 struct rte_flow_item_tcp tcp_mask; 933 934 memset(&tcp, 0, sizeof(tcp)); 935 memset(&tcp_mask, 0, sizeof(tcp_mask)); 936 if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK) 937 /* 938 * The HW has no decrement operation, only increment operation. 939 * To simulate decrement X from Y using increment operation 940 * we need to add UINT32_MAX X times to Y. 941 * Each adding of UINT32_MAX decrements Y by 1. 942 */ 943 value *= UINT32_MAX; 944 tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value); 945 tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX); 946 item.type = RTE_FLOW_ITEM_TYPE_TCP; 947 item.spec = &tcp; 948 item.mask = &tcp_mask; 949 return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource, 950 MLX5_MODIFICATION_TYPE_ADD, error); 951 } 952 953 static enum mlx5_modification_field reg_to_field[] = { 954 [REG_NONE] = MLX5_MODI_OUT_NONE, 955 [REG_A] = MLX5_MODI_META_DATA_REG_A, 956 [REG_B] = MLX5_MODI_META_DATA_REG_B, 957 [REG_C_0] = MLX5_MODI_META_REG_C_0, 958 [REG_C_1] = MLX5_MODI_META_REG_C_1, 959 [REG_C_2] = MLX5_MODI_META_REG_C_2, 960 [REG_C_3] = MLX5_MODI_META_REG_C_3, 961 [REG_C_4] = MLX5_MODI_META_REG_C_4, 962 [REG_C_5] = MLX5_MODI_META_REG_C_5, 963 [REG_C_6] = MLX5_MODI_META_REG_C_6, 964 [REG_C_7] = MLX5_MODI_META_REG_C_7, 965 }; 966 967 /** 968 * Convert register set to DV specification. 969 * 970 * @param[in,out] resource 971 * Pointer to the modify-header resource. 972 * @param[in] action 973 * Pointer to action specification. 974 * @param[out] error 975 * Pointer to the error structure. 976 * 977 * @return 978 * 0 on success, a negative errno value otherwise and rte_errno is set. 979 */ 980 static int 981 flow_dv_convert_action_set_reg 982 (struct mlx5_flow_dv_modify_hdr_resource *resource, 983 const struct rte_flow_action *action, 984 struct rte_flow_error *error) 985 { 986 const struct mlx5_rte_flow_action_set_tag *conf = action->conf; 987 struct mlx5_modification_cmd *actions = resource->actions; 988 uint32_t i = resource->actions_num; 989 990 if (i >= MLX5_MAX_MODIFY_NUM) 991 return rte_flow_error_set(error, EINVAL, 992 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 993 "too many items to modify"); 994 MLX5_ASSERT(conf->id != REG_NONE); 995 MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field)); 996 actions[i] = (struct mlx5_modification_cmd) { 997 .action_type = MLX5_MODIFICATION_TYPE_SET, 998 .field = reg_to_field[conf->id], 999 }; 1000 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0); 1001 actions[i].data1 = rte_cpu_to_be_32(conf->data); 1002 ++i; 1003 resource->actions_num = i; 1004 return 0; 1005 } 1006 1007 /** 1008 * Convert SET_TAG action to DV specification. 1009 * 1010 * @param[in] dev 1011 * Pointer to the rte_eth_dev structure. 1012 * @param[in,out] resource 1013 * Pointer to the modify-header resource. 1014 * @param[in] conf 1015 * Pointer to action specification. 1016 * @param[out] error 1017 * Pointer to the error structure. 1018 * 1019 * @return 1020 * 0 on success, a negative errno value otherwise and rte_errno is set. 
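 *
 * The application tag index in @p conf is translated to a metadata register
 * (REG_C_x) with mlx5_flow_get_reg_id() before the modify-header command is
 * built.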
 */
static int
flow_dv_convert_action_set_tag
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action_set_tag *conf,
			 struct rte_flow_error *error)
{
	rte_be32_t data = rte_cpu_to_be_32(conf->data);
	rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	enum mlx5_modification_field reg_type;
	int ret;

	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
	if (ret < 0)
		return ret;
	MLX5_ASSERT(ret != REG_NONE);
	MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
	reg_type = reg_to_field[ret];
	MLX5_ASSERT(reg_type > 0);
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
				 struct mlx5_flow_dv_modify_hdr_resource *res,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error)
{
	const struct mlx5_flow_action_copy_mreg *conf = action->conf;
	rte_be32_t mask = RTE_BE32(UINT32_MAX);
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = &mask,
	};
	struct field_modify_info reg_src[] = {
		{4, 0, reg_to_field[conf->src]},
		{0, 0, 0},
	};
	struct field_modify_info reg_dst = {
		.offset = 0,
		.id = reg_to_field[conf->dst],
	};
	/* Adjust reg_c[0] usage according to reported mask. */
	if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t reg_c0 = priv->sh->dv_regc0_mask;

		MLX5_ASSERT(reg_c0);
		MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
		if (conf->dst == REG_C_0) {
			/* Copy to reg_c[0], within mask only. */
			reg_dst.offset = rte_bsf32(reg_c0);
			/*
			 * The mask ignores the endianness because
			 * there is no conversion in the datapath.
			 */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from destination lower bits to reg_c[0]. */
			mask = reg_c0 >> reg_dst.offset;
#else
			/* Copy from destination upper bits to reg_c[0]. */
			mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
					  rte_fls_u32(reg_c0));
#endif
		} else {
			mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from reg_c[0] to destination lower bits. */
			reg_dst.offset = 0;
#else
			/* Copy from reg_c[0] to destination upper bits. */
			reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
					 (rte_fls_u32(reg_c0) -
					  rte_bsf32(reg_c0));
#endif
		}
	}
	return flow_dv_convert_modify_action(&item,
					     reg_src, &reg_dst, res,
					     MLX5_MODIFICATION_TYPE_COPY,
					     error);
}

/**
 * Convert MARK action to DV specification.
This routine is used 1131 * in extensive metadata only and requires metadata register to be 1132 * handled. In legacy mode hardware tag resource is engaged. 1133 * 1134 * @param[in] dev 1135 * Pointer to the rte_eth_dev structure. 1136 * @param[in] conf 1137 * Pointer to MARK action specification. 1138 * @param[in,out] resource 1139 * Pointer to the modify-header resource. 1140 * @param[out] error 1141 * Pointer to the error structure. 1142 * 1143 * @return 1144 * 0 on success, a negative errno value otherwise and rte_errno is set. 1145 */ 1146 static int 1147 flow_dv_convert_action_mark(struct rte_eth_dev *dev, 1148 const struct rte_flow_action_mark *conf, 1149 struct mlx5_flow_dv_modify_hdr_resource *resource, 1150 struct rte_flow_error *error) 1151 { 1152 struct mlx5_priv *priv = dev->data->dev_private; 1153 rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK & 1154 priv->sh->dv_mark_mask); 1155 rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask; 1156 struct rte_flow_item item = { 1157 .spec = &data, 1158 .mask = &mask, 1159 }; 1160 struct field_modify_info reg_c_x[] = { 1161 {4, 0, 0}, /* dynamic instead of MLX5_MODI_META_REG_C_1. */ 1162 {0, 0, 0}, 1163 }; 1164 int reg; 1165 1166 if (!mask) 1167 return rte_flow_error_set(error, EINVAL, 1168 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 1169 NULL, "zero mark action mask"); 1170 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); 1171 if (reg < 0) 1172 return reg; 1173 MLX5_ASSERT(reg > 0); 1174 if (reg == REG_C_0) { 1175 uint32_t msk_c0 = priv->sh->dv_regc0_mask; 1176 uint32_t shl_c0 = rte_bsf32(msk_c0); 1177 1178 data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0); 1179 mask = rte_cpu_to_be_32(mask) & msk_c0; 1180 mask = rte_cpu_to_be_32(mask << shl_c0); 1181 } 1182 reg_c_x[0].id = reg_to_field[reg]; 1183 return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource, 1184 MLX5_MODIFICATION_TYPE_SET, error); 1185 } 1186 1187 /** 1188 * Get metadata register index for specified steering domain. 1189 * 1190 * @param[in] dev 1191 * Pointer to the rte_eth_dev structure. 1192 * @param[in] attr 1193 * Attributes of flow to determine steering domain. 1194 * @param[out] error 1195 * Pointer to the error structure. 1196 * 1197 * @return 1198 * positive index on success, a negative errno value otherwise 1199 * and rte_errno is set. 1200 */ 1201 static enum modify_reg 1202 flow_dv_get_metadata_reg(struct rte_eth_dev *dev, 1203 const struct rte_flow_attr *attr, 1204 struct rte_flow_error *error) 1205 { 1206 int reg = 1207 mlx5_flow_get_reg_id(dev, attr->transfer ? 1208 MLX5_METADATA_FDB : 1209 attr->egress ? 1210 MLX5_METADATA_TX : 1211 MLX5_METADATA_RX, 0, error); 1212 if (reg < 0) 1213 return rte_flow_error_set(error, 1214 ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, 1215 NULL, "unavailable " 1216 "metadata register"); 1217 return reg; 1218 } 1219 1220 /** 1221 * Convert SET_META action to DV specification. 1222 * 1223 * @param[in] dev 1224 * Pointer to the rte_eth_dev structure. 1225 * @param[in,out] resource 1226 * Pointer to the modify-header resource. 1227 * @param[in] attr 1228 * Attributes of flow that includes this item. 1229 * @param[in] conf 1230 * Pointer to action specification. 1231 * @param[out] error 1232 * Pointer to the error structure. 1233 * 1234 * @return 1235 * 0 on success, a negative errno value otherwise and rte_errno is set. 
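 *
 * When the selected metadata register is REG_C_0, only the bits reported in
 * dv_regc0_mask are usable; the data and mask are shifted into that window
 * before the modify-header command is built.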
 */
static int
flow_dv_convert_action_set_meta
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_action_set_meta *conf,
			 struct rte_flow_error *error)
{
	uint32_t data = conf->data;
	uint32_t mask = conf->mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg = flow_dv_get_metadata_reg(dev, attr, error);

	if (reg < 0)
		return reg;
	/*
	 * In the datapath code there are no endianness conversions, for
	 * performance reasons; all pattern conversions are done in rte_flow.
	 */
	if (reg == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0;

		MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		shl_c0 = rte_bsf32(msk_c0);
#else
		shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
		mask <<= shl_c0;
		data <<= shl_c0;
		MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	/* The routine expects parameters in memory as big-endian ones. */
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_dscp *conf =
		(const struct rte_flow_action_set_dscp *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;

	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	ipv4.hdr.type_of_service = conf->dscp;
	ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
	item.spec = &ipv4;
	item.mask = &ipv4_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
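 *
 * Note (assuming the standard DPDK definitions of RTE_IPV4_HDR_DSCP_MASK and
 * RTE_IPV6_HDR_DSCP_MASK): both RTE_IPV4_HDR_DSCP_MASK >> 2 above and
 * RTE_IPV6_HDR_DSCP_MASK >> 22 below evaluate to 0x3f, i.e. the six DSCP
 * bits right-aligned at bits 0-5, which is the byte-aligned layout rdma-core
 * expects.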
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_dscp *conf =
		(const struct rte_flow_action_set_dscp *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;

	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	/*
	 * Even though the DSCP bits offset in IPv6 is not byte aligned,
	 * rdma-core only accepts the DSCP bits byte aligned, placed at
	 * bits 0 to 5, to be compatible with IPv4. There is no need to
	 * shift the bits in the IPv6 case as rdma-core requires the
	 * byte-aligned value.
	 */
	ipv6.hdr.vtc_flow = conf->dscp;
	ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
	item.spec = &ipv6;
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Validate MARK item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_mark(struct rte_eth_dev *dev,
			   const struct rte_flow_item *item,
			   const struct rte_flow_attr *attr __rte_unused,
			   struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	const struct rte_flow_item_mark *spec = item->spec;
	const struct rte_flow_item_mark *mask = item->mask;
	const struct rte_flow_item_mark nic_mask = {
		.id = priv->sh->dv_mark_mask,
	};
	int ret;

	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata feature"
					  " isn't enabled");
	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata register"
					  " isn't supported");
	if (!nic_mask.id)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata register"
					  " isn't available");
	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (ret < 0)
		return ret;
	if (!spec)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "data cannot be empty");
	if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &spec->id,
					  "mark id exceeds the limit");
	if (!mask)
		mask = &nic_mask;
	if (!mask->id)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
					  "mask cannot be zero");

	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_mark),
					error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate META item.
1436 * 1437 * @param[in] dev 1438 * Pointer to the rte_eth_dev structure. 1439 * @param[in] item 1440 * Item specification. 1441 * @param[in] attr 1442 * Attributes of flow that includes this item. 1443 * @param[out] error 1444 * Pointer to error structure. 1445 * 1446 * @return 1447 * 0 on success, a negative errno value otherwise and rte_errno is set. 1448 */ 1449 static int 1450 flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, 1451 const struct rte_flow_item *item, 1452 const struct rte_flow_attr *attr, 1453 struct rte_flow_error *error) 1454 { 1455 struct mlx5_priv *priv = dev->data->dev_private; 1456 struct mlx5_dev_config *config = &priv->config; 1457 const struct rte_flow_item_meta *spec = item->spec; 1458 const struct rte_flow_item_meta *mask = item->mask; 1459 struct rte_flow_item_meta nic_mask = { 1460 .data = UINT32_MAX 1461 }; 1462 int reg; 1463 int ret; 1464 1465 if (!spec) 1466 return rte_flow_error_set(error, EINVAL, 1467 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, 1468 item->spec, 1469 "data cannot be empty"); 1470 if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 1471 if (!mlx5_flow_ext_mreg_supported(dev)) 1472 return rte_flow_error_set(error, ENOTSUP, 1473 RTE_FLOW_ERROR_TYPE_ITEM, item, 1474 "extended metadata register" 1475 " isn't supported"); 1476 reg = flow_dv_get_metadata_reg(dev, attr, error); 1477 if (reg < 0) 1478 return reg; 1479 if (reg == REG_B) 1480 return rte_flow_error_set(error, ENOTSUP, 1481 RTE_FLOW_ERROR_TYPE_ITEM, item, 1482 "match on reg_b " 1483 "isn't supported"); 1484 if (reg != REG_A) 1485 nic_mask.data = priv->sh->dv_meta_mask; 1486 } else if (attr->transfer) { 1487 return rte_flow_error_set(error, ENOTSUP, 1488 RTE_FLOW_ERROR_TYPE_ITEM, item, 1489 "extended metadata feature " 1490 "should be enabled when " 1491 "meta item is requested " 1492 "with e-switch mode "); 1493 } 1494 if (!mask) 1495 mask = &rte_flow_item_meta_mask; 1496 if (!mask->data) 1497 return rte_flow_error_set(error, EINVAL, 1498 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL, 1499 "mask cannot be zero"); 1500 1501 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1502 (const uint8_t *)&nic_mask, 1503 sizeof(struct rte_flow_item_meta), 1504 error); 1505 return ret; 1506 } 1507 1508 /** 1509 * Validate TAG item. 1510 * 1511 * @param[in] dev 1512 * Pointer to the rte_eth_dev structure. 1513 * @param[in] item 1514 * Item specification. 1515 * @param[in] attr 1516 * Attributes of flow that includes this item. 1517 * @param[out] error 1518 * Pointer to error structure. 1519 * 1520 * @return 1521 * 0 on success, a negative errno value otherwise and rte_errno is set. 
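 *
 * The tag index selects which REG_C_x register is matched, so a full 0xff
 * mask on the index is required; partial index masks are rejected.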
1522 */ 1523 static int 1524 flow_dv_validate_item_tag(struct rte_eth_dev *dev, 1525 const struct rte_flow_item *item, 1526 const struct rte_flow_attr *attr __rte_unused, 1527 struct rte_flow_error *error) 1528 { 1529 const struct rte_flow_item_tag *spec = item->spec; 1530 const struct rte_flow_item_tag *mask = item->mask; 1531 const struct rte_flow_item_tag nic_mask = { 1532 .data = RTE_BE32(UINT32_MAX), 1533 .index = 0xff, 1534 }; 1535 int ret; 1536 1537 if (!mlx5_flow_ext_mreg_supported(dev)) 1538 return rte_flow_error_set(error, ENOTSUP, 1539 RTE_FLOW_ERROR_TYPE_ITEM, item, 1540 "extensive metadata register" 1541 " isn't supported"); 1542 if (!spec) 1543 return rte_flow_error_set(error, EINVAL, 1544 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, 1545 item->spec, 1546 "data cannot be empty"); 1547 if (!mask) 1548 mask = &rte_flow_item_tag_mask; 1549 if (!mask->data) 1550 return rte_flow_error_set(error, EINVAL, 1551 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL, 1552 "mask cannot be zero"); 1553 1554 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, 1555 (const uint8_t *)&nic_mask, 1556 sizeof(struct rte_flow_item_tag), 1557 error); 1558 if (ret < 0) 1559 return ret; 1560 if (mask->index != 0xff) 1561 return rte_flow_error_set(error, EINVAL, 1562 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL, 1563 "partial mask for tag index" 1564 " is not supported"); 1565 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error); 1566 if (ret < 0) 1567 return ret; 1568 MLX5_ASSERT(ret != REG_NONE); 1569 return 0; 1570 } 1571 1572 /** 1573 * Validate vport item. 1574 * 1575 * @param[in] dev 1576 * Pointer to the rte_eth_dev structure. 1577 * @param[in] item 1578 * Item specification. 1579 * @param[in] attr 1580 * Attributes of flow that includes this item. 1581 * @param[in] item_flags 1582 * Bit-fields that holds the items detected until now. 1583 * @param[out] error 1584 * Pointer to error structure. 1585 * 1586 * @return 1587 * 0 on success, a negative errno value otherwise and rte_errno is set. 
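 *
 * Matching on a port id is only allowed for transfer (E-Switch) rules, and
 * the referenced port must belong to the same E-Switch domain as the device
 * the rule is created on.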
1588 */ 1589 static int 1590 flow_dv_validate_item_port_id(struct rte_eth_dev *dev, 1591 const struct rte_flow_item *item, 1592 const struct rte_flow_attr *attr, 1593 uint64_t item_flags, 1594 struct rte_flow_error *error) 1595 { 1596 const struct rte_flow_item_port_id *spec = item->spec; 1597 const struct rte_flow_item_port_id *mask = item->mask; 1598 const struct rte_flow_item_port_id switch_mask = { 1599 .id = 0xffffffff, 1600 }; 1601 struct mlx5_priv *esw_priv; 1602 struct mlx5_priv *dev_priv; 1603 int ret; 1604 1605 if (!attr->transfer) 1606 return rte_flow_error_set(error, EINVAL, 1607 RTE_FLOW_ERROR_TYPE_ITEM, 1608 NULL, 1609 "match on port id is valid only" 1610 " when transfer flag is enabled"); 1611 if (item_flags & MLX5_FLOW_ITEM_PORT_ID) 1612 return rte_flow_error_set(error, ENOTSUP, 1613 RTE_FLOW_ERROR_TYPE_ITEM, item, 1614 "multiple source ports are not" 1615 " supported"); 1616 if (!mask) 1617 mask = &switch_mask; 1618 if (mask->id != 0xffffffff) 1619 return rte_flow_error_set(error, ENOTSUP, 1620 RTE_FLOW_ERROR_TYPE_ITEM_MASK, 1621 mask, 1622 "no support for partial mask on" 1623 " \"id\" field"); 1624 ret = mlx5_flow_item_acceptable 1625 (item, (const uint8_t *)mask, 1626 (const uint8_t *)&rte_flow_item_port_id_mask, 1627 sizeof(struct rte_flow_item_port_id), 1628 error); 1629 if (ret) 1630 return ret; 1631 if (!spec) 1632 return 0; 1633 esw_priv = mlx5_port_to_eswitch_info(spec->id, false); 1634 if (!esw_priv) 1635 return rte_flow_error_set(error, rte_errno, 1636 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec, 1637 "failed to obtain E-Switch info for" 1638 " port"); 1639 dev_priv = mlx5_dev_to_eswitch_info(dev); 1640 if (!dev_priv) 1641 return rte_flow_error_set(error, rte_errno, 1642 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1643 NULL, 1644 "failed to obtain E-Switch info"); 1645 if (esw_priv->domain_id != dev_priv->domain_id) 1646 return rte_flow_error_set(error, EINVAL, 1647 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec, 1648 "cannot match on a port from a" 1649 " different E-Switch"); 1650 return 0; 1651 } 1652 1653 /** 1654 * Validate VLAN item. 1655 * 1656 * @param[in] item 1657 * Item specification. 1658 * @param[in] item_flags 1659 * Bit-fields that holds the items detected until now. 1660 * @param[in] dev 1661 * Ethernet device flow is being created on. 1662 * @param[out] error 1663 * Pointer to error structure. 1664 * 1665 * @return 1666 * 0 on success, a negative errno value otherwise and rte_errno is set. 1667 */ 1668 static int 1669 flow_dv_validate_item_vlan(const struct rte_flow_item *item, 1670 uint64_t item_flags, 1671 struct rte_eth_dev *dev, 1672 struct rte_flow_error *error) 1673 { 1674 const struct rte_flow_item_vlan *mask = item->mask; 1675 const struct rte_flow_item_vlan nic_mask = { 1676 .tci = RTE_BE16(UINT16_MAX), 1677 .inner_type = RTE_BE16(UINT16_MAX), 1678 }; 1679 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 1680 int ret; 1681 const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 | 1682 MLX5_FLOW_LAYER_INNER_L4) : 1683 (MLX5_FLOW_LAYER_OUTER_L3 | 1684 MLX5_FLOW_LAYER_OUTER_L4); 1685 const uint64_t vlanm = tunnel ? 
					MLX5_FLOW_LAYER_INNER_VLAN :
					MLX5_FLOW_LAYER_OUTER_VLAN;

	if (item_flags & vlanm)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple VLAN layers not supported");
	else if ((item_flags & l34m) != 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VLAN cannot follow L3/L4 layer");
	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_vlan),
					error);
	if (ret)
		return ret;
	if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
		struct mlx5_priv *priv = dev->data->dev_private;

		if (priv->vmwa_context) {
			/*
			 * A non-NULL context means we run in a virtual
			 * machine with SR-IOV enabled, and a VLAN interface
			 * has to be created so that the hypervisor sets up
			 * the E-Switch vport context correctly. We avoid
			 * creating multiple VLAN interfaces, so VLAN tag
			 * masks cannot be supported.
			 */
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "VLAN tag mask is not"
						  " supported in virtual"
						  " environment");
		}
	}
	return 0;
}

/*
 * GTP flags are contained in 1 byte of the format:
 * -------------------------------------------
 * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
 * |-----------------------------------------|
 * | value | Version | PT | Res | E | S | PN |
 * -------------------------------------------
 *
 * Matching is supported only for GTP flags E, S, PN.
 */
#define MLX5_GTP_FLAGS_MASK	0x07

/**
 * Validate GTP item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
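 *
 * MLX5_GTP_FLAGS_MASK (0x07) covers the three least significant bits of the
 * GTP flags byte (E, S and PN); a spec that sets any other bit of
 * v_pt_rsv_flags (version, PT or reserved) is rejected.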
1752 */ 1753 static int 1754 flow_dv_validate_item_gtp(struct rte_eth_dev *dev, 1755 const struct rte_flow_item *item, 1756 uint64_t item_flags, 1757 struct rte_flow_error *error) 1758 { 1759 struct mlx5_priv *priv = dev->data->dev_private; 1760 const struct rte_flow_item_gtp *spec = item->spec; 1761 const struct rte_flow_item_gtp *mask = item->mask; 1762 const struct rte_flow_item_gtp nic_mask = { 1763 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK, 1764 .msg_type = 0xff, 1765 .teid = RTE_BE32(0xffffffff), 1766 }; 1767 1768 if (!priv->config.hca_attr.tunnel_stateless_gtp) 1769 return rte_flow_error_set(error, ENOTSUP, 1770 RTE_FLOW_ERROR_TYPE_ITEM, item, 1771 "GTP support is not enabled"); 1772 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) 1773 return rte_flow_error_set(error, ENOTSUP, 1774 RTE_FLOW_ERROR_TYPE_ITEM, item, 1775 "multiple tunnel layers not" 1776 " supported"); 1777 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) 1778 return rte_flow_error_set(error, EINVAL, 1779 RTE_FLOW_ERROR_TYPE_ITEM, item, 1780 "no outer UDP layer found"); 1781 if (!mask) 1782 mask = &rte_flow_item_gtp_mask; 1783 if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK) 1784 return rte_flow_error_set(error, ENOTSUP, 1785 RTE_FLOW_ERROR_TYPE_ITEM, item, 1786 "Match is supported for GTP" 1787 " flags only"); 1788 return mlx5_flow_item_acceptable 1789 (item, (const uint8_t *)mask, 1790 (const uint8_t *)&nic_mask, 1791 sizeof(struct rte_flow_item_gtp), 1792 error); 1793 } 1794 1795 /** 1796 * Validate the pop VLAN action. 1797 * 1798 * @param[in] dev 1799 * Pointer to the rte_eth_dev structure. 1800 * @param[in] action_flags 1801 * Holds the actions detected until now. 1802 * @param[in] action 1803 * Pointer to the pop vlan action. 1804 * @param[in] item_flags 1805 * The items found in this flow rule. 1806 * @param[in] attr 1807 * Pointer to flow attributes. 1808 * @param[out] error 1809 * Pointer to error structure. 1810 * 1811 * @return 1812 * 0 on success, a negative errno value otherwise and rte_errno is set. 
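 *
 * Pop VLAN is rejected on egress, cannot be combined with another VLAN
 * action, requires an (outer) VLAN match in the pattern and must precede any
 * port_id action.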
1813 */ 1814 static int 1815 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev, 1816 uint64_t action_flags, 1817 const struct rte_flow_action *action, 1818 uint64_t item_flags, 1819 const struct rte_flow_attr *attr, 1820 struct rte_flow_error *error) 1821 { 1822 const struct mlx5_priv *priv = dev->data->dev_private; 1823 1824 (void)action; 1825 (void)attr; 1826 if (!priv->sh->pop_vlan_action) 1827 return rte_flow_error_set(error, ENOTSUP, 1828 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1829 NULL, 1830 "pop vlan action is not supported"); 1831 if (attr->egress) 1832 return rte_flow_error_set(error, ENOTSUP, 1833 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, 1834 NULL, 1835 "pop vlan action not supported for " 1836 "egress"); 1837 if (action_flags & MLX5_FLOW_VLAN_ACTIONS) 1838 return rte_flow_error_set(error, ENOTSUP, 1839 RTE_FLOW_ERROR_TYPE_ACTION, action, 1840 "no support for multiple VLAN " 1841 "actions"); 1842 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) 1843 return rte_flow_error_set(error, ENOTSUP, 1844 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1845 NULL, 1846 "cannot pop vlan without a " 1847 "match on (outer) vlan in the flow"); 1848 if (action_flags & MLX5_FLOW_ACTION_PORT_ID) 1849 return rte_flow_error_set(error, EINVAL, 1850 RTE_FLOW_ERROR_TYPE_ACTION, action, 1851 "wrong action order, port_id should " 1852 "be after pop VLAN action"); 1853 if (!attr->transfer && priv->representor) 1854 return rte_flow_error_set(error, ENOTSUP, 1855 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1856 "pop vlan action for VF representor " 1857 "not supported on NIC table"); 1858 return 0; 1859 } 1860 1861 /** 1862 * Get VLAN default info from vlan match info. 1863 * 1864 * @param[in] items 1865 * the list of item specifications. 1866 * @param[out] vlan 1867 * pointer VLAN info to fill to. 1868 * 1869 * @return 1870 * 0 on success, a negative errno value otherwise and rte_errno is set. 1871 */ 1872 static void 1873 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items, 1874 struct rte_vlan_hdr *vlan) 1875 { 1876 const struct rte_flow_item_vlan nic_mask = { 1877 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK | 1878 MLX5DV_FLOW_VLAN_VID_MASK), 1879 .inner_type = RTE_BE16(0xffff), 1880 }; 1881 1882 if (items == NULL) 1883 return; 1884 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { 1885 int type = items->type; 1886 1887 if (type == RTE_FLOW_ITEM_TYPE_VLAN || 1888 type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN) 1889 break; 1890 } 1891 if (items->type != RTE_FLOW_ITEM_TYPE_END) { 1892 const struct rte_flow_item_vlan *vlan_m = items->mask; 1893 const struct rte_flow_item_vlan *vlan_v = items->spec; 1894 1895 /* If VLAN item in pattern doesn't contain data, return here. */ 1896 if (!vlan_v) 1897 return; 1898 if (!vlan_m) 1899 vlan_m = &nic_mask; 1900 /* Only full match values are accepted */ 1901 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) == 1902 MLX5DV_FLOW_VLAN_PCP_MASK_BE) { 1903 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK; 1904 vlan->vlan_tci |= 1905 rte_be_to_cpu_16(vlan_v->tci & 1906 MLX5DV_FLOW_VLAN_PCP_MASK_BE); 1907 } 1908 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) == 1909 MLX5DV_FLOW_VLAN_VID_MASK_BE) { 1910 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK; 1911 vlan->vlan_tci |= 1912 rte_be_to_cpu_16(vlan_v->tci & 1913 MLX5DV_FLOW_VLAN_VID_MASK_BE); 1914 } 1915 if (vlan_m->inner_type == nic_mask.inner_type) 1916 vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type & 1917 vlan_m->inner_type); 1918 } 1919 } 1920 1921 /** 1922 * Validate the push VLAN action. 
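 *
 * Illustration (hypothetical rule, testpmd-like syntax): a transfer or
 * egress rule with
 *
 *   actions: of_push_vlan ethertype 0x8100 / of_set_vlan_vid vid 100 /
 *            of_set_vlan_pcp pcp 3 / port_id id 1 / end
 *
 * passes this check. Only the 0x8100 and 0x88a8 ethertypes are accepted,
 * and when the matched VLAN mask does not fully cover PCP or VID the rule
 * must carry the corresponding of_set_vlan_pcp or of_set_vlan_vid action
 * so the value to push can be determined.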
1923 * 1924 * @param[in] dev 1925 * Pointer to the rte_eth_dev structure. 1926 * @param[in] action_flags 1927 * Holds the actions detected until now. 1928 * @param[in] item_flags 1929 * The items found in this flow rule. 1930 * @param[in] action 1931 * Pointer to the action structure. 1932 * @param[in] attr 1933 * Pointer to flow attributes 1934 * @param[out] error 1935 * Pointer to error structure. 1936 * 1937 * @return 1938 * 0 on success, a negative errno value otherwise and rte_errno is set. 1939 */ 1940 static int 1941 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev, 1942 uint64_t action_flags, 1943 const struct rte_flow_item_vlan *vlan_m, 1944 const struct rte_flow_action *action, 1945 const struct rte_flow_attr *attr, 1946 struct rte_flow_error *error) 1947 { 1948 const struct rte_flow_action_of_push_vlan *push_vlan = action->conf; 1949 const struct mlx5_priv *priv = dev->data->dev_private; 1950 1951 if (!attr->transfer && attr->ingress) 1952 return rte_flow_error_set(error, ENOTSUP, 1953 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, 1954 NULL, 1955 "push VLAN action not supported for " 1956 "ingress"); 1957 if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) && 1958 push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ)) 1959 return rte_flow_error_set(error, EINVAL, 1960 RTE_FLOW_ERROR_TYPE_ACTION, action, 1961 "invalid vlan ethertype"); 1962 if (action_flags & MLX5_FLOW_VLAN_ACTIONS) 1963 return rte_flow_error_set(error, ENOTSUP, 1964 RTE_FLOW_ERROR_TYPE_ACTION, action, 1965 "no support for multiple VLAN " 1966 "actions"); 1967 if (action_flags & MLX5_FLOW_ACTION_PORT_ID) 1968 return rte_flow_error_set(error, EINVAL, 1969 RTE_FLOW_ERROR_TYPE_ACTION, action, 1970 "wrong action order, port_id should " 1971 "be after push VLAN"); 1972 if (!attr->transfer && priv->representor) 1973 return rte_flow_error_set(error, ENOTSUP, 1974 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1975 "push vlan action for VF representor " 1976 "not supported on NIC table"); 1977 if (vlan_m && 1978 (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) && 1979 (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) != 1980 MLX5DV_FLOW_VLAN_PCP_MASK_BE && 1981 !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) && 1982 !(mlx5_flow_find_action 1983 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP))) 1984 return rte_flow_error_set(error, EINVAL, 1985 RTE_FLOW_ERROR_TYPE_ACTION, action, 1986 "not full match mask on VLAN PCP and " 1987 "there is no of_set_vlan_pcp action, " 1988 "push VLAN action cannot figure out " 1989 "PCP value"); 1990 if (vlan_m && 1991 (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) && 1992 (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) != 1993 MLX5DV_FLOW_VLAN_VID_MASK_BE && 1994 !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) && 1995 !(mlx5_flow_find_action 1996 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID))) 1997 return rte_flow_error_set(error, EINVAL, 1998 RTE_FLOW_ERROR_TYPE_ACTION, action, 1999 "not full match mask on VLAN VID and " 2000 "there is no of_set_vlan_vid action, " 2001 "push VLAN action cannot figure out " 2002 "VID value"); 2003 (void)attr; 2004 return 0; 2005 } 2006 2007 /** 2008 * Validate the set VLAN PCP. 2009 * 2010 * @param[in] action_flags 2011 * Holds the actions detected until now. 2012 * @param[in] actions 2013 * Pointer to the list of actions remaining in the flow rule. 2014 * @param[out] error 2015 * Pointer to error structure. 2016 * 2017 * @return 2018 * 0 on success, a negative errno value otherwise and rte_errno is set. 
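 *
 * Illustration (hypothetical): an action list accepted by this check is
 *
 *   actions: of_push_vlan ethertype 0x8100 / of_set_vlan_pcp pcp 3 /
 *            port_id id 1 / end
 *
 * The PCP value must be in the range 0..7, the action must follow a push
 * VLAN action, and only one PCP modification per flow is allowed.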
2019 */ 2020 static int 2021 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags, 2022 const struct rte_flow_action actions[], 2023 struct rte_flow_error *error) 2024 { 2025 const struct rte_flow_action *action = actions; 2026 const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf; 2027 2028 if (conf->vlan_pcp > 7) 2029 return rte_flow_error_set(error, EINVAL, 2030 RTE_FLOW_ERROR_TYPE_ACTION, action, 2031 "VLAN PCP value is too big"); 2032 if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)) 2033 return rte_flow_error_set(error, ENOTSUP, 2034 RTE_FLOW_ERROR_TYPE_ACTION, action, 2035 "set VLAN PCP action must follow " 2036 "the push VLAN action"); 2037 if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) 2038 return rte_flow_error_set(error, ENOTSUP, 2039 RTE_FLOW_ERROR_TYPE_ACTION, action, 2040 "Multiple VLAN PCP modification are " 2041 "not supported"); 2042 if (action_flags & MLX5_FLOW_ACTION_PORT_ID) 2043 return rte_flow_error_set(error, EINVAL, 2044 RTE_FLOW_ERROR_TYPE_ACTION, action, 2045 "wrong action order, port_id should " 2046 "be after set VLAN PCP"); 2047 return 0; 2048 } 2049 2050 /** 2051 * Validate the set VLAN VID. 2052 * 2053 * @param[in] item_flags 2054 * Holds the items detected in this rule. 2055 * @param[in] action_flags 2056 * Holds the actions detected until now. 2057 * @param[in] actions 2058 * Pointer to the list of actions remaining in the flow rule. 2059 * @param[out] error 2060 * Pointer to error structure. 2061 * 2062 * @return 2063 * 0 on success, a negative errno value otherwise and rte_errno is set. 2064 */ 2065 static int 2066 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags, 2067 uint64_t action_flags, 2068 const struct rte_flow_action actions[], 2069 struct rte_flow_error *error) 2070 { 2071 const struct rte_flow_action *action = actions; 2072 const struct rte_flow_action_of_set_vlan_vid *conf = action->conf; 2073 2074 if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE) 2075 return rte_flow_error_set(error, EINVAL, 2076 RTE_FLOW_ERROR_TYPE_ACTION, action, 2077 "VLAN VID value is too big"); 2078 if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) && 2079 !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) 2080 return rte_flow_error_set(error, ENOTSUP, 2081 RTE_FLOW_ERROR_TYPE_ACTION, action, 2082 "set VLAN VID action must follow push" 2083 " VLAN action or match on VLAN item"); 2084 if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) 2085 return rte_flow_error_set(error, ENOTSUP, 2086 RTE_FLOW_ERROR_TYPE_ACTION, action, 2087 "Multiple VLAN VID modifications are " 2088 "not supported"); 2089 if (action_flags & MLX5_FLOW_ACTION_PORT_ID) 2090 return rte_flow_error_set(error, EINVAL, 2091 RTE_FLOW_ERROR_TYPE_ACTION, action, 2092 "wrong action order, port_id should " 2093 "be after set VLAN VID"); 2094 return 0; 2095 } 2096 2097 /* 2098 * Validate the FLAG action. 2099 * 2100 * @param[in] dev 2101 * Pointer to the rte_eth_dev structure. 2102 * @param[in] action_flags 2103 * Holds the actions detected until now. 2104 * @param[in] attr 2105 * Pointer to flow attributes 2106 * @param[out] error 2107 * Pointer to error structure. 2108 * 2109 * @return 2110 * 0 on success, a negative errno value otherwise and rte_errno is set. 
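 *
 * Explanatory note: in legacy dv_xmeta_en mode this falls back to the
 * generic mlx5_flow_validate_action_flag() check; otherwise a metadata
 * register must be available (mlx5_flow_get_reg_id() for MLX5_FLOW_MARK)
 * and FLAG cannot be combined with MARK or a second FLAG in the same
 * flow.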
2111 */ 2112 static int 2113 flow_dv_validate_action_flag(struct rte_eth_dev *dev, 2114 uint64_t action_flags, 2115 const struct rte_flow_attr *attr, 2116 struct rte_flow_error *error) 2117 { 2118 struct mlx5_priv *priv = dev->data->dev_private; 2119 struct mlx5_dev_config *config = &priv->config; 2120 int ret; 2121 2122 /* Fall back if no extended metadata register support. */ 2123 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY) 2124 return mlx5_flow_validate_action_flag(action_flags, attr, 2125 error); 2126 /* Extensive metadata mode requires registers. */ 2127 if (!mlx5_flow_ext_mreg_supported(dev)) 2128 return rte_flow_error_set(error, ENOTSUP, 2129 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2130 "no metadata registers " 2131 "to support flag action"); 2132 if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT)) 2133 return rte_flow_error_set(error, ENOTSUP, 2134 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2135 "extended metadata register" 2136 " isn't available"); 2137 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); 2138 if (ret < 0) 2139 return ret; 2140 MLX5_ASSERT(ret > 0); 2141 if (action_flags & MLX5_FLOW_ACTION_MARK) 2142 return rte_flow_error_set(error, EINVAL, 2143 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2144 "can't mark and flag in same flow"); 2145 if (action_flags & MLX5_FLOW_ACTION_FLAG) 2146 return rte_flow_error_set(error, EINVAL, 2147 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2148 "can't have 2 flag" 2149 " actions in same flow"); 2150 return 0; 2151 } 2152 2153 /** 2154 * Validate MARK action. 2155 * 2156 * @param[in] dev 2157 * Pointer to the rte_eth_dev structure. 2158 * @param[in] action 2159 * Pointer to action. 2160 * @param[in] action_flags 2161 * Holds the actions detected until now. 2162 * @param[in] attr 2163 * Pointer to flow attributes 2164 * @param[out] error 2165 * Pointer to error structure. 2166 * 2167 * @return 2168 * 0 on success, a negative errno value otherwise and rte_errno is set. 2169 */ 2170 static int 2171 flow_dv_validate_action_mark(struct rte_eth_dev *dev, 2172 const struct rte_flow_action *action, 2173 uint64_t action_flags, 2174 const struct rte_flow_attr *attr, 2175 struct rte_flow_error *error) 2176 { 2177 struct mlx5_priv *priv = dev->data->dev_private; 2178 struct mlx5_dev_config *config = &priv->config; 2179 const struct rte_flow_action_mark *mark = action->conf; 2180 int ret; 2181 2182 /* Fall back if no extended metadata register support. */ 2183 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY) 2184 return mlx5_flow_validate_action_mark(action, action_flags, 2185 attr, error); 2186 /* Extensive metadata mode requires registers. 
*/ 2187 if (!mlx5_flow_ext_mreg_supported(dev)) 2188 return rte_flow_error_set(error, ENOTSUP, 2189 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2190 "no metadata registers " 2191 "to support mark action"); 2192 if (!priv->sh->dv_mark_mask) 2193 return rte_flow_error_set(error, ENOTSUP, 2194 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2195 "extended metadata register" 2196 " isn't available"); 2197 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); 2198 if (ret < 0) 2199 return ret; 2200 MLX5_ASSERT(ret > 0); 2201 if (!mark) 2202 return rte_flow_error_set(error, EINVAL, 2203 RTE_FLOW_ERROR_TYPE_ACTION, action, 2204 "configuration cannot be null"); 2205 if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask)) 2206 return rte_flow_error_set(error, EINVAL, 2207 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 2208 &mark->id, 2209 "mark id exceeds the limit"); 2210 if (action_flags & MLX5_FLOW_ACTION_FLAG) 2211 return rte_flow_error_set(error, EINVAL, 2212 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2213 "can't flag and mark in same flow"); 2214 if (action_flags & MLX5_FLOW_ACTION_MARK) 2215 return rte_flow_error_set(error, EINVAL, 2216 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2217 "can't have 2 mark actions in same" 2218 " flow"); 2219 return 0; 2220 } 2221 2222 /** 2223 * Validate SET_META action. 2224 * 2225 * @param[in] dev 2226 * Pointer to the rte_eth_dev structure. 2227 * @param[in] action 2228 * Pointer to the action structure. 2229 * @param[in] action_flags 2230 * Holds the actions detected until now. 2231 * @param[in] attr 2232 * Pointer to flow attributes 2233 * @param[out] error 2234 * Pointer to error structure. 2235 * 2236 * @return 2237 * 0 on success, a negative errno value otherwise and rte_errno is set. 2238 */ 2239 static int 2240 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev, 2241 const struct rte_flow_action *action, 2242 uint64_t action_flags __rte_unused, 2243 const struct rte_flow_attr *attr, 2244 struct rte_flow_error *error) 2245 { 2246 const struct rte_flow_action_set_meta *conf; 2247 uint32_t nic_mask = UINT32_MAX; 2248 int reg; 2249 2250 if (!mlx5_flow_ext_mreg_supported(dev)) 2251 return rte_flow_error_set(error, ENOTSUP, 2252 RTE_FLOW_ERROR_TYPE_ACTION, action, 2253 "extended metadata register" 2254 " isn't supported"); 2255 reg = flow_dv_get_metadata_reg(dev, attr, error); 2256 if (reg < 0) 2257 return reg; 2258 if (reg != REG_A && reg != REG_B) { 2259 struct mlx5_priv *priv = dev->data->dev_private; 2260 2261 nic_mask = priv->sh->dv_meta_mask; 2262 } 2263 if (!(action->conf)) 2264 return rte_flow_error_set(error, EINVAL, 2265 RTE_FLOW_ERROR_TYPE_ACTION, action, 2266 "configuration cannot be null"); 2267 conf = (const struct rte_flow_action_set_meta *)action->conf; 2268 if (!conf->mask) 2269 return rte_flow_error_set(error, EINVAL, 2270 RTE_FLOW_ERROR_TYPE_ACTION, action, 2271 "zero mask doesn't have any effect"); 2272 if (conf->mask & ~nic_mask) 2273 return rte_flow_error_set(error, EINVAL, 2274 RTE_FLOW_ERROR_TYPE_ACTION, action, 2275 "meta data must be within reg C0"); 2276 return 0; 2277 } 2278 2279 /** 2280 * Validate SET_TAG action. 2281 * 2282 * @param[in] dev 2283 * Pointer to the rte_eth_dev structure. 2284 * @param[in] action 2285 * Pointer to the action structure. 2286 * @param[in] action_flags 2287 * Holds the actions detected until now. 2288 * @param[in] attr 2289 * Pointer to flow attributes 2290 * @param[out] error 2291 * Pointer to error structure. 2292 * 2293 * @return 2294 * 0 on success, a negative errno value otherwise and rte_errno is set. 
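 *
 * Illustration (hypothetical configuration): a set_tag action accepted by
 * this check could be
 *
 *   struct rte_flow_action_set_tag conf = {
 *           .data = 0xabcd,
 *           .mask = 0xffff,
 *           .index = 0,
 *   };
 *
 * provided a tag register can be mapped for index 0 and, for non-transfer
 * ingress flows, no terminal action (drop, queue or RSS) precedes it, in
 * which case the tag write would have no effect.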
2295 */ 2296 static int 2297 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev, 2298 const struct rte_flow_action *action, 2299 uint64_t action_flags, 2300 const struct rte_flow_attr *attr, 2301 struct rte_flow_error *error) 2302 { 2303 const struct rte_flow_action_set_tag *conf; 2304 const uint64_t terminal_action_flags = 2305 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | 2306 MLX5_FLOW_ACTION_RSS; 2307 int ret; 2308 2309 if (!mlx5_flow_ext_mreg_supported(dev)) 2310 return rte_flow_error_set(error, ENOTSUP, 2311 RTE_FLOW_ERROR_TYPE_ACTION, action, 2312 "extensive metadata register" 2313 " isn't supported"); 2314 if (!(action->conf)) 2315 return rte_flow_error_set(error, EINVAL, 2316 RTE_FLOW_ERROR_TYPE_ACTION, action, 2317 "configuration cannot be null"); 2318 conf = (const struct rte_flow_action_set_tag *)action->conf; 2319 if (!conf->mask) 2320 return rte_flow_error_set(error, EINVAL, 2321 RTE_FLOW_ERROR_TYPE_ACTION, action, 2322 "zero mask doesn't have any effect"); 2323 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error); 2324 if (ret < 0) 2325 return ret; 2326 if (!attr->transfer && attr->ingress && 2327 (action_flags & terminal_action_flags)) 2328 return rte_flow_error_set(error, EINVAL, 2329 RTE_FLOW_ERROR_TYPE_ACTION, action, 2330 "set_tag has no effect" 2331 " with terminal actions"); 2332 return 0; 2333 } 2334 2335 /** 2336 * Validate count action. 2337 * 2338 * @param[in] dev 2339 * Pointer to rte_eth_dev structure. 2340 * @param[out] error 2341 * Pointer to error structure. 2342 * 2343 * @return 2344 * 0 on success, a negative errno value otherwise and rte_errno is set. 2345 */ 2346 static int 2347 flow_dv_validate_action_count(struct rte_eth_dev *dev, 2348 struct rte_flow_error *error) 2349 { 2350 struct mlx5_priv *priv = dev->data->dev_private; 2351 2352 if (!priv->config.devx) 2353 goto notsup_err; 2354 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS 2355 return 0; 2356 #endif 2357 notsup_err: 2358 return rte_flow_error_set 2359 (error, ENOTSUP, 2360 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2361 NULL, 2362 "count action not supported"); 2363 } 2364 2365 /** 2366 * Validate the L2 encap action. 2367 * 2368 * @param[in] dev 2369 * Pointer to the rte_eth_dev structure. 2370 * @param[in] action_flags 2371 * Holds the actions detected until now. 2372 * @param[in] action 2373 * Pointer to the action structure. 2374 * @param[in] attr 2375 * Pointer to flow attributes. 2376 * @param[out] error 2377 * Pointer to error structure. 2378 * 2379 * @return 2380 * 0 on success, a negative errno value otherwise and rte_errno is set. 2381 */ 2382 static int 2383 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev, 2384 uint64_t action_flags, 2385 const struct rte_flow_action *action, 2386 const struct rte_flow_attr *attr, 2387 struct rte_flow_error *error) 2388 { 2389 const struct mlx5_priv *priv = dev->data->dev_private; 2390 2391 if (!(action->conf)) 2392 return rte_flow_error_set(error, EINVAL, 2393 RTE_FLOW_ERROR_TYPE_ACTION, action, 2394 "configuration cannot be null"); 2395 if (action_flags & MLX5_FLOW_ACTION_ENCAP) 2396 return rte_flow_error_set(error, EINVAL, 2397 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2398 "can only have a single encap action " 2399 "in a flow"); 2400 if (!attr->transfer && priv->representor) 2401 return rte_flow_error_set(error, ENOTSUP, 2402 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2403 "encap action for VF representor " 2404 "not supported on NIC table"); 2405 return 0; 2406 } 2407 2408 /** 2409 * Validate a decap action. 
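 *
 * Explanatory note: decap is rejected when the flow already carries an
 * encap or decap action, after a modify-header action, on egress
 * attributes, and for VF representors on the non-transfer NIC tables.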
2410 * 2411 * @param[in] dev 2412 * Pointer to the rte_eth_dev structure. 2413 * @param[in] action_flags 2414 * Holds the actions detected until now. 2415 * @param[in] attr 2416 * Pointer to flow attributes 2417 * @param[out] error 2418 * Pointer to error structure. 2419 * 2420 * @return 2421 * 0 on success, a negative errno value otherwise and rte_errno is set. 2422 */ 2423 static int 2424 flow_dv_validate_action_decap(struct rte_eth_dev *dev, 2425 uint64_t action_flags, 2426 const struct rte_flow_attr *attr, 2427 struct rte_flow_error *error) 2428 { 2429 const struct mlx5_priv *priv = dev->data->dev_private; 2430 2431 if (action_flags & MLX5_FLOW_XCAP_ACTIONS) 2432 return rte_flow_error_set(error, ENOTSUP, 2433 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2434 action_flags & 2435 MLX5_FLOW_ACTION_DECAP ? "can only " 2436 "have a single decap action" : "decap " 2437 "after encap is not supported"); 2438 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) 2439 return rte_flow_error_set(error, EINVAL, 2440 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2441 "can't have decap action after" 2442 " modify action"); 2443 if (attr->egress) 2444 return rte_flow_error_set(error, ENOTSUP, 2445 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, 2446 NULL, 2447 "decap action not supported for " 2448 "egress"); 2449 if (!attr->transfer && priv->representor) 2450 return rte_flow_error_set(error, ENOTSUP, 2451 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2452 "decap action for VF representor " 2453 "not supported on NIC table"); 2454 return 0; 2455 } 2456 2457 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,}; 2458 2459 /** 2460 * Validate the raw encap and decap actions. 2461 * 2462 * @param[in] dev 2463 * Pointer to the rte_eth_dev structure. 2464 * @param[in] decap 2465 * Pointer to the decap action. 2466 * @param[in] encap 2467 * Pointer to the encap action. 2468 * @param[in] attr 2469 * Pointer to flow attributes 2470 * @param[in/out] action_flags 2471 * Holds the actions detected until now. 2472 * @param[out] actions_n 2473 * pointer to the number of actions counter. 2474 * @param[out] error 2475 * Pointer to error structure. 2476 * 2477 * @return 2478 * 0 on success, a negative errno value otherwise and rte_errno is set. 2479 */ 2480 static int 2481 flow_dv_validate_action_raw_encap_decap 2482 (struct rte_eth_dev *dev, 2483 const struct rte_flow_action_raw_decap *decap, 2484 const struct rte_flow_action_raw_encap *encap, 2485 const struct rte_flow_attr *attr, uint64_t *action_flags, 2486 int *actions_n, struct rte_flow_error *error) 2487 { 2488 const struct mlx5_priv *priv = dev->data->dev_private; 2489 int ret; 2490 2491 if (encap && (!encap->size || !encap->data)) 2492 return rte_flow_error_set(error, EINVAL, 2493 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 2494 "raw encap data cannot be empty"); 2495 if (decap && encap) { 2496 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE && 2497 encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) 2498 /* L3 encap. */ 2499 decap = NULL; 2500 else if (encap->size <= 2501 MLX5_ENCAPSULATION_DECISION_SIZE && 2502 decap->size > 2503 MLX5_ENCAPSULATION_DECISION_SIZE) 2504 /* L3 decap. */ 2505 encap = NULL; 2506 else if (encap->size > 2507 MLX5_ENCAPSULATION_DECISION_SIZE && 2508 decap->size > 2509 MLX5_ENCAPSULATION_DECISION_SIZE) 2510 /* 2 L2 actions: encap and decap. 
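			 * Both sizes exceed MLX5_ENCAPSULATION_DECISION_SIZE,
			 * so the pair is handled as a plain L2 decap followed
			 * by an L2 encap rather than as an L3 tunnel rewrite.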
*/ 2511 ; 2512 else 2513 return rte_flow_error_set(error, 2514 ENOTSUP, 2515 RTE_FLOW_ERROR_TYPE_ACTION, 2516 NULL, "unsupported too small " 2517 "raw decap and too small raw " 2518 "encap combination"); 2519 } 2520 if (decap) { 2521 ret = flow_dv_validate_action_decap(dev, *action_flags, attr, 2522 error); 2523 if (ret < 0) 2524 return ret; 2525 *action_flags |= MLX5_FLOW_ACTION_DECAP; 2526 ++(*actions_n); 2527 } 2528 if (encap) { 2529 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE) 2530 return rte_flow_error_set(error, ENOTSUP, 2531 RTE_FLOW_ERROR_TYPE_ACTION, 2532 NULL, 2533 "small raw encap size"); 2534 if (*action_flags & MLX5_FLOW_ACTION_ENCAP) 2535 return rte_flow_error_set(error, EINVAL, 2536 RTE_FLOW_ERROR_TYPE_ACTION, 2537 NULL, 2538 "more than one encap action"); 2539 if (!attr->transfer && priv->representor) 2540 return rte_flow_error_set 2541 (error, ENOTSUP, 2542 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2543 "encap action for VF representor " 2544 "not supported on NIC table"); 2545 *action_flags |= MLX5_FLOW_ACTION_ENCAP; 2546 ++(*actions_n); 2547 } 2548 return 0; 2549 } 2550 2551 /** 2552 * Find existing encap/decap resource or create and register a new one. 2553 * 2554 * @param[in, out] dev 2555 * Pointer to rte_eth_dev structure. 2556 * @param[in, out] resource 2557 * Pointer to encap/decap resource. 2558 * @parm[in, out] dev_flow 2559 * Pointer to the dev_flow. 2560 * @param[out] error 2561 * pointer to error structure. 2562 * 2563 * @return 2564 * 0 on success otherwise -errno and errno is set. 2565 */ 2566 static int 2567 flow_dv_encap_decap_resource_register 2568 (struct rte_eth_dev *dev, 2569 struct mlx5_flow_dv_encap_decap_resource *resource, 2570 struct mlx5_flow *dev_flow, 2571 struct rte_flow_error *error) 2572 { 2573 struct mlx5_priv *priv = dev->data->dev_private; 2574 struct mlx5_dev_ctx_shared *sh = priv->sh; 2575 struct mlx5_flow_dv_encap_decap_resource *cache_resource; 2576 struct mlx5dv_dr_domain *domain; 2577 uint32_t idx = 0; 2578 int ret; 2579 2580 resource->flags = dev_flow->dv.group ? 0 : 1; 2581 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) 2582 domain = sh->fdb_domain; 2583 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX) 2584 domain = sh->rx_domain; 2585 else 2586 domain = sh->tx_domain; 2587 /* Lookup a matching resource from cache. */ 2588 ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], sh->encaps_decaps, idx, 2589 cache_resource, next) { 2590 if (resource->reformat_type == cache_resource->reformat_type && 2591 resource->ft_type == cache_resource->ft_type && 2592 resource->flags == cache_resource->flags && 2593 resource->size == cache_resource->size && 2594 !memcmp((const void *)resource->buf, 2595 (const void *)cache_resource->buf, 2596 resource->size)) { 2597 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++", 2598 (void *)cache_resource, 2599 rte_atomic32_read(&cache_resource->refcnt)); 2600 rte_atomic32_inc(&cache_resource->refcnt); 2601 dev_flow->handle->dvh.rix_encap_decap = idx; 2602 dev_flow->dv.encap_decap = cache_resource; 2603 return 0; 2604 } 2605 } 2606 /* Register new encap/decap resource. 
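	 * No cached entry matched: allocate a new resource from the indexed
	 * pool, create the packet reformat action for the selected domain
	 * and link the entry into the shared list with a reference count of
	 * one.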
*/ 2607 cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], 2608 &dev_flow->handle->dvh.rix_encap_decap); 2609 if (!cache_resource) 2610 return rte_flow_error_set(error, ENOMEM, 2611 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2612 "cannot allocate resource memory"); 2613 *cache_resource = *resource; 2614 ret = mlx5_flow_os_create_flow_action_packet_reformat 2615 (sh->ctx, domain, cache_resource, 2616 &cache_resource->action); 2617 if (ret) { 2618 rte_free(cache_resource); 2619 return rte_flow_error_set(error, ENOMEM, 2620 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2621 NULL, "cannot create action"); 2622 } 2623 rte_atomic32_init(&cache_resource->refcnt); 2624 rte_atomic32_inc(&cache_resource->refcnt); 2625 ILIST_INSERT(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &sh->encaps_decaps, 2626 dev_flow->handle->dvh.rix_encap_decap, cache_resource, 2627 next); 2628 dev_flow->dv.encap_decap = cache_resource; 2629 DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++", 2630 (void *)cache_resource, 2631 rte_atomic32_read(&cache_resource->refcnt)); 2632 return 0; 2633 } 2634 2635 /** 2636 * Find existing table jump resource or create and register a new one. 2637 * 2638 * @param[in, out] dev 2639 * Pointer to rte_eth_dev structure. 2640 * @param[in, out] tbl 2641 * Pointer to flow table resource. 2642 * @parm[in, out] dev_flow 2643 * Pointer to the dev_flow. 2644 * @param[out] error 2645 * pointer to error structure. 2646 * 2647 * @return 2648 * 0 on success otherwise -errno and errno is set. 2649 */ 2650 static int 2651 flow_dv_jump_tbl_resource_register 2652 (struct rte_eth_dev *dev __rte_unused, 2653 struct mlx5_flow_tbl_resource *tbl, 2654 struct mlx5_flow *dev_flow, 2655 struct rte_flow_error *error) 2656 { 2657 struct mlx5_flow_tbl_data_entry *tbl_data = 2658 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); 2659 int cnt, ret; 2660 2661 MLX5_ASSERT(tbl); 2662 cnt = rte_atomic32_read(&tbl_data->jump.refcnt); 2663 if (!cnt) { 2664 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl 2665 (tbl->obj, &tbl_data->jump.action); 2666 if (ret) 2667 return rte_flow_error_set(error, ENOMEM, 2668 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2669 NULL, "cannot create jump action"); 2670 DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++", 2671 (void *)&tbl_data->jump, cnt); 2672 } else { 2673 /* old jump should not make the table ref++. */ 2674 flow_dv_tbl_resource_release(dev, &tbl_data->tbl); 2675 MLX5_ASSERT(tbl_data->jump.action); 2676 DRV_LOG(DEBUG, "existed jump table resource %p: refcnt %d++", 2677 (void *)&tbl_data->jump, cnt); 2678 } 2679 rte_atomic32_inc(&tbl_data->jump.refcnt); 2680 dev_flow->handle->rix_jump = tbl_data->idx; 2681 dev_flow->dv.jump = &tbl_data->jump; 2682 return 0; 2683 } 2684 2685 /** 2686 * Find existing default miss resource or create and register a new one. 2687 * 2688 * @param[in, out] dev 2689 * Pointer to rte_eth_dev structure. 2690 * @param[out] error 2691 * pointer to error structure. 2692 * 2693 * @return 2694 * 0 on success otherwise -errno and errno is set. 
2695 */ 2696 static int 2697 flow_dv_default_miss_resource_register(struct rte_eth_dev *dev, 2698 struct rte_flow_error *error) 2699 { 2700 struct mlx5_priv *priv = dev->data->dev_private; 2701 struct mlx5_dev_ctx_shared *sh = priv->sh; 2702 struct mlx5_flow_default_miss_resource *cache_resource = 2703 &sh->default_miss; 2704 int cnt = rte_atomic32_read(&cache_resource->refcnt); 2705 2706 if (!cnt) { 2707 MLX5_ASSERT(cache_resource->action); 2708 cache_resource->action = 2709 mlx5_glue->dr_create_flow_action_default_miss(); 2710 if (!cache_resource->action) 2711 return rte_flow_error_set(error, ENOMEM, 2712 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2713 "cannot create default miss action"); 2714 DRV_LOG(DEBUG, "new default miss resource %p: refcnt %d++", 2715 (void *)cache_resource->action, cnt); 2716 } 2717 rte_atomic32_inc(&cache_resource->refcnt); 2718 return 0; 2719 } 2720 2721 /** 2722 * Find existing table port ID resource or create and register a new one. 2723 * 2724 * @param[in, out] dev 2725 * Pointer to rte_eth_dev structure. 2726 * @param[in, out] resource 2727 * Pointer to port ID action resource. 2728 * @parm[in, out] dev_flow 2729 * Pointer to the dev_flow. 2730 * @param[out] error 2731 * pointer to error structure. 2732 * 2733 * @return 2734 * 0 on success otherwise -errno and errno is set. 2735 */ 2736 static int 2737 flow_dv_port_id_action_resource_register 2738 (struct rte_eth_dev *dev, 2739 struct mlx5_flow_dv_port_id_action_resource *resource, 2740 struct mlx5_flow *dev_flow, 2741 struct rte_flow_error *error) 2742 { 2743 struct mlx5_priv *priv = dev->data->dev_private; 2744 struct mlx5_dev_ctx_shared *sh = priv->sh; 2745 struct mlx5_flow_dv_port_id_action_resource *cache_resource; 2746 uint32_t idx = 0; 2747 int ret; 2748 2749 /* Lookup a matching resource from cache. */ 2750 ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PORT_ID], sh->port_id_action_list, 2751 idx, cache_resource, next) { 2752 if (resource->port_id == cache_resource->port_id) { 2753 DRV_LOG(DEBUG, "port id action resource resource %p: " 2754 "refcnt %d++", 2755 (void *)cache_resource, 2756 rte_atomic32_read(&cache_resource->refcnt)); 2757 rte_atomic32_inc(&cache_resource->refcnt); 2758 dev_flow->handle->rix_port_id_action = idx; 2759 dev_flow->dv.port_id_action = cache_resource; 2760 return 0; 2761 } 2762 } 2763 /* Register new port id action resource. */ 2764 cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], 2765 &dev_flow->handle->rix_port_id_action); 2766 if (!cache_resource) 2767 return rte_flow_error_set(error, ENOMEM, 2768 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2769 "cannot allocate resource memory"); 2770 *cache_resource = *resource; 2771 ret = mlx5_flow_os_create_flow_action_dest_port 2772 (priv->sh->fdb_domain, resource->port_id, 2773 &cache_resource->action); 2774 if (ret) { 2775 rte_free(cache_resource); 2776 return rte_flow_error_set(error, ENOMEM, 2777 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2778 NULL, "cannot create action"); 2779 } 2780 rte_atomic32_init(&cache_resource->refcnt); 2781 rte_atomic32_inc(&cache_resource->refcnt); 2782 ILIST_INSERT(sh->ipool[MLX5_IPOOL_PORT_ID], &sh->port_id_action_list, 2783 dev_flow->handle->rix_port_id_action, cache_resource, 2784 next); 2785 dev_flow->dv.port_id_action = cache_resource; 2786 DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++", 2787 (void *)cache_resource, 2788 rte_atomic32_read(&cache_resource->refcnt)); 2789 return 0; 2790 } 2791 2792 /** 2793 * Find existing push vlan resource or create and register a new one. 
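 *
 * Explanatory note: cached entries are matched on the 32-bit vlan_tag
 * value (VLAN ethertype and TCI as assembled by the push VLAN action)
 * together with the flow table type, so each distinct tag/domain pair
 * owns a single push VLAN action object shared by reference count.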
2794 * 2795 * @param [in, out] dev 2796 * Pointer to rte_eth_dev structure. 2797 * @param[in, out] resource 2798 * Pointer to port ID action resource. 2799 * @parm[in, out] dev_flow 2800 * Pointer to the dev_flow. 2801 * @param[out] error 2802 * pointer to error structure. 2803 * 2804 * @return 2805 * 0 on success otherwise -errno and errno is set. 2806 */ 2807 static int 2808 flow_dv_push_vlan_action_resource_register 2809 (struct rte_eth_dev *dev, 2810 struct mlx5_flow_dv_push_vlan_action_resource *resource, 2811 struct mlx5_flow *dev_flow, 2812 struct rte_flow_error *error) 2813 { 2814 struct mlx5_priv *priv = dev->data->dev_private; 2815 struct mlx5_dev_ctx_shared *sh = priv->sh; 2816 struct mlx5_flow_dv_push_vlan_action_resource *cache_resource; 2817 struct mlx5dv_dr_domain *domain; 2818 uint32_t idx = 0; 2819 int ret; 2820 2821 /* Lookup a matching resource from cache. */ 2822 ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PUSH_VLAN], 2823 sh->push_vlan_action_list, idx, cache_resource, next) { 2824 if (resource->vlan_tag == cache_resource->vlan_tag && 2825 resource->ft_type == cache_resource->ft_type) { 2826 DRV_LOG(DEBUG, "push-VLAN action resource resource %p: " 2827 "refcnt %d++", 2828 (void *)cache_resource, 2829 rte_atomic32_read(&cache_resource->refcnt)); 2830 rte_atomic32_inc(&cache_resource->refcnt); 2831 dev_flow->handle->dvh.rix_push_vlan = idx; 2832 dev_flow->dv.push_vlan_res = cache_resource; 2833 return 0; 2834 } 2835 } 2836 /* Register new push_vlan action resource. */ 2837 cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], 2838 &dev_flow->handle->dvh.rix_push_vlan); 2839 if (!cache_resource) 2840 return rte_flow_error_set(error, ENOMEM, 2841 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 2842 "cannot allocate resource memory"); 2843 *cache_resource = *resource; 2844 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) 2845 domain = sh->fdb_domain; 2846 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX) 2847 domain = sh->rx_domain; 2848 else 2849 domain = sh->tx_domain; 2850 ret = mlx5_flow_os_create_flow_action_push_vlan 2851 (domain, resource->vlan_tag, 2852 &cache_resource->action); 2853 if (ret) { 2854 rte_free(cache_resource); 2855 return rte_flow_error_set(error, ENOMEM, 2856 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 2857 NULL, "cannot create action"); 2858 } 2859 rte_atomic32_init(&cache_resource->refcnt); 2860 rte_atomic32_inc(&cache_resource->refcnt); 2861 ILIST_INSERT(sh->ipool[MLX5_IPOOL_PUSH_VLAN], 2862 &sh->push_vlan_action_list, 2863 dev_flow->handle->dvh.rix_push_vlan, 2864 cache_resource, next); 2865 dev_flow->dv.push_vlan_res = cache_resource; 2866 DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++", 2867 (void *)cache_resource, 2868 rte_atomic32_read(&cache_resource->refcnt)); 2869 return 0; 2870 } 2871 /** 2872 * Get the size of specific rte_flow_item_type 2873 * 2874 * @param[in] item_type 2875 * Tested rte_flow_item_type. 2876 * 2877 * @return 2878 * sizeof struct item_type, 0 if void or irrelevant. 
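 *
 * For example, RTE_FLOW_ITEM_TYPE_VXLAN yields
 * sizeof(struct rte_flow_item_vxlan). VOID and any item type not listed
 * in the switch yield 0; the encap conversion below skips VOID items and
 * rejects unknown ones.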
2879 */ 2880 static size_t 2881 flow_dv_get_item_len(const enum rte_flow_item_type item_type) 2882 { 2883 size_t retval; 2884 2885 switch (item_type) { 2886 case RTE_FLOW_ITEM_TYPE_ETH: 2887 retval = sizeof(struct rte_flow_item_eth); 2888 break; 2889 case RTE_FLOW_ITEM_TYPE_VLAN: 2890 retval = sizeof(struct rte_flow_item_vlan); 2891 break; 2892 case RTE_FLOW_ITEM_TYPE_IPV4: 2893 retval = sizeof(struct rte_flow_item_ipv4); 2894 break; 2895 case RTE_FLOW_ITEM_TYPE_IPV6: 2896 retval = sizeof(struct rte_flow_item_ipv6); 2897 break; 2898 case RTE_FLOW_ITEM_TYPE_UDP: 2899 retval = sizeof(struct rte_flow_item_udp); 2900 break; 2901 case RTE_FLOW_ITEM_TYPE_TCP: 2902 retval = sizeof(struct rte_flow_item_tcp); 2903 break; 2904 case RTE_FLOW_ITEM_TYPE_VXLAN: 2905 retval = sizeof(struct rte_flow_item_vxlan); 2906 break; 2907 case RTE_FLOW_ITEM_TYPE_GRE: 2908 retval = sizeof(struct rte_flow_item_gre); 2909 break; 2910 case RTE_FLOW_ITEM_TYPE_NVGRE: 2911 retval = sizeof(struct rte_flow_item_nvgre); 2912 break; 2913 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: 2914 retval = sizeof(struct rte_flow_item_vxlan_gpe); 2915 break; 2916 case RTE_FLOW_ITEM_TYPE_MPLS: 2917 retval = sizeof(struct rte_flow_item_mpls); 2918 break; 2919 case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */ 2920 default: 2921 retval = 0; 2922 break; 2923 } 2924 return retval; 2925 } 2926 2927 #define MLX5_ENCAP_IPV4_VERSION 0x40 2928 #define MLX5_ENCAP_IPV4_IHL_MIN 0x05 2929 #define MLX5_ENCAP_IPV4_TTL_DEF 0x40 2930 #define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000 2931 #define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff 2932 #define MLX5_ENCAP_VXLAN_FLAGS 0x08000000 2933 #define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04 2934 2935 /** 2936 * Convert the encap action data from list of rte_flow_item to raw buffer 2937 * 2938 * @param[in] items 2939 * Pointer to rte_flow_item objects list. 2940 * @param[out] buf 2941 * Pointer to the output buffer. 2942 * @param[out] size 2943 * Pointer to the output buffer size. 2944 * @param[out] error 2945 * Pointer to the error structure. 2946 * 2947 * @return 2948 * 0 on success, a negative errno value otherwise and rte_errno is set. 
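 *
 * Hypothetical illustration: for an encap definition of eth, ipv4, udp
 * and vxlan items the specs are copied back to back into buf and missing
 * protocol fields are filled with defaults (Ethernet type set to IPv4,
 * next_proto_id to UDP, UDP destination port to 4789, VXLAN flags to the
 * VNI-valid value), producing a 14 + 20 + 8 + 8 = 50 byte raw header.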
2949 */ 2950 static int 2951 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf, 2952 size_t *size, struct rte_flow_error *error) 2953 { 2954 struct rte_ether_hdr *eth = NULL; 2955 struct rte_vlan_hdr *vlan = NULL; 2956 struct rte_ipv4_hdr *ipv4 = NULL; 2957 struct rte_ipv6_hdr *ipv6 = NULL; 2958 struct rte_udp_hdr *udp = NULL; 2959 struct rte_vxlan_hdr *vxlan = NULL; 2960 struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL; 2961 struct rte_gre_hdr *gre = NULL; 2962 size_t len; 2963 size_t temp_size = 0; 2964 2965 if (!items) 2966 return rte_flow_error_set(error, EINVAL, 2967 RTE_FLOW_ERROR_TYPE_ACTION, 2968 NULL, "invalid empty data"); 2969 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { 2970 len = flow_dv_get_item_len(items->type); 2971 if (len + temp_size > MLX5_ENCAP_MAX_LEN) 2972 return rte_flow_error_set(error, EINVAL, 2973 RTE_FLOW_ERROR_TYPE_ACTION, 2974 (void *)items->type, 2975 "items total size is too big" 2976 " for encap action"); 2977 rte_memcpy((void *)&buf[temp_size], items->spec, len); 2978 switch (items->type) { 2979 case RTE_FLOW_ITEM_TYPE_ETH: 2980 eth = (struct rte_ether_hdr *)&buf[temp_size]; 2981 break; 2982 case RTE_FLOW_ITEM_TYPE_VLAN: 2983 vlan = (struct rte_vlan_hdr *)&buf[temp_size]; 2984 if (!eth) 2985 return rte_flow_error_set(error, EINVAL, 2986 RTE_FLOW_ERROR_TYPE_ACTION, 2987 (void *)items->type, 2988 "eth header not found"); 2989 if (!eth->ether_type) 2990 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN); 2991 break; 2992 case RTE_FLOW_ITEM_TYPE_IPV4: 2993 ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size]; 2994 if (!vlan && !eth) 2995 return rte_flow_error_set(error, EINVAL, 2996 RTE_FLOW_ERROR_TYPE_ACTION, 2997 (void *)items->type, 2998 "neither eth nor vlan" 2999 " header found"); 3000 if (vlan && !vlan->eth_proto) 3001 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4); 3002 else if (eth && !eth->ether_type) 3003 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4); 3004 if (!ipv4->version_ihl) 3005 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION | 3006 MLX5_ENCAP_IPV4_IHL_MIN; 3007 if (!ipv4->time_to_live) 3008 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF; 3009 break; 3010 case RTE_FLOW_ITEM_TYPE_IPV6: 3011 ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size]; 3012 if (!vlan && !eth) 3013 return rte_flow_error_set(error, EINVAL, 3014 RTE_FLOW_ERROR_TYPE_ACTION, 3015 (void *)items->type, 3016 "neither eth nor vlan" 3017 " header found"); 3018 if (vlan && !vlan->eth_proto) 3019 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6); 3020 else if (eth && !eth->ether_type) 3021 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6); 3022 if (!ipv6->vtc_flow) 3023 ipv6->vtc_flow = 3024 RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW); 3025 if (!ipv6->hop_limits) 3026 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT; 3027 break; 3028 case RTE_FLOW_ITEM_TYPE_UDP: 3029 udp = (struct rte_udp_hdr *)&buf[temp_size]; 3030 if (!ipv4 && !ipv6) 3031 return rte_flow_error_set(error, EINVAL, 3032 RTE_FLOW_ERROR_TYPE_ACTION, 3033 (void *)items->type, 3034 "ip header not found"); 3035 if (ipv4 && !ipv4->next_proto_id) 3036 ipv4->next_proto_id = IPPROTO_UDP; 3037 else if (ipv6 && !ipv6->proto) 3038 ipv6->proto = IPPROTO_UDP; 3039 break; 3040 case RTE_FLOW_ITEM_TYPE_VXLAN: 3041 vxlan = (struct rte_vxlan_hdr *)&buf[temp_size]; 3042 if (!udp) 3043 return rte_flow_error_set(error, EINVAL, 3044 RTE_FLOW_ERROR_TYPE_ACTION, 3045 (void *)items->type, 3046 "udp header not found"); 3047 if (!udp->dst_port) 3048 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN); 3049 if (!vxlan->vx_flags) 3050 vxlan->vx_flags = 3051 
RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS); 3052 break; 3053 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: 3054 vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size]; 3055 if (!udp) 3056 return rte_flow_error_set(error, EINVAL, 3057 RTE_FLOW_ERROR_TYPE_ACTION, 3058 (void *)items->type, 3059 "udp header not found"); 3060 if (!vxlan_gpe->proto) 3061 return rte_flow_error_set(error, EINVAL, 3062 RTE_FLOW_ERROR_TYPE_ACTION, 3063 (void *)items->type, 3064 "next protocol not found"); 3065 if (!udp->dst_port) 3066 udp->dst_port = 3067 RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE); 3068 if (!vxlan_gpe->vx_flags) 3069 vxlan_gpe->vx_flags = 3070 MLX5_ENCAP_VXLAN_GPE_FLAGS; 3071 break; 3072 case RTE_FLOW_ITEM_TYPE_GRE: 3073 case RTE_FLOW_ITEM_TYPE_NVGRE: 3074 gre = (struct rte_gre_hdr *)&buf[temp_size]; 3075 if (!gre->proto) 3076 return rte_flow_error_set(error, EINVAL, 3077 RTE_FLOW_ERROR_TYPE_ACTION, 3078 (void *)items->type, 3079 "next protocol not found"); 3080 if (!ipv4 && !ipv6) 3081 return rte_flow_error_set(error, EINVAL, 3082 RTE_FLOW_ERROR_TYPE_ACTION, 3083 (void *)items->type, 3084 "ip header not found"); 3085 if (ipv4 && !ipv4->next_proto_id) 3086 ipv4->next_proto_id = IPPROTO_GRE; 3087 else if (ipv6 && !ipv6->proto) 3088 ipv6->proto = IPPROTO_GRE; 3089 break; 3090 case RTE_FLOW_ITEM_TYPE_VOID: 3091 break; 3092 default: 3093 return rte_flow_error_set(error, EINVAL, 3094 RTE_FLOW_ERROR_TYPE_ACTION, 3095 (void *)items->type, 3096 "unsupported item type"); 3097 break; 3098 } 3099 temp_size += len; 3100 } 3101 *size = temp_size; 3102 return 0; 3103 } 3104 3105 static int 3106 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error) 3107 { 3108 struct rte_ether_hdr *eth = NULL; 3109 struct rte_vlan_hdr *vlan = NULL; 3110 struct rte_ipv6_hdr *ipv6 = NULL; 3111 struct rte_udp_hdr *udp = NULL; 3112 char *next_hdr; 3113 uint16_t proto; 3114 3115 eth = (struct rte_ether_hdr *)data; 3116 next_hdr = (char *)(eth + 1); 3117 proto = RTE_BE16(eth->ether_type); 3118 3119 /* VLAN skipping */ 3120 while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) { 3121 vlan = (struct rte_vlan_hdr *)next_hdr; 3122 proto = RTE_BE16(vlan->eth_proto); 3123 next_hdr += sizeof(struct rte_vlan_hdr); 3124 } 3125 3126 /* HW calculates IPv4 csum. no need to proceed */ 3127 if (proto == RTE_ETHER_TYPE_IPV4) 3128 return 0; 3129 3130 /* non IPv4/IPv6 header. not supported */ 3131 if (proto != RTE_ETHER_TYPE_IPV6) { 3132 return rte_flow_error_set(error, ENOTSUP, 3133 RTE_FLOW_ERROR_TYPE_ACTION, 3134 NULL, "Cannot offload non IPv4/IPv6"); 3135 } 3136 3137 ipv6 = (struct rte_ipv6_hdr *)next_hdr; 3138 3139 /* ignore non UDP */ 3140 if (ipv6->proto != IPPROTO_UDP) 3141 return 0; 3142 3143 udp = (struct rte_udp_hdr *)(ipv6 + 1); 3144 udp->dgram_cksum = 0; 3145 3146 return 0; 3147 } 3148 3149 /** 3150 * Convert L2 encap action to DV specification. 3151 * 3152 * @param[in] dev 3153 * Pointer to rte_eth_dev structure. 3154 * @param[in] action 3155 * Pointer to action structure. 3156 * @param[in, out] dev_flow 3157 * Pointer to the mlx5_flow. 3158 * @param[in] transfer 3159 * Mark if the flow is E-Switch flow. 3160 * @param[out] error 3161 * Pointer to the error structure. 3162 * 3163 * @return 3164 * 0 on success, a negative errno value otherwise and rte_errno is set. 
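 *
 * Explanatory note: for RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP and NVGRE_ENCAP
 * the headers are taken from the definition list in action->conf and
 * converted with flow_dv_convert_encap_data(); for RAW_ENCAP the caller
 * supplied buffer is copied as is. In all cases the UDP checksum of an
 * outer IPv6/UDP header is zeroed before the reformat resource is
 * registered.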
3165 */ 3166 static int 3167 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev, 3168 const struct rte_flow_action *action, 3169 struct mlx5_flow *dev_flow, 3170 uint8_t transfer, 3171 struct rte_flow_error *error) 3172 { 3173 const struct rte_flow_item *encap_data; 3174 const struct rte_flow_action_raw_encap *raw_encap_data; 3175 struct mlx5_flow_dv_encap_decap_resource res = { 3176 .reformat_type = 3177 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL, 3178 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB : 3179 MLX5DV_FLOW_TABLE_TYPE_NIC_TX, 3180 }; 3181 3182 if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { 3183 raw_encap_data = 3184 (const struct rte_flow_action_raw_encap *)action->conf; 3185 res.size = raw_encap_data->size; 3186 memcpy(res.buf, raw_encap_data->data, res.size); 3187 } else { 3188 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) 3189 encap_data = 3190 ((const struct rte_flow_action_vxlan_encap *) 3191 action->conf)->definition; 3192 else 3193 encap_data = 3194 ((const struct rte_flow_action_nvgre_encap *) 3195 action->conf)->definition; 3196 if (flow_dv_convert_encap_data(encap_data, res.buf, 3197 &res.size, error)) 3198 return -rte_errno; 3199 } 3200 if (flow_dv_zero_encap_udp_csum(res.buf, error)) 3201 return -rte_errno; 3202 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error)) 3203 return rte_flow_error_set(error, EINVAL, 3204 RTE_FLOW_ERROR_TYPE_ACTION, 3205 NULL, "can't create L2 encap action"); 3206 return 0; 3207 } 3208 3209 /** 3210 * Convert L2 decap action to DV specification. 3211 * 3212 * @param[in] dev 3213 * Pointer to rte_eth_dev structure. 3214 * @param[in, out] dev_flow 3215 * Pointer to the mlx5_flow. 3216 * @param[in] transfer 3217 * Mark if the flow is E-Switch flow. 3218 * @param[out] error 3219 * Pointer to the error structure. 3220 * 3221 * @return 3222 * 0 on success, a negative errno value otherwise and rte_errno is set. 3223 */ 3224 static int 3225 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev, 3226 struct mlx5_flow *dev_flow, 3227 uint8_t transfer, 3228 struct rte_flow_error *error) 3229 { 3230 struct mlx5_flow_dv_encap_decap_resource res = { 3231 .size = 0, 3232 .reformat_type = 3233 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2, 3234 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB : 3235 MLX5DV_FLOW_TABLE_TYPE_NIC_RX, 3236 }; 3237 3238 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error)) 3239 return rte_flow_error_set(error, EINVAL, 3240 RTE_FLOW_ERROR_TYPE_ACTION, 3241 NULL, "can't create L2 decap action"); 3242 return 0; 3243 } 3244 3245 /** 3246 * Convert raw decap/encap (L3 tunnel) action to DV specification. 3247 * 3248 * @param[in] dev 3249 * Pointer to rte_eth_dev structure. 3250 * @param[in] action 3251 * Pointer to action structure. 3252 * @param[in, out] dev_flow 3253 * Pointer to the mlx5_flow. 3254 * @param[in] attr 3255 * Pointer to the flow attributes. 3256 * @param[out] error 3257 * Pointer to the error structure. 3258 * 3259 * @return 3260 * 0 on success, a negative errno value otherwise and rte_errno is set. 
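 *
 * Explanatory note: the reformat type is chosen from the buffer size, a
 * size below MLX5_ENCAPSULATION_DECISION_SIZE selects L3-tunnel-to-L2
 * decap and anything larger selects L2-to-L3-tunnel encap; the table type
 * follows the flow attributes (FDB for transfer, otherwise NIC TX for
 * egress and NIC RX for ingress).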
3261 */ 3262 static int 3263 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev, 3264 const struct rte_flow_action *action, 3265 struct mlx5_flow *dev_flow, 3266 const struct rte_flow_attr *attr, 3267 struct rte_flow_error *error) 3268 { 3269 const struct rte_flow_action_raw_encap *encap_data; 3270 struct mlx5_flow_dv_encap_decap_resource res; 3271 3272 memset(&res, 0, sizeof(res)); 3273 encap_data = (const struct rte_flow_action_raw_encap *)action->conf; 3274 res.size = encap_data->size; 3275 memcpy(res.buf, encap_data->data, res.size); 3276 res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ? 3277 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 : 3278 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL; 3279 if (attr->transfer) 3280 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; 3281 else 3282 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX : 3283 MLX5DV_FLOW_TABLE_TYPE_NIC_RX; 3284 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error)) 3285 return rte_flow_error_set(error, EINVAL, 3286 RTE_FLOW_ERROR_TYPE_ACTION, 3287 NULL, "can't create encap action"); 3288 return 0; 3289 } 3290 3291 /** 3292 * Create action push VLAN. 3293 * 3294 * @param[in] dev 3295 * Pointer to rte_eth_dev structure. 3296 * @param[in] attr 3297 * Pointer to the flow attributes. 3298 * @param[in] vlan 3299 * Pointer to the vlan to push to the Ethernet header. 3300 * @param[in, out] dev_flow 3301 * Pointer to the mlx5_flow. 3302 * @param[out] error 3303 * Pointer to the error structure. 3304 * 3305 * @return 3306 * 0 on success, a negative errno value otherwise and rte_errno is set. 3307 */ 3308 static int 3309 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev, 3310 const struct rte_flow_attr *attr, 3311 const struct rte_vlan_hdr *vlan, 3312 struct mlx5_flow *dev_flow, 3313 struct rte_flow_error *error) 3314 { 3315 struct mlx5_flow_dv_push_vlan_action_resource res; 3316 3317 memset(&res, 0, sizeof(res)); 3318 res.vlan_tag = 3319 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 | 3320 vlan->vlan_tci); 3321 if (attr->transfer) 3322 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; 3323 else 3324 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX : 3325 MLX5DV_FLOW_TABLE_TYPE_NIC_RX; 3326 return flow_dv_push_vlan_action_resource_register 3327 (dev, &res, dev_flow, error); 3328 } 3329 3330 /** 3331 * Validate the modify-header actions. 3332 * 3333 * @param[in] action_flags 3334 * Holds the actions detected until now. 3335 * @param[in] action 3336 * Pointer to the modify action. 3337 * @param[out] error 3338 * Pointer to error structure. 3339 * 3340 * @return 3341 * 0 on success, a negative errno value otherwise and rte_errno is set. 3342 */ 3343 static int 3344 flow_dv_validate_action_modify_hdr(const uint64_t action_flags, 3345 const struct rte_flow_action *action, 3346 struct rte_flow_error *error) 3347 { 3348 if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf) 3349 return rte_flow_error_set(error, EINVAL, 3350 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 3351 NULL, "action configuration not set"); 3352 if (action_flags & MLX5_FLOW_ACTION_ENCAP) 3353 return rte_flow_error_set(error, EINVAL, 3354 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3355 "can't have encap action before" 3356 " modify action"); 3357 return 0; 3358 } 3359 3360 /** 3361 * Validate the modify-header MAC address actions. 3362 * 3363 * @param[in] action_flags 3364 * Holds the actions detected until now. 3365 * @param[in] action 3366 * Pointer to the modify action. 
3367 * @param[in] item_flags 3368 * Holds the items detected. 3369 * @param[out] error 3370 * Pointer to error structure. 3371 * 3372 * @return 3373 * 0 on success, a negative errno value otherwise and rte_errno is set. 3374 */ 3375 static int 3376 flow_dv_validate_action_modify_mac(const uint64_t action_flags, 3377 const struct rte_flow_action *action, 3378 const uint64_t item_flags, 3379 struct rte_flow_error *error) 3380 { 3381 int ret = 0; 3382 3383 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 3384 if (!ret) { 3385 if (!(item_flags & MLX5_FLOW_LAYER_L2)) 3386 return rte_flow_error_set(error, EINVAL, 3387 RTE_FLOW_ERROR_TYPE_ACTION, 3388 NULL, 3389 "no L2 item in pattern"); 3390 } 3391 return ret; 3392 } 3393 3394 /** 3395 * Validate the modify-header IPv4 address actions. 3396 * 3397 * @param[in] action_flags 3398 * Holds the actions detected until now. 3399 * @param[in] action 3400 * Pointer to the modify action. 3401 * @param[in] item_flags 3402 * Holds the items detected. 3403 * @param[out] error 3404 * Pointer to error structure. 3405 * 3406 * @return 3407 * 0 on success, a negative errno value otherwise and rte_errno is set. 3408 */ 3409 static int 3410 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags, 3411 const struct rte_flow_action *action, 3412 const uint64_t item_flags, 3413 struct rte_flow_error *error) 3414 { 3415 int ret = 0; 3416 uint64_t layer; 3417 3418 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 3419 if (!ret) { 3420 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? 3421 MLX5_FLOW_LAYER_INNER_L3_IPV4 : 3422 MLX5_FLOW_LAYER_OUTER_L3_IPV4; 3423 if (!(item_flags & layer)) 3424 return rte_flow_error_set(error, EINVAL, 3425 RTE_FLOW_ERROR_TYPE_ACTION, 3426 NULL, 3427 "no ipv4 item in pattern"); 3428 } 3429 return ret; 3430 } 3431 3432 /** 3433 * Validate the modify-header IPv6 address actions. 3434 * 3435 * @param[in] action_flags 3436 * Holds the actions detected until now. 3437 * @param[in] action 3438 * Pointer to the modify action. 3439 * @param[in] item_flags 3440 * Holds the items detected. 3441 * @param[out] error 3442 * Pointer to error structure. 3443 * 3444 * @return 3445 * 0 on success, a negative errno value otherwise and rte_errno is set. 3446 */ 3447 static int 3448 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags, 3449 const struct rte_flow_action *action, 3450 const uint64_t item_flags, 3451 struct rte_flow_error *error) 3452 { 3453 int ret = 0; 3454 uint64_t layer; 3455 3456 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 3457 if (!ret) { 3458 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? 3459 MLX5_FLOW_LAYER_INNER_L3_IPV6 : 3460 MLX5_FLOW_LAYER_OUTER_L3_IPV6; 3461 if (!(item_flags & layer)) 3462 return rte_flow_error_set(error, EINVAL, 3463 RTE_FLOW_ERROR_TYPE_ACTION, 3464 NULL, 3465 "no ipv6 item in pattern"); 3466 } 3467 return ret; 3468 } 3469 3470 /** 3471 * Validate the modify-header TP actions. 3472 * 3473 * @param[in] action_flags 3474 * Holds the actions detected until now. 3475 * @param[in] action 3476 * Pointer to the modify action. 3477 * @param[in] item_flags 3478 * Holds the items detected. 3479 * @param[out] error 3480 * Pointer to error structure. 3481 * 3482 * @return 3483 * 0 on success, a negative errno value otherwise and rte_errno is set. 
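 *
 * Illustration (hypothetical rule): set_tp_src or set_tp_dst is accepted
 * only when the pattern carries a transport layer item, e.g.
 *
 *   pattern: eth / ipv4 / udp / end
 *   actions: set_tp_dst port 53 / queue index 0 / end
 *
 * When the flow also performs a decap, the inner L4 layer is the one that
 * must be present in the pattern.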
3484 */ 3485 static int 3486 flow_dv_validate_action_modify_tp(const uint64_t action_flags, 3487 const struct rte_flow_action *action, 3488 const uint64_t item_flags, 3489 struct rte_flow_error *error) 3490 { 3491 int ret = 0; 3492 uint64_t layer; 3493 3494 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 3495 if (!ret) { 3496 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? 3497 MLX5_FLOW_LAYER_INNER_L4 : 3498 MLX5_FLOW_LAYER_OUTER_L4; 3499 if (!(item_flags & layer)) 3500 return rte_flow_error_set(error, EINVAL, 3501 RTE_FLOW_ERROR_TYPE_ACTION, 3502 NULL, "no transport layer " 3503 "in pattern"); 3504 } 3505 return ret; 3506 } 3507 3508 /** 3509 * Validate the modify-header actions of increment/decrement 3510 * TCP Sequence-number. 3511 * 3512 * @param[in] action_flags 3513 * Holds the actions detected until now. 3514 * @param[in] action 3515 * Pointer to the modify action. 3516 * @param[in] item_flags 3517 * Holds the items detected. 3518 * @param[out] error 3519 * Pointer to error structure. 3520 * 3521 * @return 3522 * 0 on success, a negative errno value otherwise and rte_errno is set. 3523 */ 3524 static int 3525 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags, 3526 const struct rte_flow_action *action, 3527 const uint64_t item_flags, 3528 struct rte_flow_error *error) 3529 { 3530 int ret = 0; 3531 uint64_t layer; 3532 3533 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 3534 if (!ret) { 3535 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? 3536 MLX5_FLOW_LAYER_INNER_L4_TCP : 3537 MLX5_FLOW_LAYER_OUTER_L4_TCP; 3538 if (!(item_flags & layer)) 3539 return rte_flow_error_set(error, EINVAL, 3540 RTE_FLOW_ERROR_TYPE_ACTION, 3541 NULL, "no TCP item in" 3542 " pattern"); 3543 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ && 3544 (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) || 3545 (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ && 3546 (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ))) 3547 return rte_flow_error_set(error, EINVAL, 3548 RTE_FLOW_ERROR_TYPE_ACTION, 3549 NULL, 3550 "cannot decrease and increase" 3551 " TCP sequence number" 3552 " at the same time"); 3553 } 3554 return ret; 3555 } 3556 3557 /** 3558 * Validate the modify-header actions of increment/decrement 3559 * TCP Acknowledgment number. 3560 * 3561 * @param[in] action_flags 3562 * Holds the actions detected until now. 3563 * @param[in] action 3564 * Pointer to the modify action. 3565 * @param[in] item_flags 3566 * Holds the items detected. 3567 * @param[out] error 3568 * Pointer to error structure. 3569 * 3570 * @return 3571 * 0 on success, a negative errno value otherwise and rte_errno is set. 3572 */ 3573 static int 3574 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags, 3575 const struct rte_flow_action *action, 3576 const uint64_t item_flags, 3577 struct rte_flow_error *error) 3578 { 3579 int ret = 0; 3580 uint64_t layer; 3581 3582 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 3583 if (!ret) { 3584 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? 
3585 MLX5_FLOW_LAYER_INNER_L4_TCP : 3586 MLX5_FLOW_LAYER_OUTER_L4_TCP; 3587 if (!(item_flags & layer)) 3588 return rte_flow_error_set(error, EINVAL, 3589 RTE_FLOW_ERROR_TYPE_ACTION, 3590 NULL, "no TCP item in" 3591 " pattern"); 3592 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK && 3593 (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) || 3594 (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK && 3595 (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK))) 3596 return rte_flow_error_set(error, EINVAL, 3597 RTE_FLOW_ERROR_TYPE_ACTION, 3598 NULL, 3599 "cannot decrease and increase" 3600 " TCP acknowledgment number" 3601 " at the same time"); 3602 } 3603 return ret; 3604 } 3605 3606 /** 3607 * Validate the modify-header TTL actions. 3608 * 3609 * @param[in] action_flags 3610 * Holds the actions detected until now. 3611 * @param[in] action 3612 * Pointer to the modify action. 3613 * @param[in] item_flags 3614 * Holds the items detected. 3615 * @param[out] error 3616 * Pointer to error structure. 3617 * 3618 * @return 3619 * 0 on success, a negative errno value otherwise and rte_errno is set. 3620 */ 3621 static int 3622 flow_dv_validate_action_modify_ttl(const uint64_t action_flags, 3623 const struct rte_flow_action *action, 3624 const uint64_t item_flags, 3625 struct rte_flow_error *error) 3626 { 3627 int ret = 0; 3628 uint64_t layer; 3629 3630 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 3631 if (!ret) { 3632 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? 3633 MLX5_FLOW_LAYER_INNER_L3 : 3634 MLX5_FLOW_LAYER_OUTER_L3; 3635 if (!(item_flags & layer)) 3636 return rte_flow_error_set(error, EINVAL, 3637 RTE_FLOW_ERROR_TYPE_ACTION, 3638 NULL, 3639 "no IP protocol in pattern"); 3640 } 3641 return ret; 3642 } 3643 3644 /** 3645 * Validate jump action. 3646 * 3647 * @param[in] action 3648 * Pointer to the jump action. 3649 * @param[in] action_flags 3650 * Holds the actions detected until now. 3651 * @param[in] attributes 3652 * Pointer to flow attributes 3653 * @param[in] external 3654 * Action belongs to flow rule created by request external to PMD. 3655 * @param[out] error 3656 * Pointer to error structure. 3657 * 3658 * @return 3659 * 0 on success, a negative errno value otherwise and rte_errno is set. 
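 *
 * For reference, a minimal jump action of the kind validated here (the group
 * number is illustrative only):
 *
 * @code
 *	struct rte_flow_action_jump jump = { .group = 2 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 * @endcode
 *
 * The target group is translated to a table by mlx5_flow_group_to_table()
 * below and must differ from the group the flow rule itself resides in.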
3660 */ 3661 static int 3662 flow_dv_validate_action_jump(const struct rte_flow_action *action, 3663 uint64_t action_flags, 3664 const struct rte_flow_attr *attributes, 3665 bool external, struct rte_flow_error *error) 3666 { 3667 uint32_t target_group, table; 3668 int ret = 0; 3669 3670 if (action_flags & (MLX5_FLOW_FATE_ACTIONS | 3671 MLX5_FLOW_FATE_ESWITCH_ACTIONS)) 3672 return rte_flow_error_set(error, EINVAL, 3673 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3674 "can't have 2 fate actions in" 3675 " same flow"); 3676 if (action_flags & MLX5_FLOW_ACTION_METER) 3677 return rte_flow_error_set(error, ENOTSUP, 3678 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3679 "jump with meter not support"); 3680 if (!action->conf) 3681 return rte_flow_error_set(error, EINVAL, 3682 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 3683 NULL, "action configuration not set"); 3684 target_group = 3685 ((const struct rte_flow_action_jump *)action->conf)->group; 3686 ret = mlx5_flow_group_to_table(attributes, external, target_group, 3687 true, &table, error); 3688 if (ret) 3689 return ret; 3690 if (attributes->group == target_group) 3691 return rte_flow_error_set(error, EINVAL, 3692 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3693 "target group must be other than" 3694 " the current flow group"); 3695 return 0; 3696 } 3697 3698 /* 3699 * Validate the port_id action. 3700 * 3701 * @param[in] dev 3702 * Pointer to rte_eth_dev structure. 3703 * @param[in] action_flags 3704 * Bit-fields that holds the actions detected until now. 3705 * @param[in] action 3706 * Port_id RTE action structure. 3707 * @param[in] attr 3708 * Attributes of flow that includes this action. 3709 * @param[out] error 3710 * Pointer to error structure. 3711 * 3712 * @return 3713 * 0 on success, a negative errno value otherwise and rte_errno is set. 3714 */ 3715 static int 3716 flow_dv_validate_action_port_id(struct rte_eth_dev *dev, 3717 uint64_t action_flags, 3718 const struct rte_flow_action *action, 3719 const struct rte_flow_attr *attr, 3720 struct rte_flow_error *error) 3721 { 3722 const struct rte_flow_action_port_id *port_id; 3723 struct mlx5_priv *act_priv; 3724 struct mlx5_priv *dev_priv; 3725 uint16_t port; 3726 3727 if (!attr->transfer) 3728 return rte_flow_error_set(error, ENOTSUP, 3729 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 3730 NULL, 3731 "port id action is valid in transfer" 3732 " mode only"); 3733 if (!action || !action->conf) 3734 return rte_flow_error_set(error, ENOTSUP, 3735 RTE_FLOW_ERROR_TYPE_ACTION_CONF, 3736 NULL, 3737 "port id action parameters must be" 3738 " specified"); 3739 if (action_flags & (MLX5_FLOW_FATE_ACTIONS | 3740 MLX5_FLOW_FATE_ESWITCH_ACTIONS)) 3741 return rte_flow_error_set(error, EINVAL, 3742 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3743 "can have only one fate actions in" 3744 " a flow"); 3745 dev_priv = mlx5_dev_to_eswitch_info(dev); 3746 if (!dev_priv) 3747 return rte_flow_error_set(error, rte_errno, 3748 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 3749 NULL, 3750 "failed to obtain E-Switch info"); 3751 port_id = action->conf; 3752 port = port_id->original ? 
dev->data->port_id : port_id->id; 3753 act_priv = mlx5_port_to_eswitch_info(port, false); 3754 if (!act_priv) 3755 return rte_flow_error_set 3756 (error, rte_errno, 3757 RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id, 3758 "failed to obtain E-Switch port id for port"); 3759 if (act_priv->domain_id != dev_priv->domain_id) 3760 return rte_flow_error_set 3761 (error, EINVAL, 3762 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3763 "port does not belong to" 3764 " E-Switch being configured"); 3765 return 0; 3766 } 3767 3768 /** 3769 * Get the maximum number of modify header actions. 3770 * 3771 * @param dev 3772 * Pointer to rte_eth_dev structure. 3773 * @param flags 3774 * Flags bits to check if root level. 3775 * 3776 * @return 3777 * Max number of modify header actions device can support. 3778 */ 3779 static inline unsigned int 3780 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused, 3781 uint64_t flags) 3782 { 3783 /* 3784 * There's no way to directly query the max capacity from FW. 3785 * The maximal value on root table should be assumed to be supported. 3786 */ 3787 if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL)) 3788 return MLX5_MAX_MODIFY_NUM; 3789 else 3790 return MLX5_ROOT_TBL_MODIFY_NUM; 3791 } 3792 3793 /** 3794 * Validate the meter action. 3795 * 3796 * @param[in] dev 3797 * Pointer to rte_eth_dev structure. 3798 * @param[in] action_flags 3799 * Bit-fields that holds the actions detected until now. 3800 * @param[in] action 3801 * Pointer to the meter action. 3802 * @param[in] attr 3803 * Attributes of flow that includes this action. 3804 * @param[out] error 3805 * Pointer to error structure. 3806 * 3807 * @return 3808 * 0 on success, a negative errno value otherwise and rte_errno is set. 3809 */ 3810 static int 3811 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev, 3812 uint64_t action_flags, 3813 const struct rte_flow_action *action, 3814 const struct rte_flow_attr *attr, 3815 struct rte_flow_error *error) 3816 { 3817 struct mlx5_priv *priv = dev->data->dev_private; 3818 const struct rte_flow_action_meter *am = action->conf; 3819 struct mlx5_flow_meter *fm; 3820 3821 if (!am) 3822 return rte_flow_error_set(error, EINVAL, 3823 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3824 "meter action conf is NULL"); 3825 3826 if (action_flags & MLX5_FLOW_ACTION_METER) 3827 return rte_flow_error_set(error, ENOTSUP, 3828 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3829 "meter chaining not supported"); 3830 if (action_flags & MLX5_FLOW_ACTION_JUMP) 3831 return rte_flow_error_set(error, ENOTSUP, 3832 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3833 "meter with jump not supported"); 3834 if (!priv->mtr_en) 3835 return rte_flow_error_set(error, ENOTSUP, 3836 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 3837 NULL, 3838 "meter action not supported"); 3839 fm = mlx5_flow_meter_find(priv, am->mtr_id); 3840 if (!fm) 3841 return rte_flow_error_set(error, EINVAL, 3842 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3843 "Meter not found"); 3844 if (fm->ref_cnt && (!(fm->transfer == attr->transfer || 3845 (!fm->ingress && !attr->ingress && attr->egress) || 3846 (!fm->egress && !attr->egress && attr->ingress)))) 3847 return rte_flow_error_set(error, EINVAL, 3848 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3849 "Flow attributes are either invalid " 3850 "or have a conflict with current " 3851 "meter attributes"); 3852 return 0; 3853 } 3854 3855 /** 3856 * Validate the age action. 3857 * 3858 * @param[in] action_flags 3859 * Holds the actions detected until now. 3860 * @param[in] action 3861 * Pointer to the age action.
3862 * @param[in] dev 3863 * Pointer to the Ethernet device structure. 3864 * @param[out] error 3865 * Pointer to error structure. 3866 * 3867 * @return 3868 * 0 on success, a negative errno value otherwise and rte_errno is set. 3869 */ 3870 static int 3871 flow_dv_validate_action_age(uint64_t action_flags, 3872 const struct rte_flow_action *action, 3873 struct rte_eth_dev *dev, 3874 struct rte_flow_error *error) 3875 { 3876 struct mlx5_priv *priv = dev->data->dev_private; 3877 const struct rte_flow_action_age *age = action->conf; 3878 3879 if (!priv->config.devx || priv->counter_fallback) 3880 return rte_flow_error_set(error, ENOTSUP, 3881 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 3882 NULL, 3883 "age action not supported"); 3884 if (!(action->conf)) 3885 return rte_flow_error_set(error, EINVAL, 3886 RTE_FLOW_ERROR_TYPE_ACTION, action, 3887 "configuration cannot be null"); 3888 if (age->timeout >= UINT16_MAX / 2 / 10) 3889 return rte_flow_error_set(error, ENOTSUP, 3890 RTE_FLOW_ERROR_TYPE_ACTION, action, 3891 "Max age time: 3275 seconds"); 3892 if (action_flags & MLX5_FLOW_ACTION_AGE) 3893 return rte_flow_error_set(error, EINVAL, 3894 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 3895 "Duplicate age actions set"); 3896 return 0; 3897 } 3898 3899 /** 3900 * Validate the modify-header IPv4 DSCP actions. 3901 * 3902 * @param[in] action_flags 3903 * Holds the actions detected until now. 3904 * @param[in] action 3905 * Pointer to the modify action. 3906 * @param[in] item_flags 3907 * Holds the items detected. 3908 * @param[out] error 3909 * Pointer to error structure. 3910 * 3911 * @return 3912 * 0 on success, a negative errno value otherwise and rte_errno is set. 3913 */ 3914 static int 3915 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags, 3916 const struct rte_flow_action *action, 3917 const uint64_t item_flags, 3918 struct rte_flow_error *error) 3919 { 3920 int ret = 0; 3921 3922 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 3923 if (!ret) { 3924 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4)) 3925 return rte_flow_error_set(error, EINVAL, 3926 RTE_FLOW_ERROR_TYPE_ACTION, 3927 NULL, 3928 "no ipv4 item in pattern"); 3929 } 3930 return ret; 3931 } 3932 3933 /** 3934 * Validate the modify-header IPv6 DSCP actions. 3935 * 3936 * @param[in] action_flags 3937 * Holds the actions detected until now. 3938 * @param[in] action 3939 * Pointer to the modify action. 3940 * @param[in] item_flags 3941 * Holds the items detected. 3942 * @param[out] error 3943 * Pointer to error structure. 3944 * 3945 * @return 3946 * 0 on success, a negative errno value otherwise and rte_errno is set. 3947 */ 3948 static int 3949 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags, 3950 const struct rte_flow_action *action, 3951 const uint64_t item_flags, 3952 struct rte_flow_error *error) 3953 { 3954 int ret = 0; 3955 3956 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); 3957 if (!ret) { 3958 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6)) 3959 return rte_flow_error_set(error, EINVAL, 3960 RTE_FLOW_ERROR_TYPE_ACTION, 3961 NULL, 3962 "no ipv6 item in pattern"); 3963 } 3964 return ret; 3965 } 3966 3967 /** 3968 * Find existing modify-header resource or create and register a new one. 3969 * 3970 * @param[in, out] dev 3971 * Pointer to rte_eth_dev structure. 3972 * @param[in, out] resource 3973 * Pointer to modify-header resource. 3974 * @param[in, out] dev_flow 3975 * Pointer to the dev_flow. 3976 * @param[out] error 3977 * Pointer to error structure.
3978 * 3979 * @return 3980 * 0 on success otherwise -errno and errno is set. 3981 */ 3982 static int 3983 flow_dv_modify_hdr_resource_register 3984 (struct rte_eth_dev *dev, 3985 struct mlx5_flow_dv_modify_hdr_resource *resource, 3986 struct mlx5_flow *dev_flow, 3987 struct rte_flow_error *error) 3988 { 3989 struct mlx5_priv *priv = dev->data->dev_private; 3990 struct mlx5_dev_ctx_shared *sh = priv->sh; 3991 struct mlx5_flow_dv_modify_hdr_resource *cache_resource; 3992 struct mlx5dv_dr_domain *ns; 3993 uint32_t actions_len; 3994 int ret; 3995 3996 resource->flags = dev_flow->dv.group ? 0 : 3997 MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL; 3998 if (resource->actions_num > flow_dv_modify_hdr_action_max(dev, 3999 resource->flags)) 4000 return rte_flow_error_set(error, EOVERFLOW, 4001 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 4002 "too many modify header items"); 4003 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) 4004 ns = sh->fdb_domain; 4005 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX) 4006 ns = sh->tx_domain; 4007 else 4008 ns = sh->rx_domain; 4009 /* Lookup a matching resource from cache. */ 4010 actions_len = resource->actions_num * sizeof(resource->actions[0]); 4011 LIST_FOREACH(cache_resource, &sh->modify_cmds, next) { 4012 if (resource->ft_type == cache_resource->ft_type && 4013 resource->actions_num == cache_resource->actions_num && 4014 resource->flags == cache_resource->flags && 4015 !memcmp((const void *)resource->actions, 4016 (const void *)cache_resource->actions, 4017 actions_len)) { 4018 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++", 4019 (void *)cache_resource, 4020 rte_atomic32_read(&cache_resource->refcnt)); 4021 rte_atomic32_inc(&cache_resource->refcnt); 4022 dev_flow->handle->dvh.modify_hdr = cache_resource; 4023 return 0; 4024 } 4025 } 4026 /* Register new modify-header resource. */ 4027 cache_resource = rte_calloc(__func__, 1, 4028 sizeof(*cache_resource) + actions_len, 0); 4029 if (!cache_resource) 4030 return rte_flow_error_set(error, ENOMEM, 4031 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 4032 "cannot allocate resource memory"); 4033 *cache_resource = *resource; 4034 rte_memcpy(cache_resource->actions, resource->actions, actions_len); 4035 ret = mlx5_flow_os_create_flow_action_modify_header 4036 (sh->ctx, ns, cache_resource, 4037 actions_len, &cache_resource->action); 4038 if (ret) { 4039 rte_free(cache_resource); 4040 return rte_flow_error_set(error, ENOMEM, 4041 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 4042 NULL, "cannot create action"); 4043 } 4044 rte_atomic32_init(&cache_resource->refcnt); 4045 rte_atomic32_inc(&cache_resource->refcnt); 4046 LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next); 4047 dev_flow->handle->dvh.modify_hdr = cache_resource; 4048 DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++", 4049 (void *)cache_resource, 4050 rte_atomic32_read(&cache_resource->refcnt)); 4051 return 0; 4052 } 4053 4054 /** 4055 * Get DV flow counter by index. 4056 * 4057 * @param[in] dev 4058 * Pointer to the Ethernet device structure. 4059 * @param[in] idx 4060 * mlx5 flow counter index in the container. 4061 * @param[out] ppool 4062 * mlx5 flow counter pool in the container, 4063 * 4064 * @return 4065 * Pointer to the counter, NULL otherwise. 
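 *
 * The index is expected in the 1-based encoded form produced by
 * flow_dv_counter_alloc(): pool_index * MLX5_COUNTERS_PER_POOL + offset,
 * optionally biased by MLX5_CNT_BATCH_OFFSET for batch counters and by
 * MLX5_CNT_AGE_OFFSET for aging counters; the code below strips these
 * biases again to locate the pool and the counter record.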
4066 */ 4067 static struct mlx5_flow_counter * 4068 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev, 4069 uint32_t idx, 4070 struct mlx5_flow_counter_pool **ppool) 4071 { 4072 struct mlx5_priv *priv = dev->data->dev_private; 4073 struct mlx5_pools_container *cont; 4074 struct mlx5_flow_counter_pool *pool; 4075 uint32_t batch = 0, age = 0; 4076 4077 idx--; 4078 age = MLX_CNT_IS_AGE(idx); 4079 idx = age ? idx - MLX5_CNT_AGE_OFFSET : idx; 4080 if (idx >= MLX5_CNT_BATCH_OFFSET) { 4081 idx -= MLX5_CNT_BATCH_OFFSET; 4082 batch = 1; 4083 } 4084 cont = MLX5_CNT_CONTAINER(priv->sh, batch, age); 4085 MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cont->n); 4086 pool = cont->pools[idx / MLX5_COUNTERS_PER_POOL]; 4087 MLX5_ASSERT(pool); 4088 if (ppool) 4089 *ppool = pool; 4090 return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL); 4091 } 4092 4093 /** 4094 * Check the devx counter belongs to the pool. 4095 * 4096 * @param[in] pool 4097 * Pointer to the counter pool. 4098 * @param[in] id 4099 * The counter devx ID. 4100 * 4101 * @return 4102 * True if counter belongs to the pool, false otherwise. 4103 */ 4104 static bool 4105 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id) 4106 { 4107 int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) * 4108 MLX5_COUNTERS_PER_POOL; 4109 4110 if (id >= base && id < base + MLX5_COUNTERS_PER_POOL) 4111 return true; 4112 return false; 4113 } 4114 4115 /** 4116 * Get a pool by devx counter ID. 4117 * 4118 * @param[in] cont 4119 * Pointer to the counter container. 4120 * @param[in] id 4121 * The counter devx ID. 4122 * 4123 * @return 4124 * The counter pool pointer if exists, NULL otherwise, 4125 */ 4126 static struct mlx5_flow_counter_pool * 4127 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id) 4128 { 4129 uint32_t i; 4130 4131 /* Check last used pool. */ 4132 if (cont->last_pool_idx != POOL_IDX_INVALID && 4133 flow_dv_is_counter_in_pool(cont->pools[cont->last_pool_idx], id)) 4134 return cont->pools[cont->last_pool_idx]; 4135 /* ID out of range means no suitable pool in the container. */ 4136 if (id > cont->max_id || id < cont->min_id) 4137 return NULL; 4138 /* 4139 * Find the pool from the end of the container, since mostly counter 4140 * ID is sequence increasing, and the last pool should be the needed 4141 * one. 4142 */ 4143 i = rte_atomic16_read(&cont->n_valid); 4144 while (i--) { 4145 struct mlx5_flow_counter_pool *pool = cont->pools[i]; 4146 4147 if (flow_dv_is_counter_in_pool(pool, id)) 4148 return pool; 4149 } 4150 return NULL; 4151 } 4152 4153 /** 4154 * Allocate a new memory for the counter values wrapped by all the needed 4155 * management. 4156 * 4157 * @param[in] dev 4158 * Pointer to the Ethernet device structure. 4159 * @param[in] raws_n 4160 * The raw memory areas - each one for MLX5_COUNTERS_PER_POOL counters. 4161 * 4162 * @return 4163 * The new memory management pointer on success, otherwise NULL and rte_errno 4164 * is set. 
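 *
 * The single allocation built below is laid out as follows (boundaries as
 * computed in the code): the counter statistics entries (raws_n *
 * MLX5_COUNTERS_PER_POOL of them, registered as umem and covered by the
 * mkey), followed by the raws_n raw descriptors pointing into that area,
 * with the mem_mng structure itself placed at the very end of the block.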
4165 */ 4166 static struct mlx5_counter_stats_mem_mng * 4167 flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n) 4168 { 4169 struct mlx5_priv *priv = dev->data->dev_private; 4170 struct mlx5_dev_ctx_shared *sh = priv->sh; 4171 struct mlx5_devx_mkey_attr mkey_attr; 4172 struct mlx5_counter_stats_mem_mng *mem_mng; 4173 volatile struct flow_counter_stats *raw_data; 4174 int size = (sizeof(struct flow_counter_stats) * 4175 MLX5_COUNTERS_PER_POOL + 4176 sizeof(struct mlx5_counter_stats_raw)) * raws_n + 4177 sizeof(struct mlx5_counter_stats_mem_mng); 4178 uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE)); 4179 int i; 4180 4181 if (!mem) { 4182 rte_errno = ENOMEM; 4183 return NULL; 4184 } 4185 mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1; 4186 size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n; 4187 mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size, 4188 IBV_ACCESS_LOCAL_WRITE); 4189 if (!mem_mng->umem) { 4190 rte_errno = errno; 4191 rte_free(mem); 4192 return NULL; 4193 } 4194 mkey_attr.addr = (uintptr_t)mem; 4195 mkey_attr.size = size; 4196 mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem); 4197 mkey_attr.pd = sh->pdn; 4198 mkey_attr.log_entity_size = 0; 4199 mkey_attr.pg_access = 0; 4200 mkey_attr.klm_array = NULL; 4201 mkey_attr.klm_num = 0; 4202 if (priv->config.hca_attr.relaxed_ordering_write && 4203 priv->config.hca_attr.relaxed_ordering_read && 4204 !haswell_broadwell_cpu) 4205 mkey_attr.relaxed_ordering = 1; 4206 mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr); 4207 if (!mem_mng->dm) { 4208 mlx5_glue->devx_umem_dereg(mem_mng->umem); 4209 rte_errno = errno; 4210 rte_free(mem); 4211 return NULL; 4212 } 4213 mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size); 4214 raw_data = (volatile struct flow_counter_stats *)mem; 4215 for (i = 0; i < raws_n; ++i) { 4216 mem_mng->raws[i].mem_mng = mem_mng; 4217 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL; 4218 } 4219 LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next); 4220 return mem_mng; 4221 } 4222 4223 /** 4224 * Resize a counter container. 4225 * 4226 * @param[in] dev 4227 * Pointer to the Ethernet device structure. 4228 * @param[in] batch 4229 * Whether the pool is for counter that was allocated by batch command. 4230 * @param[in] age 4231 * Whether the pool is for Aging counter. 4232 * 4233 * @return 4234 * 0 on success, otherwise negative errno value and rte_errno is set. 4235 */ 4236 static int 4237 flow_dv_container_resize(struct rte_eth_dev *dev, 4238 uint32_t batch, uint32_t age) 4239 { 4240 struct mlx5_priv *priv = dev->data->dev_private; 4241 struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch, 4242 age); 4243 struct mlx5_counter_stats_mem_mng *mem_mng = NULL; 4244 void *old_pools = cont->pools; 4245 uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE; 4246 uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize; 4247 void *pools = rte_calloc(__func__, 1, mem_size, 0); 4248 4249 if (!pools) { 4250 rte_errno = ENOMEM; 4251 return -ENOMEM; 4252 } 4253 if (old_pools) 4254 memcpy(pools, old_pools, cont->n * 4255 sizeof(struct mlx5_flow_counter_pool *)); 4256 /* 4257 * Fallback mode query the counter directly, no background query 4258 * resources are needed. 
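	 * In the non-fallback mode the block below also allocates a new counter
	 * statistics memory area and queues MLX5_MAX_PENDING_QUERIES spare raws
	 * for the asynchronous batch query done by the host thread.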
4259 */ 4260 if (!priv->counter_fallback) { 4261 int i; 4262 4263 mem_mng = flow_dv_create_counter_stat_mem_mng(dev, 4264 MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES); 4265 if (!mem_mng) { 4266 rte_free(pools); 4267 return -ENOMEM; 4268 } 4269 for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i) 4270 LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws, 4271 mem_mng->raws + 4272 MLX5_CNT_CONTAINER_RESIZE + 4273 i, next); 4274 } 4275 rte_spinlock_lock(&cont->resize_sl); 4276 cont->n = resize; 4277 cont->mem_mng = mem_mng; 4278 cont->pools = pools; 4279 rte_spinlock_unlock(&cont->resize_sl); 4280 if (old_pools) 4281 rte_free(old_pools); 4282 return 0; 4283 } 4284 4285 /** 4286 * Query a devx flow counter. 4287 * 4288 * @param[in] dev 4289 * Pointer to the Ethernet device structure. 4290 * @param[in] cnt 4291 * Index to the flow counter. 4292 * @param[out] pkts 4293 * The statistics value of packets. 4294 * @param[out] bytes 4295 * The statistics value of bytes. 4296 * 4297 * @return 4298 * 0 on success, otherwise a negative errno value and rte_errno is set. 4299 */ 4300 static inline int 4301 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts, 4302 uint64_t *bytes) 4303 { 4304 struct mlx5_priv *priv = dev->data->dev_private; 4305 struct mlx5_flow_counter_pool *pool = NULL; 4306 struct mlx5_flow_counter *cnt; 4307 struct mlx5_flow_counter_ext *cnt_ext = NULL; 4308 int offset; 4309 4310 cnt = flow_dv_counter_get_by_idx(dev, counter, &pool); 4311 MLX5_ASSERT(pool); 4312 if (counter < MLX5_CNT_BATCH_OFFSET) { 4313 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt); 4314 if (priv->counter_fallback) 4315 return mlx5_devx_cmd_flow_counter_query(cnt_ext->dcs, 0, 4316 0, pkts, bytes, 0, NULL, NULL, 0); 4317 } 4318 4319 rte_spinlock_lock(&pool->sl); 4320 /* 4321 * The single counters allocation may allocate smaller ID than the 4322 * current allocated in parallel to the host reading. 4323 * In this case the new counter values must be reported as 0. 4324 */ 4325 if (unlikely(cnt_ext && cnt_ext->dcs->id < pool->raw->min_dcs_id)) { 4326 *pkts = 0; 4327 *bytes = 0; 4328 } else { 4329 offset = MLX5_CNT_ARRAY_IDX(pool, cnt); 4330 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits); 4331 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes); 4332 } 4333 rte_spinlock_unlock(&pool->sl); 4334 return 0; 4335 } 4336 4337 /** 4338 * Create and initialize a new counter pool. 4339 * 4340 * @param[in] dev 4341 * Pointer to the Ethernet device structure. 4342 * @param[out] dcs 4343 * The devX counter handle. 4344 * @param[in] batch 4345 * Whether the pool is for counter that was allocated by batch command. 4346 * @param[in] age 4347 * Whether the pool is for counter that was allocated for aging. 4348 * @param[in/out] cont_cur 4349 * Pointer to the container pointer, it will be update in pool resize. 4350 * 4351 * @return 4352 * The pool container pointer on success, NULL otherwise and rte_errno is set. 
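 *
 * The pool is allocated as a single block: the pool header,
 * MLX5_COUNTERS_PER_POOL counters, an extended-counter area for non-batch
 * pools and an age-parameter area for aging pools (see the size
 * computation below).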
4353 */ 4354 static struct mlx5_flow_counter_pool * 4355 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs, 4356 uint32_t batch, uint32_t age) 4357 { 4358 struct mlx5_priv *priv = dev->data->dev_private; 4359 struct mlx5_flow_counter_pool *pool; 4360 struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch, 4361 age); 4362 int16_t n_valid = rte_atomic16_read(&cont->n_valid); 4363 uint32_t size = sizeof(*pool); 4364 4365 if (cont->n == n_valid && flow_dv_container_resize(dev, batch, age)) 4366 return NULL; 4367 size += MLX5_COUNTERS_PER_POOL * CNT_SIZE; 4368 size += (batch ? 0 : MLX5_COUNTERS_PER_POOL * CNTEXT_SIZE); 4369 size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * AGE_SIZE); 4370 pool = rte_calloc(__func__, 1, size, 0); 4371 if (!pool) { 4372 rte_errno = ENOMEM; 4373 return NULL; 4374 } 4375 pool->min_dcs = dcs; 4376 if (!priv->counter_fallback) 4377 pool->raw = cont->mem_mng->raws + n_valid % 4378 MLX5_CNT_CONTAINER_RESIZE; 4379 pool->raw_hw = NULL; 4380 pool->type = 0; 4381 pool->type |= (batch ? 0 : CNT_POOL_TYPE_EXT); 4382 pool->type |= (!age ? 0 : CNT_POOL_TYPE_AGE); 4383 pool->query_gen = 0; 4384 rte_spinlock_init(&pool->sl); 4385 TAILQ_INIT(&pool->counters[0]); 4386 TAILQ_INIT(&pool->counters[1]); 4387 TAILQ_INSERT_HEAD(&cont->pool_list, pool, next); 4388 pool->index = n_valid; 4389 cont->pools[n_valid] = pool; 4390 if (!batch) { 4391 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL); 4392 4393 if (base < cont->min_id) 4394 cont->min_id = base; 4395 if (base > cont->max_id) 4396 cont->max_id = base + MLX5_COUNTERS_PER_POOL - 1; 4397 cont->last_pool_idx = pool->index; 4398 } 4399 /* Pool initialization must be updated before host thread access. */ 4400 rte_cio_wmb(); 4401 rte_atomic16_add(&cont->n_valid, 1); 4402 return pool; 4403 } 4404 4405 /** 4406 * Update the minimum dcs-id for aged or no-aged counter pool. 4407 * 4408 * @param[in] dev 4409 * Pointer to the Ethernet device structure. 4410 * @param[in] pool 4411 * Current counter pool. 4412 * @param[in] batch 4413 * Whether the pool is for counter that was allocated by batch command. 4414 * @param[in] age 4415 * Whether the counter is for aging. 4416 */ 4417 static void 4418 flow_dv_counter_update_min_dcs(struct rte_eth_dev *dev, 4419 struct mlx5_flow_counter_pool *pool, 4420 uint32_t batch, uint32_t age) 4421 { 4422 struct mlx5_priv *priv = dev->data->dev_private; 4423 struct mlx5_flow_counter_pool *other; 4424 struct mlx5_pools_container *cont; 4425 4426 cont = MLX5_CNT_CONTAINER(priv->sh, batch, (age ^ 0x1)); 4427 other = flow_dv_find_pool_by_id(cont, pool->min_dcs->id); 4428 if (!other) 4429 return; 4430 if (pool->min_dcs->id < other->min_dcs->id) { 4431 rte_atomic64_set(&other->a64_dcs, 4432 rte_atomic64_read(&pool->a64_dcs)); 4433 } else { 4434 rte_atomic64_set(&pool->a64_dcs, 4435 rte_atomic64_read(&other->a64_dcs)); 4436 } 4437 } 4438 /** 4439 * Prepare a new counter and/or a new counter pool. 4440 * 4441 * @param[in] dev 4442 * Pointer to the Ethernet device structure. 4443 * @param[out] cnt_free 4444 * Where to put the pointer of a new counter. 4445 * @param[in] batch 4446 * Whether the pool is for counter that was allocated by batch command. 4447 * @param[in] age 4448 * Whether the pool is for counter that was allocated for aging. 4449 * 4450 * @return 4451 * The counter pool pointer and @p cnt_free is set on success, 4452 * NULL otherwise and rte_errno is set. 
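 *
 * Two paths are taken below: without @p batch a single devX counter is
 * allocated (bulk bitmap 0) and attached to an existing or newly created
 * pool; with @p batch a bulk of 4 * 128 counters is allocated (bulk bitmap
 * 0x4) and all but the returned first counter are appended to the
 * container free list.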
4453 */ 4454 static struct mlx5_flow_counter_pool * 4455 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev, 4456 struct mlx5_flow_counter **cnt_free, 4457 uint32_t batch, uint32_t age) 4458 { 4459 struct mlx5_priv *priv = dev->data->dev_private; 4460 struct mlx5_pools_container *cont; 4461 struct mlx5_flow_counter_pool *pool; 4462 struct mlx5_counters tmp_tq; 4463 struct mlx5_devx_obj *dcs = NULL; 4464 struct mlx5_flow_counter *cnt; 4465 uint32_t i; 4466 4467 cont = MLX5_CNT_CONTAINER(priv->sh, batch, age); 4468 if (!batch) { 4469 /* bulk_bitmap must be 0 for single counter allocation. */ 4470 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0); 4471 if (!dcs) 4472 return NULL; 4473 pool = flow_dv_find_pool_by_id(cont, dcs->id); 4474 if (!pool) { 4475 pool = flow_dv_pool_create(dev, dcs, batch, age); 4476 if (!pool) { 4477 mlx5_devx_cmd_destroy(dcs); 4478 return NULL; 4479 } 4480 } else if (dcs->id < pool->min_dcs->id) { 4481 rte_atomic64_set(&pool->a64_dcs, 4482 (int64_t)(uintptr_t)dcs); 4483 } 4484 flow_dv_counter_update_min_dcs(dev, 4485 pool, batch, age); 4486 i = dcs->id % MLX5_COUNTERS_PER_POOL; 4487 cnt = MLX5_POOL_GET_CNT(pool, i); 4488 cnt->pool = pool; 4489 MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs; 4490 *cnt_free = cnt; 4491 return pool; 4492 } 4493 /* bulk_bitmap is in 128 counters units. */ 4494 if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) 4495 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4); 4496 if (!dcs) { 4497 rte_errno = ENODATA; 4498 return NULL; 4499 } 4500 pool = flow_dv_pool_create(dev, dcs, batch, age); 4501 if (!pool) { 4502 mlx5_devx_cmd_destroy(dcs); 4503 return NULL; 4504 } 4505 TAILQ_INIT(&tmp_tq); 4506 for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) { 4507 cnt = MLX5_POOL_GET_CNT(pool, i); 4508 cnt->pool = pool; 4509 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next); 4510 } 4511 rte_spinlock_lock(&cont->csl); 4512 TAILQ_CONCAT(&cont->counters, &tmp_tq, next); 4513 rte_spinlock_unlock(&cont->csl); 4514 *cnt_free = MLX5_POOL_GET_CNT(pool, 0); 4515 (*cnt_free)->pool = pool; 4516 return pool; 4517 } 4518 4519 /** 4520 * Search for an existing shared counter. 4521 * 4522 * @param[in] dev 4523 * Pointer to the Ethernet device structure. 4524 * @param[in] id 4525 * The shared counter ID to search. 4526 * @param[out] ppool 4527 * mlx5 flow counter pool in the container. 4528 * 4529 * @return 4530 * NULL if it does not exist, otherwise a pointer to the shared extended counter. 4531 */ 4532 static struct mlx5_flow_counter_ext * 4533 flow_dv_counter_shared_search(struct rte_eth_dev *dev, uint32_t id, 4534 struct mlx5_flow_counter_pool **ppool) 4535 { 4536 struct mlx5_priv *priv = dev->data->dev_private; 4537 union mlx5_l3t_data data; 4538 uint32_t cnt_idx; 4539 4540 if (mlx5_l3t_get_entry(priv->sh->cnt_id_tbl, id, &data) || !data.dword) 4541 return NULL; 4542 cnt_idx = data.dword; 4543 /* 4544 * Shared counters don't have age info. The counter extension is placed 4545 * right after the counter data structure. 4546 */ 4547 return (struct mlx5_flow_counter_ext *) 4548 ((flow_dv_counter_get_by_idx(dev, cnt_idx, ppool)) + 1); 4549 } 4550 4551 /** 4552 * Allocate a flow counter. 4553 * 4554 * @param[in] dev 4555 * Pointer to the Ethernet device structure. 4556 * @param[in] shared 4557 * Indicate if this counter is shared with other flows. 4558 * @param[in] id 4559 * Counter identifier. 4560 * @param[in] group 4561 * Counter flow group. 4562 * @param[in] age 4563 * Whether the counter was allocated for aging.
4564 * 4565 * @return 4566 * Index to flow counter on success, 0 otherwise and rte_errno is set. 4567 */ 4568 static uint32_t 4569 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id, 4570 uint16_t group, uint32_t age) 4571 { 4572 struct mlx5_priv *priv = dev->data->dev_private; 4573 struct mlx5_flow_counter_pool *pool = NULL; 4574 struct mlx5_flow_counter *cnt_free = NULL; 4575 struct mlx5_flow_counter_ext *cnt_ext = NULL; 4576 /* 4577 * Currently group 0 flow counter cannot be assigned to a flow if it is 4578 * not the first one in the batch counter allocation, so it is better 4579 * to allocate counters one by one for these flows in a separate 4580 * container. 4581 * A counter can be shared between different groups so need to take 4582 * shared counters from the single container. 4583 */ 4584 uint32_t batch = (group && !shared && !priv->counter_fallback) ? 1 : 0; 4585 struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch, 4586 age); 4587 uint32_t cnt_idx; 4588 4589 if (!priv->config.devx) { 4590 rte_errno = ENOTSUP; 4591 return 0; 4592 } 4593 if (shared) { 4594 cnt_ext = flow_dv_counter_shared_search(dev, id, &pool); 4595 if (cnt_ext) { 4596 if (cnt_ext->ref_cnt + 1 == 0) { 4597 rte_errno = E2BIG; 4598 return 0; 4599 } 4600 cnt_ext->ref_cnt++; 4601 cnt_idx = pool->index * MLX5_COUNTERS_PER_POOL + 4602 (cnt_ext->dcs->id % MLX5_COUNTERS_PER_POOL) 4603 + 1; 4604 return cnt_idx; 4605 } 4606 } 4607 /* Get free counters from container. */ 4608 rte_spinlock_lock(&cont->csl); 4609 cnt_free = TAILQ_FIRST(&cont->counters); 4610 if (cnt_free) 4611 TAILQ_REMOVE(&cont->counters, cnt_free, next); 4612 rte_spinlock_unlock(&cont->csl); 4613 if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, 4614 batch, age)) 4615 goto err; 4616 pool = cnt_free->pool; 4617 if (!batch) 4618 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free); 4619 /* Create a DV counter action only in the first time usage. */ 4620 if (!cnt_free->action) { 4621 uint16_t offset; 4622 struct mlx5_devx_obj *dcs; 4623 int ret; 4624 4625 if (batch) { 4626 offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free); 4627 dcs = pool->min_dcs; 4628 } else { 4629 offset = 0; 4630 dcs = cnt_ext->dcs; 4631 } 4632 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset, 4633 &cnt_free->action); 4634 if (ret) { 4635 rte_errno = errno; 4636 goto err; 4637 } 4638 } 4639 cnt_idx = MLX5_MAKE_CNT_IDX(pool->index, 4640 MLX5_CNT_ARRAY_IDX(pool, cnt_free)); 4641 cnt_idx += batch * MLX5_CNT_BATCH_OFFSET; 4642 cnt_idx += age * MLX5_CNT_AGE_OFFSET; 4643 /* Update the counter reset values. */ 4644 if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits, 4645 &cnt_free->bytes)) 4646 goto err; 4647 if (cnt_ext) { 4648 cnt_ext->shared = shared; 4649 cnt_ext->ref_cnt = 1; 4650 cnt_ext->id = id; 4651 if (shared) { 4652 union mlx5_l3t_data data; 4653 4654 data.dword = cnt_idx; 4655 if (mlx5_l3t_set_entry(priv->sh->cnt_id_tbl, id, &data)) 4656 return 0; 4657 } 4658 } 4659 if (!priv->counter_fallback && !priv->sh->cmng.query_thread_on) 4660 /* Start the asynchronous batch query by the host thread. */ 4661 mlx5_set_query_alarm(priv->sh); 4662 return cnt_idx; 4663 err: 4664 if (cnt_free) { 4665 cnt_free->pool = pool; 4666 rte_spinlock_lock(&cont->csl); 4667 TAILQ_INSERT_TAIL(&cont->counters, cnt_free, next); 4668 rte_spinlock_unlock(&cont->csl); 4669 } 4670 return 0; 4671 } 4672 4673 /** 4674 * Get age param from counter index. 4675 * 4676 * @param[in] dev 4677 * Pointer to the Ethernet device structure. 
4678 * @param[in] counter 4679 * Index to the counter handler. 4680 * 4681 * @return 4682 * The aging parameter specified for the counter index. 4683 */ 4684 static struct mlx5_age_param* 4685 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev, 4686 uint32_t counter) 4687 { 4688 struct mlx5_flow_counter *cnt; 4689 struct mlx5_flow_counter_pool *pool = NULL; 4690 4691 flow_dv_counter_get_by_idx(dev, counter, &pool); 4692 counter = (counter - 1) % MLX5_COUNTERS_PER_POOL; 4693 cnt = MLX5_POOL_GET_CNT(pool, counter); 4694 return MLX5_CNT_TO_AGE(cnt); 4695 } 4696 4697 /** 4698 * Remove a flow counter from the aged counter list. 4699 * 4700 * @param[in] dev 4701 * Pointer to the Ethernet device structure. 4702 * @param[in] counter 4703 * Index to the counter handler. 4704 * @param[in] cnt 4705 * Pointer to the counter handler. 4706 */ 4707 static void 4708 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev, 4709 uint32_t counter, struct mlx5_flow_counter *cnt) 4710 { 4711 struct mlx5_age_info *age_info; 4712 struct mlx5_age_param *age_param; 4713 struct mlx5_priv *priv = dev->data->dev_private; 4714 4715 age_info = GET_PORT_AGE_INFO(priv); 4716 age_param = flow_dv_counter_idx_get_age(dev, counter); 4717 if (rte_atomic16_cmpset((volatile uint16_t *) 4718 &age_param->state, 4719 AGE_CANDIDATE, AGE_FREE) 4720 != AGE_CANDIDATE) { 4721 /* 4722 * We need the lock even if it is an age timeout, 4723 * since the counter may still be in process. 4724 */ 4725 rte_spinlock_lock(&age_info->aged_sl); 4726 TAILQ_REMOVE(&age_info->aged_counters, cnt, next); 4727 rte_spinlock_unlock(&age_info->aged_sl); 4728 } 4729 rte_atomic16_set(&age_param->state, AGE_FREE); 4730 } 4731 /** 4732 * Release a flow counter. 4733 * 4734 * @param[in] dev 4735 * Pointer to the Ethernet device structure. 4736 * @param[in] counter 4737 * Index to the counter handler. 4738 */ 4739 static void 4740 flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter) 4741 { 4742 struct mlx5_priv *priv = dev->data->dev_private; 4743 struct mlx5_flow_counter_pool *pool = NULL; 4744 struct mlx5_flow_counter *cnt; 4745 struct mlx5_flow_counter_ext *cnt_ext = NULL; 4746 4747 if (!counter) 4748 return; 4749 cnt = flow_dv_counter_get_by_idx(dev, counter, &pool); 4750 MLX5_ASSERT(pool); 4751 if (counter < MLX5_CNT_BATCH_OFFSET) { 4752 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt); 4753 if (cnt_ext) { 4754 if (--cnt_ext->ref_cnt) 4755 return; 4756 if (cnt_ext->shared) 4757 mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, 4758 cnt_ext->id); 4759 } 4760 } 4761 if (IS_AGE_POOL(pool)) 4762 flow_dv_counter_remove_from_age(dev, counter, cnt); 4763 cnt->pool = pool; 4764 /* 4765 * Put the counter back to the list to be updated in non-fallback mode. 4766 * Currently, two lists are used alternately: while one is being queried, 4767 * freed counters are added to the other list, selected by the pool 4768 * query_gen value. After the query finishes, that list is appended to 4769 * the global container counter list. The lists switch when a query 4770 * starts, so no lock is needed here as the query callback and the 4771 * release function operate on different lists. 4772 * 4773 */ 4774 if (!priv->counter_fallback) 4775 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next); 4776 else 4777 TAILQ_INSERT_TAIL(&((MLX5_CNT_CONTAINER 4778 (priv->sh, 0, 0))->counters), 4779 cnt, next); 4780 } 4781 4782 /** 4783 * Verify the @p attributes will be correctly understood by the NIC and store 4784 * them in the @p flow if everything is correct.
4785 * 4786 * @param[in] dev 4787 * Pointer to dev struct. 4788 * @param[in] attributes 4789 * Pointer to flow attributes 4790 * @param[in] external 4791 * This flow rule is created by request external to PMD. 4792 * @param[out] error 4793 * Pointer to error structure. 4794 * 4795 * @return 4796 * - 0 on success and non root table. 4797 * - 1 on success and root table. 4798 * - a negative errno value otherwise and rte_errno is set. 4799 */ 4800 static int 4801 flow_dv_validate_attributes(struct rte_eth_dev *dev, 4802 const struct rte_flow_attr *attributes, 4803 bool external __rte_unused, 4804 struct rte_flow_error *error) 4805 { 4806 struct mlx5_priv *priv = dev->data->dev_private; 4807 uint32_t priority_max = priv->config.flow_prio - 1; 4808 int ret = 0; 4809 4810 #ifndef HAVE_MLX5DV_DR 4811 if (attributes->group) 4812 return rte_flow_error_set(error, ENOTSUP, 4813 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, 4814 NULL, 4815 "groups are not supported"); 4816 #else 4817 uint32_t table = 0; 4818 4819 ret = mlx5_flow_group_to_table(attributes, external, 4820 attributes->group, !!priv->fdb_def_rule, 4821 &table, error); 4822 if (ret) 4823 return ret; 4824 if (!table) 4825 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL; 4826 #endif 4827 if (attributes->priority != MLX5_FLOW_PRIO_RSVD && 4828 attributes->priority >= priority_max) 4829 return rte_flow_error_set(error, ENOTSUP, 4830 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, 4831 NULL, 4832 "priority out of range"); 4833 if (attributes->transfer) { 4834 if (!priv->config.dv_esw_en) 4835 return rte_flow_error_set 4836 (error, ENOTSUP, 4837 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 4838 "E-Switch dr is not supported"); 4839 if (!(priv->representor || priv->master)) 4840 return rte_flow_error_set 4841 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 4842 NULL, "E-Switch configuration can only be" 4843 " done by a master or a representor device"); 4844 if (attributes->egress) 4845 return rte_flow_error_set 4846 (error, ENOTSUP, 4847 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes, 4848 "egress is not supported"); 4849 } 4850 if (!(attributes->egress ^ attributes->ingress)) 4851 return rte_flow_error_set(error, ENOTSUP, 4852 RTE_FLOW_ERROR_TYPE_ATTR, NULL, 4853 "must specify exactly one of " 4854 "ingress or egress"); 4855 return ret; 4856 } 4857 4858 /** 4859 * Internal validation function. For validating both actions and items. 4860 * 4861 * @param[in] dev 4862 * Pointer to the rte_eth_dev structure. 4863 * @param[in] attr 4864 * Pointer to the flow attributes. 4865 * @param[in] items 4866 * Pointer to the list of items. 4867 * @param[in] actions 4868 * Pointer to the list of actions. 4869 * @param[in] external 4870 * This flow rule is created by request external to PMD. 4871 * @param[in] hairpin 4872 * Number of hairpin TX actions, 0 means classic flow. 4873 * @param[out] error 4874 * Pointer to the error structure. 4875 * 4876 * @return 4877 * 0 on success, a negative errno value otherwise and rte_errno is set. 
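 *
 * The validation below first checks the attributes, then walks the item
 * list and the action list while accumulating item_flags, action_flags and
 * the modify-header budget, and finally enforces the cross-action
 * constraints (fate actions, E-Switch restrictions, encap/decap
 * combinations).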
4878 */ 4879 static int 4880 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, 4881 const struct rte_flow_item items[], 4882 const struct rte_flow_action actions[], 4883 bool external, int hairpin, struct rte_flow_error *error) 4884 { 4885 int ret; 4886 uint64_t action_flags = 0; 4887 uint64_t item_flags = 0; 4888 uint64_t last_item = 0; 4889 uint8_t next_protocol = 0xff; 4890 uint16_t ether_type = 0; 4891 int actions_n = 0; 4892 uint8_t item_ipv6_proto = 0; 4893 const struct rte_flow_item *gre_item = NULL; 4894 const struct rte_flow_action_raw_decap *decap; 4895 const struct rte_flow_action_raw_encap *encap; 4896 const struct rte_flow_action_rss *rss; 4897 const struct rte_flow_item_tcp nic_tcp_mask = { 4898 .hdr = { 4899 .tcp_flags = 0xFF, 4900 .src_port = RTE_BE16(UINT16_MAX), 4901 .dst_port = RTE_BE16(UINT16_MAX), 4902 } 4903 }; 4904 const struct rte_flow_item_ipv4 nic_ipv4_mask = { 4905 .hdr = { 4906 .src_addr = RTE_BE32(0xffffffff), 4907 .dst_addr = RTE_BE32(0xffffffff), 4908 .type_of_service = 0xff, 4909 .next_proto_id = 0xff, 4910 .time_to_live = 0xff, 4911 }, 4912 }; 4913 const struct rte_flow_item_ipv6 nic_ipv6_mask = { 4914 .hdr = { 4915 .src_addr = 4916 "\xff\xff\xff\xff\xff\xff\xff\xff" 4917 "\xff\xff\xff\xff\xff\xff\xff\xff", 4918 .dst_addr = 4919 "\xff\xff\xff\xff\xff\xff\xff\xff" 4920 "\xff\xff\xff\xff\xff\xff\xff\xff", 4921 .vtc_flow = RTE_BE32(0xffffffff), 4922 .proto = 0xff, 4923 .hop_limits = 0xff, 4924 }, 4925 }; 4926 struct mlx5_priv *priv = dev->data->dev_private; 4927 struct mlx5_dev_config *dev_conf = &priv->config; 4928 uint16_t queue_index = 0xFFFF; 4929 const struct rte_flow_item_vlan *vlan_m = NULL; 4930 int16_t rw_act_num = 0; 4931 uint64_t is_root; 4932 4933 if (items == NULL) 4934 return -1; 4935 ret = flow_dv_validate_attributes(dev, attr, external, error); 4936 if (ret < 0) 4937 return ret; 4938 is_root = (uint64_t)ret; 4939 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { 4940 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 4941 int type = items->type; 4942 4943 if (!mlx5_flow_os_item_supported(type)) 4944 return rte_flow_error_set(error, ENOTSUP, 4945 RTE_FLOW_ERROR_TYPE_ITEM, 4946 NULL, "item not supported"); 4947 switch (type) { 4948 case RTE_FLOW_ITEM_TYPE_VOID: 4949 break; 4950 case RTE_FLOW_ITEM_TYPE_PORT_ID: 4951 ret = flow_dv_validate_item_port_id 4952 (dev, items, attr, item_flags, error); 4953 if (ret < 0) 4954 return ret; 4955 last_item = MLX5_FLOW_ITEM_PORT_ID; 4956 break; 4957 case RTE_FLOW_ITEM_TYPE_ETH: 4958 ret = mlx5_flow_validate_item_eth(items, item_flags, 4959 error); 4960 if (ret < 0) 4961 return ret; 4962 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : 4963 MLX5_FLOW_LAYER_OUTER_L2; 4964 if (items->mask != NULL && items->spec != NULL) { 4965 ether_type = 4966 ((const struct rte_flow_item_eth *) 4967 items->spec)->type; 4968 ether_type &= 4969 ((const struct rte_flow_item_eth *) 4970 items->mask)->type; 4971 ether_type = rte_be_to_cpu_16(ether_type); 4972 } else { 4973 ether_type = 0; 4974 } 4975 break; 4976 case RTE_FLOW_ITEM_TYPE_VLAN: 4977 ret = flow_dv_validate_item_vlan(items, item_flags, 4978 dev, error); 4979 if (ret < 0) 4980 return ret; 4981 last_item = tunnel ? 
MLX5_FLOW_LAYER_INNER_VLAN : 4982 MLX5_FLOW_LAYER_OUTER_VLAN; 4983 if (items->mask != NULL && items->spec != NULL) { 4984 ether_type = 4985 ((const struct rte_flow_item_vlan *) 4986 items->spec)->inner_type; 4987 ether_type &= 4988 ((const struct rte_flow_item_vlan *) 4989 items->mask)->inner_type; 4990 ether_type = rte_be_to_cpu_16(ether_type); 4991 } else { 4992 ether_type = 0; 4993 } 4994 /* Store outer VLAN mask for of_push_vlan action. */ 4995 if (!tunnel) 4996 vlan_m = items->mask; 4997 break; 4998 case RTE_FLOW_ITEM_TYPE_IPV4: 4999 mlx5_flow_tunnel_ip_check(items, next_protocol, 5000 &item_flags, &tunnel); 5001 ret = mlx5_flow_validate_item_ipv4(items, item_flags, 5002 last_item, 5003 ether_type, 5004 &nic_ipv4_mask, 5005 error); 5006 if (ret < 0) 5007 return ret; 5008 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : 5009 MLX5_FLOW_LAYER_OUTER_L3_IPV4; 5010 if (items->mask != NULL && 5011 ((const struct rte_flow_item_ipv4 *) 5012 items->mask)->hdr.next_proto_id) { 5013 next_protocol = 5014 ((const struct rte_flow_item_ipv4 *) 5015 (items->spec))->hdr.next_proto_id; 5016 next_protocol &= 5017 ((const struct rte_flow_item_ipv4 *) 5018 (items->mask))->hdr.next_proto_id; 5019 } else { 5020 /* Reset for inner layer. */ 5021 next_protocol = 0xff; 5022 } 5023 break; 5024 case RTE_FLOW_ITEM_TYPE_IPV6: 5025 mlx5_flow_tunnel_ip_check(items, next_protocol, 5026 &item_flags, &tunnel); 5027 ret = mlx5_flow_validate_item_ipv6(items, item_flags, 5028 last_item, 5029 ether_type, 5030 &nic_ipv6_mask, 5031 error); 5032 if (ret < 0) 5033 return ret; 5034 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : 5035 MLX5_FLOW_LAYER_OUTER_L3_IPV6; 5036 if (items->mask != NULL && 5037 ((const struct rte_flow_item_ipv6 *) 5038 items->mask)->hdr.proto) { 5039 item_ipv6_proto = 5040 ((const struct rte_flow_item_ipv6 *) 5041 items->spec)->hdr.proto; 5042 next_protocol = 5043 ((const struct rte_flow_item_ipv6 *) 5044 items->spec)->hdr.proto; 5045 next_protocol &= 5046 ((const struct rte_flow_item_ipv6 *) 5047 items->mask)->hdr.proto; 5048 } else { 5049 /* Reset for inner layer. */ 5050 next_protocol = 0xff; 5051 } 5052 break; 5053 case RTE_FLOW_ITEM_TYPE_TCP: 5054 ret = mlx5_flow_validate_item_tcp 5055 (items, item_flags, 5056 next_protocol, 5057 &nic_tcp_mask, 5058 error); 5059 if (ret < 0) 5060 return ret; 5061 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP : 5062 MLX5_FLOW_LAYER_OUTER_L4_TCP; 5063 break; 5064 case RTE_FLOW_ITEM_TYPE_UDP: 5065 ret = mlx5_flow_validate_item_udp(items, item_flags, 5066 next_protocol, 5067 error); 5068 if (ret < 0) 5069 return ret; 5070 last_item = tunnel ? 
MLX5_FLOW_LAYER_INNER_L4_UDP : 5071 MLX5_FLOW_LAYER_OUTER_L4_UDP; 5072 break; 5073 case RTE_FLOW_ITEM_TYPE_GRE: 5074 ret = mlx5_flow_validate_item_gre(items, item_flags, 5075 next_protocol, error); 5076 if (ret < 0) 5077 return ret; 5078 gre_item = items; 5079 last_item = MLX5_FLOW_LAYER_GRE; 5080 break; 5081 case RTE_FLOW_ITEM_TYPE_NVGRE: 5082 ret = mlx5_flow_validate_item_nvgre(items, item_flags, 5083 next_protocol, 5084 error); 5085 if (ret < 0) 5086 return ret; 5087 last_item = MLX5_FLOW_LAYER_NVGRE; 5088 break; 5089 case RTE_FLOW_ITEM_TYPE_GRE_KEY: 5090 ret = mlx5_flow_validate_item_gre_key 5091 (items, item_flags, gre_item, error); 5092 if (ret < 0) 5093 return ret; 5094 last_item = MLX5_FLOW_LAYER_GRE_KEY; 5095 break; 5096 case RTE_FLOW_ITEM_TYPE_VXLAN: 5097 ret = mlx5_flow_validate_item_vxlan(items, item_flags, 5098 error); 5099 if (ret < 0) 5100 return ret; 5101 last_item = MLX5_FLOW_LAYER_VXLAN; 5102 break; 5103 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: 5104 ret = mlx5_flow_validate_item_vxlan_gpe(items, 5105 item_flags, dev, 5106 error); 5107 if (ret < 0) 5108 return ret; 5109 last_item = MLX5_FLOW_LAYER_VXLAN_GPE; 5110 break; 5111 case RTE_FLOW_ITEM_TYPE_GENEVE: 5112 ret = mlx5_flow_validate_item_geneve(items, 5113 item_flags, dev, 5114 error); 5115 if (ret < 0) 5116 return ret; 5117 last_item = MLX5_FLOW_LAYER_GENEVE; 5118 break; 5119 case RTE_FLOW_ITEM_TYPE_MPLS: 5120 ret = mlx5_flow_validate_item_mpls(dev, items, 5121 item_flags, 5122 last_item, error); 5123 if (ret < 0) 5124 return ret; 5125 last_item = MLX5_FLOW_LAYER_MPLS; 5126 break; 5127 5128 case RTE_FLOW_ITEM_TYPE_MARK: 5129 ret = flow_dv_validate_item_mark(dev, items, attr, 5130 error); 5131 if (ret < 0) 5132 return ret; 5133 last_item = MLX5_FLOW_ITEM_MARK; 5134 break; 5135 case RTE_FLOW_ITEM_TYPE_META: 5136 ret = flow_dv_validate_item_meta(dev, items, attr, 5137 error); 5138 if (ret < 0) 5139 return ret; 5140 last_item = MLX5_FLOW_ITEM_METADATA; 5141 break; 5142 case RTE_FLOW_ITEM_TYPE_ICMP: 5143 ret = mlx5_flow_validate_item_icmp(items, item_flags, 5144 next_protocol, 5145 error); 5146 if (ret < 0) 5147 return ret; 5148 last_item = MLX5_FLOW_LAYER_ICMP; 5149 break; 5150 case RTE_FLOW_ITEM_TYPE_ICMP6: 5151 ret = mlx5_flow_validate_item_icmp6(items, item_flags, 5152 next_protocol, 5153 error); 5154 if (ret < 0) 5155 return ret; 5156 item_ipv6_proto = IPPROTO_ICMPV6; 5157 last_item = MLX5_FLOW_LAYER_ICMP6; 5158 break; 5159 case RTE_FLOW_ITEM_TYPE_TAG: 5160 ret = flow_dv_validate_item_tag(dev, items, 5161 attr, error); 5162 if (ret < 0) 5163 return ret; 5164 last_item = MLX5_FLOW_ITEM_TAG; 5165 break; 5166 case MLX5_RTE_FLOW_ITEM_TYPE_TAG: 5167 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE: 5168 break; 5169 case RTE_FLOW_ITEM_TYPE_GTP: 5170 ret = flow_dv_validate_item_gtp(dev, items, item_flags, 5171 error); 5172 if (ret < 0) 5173 return ret; 5174 last_item = MLX5_FLOW_LAYER_GTP; 5175 break; 5176 default: 5177 return rte_flow_error_set(error, ENOTSUP, 5178 RTE_FLOW_ERROR_TYPE_ITEM, 5179 NULL, "item not supported"); 5180 } 5181 item_flags |= last_item; 5182 } 5183 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 5184 int type = actions->type; 5185 5186 if (!mlx5_flow_os_action_supported(type)) 5187 return rte_flow_error_set(error, ENOTSUP, 5188 RTE_FLOW_ERROR_TYPE_ACTION, 5189 actions, 5190 "action not supported"); 5191 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS) 5192 return rte_flow_error_set(error, ENOTSUP, 5193 RTE_FLOW_ERROR_TYPE_ACTION, 5194 actions, "too many actions"); 5195 switch (type) { 5196 case 
RTE_FLOW_ACTION_TYPE_VOID: 5197 break; 5198 case RTE_FLOW_ACTION_TYPE_PORT_ID: 5199 ret = flow_dv_validate_action_port_id(dev, 5200 action_flags, 5201 actions, 5202 attr, 5203 error); 5204 if (ret) 5205 return ret; 5206 action_flags |= MLX5_FLOW_ACTION_PORT_ID; 5207 ++actions_n; 5208 break; 5209 case RTE_FLOW_ACTION_TYPE_FLAG: 5210 ret = flow_dv_validate_action_flag(dev, action_flags, 5211 attr, error); 5212 if (ret < 0) 5213 return ret; 5214 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 5215 /* Count all modify-header actions as one. */ 5216 if (!(action_flags & 5217 MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5218 ++actions_n; 5219 action_flags |= MLX5_FLOW_ACTION_FLAG | 5220 MLX5_FLOW_ACTION_MARK_EXT; 5221 } else { 5222 action_flags |= MLX5_FLOW_ACTION_FLAG; 5223 ++actions_n; 5224 } 5225 rw_act_num += MLX5_ACT_NUM_SET_MARK; 5226 break; 5227 case RTE_FLOW_ACTION_TYPE_MARK: 5228 ret = flow_dv_validate_action_mark(dev, actions, 5229 action_flags, 5230 attr, error); 5231 if (ret < 0) 5232 return ret; 5233 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 5234 /* Count all modify-header actions as one. */ 5235 if (!(action_flags & 5236 MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5237 ++actions_n; 5238 action_flags |= MLX5_FLOW_ACTION_MARK | 5239 MLX5_FLOW_ACTION_MARK_EXT; 5240 } else { 5241 action_flags |= MLX5_FLOW_ACTION_MARK; 5242 ++actions_n; 5243 } 5244 rw_act_num += MLX5_ACT_NUM_SET_MARK; 5245 break; 5246 case RTE_FLOW_ACTION_TYPE_SET_META: 5247 ret = flow_dv_validate_action_set_meta(dev, actions, 5248 action_flags, 5249 attr, error); 5250 if (ret < 0) 5251 return ret; 5252 /* Count all modify-header actions as one action. */ 5253 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5254 ++actions_n; 5255 action_flags |= MLX5_FLOW_ACTION_SET_META; 5256 rw_act_num += MLX5_ACT_NUM_SET_META; 5257 break; 5258 case RTE_FLOW_ACTION_TYPE_SET_TAG: 5259 ret = flow_dv_validate_action_set_tag(dev, actions, 5260 action_flags, 5261 attr, error); 5262 if (ret < 0) 5263 return ret; 5264 /* Count all modify-header actions as one action. 
*/ 5265 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5266 ++actions_n; 5267 action_flags |= MLX5_FLOW_ACTION_SET_TAG; 5268 rw_act_num += MLX5_ACT_NUM_SET_TAG; 5269 break; 5270 case RTE_FLOW_ACTION_TYPE_DROP: 5271 ret = mlx5_flow_validate_action_drop(action_flags, 5272 attr, error); 5273 if (ret < 0) 5274 return ret; 5275 action_flags |= MLX5_FLOW_ACTION_DROP; 5276 ++actions_n; 5277 break; 5278 case RTE_FLOW_ACTION_TYPE_QUEUE: 5279 ret = mlx5_flow_validate_action_queue(actions, 5280 action_flags, dev, 5281 attr, error); 5282 if (ret < 0) 5283 return ret; 5284 queue_index = ((const struct rte_flow_action_queue *) 5285 (actions->conf))->index; 5286 action_flags |= MLX5_FLOW_ACTION_QUEUE; 5287 ++actions_n; 5288 break; 5289 case RTE_FLOW_ACTION_TYPE_RSS: 5290 rss = actions->conf; 5291 ret = mlx5_flow_validate_action_rss(actions, 5292 action_flags, dev, 5293 attr, item_flags, 5294 error); 5295 if (ret < 0) 5296 return ret; 5297 if (rss != NULL && rss->queue_num) 5298 queue_index = rss->queue[0]; 5299 action_flags |= MLX5_FLOW_ACTION_RSS; 5300 ++actions_n; 5301 break; 5302 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS: 5303 ret = 5304 mlx5_flow_validate_action_default_miss(action_flags, 5305 attr, error); 5306 if (ret < 0) 5307 return ret; 5308 action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS; 5309 ++actions_n; 5310 break; 5311 case RTE_FLOW_ACTION_TYPE_COUNT: 5312 ret = flow_dv_validate_action_count(dev, error); 5313 if (ret < 0) 5314 return ret; 5315 action_flags |= MLX5_FLOW_ACTION_COUNT; 5316 ++actions_n; 5317 break; 5318 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: 5319 if (flow_dv_validate_action_pop_vlan(dev, 5320 action_flags, 5321 actions, 5322 item_flags, attr, 5323 error)) 5324 return -rte_errno; 5325 action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN; 5326 ++actions_n; 5327 break; 5328 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 5329 ret = flow_dv_validate_action_push_vlan(dev, 5330 action_flags, 5331 vlan_m, 5332 actions, attr, 5333 error); 5334 if (ret < 0) 5335 return ret; 5336 action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN; 5337 ++actions_n; 5338 break; 5339 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: 5340 ret = flow_dv_validate_action_set_vlan_pcp 5341 (action_flags, actions, error); 5342 if (ret < 0) 5343 return ret; 5344 /* Count PCP with push_vlan command. */ 5345 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP; 5346 break; 5347 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 5348 ret = flow_dv_validate_action_set_vlan_vid 5349 (item_flags, action_flags, 5350 actions, error); 5351 if (ret < 0) 5352 return ret; 5353 /* Count VID with push_vlan command. 
*/ 5354 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID; 5355 rw_act_num += MLX5_ACT_NUM_MDF_VID; 5356 break; 5357 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 5358 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 5359 ret = flow_dv_validate_action_l2_encap(dev, 5360 action_flags, 5361 actions, attr, 5362 error); 5363 if (ret < 0) 5364 return ret; 5365 action_flags |= MLX5_FLOW_ACTION_ENCAP; 5366 ++actions_n; 5367 break; 5368 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: 5369 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: 5370 ret = flow_dv_validate_action_decap(dev, action_flags, 5371 attr, error); 5372 if (ret < 0) 5373 return ret; 5374 action_flags |= MLX5_FLOW_ACTION_DECAP; 5375 ++actions_n; 5376 break; 5377 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 5378 ret = flow_dv_validate_action_raw_encap_decap 5379 (dev, NULL, actions->conf, attr, &action_flags, 5380 &actions_n, error); 5381 if (ret < 0) 5382 return ret; 5383 break; 5384 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 5385 decap = actions->conf; 5386 while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID) 5387 ; 5388 if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { 5389 encap = NULL; 5390 actions--; 5391 } else { 5392 encap = actions->conf; 5393 } 5394 ret = flow_dv_validate_action_raw_encap_decap 5395 (dev, 5396 decap ? decap : &empty_decap, encap, 5397 attr, &action_flags, &actions_n, 5398 error); 5399 if (ret < 0) 5400 return ret; 5401 break; 5402 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: 5403 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: 5404 ret = flow_dv_validate_action_modify_mac(action_flags, 5405 actions, 5406 item_flags, 5407 error); 5408 if (ret < 0) 5409 return ret; 5410 /* Count all modify-header actions as one action. */ 5411 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5412 ++actions_n; 5413 action_flags |= actions->type == 5414 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ? 5415 MLX5_FLOW_ACTION_SET_MAC_SRC : 5416 MLX5_FLOW_ACTION_SET_MAC_DST; 5417 /* 5418 * Even if the source and destination MAC addresses have 5419 * overlap in the header with 4B alignment, the convert 5420 * function will handle them separately and 4 SW actions 5421 * will be created. And 2 actions will be added each 5422 * time no matter how many bytes of address will be set. 5423 */ 5424 rw_act_num += MLX5_ACT_NUM_MDF_MAC; 5425 break; 5426 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: 5427 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: 5428 ret = flow_dv_validate_action_modify_ipv4(action_flags, 5429 actions, 5430 item_flags, 5431 error); 5432 if (ret < 0) 5433 return ret; 5434 /* Count all modify-header actions as one action. */ 5435 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5436 ++actions_n; 5437 action_flags |= actions->type == 5438 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ? 5439 MLX5_FLOW_ACTION_SET_IPV4_SRC : 5440 MLX5_FLOW_ACTION_SET_IPV4_DST; 5441 rw_act_num += MLX5_ACT_NUM_MDF_IPV4; 5442 break; 5443 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: 5444 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: 5445 ret = flow_dv_validate_action_modify_ipv6(action_flags, 5446 actions, 5447 item_flags, 5448 error); 5449 if (ret < 0) 5450 return ret; 5451 if (item_ipv6_proto == IPPROTO_ICMPV6) 5452 return rte_flow_error_set(error, ENOTSUP, 5453 RTE_FLOW_ERROR_TYPE_ACTION, 5454 actions, 5455 "Can't change header " 5456 "with ICMPv6 proto"); 5457 /* Count all modify-header actions as one action. */ 5458 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5459 ++actions_n; 5460 action_flags |= actions->type == 5461 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ? 
5462 MLX5_FLOW_ACTION_SET_IPV6_SRC : 5463 MLX5_FLOW_ACTION_SET_IPV6_DST; 5464 rw_act_num += MLX5_ACT_NUM_MDF_IPV6; 5465 break; 5466 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: 5467 case RTE_FLOW_ACTION_TYPE_SET_TP_DST: 5468 ret = flow_dv_validate_action_modify_tp(action_flags, 5469 actions, 5470 item_flags, 5471 error); 5472 if (ret < 0) 5473 return ret; 5474 /* Count all modify-header actions as one action. */ 5475 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5476 ++actions_n; 5477 action_flags |= actions->type == 5478 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ? 5479 MLX5_FLOW_ACTION_SET_TP_SRC : 5480 MLX5_FLOW_ACTION_SET_TP_DST; 5481 rw_act_num += MLX5_ACT_NUM_MDF_PORT; 5482 break; 5483 case RTE_FLOW_ACTION_TYPE_DEC_TTL: 5484 case RTE_FLOW_ACTION_TYPE_SET_TTL: 5485 ret = flow_dv_validate_action_modify_ttl(action_flags, 5486 actions, 5487 item_flags, 5488 error); 5489 if (ret < 0) 5490 return ret; 5491 /* Count all modify-header actions as one action. */ 5492 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5493 ++actions_n; 5494 action_flags |= actions->type == 5495 RTE_FLOW_ACTION_TYPE_SET_TTL ? 5496 MLX5_FLOW_ACTION_SET_TTL : 5497 MLX5_FLOW_ACTION_DEC_TTL; 5498 rw_act_num += MLX5_ACT_NUM_MDF_TTL; 5499 break; 5500 case RTE_FLOW_ACTION_TYPE_JUMP: 5501 ret = flow_dv_validate_action_jump(actions, 5502 action_flags, 5503 attr, external, 5504 error); 5505 if (ret) 5506 return ret; 5507 ++actions_n; 5508 action_flags |= MLX5_FLOW_ACTION_JUMP; 5509 break; 5510 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ: 5511 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ: 5512 ret = flow_dv_validate_action_modify_tcp_seq 5513 (action_flags, 5514 actions, 5515 item_flags, 5516 error); 5517 if (ret < 0) 5518 return ret; 5519 /* Count all modify-header actions as one action. */ 5520 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5521 ++actions_n; 5522 action_flags |= actions->type == 5523 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ? 5524 MLX5_FLOW_ACTION_INC_TCP_SEQ : 5525 MLX5_FLOW_ACTION_DEC_TCP_SEQ; 5526 rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ; 5527 break; 5528 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK: 5529 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK: 5530 ret = flow_dv_validate_action_modify_tcp_ack 5531 (action_flags, 5532 actions, 5533 item_flags, 5534 error); 5535 if (ret < 0) 5536 return ret; 5537 /* Count all modify-header actions as one action. */ 5538 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5539 ++actions_n; 5540 action_flags |= actions->type == 5541 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ? 5542 MLX5_FLOW_ACTION_INC_TCP_ACK : 5543 MLX5_FLOW_ACTION_DEC_TCP_ACK; 5544 rw_act_num += MLX5_ACT_NUM_MDF_TCPACK; 5545 break; 5546 case MLX5_RTE_FLOW_ACTION_TYPE_MARK: 5547 break; 5548 case MLX5_RTE_FLOW_ACTION_TYPE_TAG: 5549 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG: 5550 rw_act_num += MLX5_ACT_NUM_SET_TAG; 5551 break; 5552 case RTE_FLOW_ACTION_TYPE_METER: 5553 ret = mlx5_flow_validate_action_meter(dev, 5554 action_flags, 5555 actions, attr, 5556 error); 5557 if (ret < 0) 5558 return ret; 5559 action_flags |= MLX5_FLOW_ACTION_METER; 5560 ++actions_n; 5561 /* Meter action will add one more TAG action. 
*/ 5562 rw_act_num += MLX5_ACT_NUM_SET_TAG; 5563 break; 5564 case RTE_FLOW_ACTION_TYPE_AGE: 5565 ret = flow_dv_validate_action_age(action_flags, 5566 actions, dev, 5567 error); 5568 if (ret < 0) 5569 return ret; 5570 action_flags |= MLX5_FLOW_ACTION_AGE; 5571 ++actions_n; 5572 break; 5573 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP: 5574 ret = flow_dv_validate_action_modify_ipv4_dscp 5575 (action_flags, 5576 actions, 5577 item_flags, 5578 error); 5579 if (ret < 0) 5580 return ret; 5581 /* Count all modify-header actions as one action. */ 5582 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5583 ++actions_n; 5584 action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP; 5585 rw_act_num += MLX5_ACT_NUM_SET_DSCP; 5586 break; 5587 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: 5588 ret = flow_dv_validate_action_modify_ipv6_dscp 5589 (action_flags, 5590 actions, 5591 item_flags, 5592 error); 5593 if (ret < 0) 5594 return ret; 5595 /* Count all modify-header actions as one action. */ 5596 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) 5597 ++actions_n; 5598 action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP; 5599 rw_act_num += MLX5_ACT_NUM_SET_DSCP; 5600 break; 5601 default: 5602 return rte_flow_error_set(error, ENOTSUP, 5603 RTE_FLOW_ERROR_TYPE_ACTION, 5604 actions, 5605 "action not supported"); 5606 } 5607 } 5608 /* 5609 * Validate the drop action mutual exclusion with other actions. 5610 * Drop action is mutually-exclusive with any other action, except for 5611 * Count action. 5612 */ 5613 if ((action_flags & MLX5_FLOW_ACTION_DROP) && 5614 (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT))) 5615 return rte_flow_error_set(error, EINVAL, 5616 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 5617 "Drop action is mutually-exclusive " 5618 "with any other action, except for " 5619 "Count action"); 5620 /* Eswitch has few restrictions on using items and actions */ 5621 if (attr->transfer) { 5622 if (!mlx5_flow_ext_mreg_supported(dev) && 5623 action_flags & MLX5_FLOW_ACTION_FLAG) 5624 return rte_flow_error_set(error, ENOTSUP, 5625 RTE_FLOW_ERROR_TYPE_ACTION, 5626 NULL, 5627 "unsupported action FLAG"); 5628 if (!mlx5_flow_ext_mreg_supported(dev) && 5629 action_flags & MLX5_FLOW_ACTION_MARK) 5630 return rte_flow_error_set(error, ENOTSUP, 5631 RTE_FLOW_ERROR_TYPE_ACTION, 5632 NULL, 5633 "unsupported action MARK"); 5634 if (action_flags & MLX5_FLOW_ACTION_QUEUE) 5635 return rte_flow_error_set(error, ENOTSUP, 5636 RTE_FLOW_ERROR_TYPE_ACTION, 5637 NULL, 5638 "unsupported action QUEUE"); 5639 if (action_flags & MLX5_FLOW_ACTION_RSS) 5640 return rte_flow_error_set(error, ENOTSUP, 5641 RTE_FLOW_ERROR_TYPE_ACTION, 5642 NULL, 5643 "unsupported action RSS"); 5644 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS)) 5645 return rte_flow_error_set(error, EINVAL, 5646 RTE_FLOW_ERROR_TYPE_ACTION, 5647 actions, 5648 "no fate action is found"); 5649 } else { 5650 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress) 5651 return rte_flow_error_set(error, EINVAL, 5652 RTE_FLOW_ERROR_TYPE_ACTION, 5653 actions, 5654 "no fate action is found"); 5655 } 5656 /* Continue validation for Xcap actions.*/ 5657 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) && (queue_index == 0xFFFF || 5658 mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) { 5659 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) == 5660 MLX5_FLOW_XCAP_ACTIONS) 5661 return rte_flow_error_set(error, ENOTSUP, 5662 RTE_FLOW_ERROR_TYPE_ACTION, 5663 NULL, "encap and decap " 5664 "combination aren't supported"); 5665 if (!attr->transfer && attr->ingress && (action_flags 
& 5666 MLX5_FLOW_ACTION_ENCAP)) 5667 return rte_flow_error_set(error, ENOTSUP, 5668 RTE_FLOW_ERROR_TYPE_ACTION, 5669 NULL, "encap is not supported" 5670 " for ingress traffic"); 5671 } 5672 /* Hairpin flow will add one more TAG action. */ 5673 if (hairpin > 0) 5674 rw_act_num += MLX5_ACT_NUM_SET_TAG; 5675 /* Extra metadata enabled: one more TAG action will be added. */ 5676 if (dev_conf->dv_flow_en && 5677 dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && 5678 mlx5_flow_ext_mreg_supported(dev)) 5679 rw_act_num += MLX5_ACT_NUM_SET_TAG; 5680 if ((uint32_t)rw_act_num > 5681 flow_dv_modify_hdr_action_max(dev, is_root)) { 5682 return rte_flow_error_set(error, ENOTSUP, 5683 RTE_FLOW_ERROR_TYPE_ACTION, 5684 NULL, "too many header modify" 5685 " actions to support"); 5686 } 5687 return 0; 5688 } 5689 5690 /** 5691 * Internal preparation function. Allocates the DV flow structure; 5692 * its size is constant. 5693 * 5694 * @param[in] dev 5695 * Pointer to the rte_eth_dev structure. 5696 * @param[in] attr 5697 * Pointer to the flow attributes. 5698 * @param[in] items 5699 * Pointer to the list of items. 5700 * @param[in] actions 5701 * Pointer to the list of actions. 5702 * @param[out] error 5703 * Pointer to the error structure. 5704 * 5705 * @return 5706 * Pointer to mlx5_flow object on success, 5707 * otherwise NULL and rte_errno is set. 5708 */ 5709 static struct mlx5_flow * 5710 flow_dv_prepare(struct rte_eth_dev *dev, 5711 const struct rte_flow_attr *attr __rte_unused, 5712 const struct rte_flow_item items[] __rte_unused, 5713 const struct rte_flow_action actions[] __rte_unused, 5714 struct rte_flow_error *error) 5715 { 5716 uint32_t handle_idx = 0; 5717 struct mlx5_flow *dev_flow; 5718 struct mlx5_flow_handle *dev_handle; 5719 struct mlx5_priv *priv = dev->data->dev_private; 5720 5721 /* Sanity check to avoid corrupting the memory. */ 5722 if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) { 5723 rte_flow_error_set(error, ENOSPC, 5724 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 5725 "no free temporary device flow"); 5726 return NULL; 5727 } 5728 dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], 5729 &handle_idx); 5730 if (!dev_handle) { 5731 rte_flow_error_set(error, ENOMEM, 5732 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 5733 "not enough memory to create flow handle"); 5734 return NULL; 5735 } 5736 /* No multi-thread support. */ 5737 dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++]; 5738 dev_flow->handle = dev_handle; 5739 dev_flow->handle_idx = handle_idx; 5740 dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param); 5741 /* 5742 * The matching value needs to be cleared to 0 before use. In the 5743 * past it was cleared automatically by the rte_*alloc API. The 5744 * time consumption is almost the same as before. 5745 */ 5746 memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param)); 5747 dev_flow->ingress = attr->ingress; 5748 dev_flow->dv.transfer = attr->transfer; 5749 return dev_flow; 5750 } 5751 5752 #ifdef RTE_LIBRTE_MLX5_DEBUG 5753 /** 5754 * Sanity check for match mask and value. Similar to check_valid_spec() in 5755 * the kernel driver. If an unmasked bit is present in the value, it fails. 5756 * 5757 * @param match_mask 5758 * Pointer to the match mask buffer. 5759 * @param match_value 5760 * Pointer to the match value buffer. 5761 * 5762 * @return 5763 * 0 if valid, -EINVAL otherwise. 
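 *
 * Illustrative example (hypothetical buffers, not taken from the driver):
 * a mask byte of 0x0f with a value byte of 0x1f fails the check, because
 * bit 4 is set in the value while it is cleared in the mask:
 * @code
 *	uint8_t mask[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0x0f };
 *	uint8_t value[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0x1f };
 *
 *	flow_dv_check_valid_spec(mask, value); // returns -EINVAL
 * @endcode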
5764 */ 5765 static int 5766 flow_dv_check_valid_spec(void *match_mask, void *match_value) 5767 { 5768 uint8_t *m = match_mask; 5769 uint8_t *v = match_value; 5770 unsigned int i; 5771 5772 for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) { 5773 if (v[i] & ~m[i]) { 5774 DRV_LOG(ERR, 5775 "match_value differs from match_criteria" 5776 " %p[%u] != %p[%u]", 5777 match_value, i, match_mask, i); 5778 return -EINVAL; 5779 } 5780 } 5781 return 0; 5782 } 5783 #endif 5784 5785 /** 5786 * Add match of ip_version. 5787 * 5788 * @param[in] group 5789 * Flow group. 5790 * @param[in] headers_v 5791 * Values header pointer. 5792 * @param[in] headers_m 5793 * Masks header pointer. 5794 * @param[in] ip_version 5795 * The IP version to set. 5796 */ 5797 static inline void 5798 flow_dv_set_match_ip_version(uint32_t group, 5799 void *headers_v, 5800 void *headers_m, 5801 uint8_t ip_version) 5802 { 5803 if (group == 0) 5804 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf); 5805 else 5806 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 5807 ip_version); 5808 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version); 5809 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0); 5810 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0); 5811 } 5812 5813 /** 5814 * Add Ethernet item to matcher and to the value. 5815 * 5816 * @param[in, out] matcher 5817 * Flow matcher. 5818 * @param[in, out] key 5819 * Flow matcher value. 5820 * @param[in] item 5821 * Flow pattern to translate. 5822 * @param[in] inner 5823 * Item is inner pattern. 5824 */ 5825 static void 5826 flow_dv_translate_item_eth(void *matcher, void *key, 5827 const struct rte_flow_item *item, int inner, 5828 uint32_t group) 5829 { 5830 const struct rte_flow_item_eth *eth_m = item->mask; 5831 const struct rte_flow_item_eth *eth_v = item->spec; 5832 const struct rte_flow_item_eth nic_mask = { 5833 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", 5834 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", 5835 .type = RTE_BE16(0xffff), 5836 }; 5837 void *headers_m; 5838 void *headers_v; 5839 char *l24_v; 5840 unsigned int i; 5841 5842 if (!eth_v) 5843 return; 5844 if (!eth_m) 5845 eth_m = &nic_mask; 5846 if (inner) { 5847 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 5848 inner_headers); 5849 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 5850 } else { 5851 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 5852 outer_headers); 5853 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 5854 } 5855 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16), 5856 &eth_m->dst, sizeof(eth_m->dst)); 5857 /* The value must be in the range of the mask. */ 5858 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16); 5859 for (i = 0; i < sizeof(eth_m->dst); ++i) 5860 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i]; 5861 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16), 5862 &eth_m->src, sizeof(eth_m->src)); 5863 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16); 5864 /* The value must be in the range of the mask. */ 5865 for (i = 0; i < sizeof(eth_m->dst); ++i) 5866 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i]; 5867 if (eth_v->type) { 5868 /* When ethertype is present set mask for tagged VLAN. */ 5869 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); 5870 /* Set value for tagged VLAN if ethertype is 802.1Q. 
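 * That is, 0x8100 (802.1Q) or 0x88A8 (802.1ad QinQ); in that case
 * cvlan_tag is also set in the value and the function returns early,
 * so the ethertype match is left to a following VLAN item, if any.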
*/ 5871 if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) || 5872 eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) { 5873 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 5874 1); 5875 /* Return here to avoid setting match on ethertype. */ 5876 return; 5877 } 5878 } 5879 /* 5880 * HW supports match on one Ethertype, the Ethertype following the last 5881 * VLAN tag of the packet (see PRM). 5882 * Set match on ethertype only if ETH header is not followed by VLAN. 5883 * HW is optimized for IPv4/IPv6. In such cases, avoid setting 5884 * ethertype, and use ip_version field instead. 5885 */ 5886 if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV4) && 5887 eth_m->type == 0xFFFF) { 5888 flow_dv_set_match_ip_version(group, headers_v, headers_m, 4); 5889 } else if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV6) && 5890 eth_m->type == 0xFFFF) { 5891 flow_dv_set_match_ip_version(group, headers_v, headers_m, 6); 5892 } else { 5893 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 5894 rte_be_to_cpu_16(eth_m->type)); 5895 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 5896 ethertype); 5897 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type; 5898 } 5899 } 5900 5901 /** 5902 * Add VLAN item to matcher and to the value. 5903 * 5904 * @param[in, out] dev_flow 5905 * Flow descriptor. 5906 * @param[in, out] matcher 5907 * Flow matcher. 5908 * @param[in, out] key 5909 * Flow matcher value. 5910 * @param[in] item 5911 * Flow pattern to translate. 5912 * @param[in] inner 5913 * Item is inner pattern. 5914 */ 5915 static void 5916 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, 5917 void *matcher, void *key, 5918 const struct rte_flow_item *item, 5919 int inner, uint32_t group) 5920 { 5921 const struct rte_flow_item_vlan *vlan_m = item->mask; 5922 const struct rte_flow_item_vlan *vlan_v = item->spec; 5923 void *headers_m; 5924 void *headers_v; 5925 uint16_t tci_m; 5926 uint16_t tci_v; 5927 5928 if (inner) { 5929 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 5930 inner_headers); 5931 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 5932 } else { 5933 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 5934 outer_headers); 5935 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 5936 /* 5937 * This is workaround, masks are not supported, 5938 * and pre-validated. 5939 */ 5940 if (vlan_v) 5941 dev_flow->handle->vf_vlan.tag = 5942 rte_be_to_cpu_16(vlan_v->tci) & 0x0fff; 5943 } 5944 /* 5945 * When VLAN item exists in flow, mark packet as tagged, 5946 * even if TCI is not specified. 5947 */ 5948 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); 5949 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1); 5950 if (!vlan_v) 5951 return; 5952 if (!vlan_m) 5953 vlan_m = &rte_flow_item_vlan_mask; 5954 tci_m = rte_be_to_cpu_16(vlan_m->tci); 5955 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci); 5956 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m); 5957 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v); 5958 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12); 5959 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12); 5960 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13); 5961 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13); 5962 /* 5963 * HW is optimized for IPv4/IPv6. In such cases, avoid setting 5964 * ethertype, and use ip_version field instead. 
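 * For illustration (hypothetical pattern, not from the original sources):
 * a pattern such as "eth / vlan inner_type is 0x0800 / ipv4" is translated
 * below into a match on ip_version == 4 instead of a match on ethertype
 * 0x0800.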
5965 */ 5966 if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV4) && 5967 vlan_m->inner_type == 0xFFFF) { 5968 flow_dv_set_match_ip_version(group, headers_v, headers_m, 4); 5969 } else if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV6) && 5970 vlan_m->inner_type == 0xFFFF) { 5971 flow_dv_set_match_ip_version(group, headers_v, headers_m, 6); 5972 } else { 5973 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 5974 rte_be_to_cpu_16(vlan_m->inner_type)); 5975 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 5976 rte_be_to_cpu_16(vlan_m->inner_type & 5977 vlan_v->inner_type)); 5978 } 5979 } 5980 5981 /** 5982 * Add IPV4 item to matcher and to the value. 5983 * 5984 * @param[in, out] matcher 5985 * Flow matcher. 5986 * @param[in, out] key 5987 * Flow matcher value. 5988 * @param[in] item 5989 * Flow pattern to translate. 5990 * @param[in] item_flags 5991 * Bit-fields that holds the items detected until now. 5992 * @param[in] inner 5993 * Item is inner pattern. 5994 * @param[in] group 5995 * The group to insert the rule. 5996 */ 5997 static void 5998 flow_dv_translate_item_ipv4(void *matcher, void *key, 5999 const struct rte_flow_item *item, 6000 const uint64_t item_flags, 6001 int inner, uint32_t group) 6002 { 6003 const struct rte_flow_item_ipv4 *ipv4_m = item->mask; 6004 const struct rte_flow_item_ipv4 *ipv4_v = item->spec; 6005 const struct rte_flow_item_ipv4 nic_mask = { 6006 .hdr = { 6007 .src_addr = RTE_BE32(0xffffffff), 6008 .dst_addr = RTE_BE32(0xffffffff), 6009 .type_of_service = 0xff, 6010 .next_proto_id = 0xff, 6011 .time_to_live = 0xff, 6012 }, 6013 }; 6014 void *headers_m; 6015 void *headers_v; 6016 char *l24_m; 6017 char *l24_v; 6018 uint8_t tos; 6019 6020 if (inner) { 6021 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6022 inner_headers); 6023 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 6024 } else { 6025 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6026 outer_headers); 6027 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 6028 } 6029 flow_dv_set_match_ip_version(group, headers_v, headers_m, 4); 6030 /* 6031 * On outer header (which must contains L2), or inner header with L2, 6032 * set cvlan_tag mask bit to mark this packet as untagged. 6033 * This should be done even if item->spec is empty. 
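 * For example, a plain "eth / ipv4" pattern with no VLAN item therefore
 * matches only untagged packets: cvlan_tag is set in the mask below while
 * its value is left at zero.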
6034 */ 6035 if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2) 6036 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); 6037 if (!ipv4_v) 6038 return; 6039 if (!ipv4_m) 6040 ipv4_m = &nic_mask; 6041 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, 6042 dst_ipv4_dst_ipv6.ipv4_layout.ipv4); 6043 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 6044 dst_ipv4_dst_ipv6.ipv4_layout.ipv4); 6045 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr; 6046 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr; 6047 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, 6048 src_ipv4_src_ipv6.ipv4_layout.ipv4); 6049 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 6050 src_ipv4_src_ipv6.ipv4_layout.ipv4); 6051 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr; 6052 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr; 6053 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service; 6054 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, 6055 ipv4_m->hdr.type_of_service); 6056 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos); 6057 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, 6058 ipv4_m->hdr.type_of_service >> 2); 6059 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2); 6060 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 6061 ipv4_m->hdr.next_proto_id); 6062 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 6063 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id); 6064 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit, 6065 ipv4_m->hdr.time_to_live); 6066 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit, 6067 ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live); 6068 } 6069 6070 /** 6071 * Add IPV6 item to matcher and to the value. 6072 * 6073 * @param[in, out] matcher 6074 * Flow matcher. 6075 * @param[in, out] key 6076 * Flow matcher value. 6077 * @param[in] item 6078 * Flow pattern to translate. 6079 * @param[in] item_flags 6080 * Bit-fields that holds the items detected until now. 6081 * @param[in] inner 6082 * Item is inner pattern. 6083 * @param[in] group 6084 * The group to insert the rule. 
6085 */ 6086 static void 6087 flow_dv_translate_item_ipv6(void *matcher, void *key, 6088 const struct rte_flow_item *item, 6089 const uint64_t item_flags, 6090 int inner, uint32_t group) 6091 { 6092 const struct rte_flow_item_ipv6 *ipv6_m = item->mask; 6093 const struct rte_flow_item_ipv6 *ipv6_v = item->spec; 6094 const struct rte_flow_item_ipv6 nic_mask = { 6095 .hdr = { 6096 .src_addr = 6097 "\xff\xff\xff\xff\xff\xff\xff\xff" 6098 "\xff\xff\xff\xff\xff\xff\xff\xff", 6099 .dst_addr = 6100 "\xff\xff\xff\xff\xff\xff\xff\xff" 6101 "\xff\xff\xff\xff\xff\xff\xff\xff", 6102 .vtc_flow = RTE_BE32(0xffffffff), 6103 .proto = 0xff, 6104 .hop_limits = 0xff, 6105 }, 6106 }; 6107 void *headers_m; 6108 void *headers_v; 6109 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 6110 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 6111 char *l24_m; 6112 char *l24_v; 6113 uint32_t vtc_m; 6114 uint32_t vtc_v; 6115 int i; 6116 int size; 6117 6118 if (inner) { 6119 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6120 inner_headers); 6121 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 6122 } else { 6123 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6124 outer_headers); 6125 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 6126 } 6127 flow_dv_set_match_ip_version(group, headers_v, headers_m, 6); 6128 /* 6129 * On outer header (which must contains L2), or inner header with L2, 6130 * set cvlan_tag mask bit to mark this packet as untagged. 6131 * This should be done even if item->spec is empty. 6132 */ 6133 if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2) 6134 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); 6135 if (!ipv6_v) 6136 return; 6137 if (!ipv6_m) 6138 ipv6_m = &nic_mask; 6139 size = sizeof(ipv6_m->hdr.dst_addr); 6140 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, 6141 dst_ipv4_dst_ipv6.ipv6_layout.ipv6); 6142 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 6143 dst_ipv4_dst_ipv6.ipv6_layout.ipv6); 6144 memcpy(l24_m, ipv6_m->hdr.dst_addr, size); 6145 for (i = 0; i < size; ++i) 6146 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i]; 6147 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, 6148 src_ipv4_src_ipv6.ipv6_layout.ipv6); 6149 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 6150 src_ipv4_src_ipv6.ipv6_layout.ipv6); 6151 memcpy(l24_m, ipv6_m->hdr.src_addr, size); 6152 for (i = 0; i < size; ++i) 6153 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i]; 6154 /* TOS. */ 6155 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow); 6156 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow); 6157 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20); 6158 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20); 6159 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22); 6160 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22); 6161 /* Label. */ 6162 if (inner) { 6163 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label, 6164 vtc_m); 6165 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label, 6166 vtc_v); 6167 } else { 6168 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label, 6169 vtc_m); 6170 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label, 6171 vtc_v); 6172 } 6173 /* Protocol. */ 6174 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 6175 ipv6_m->hdr.proto); 6176 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 6177 ipv6_v->hdr.proto & ipv6_m->hdr.proto); 6178 /* Hop limit. 
*/ 6179 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit, 6180 ipv6_m->hdr.hop_limits); 6181 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit, 6182 ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits); 6183 } 6184 6185 /** 6186 * Add TCP item to matcher and to the value. 6187 * 6188 * @param[in, out] matcher 6189 * Flow matcher. 6190 * @param[in, out] key 6191 * Flow matcher value. 6192 * @param[in] item 6193 * Flow pattern to translate. 6194 * @param[in] inner 6195 * Item is inner pattern. 6196 */ 6197 static void 6198 flow_dv_translate_item_tcp(void *matcher, void *key, 6199 const struct rte_flow_item *item, 6200 int inner) 6201 { 6202 const struct rte_flow_item_tcp *tcp_m = item->mask; 6203 const struct rte_flow_item_tcp *tcp_v = item->spec; 6204 void *headers_m; 6205 void *headers_v; 6206 6207 if (inner) { 6208 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6209 inner_headers); 6210 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 6211 } else { 6212 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6213 outer_headers); 6214 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 6215 } 6216 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); 6217 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP); 6218 if (!tcp_v) 6219 return; 6220 if (!tcp_m) 6221 tcp_m = &rte_flow_item_tcp_mask; 6222 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport, 6223 rte_be_to_cpu_16(tcp_m->hdr.src_port)); 6224 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport, 6225 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port)); 6226 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport, 6227 rte_be_to_cpu_16(tcp_m->hdr.dst_port)); 6228 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport, 6229 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port)); 6230 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags, 6231 tcp_m->hdr.tcp_flags); 6232 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags, 6233 (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags)); 6234 } 6235 6236 /** 6237 * Add UDP item to matcher and to the value. 6238 * 6239 * @param[in, out] matcher 6240 * Flow matcher. 6241 * @param[in, out] key 6242 * Flow matcher value. 6243 * @param[in] item 6244 * Flow pattern to translate. 6245 * @param[in] inner 6246 * Item is inner pattern. 
6247 */ 6248 static void 6249 flow_dv_translate_item_udp(void *matcher, void *key, 6250 const struct rte_flow_item *item, 6251 int inner) 6252 { 6253 const struct rte_flow_item_udp *udp_m = item->mask; 6254 const struct rte_flow_item_udp *udp_v = item->spec; 6255 void *headers_m; 6256 void *headers_v; 6257 6258 if (inner) { 6259 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6260 inner_headers); 6261 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 6262 } else { 6263 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6264 outer_headers); 6265 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 6266 } 6267 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); 6268 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP); 6269 if (!udp_v) 6270 return; 6271 if (!udp_m) 6272 udp_m = &rte_flow_item_udp_mask; 6273 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport, 6274 rte_be_to_cpu_16(udp_m->hdr.src_port)); 6275 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, 6276 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port)); 6277 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 6278 rte_be_to_cpu_16(udp_m->hdr.dst_port)); 6279 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, 6280 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port)); 6281 } 6282 6283 /** 6284 * Add GRE optional Key item to matcher and to the value. 6285 * 6286 * @param[in, out] matcher 6287 * Flow matcher. 6288 * @param[in, out] key 6289 * Flow matcher value. 6290 * @param[in] item 6291 * Flow pattern to translate. 6292 * @param[in] inner 6293 * Item is inner pattern. 6294 */ 6295 static void 6296 flow_dv_translate_item_gre_key(void *matcher, void *key, 6297 const struct rte_flow_item *item) 6298 { 6299 const rte_be32_t *key_m = item->mask; 6300 const rte_be32_t *key_v = item->spec; 6301 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 6302 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 6303 rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX); 6304 6305 /* GRE K bit must be on and should already be validated */ 6306 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1); 6307 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1); 6308 if (!key_v) 6309 return; 6310 if (!key_m) 6311 key_m = &gre_key_default_mask; 6312 MLX5_SET(fte_match_set_misc, misc_m, gre_key_h, 6313 rte_be_to_cpu_32(*key_m) >> 8); 6314 MLX5_SET(fte_match_set_misc, misc_v, gre_key_h, 6315 rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8); 6316 MLX5_SET(fte_match_set_misc, misc_m, gre_key_l, 6317 rte_be_to_cpu_32(*key_m) & 0xFF); 6318 MLX5_SET(fte_match_set_misc, misc_v, gre_key_l, 6319 rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF); 6320 } 6321 6322 /** 6323 * Add GRE item to matcher and to the value. 6324 * 6325 * @param[in, out] matcher 6326 * Flow matcher. 6327 * @param[in, out] key 6328 * Flow matcher value. 6329 * @param[in] item 6330 * Flow pattern to translate. 6331 * @param[in] inner 6332 * Item is inner pattern. 
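 *
 * Note: the NVGRE translation below reuses this function with a synthetic
 * GRE item whose protocol is RTE_ETHER_TYPE_TEB (0x6558, Transparent
 * Ethernet Bridging).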
6333 */ 6334 static void 6335 flow_dv_translate_item_gre(void *matcher, void *key, 6336 const struct rte_flow_item *item, 6337 int inner) 6338 { 6339 const struct rte_flow_item_gre *gre_m = item->mask; 6340 const struct rte_flow_item_gre *gre_v = item->spec; 6341 void *headers_m; 6342 void *headers_v; 6343 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 6344 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 6345 struct { 6346 union { 6347 __extension__ 6348 struct { 6349 uint16_t version:3; 6350 uint16_t rsvd0:9; 6351 uint16_t s_present:1; 6352 uint16_t k_present:1; 6353 uint16_t rsvd_bit1:1; 6354 uint16_t c_present:1; 6355 }; 6356 uint16_t value; 6357 }; 6358 } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v; 6359 6360 if (inner) { 6361 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6362 inner_headers); 6363 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 6364 } else { 6365 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6366 outer_headers); 6367 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 6368 } 6369 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); 6370 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE); 6371 if (!gre_v) 6372 return; 6373 if (!gre_m) 6374 gre_m = &rte_flow_item_gre_mask; 6375 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 6376 rte_be_to_cpu_16(gre_m->protocol)); 6377 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, 6378 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol)); 6379 gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver); 6380 gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver); 6381 MLX5_SET(fte_match_set_misc, misc_m, gre_c_present, 6382 gre_crks_rsvd0_ver_m.c_present); 6383 MLX5_SET(fte_match_set_misc, misc_v, gre_c_present, 6384 gre_crks_rsvd0_ver_v.c_present & 6385 gre_crks_rsvd0_ver_m.c_present); 6386 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 6387 gre_crks_rsvd0_ver_m.k_present); 6388 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 6389 gre_crks_rsvd0_ver_v.k_present & 6390 gre_crks_rsvd0_ver_m.k_present); 6391 MLX5_SET(fte_match_set_misc, misc_m, gre_s_present, 6392 gre_crks_rsvd0_ver_m.s_present); 6393 MLX5_SET(fte_match_set_misc, misc_v, gre_s_present, 6394 gre_crks_rsvd0_ver_v.s_present & 6395 gre_crks_rsvd0_ver_m.s_present); 6396 } 6397 6398 /** 6399 * Add NVGRE item to matcher and to the value. 6400 * 6401 * @param[in, out] matcher 6402 * Flow matcher. 6403 * @param[in, out] key 6404 * Flow matcher value. 6405 * @param[in] item 6406 * Flow pattern to translate. 6407 * @param[in] inner 6408 * Item is inner pattern. 6409 */ 6410 static void 6411 flow_dv_translate_item_nvgre(void *matcher, void *key, 6412 const struct rte_flow_item *item, 6413 int inner) 6414 { 6415 const struct rte_flow_item_nvgre *nvgre_m = item->mask; 6416 const struct rte_flow_item_nvgre *nvgre_v = item->spec; 6417 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 6418 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 6419 const char *tni_flow_id_m = (const char *)nvgre_m->tni; 6420 const char *tni_flow_id_v = (const char *)nvgre_v->tni; 6421 char *gre_key_m; 6422 char *gre_key_v; 6423 int size; 6424 int i; 6425 6426 /* For NVGRE, GRE header fields must be set with defined values. 
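 * The spec below (c_rsvd0_ver = 0x2000) sets only the K (key present) bit,
 * and the 0xB000 mask covers the C, K and S bits, matching the bit-field
 * layout used in flow_dv_translate_item_gre() above.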
*/ 6427 const struct rte_flow_item_gre gre_spec = { 6428 .c_rsvd0_ver = RTE_BE16(0x2000), 6429 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB) 6430 }; 6431 const struct rte_flow_item_gre gre_mask = { 6432 .c_rsvd0_ver = RTE_BE16(0xB000), 6433 .protocol = RTE_BE16(UINT16_MAX), 6434 }; 6435 const struct rte_flow_item gre_item = { 6436 .spec = &gre_spec, 6437 .mask = &gre_mask, 6438 .last = NULL, 6439 }; 6440 flow_dv_translate_item_gre(matcher, key, &gre_item, inner); 6441 if (!nvgre_v) 6442 return; 6443 if (!nvgre_m) 6444 nvgre_m = &rte_flow_item_nvgre_mask; 6445 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id); 6446 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h); 6447 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h); 6448 memcpy(gre_key_m, tni_flow_id_m, size); 6449 for (i = 0; i < size; ++i) 6450 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i]; 6451 } 6452 6453 /** 6454 * Add VXLAN item to matcher and to the value. 6455 * 6456 * @param[in, out] matcher 6457 * Flow matcher. 6458 * @param[in, out] key 6459 * Flow matcher value. 6460 * @param[in] item 6461 * Flow pattern to translate. 6462 * @param[in] inner 6463 * Item is inner pattern. 6464 */ 6465 static void 6466 flow_dv_translate_item_vxlan(void *matcher, void *key, 6467 const struct rte_flow_item *item, 6468 int inner) 6469 { 6470 const struct rte_flow_item_vxlan *vxlan_m = item->mask; 6471 const struct rte_flow_item_vxlan *vxlan_v = item->spec; 6472 void *headers_m; 6473 void *headers_v; 6474 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 6475 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 6476 char *vni_m; 6477 char *vni_v; 6478 uint16_t dport; 6479 int size; 6480 int i; 6481 6482 if (inner) { 6483 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6484 inner_headers); 6485 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 6486 } else { 6487 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6488 outer_headers); 6489 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 6490 } 6491 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ? 6492 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE; 6493 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) { 6494 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF); 6495 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport); 6496 } 6497 if (!vxlan_v) 6498 return; 6499 if (!vxlan_m) 6500 vxlan_m = &rte_flow_item_vxlan_mask; 6501 size = sizeof(vxlan_m->vni); 6502 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni); 6503 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni); 6504 memcpy(vni_m, vxlan_m->vni, size); 6505 for (i = 0; i < size; ++i) 6506 vni_v[i] = vni_m[i] & vxlan_v->vni[i]; 6507 } 6508 6509 /** 6510 * Add VXLAN-GPE item to matcher and to the value. 6511 * 6512 * @param[in, out] matcher 6513 * Flow matcher. 6514 * @param[in, out] key 6515 * Flow matcher value. 6516 * @param[in] item 6517 * Flow pattern to translate. 6518 * @param[in] inner 6519 * Item is inner pattern. 
6520 */ 6521 6522 static void 6523 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key, 6524 const struct rte_flow_item *item, int inner) 6525 { 6526 const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask; 6527 const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec; 6528 void *headers_m; 6529 void *headers_v; 6530 void *misc_m = 6531 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3); 6532 void *misc_v = 6533 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3); 6534 char *vni_m; 6535 char *vni_v; 6536 uint16_t dport; 6537 int size; 6538 int i; 6539 uint8_t flags_m = 0xff; 6540 uint8_t flags_v = 0xc; 6541 6542 if (inner) { 6543 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6544 inner_headers); 6545 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 6546 } else { 6547 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6548 outer_headers); 6549 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 6550 } 6551 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ? 6552 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE; 6553 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) { 6554 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF); 6555 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport); 6556 } 6557 if (!vxlan_v) 6558 return; 6559 if (!vxlan_m) 6560 vxlan_m = &rte_flow_item_vxlan_gpe_mask; 6561 size = sizeof(vxlan_m->vni); 6562 vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni); 6563 vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni); 6564 memcpy(vni_m, vxlan_m->vni, size); 6565 for (i = 0; i < size; ++i) 6566 vni_v[i] = vni_m[i] & vxlan_v->vni[i]; 6567 if (vxlan_m->flags) { 6568 flags_m = vxlan_m->flags; 6569 flags_v = vxlan_v->flags; 6570 } 6571 MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m); 6572 MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v); 6573 MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol, 6574 vxlan_m->protocol); 6575 MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol, 6576 vxlan_v->protocol); 6577 } 6578 6579 /** 6580 * Add Geneve item to matcher and to the value. 6581 * 6582 * @param[in, out] matcher 6583 * Flow matcher. 6584 * @param[in, out] key 6585 * Flow matcher value. 6586 * @param[in] item 6587 * Flow pattern to translate. 6588 * @param[in] inner 6589 * Item is inner pattern. 
6590 */ 6591 6592 static void 6593 flow_dv_translate_item_geneve(void *matcher, void *key, 6594 const struct rte_flow_item *item, int inner) 6595 { 6596 const struct rte_flow_item_geneve *geneve_m = item->mask; 6597 const struct rte_flow_item_geneve *geneve_v = item->spec; 6598 void *headers_m; 6599 void *headers_v; 6600 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 6601 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 6602 uint16_t dport; 6603 uint16_t gbhdr_m; 6604 uint16_t gbhdr_v; 6605 char *vni_m; 6606 char *vni_v; 6607 size_t size, i; 6608 6609 if (inner) { 6610 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6611 inner_headers); 6612 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 6613 } else { 6614 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 6615 outer_headers); 6616 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 6617 } 6618 dport = MLX5_UDP_PORT_GENEVE; 6619 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) { 6620 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF); 6621 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport); 6622 } 6623 if (!geneve_v) 6624 return; 6625 if (!geneve_m) 6626 geneve_m = &rte_flow_item_geneve_mask; 6627 size = sizeof(geneve_m->vni); 6628 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni); 6629 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni); 6630 memcpy(vni_m, geneve_m->vni, size); 6631 for (i = 0; i < size; ++i) 6632 vni_v[i] = vni_m[i] & geneve_v->vni[i]; 6633 MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, 6634 rte_be_to_cpu_16(geneve_m->protocol)); 6635 MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type, 6636 rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol)); 6637 gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0); 6638 gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0); 6639 MLX5_SET(fte_match_set_misc, misc_m, geneve_oam, 6640 MLX5_GENEVE_OAMF_VAL(gbhdr_m)); 6641 MLX5_SET(fte_match_set_misc, misc_v, geneve_oam, 6642 MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m)); 6643 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len, 6644 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m)); 6645 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len, 6646 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) & 6647 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m)); 6648 } 6649 6650 /** 6651 * Add MPLS item to matcher and to the value. 6652 * 6653 * @param[in, out] matcher 6654 * Flow matcher. 6655 * @param[in, out] key 6656 * Flow matcher value. 6657 * @param[in] item 6658 * Flow pattern to translate. 6659 * @param[in] prev_layer 6660 * The protocol layer indicated in previous item. 6661 * @param[in] inner 6662 * Item is inner pattern. 
6663 */ 6664 static void 6665 flow_dv_translate_item_mpls(void *matcher, void *key, 6666 const struct rte_flow_item *item, 6667 uint64_t prev_layer, 6668 int inner) 6669 { 6670 const uint32_t *in_mpls_m = item->mask; 6671 const uint32_t *in_mpls_v = item->spec; 6672 uint32_t *out_mpls_m = 0; 6673 uint32_t *out_mpls_v = 0; 6674 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 6675 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 6676 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher, 6677 misc_parameters_2); 6678 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2); 6679 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers); 6680 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 6681 6682 switch (prev_layer) { 6683 case MLX5_FLOW_LAYER_OUTER_L4_UDP: 6684 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff); 6685 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, 6686 MLX5_UDP_PORT_MPLS); 6687 break; 6688 case MLX5_FLOW_LAYER_GRE: 6689 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff); 6690 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, 6691 RTE_ETHER_TYPE_MPLS); 6692 break; 6693 default: 6694 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); 6695 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 6696 IPPROTO_MPLS); 6697 break; 6698 } 6699 if (!in_mpls_v) 6700 return; 6701 if (!in_mpls_m) 6702 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask; 6703 switch (prev_layer) { 6704 case MLX5_FLOW_LAYER_OUTER_L4_UDP: 6705 out_mpls_m = 6706 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m, 6707 outer_first_mpls_over_udp); 6708 out_mpls_v = 6709 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v, 6710 outer_first_mpls_over_udp); 6711 break; 6712 case MLX5_FLOW_LAYER_GRE: 6713 out_mpls_m = 6714 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m, 6715 outer_first_mpls_over_gre); 6716 out_mpls_v = 6717 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v, 6718 outer_first_mpls_over_gre); 6719 break; 6720 default: 6721 /* Inner MPLS not over GRE is not supported. */ 6722 if (!inner) { 6723 out_mpls_m = 6724 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, 6725 misc2_m, 6726 outer_first_mpls); 6727 out_mpls_v = 6728 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, 6729 misc2_v, 6730 outer_first_mpls); 6731 } 6732 break; 6733 } 6734 if (out_mpls_m && out_mpls_v) { 6735 *out_mpls_m = *in_mpls_m; 6736 *out_mpls_v = *in_mpls_v & *in_mpls_m; 6737 } 6738 } 6739 6740 /** 6741 * Add metadata register item to matcher 6742 * 6743 * @param[in, out] matcher 6744 * Flow matcher. 6745 * @param[in, out] key 6746 * Flow matcher value. 
6747 * @param[in] reg_type 6748 * Type of device metadata register 6749 * @param[in] value 6750 * Register value 6751 * @param[in] mask 6752 * Register mask 6753 */ 6754 static void 6755 flow_dv_match_meta_reg(void *matcher, void *key, 6756 enum modify_reg reg_type, 6757 uint32_t data, uint32_t mask) 6758 { 6759 void *misc2_m = 6760 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2); 6761 void *misc2_v = 6762 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2); 6763 uint32_t temp; 6764 6765 data &= mask; 6766 switch (reg_type) { 6767 case REG_A: 6768 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask); 6769 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data); 6770 break; 6771 case REG_B: 6772 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask); 6773 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data); 6774 break; 6775 case REG_C_0: 6776 /* 6777 * The metadata register C0 field might be divided into 6778 * source vport index and META item value, we should set 6779 * this field according to specified mask, not as whole one. 6780 */ 6781 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0); 6782 temp |= mask; 6783 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp); 6784 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0); 6785 temp &= ~mask; 6786 temp |= data; 6787 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp); 6788 break; 6789 case REG_C_1: 6790 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask); 6791 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data); 6792 break; 6793 case REG_C_2: 6794 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask); 6795 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data); 6796 break; 6797 case REG_C_3: 6798 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask); 6799 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data); 6800 break; 6801 case REG_C_4: 6802 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask); 6803 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data); 6804 break; 6805 case REG_C_5: 6806 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask); 6807 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data); 6808 break; 6809 case REG_C_6: 6810 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask); 6811 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data); 6812 break; 6813 case REG_C_7: 6814 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask); 6815 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data); 6816 break; 6817 default: 6818 MLX5_ASSERT(false); 6819 break; 6820 } 6821 } 6822 6823 /** 6824 * Add MARK item to matcher 6825 * 6826 * @param[in] dev 6827 * The device to configure through. 6828 * @param[in, out] matcher 6829 * Flow matcher. 6830 * @param[in, out] key 6831 * Flow matcher value. 6832 * @param[in] item 6833 * Flow pattern to translate. 6834 */ 6835 static void 6836 flow_dv_translate_item_mark(struct rte_eth_dev *dev, 6837 void *matcher, void *key, 6838 const struct rte_flow_item *item) 6839 { 6840 struct mlx5_priv *priv = dev->data->dev_private; 6841 const struct rte_flow_item_mark *mark; 6842 uint32_t value; 6843 uint32_t mask; 6844 6845 mark = item->mask ? 
(const void *)item->mask : 6846 &rte_flow_item_mark_mask; 6847 mask = mark->id & priv->sh->dv_mark_mask; 6848 mark = (const void *)item->spec; 6849 MLX5_ASSERT(mark); 6850 value = mark->id & priv->sh->dv_mark_mask & mask; 6851 if (mask) { 6852 enum modify_reg reg; 6853 6854 /* Get the metadata register index for the mark. */ 6855 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL); 6856 MLX5_ASSERT(reg > 0); 6857 if (reg == REG_C_0) { 6858 struct mlx5_priv *priv = dev->data->dev_private; 6859 uint32_t msk_c0 = priv->sh->dv_regc0_mask; 6860 uint32_t shl_c0 = rte_bsf32(msk_c0); 6861 6862 mask &= msk_c0; 6863 mask <<= shl_c0; 6864 value <<= shl_c0; 6865 } 6866 flow_dv_match_meta_reg(matcher, key, reg, value, mask); 6867 } 6868 } 6869 6870 /** 6871 * Add META item to matcher 6872 * 6873 * @param[in] dev 6874 * The device to configure through. 6875 * @param[in, out] matcher 6876 * Flow matcher. 6877 * @param[in, out] key 6878 * Flow matcher value. 6879 * @param[in] attr 6880 * Attributes of flow that includes this item. 6881 * @param[in] item 6882 * Flow pattern to translate. 6883 */ 6884 static void 6885 flow_dv_translate_item_meta(struct rte_eth_dev *dev, 6886 void *matcher, void *key, 6887 const struct rte_flow_attr *attr, 6888 const struct rte_flow_item *item) 6889 { 6890 const struct rte_flow_item_meta *meta_m; 6891 const struct rte_flow_item_meta *meta_v; 6892 6893 meta_m = (const void *)item->mask; 6894 if (!meta_m) 6895 meta_m = &rte_flow_item_meta_mask; 6896 meta_v = (const void *)item->spec; 6897 if (meta_v) { 6898 int reg; 6899 uint32_t value = meta_v->data; 6900 uint32_t mask = meta_m->data; 6901 6902 reg = flow_dv_get_metadata_reg(dev, attr, NULL); 6903 if (reg < 0) 6904 return; 6905 /* 6906 * In datapath code there are no endianness 6907 * conversions for performance reasons; all 6908 * pattern conversions are done in rte_flow. 6909 */ 6910 value = rte_cpu_to_be_32(value); 6911 mask = rte_cpu_to_be_32(mask); 6912 if (reg == REG_C_0) { 6913 struct mlx5_priv *priv = dev->data->dev_private; 6914 uint32_t msk_c0 = priv->sh->dv_regc0_mask; 6915 uint32_t shl_c0 = rte_bsf32(msk_c0); 6916 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 6917 uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask); 6918 6919 value >>= shr_c0; 6920 mask >>= shr_c0; 6921 #endif 6922 value <<= shl_c0; 6923 mask <<= shl_c0; 6924 MLX5_ASSERT(msk_c0); 6925 MLX5_ASSERT(!(~msk_c0 & mask)); 6926 } 6927 flow_dv_match_meta_reg(matcher, key, reg, value, mask); 6928 } 6929 } 6930 6931 /** 6932 * Add vport metadata Reg C0 item to matcher 6933 * 6934 * @param[in, out] matcher 6935 * Flow matcher. 6936 * @param[in, out] key 6937 * Flow matcher value. 6938 * @param[in] value 6939 * Vport metadata register C0 value and mask to match. 6940 */ 6941 static void 6942 flow_dv_translate_item_meta_vport(void *matcher, void *key, 6943 uint32_t value, uint32_t mask) 6944 { 6945 flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask); 6946 } 6947 6948 /** 6949 * Add tag item to matcher 6950 * 6951 * @param[in] dev 6952 * The device to configure through. 6953 * @param[in, out] matcher 6954 * Flow matcher. 6955 * @param[in, out] key 6956 * Flow matcher value. 6957 * @param[in] item 6958 * Flow pattern to translate. 
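 *
 * Note: this handles the driver-internal MLX5_RTE_FLOW_ITEM_TYPE_TAG items
 * (created, for example, when a flow is split), where tag_v->id already
 * names the metadata register. Application-level RTE_FLOW_ITEM_TYPE_TAG
 * items are handled by flow_dv_translate_item_tag() below, which first maps
 * the tag index to a register via mlx5_flow_get_reg_id().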
6959 */ 6960 static void 6961 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev, 6962 void *matcher, void *key, 6963 const struct rte_flow_item *item) 6964 { 6965 const struct mlx5_rte_flow_item_tag *tag_v = item->spec; 6966 const struct mlx5_rte_flow_item_tag *tag_m = item->mask; 6967 uint32_t mask, value; 6968 6969 MLX5_ASSERT(tag_v); 6970 value = tag_v->data; 6971 mask = tag_m ? tag_m->data : UINT32_MAX; 6972 if (tag_v->id == REG_C_0) { 6973 struct mlx5_priv *priv = dev->data->dev_private; 6974 uint32_t msk_c0 = priv->sh->dv_regc0_mask; 6975 uint32_t shl_c0 = rte_bsf32(msk_c0); 6976 6977 mask &= msk_c0; 6978 mask <<= shl_c0; 6979 value <<= shl_c0; 6980 } 6981 flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask); 6982 } 6983 6984 /** 6985 * Add TAG item to matcher 6986 * 6987 * @param[in] dev 6988 * The device to configure through. 6989 * @param[in, out] matcher 6990 * Flow matcher. 6991 * @param[in, out] key 6992 * Flow matcher value. 6993 * @param[in] item 6994 * Flow pattern to translate. 6995 */ 6996 static void 6997 flow_dv_translate_item_tag(struct rte_eth_dev *dev, 6998 void *matcher, void *key, 6999 const struct rte_flow_item *item) 7000 { 7001 const struct rte_flow_item_tag *tag_v = item->spec; 7002 const struct rte_flow_item_tag *tag_m = item->mask; 7003 enum modify_reg reg; 7004 7005 MLX5_ASSERT(tag_v); 7006 tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask; 7007 /* Get the metadata register index for the tag. */ 7008 reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL); 7009 MLX5_ASSERT(reg > 0); 7010 flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data); 7011 } 7012 7013 /** 7014 * Add source vport match to the specified matcher. 7015 * 7016 * @param[in, out] matcher 7017 * Flow matcher. 7018 * @param[in, out] key 7019 * Flow matcher value. 7020 * @param[in] port 7021 * Source vport value to match. 7022 * @param[in] mask 7023 * Mask. 7024 */ 7025 static void 7026 flow_dv_translate_item_source_vport(void *matcher, void *key, 7027 int16_t port, uint16_t mask) 7028 { 7029 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 7030 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 7031 7032 MLX5_SET(fte_match_set_misc, misc_m, source_port, mask); 7033 MLX5_SET(fte_match_set_misc, misc_v, source_port, port); 7034 } 7035 7036 /** 7037 * Translate port-id item to eswitch match on port-id. 7038 * 7039 * @param[in] dev 7040 * The device to configure through. 7041 * @param[in, out] matcher 7042 * Flow matcher. 7043 * @param[in, out] key 7044 * Flow matcher value. 7045 * @param[in] item 7046 * Flow pattern to translate. 7047 * 7048 * @return 7049 * 0 on success, a negative errno value otherwise. 7050 */ 7051 static int 7052 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher, 7053 void *key, const struct rte_flow_item *item) 7054 { 7055 const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL; 7056 const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL; 7057 struct mlx5_priv *priv; 7058 uint16_t mask, id; 7059 7060 mask = pid_m ? pid_m->id : 0xffff; 7061 id = pid_v ? pid_v->id : dev->data->port_id; 7062 priv = mlx5_port_to_eswitch_info(id, item == NULL); 7063 if (!priv) 7064 return -rte_errno; 7065 /* Translate to vport field or to metadata, depending on mode. 
*/ 7066 if (priv->vport_meta_mask) 7067 flow_dv_translate_item_meta_vport(matcher, key, 7068 priv->vport_meta_tag, 7069 priv->vport_meta_mask); 7070 else 7071 flow_dv_translate_item_source_vport(matcher, key, 7072 priv->vport_id, mask); 7073 return 0; 7074 } 7075 7076 /** 7077 * Add ICMP6 item to matcher and to the value. 7078 * 7079 * @param[in, out] matcher 7080 * Flow matcher. 7081 * @param[in, out] key 7082 * Flow matcher value. 7083 * @param[in] item 7084 * Flow pattern to translate. 7085 * @param[in] inner 7086 * Item is inner pattern. 7087 */ 7088 static void 7089 flow_dv_translate_item_icmp6(void *matcher, void *key, 7090 const struct rte_flow_item *item, 7091 int inner) 7092 { 7093 const struct rte_flow_item_icmp6 *icmp6_m = item->mask; 7094 const struct rte_flow_item_icmp6 *icmp6_v = item->spec; 7095 void *headers_m; 7096 void *headers_v; 7097 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher, 7098 misc_parameters_3); 7099 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3); 7100 if (inner) { 7101 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 7102 inner_headers); 7103 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 7104 } else { 7105 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 7106 outer_headers); 7107 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 7108 } 7109 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF); 7110 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6); 7111 if (!icmp6_v) 7112 return; 7113 if (!icmp6_m) 7114 icmp6_m = &rte_flow_item_icmp6_mask; 7115 /* 7116 * Force flow only to match the non-fragmented IPv6 ICMPv6 packets. 7117 * If only the protocol is specified, no need to match the frag. 7118 */ 7119 MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1); 7120 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0); 7121 MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type); 7122 MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type, 7123 icmp6_v->type & icmp6_m->type); 7124 MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code); 7125 MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code, 7126 icmp6_v->code & icmp6_m->code); 7127 } 7128 7129 /** 7130 * Add ICMP item to matcher and to the value. 7131 * 7132 * @param[in, out] matcher 7133 * Flow matcher. 7134 * @param[in, out] key 7135 * Flow matcher value. 7136 * @param[in] item 7137 * Flow pattern to translate. 7138 * @param[in] inner 7139 * Item is inner pattern. 
7140 */ 7141 static void 7142 flow_dv_translate_item_icmp(void *matcher, void *key, 7143 const struct rte_flow_item *item, 7144 int inner) 7145 { 7146 const struct rte_flow_item_icmp *icmp_m = item->mask; 7147 const struct rte_flow_item_icmp *icmp_v = item->spec; 7148 void *headers_m; 7149 void *headers_v; 7150 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher, 7151 misc_parameters_3); 7152 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3); 7153 if (inner) { 7154 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 7155 inner_headers); 7156 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 7157 } else { 7158 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 7159 outer_headers); 7160 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 7161 } 7162 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF); 7163 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP); 7164 if (!icmp_v) 7165 return; 7166 if (!icmp_m) 7167 icmp_m = &rte_flow_item_icmp_mask; 7168 /* 7169 * Force flow only to match the non-fragmented IPv4 ICMP packets. 7170 * If only the protocol is specified, no need to match the frag. 7171 */ 7172 MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1); 7173 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0); 7174 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type, 7175 icmp_m->hdr.icmp_type); 7176 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type, 7177 icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type); 7178 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code, 7179 icmp_m->hdr.icmp_code); 7180 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code, 7181 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code); 7182 } 7183 7184 /** 7185 * Add GTP item to matcher and to the value. 7186 * 7187 * @param[in, out] matcher 7188 * Flow matcher. 7189 * @param[in, out] key 7190 * Flow matcher value. 7191 * @param[in] item 7192 * Flow pattern to translate. 7193 * @param[in] inner 7194 * Item is inner pattern. 
7195 */ 7196 static void 7197 flow_dv_translate_item_gtp(void *matcher, void *key, 7198 const struct rte_flow_item *item, int inner) 7199 { 7200 const struct rte_flow_item_gtp *gtp_m = item->mask; 7201 const struct rte_flow_item_gtp *gtp_v = item->spec; 7202 void *headers_m; 7203 void *headers_v; 7204 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher, 7205 misc_parameters_3); 7206 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3); 7207 uint16_t dport = RTE_GTPU_UDP_PORT; 7208 7209 if (inner) { 7210 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 7211 inner_headers); 7212 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); 7213 } else { 7214 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, 7215 outer_headers); 7216 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); 7217 } 7218 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) { 7219 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF); 7220 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport); 7221 } 7222 if (!gtp_v) 7223 return; 7224 if (!gtp_m) 7225 gtp_m = &rte_flow_item_gtp_mask; 7226 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, 7227 gtp_m->v_pt_rsv_flags); 7228 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, 7229 gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags); 7230 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type); 7231 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type, 7232 gtp_v->msg_type & gtp_m->msg_type); 7233 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid, 7234 rte_be_to_cpu_32(gtp_m->teid)); 7235 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid, 7236 rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid)); 7237 } 7238 7239 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 }; 7240 7241 #define HEADER_IS_ZERO(match_criteria, headers) \ 7242 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \ 7243 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \ 7244 7245 /** 7246 * Calculate flow matcher enable bitmap. 7247 * 7248 * @param match_criteria 7249 * Pointer to flow matcher criteria. 7250 * 7251 * @return 7252 * Bitmap of enabled fields. 7253 */ 7254 static uint8_t 7255 flow_dv_matcher_enable(uint32_t *match_criteria) 7256 { 7257 uint8_t match_criteria_enable; 7258 7259 match_criteria_enable = 7260 (!HEADER_IS_ZERO(match_criteria, outer_headers)) << 7261 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT; 7262 match_criteria_enable |= 7263 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) << 7264 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT; 7265 match_criteria_enable |= 7266 (!HEADER_IS_ZERO(match_criteria, inner_headers)) << 7267 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT; 7268 match_criteria_enable |= 7269 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) << 7270 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT; 7271 match_criteria_enable |= 7272 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) << 7273 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT; 7274 return match_criteria_enable; 7275 } 7276 7277 7278 /** 7279 * Get a flow table. 7280 * 7281 * @param[in, out] dev 7282 * Pointer to rte_eth_dev structure. 7283 * @param[in] table_id 7284 * Table id to use. 7285 * @param[in] egress 7286 * Direction of the table. 7287 * @param[in] transfer 7288 * E-Switch or NIC flow. 7289 * @param[out] error 7290 * pointer to error structure. 7291 * 7292 * @return 7293 * Returns tables resource based on the index, NULL in case of failed. 
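 *
 * Note: the (table_id, domain, direction) triple is packed into a
 * single 64-bit key (table_key.v64) and looked up in the sh->flow_tbls
 * hash list. A hit only takes an extra reference on the cached table;
 * a miss allocates an entry from the MLX5_IPOOL_JUMP pool and creates
 * the table object in the matching domain (FDB, TX or RX).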
7294 */ 7295 static struct mlx5_flow_tbl_resource * 7296 flow_dv_tbl_resource_get(struct rte_eth_dev *dev, 7297 uint32_t table_id, uint8_t egress, 7298 uint8_t transfer, 7299 struct rte_flow_error *error) 7300 { 7301 struct mlx5_priv *priv = dev->data->dev_private; 7302 struct mlx5_dev_ctx_shared *sh = priv->sh; 7303 struct mlx5_flow_tbl_resource *tbl; 7304 union mlx5_flow_tbl_key table_key = { 7305 { 7306 .table_id = table_id, 7307 .reserved = 0, 7308 .domain = !!transfer, 7309 .direction = !!egress, 7310 } 7311 }; 7312 struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls, 7313 table_key.v64); 7314 struct mlx5_flow_tbl_data_entry *tbl_data; 7315 uint32_t idx = 0; 7316 int ret; 7317 void *domain; 7318 7319 if (pos) { 7320 tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, 7321 entry); 7322 tbl = &tbl_data->tbl; 7323 rte_atomic32_inc(&tbl->refcnt); 7324 return tbl; 7325 } 7326 tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx); 7327 if (!tbl_data) { 7328 rte_flow_error_set(error, ENOMEM, 7329 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 7330 NULL, 7331 "cannot allocate flow table data entry"); 7332 return NULL; 7333 } 7334 tbl_data->idx = idx; 7335 tbl = &tbl_data->tbl; 7336 pos = &tbl_data->entry; 7337 if (transfer) 7338 domain = sh->fdb_domain; 7339 else if (egress) 7340 domain = sh->tx_domain; 7341 else 7342 domain = sh->rx_domain; 7343 ret = mlx5_flow_os_create_flow_tbl(domain, table_id, &tbl->obj); 7344 if (ret) { 7345 rte_flow_error_set(error, ENOMEM, 7346 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 7347 NULL, "cannot create flow table object"); 7348 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx); 7349 return NULL; 7350 } 7351 /* 7352 * No multi-threads now, but still better to initialize the reference 7353 * count before insert it into the hash list. 7354 */ 7355 rte_atomic32_init(&tbl->refcnt); 7356 /* Jump action reference count is initialized here. */ 7357 rte_atomic32_init(&tbl_data->jump.refcnt); 7358 pos->key = table_key.v64; 7359 ret = mlx5_hlist_insert(sh->flow_tbls, pos); 7360 if (ret < 0) { 7361 rte_flow_error_set(error, -ret, 7362 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 7363 "cannot insert flow table data entry"); 7364 mlx5_flow_os_destroy_flow_tbl(tbl->obj); 7365 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx); 7366 } 7367 rte_atomic32_inc(&tbl->refcnt); 7368 return tbl; 7369 } 7370 7371 /** 7372 * Release a flow table. 7373 * 7374 * @param[in] dev 7375 * Pointer to rte_eth_dev structure. 7376 * @param[in] tbl 7377 * Table resource to be released. 7378 * 7379 * @return 7380 * Returns 0 if table was released, else return 1; 7381 */ 7382 static int 7383 flow_dv_tbl_resource_release(struct rte_eth_dev *dev, 7384 struct mlx5_flow_tbl_resource *tbl) 7385 { 7386 struct mlx5_priv *priv = dev->data->dev_private; 7387 struct mlx5_dev_ctx_shared *sh = priv->sh; 7388 struct mlx5_flow_tbl_data_entry *tbl_data = 7389 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); 7390 7391 if (!tbl) 7392 return 0; 7393 if (rte_atomic32_dec_and_test(&tbl->refcnt)) { 7394 struct mlx5_hlist_entry *pos = &tbl_data->entry; 7395 7396 mlx5_flow_os_destroy_flow_tbl(tbl->obj); 7397 tbl->obj = NULL; 7398 /* remove the entry from the hash list and free memory. */ 7399 mlx5_hlist_remove(sh->flow_tbls, pos); 7400 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_JUMP], 7401 tbl_data->idx); 7402 return 0; 7403 } 7404 return 1; 7405 } 7406 7407 /** 7408 * Register the flow matcher. 7409 * 7410 * @param[in, out] dev 7411 * Pointer to rte_eth_dev structure. 
7412 * @param[in, out] matcher 7413 * Pointer to flow matcher. 7414 * @param[in, out] key 7415 * Pointer to flow table key. 7416 * @parm[in, out] dev_flow 7417 * Pointer to the dev_flow. 7418 * @param[out] error 7419 * pointer to error structure. 7420 * 7421 * @return 7422 * 0 on success otherwise -errno and errno is set. 7423 */ 7424 static int 7425 flow_dv_matcher_register(struct rte_eth_dev *dev, 7426 struct mlx5_flow_dv_matcher *matcher, 7427 union mlx5_flow_tbl_key *key, 7428 struct mlx5_flow *dev_flow, 7429 struct rte_flow_error *error) 7430 { 7431 struct mlx5_priv *priv = dev->data->dev_private; 7432 struct mlx5_dev_ctx_shared *sh = priv->sh; 7433 struct mlx5_flow_dv_matcher *cache_matcher; 7434 struct mlx5dv_flow_matcher_attr dv_attr = { 7435 .type = IBV_FLOW_ATTR_NORMAL, 7436 .match_mask = (void *)&matcher->mask, 7437 }; 7438 struct mlx5_flow_tbl_resource *tbl; 7439 struct mlx5_flow_tbl_data_entry *tbl_data; 7440 int ret; 7441 7442 tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction, 7443 key->domain, error); 7444 if (!tbl) 7445 return -rte_errno; /* No need to refill the error info */ 7446 tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); 7447 /* Lookup from cache. */ 7448 LIST_FOREACH(cache_matcher, &tbl_data->matchers, next) { 7449 if (matcher->crc == cache_matcher->crc && 7450 matcher->priority == cache_matcher->priority && 7451 !memcmp((const void *)matcher->mask.buf, 7452 (const void *)cache_matcher->mask.buf, 7453 cache_matcher->mask.size)) { 7454 DRV_LOG(DEBUG, 7455 "%s group %u priority %hd use %s " 7456 "matcher %p: refcnt %d++", 7457 key->domain ? "FDB" : "NIC", key->table_id, 7458 cache_matcher->priority, 7459 key->direction ? "tx" : "rx", 7460 (void *)cache_matcher, 7461 rte_atomic32_read(&cache_matcher->refcnt)); 7462 rte_atomic32_inc(&cache_matcher->refcnt); 7463 dev_flow->handle->dvh.matcher = cache_matcher; 7464 /* old matcher should not make the table ref++. */ 7465 flow_dv_tbl_resource_release(dev, tbl); 7466 return 0; 7467 } 7468 } 7469 /* Register new matcher. */ 7470 cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0); 7471 if (!cache_matcher) { 7472 flow_dv_tbl_resource_release(dev, tbl); 7473 return rte_flow_error_set(error, ENOMEM, 7474 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 7475 "cannot allocate matcher memory"); 7476 } 7477 *cache_matcher = *matcher; 7478 dv_attr.match_criteria_enable = 7479 flow_dv_matcher_enable(cache_matcher->mask.buf); 7480 dv_attr.priority = matcher->priority; 7481 if (key->direction) 7482 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS; 7483 ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj, 7484 &cache_matcher->matcher_object); 7485 if (ret) { 7486 rte_free(cache_matcher); 7487 #ifdef HAVE_MLX5DV_DR 7488 flow_dv_tbl_resource_release(dev, tbl); 7489 #endif 7490 return rte_flow_error_set(error, ENOMEM, 7491 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 7492 NULL, "cannot create matcher"); 7493 } 7494 /* Save the table information */ 7495 cache_matcher->tbl = tbl; 7496 rte_atomic32_init(&cache_matcher->refcnt); 7497 /* only matcher ref++, table ref++ already done above in get API. */ 7498 rte_atomic32_inc(&cache_matcher->refcnt); 7499 LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next); 7500 dev_flow->handle->dvh.matcher = cache_matcher; 7501 DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d", 7502 key->domain ? "FDB" : "NIC", key->table_id, 7503 cache_matcher->priority, 7504 key->direction ? 
"tx" : "rx", (void *)cache_matcher, 7505 rte_atomic32_read(&cache_matcher->refcnt)); 7506 return 0; 7507 } 7508 7509 /** 7510 * Find existing tag resource or create and register a new one. 7511 * 7512 * @param dev[in, out] 7513 * Pointer to rte_eth_dev structure. 7514 * @param[in, out] tag_be24 7515 * Tag value in big endian then R-shift 8. 7516 * @parm[in, out] dev_flow 7517 * Pointer to the dev_flow. 7518 * @param[out] error 7519 * pointer to error structure. 7520 * 7521 * @return 7522 * 0 on success otherwise -errno and errno is set. 7523 */ 7524 static int 7525 flow_dv_tag_resource_register 7526 (struct rte_eth_dev *dev, 7527 uint32_t tag_be24, 7528 struct mlx5_flow *dev_flow, 7529 struct rte_flow_error *error) 7530 { 7531 struct mlx5_priv *priv = dev->data->dev_private; 7532 struct mlx5_dev_ctx_shared *sh = priv->sh; 7533 struct mlx5_flow_dv_tag_resource *cache_resource; 7534 struct mlx5_hlist_entry *entry; 7535 int ret; 7536 7537 /* Lookup a matching resource from cache. */ 7538 entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24); 7539 if (entry) { 7540 cache_resource = container_of 7541 (entry, struct mlx5_flow_dv_tag_resource, entry); 7542 rte_atomic32_inc(&cache_resource->refcnt); 7543 dev_flow->handle->dvh.rix_tag = cache_resource->idx; 7544 dev_flow->dv.tag_resource = cache_resource; 7545 DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++", 7546 (void *)cache_resource, 7547 rte_atomic32_read(&cache_resource->refcnt)); 7548 return 0; 7549 } 7550 /* Register new resource. */ 7551 cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], 7552 &dev_flow->handle->dvh.rix_tag); 7553 if (!cache_resource) 7554 return rte_flow_error_set(error, ENOMEM, 7555 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 7556 "cannot allocate resource memory"); 7557 cache_resource->entry.key = (uint64_t)tag_be24; 7558 ret = mlx5_flow_os_create_flow_action_tag(tag_be24, 7559 &cache_resource->action); 7560 if (ret) { 7561 rte_free(cache_resource); 7562 return rte_flow_error_set(error, ENOMEM, 7563 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 7564 NULL, "cannot create action"); 7565 } 7566 rte_atomic32_init(&cache_resource->refcnt); 7567 rte_atomic32_inc(&cache_resource->refcnt); 7568 if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) { 7569 mlx5_flow_os_destroy_flow_action(cache_resource->action); 7570 rte_free(cache_resource); 7571 return rte_flow_error_set(error, EEXIST, 7572 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 7573 NULL, "cannot insert tag"); 7574 } 7575 dev_flow->dv.tag_resource = cache_resource; 7576 DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++", 7577 (void *)cache_resource, 7578 rte_atomic32_read(&cache_resource->refcnt)); 7579 return 0; 7580 } 7581 7582 /** 7583 * Release the tag. 7584 * 7585 * @param dev 7586 * Pointer to Ethernet device. 7587 * @param tag_idx 7588 * Tag index. 7589 * 7590 * @return 7591 * 1 while a reference on it exists, 0 when freed. 
7592 */ 7593 static int 7594 flow_dv_tag_release(struct rte_eth_dev *dev, 7595 uint32_t tag_idx) 7596 { 7597 struct mlx5_priv *priv = dev->data->dev_private; 7598 struct mlx5_dev_ctx_shared *sh = priv->sh; 7599 struct mlx5_flow_dv_tag_resource *tag; 7600 7601 tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx); 7602 if (!tag) 7603 return 0; 7604 DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--", 7605 dev->data->port_id, (void *)tag, 7606 rte_atomic32_read(&tag->refcnt)); 7607 if (rte_atomic32_dec_and_test(&tag->refcnt)) { 7608 claim_zero(mlx5_flow_os_destroy_flow_action(tag->action)); 7609 mlx5_hlist_remove(sh->tag_table, &tag->entry); 7610 DRV_LOG(DEBUG, "port %u tag %p: removed", 7611 dev->data->port_id, (void *)tag); 7612 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx); 7613 return 0; 7614 } 7615 return 1; 7616 } 7617 7618 /** 7619 * Translate port ID action to vport. 7620 * 7621 * @param[in] dev 7622 * Pointer to rte_eth_dev structure. 7623 * @param[in] action 7624 * Pointer to the port ID action. 7625 * @param[out] dst_port_id 7626 * The target port ID. 7627 * @param[out] error 7628 * Pointer to the error structure. 7629 * 7630 * @return 7631 * 0 on success, a negative errno value otherwise and rte_errno is set. 7632 */ 7633 static int 7634 flow_dv_translate_action_port_id(struct rte_eth_dev *dev, 7635 const struct rte_flow_action *action, 7636 uint32_t *dst_port_id, 7637 struct rte_flow_error *error) 7638 { 7639 uint32_t port; 7640 struct mlx5_priv *priv; 7641 const struct rte_flow_action_port_id *conf = 7642 (const struct rte_flow_action_port_id *)action->conf; 7643 7644 port = conf->original ? dev->data->port_id : conf->id; 7645 priv = mlx5_port_to_eswitch_info(port, false); 7646 if (!priv) 7647 return rte_flow_error_set(error, -rte_errno, 7648 RTE_FLOW_ERROR_TYPE_ACTION, 7649 NULL, 7650 "No eswitch info was found for port"); 7651 #ifdef HAVE_MLX5DV_DR_DEVX_PORT 7652 /* 7653 * This parameter is transferred to 7654 * mlx5dv_dr_action_create_dest_ib_port(). 7655 */ 7656 *dst_port_id = priv->dev_port; 7657 #else 7658 /* 7659 * Legacy mode, no LAG configuration is supported. 7660 * This parameter is transferred to 7661 * mlx5dv_dr_action_create_dest_vport(). 7662 */ 7663 *dst_port_id = priv->vport_id; 7664 #endif 7665 return 0; 7666 } 7667 7668 /** 7669 * Create a counter with aging configuration. 7670 * 7671 * @param[in] dev 7672 * Pointer to rte_eth_dev structure. 7673 * @param[out] count 7674 * Pointer to the counter action configuration. 7675 * @param[in] age 7676 * Pointer to the aging action configuration. 7677 * 7678 * @return 7679 * Index to flow counter on success, 0 otherwise. 7680 */ 7681 static uint32_t 7682 flow_dv_translate_create_counter(struct rte_eth_dev *dev, 7683 struct mlx5_flow *dev_flow, 7684 const struct rte_flow_action_count *count, 7685 const struct rte_flow_action_age *age) 7686 { 7687 uint32_t counter; 7688 struct mlx5_age_param *age_param; 7689 7690 counter = flow_dv_counter_alloc(dev, 7691 count ? count->shared : 0, 7692 count ? count->id : 0, 7693 dev_flow->dv.group, !!age); 7694 if (!counter || age == NULL) 7695 return counter; 7696 age_param = flow_dv_counter_idx_get_age(dev, counter); 7697 /* 7698 * The counter age accuracy may have a bit delay. Have 3/4 7699 * second bias on the timeout in order to let it age in time. 7700 */ 7701 age_param->context = age->context ? age->context : 7702 (void *)(uintptr_t)(dev_flow->flow_idx); 7703 /* 7704 * The counter age accuracy may have a bit delay.
Have 3/4 7705 * second bias on the timeout in order to let it age in time. 7706 */ 7707 age_param->timeout = age->timeout * 10 - MLX5_AGING_TIME_DELAY; 7708 /* Set expire time in unit of 0.1 sec. */ 7709 age_param->port_id = dev->data->port_id; 7710 age_param->expire = age_param->timeout + 7711 rte_rdtsc() / (rte_get_tsc_hz() / 10); 7712 rte_atomic16_set(&age_param->state, AGE_CANDIDATE); 7713 return counter; 7714 } 7715 /** 7716 * Add Tx queue matcher. 7717 * 7718 * @param[in] dev 7719 * Pointer to the dev struct. 7720 * @param[in, out] matcher 7721 * Flow matcher. 7722 * @param[in, out] key 7723 * Flow matcher value. 7724 * @param[in] item 7725 * Flow pattern to translate. 7726 * @param[in] inner 7727 * Item is inner pattern. 7728 */ 7729 static void 7730 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev, 7731 void *matcher, void *key, 7732 const struct rte_flow_item *item) 7733 { 7734 const struct mlx5_rte_flow_item_tx_queue *queue_m; 7735 const struct mlx5_rte_flow_item_tx_queue *queue_v; 7736 void *misc_m = 7737 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); 7738 void *misc_v = 7739 MLX5_ADDR_OF(fte_match_param, key, misc_parameters); 7740 struct mlx5_txq_ctrl *txq; 7741 uint32_t queue; 7742 7743 7744 queue_m = (const void *)item->mask; 7745 if (!queue_m) 7746 return; 7747 queue_v = (const void *)item->spec; 7748 if (!queue_v) 7749 return; 7750 txq = mlx5_txq_get(dev, queue_v->queue); 7751 if (!txq) 7752 return; 7753 queue = txq->obj->sq->id; 7754 MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue); 7755 MLX5_SET(fte_match_set_misc, misc_v, source_sqn, 7756 queue & queue_m->queue); 7757 mlx5_txq_release(dev, queue_v->queue); 7758 } 7759 7760 /** 7761 * Set the hash fields according to the @p flow information. 7762 * 7763 * @param[in] dev_flow 7764 * Pointer to the mlx5_flow. 7765 * @param[in] rss_desc 7766 * Pointer to the mlx5_flow_rss_desc.
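 *
 * Note: the hash fields are derived from the layers actually present
 * in this sub-flow and from the (refined) rss_desc->types. For
 * example, plain ETH_RSS_IP on an outer IPv4 flow selects both
 * IBV_RX_HASH_SRC_IPV4 and IBV_RX_HASH_DST_IPV4, while adding
 * ETH_RSS_L3_SRC_ONLY narrows the selection to the source address;
 * with rss_desc->level >= 2 the IBV_RX_HASH_INNER flag (available
 * under HAVE_IBV_DEVICE_TUNNEL_SUPPORT) switches hashing to the
 * inner headers.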
7767 */ 7768 static void 7769 flow_dv_hashfields_set(struct mlx5_flow *dev_flow, 7770 struct mlx5_flow_rss_desc *rss_desc) 7771 { 7772 uint64_t items = dev_flow->handle->layers; 7773 int rss_inner = 0; 7774 uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types); 7775 7776 dev_flow->hash_fields = 0; 7777 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 7778 if (rss_desc->level >= 2) { 7779 dev_flow->hash_fields |= IBV_RX_HASH_INNER; 7780 rss_inner = 1; 7781 } 7782 #endif 7783 if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) || 7784 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) { 7785 if (rss_types & MLX5_IPV4_LAYER_TYPES) { 7786 if (rss_types & ETH_RSS_L3_SRC_ONLY) 7787 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4; 7788 else if (rss_types & ETH_RSS_L3_DST_ONLY) 7789 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4; 7790 else 7791 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH; 7792 } 7793 } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) || 7794 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) { 7795 if (rss_types & MLX5_IPV6_LAYER_TYPES) { 7796 if (rss_types & ETH_RSS_L3_SRC_ONLY) 7797 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6; 7798 else if (rss_types & ETH_RSS_L3_DST_ONLY) 7799 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6; 7800 else 7801 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH; 7802 } 7803 } 7804 if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) || 7805 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) { 7806 if (rss_types & ETH_RSS_UDP) { 7807 if (rss_types & ETH_RSS_L4_SRC_ONLY) 7808 dev_flow->hash_fields |= 7809 IBV_RX_HASH_SRC_PORT_UDP; 7810 else if (rss_types & ETH_RSS_L4_DST_ONLY) 7811 dev_flow->hash_fields |= 7812 IBV_RX_HASH_DST_PORT_UDP; 7813 else 7814 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH; 7815 } 7816 } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) || 7817 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) { 7818 if (rss_types & ETH_RSS_TCP) { 7819 if (rss_types & ETH_RSS_L4_SRC_ONLY) 7820 dev_flow->hash_fields |= 7821 IBV_RX_HASH_SRC_PORT_TCP; 7822 else if (rss_types & ETH_RSS_L4_DST_ONLY) 7823 dev_flow->hash_fields |= 7824 IBV_RX_HASH_DST_PORT_TCP; 7825 else 7826 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH; 7827 } 7828 } 7829 } 7830 7831 /** 7832 * Fill the flow with DV spec, lock free 7833 * (mutex should be acquired by caller). 7834 * 7835 * @param[in] dev 7836 * Pointer to rte_eth_dev structure. 7837 * @param[in, out] dev_flow 7838 * Pointer to the sub flow. 7839 * @param[in] attr 7840 * Pointer to the flow attributes. 7841 * @param[in] items 7842 * Pointer to the list of items. 7843 * @param[in] actions 7844 * Pointer to the list of actions. 7845 * @param[out] error 7846 * Pointer to the error structure. 7847 * 7848 * @return 7849 * 0 on success, a negative errno value otherwise and rte_errno is set. 
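 *
 * Note: translation is done in two passes. The first pass walks the
 * action list and fills dev_flow->dv.actions[] and action_flags; all
 * modify-header style actions are accumulated into a single resource
 * and counters are deferred to the END action. The second pass walks
 * the item list and fills the matcher mask and value buffers. Finally
 * the hash fields, the matcher priority and the (table, direction,
 * domain) key are set up and the matcher is registered.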
7850 */ 7851 static int 7852 __flow_dv_translate(struct rte_eth_dev *dev, 7853 struct mlx5_flow *dev_flow, 7854 const struct rte_flow_attr *attr, 7855 const struct rte_flow_item items[], 7856 const struct rte_flow_action actions[], 7857 struct rte_flow_error *error) 7858 { 7859 struct mlx5_priv *priv = dev->data->dev_private; 7860 struct mlx5_dev_config *dev_conf = &priv->config; 7861 struct rte_flow *flow = dev_flow->flow; 7862 struct mlx5_flow_handle *handle = dev_flow->handle; 7863 struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *) 7864 priv->rss_desc) 7865 [!!priv->flow_nested_idx]; 7866 uint64_t item_flags = 0; 7867 uint64_t last_item = 0; 7868 uint64_t action_flags = 0; 7869 uint64_t priority = attr->priority; 7870 struct mlx5_flow_dv_matcher matcher = { 7871 .mask = { 7872 .size = sizeof(matcher.mask.buf), 7873 }, 7874 }; 7875 int actions_n = 0; 7876 bool actions_end = false; 7877 union { 7878 struct mlx5_flow_dv_modify_hdr_resource res; 7879 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) + 7880 sizeof(struct mlx5_modification_cmd) * 7881 (MLX5_MAX_MODIFY_NUM + 1)]; 7882 } mhdr_dummy; 7883 struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res; 7884 const struct rte_flow_action_count *count = NULL; 7885 const struct rte_flow_action_age *age = NULL; 7886 union flow_dv_attr flow_attr = { .attr = 0 }; 7887 uint32_t tag_be; 7888 union mlx5_flow_tbl_key tbl_key; 7889 uint32_t modify_action_position = UINT32_MAX; 7890 void *match_mask = matcher.mask.buf; 7891 void *match_value = dev_flow->dv.value.buf; 7892 uint8_t next_protocol = 0xff; 7893 struct rte_vlan_hdr vlan = { 0 }; 7894 uint32_t table; 7895 int ret = 0; 7896 7897 mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX : 7898 MLX5DV_FLOW_TABLE_TYPE_NIC_RX; 7899 ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group, 7900 !!priv->fdb_def_rule, &table, error); 7901 if (ret) 7902 return ret; 7903 dev_flow->dv.group = table; 7904 if (attr->transfer) 7905 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; 7906 if (priority == MLX5_FLOW_PRIO_RSVD) 7907 priority = dev_conf->flow_prio - 1; 7908 /* number of actions must be set to 0 in case of dirty stack. 
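 * The modify-header resource aliases the on-stack mhdr_dummy union
 * (sized for up to MLX5_MAX_MODIFY_NUM + 1 commands), so the command
 * counter has to be cleared before the action loop below starts
 * appending commands to it.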
*/ 7909 mhdr_res->actions_num = 0; 7910 for (; !actions_end ; actions++) { 7911 const struct rte_flow_action_queue *queue; 7912 const struct rte_flow_action_rss *rss; 7913 const struct rte_flow_action *action = actions; 7914 const uint8_t *rss_key; 7915 const struct rte_flow_action_jump *jump_data; 7916 const struct rte_flow_action_meter *mtr; 7917 struct mlx5_flow_tbl_resource *tbl; 7918 uint32_t port_id = 0; 7919 struct mlx5_flow_dv_port_id_action_resource port_id_resource; 7920 int action_type = actions->type; 7921 const struct rte_flow_action *found_action = NULL; 7922 struct mlx5_flow_meter *fm = NULL; 7923 7924 if (!mlx5_flow_os_action_supported(action_type)) 7925 return rte_flow_error_set(error, ENOTSUP, 7926 RTE_FLOW_ERROR_TYPE_ACTION, 7927 actions, 7928 "action not supported"); 7929 switch (action_type) { 7930 case RTE_FLOW_ACTION_TYPE_VOID: 7931 break; 7932 case RTE_FLOW_ACTION_TYPE_PORT_ID: 7933 if (flow_dv_translate_action_port_id(dev, action, 7934 &port_id, error)) 7935 return -rte_errno; 7936 port_id_resource.port_id = port_id; 7937 MLX5_ASSERT(!handle->rix_port_id_action); 7938 if (flow_dv_port_id_action_resource_register 7939 (dev, &port_id_resource, dev_flow, error)) 7940 return -rte_errno; 7941 dev_flow->dv.actions[actions_n++] = 7942 dev_flow->dv.port_id_action->action; 7943 action_flags |= MLX5_FLOW_ACTION_PORT_ID; 7944 dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID; 7945 break; 7946 case RTE_FLOW_ACTION_TYPE_FLAG: 7947 action_flags |= MLX5_FLOW_ACTION_FLAG; 7948 dev_flow->handle->mark = 1; 7949 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 7950 struct rte_flow_action_mark mark = { 7951 .id = MLX5_FLOW_MARK_DEFAULT, 7952 }; 7953 7954 if (flow_dv_convert_action_mark(dev, &mark, 7955 mhdr_res, 7956 error)) 7957 return -rte_errno; 7958 action_flags |= MLX5_FLOW_ACTION_MARK_EXT; 7959 break; 7960 } 7961 tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT); 7962 /* 7963 * Only one FLAG or MARK is supported per device flow 7964 * right now. So the pointer to the tag resource must be 7965 * zero before the register process. 7966 */ 7967 MLX5_ASSERT(!handle->dvh.rix_tag); 7968 if (flow_dv_tag_resource_register(dev, tag_be, 7969 dev_flow, error)) 7970 return -rte_errno; 7971 MLX5_ASSERT(dev_flow->dv.tag_resource); 7972 dev_flow->dv.actions[actions_n++] = 7973 dev_flow->dv.tag_resource->action; 7974 break; 7975 case RTE_FLOW_ACTION_TYPE_MARK: 7976 action_flags |= MLX5_FLOW_ACTION_MARK; 7977 dev_flow->handle->mark = 1; 7978 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 7979 const struct rte_flow_action_mark *mark = 7980 (const struct rte_flow_action_mark *) 7981 actions->conf; 7982 7983 if (flow_dv_convert_action_mark(dev, mark, 7984 mhdr_res, 7985 error)) 7986 return -rte_errno; 7987 action_flags |= MLX5_FLOW_ACTION_MARK_EXT; 7988 break; 7989 } 7990 /* Fall-through */ 7991 case MLX5_RTE_FLOW_ACTION_TYPE_MARK: 7992 /* Legacy (non-extensive) MARK action. 
*/ 7993 tag_be = mlx5_flow_mark_set 7994 (((const struct rte_flow_action_mark *) 7995 (actions->conf))->id); 7996 MLX5_ASSERT(!handle->dvh.rix_tag); 7997 if (flow_dv_tag_resource_register(dev, tag_be, 7998 dev_flow, error)) 7999 return -rte_errno; 8000 MLX5_ASSERT(dev_flow->dv.tag_resource); 8001 dev_flow->dv.actions[actions_n++] = 8002 dev_flow->dv.tag_resource->action; 8003 break; 8004 case RTE_FLOW_ACTION_TYPE_SET_META: 8005 if (flow_dv_convert_action_set_meta 8006 (dev, mhdr_res, attr, 8007 (const struct rte_flow_action_set_meta *) 8008 actions->conf, error)) 8009 return -rte_errno; 8010 action_flags |= MLX5_FLOW_ACTION_SET_META; 8011 break; 8012 case RTE_FLOW_ACTION_TYPE_SET_TAG: 8013 if (flow_dv_convert_action_set_tag 8014 (dev, mhdr_res, 8015 (const struct rte_flow_action_set_tag *) 8016 actions->conf, error)) 8017 return -rte_errno; 8018 action_flags |= MLX5_FLOW_ACTION_SET_TAG; 8019 break; 8020 case RTE_FLOW_ACTION_TYPE_DROP: 8021 action_flags |= MLX5_FLOW_ACTION_DROP; 8022 dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP; 8023 break; 8024 case RTE_FLOW_ACTION_TYPE_QUEUE: 8025 queue = actions->conf; 8026 rss_desc->queue_num = 1; 8027 rss_desc->queue[0] = queue->index; 8028 action_flags |= MLX5_FLOW_ACTION_QUEUE; 8029 dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE; 8030 break; 8031 case RTE_FLOW_ACTION_TYPE_RSS: 8032 rss = actions->conf; 8033 memcpy(rss_desc->queue, rss->queue, 8034 rss->queue_num * sizeof(uint16_t)); 8035 rss_desc->queue_num = rss->queue_num; 8036 /* NULL RSS key indicates default RSS key. */ 8037 rss_key = !rss->key ? rss_hash_default_key : rss->key; 8038 memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN); 8039 /* 8040 * rss->level and rss.types should be set in advance 8041 * when expanding items for RSS. 8042 */ 8043 action_flags |= MLX5_FLOW_ACTION_RSS; 8044 dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE; 8045 break; 8046 case RTE_FLOW_ACTION_TYPE_AGE: 8047 case RTE_FLOW_ACTION_TYPE_COUNT: 8048 if (!dev_conf->devx) { 8049 return rte_flow_error_set 8050 (error, ENOTSUP, 8051 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 8052 NULL, 8053 "count action not supported"); 8054 } 8055 /* Save information first, will apply later. 
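 * The DevX counter itself is only allocated at the END action by
 * flow_dv_translate_create_counter(); when an AGE action is present
 * the timeout is stored in 0.1 second units (e.g. a 30 second timeout
 * becomes 300) minus the MLX5_AGING_TIME_DELAY bias.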
*/ 8056 if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT) 8057 count = action->conf; 8058 else 8059 age = action->conf; 8060 action_flags |= MLX5_FLOW_ACTION_COUNT; 8061 break; 8062 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: 8063 dev_flow->dv.actions[actions_n++] = 8064 priv->sh->pop_vlan_action; 8065 action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN; 8066 break; 8067 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 8068 if (!(action_flags & 8069 MLX5_FLOW_ACTION_OF_SET_VLAN_VID)) 8070 flow_dev_get_vlan_info_from_items(items, &vlan); 8071 vlan.eth_proto = rte_be_to_cpu_16 8072 ((((const struct rte_flow_action_of_push_vlan *) 8073 actions->conf)->ethertype)); 8074 found_action = mlx5_flow_find_action 8075 (actions + 1, 8076 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID); 8077 if (found_action) 8078 mlx5_update_vlan_vid_pcp(found_action, &vlan); 8079 found_action = mlx5_flow_find_action 8080 (actions + 1, 8081 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP); 8082 if (found_action) 8083 mlx5_update_vlan_vid_pcp(found_action, &vlan); 8084 if (flow_dv_create_action_push_vlan 8085 (dev, attr, &vlan, dev_flow, error)) 8086 return -rte_errno; 8087 dev_flow->dv.actions[actions_n++] = 8088 dev_flow->dv.push_vlan_res->action; 8089 action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN; 8090 break; 8091 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: 8092 /* of_vlan_push action handled this action */ 8093 MLX5_ASSERT(action_flags & 8094 MLX5_FLOW_ACTION_OF_PUSH_VLAN); 8095 break; 8096 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 8097 if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) 8098 break; 8099 flow_dev_get_vlan_info_from_items(items, &vlan); 8100 mlx5_update_vlan_vid_pcp(actions, &vlan); 8101 /* If no VLAN push - this is a modify header action */ 8102 if (flow_dv_convert_action_modify_vlan_vid 8103 (mhdr_res, actions, error)) 8104 return -rte_errno; 8105 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID; 8106 break; 8107 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: 8108 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: 8109 if (flow_dv_create_action_l2_encap(dev, actions, 8110 dev_flow, 8111 attr->transfer, 8112 error)) 8113 return -rte_errno; 8114 dev_flow->dv.actions[actions_n++] = 8115 dev_flow->dv.encap_decap->action; 8116 action_flags |= MLX5_FLOW_ACTION_ENCAP; 8117 break; 8118 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: 8119 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: 8120 if (flow_dv_create_action_l2_decap(dev, dev_flow, 8121 attr->transfer, 8122 error)) 8123 return -rte_errno; 8124 dev_flow->dv.actions[actions_n++] = 8125 dev_flow->dv.encap_decap->action; 8126 action_flags |= MLX5_FLOW_ACTION_DECAP; 8127 break; 8128 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: 8129 /* Handle encap with preceding decap. */ 8130 if (action_flags & MLX5_FLOW_ACTION_DECAP) { 8131 if (flow_dv_create_action_raw_encap 8132 (dev, actions, dev_flow, attr, error)) 8133 return -rte_errno; 8134 dev_flow->dv.actions[actions_n++] = 8135 dev_flow->dv.encap_decap->action; 8136 } else { 8137 /* Handle encap without preceding decap. 
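 * A RAW_ENCAP that follows a RAW_DECAP is handled as a single
 * reformat (decap + encap) resource in the branch above; here a
 * plain L2 encapsulation resource is created instead. The standalone
 * RAW_DECAP case below likewise skips creating an action when it is
 * immediately followed (ignoring VOIDs) by a RAW_ENCAP.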
*/ 8138 if (flow_dv_create_action_l2_encap 8139 (dev, actions, dev_flow, attr->transfer, 8140 error)) 8141 return -rte_errno; 8142 dev_flow->dv.actions[actions_n++] = 8143 dev_flow->dv.encap_decap->action; 8144 } 8145 action_flags |= MLX5_FLOW_ACTION_ENCAP; 8146 break; 8147 case RTE_FLOW_ACTION_TYPE_RAW_DECAP: 8148 while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID) 8149 ; 8150 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { 8151 if (flow_dv_create_action_l2_decap 8152 (dev, dev_flow, attr->transfer, error)) 8153 return -rte_errno; 8154 dev_flow->dv.actions[actions_n++] = 8155 dev_flow->dv.encap_decap->action; 8156 } 8157 /* If decap is followed by encap, handle it at encap. */ 8158 action_flags |= MLX5_FLOW_ACTION_DECAP; 8159 break; 8160 case RTE_FLOW_ACTION_TYPE_JUMP: 8161 jump_data = action->conf; 8162 ret = mlx5_flow_group_to_table(attr, dev_flow->external, 8163 jump_data->group, 8164 !!priv->fdb_def_rule, 8165 &table, error); 8166 if (ret) 8167 return ret; 8168 tbl = flow_dv_tbl_resource_get(dev, table, 8169 attr->egress, 8170 attr->transfer, error); 8171 if (!tbl) 8172 return rte_flow_error_set 8173 (error, errno, 8174 RTE_FLOW_ERROR_TYPE_ACTION, 8175 NULL, 8176 "cannot create jump action."); 8177 if (flow_dv_jump_tbl_resource_register 8178 (dev, tbl, dev_flow, error)) { 8179 flow_dv_tbl_resource_release(dev, tbl); 8180 return rte_flow_error_set 8181 (error, errno, 8182 RTE_FLOW_ERROR_TYPE_ACTION, 8183 NULL, 8184 "cannot create jump action."); 8185 } 8186 dev_flow->dv.actions[actions_n++] = 8187 dev_flow->dv.jump->action; 8188 action_flags |= MLX5_FLOW_ACTION_JUMP; 8189 dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP; 8190 break; 8191 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: 8192 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: 8193 if (flow_dv_convert_action_modify_mac 8194 (mhdr_res, actions, error)) 8195 return -rte_errno; 8196 action_flags |= actions->type == 8197 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ? 8198 MLX5_FLOW_ACTION_SET_MAC_SRC : 8199 MLX5_FLOW_ACTION_SET_MAC_DST; 8200 break; 8201 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: 8202 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: 8203 if (flow_dv_convert_action_modify_ipv4 8204 (mhdr_res, actions, error)) 8205 return -rte_errno; 8206 action_flags |= actions->type == 8207 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ? 8208 MLX5_FLOW_ACTION_SET_IPV4_SRC : 8209 MLX5_FLOW_ACTION_SET_IPV4_DST; 8210 break; 8211 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: 8212 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: 8213 if (flow_dv_convert_action_modify_ipv6 8214 (mhdr_res, actions, error)) 8215 return -rte_errno; 8216 action_flags |= actions->type == 8217 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ? 8218 MLX5_FLOW_ACTION_SET_IPV6_SRC : 8219 MLX5_FLOW_ACTION_SET_IPV6_DST; 8220 break; 8221 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: 8222 case RTE_FLOW_ACTION_TYPE_SET_TP_DST: 8223 if (flow_dv_convert_action_modify_tp 8224 (mhdr_res, actions, items, 8225 &flow_attr, dev_flow, !!(action_flags & 8226 MLX5_FLOW_ACTION_DECAP), error)) 8227 return -rte_errno; 8228 action_flags |= actions->type == 8229 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ? 
8230 MLX5_FLOW_ACTION_SET_TP_SRC : 8231 MLX5_FLOW_ACTION_SET_TP_DST; 8232 break; 8233 case RTE_FLOW_ACTION_TYPE_DEC_TTL: 8234 if (flow_dv_convert_action_modify_dec_ttl 8235 (mhdr_res, items, &flow_attr, dev_flow, 8236 !!(action_flags & 8237 MLX5_FLOW_ACTION_DECAP), error)) 8238 return -rte_errno; 8239 action_flags |= MLX5_FLOW_ACTION_DEC_TTL; 8240 break; 8241 case RTE_FLOW_ACTION_TYPE_SET_TTL: 8242 if (flow_dv_convert_action_modify_ttl 8243 (mhdr_res, actions, items, &flow_attr, 8244 dev_flow, !!(action_flags & 8245 MLX5_FLOW_ACTION_DECAP), error)) 8246 return -rte_errno; 8247 action_flags |= MLX5_FLOW_ACTION_SET_TTL; 8248 break; 8249 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ: 8250 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ: 8251 if (flow_dv_convert_action_modify_tcp_seq 8252 (mhdr_res, actions, error)) 8253 return -rte_errno; 8254 action_flags |= actions->type == 8255 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ? 8256 MLX5_FLOW_ACTION_INC_TCP_SEQ : 8257 MLX5_FLOW_ACTION_DEC_TCP_SEQ; 8258 break; 8259 8260 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK: 8261 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK: 8262 if (flow_dv_convert_action_modify_tcp_ack 8263 (mhdr_res, actions, error)) 8264 return -rte_errno; 8265 action_flags |= actions->type == 8266 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ? 8267 MLX5_FLOW_ACTION_INC_TCP_ACK : 8268 MLX5_FLOW_ACTION_DEC_TCP_ACK; 8269 break; 8270 case MLX5_RTE_FLOW_ACTION_TYPE_TAG: 8271 if (flow_dv_convert_action_set_reg 8272 (mhdr_res, actions, error)) 8273 return -rte_errno; 8274 action_flags |= MLX5_FLOW_ACTION_SET_TAG; 8275 break; 8276 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG: 8277 if (flow_dv_convert_action_copy_mreg 8278 (dev, mhdr_res, actions, error)) 8279 return -rte_errno; 8280 action_flags |= MLX5_FLOW_ACTION_SET_TAG; 8281 break; 8282 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS: 8283 action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS; 8284 dev_flow->handle->fate_action = 8285 MLX5_FLOW_FATE_DEFAULT_MISS; 8286 break; 8287 case RTE_FLOW_ACTION_TYPE_METER: 8288 mtr = actions->conf; 8289 if (!flow->meter) { 8290 fm = mlx5_flow_meter_attach(priv, mtr->mtr_id, 8291 attr, error); 8292 if (!fm) 8293 return rte_flow_error_set(error, 8294 rte_errno, 8295 RTE_FLOW_ERROR_TYPE_ACTION, 8296 NULL, 8297 "meter not found " 8298 "or invalid parameters"); 8299 flow->meter = fm->idx; 8300 } 8301 /* Set the meter action. */ 8302 if (!fm) { 8303 fm = mlx5_ipool_get(priv->sh->ipool 8304 [MLX5_IPOOL_MTR], flow->meter); 8305 if (!fm) 8306 return rte_flow_error_set(error, 8307 rte_errno, 8308 RTE_FLOW_ERROR_TYPE_ACTION, 8309 NULL, 8310 "meter not found " 8311 "or invalid parameters"); 8312 } 8313 dev_flow->dv.actions[actions_n++] = 8314 fm->mfts->meter_action; 8315 action_flags |= MLX5_FLOW_ACTION_METER; 8316 break; 8317 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP: 8318 if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res, 8319 actions, error)) 8320 return -rte_errno; 8321 action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP; 8322 break; 8323 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: 8324 if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res, 8325 actions, error)) 8326 return -rte_errno; 8327 action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP; 8328 break; 8329 case RTE_FLOW_ACTION_TYPE_END: 8330 actions_end = true; 8331 if (mhdr_res->actions_num) { 8332 /* create modify action if needed. 
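 * All modify-header commands collected during the loop (SET_*, TTL,
 * TCP seq/ack, tag/metadata copies) are registered here as one
 * modify-header action and stored at modify_action_position, the
 * slot that was reserved when the first such command was seen, so
 * their position relative to the other actions is preserved.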
*/ 8333 if (flow_dv_modify_hdr_resource_register 8334 (dev, mhdr_res, dev_flow, error)) 8335 return -rte_errno; 8336 dev_flow->dv.actions[modify_action_position] = 8337 handle->dvh.modify_hdr->action; 8338 } 8339 if (action_flags & MLX5_FLOW_ACTION_COUNT) { 8340 flow->counter = 8341 flow_dv_translate_create_counter(dev, 8342 dev_flow, count, age); 8343 8344 if (!flow->counter) 8345 return rte_flow_error_set 8346 (error, rte_errno, 8347 RTE_FLOW_ERROR_TYPE_ACTION, 8348 NULL, 8349 "cannot create counter" 8350 " object."); 8351 dev_flow->dv.actions[actions_n++] = 8352 (flow_dv_counter_get_by_idx(dev, 8353 flow->counter, NULL))->action; 8354 } 8355 break; 8356 default: 8357 break; 8358 } 8359 if (mhdr_res->actions_num && 8360 modify_action_position == UINT32_MAX) 8361 modify_action_position = actions_n++; 8362 } 8363 dev_flow->dv.actions_n = actions_n; 8364 dev_flow->act_flags = action_flags; 8365 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { 8366 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); 8367 int item_type = items->type; 8368 8369 if (!mlx5_flow_os_item_supported(item_type)) 8370 return rte_flow_error_set(error, ENOTSUP, 8371 RTE_FLOW_ERROR_TYPE_ITEM, 8372 NULL, "item not supported"); 8373 switch (item_type) { 8374 case RTE_FLOW_ITEM_TYPE_PORT_ID: 8375 flow_dv_translate_item_port_id(dev, match_mask, 8376 match_value, items); 8377 last_item = MLX5_FLOW_ITEM_PORT_ID; 8378 break; 8379 case RTE_FLOW_ITEM_TYPE_ETH: 8380 flow_dv_translate_item_eth(match_mask, match_value, 8381 items, tunnel, 8382 dev_flow->dv.group); 8383 matcher.priority = action_flags & 8384 MLX5_FLOW_ACTION_DEFAULT_MISS && 8385 !dev_flow->external ? 8386 MLX5_PRIORITY_MAP_L3 : 8387 MLX5_PRIORITY_MAP_L2; 8388 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : 8389 MLX5_FLOW_LAYER_OUTER_L2; 8390 break; 8391 case RTE_FLOW_ITEM_TYPE_VLAN: 8392 flow_dv_translate_item_vlan(dev_flow, 8393 match_mask, match_value, 8394 items, tunnel, 8395 dev_flow->dv.group); 8396 matcher.priority = MLX5_PRIORITY_MAP_L2; 8397 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 | 8398 MLX5_FLOW_LAYER_INNER_VLAN) : 8399 (MLX5_FLOW_LAYER_OUTER_L2 | 8400 MLX5_FLOW_LAYER_OUTER_VLAN); 8401 break; 8402 case RTE_FLOW_ITEM_TYPE_IPV4: 8403 mlx5_flow_tunnel_ip_check(items, next_protocol, 8404 &item_flags, &tunnel); 8405 flow_dv_translate_item_ipv4(match_mask, match_value, 8406 items, item_flags, tunnel, 8407 dev_flow->dv.group); 8408 matcher.priority = MLX5_PRIORITY_MAP_L3; 8409 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : 8410 MLX5_FLOW_LAYER_OUTER_L3_IPV4; 8411 if (items->mask != NULL && 8412 ((const struct rte_flow_item_ipv4 *) 8413 items->mask)->hdr.next_proto_id) { 8414 next_protocol = 8415 ((const struct rte_flow_item_ipv4 *) 8416 (items->spec))->hdr.next_proto_id; 8417 next_protocol &= 8418 ((const struct rte_flow_item_ipv4 *) 8419 (items->mask))->hdr.next_proto_id; 8420 } else { 8421 /* Reset for inner layer. */ 8422 next_protocol = 0xff; 8423 } 8424 break; 8425 case RTE_FLOW_ITEM_TYPE_IPV6: 8426 mlx5_flow_tunnel_ip_check(items, next_protocol, 8427 &item_flags, &tunnel); 8428 flow_dv_translate_item_ipv6(match_mask, match_value, 8429 items, item_flags, tunnel, 8430 dev_flow->dv.group); 8431 matcher.priority = MLX5_PRIORITY_MAP_L3; 8432 last_item = tunnel ? 
MLX5_FLOW_LAYER_INNER_L3_IPV6 : 8433 MLX5_FLOW_LAYER_OUTER_L3_IPV6; 8434 if (items->mask != NULL && 8435 ((const struct rte_flow_item_ipv6 *) 8436 items->mask)->hdr.proto) { 8437 next_protocol = 8438 ((const struct rte_flow_item_ipv6 *) 8439 items->spec)->hdr.proto; 8440 next_protocol &= 8441 ((const struct rte_flow_item_ipv6 *) 8442 items->mask)->hdr.proto; 8443 } else { 8444 /* Reset for inner layer. */ 8445 next_protocol = 0xff; 8446 } 8447 break; 8448 case RTE_FLOW_ITEM_TYPE_TCP: 8449 flow_dv_translate_item_tcp(match_mask, match_value, 8450 items, tunnel); 8451 matcher.priority = MLX5_PRIORITY_MAP_L4; 8452 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP : 8453 MLX5_FLOW_LAYER_OUTER_L4_TCP; 8454 break; 8455 case RTE_FLOW_ITEM_TYPE_UDP: 8456 flow_dv_translate_item_udp(match_mask, match_value, 8457 items, tunnel); 8458 matcher.priority = MLX5_PRIORITY_MAP_L4; 8459 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP : 8460 MLX5_FLOW_LAYER_OUTER_L4_UDP; 8461 break; 8462 case RTE_FLOW_ITEM_TYPE_GRE: 8463 flow_dv_translate_item_gre(match_mask, match_value, 8464 items, tunnel); 8465 matcher.priority = rss_desc->level >= 2 ? 8466 MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; 8467 last_item = MLX5_FLOW_LAYER_GRE; 8468 break; 8469 case RTE_FLOW_ITEM_TYPE_GRE_KEY: 8470 flow_dv_translate_item_gre_key(match_mask, 8471 match_value, items); 8472 last_item = MLX5_FLOW_LAYER_GRE_KEY; 8473 break; 8474 case RTE_FLOW_ITEM_TYPE_NVGRE: 8475 flow_dv_translate_item_nvgre(match_mask, match_value, 8476 items, tunnel); 8477 matcher.priority = rss_desc->level >= 2 ? 8478 MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; 8479 last_item = MLX5_FLOW_LAYER_GRE; 8480 break; 8481 case RTE_FLOW_ITEM_TYPE_VXLAN: 8482 flow_dv_translate_item_vxlan(match_mask, match_value, 8483 items, tunnel); 8484 matcher.priority = rss_desc->level >= 2 ? 8485 MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; 8486 last_item = MLX5_FLOW_LAYER_VXLAN; 8487 break; 8488 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: 8489 flow_dv_translate_item_vxlan_gpe(match_mask, 8490 match_value, items, 8491 tunnel); 8492 matcher.priority = rss_desc->level >= 2 ? 8493 MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; 8494 last_item = MLX5_FLOW_LAYER_VXLAN_GPE; 8495 break; 8496 case RTE_FLOW_ITEM_TYPE_GENEVE: 8497 flow_dv_translate_item_geneve(match_mask, match_value, 8498 items, tunnel); 8499 matcher.priority = rss_desc->level >= 2 ? 8500 MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; 8501 last_item = MLX5_FLOW_LAYER_GENEVE; 8502 break; 8503 case RTE_FLOW_ITEM_TYPE_MPLS: 8504 flow_dv_translate_item_mpls(match_mask, match_value, 8505 items, last_item, tunnel); 8506 matcher.priority = rss_desc->level >= 2 ? 
8507 MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; 8508 last_item = MLX5_FLOW_LAYER_MPLS; 8509 break; 8510 case RTE_FLOW_ITEM_TYPE_MARK: 8511 flow_dv_translate_item_mark(dev, match_mask, 8512 match_value, items); 8513 last_item = MLX5_FLOW_ITEM_MARK; 8514 break; 8515 case RTE_FLOW_ITEM_TYPE_META: 8516 flow_dv_translate_item_meta(dev, match_mask, 8517 match_value, attr, items); 8518 last_item = MLX5_FLOW_ITEM_METADATA; 8519 break; 8520 case RTE_FLOW_ITEM_TYPE_ICMP: 8521 flow_dv_translate_item_icmp(match_mask, match_value, 8522 items, tunnel); 8523 last_item = MLX5_FLOW_LAYER_ICMP; 8524 break; 8525 case RTE_FLOW_ITEM_TYPE_ICMP6: 8526 flow_dv_translate_item_icmp6(match_mask, match_value, 8527 items, tunnel); 8528 last_item = MLX5_FLOW_LAYER_ICMP6; 8529 break; 8530 case RTE_FLOW_ITEM_TYPE_TAG: 8531 flow_dv_translate_item_tag(dev, match_mask, 8532 match_value, items); 8533 last_item = MLX5_FLOW_ITEM_TAG; 8534 break; 8535 case MLX5_RTE_FLOW_ITEM_TYPE_TAG: 8536 flow_dv_translate_mlx5_item_tag(dev, match_mask, 8537 match_value, items); 8538 last_item = MLX5_FLOW_ITEM_TAG; 8539 break; 8540 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE: 8541 flow_dv_translate_item_tx_queue(dev, match_mask, 8542 match_value, 8543 items); 8544 last_item = MLX5_FLOW_ITEM_TX_QUEUE; 8545 break; 8546 case RTE_FLOW_ITEM_TYPE_GTP: 8547 flow_dv_translate_item_gtp(match_mask, match_value, 8548 items, tunnel); 8549 matcher.priority = rss_desc->level >= 2 ? 8550 MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; 8551 last_item = MLX5_FLOW_LAYER_GTP; 8552 break; 8553 default: 8554 break; 8555 } 8556 item_flags |= last_item; 8557 } 8558 /* 8559 * When E-Switch mode is enabled, we have two cases where we need to 8560 * set the source port manually. 8561 * The first one, is in case of Nic steering rule, and the second is 8562 * E-Switch rule where no port_id item was found. In both cases 8563 * the source port is set according the current port in use. 8564 */ 8565 if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) && 8566 (priv->representor || priv->master)) { 8567 if (flow_dv_translate_item_port_id(dev, match_mask, 8568 match_value, NULL)) 8569 return -rte_errno; 8570 } 8571 #ifdef RTE_LIBRTE_MLX5_DEBUG 8572 MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf, 8573 dev_flow->dv.value.buf)); 8574 #endif 8575 /* 8576 * Layers may be already initialized from prefix flow if this dev_flow 8577 * is the suffix flow. 8578 */ 8579 handle->layers |= item_flags; 8580 if (action_flags & MLX5_FLOW_ACTION_RSS) 8581 flow_dv_hashfields_set(dev_flow, rss_desc); 8582 /* Register matcher. */ 8583 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf, 8584 matcher.mask.size); 8585 matcher.priority = mlx5_flow_adjust_priority(dev, priority, 8586 matcher.priority); 8587 /* reserved field no needs to be set to 0 here. */ 8588 tbl_key.domain = attr->transfer; 8589 tbl_key.direction = attr->egress; 8590 tbl_key.table_id = dev_flow->dv.group; 8591 if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error)) 8592 return -rte_errno; 8593 return 0; 8594 } 8595 8596 /** 8597 * Apply the flow to the NIC, lock free, 8598 * (mutex should be acquired by caller). 8599 * 8600 * @param[in] dev 8601 * Pointer to the Ethernet device structure. 8602 * @param[in, out] flow 8603 * Pointer to flow structure. 8604 * @param[out] error 8605 * Pointer to error structure. 8606 * 8607 * @return 8608 * 0 on success, a negative errno value otherwise and rte_errno is set. 
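 *
 * Note: this walks the device sub-flows produced by the translate
 * stage (priv->inter_flows between flow_nested_idx and flow_idx),
 * appends the fate action (drop hash queue, RSS/queue hash queue via
 * mlx5_hrxq_get()/mlx5_hrxq_new(), default-miss or e-switch drop) and
 * then creates the hardware rule with mlx5_flow_os_create_flow().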
8609 */ 8610 static int 8611 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, 8612 struct rte_flow_error *error) 8613 { 8614 struct mlx5_flow_dv_workspace *dv; 8615 struct mlx5_flow_handle *dh; 8616 struct mlx5_flow_handle_dv *dv_h; 8617 struct mlx5_flow *dev_flow; 8618 struct mlx5_priv *priv = dev->data->dev_private; 8619 uint32_t handle_idx; 8620 int n; 8621 int err; 8622 int idx; 8623 8624 for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) { 8625 dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx]; 8626 dv = &dev_flow->dv; 8627 dh = dev_flow->handle; 8628 dv_h = &dh->dvh; 8629 n = dv->actions_n; 8630 if (dh->fate_action == MLX5_FLOW_FATE_DROP) { 8631 if (dv->transfer) { 8632 dv->actions[n++] = priv->sh->esw_drop_action; 8633 } else { 8634 struct mlx5_hrxq *drop_hrxq; 8635 drop_hrxq = mlx5_hrxq_drop_new(dev); 8636 if (!drop_hrxq) { 8637 rte_flow_error_set 8638 (error, errno, 8639 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 8640 NULL, 8641 "cannot get drop hash queue"); 8642 goto error; 8643 } 8644 /* 8645 * Drop queues will be released by the specify 8646 * mlx5_hrxq_drop_release() function. Assign 8647 * the special index to hrxq to mark the queue 8648 * has been allocated. 8649 */ 8650 dh->rix_hrxq = UINT32_MAX; 8651 dv->actions[n++] = drop_hrxq->action; 8652 } 8653 } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) { 8654 struct mlx5_hrxq *hrxq; 8655 uint32_t hrxq_idx; 8656 struct mlx5_flow_rss_desc *rss_desc = 8657 &((struct mlx5_flow_rss_desc *)priv->rss_desc) 8658 [!!priv->flow_nested_idx]; 8659 8660 MLX5_ASSERT(rss_desc->queue_num); 8661 hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key, 8662 MLX5_RSS_HASH_KEY_LEN, 8663 dev_flow->hash_fields, 8664 rss_desc->queue, 8665 rss_desc->queue_num); 8666 if (!hrxq_idx) { 8667 hrxq_idx = mlx5_hrxq_new 8668 (dev, rss_desc->key, 8669 MLX5_RSS_HASH_KEY_LEN, 8670 dev_flow->hash_fields, 8671 rss_desc->queue, 8672 rss_desc->queue_num, 8673 !!(dh->layers & 8674 MLX5_FLOW_LAYER_TUNNEL)); 8675 } 8676 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], 8677 hrxq_idx); 8678 if (!hrxq) { 8679 rte_flow_error_set 8680 (error, rte_errno, 8681 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 8682 "cannot get hash queue"); 8683 goto error; 8684 } 8685 dh->rix_hrxq = hrxq_idx; 8686 dv->actions[n++] = hrxq->action; 8687 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) { 8688 if (flow_dv_default_miss_resource_register 8689 (dev, error)) { 8690 rte_flow_error_set 8691 (error, rte_errno, 8692 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 8693 "cannot create default miss resource"); 8694 goto error_default_miss; 8695 } 8696 dh->rix_default_fate = MLX5_FLOW_FATE_DEFAULT_MISS; 8697 dv->actions[n++] = priv->sh->default_miss.action; 8698 } 8699 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object, 8700 (void *)&dv->value, n, 8701 dv->actions, &dh->drv_flow); 8702 if (err) { 8703 rte_flow_error_set(error, errno, 8704 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 8705 NULL, 8706 "hardware refuses to create flow"); 8707 goto error; 8708 } 8709 if (priv->vmwa_context && 8710 dh->vf_vlan.tag && !dh->vf_vlan.created) { 8711 /* 8712 * The rule contains the VLAN pattern. 8713 * For VF we are going to create VLAN 8714 * interface to make hypervisor set correct 8715 * e-Switch vport context. 
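 * The interface is created through the VM workaround context
 * (priv->vmwa_context) by mlx5_vlan_vmwa_acquire() and is torn
 * down again by mlx5_vlan_vmwa_release() on error or when the
 * flow is removed.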
8716 */ 8717 mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan); 8718 } 8719 } 8720 return 0; 8721 error: 8722 if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) 8723 flow_dv_default_miss_resource_release(dev); 8724 error_default_miss: 8725 err = rte_errno; /* Save rte_errno before cleanup. */ 8726 SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, 8727 handle_idx, dh, next) { 8728 /* hrxq is union, don't clear it if the flag is not set. */ 8729 if (dh->rix_hrxq) { 8730 if (dh->fate_action == MLX5_FLOW_FATE_DROP) { 8731 mlx5_hrxq_drop_release(dev); 8732 dh->rix_hrxq = 0; 8733 } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) { 8734 mlx5_hrxq_release(dev, dh->rix_hrxq); 8735 dh->rix_hrxq = 0; 8736 } 8737 } 8738 if (dh->vf_vlan.tag && dh->vf_vlan.created) 8739 mlx5_vlan_vmwa_release(dev, &dh->vf_vlan); 8740 } 8741 rte_errno = err; /* Restore rte_errno. */ 8742 return -rte_errno; 8743 } 8744 8745 /** 8746 * Release the flow matcher. 8747 * 8748 * @param dev 8749 * Pointer to Ethernet device. 8750 * @param handle 8751 * Pointer to mlx5_flow_handle. 8752 * 8753 * @return 8754 * 1 while a reference on it exists, 0 when freed. 8755 */ 8756 static int 8757 flow_dv_matcher_release(struct rte_eth_dev *dev, 8758 struct mlx5_flow_handle *handle) 8759 { 8760 struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher; 8761 8762 MLX5_ASSERT(matcher->matcher_object); 8763 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--", 8764 dev->data->port_id, (void *)matcher, 8765 rte_atomic32_read(&matcher->refcnt)); 8766 if (rte_atomic32_dec_and_test(&matcher->refcnt)) { 8767 claim_zero(mlx5_flow_os_destroy_flow_matcher 8768 (matcher->matcher_object)); 8769 LIST_REMOVE(matcher, next); 8770 /* table ref-- in release interface. */ 8771 flow_dv_tbl_resource_release(dev, matcher->tbl); 8772 rte_free(matcher); 8773 DRV_LOG(DEBUG, "port %u matcher %p: removed", 8774 dev->data->port_id, (void *)matcher); 8775 return 0; 8776 } 8777 return 1; 8778 } 8779 8780 /** 8781 * Release an encap/decap resource. 8782 * 8783 * @param dev 8784 * Pointer to Ethernet device. 8785 * @param handle 8786 * Pointer to mlx5_flow_handle. 8787 * 8788 * @return 8789 * 1 while a reference on it exists, 0 when freed. 8790 */ 8791 static int 8792 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev, 8793 struct mlx5_flow_handle *handle) 8794 { 8795 struct mlx5_priv *priv = dev->data->dev_private; 8796 uint32_t idx = handle->dvh.rix_encap_decap; 8797 struct mlx5_flow_dv_encap_decap_resource *cache_resource; 8798 8799 cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], 8800 idx); 8801 if (!cache_resource) 8802 return 0; 8803 MLX5_ASSERT(cache_resource->action); 8804 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--", 8805 (void *)cache_resource, 8806 rte_atomic32_read(&cache_resource->refcnt)); 8807 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { 8808 claim_zero(mlx5_flow_os_destroy_flow_action 8809 (cache_resource->action)); 8810 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], 8811 &priv->sh->encaps_decaps, idx, 8812 cache_resource, next); 8813 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx); 8814 DRV_LOG(DEBUG, "encap/decap resource %p: removed", 8815 (void *)cache_resource); 8816 return 0; 8817 } 8818 return 1; 8819 } 8820 8821 /** 8822 * Release an jump to table action resource. 8823 * 8824 * @param dev 8825 * Pointer to Ethernet device. 8826 * @param handle 8827 * Pointer to mlx5_flow_handle. 8828 * 8829 * @return 8830 * 1 while a reference on it exists, 0 when freed. 
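 *
 * Note: all the release helpers in this file follow the same pattern:
 * they drop one reference and destroy the underlying DR/verbs object
 * (and free the list or ipool entry) only when the count reaches zero.
 * The jump action is embedded in the table entry, so releasing it also
 * drops a reference on the owning flow table.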
8831 */ 8832 static int 8833 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev, 8834 struct mlx5_flow_handle *handle) 8835 { 8836 struct mlx5_priv *priv = dev->data->dev_private; 8837 struct mlx5_flow_dv_jump_tbl_resource *cache_resource; 8838 struct mlx5_flow_tbl_data_entry *tbl_data; 8839 8840 tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP], 8841 handle->rix_jump); 8842 if (!tbl_data) 8843 return 0; 8844 cache_resource = &tbl_data->jump; 8845 MLX5_ASSERT(cache_resource->action); 8846 DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--", 8847 (void *)cache_resource, 8848 rte_atomic32_read(&cache_resource->refcnt)); 8849 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { 8850 claim_zero(mlx5_flow_os_destroy_flow_action 8851 (cache_resource->action)); 8852 /* jump action memory free is inside the table release. */ 8853 flow_dv_tbl_resource_release(dev, &tbl_data->tbl); 8854 DRV_LOG(DEBUG, "jump table resource %p: removed", 8855 (void *)cache_resource); 8856 return 0; 8857 } 8858 return 1; 8859 } 8860 8861 /** 8862 * Release a default miss resource. 8863 * 8864 * @param dev 8865 * Pointer to Ethernet device. 8866 * @return 8867 * 1 while a reference on it exists, 0 when freed. 8868 */ 8869 static int 8870 flow_dv_default_miss_resource_release(struct rte_eth_dev *dev) 8871 { 8872 struct mlx5_priv *priv = dev->data->dev_private; 8873 struct mlx5_dev_ctx_shared *sh = priv->sh; 8874 struct mlx5_flow_default_miss_resource *cache_resource = 8875 &sh->default_miss; 8876 8877 MLX5_ASSERT(cache_resource->action); 8878 DRV_LOG(DEBUG, "default miss resource %p: refcnt %d--", 8879 (void *)cache_resource->action, 8880 rte_atomic32_read(&cache_resource->refcnt)); 8881 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { 8882 claim_zero(mlx5_glue->destroy_flow_action 8883 (cache_resource->action)); 8884 DRV_LOG(DEBUG, "default miss resource %p: removed", 8885 (void *)cache_resource->action); 8886 return 0; 8887 } 8888 return 1; 8889 } 8890 8891 /** 8892 * Release a modify-header resource. 8893 * 8894 * @param handle 8895 * Pointer to mlx5_flow_handle. 8896 * 8897 * @return 8898 * 1 while a reference on it exists, 0 when freed. 8899 */ 8900 static int 8901 flow_dv_modify_hdr_resource_release(struct mlx5_flow_handle *handle) 8902 { 8903 struct mlx5_flow_dv_modify_hdr_resource *cache_resource = 8904 handle->dvh.modify_hdr; 8905 8906 MLX5_ASSERT(cache_resource->action); 8907 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--", 8908 (void *)cache_resource, 8909 rte_atomic32_read(&cache_resource->refcnt)); 8910 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { 8911 claim_zero(mlx5_flow_os_destroy_flow_action 8912 (cache_resource->action)); 8913 LIST_REMOVE(cache_resource, next); 8914 rte_free(cache_resource); 8915 DRV_LOG(DEBUG, "modify-header resource %p: removed", 8916 (void *)cache_resource); 8917 return 0; 8918 } 8919 return 1; 8920 } 8921 8922 /** 8923 * Release port ID action resource. 8924 * 8925 * @param dev 8926 * Pointer to Ethernet device. 8927 * @param handle 8928 * Pointer to mlx5_flow_handle. 8929 * 8930 * @return 8931 * 1 while a reference on it exists, 0 when freed. 
8932 */ 8933 static int 8934 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev, 8935 struct mlx5_flow_handle *handle) 8936 { 8937 struct mlx5_priv *priv = dev->data->dev_private; 8938 struct mlx5_flow_dv_port_id_action_resource *cache_resource; 8939 uint32_t idx = handle->rix_port_id_action; 8940 8941 cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], 8942 idx); 8943 if (!cache_resource) 8944 return 0; 8945 MLX5_ASSERT(cache_resource->action); 8946 DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--", 8947 (void *)cache_resource, 8948 rte_atomic32_read(&cache_resource->refcnt)); 8949 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { 8950 claim_zero(mlx5_flow_os_destroy_flow_action 8951 (cache_resource->action)); 8952 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PORT_ID], 8953 &priv->sh->port_id_action_list, idx, 8954 cache_resource, next); 8955 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PORT_ID], idx); 8956 DRV_LOG(DEBUG, "port id action resource %p: removed", 8957 (void *)cache_resource); 8958 return 0; 8959 } 8960 return 1; 8961 } 8962 8963 /** 8964 * Release push vlan action resource. 8965 * 8966 * @param dev 8967 * Pointer to Ethernet device. 8968 * @param handle 8969 * Pointer to mlx5_flow_handle. 8970 * 8971 * @return 8972 * 1 while a reference on it exists, 0 when freed. 8973 */ 8974 static int 8975 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev, 8976 struct mlx5_flow_handle *handle) 8977 { 8978 struct mlx5_priv *priv = dev->data->dev_private; 8979 uint32_t idx = handle->dvh.rix_push_vlan; 8980 struct mlx5_flow_dv_push_vlan_action_resource *cache_resource; 8981 8982 cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], 8983 idx); 8984 if (!cache_resource) 8985 return 0; 8986 MLX5_ASSERT(cache_resource->action); 8987 DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--", 8988 (void *)cache_resource, 8989 rte_atomic32_read(&cache_resource->refcnt)); 8990 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { 8991 claim_zero(mlx5_flow_os_destroy_flow_action 8992 (cache_resource->action)); 8993 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], 8994 &priv->sh->push_vlan_action_list, idx, 8995 cache_resource, next); 8996 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx); 8997 DRV_LOG(DEBUG, "push vlan action resource %p: removed", 8998 (void *)cache_resource); 8999 return 0; 9000 } 9001 return 1; 9002 } 9003 9004 /** 9005 * Release the fate resource. 9006 * 9007 * @param dev 9008 * Pointer to Ethernet device. 9009 * @param handle 9010 * Pointer to mlx5_flow_handle. 9011 */ 9012 static void 9013 flow_dv_fate_resource_release(struct rte_eth_dev *dev, 9014 struct mlx5_flow_handle *handle) 9015 { 9016 if (!handle->rix_fate) 9017 return; 9018 switch (handle->fate_action) { 9019 case MLX5_FLOW_FATE_DROP: 9020 mlx5_hrxq_drop_release(dev); 9021 break; 9022 case MLX5_FLOW_FATE_QUEUE: 9023 mlx5_hrxq_release(dev, handle->rix_hrxq); 9024 break; 9025 case MLX5_FLOW_FATE_JUMP: 9026 flow_dv_jump_tbl_resource_release(dev, handle); 9027 break; 9028 case MLX5_FLOW_FATE_PORT_ID: 9029 flow_dv_port_id_action_resource_release(dev, handle); 9030 break; 9031 case MLX5_FLOW_FATE_DEFAULT_MISS: 9032 flow_dv_default_miss_resource_release(dev); 9033 break; 9034 default: 9035 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action); 9036 break; 9037 } 9038 handle->rix_fate = 0; 9039 } 9040 9041 /** 9042 * Remove the flow from the NIC but keeps it in memory. 9043 * Lock free, (mutex should be acquired by caller). 
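 *
 * The flow keeps its device handles in an indexed pool and links them by
 * index, so the removal walk follows 32-bit indices rather than pointers,
 * with index 0 terminating the list. A condensed sketch of the loop used
 * in the function body (not additional API):
 *
 * @code{.c}
 *	handle_idx = flow->dev_handles;
 *	while (handle_idx) {
 *		dh = mlx5_ipool_get(pool, handle_idx);
 *		if (!dh)
 *			break;
 *		// destroy dh->drv_flow, release fate/VLAN resources ...
 *		handle_idx = dh->next.next;
 *	}
 * @endcode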
9044 * 9045 * @param[in] dev 9046 * Pointer to Ethernet device. 9047 * @param[in, out] flow 9048 * Pointer to flow structure. 9049 */ 9050 static void 9051 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) 9052 { 9053 struct mlx5_flow_handle *dh; 9054 uint32_t handle_idx; 9055 struct mlx5_priv *priv = dev->data->dev_private; 9056 9057 if (!flow) 9058 return; 9059 handle_idx = flow->dev_handles; 9060 while (handle_idx) { 9061 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], 9062 handle_idx); 9063 if (!dh) 9064 return; 9065 if (dh->drv_flow) { 9066 claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow)); 9067 dh->drv_flow = NULL; 9068 } 9069 if (dh->fate_action == MLX5_FLOW_FATE_DROP || 9070 dh->fate_action == MLX5_FLOW_FATE_QUEUE || 9071 dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) 9072 flow_dv_fate_resource_release(dev, dh); 9073 if (dh->vf_vlan.tag && dh->vf_vlan.created) 9074 mlx5_vlan_vmwa_release(dev, &dh->vf_vlan); 9075 handle_idx = dh->next.next; 9076 } 9077 } 9078 9079 /** 9080 * Remove the flow from the NIC and the memory. 9081 * Lock free, (mutex should be acquired by caller). 9082 * 9083 * @param[in] dev 9084 * Pointer to the Ethernet device structure. 9085 * @param[in, out] flow 9086 * Pointer to flow structure. 9087 */ 9088 static void 9089 __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) 9090 { 9091 struct mlx5_flow_handle *dev_handle; 9092 struct mlx5_priv *priv = dev->data->dev_private; 9093 9094 if (!flow) 9095 return; 9096 __flow_dv_remove(dev, flow); 9097 if (flow->counter) { 9098 flow_dv_counter_release(dev, flow->counter); 9099 flow->counter = 0; 9100 } 9101 if (flow->meter) { 9102 struct mlx5_flow_meter *fm; 9103 9104 fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR], 9105 flow->meter); 9106 if (fm) 9107 mlx5_flow_meter_detach(fm); 9108 flow->meter = 0; 9109 } 9110 while (flow->dev_handles) { 9111 uint32_t tmp_idx = flow->dev_handles; 9112 9113 dev_handle = mlx5_ipool_get(priv->sh->ipool 9114 [MLX5_IPOOL_MLX5_FLOW], tmp_idx); 9115 if (!dev_handle) 9116 return; 9117 flow->dev_handles = dev_handle->next.next; 9118 if (dev_handle->dvh.matcher) 9119 flow_dv_matcher_release(dev, dev_handle); 9120 if (dev_handle->dvh.rix_encap_decap) 9121 flow_dv_encap_decap_resource_release(dev, dev_handle); 9122 if (dev_handle->dvh.modify_hdr) 9123 flow_dv_modify_hdr_resource_release(dev_handle); 9124 if (dev_handle->dvh.rix_push_vlan) 9125 flow_dv_push_vlan_action_resource_release(dev, 9126 dev_handle); 9127 if (dev_handle->dvh.rix_tag) 9128 flow_dv_tag_release(dev, 9129 dev_handle->dvh.rix_tag); 9130 flow_dv_fate_resource_release(dev, dev_handle); 9131 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], 9132 tmp_idx); 9133 } 9134 } 9135 9136 /** 9137 * Query a dv flow rule for its statistics via devx. 9138 * 9139 * @param[in] dev 9140 * Pointer to Ethernet device. 9141 * @param[in] flow 9142 * Pointer to the sub flow. 9143 * @param[out] data 9144 * data retrieved by the query. 9145 * @param[out] error 9146 * Perform verbose error reporting if not NULL. 9147 * 9148 * @return 9149 * 0 on success, a negative errno value otherwise and rte_errno is set. 
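 *
 * This routine is the backend of rte_flow_query() for the COUNT action. A
 * hypothetical application-side usage sketch (port_id and flow are assumed
 * to exist; error handling trimmed):
 *
 * @code{.c}
 *	struct rte_flow_query_count qc = { .reset = 1 };
 *	struct rte_flow_action count[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error flow_err;
 *
 *	if (!rte_flow_query(port_id, flow, count, &qc, &flow_err) &&
 *	    qc.hits_set)
 *		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *		       qc.hits, qc.bytes);
 * @endcode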
9150 */ 9151 static int 9152 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow, 9153 void *data, struct rte_flow_error *error) 9154 { 9155 struct mlx5_priv *priv = dev->data->dev_private; 9156 struct rte_flow_query_count *qc = data; 9157 9158 if (!priv->config.devx) 9159 return rte_flow_error_set(error, ENOTSUP, 9160 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 9161 NULL, 9162 "counters are not supported"); 9163 if (flow->counter) { 9164 uint64_t pkts, bytes; 9165 struct mlx5_flow_counter *cnt; 9166 9167 cnt = flow_dv_counter_get_by_idx(dev, flow->counter, 9168 NULL); 9169 int err = _flow_dv_query_count(dev, flow->counter, &pkts, 9170 &bytes); 9171 9172 if (err) 9173 return rte_flow_error_set(error, -err, 9174 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 9175 NULL, "cannot read counters"); 9176 qc->hits_set = 1; 9177 qc->bytes_set = 1; 9178 qc->hits = pkts - cnt->hits; 9179 qc->bytes = bytes - cnt->bytes; 9180 if (qc->reset) { 9181 cnt->hits = pkts; 9182 cnt->bytes = bytes; 9183 } 9184 return 0; 9185 } 9186 return rte_flow_error_set(error, EINVAL, 9187 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 9188 NULL, 9189 "counters are not available"); 9190 } 9191 9192 /** 9193 * Query a flow. 9194 * 9195 * @see rte_flow_query() 9196 * @see rte_flow_ops 9197 */ 9198 static int 9199 flow_dv_query(struct rte_eth_dev *dev, 9200 struct rte_flow *flow __rte_unused, 9201 const struct rte_flow_action *actions __rte_unused, 9202 void *data __rte_unused, 9203 struct rte_flow_error *error __rte_unused) 9204 { 9205 int ret = -EINVAL; 9206 9207 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 9208 switch (actions->type) { 9209 case RTE_FLOW_ACTION_TYPE_VOID: 9210 break; 9211 case RTE_FLOW_ACTION_TYPE_COUNT: 9212 ret = flow_dv_query_count(dev, flow, data, error); 9213 break; 9214 default: 9215 return rte_flow_error_set(error, ENOTSUP, 9216 RTE_FLOW_ERROR_TYPE_ACTION, 9217 actions, 9218 "action not supported"); 9219 } 9220 } 9221 return ret; 9222 } 9223 9224 /** 9225 * Destroy the meter table set. 9226 * Lock free, (mutex should be acquired by caller). 9227 * 9228 * @param[in] dev 9229 * Pointer to Ethernet device. 9230 * @param[in] tbl 9231 * Pointer to the meter table set. 9232 * 9233 * @return 9234 * Always 0. 
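 *
 * The table set holds one mlx5_meter_domain_info per domain (ingress,
 * egress and, with E-Switch enabled, transfer). Each domain is torn down
 * in the same order: default policer rule first, then the matchers, then
 * the METER and SUFFIX tables, and finally the shared drop action. A
 * condensed sketch of that order for a single domain (hypothetical helper
 * names, illustration only):
 *
 * @code{.c}
 *	destroy_policer_drop_rule(&mtd->ingress);       // flows first
 *	destroy_color_and_any_matchers(&mtd->ingress);  // then matchers
 *	release_meter_and_suffix_tables(dev, &mtd->ingress);
 *	// ... repeat for egress and transfer, then:
 *	destroy_shared_drop_action(mtd);
 * @endcode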
9235 */ 9236 static int 9237 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev, 9238 struct mlx5_meter_domains_infos *tbl) 9239 { 9240 struct mlx5_priv *priv = dev->data->dev_private; 9241 struct mlx5_meter_domains_infos *mtd = 9242 (struct mlx5_meter_domains_infos *)tbl; 9243 9244 if (!mtd || !priv->config.dv_flow_en) 9245 return 0; 9246 if (mtd->ingress.policer_rules[RTE_MTR_DROPPED]) 9247 claim_zero(mlx5_flow_os_destroy_flow 9248 (mtd->ingress.policer_rules[RTE_MTR_DROPPED])); 9249 if (mtd->egress.policer_rules[RTE_MTR_DROPPED]) 9250 claim_zero(mlx5_flow_os_destroy_flow 9251 (mtd->egress.policer_rules[RTE_MTR_DROPPED])); 9252 if (mtd->transfer.policer_rules[RTE_MTR_DROPPED]) 9253 claim_zero(mlx5_flow_os_destroy_flow 9254 (mtd->transfer.policer_rules[RTE_MTR_DROPPED])); 9255 if (mtd->egress.color_matcher) 9256 claim_zero(mlx5_flow_os_destroy_flow_matcher 9257 (mtd->egress.color_matcher)); 9258 if (mtd->egress.any_matcher) 9259 claim_zero(mlx5_flow_os_destroy_flow_matcher 9260 (mtd->egress.any_matcher)); 9261 if (mtd->egress.tbl) 9262 flow_dv_tbl_resource_release(dev, mtd->egress.tbl); 9263 if (mtd->egress.sfx_tbl) 9264 flow_dv_tbl_resource_release(dev, mtd->egress.sfx_tbl); 9265 if (mtd->ingress.color_matcher) 9266 claim_zero(mlx5_flow_os_destroy_flow_matcher 9267 (mtd->ingress.color_matcher)); 9268 if (mtd->ingress.any_matcher) 9269 claim_zero(mlx5_flow_os_destroy_flow_matcher 9270 (mtd->ingress.any_matcher)); 9271 if (mtd->ingress.tbl) 9272 flow_dv_tbl_resource_release(dev, mtd->ingress.tbl); 9273 if (mtd->ingress.sfx_tbl) 9274 flow_dv_tbl_resource_release(dev, mtd->ingress.sfx_tbl); 9275 if (mtd->transfer.color_matcher) 9276 claim_zero(mlx5_flow_os_destroy_flow_matcher 9277 (mtd->transfer.color_matcher)); 9278 if (mtd->transfer.any_matcher) 9279 claim_zero(mlx5_flow_os_destroy_flow_matcher 9280 (mtd->transfer.any_matcher)); 9281 if (mtd->transfer.tbl) 9282 flow_dv_tbl_resource_release(dev, mtd->transfer.tbl); 9283 if (mtd->transfer.sfx_tbl) 9284 flow_dv_tbl_resource_release(dev, mtd->transfer.sfx_tbl); 9285 if (mtd->drop_actn) 9286 claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn)); 9287 rte_free(mtd); 9288 return 0; 9289 } 9290 9291 /* Number of meter flow actions, count and jump or count and drop. */ 9292 #define METER_ACTIONS 2 9293 9294 /** 9295 * Create specify domain meter table and suffix table. 9296 * 9297 * @param[in] dev 9298 * Pointer to Ethernet device. 9299 * @param[in,out] mtb 9300 * Pointer to DV meter table set. 9301 * @param[in] egress 9302 * Table attribute. 9303 * @param[in] transfer 9304 * Table attribute. 9305 * @param[in] color_reg_c_idx 9306 * Reg C index for color match. 9307 * 9308 * @return 9309 * 0 on success, -1 otherwise and rte_errno is set. 
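 *
 * The function is invoked once per meter domain; the egress/transfer flags
 * select which mlx5_meter_domain_info inside @p mtb is populated, and the
 * color register index comes from priv->mtr_color_reg. The call pattern
 * used by flow_dv_create_mtr_tbl() below is roughly:
 *
 * @code{.c}
 *	flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, reg_c); // egress
 *	flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, reg_c); // ingress
 *	flow_dv_prepare_mtr_tables(dev, mtb, 0, 1, reg_c); // FDB (transfer)
 * @endcode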
9310 */ 9311 static int 9312 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev, 9313 struct mlx5_meter_domains_infos *mtb, 9314 uint8_t egress, uint8_t transfer, 9315 uint32_t color_reg_c_idx) 9316 { 9317 struct mlx5_priv *priv = dev->data->dev_private; 9318 struct mlx5_dev_ctx_shared *sh = priv->sh; 9319 struct mlx5_flow_dv_match_params mask = { 9320 .size = sizeof(mask.buf), 9321 }; 9322 struct mlx5_flow_dv_match_params value = { 9323 .size = sizeof(value.buf), 9324 }; 9325 struct mlx5dv_flow_matcher_attr dv_attr = { 9326 .type = IBV_FLOW_ATTR_NORMAL, 9327 .priority = 0, 9328 .match_criteria_enable = 0, 9329 .match_mask = (void *)&mask, 9330 }; 9331 void *actions[METER_ACTIONS]; 9332 struct mlx5_meter_domain_info *dtb; 9333 struct rte_flow_error error; 9334 int i = 0; 9335 int ret; 9336 9337 if (transfer) 9338 dtb = &mtb->transfer; 9339 else if (egress) 9340 dtb = &mtb->egress; 9341 else 9342 dtb = &mtb->ingress; 9343 /* Create the meter table with METER level. */ 9344 dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER, 9345 egress, transfer, &error); 9346 if (!dtb->tbl) { 9347 DRV_LOG(ERR, "Failed to create meter policer table."); 9348 return -1; 9349 } 9350 /* Create the meter suffix table with SUFFIX level. */ 9351 dtb->sfx_tbl = flow_dv_tbl_resource_get(dev, 9352 MLX5_FLOW_TABLE_LEVEL_SUFFIX, 9353 egress, transfer, &error); 9354 if (!dtb->sfx_tbl) { 9355 DRV_LOG(ERR, "Failed to create meter suffix table."); 9356 return -1; 9357 } 9358 /* Create matchers, Any and Color. */ 9359 dv_attr.priority = 3; 9360 dv_attr.match_criteria_enable = 0; 9361 ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj, 9362 &dtb->any_matcher); 9363 if (ret) { 9364 DRV_LOG(ERR, "Failed to create meter" 9365 " policer default matcher."); 9366 goto error_exit; 9367 } 9368 dv_attr.priority = 0; 9369 dv_attr.match_criteria_enable = 9370 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT; 9371 flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx, 9372 rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX); 9373 ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj, 9374 &dtb->color_matcher); 9375 if (ret) { 9376 DRV_LOG(ERR, "Failed to create meter policer color matcher."); 9377 goto error_exit; 9378 } 9379 if (mtb->count_actns[RTE_MTR_DROPPED]) 9380 actions[i++] = mtb->count_actns[RTE_MTR_DROPPED]; 9381 actions[i++] = mtb->drop_actn; 9382 /* Default rule: lowest priority, match any, actions: drop. */ 9383 ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i, 9384 actions, 9385 &dtb->policer_rules[RTE_MTR_DROPPED]); 9386 if (ret) { 9387 DRV_LOG(ERR, "Failed to create meter policer drop rule."); 9388 goto error_exit; 9389 } 9390 return 0; 9391 error_exit: 9392 return -1; 9393 } 9394 9395 /** 9396 * Create the needed meter and suffix tables. 9397 * Lock free, (mutex should be acquired by caller). 9398 * 9399 * @param[in] dev 9400 * Pointer to Ethernet device. 9401 * @param[in] fm 9402 * Pointer to the flow meter. 9403 * 9404 * @return 9405 * Pointer to table set on success, NULL otherwise and rte_errno is set. 
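 *
 * These tables back the rte_mtr API. A hypothetical application-side
 * sketch of creating a meter that ends up using them (identifiers, rates
 * and error handling are illustrative only):
 *
 * @code{.c}
 *	struct rte_mtr_meter_profile prof = {
 *		.alg = RTE_MTR_SRTCM_RFC2697,
 *		.srtcm_rfc2697 = { .cir = 1000000, .cbs = 2048, .ebs = 2048 },
 *	};
 *	struct rte_mtr_params params = {
 *		.meter_profile_id = 1,
 *		.meter_enable = 1,
 *		.action = {
 *			[RTE_COLOR_GREEN] = MTR_POLICER_ACTION_COLOR_GREEN,
 *			[RTE_COLOR_YELLOW] = MTR_POLICER_ACTION_COLOR_YELLOW,
 *			[RTE_COLOR_RED] = MTR_POLICER_ACTION_DROP,
 *		},
 *	};
 *	struct rte_mtr_error mtr_err;
 *
 *	rte_mtr_meter_profile_add(port_id, 1, &prof, &mtr_err);
 *	rte_mtr_create(port_id, 1, &params, 1, &mtr_err);
 *	// The meter is then referenced from a flow rule through an
 *	// RTE_FLOW_ACTION_TYPE_METER action (struct rte_flow_action_meter).
 * @endcode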
9406 */ 9407 static struct mlx5_meter_domains_infos * 9408 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev, 9409 const struct mlx5_flow_meter *fm) 9410 { 9411 struct mlx5_priv *priv = dev->data->dev_private; 9412 struct mlx5_meter_domains_infos *mtb; 9413 int ret; 9414 int i; 9415 9416 if (!priv->mtr_en) { 9417 rte_errno = ENOTSUP; 9418 return NULL; 9419 } 9420 mtb = rte_calloc(__func__, 1, sizeof(*mtb), 0); 9421 if (!mtb) { 9422 DRV_LOG(ERR, "Failed to allocate memory for meter."); 9423 return NULL; 9424 } 9425 /* Create meter count actions */ 9426 for (i = 0; i <= RTE_MTR_DROPPED; i++) { 9427 struct mlx5_flow_counter *cnt; 9428 if (!fm->policer_stats.cnt[i]) 9429 continue; 9430 cnt = flow_dv_counter_get_by_idx(dev, 9431 fm->policer_stats.cnt[i], NULL); 9432 mtb->count_actns[i] = cnt->action; 9433 } 9434 /* Create drop action. */ 9435 ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn); 9436 if (ret) { 9437 DRV_LOG(ERR, "Failed to create drop action."); 9438 goto error_exit; 9439 } 9440 /* Egress meter table. */ 9441 ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg); 9442 if (ret) { 9443 DRV_LOG(ERR, "Failed to prepare egress meter table."); 9444 goto error_exit; 9445 } 9446 /* Ingress meter table. */ 9447 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg); 9448 if (ret) { 9449 DRV_LOG(ERR, "Failed to prepare ingress meter table."); 9450 goto error_exit; 9451 } 9452 /* FDB meter table. */ 9453 if (priv->config.dv_esw_en) { 9454 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1, 9455 priv->mtr_color_reg); 9456 if (ret) { 9457 DRV_LOG(ERR, "Failed to prepare fdb meter table."); 9458 goto error_exit; 9459 } 9460 } 9461 return mtb; 9462 error_exit: 9463 flow_dv_destroy_mtr_tbl(dev, mtb); 9464 return NULL; 9465 } 9466 9467 /** 9468 * Destroy domain policer rule. 9469 * 9470 * @param[in] dt 9471 * Pointer to domain table. 9472 */ 9473 static void 9474 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt) 9475 { 9476 int i; 9477 9478 for (i = 0; i < RTE_MTR_DROPPED; i++) { 9479 if (dt->policer_rules[i]) { 9480 claim_zero(mlx5_flow_os_destroy_flow 9481 (dt->policer_rules[i])); 9482 dt->policer_rules[i] = NULL; 9483 } 9484 } 9485 if (dt->jump_actn) { 9486 claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn)); 9487 dt->jump_actn = NULL; 9488 } 9489 } 9490 9491 /** 9492 * Destroy policer rules. 9493 * 9494 * @param[in] dev 9495 * Pointer to Ethernet device. 9496 * @param[in] fm 9497 * Pointer to flow meter structure. 9498 * @param[in] attr 9499 * Pointer to flow attributes. 9500 * 9501 * @return 9502 * Always 0. 9503 */ 9504 static int 9505 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused, 9506 const struct mlx5_flow_meter *fm, 9507 const struct rte_flow_attr *attr) 9508 { 9509 struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL; 9510 9511 if (!mtb) 9512 return 0; 9513 if (attr->egress) 9514 flow_dv_destroy_domain_policer_rule(&mtb->egress); 9515 if (attr->ingress) 9516 flow_dv_destroy_domain_policer_rule(&mtb->ingress); 9517 if (attr->transfer) 9518 flow_dv_destroy_domain_policer_rule(&mtb->transfer); 9519 return 0; 9520 } 9521 9522 /** 9523 * Create specify domain meter policer rule. 9524 * 9525 * @param[in] fm 9526 * Pointer to flow meter structure. 9527 * @param[in] mtb 9528 * Pointer to DV meter table set. 9529 * @param[in] mtr_reg_c 9530 * Color match REG_C. 9531 * 9532 * @return 9533 * 0 on success, -1 otherwise. 
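 *
 * For every color index below RTE_MTR_DROPPED one rule is installed in the
 * domain's color matcher: it matches the meter color written into the
 * designated REG_C register and then either drops the packet or jumps to
 * the suffix table, optionally counting first. In sketch form (mirroring
 * the loop in the function body; helper names are pseudo-code):
 *
 * @code{.c}
 *	for (i = 0; i < RTE_MTR_DROPPED; i++) {
 *		match(color_reg == rte_col_2_mlx5_col(i));
 *		if (counter_for_color(i))
 *			add_action(count);
 *		add_action(fm->action[i] == MTR_POLICER_ACTION_DROP ?
 *			   drop : jump_to_suffix_table);
 *	}
 * @endcode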
9534 */ 9535 static int 9536 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm, 9537 struct mlx5_meter_domain_info *dtb, 9538 uint8_t mtr_reg_c) 9539 { 9540 struct mlx5_flow_dv_match_params matcher = { 9541 .size = sizeof(matcher.buf), 9542 }; 9543 struct mlx5_flow_dv_match_params value = { 9544 .size = sizeof(value.buf), 9545 }; 9546 struct mlx5_meter_domains_infos *mtb = fm->mfts; 9547 void *actions[METER_ACTIONS]; 9548 int i; 9549 int ret = 0; 9550 9551 /* Create jump action. */ 9552 if (!dtb->jump_actn) 9553 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl 9554 (dtb->sfx_tbl->obj, &dtb->jump_actn); 9555 if (ret) { 9556 DRV_LOG(ERR, "Failed to create policer jump action."); 9557 goto error; 9558 } 9559 for (i = 0; i < RTE_MTR_DROPPED; i++) { 9560 int j = 0; 9561 9562 flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c, 9563 rte_col_2_mlx5_col(i), UINT8_MAX); 9564 if (mtb->count_actns[i]) 9565 actions[j++] = mtb->count_actns[i]; 9566 if (fm->action[i] == MTR_POLICER_ACTION_DROP) 9567 actions[j++] = mtb->drop_actn; 9568 else 9569 actions[j++] = dtb->jump_actn; 9570 ret = mlx5_flow_os_create_flow(dtb->color_matcher, 9571 (void *)&value, j, actions, 9572 &dtb->policer_rules[i]); 9573 if (ret) { 9574 DRV_LOG(ERR, "Failed to create policer rule."); 9575 goto error; 9576 } 9577 } 9578 return 0; 9579 error: 9580 rte_errno = errno; 9581 return -1; 9582 } 9583 9584 /** 9585 * Create policer rules. 9586 * 9587 * @param[in] dev 9588 * Pointer to Ethernet device. 9589 * @param[in] fm 9590 * Pointer to flow meter structure. 9591 * @param[in] attr 9592 * Pointer to flow attributes. 9593 * 9594 * @return 9595 * 0 on success, -1 otherwise. 9596 */ 9597 static int 9598 flow_dv_create_policer_rules(struct rte_eth_dev *dev, 9599 struct mlx5_flow_meter *fm, 9600 const struct rte_flow_attr *attr) 9601 { 9602 struct mlx5_priv *priv = dev->data->dev_private; 9603 struct mlx5_meter_domains_infos *mtb = fm->mfts; 9604 int ret; 9605 9606 if (attr->egress) { 9607 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress, 9608 priv->mtr_color_reg); 9609 if (ret) { 9610 DRV_LOG(ERR, "Failed to create egress policer."); 9611 goto error; 9612 } 9613 } 9614 if (attr->ingress) { 9615 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress, 9616 priv->mtr_color_reg); 9617 if (ret) { 9618 DRV_LOG(ERR, "Failed to create ingress policer."); 9619 goto error; 9620 } 9621 } 9622 if (attr->transfer) { 9623 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer, 9624 priv->mtr_color_reg); 9625 if (ret) { 9626 DRV_LOG(ERR, "Failed to create transfer policer."); 9627 goto error; 9628 } 9629 } 9630 return 0; 9631 error: 9632 flow_dv_destroy_policer_rules(dev, fm, attr); 9633 return -1; 9634 } 9635 9636 /** 9637 * Query a devx counter. 9638 * 9639 * @param[in] dev 9640 * Pointer to the Ethernet device structure. 9641 * @param[in] cnt 9642 * Index to the flow counter. 9643 * @param[in] clear 9644 * Set to clear the counter statistics. 9645 * @param[out] pkts 9646 * The statistics value of packets. 9647 * @param[out] bytes 9648 * The statistics value of bytes. 9649 * 9650 * @return 9651 * 0 on success, otherwise return -1. 
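 *
 * The values reported are deltas against the baseline stored in the
 * counter at allocation or at the last clearing query, so repeated reads
 * without @p clear keep accumulating while a read with @p clear restarts
 * the interval. Schematically (grounded in the function body below):
 *
 * @code{.c}
 *	*pkts  = hw_pkts  - cnt->hits;   // delta since last reset
 *	*bytes = hw_bytes - cnt->bytes;
 *	if (clear) {
 *		cnt->hits  = hw_pkts;    // new baseline
 *		cnt->bytes = hw_bytes;
 *	}
 * @endcode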
9652 */ 9653 static int 9654 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear, 9655 uint64_t *pkts, uint64_t *bytes) 9656 { 9657 struct mlx5_priv *priv = dev->data->dev_private; 9658 struct mlx5_flow_counter *cnt; 9659 uint64_t inn_pkts, inn_bytes; 9660 int ret; 9661 9662 if (!priv->config.devx) 9663 return -1; 9664 9665 ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes); 9666 if (ret) 9667 return -1; 9668 cnt = flow_dv_counter_get_by_idx(dev, counter, NULL); 9669 *pkts = inn_pkts - cnt->hits; 9670 *bytes = inn_bytes - cnt->bytes; 9671 if (clear) { 9672 cnt->hits = inn_pkts; 9673 cnt->bytes = inn_bytes; 9674 } 9675 return 0; 9676 } 9677 9678 /** 9679 * Get aged-out flows. 9680 * 9681 * @param[in] dev 9682 * Pointer to the Ethernet device structure. 9683 * @param[in] context 9684 * The address of an array of pointers to the aged-out flows contexts. 9685 * @param[in] nb_contexts 9686 * The length of context array pointers. 9687 * @param[out] error 9688 * Perform verbose error reporting if not NULL. Initialized in case of 9689 * error only. 9690 * 9691 * @return 9692 * how many contexts get in success, otherwise negative errno value. 9693 * if nb_contexts is 0, return the amount of all aged contexts. 9694 * if nb_contexts is not 0 , return the amount of aged flows reported 9695 * in the context array. 9696 * @note: only stub for now 9697 */ 9698 static int 9699 flow_get_aged_flows(struct rte_eth_dev *dev, 9700 void **context, 9701 uint32_t nb_contexts, 9702 struct rte_flow_error *error) 9703 { 9704 struct mlx5_priv *priv = dev->data->dev_private; 9705 struct mlx5_age_info *age_info; 9706 struct mlx5_age_param *age_param; 9707 struct mlx5_flow_counter *counter; 9708 int nb_flows = 0; 9709 9710 if (nb_contexts && !context) 9711 return rte_flow_error_set(error, EINVAL, 9712 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 9713 NULL, 9714 "Should assign at least one flow or" 9715 " context to get if nb_contexts != 0"); 9716 age_info = GET_PORT_AGE_INFO(priv); 9717 rte_spinlock_lock(&age_info->aged_sl); 9718 TAILQ_FOREACH(counter, &age_info->aged_counters, next) { 9719 nb_flows++; 9720 if (nb_contexts) { 9721 age_param = MLX5_CNT_TO_AGE(counter); 9722 context[nb_flows - 1] = age_param->context; 9723 if (!(--nb_contexts)) 9724 break; 9725 } 9726 } 9727 rte_spinlock_unlock(&age_info->aged_sl); 9728 MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER); 9729 return nb_flows; 9730 } 9731 9732 /* 9733 * Mutex-protected thunk to lock-free __flow_dv_translate(). 9734 */ 9735 static int 9736 flow_dv_translate(struct rte_eth_dev *dev, 9737 struct mlx5_flow *dev_flow, 9738 const struct rte_flow_attr *attr, 9739 const struct rte_flow_item items[], 9740 const struct rte_flow_action actions[], 9741 struct rte_flow_error *error) 9742 { 9743 int ret; 9744 9745 flow_dv_shared_lock(dev); 9746 ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error); 9747 flow_dv_shared_unlock(dev); 9748 return ret; 9749 } 9750 9751 /* 9752 * Mutex-protected thunk to lock-free __flow_dv_apply(). 9753 */ 9754 static int 9755 flow_dv_apply(struct rte_eth_dev *dev, 9756 struct rte_flow *flow, 9757 struct rte_flow_error *error) 9758 { 9759 int ret; 9760 9761 flow_dv_shared_lock(dev); 9762 ret = __flow_dv_apply(dev, flow, error); 9763 flow_dv_shared_unlock(dev); 9764 return ret; 9765 } 9766 9767 /* 9768 * Mutex-protected thunk to lock-free __flow_dv_remove(). 
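 *
 * The same wrapper shape is used by the translate/apply/destroy and
 * counter alloc/free thunks below: take the per-device shared lock, call
 * the lock-free implementation, release the lock. For the aging support
 * implemented by flow_get_aged_flows() above, a hypothetical
 * application-side usage sketch (port_id, the flow rule and my_ctx are
 * assumed to exist):
 *
 * @code{.c}
 *	struct rte_flow_action_age age = { .timeout = 10, .context = my_ctx };
 *	// ... create the rule with an RTE_FLOW_ACTION_TYPE_AGE action ...
 *
 *	void *contexts[64];
 *	struct rte_flow_error flow_err;
 *	int i, n;
 *
 *	n = rte_flow_get_aged_flows(port_id, contexts,
 *				    RTE_DIM(contexts), &flow_err);
 *	for (i = 0; i < n; i++)
 *		handle_expired_flow(contexts[i]); // hypothetical callback
 * @endcode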
9769 */ 9770 static void 9771 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) 9772 { 9773 flow_dv_shared_lock(dev); 9774 __flow_dv_remove(dev, flow); 9775 flow_dv_shared_unlock(dev); 9776 } 9777 9778 /* 9779 * Mutex-protected thunk to lock-free __flow_dv_destroy(). 9780 */ 9781 static void 9782 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) 9783 { 9784 flow_dv_shared_lock(dev); 9785 __flow_dv_destroy(dev, flow); 9786 flow_dv_shared_unlock(dev); 9787 } 9788 9789 /* 9790 * Mutex-protected thunk to lock-free flow_dv_counter_alloc(). 9791 */ 9792 static uint32_t 9793 flow_dv_counter_allocate(struct rte_eth_dev *dev) 9794 { 9795 uint32_t cnt; 9796 9797 flow_dv_shared_lock(dev); 9798 cnt = flow_dv_counter_alloc(dev, 0, 0, 1, 0); 9799 flow_dv_shared_unlock(dev); 9800 return cnt; 9801 } 9802 9803 /* 9804 * Mutex-protected thunk to lock-free flow_dv_counter_release(). 9805 */ 9806 static void 9807 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt) 9808 { 9809 flow_dv_shared_lock(dev); 9810 flow_dv_counter_release(dev, cnt); 9811 flow_dv_shared_unlock(dev); 9812 } 9813 9814 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = { 9815 .validate = flow_dv_validate, 9816 .prepare = flow_dv_prepare, 9817 .translate = flow_dv_translate, 9818 .apply = flow_dv_apply, 9819 .remove = flow_dv_remove, 9820 .destroy = flow_dv_destroy, 9821 .query = flow_dv_query, 9822 .create_mtr_tbls = flow_dv_create_mtr_tbl, 9823 .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl, 9824 .create_policer_rules = flow_dv_create_policer_rules, 9825 .destroy_policer_rules = flow_dv_destroy_policer_rules, 9826 .counter_alloc = flow_dv_counter_allocate, 9827 .counter_free = flow_dv_counter_free, 9828 .counter_query = flow_dv_counter_query, 9829 .get_aged_flows = flow_get_aged_flows, 9830 }; 9831 9832 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */ 9833
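/*
 * Note: mlx5_flow_dv_drv_ops above is the DV (Direct Verbs/Direct Rules)
 * entry in the driver-ops dispatch used by the generic mlx5 flow layer and
 * is only selected when DV flow support is compiled in and enabled. A
 * purely hypothetical sketch of such a dispatch (the actual selection
 * logic lives in mlx5_flow.c and may differ):
 *
 *	static const struct mlx5_flow_driver_ops *ops_by_type[] = {
 *		[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
 *		// [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops, ...
 *	};
 *
 *	const struct mlx5_flow_driver_ops *ops = ops_by_type[flow_type];
 *	ret = ops->validate(dev, attr, items, actions, ...);
 */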